There are some issues in the drivers that cannot be fixed now. The drivers do not meet the LTS quality requirements of openEuler, so remove them.
Yanling Song (1):
  net/spnic: Remove spnic driver.

Yun Xu (1):
  SCSI: spfc: remove SPFC driver
arch/arm64/configs/openeuler_defconfig | 3 - arch/x86/configs/openeuler_defconfig | 3 - drivers/net/ethernet/Kconfig | 1 - drivers/net/ethernet/Makefile | 1 - drivers/net/ethernet/ramaxel/Kconfig | 20 - drivers/net/ethernet/ramaxel/Makefile | 6 - drivers/net/ethernet/ramaxel/spnic/Kconfig | 14 - drivers/net/ethernet/ramaxel/spnic/Makefile | 39 - .../ethernet/ramaxel/spnic/hw/sphw_api_cmd.c | 1165 ---- .../ethernet/ramaxel/spnic/hw/sphw_api_cmd.h | 277 - .../ethernet/ramaxel/spnic/hw/sphw_cfg_cmd.h | 127 - .../net/ethernet/ramaxel/spnic/hw/sphw_cmdq.c | 1573 ------ .../net/ethernet/ramaxel/spnic/hw/sphw_cmdq.h | 195 - .../ethernet/ramaxel/spnic/hw/sphw_comm_cmd.h | 60 - .../ramaxel/spnic/hw/sphw_comm_msg_intf.h | 273 - .../ethernet/ramaxel/spnic/hw/sphw_common.c | 88 - .../ethernet/ramaxel/spnic/hw/sphw_common.h | 106 - .../net/ethernet/ramaxel/spnic/hw/sphw_crm.h | 982 ---- .../net/ethernet/ramaxel/spnic/hw/sphw_csr.h | 158 - .../net/ethernet/ramaxel/spnic/hw/sphw_eqs.c | 1272 ----- .../net/ethernet/ramaxel/spnic/hw/sphw_eqs.h | 157 - .../net/ethernet/ramaxel/spnic/hw/sphw_hw.h | 643 --- .../ethernet/ramaxel/spnic/hw/sphw_hw_cfg.c | 1341 ----- .../ethernet/ramaxel/spnic/hw/sphw_hw_cfg.h | 329 -- .../ethernet/ramaxel/spnic/hw/sphw_hw_comm.c | 1280 ----- .../ethernet/ramaxel/spnic/hw/sphw_hw_comm.h | 45 - .../ethernet/ramaxel/spnic/hw/sphw_hwdev.c | 1324 ----- .../ethernet/ramaxel/spnic/hw/sphw_hwdev.h | 93 - .../net/ethernet/ramaxel/spnic/hw/sphw_hwif.c | 886 --- .../net/ethernet/ramaxel/spnic/hw/sphw_hwif.h | 102 - .../net/ethernet/ramaxel/spnic/hw/sphw_mbox.c | 1792 ------ .../net/ethernet/ramaxel/spnic/hw/sphw_mbox.h | 271 - .../net/ethernet/ramaxel/spnic/hw/sphw_mgmt.c | 895 --- .../net/ethernet/ramaxel/spnic/hw/sphw_mgmt.h | 106 - .../ramaxel/spnic/hw/sphw_mgmt_msg_base.h | 19 - .../net/ethernet/ramaxel/spnic/hw/sphw_mt.h | 533 -- .../ramaxel/spnic/hw/sphw_prof_adap.c | 94 - .../ramaxel/spnic/hw/sphw_prof_adap.h | 49 - .../ethernet/ramaxel/spnic/hw/sphw_profile.h | 36 - .../net/ethernet/ramaxel/spnic/hw/sphw_wq.c | 152 - .../net/ethernet/ramaxel/spnic/hw/sphw_wq.h | 119 - .../net/ethernet/ramaxel/spnic/spnic_dbg.c | 752 --- .../net/ethernet/ramaxel/spnic/spnic_dcb.c | 965 ---- .../net/ethernet/ramaxel/spnic/spnic_dcb.h | 56 - .../ethernet/ramaxel/spnic/spnic_dev_mgmt.c | 811 --- .../ethernet/ramaxel/spnic/spnic_dev_mgmt.h | 78 - .../ethernet/ramaxel/spnic/spnic_ethtool.c | 994 ---- .../ramaxel/spnic/spnic_ethtool_stats.c | 1035 ---- .../net/ethernet/ramaxel/spnic/spnic_filter.c | 411 -- .../net/ethernet/ramaxel/spnic/spnic_irq.c | 178 - .../net/ethernet/ramaxel/spnic/spnic_lld.c | 937 ---- .../net/ethernet/ramaxel/spnic/spnic_lld.h | 75 - .../ethernet/ramaxel/spnic/spnic_mag_cfg.c | 778 --- .../ethernet/ramaxel/spnic/spnic_mag_cmd.h | 643 --- .../net/ethernet/ramaxel/spnic/spnic_main.c | 924 ---- .../ramaxel/spnic/spnic_mgmt_interface.h | 605 -- .../ethernet/ramaxel/spnic/spnic_netdev_ops.c | 1526 ------ .../net/ethernet/ramaxel/spnic/spnic_nic.h | 148 - .../ethernet/ramaxel/spnic/spnic_nic_cfg.c | 1334 ----- .../ethernet/ramaxel/spnic/spnic_nic_cfg.h | 709 --- .../ethernet/ramaxel/spnic/spnic_nic_cfg_vf.c | 658 --- .../ethernet/ramaxel/spnic/spnic_nic_cmd.h | 105 - .../ethernet/ramaxel/spnic/spnic_nic_dbg.c | 151 - .../ethernet/ramaxel/spnic/spnic_nic_dbg.h | 16 - .../ethernet/ramaxel/spnic/spnic_nic_dev.h | 354 -- .../ethernet/ramaxel/spnic/spnic_nic_event.c | 506 -- .../net/ethernet/ramaxel/spnic/spnic_nic_io.c | 1123 ---- .../net/ethernet/ramaxel/spnic/spnic_nic_io.h | 305 - 
.../net/ethernet/ramaxel/spnic/spnic_nic_qp.h | 416 -- .../net/ethernet/ramaxel/spnic/spnic_ntuple.c | 841 --- .../ethernet/ramaxel/spnic/spnic_pci_id_tbl.h | 12 - .../net/ethernet/ramaxel/spnic/spnic_rss.c | 741 --- .../net/ethernet/ramaxel/spnic/spnic_rss.h | 50 - .../ethernet/ramaxel/spnic/spnic_rss_cfg.c | 333 -- drivers/net/ethernet/ramaxel/spnic/spnic_rx.c | 1238 ----- drivers/net/ethernet/ramaxel/spnic/spnic_rx.h | 118 - .../net/ethernet/ramaxel/spnic/spnic_sriov.c | 200 - .../net/ethernet/ramaxel/spnic/spnic_sriov.h | 24 - drivers/net/ethernet/ramaxel/spnic/spnic_tx.c | 877 --- drivers/net/ethernet/ramaxel/spnic/spnic_tx.h | 129 - drivers/scsi/Kconfig | 1 - drivers/scsi/Makefile | 1 - drivers/scsi/spfc/Kconfig | 16 - drivers/scsi/spfc/Makefile | 47 - drivers/scsi/spfc/common/unf_common.h | 1753 ------ drivers/scsi/spfc/common/unf_disc.c | 1276 ----- drivers/scsi/spfc/common/unf_disc.h | 51 - drivers/scsi/spfc/common/unf_event.c | 517 -- drivers/scsi/spfc/common/unf_event.h | 83 - drivers/scsi/spfc/common/unf_exchg.c | 2317 -------- drivers/scsi/spfc/common/unf_exchg.h | 436 -- drivers/scsi/spfc/common/unf_exchg_abort.c | 825 --- drivers/scsi/spfc/common/unf_exchg_abort.h | 23 - drivers/scsi/spfc/common/unf_fcstruct.h | 459 -- drivers/scsi/spfc/common/unf_gs.c | 2521 --------- drivers/scsi/spfc/common/unf_gs.h | 58 - drivers/scsi/spfc/common/unf_init.c | 353 -- drivers/scsi/spfc/common/unf_io.c | 1219 ---- drivers/scsi/spfc/common/unf_io.h | 96 - drivers/scsi/spfc/common/unf_io_abnormal.c | 986 ---- drivers/scsi/spfc/common/unf_io_abnormal.h | 19 - drivers/scsi/spfc/common/unf_log.h | 178 - drivers/scsi/spfc/common/unf_lport.c | 1008 ---- drivers/scsi/spfc/common/unf_lport.h | 519 -- drivers/scsi/spfc/common/unf_ls.c | 4883 ----------------- drivers/scsi/spfc/common/unf_ls.h | 61 - drivers/scsi/spfc/common/unf_npiv.c | 1005 ---- drivers/scsi/spfc/common/unf_npiv.h | 47 - drivers/scsi/spfc/common/unf_npiv_portman.c | 360 -- drivers/scsi/spfc/common/unf_npiv_portman.h | 17 - drivers/scsi/spfc/common/unf_portman.c | 2431 -------- drivers/scsi/spfc/common/unf_portman.h | 96 - drivers/scsi/spfc/common/unf_rport.c | 2286 -------- drivers/scsi/spfc/common/unf_rport.h | 301 - drivers/scsi/spfc/common/unf_scsi.c | 1462 ----- drivers/scsi/spfc/common/unf_scsi_common.h | 570 -- drivers/scsi/spfc/common/unf_service.c | 1430 ----- drivers/scsi/spfc/common/unf_service.h | 66 - drivers/scsi/spfc/common/unf_type.h | 216 - drivers/scsi/spfc/hw/spfc_chipitf.c | 1105 ---- drivers/scsi/spfc/hw/spfc_chipitf.h | 797 --- drivers/scsi/spfc/hw/spfc_cqm_bat_cla.c | 1611 ------ drivers/scsi/spfc/hw/spfc_cqm_bat_cla.h | 215 - drivers/scsi/spfc/hw/spfc_cqm_bitmap_table.c | 885 --- drivers/scsi/spfc/hw/spfc_cqm_bitmap_table.h | 65 - drivers/scsi/spfc/hw/spfc_cqm_main.c | 987 ---- drivers/scsi/spfc/hw/spfc_cqm_main.h | 411 -- drivers/scsi/spfc/hw/spfc_cqm_object.c | 937 ---- drivers/scsi/spfc/hw/spfc_cqm_object.h | 279 - drivers/scsi/spfc/hw/spfc_hba.c | 1751 ------ drivers/scsi/spfc/hw/spfc_hba.h | 341 -- drivers/scsi/spfc/hw/spfc_hw_wqe.h | 1645 ------ drivers/scsi/spfc/hw/spfc_io.c | 1193 ---- drivers/scsi/spfc/hw/spfc_io.h | 138 - drivers/scsi/spfc/hw/spfc_lld.c | 997 ---- drivers/scsi/spfc/hw/spfc_lld.h | 76 - drivers/scsi/spfc/hw/spfc_module.h | 297 - drivers/scsi/spfc/hw/spfc_parent_context.h | 269 - drivers/scsi/spfc/hw/spfc_queue.c | 4852 ---------------- drivers/scsi/spfc/hw/spfc_queue.h | 711 --- drivers/scsi/spfc/hw/spfc_service.c | 2170 -------- drivers/scsi/spfc/hw/spfc_service.h | 282 - 
drivers/scsi/spfc/hw/spfc_utils.c | 102 - drivers/scsi/spfc/hw/spfc_utils.h | 202 - drivers/scsi/spfc/hw/spfc_wqe.c | 646 --- drivers/scsi/spfc/hw/spfc_wqe.h | 239 - drivers/scsi/spfc/sphw_api_cmd.c | 1 - drivers/scsi/spfc/sphw_cmdq.c | 1 - drivers/scsi/spfc/sphw_common.c | 1 - drivers/scsi/spfc/sphw_eqs.c | 1 - drivers/scsi/spfc/sphw_hw_cfg.c | 1 - drivers/scsi/spfc/sphw_hw_comm.c | 1 - drivers/scsi/spfc/sphw_hwdev.c | 1 - drivers/scsi/spfc/sphw_hwif.c | 1 - drivers/scsi/spfc/sphw_mbox.c | 1 - drivers/scsi/spfc/sphw_mgmt.c | 1 - drivers/scsi/spfc/sphw_prof_adap.c | 1 - drivers/scsi/spfc/sphw_wq.c | 1 - 158 files changed, 90993 deletions(-) delete mode 100644 drivers/net/ethernet/ramaxel/Kconfig delete mode 100644 drivers/net/ethernet/ramaxel/Makefile delete mode 100644 drivers/net/ethernet/ramaxel/spnic/Kconfig delete mode 100644 drivers/net/ethernet/ramaxel/spnic/Makefile delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_api_cmd.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_api_cmd.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_cfg_cmd.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_cmdq.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_cmdq.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_comm_cmd.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_comm_msg_intf.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_common.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_common.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_crm.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_csr.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_eqs.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_eqs.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_cfg.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_cfg.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_comm.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_comm.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwdev.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwdev.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwif.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwif.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_mbox.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_mbox.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt_msg_base.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_mt.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_prof_adap.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_prof_adap.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_profile.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_wq.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_wq.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_dbg.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_dcb.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_dcb.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_dev_mgmt.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_dev_mgmt.h 
delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_ethtool.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_ethtool_stats.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_filter.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_irq.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_lld.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_lld.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_mag_cfg.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_mag_cmd.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_main.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_mgmt_interface.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_netdev_ops.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg_vf.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_cmd.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_dbg.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_dbg.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_dev.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_event.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_io.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_io.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_qp.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_ntuple.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_pci_id_tbl.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_rss.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_rss.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_rss_cfg.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_rx.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_rx.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_sriov.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_sriov.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_tx.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_tx.h delete mode 100644 drivers/scsi/spfc/Kconfig delete mode 100644 drivers/scsi/spfc/Makefile delete mode 100644 drivers/scsi/spfc/common/unf_common.h delete mode 100644 drivers/scsi/spfc/common/unf_disc.c delete mode 100644 drivers/scsi/spfc/common/unf_disc.h delete mode 100644 drivers/scsi/spfc/common/unf_event.c delete mode 100644 drivers/scsi/spfc/common/unf_event.h delete mode 100644 drivers/scsi/spfc/common/unf_exchg.c delete mode 100644 drivers/scsi/spfc/common/unf_exchg.h delete mode 100644 drivers/scsi/spfc/common/unf_exchg_abort.c delete mode 100644 drivers/scsi/spfc/common/unf_exchg_abort.h delete mode 100644 drivers/scsi/spfc/common/unf_fcstruct.h delete mode 100644 drivers/scsi/spfc/common/unf_gs.c delete mode 100644 drivers/scsi/spfc/common/unf_gs.h delete mode 100644 drivers/scsi/spfc/common/unf_init.c delete mode 100644 drivers/scsi/spfc/common/unf_io.c delete mode 100644 drivers/scsi/spfc/common/unf_io.h delete mode 100644 drivers/scsi/spfc/common/unf_io_abnormal.c delete mode 100644 drivers/scsi/spfc/common/unf_io_abnormal.h delete mode 100644 drivers/scsi/spfc/common/unf_log.h delete mode 100644 drivers/scsi/spfc/common/unf_lport.c delete mode 100644 
drivers/scsi/spfc/common/unf_lport.h delete mode 100644 drivers/scsi/spfc/common/unf_ls.c delete mode 100644 drivers/scsi/spfc/common/unf_ls.h delete mode 100644 drivers/scsi/spfc/common/unf_npiv.c delete mode 100644 drivers/scsi/spfc/common/unf_npiv.h delete mode 100644 drivers/scsi/spfc/common/unf_npiv_portman.c delete mode 100644 drivers/scsi/spfc/common/unf_npiv_portman.h delete mode 100644 drivers/scsi/spfc/common/unf_portman.c delete mode 100644 drivers/scsi/spfc/common/unf_portman.h delete mode 100644 drivers/scsi/spfc/common/unf_rport.c delete mode 100644 drivers/scsi/spfc/common/unf_rport.h delete mode 100644 drivers/scsi/spfc/common/unf_scsi.c delete mode 100644 drivers/scsi/spfc/common/unf_scsi_common.h delete mode 100644 drivers/scsi/spfc/common/unf_service.c delete mode 100644 drivers/scsi/spfc/common/unf_service.h delete mode 100644 drivers/scsi/spfc/common/unf_type.h delete mode 100644 drivers/scsi/spfc/hw/spfc_chipitf.c delete mode 100644 drivers/scsi/spfc/hw/spfc_chipitf.h delete mode 100644 drivers/scsi/spfc/hw/spfc_cqm_bat_cla.c delete mode 100644 drivers/scsi/spfc/hw/spfc_cqm_bat_cla.h delete mode 100644 drivers/scsi/spfc/hw/spfc_cqm_bitmap_table.c delete mode 100644 drivers/scsi/spfc/hw/spfc_cqm_bitmap_table.h delete mode 100644 drivers/scsi/spfc/hw/spfc_cqm_main.c delete mode 100644 drivers/scsi/spfc/hw/spfc_cqm_main.h delete mode 100644 drivers/scsi/spfc/hw/spfc_cqm_object.c delete mode 100644 drivers/scsi/spfc/hw/spfc_cqm_object.h delete mode 100644 drivers/scsi/spfc/hw/spfc_hba.c delete mode 100644 drivers/scsi/spfc/hw/spfc_hba.h delete mode 100644 drivers/scsi/spfc/hw/spfc_hw_wqe.h delete mode 100644 drivers/scsi/spfc/hw/spfc_io.c delete mode 100644 drivers/scsi/spfc/hw/spfc_io.h delete mode 100644 drivers/scsi/spfc/hw/spfc_lld.c delete mode 100644 drivers/scsi/spfc/hw/spfc_lld.h delete mode 100644 drivers/scsi/spfc/hw/spfc_module.h delete mode 100644 drivers/scsi/spfc/hw/spfc_parent_context.h delete mode 100644 drivers/scsi/spfc/hw/spfc_queue.c delete mode 100644 drivers/scsi/spfc/hw/spfc_queue.h delete mode 100644 drivers/scsi/spfc/hw/spfc_service.c delete mode 100644 drivers/scsi/spfc/hw/spfc_service.h delete mode 100644 drivers/scsi/spfc/hw/spfc_utils.c delete mode 100644 drivers/scsi/spfc/hw/spfc_utils.h delete mode 100644 drivers/scsi/spfc/hw/spfc_wqe.c delete mode 100644 drivers/scsi/spfc/hw/spfc_wqe.h delete mode 120000 drivers/scsi/spfc/sphw_api_cmd.c delete mode 120000 drivers/scsi/spfc/sphw_cmdq.c delete mode 120000 drivers/scsi/spfc/sphw_common.c delete mode 120000 drivers/scsi/spfc/sphw_eqs.c delete mode 120000 drivers/scsi/spfc/sphw_hw_cfg.c delete mode 120000 drivers/scsi/spfc/sphw_hw_comm.c delete mode 120000 drivers/scsi/spfc/sphw_hwdev.c delete mode 120000 drivers/scsi/spfc/sphw_hwif.c delete mode 120000 drivers/scsi/spfc/sphw_mbox.c delete mode 120000 drivers/scsi/spfc/sphw_mgmt.c delete mode 120000 drivers/scsi/spfc/sphw_prof_adap.c delete mode 120000 drivers/scsi/spfc/sphw_wq.c
From: Yun Xu <xuyun@ramaxel.com>
Ramaxel inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4ZR0O
CVE: NA
There are some issues in the driver that cannot be fixed now. The driver does not meet the LTS quality requirements of openEuler, so remove it.
Signed-off-by: Yanling Song songyl@ramaxel.com Reviewed-by: Yun Xu xuyun@ramaxel.com --- arch/arm64/configs/openeuler_defconfig | 1 - arch/x86/configs/openeuler_defconfig | 1 - drivers/scsi/Kconfig | 1 - drivers/scsi/Makefile | 1 - drivers/scsi/spfc/Kconfig | 16 - drivers/scsi/spfc/Makefile | 47 - drivers/scsi/spfc/common/unf_common.h | 1753 ------- drivers/scsi/spfc/common/unf_disc.c | 1276 ----- drivers/scsi/spfc/common/unf_disc.h | 51 - drivers/scsi/spfc/common/unf_event.c | 517 -- drivers/scsi/spfc/common/unf_event.h | 83 - drivers/scsi/spfc/common/unf_exchg.c | 2317 --------- drivers/scsi/spfc/common/unf_exchg.h | 436 -- drivers/scsi/spfc/common/unf_exchg_abort.c | 825 --- drivers/scsi/spfc/common/unf_exchg_abort.h | 23 - drivers/scsi/spfc/common/unf_fcstruct.h | 459 -- drivers/scsi/spfc/common/unf_gs.c | 2521 --------- drivers/scsi/spfc/common/unf_gs.h | 58 - drivers/scsi/spfc/common/unf_init.c | 353 -- drivers/scsi/spfc/common/unf_io.c | 1219 ----- drivers/scsi/spfc/common/unf_io.h | 96 - drivers/scsi/spfc/common/unf_io_abnormal.c | 986 ---- drivers/scsi/spfc/common/unf_io_abnormal.h | 19 - drivers/scsi/spfc/common/unf_log.h | 178 - drivers/scsi/spfc/common/unf_lport.c | 1008 ---- drivers/scsi/spfc/common/unf_lport.h | 519 -- drivers/scsi/spfc/common/unf_ls.c | 4883 ------------------ drivers/scsi/spfc/common/unf_ls.h | 61 - drivers/scsi/spfc/common/unf_npiv.c | 1005 ---- drivers/scsi/spfc/common/unf_npiv.h | 47 - drivers/scsi/spfc/common/unf_npiv_portman.c | 360 -- drivers/scsi/spfc/common/unf_npiv_portman.h | 17 - drivers/scsi/spfc/common/unf_portman.c | 2431 --------- drivers/scsi/spfc/common/unf_portman.h | 96 - drivers/scsi/spfc/common/unf_rport.c | 2286 -------- drivers/scsi/spfc/common/unf_rport.h | 301 -- drivers/scsi/spfc/common/unf_scsi.c | 1462 ------ drivers/scsi/spfc/common/unf_scsi_common.h | 570 -- drivers/scsi/spfc/common/unf_service.c | 1430 ----- drivers/scsi/spfc/common/unf_service.h | 66 - drivers/scsi/spfc/common/unf_type.h | 216 - drivers/scsi/spfc/hw/spfc_chipitf.c | 1105 ---- drivers/scsi/spfc/hw/spfc_chipitf.h | 797 --- drivers/scsi/spfc/hw/spfc_cqm_bat_cla.c | 1611 ------ drivers/scsi/spfc/hw/spfc_cqm_bat_cla.h | 215 - drivers/scsi/spfc/hw/spfc_cqm_bitmap_table.c | 885 ---- drivers/scsi/spfc/hw/spfc_cqm_bitmap_table.h | 65 - drivers/scsi/spfc/hw/spfc_cqm_main.c | 987 ---- drivers/scsi/spfc/hw/spfc_cqm_main.h | 411 -- drivers/scsi/spfc/hw/spfc_cqm_object.c | 937 ---- drivers/scsi/spfc/hw/spfc_cqm_object.h | 279 - drivers/scsi/spfc/hw/spfc_hba.c | 1751 ------- drivers/scsi/spfc/hw/spfc_hba.h | 341 -- drivers/scsi/spfc/hw/spfc_hw_wqe.h | 1645 ------ drivers/scsi/spfc/hw/spfc_io.c | 1193 ----- drivers/scsi/spfc/hw/spfc_io.h | 138 - drivers/scsi/spfc/hw/spfc_lld.c | 997 ---- drivers/scsi/spfc/hw/spfc_lld.h | 76 - drivers/scsi/spfc/hw/spfc_module.h | 297 -- drivers/scsi/spfc/hw/spfc_parent_context.h | 269 - drivers/scsi/spfc/hw/spfc_queue.c | 4852 ----------------- drivers/scsi/spfc/hw/spfc_queue.h | 711 --- drivers/scsi/spfc/hw/spfc_service.c | 2170 -------- drivers/scsi/spfc/hw/spfc_service.h | 282 - drivers/scsi/spfc/hw/spfc_utils.c | 102 - drivers/scsi/spfc/hw/spfc_utils.h | 202 - drivers/scsi/spfc/hw/spfc_wqe.c | 646 --- drivers/scsi/spfc/hw/spfc_wqe.h | 239 - drivers/scsi/spfc/sphw_api_cmd.c | 1 - drivers/scsi/spfc/sphw_cmdq.c | 1 - drivers/scsi/spfc/sphw_common.c | 1 - drivers/scsi/spfc/sphw_eqs.c | 1 - drivers/scsi/spfc/sphw_hw_cfg.c | 1 - drivers/scsi/spfc/sphw_hw_comm.c | 1 - drivers/scsi/spfc/sphw_hwdev.c | 1 - drivers/scsi/spfc/sphw_hwif.c | 1 - 
drivers/scsi/spfc/sphw_mbox.c | 1 - drivers/scsi/spfc/sphw_mgmt.c | 1 - drivers/scsi/spfc/sphw_prof_adap.c | 1 - drivers/scsi/spfc/sphw_wq.c | 1 - 80 files changed, 53210 deletions(-) delete mode 100644 drivers/scsi/spfc/Kconfig delete mode 100644 drivers/scsi/spfc/Makefile delete mode 100644 drivers/scsi/spfc/common/unf_common.h delete mode 100644 drivers/scsi/spfc/common/unf_disc.c delete mode 100644 drivers/scsi/spfc/common/unf_disc.h delete mode 100644 drivers/scsi/spfc/common/unf_event.c delete mode 100644 drivers/scsi/spfc/common/unf_event.h delete mode 100644 drivers/scsi/spfc/common/unf_exchg.c delete mode 100644 drivers/scsi/spfc/common/unf_exchg.h delete mode 100644 drivers/scsi/spfc/common/unf_exchg_abort.c delete mode 100644 drivers/scsi/spfc/common/unf_exchg_abort.h delete mode 100644 drivers/scsi/spfc/common/unf_fcstruct.h delete mode 100644 drivers/scsi/spfc/common/unf_gs.c delete mode 100644 drivers/scsi/spfc/common/unf_gs.h delete mode 100644 drivers/scsi/spfc/common/unf_init.c delete mode 100644 drivers/scsi/spfc/common/unf_io.c delete mode 100644 drivers/scsi/spfc/common/unf_io.h delete mode 100644 drivers/scsi/spfc/common/unf_io_abnormal.c delete mode 100644 drivers/scsi/spfc/common/unf_io_abnormal.h delete mode 100644 drivers/scsi/spfc/common/unf_log.h delete mode 100644 drivers/scsi/spfc/common/unf_lport.c delete mode 100644 drivers/scsi/spfc/common/unf_lport.h delete mode 100644 drivers/scsi/spfc/common/unf_ls.c delete mode 100644 drivers/scsi/spfc/common/unf_ls.h delete mode 100644 drivers/scsi/spfc/common/unf_npiv.c delete mode 100644 drivers/scsi/spfc/common/unf_npiv.h delete mode 100644 drivers/scsi/spfc/common/unf_npiv_portman.c delete mode 100644 drivers/scsi/spfc/common/unf_npiv_portman.h delete mode 100644 drivers/scsi/spfc/common/unf_portman.c delete mode 100644 drivers/scsi/spfc/common/unf_portman.h delete mode 100644 drivers/scsi/spfc/common/unf_rport.c delete mode 100644 drivers/scsi/spfc/common/unf_rport.h delete mode 100644 drivers/scsi/spfc/common/unf_scsi.c delete mode 100644 drivers/scsi/spfc/common/unf_scsi_common.h delete mode 100644 drivers/scsi/spfc/common/unf_service.c delete mode 100644 drivers/scsi/spfc/common/unf_service.h delete mode 100644 drivers/scsi/spfc/common/unf_type.h delete mode 100644 drivers/scsi/spfc/hw/spfc_chipitf.c delete mode 100644 drivers/scsi/spfc/hw/spfc_chipitf.h delete mode 100644 drivers/scsi/spfc/hw/spfc_cqm_bat_cla.c delete mode 100644 drivers/scsi/spfc/hw/spfc_cqm_bat_cla.h delete mode 100644 drivers/scsi/spfc/hw/spfc_cqm_bitmap_table.c delete mode 100644 drivers/scsi/spfc/hw/spfc_cqm_bitmap_table.h delete mode 100644 drivers/scsi/spfc/hw/spfc_cqm_main.c delete mode 100644 drivers/scsi/spfc/hw/spfc_cqm_main.h delete mode 100644 drivers/scsi/spfc/hw/spfc_cqm_object.c delete mode 100644 drivers/scsi/spfc/hw/spfc_cqm_object.h delete mode 100644 drivers/scsi/spfc/hw/spfc_hba.c delete mode 100644 drivers/scsi/spfc/hw/spfc_hba.h delete mode 100644 drivers/scsi/spfc/hw/spfc_hw_wqe.h delete mode 100644 drivers/scsi/spfc/hw/spfc_io.c delete mode 100644 drivers/scsi/spfc/hw/spfc_io.h delete mode 100644 drivers/scsi/spfc/hw/spfc_lld.c delete mode 100644 drivers/scsi/spfc/hw/spfc_lld.h delete mode 100644 drivers/scsi/spfc/hw/spfc_module.h delete mode 100644 drivers/scsi/spfc/hw/spfc_parent_context.h delete mode 100644 drivers/scsi/spfc/hw/spfc_queue.c delete mode 100644 drivers/scsi/spfc/hw/spfc_queue.h delete mode 100644 drivers/scsi/spfc/hw/spfc_service.c delete mode 100644 drivers/scsi/spfc/hw/spfc_service.h delete mode 
100644 drivers/scsi/spfc/hw/spfc_utils.c delete mode 100644 drivers/scsi/spfc/hw/spfc_utils.h delete mode 100644 drivers/scsi/spfc/hw/spfc_wqe.c delete mode 100644 drivers/scsi/spfc/hw/spfc_wqe.h delete mode 120000 drivers/scsi/spfc/sphw_api_cmd.c delete mode 120000 drivers/scsi/spfc/sphw_cmdq.c delete mode 120000 drivers/scsi/spfc/sphw_common.c delete mode 120000 drivers/scsi/spfc/sphw_eqs.c delete mode 120000 drivers/scsi/spfc/sphw_hw_cfg.c delete mode 120000 drivers/scsi/spfc/sphw_hw_comm.c delete mode 120000 drivers/scsi/spfc/sphw_hwdev.c delete mode 120000 drivers/scsi/spfc/sphw_hwif.c delete mode 120000 drivers/scsi/spfc/sphw_mbox.c delete mode 120000 drivers/scsi/spfc/sphw_mgmt.c delete mode 120000 drivers/scsi/spfc/sphw_prof_adap.c delete mode 120000 drivers/scsi/spfc/sphw_wq.c
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig index 401ab0f99631..1fd250cc103c 100644 --- a/arch/arm64/configs/openeuler_defconfig +++ b/arch/arm64/configs/openeuler_defconfig @@ -2403,7 +2403,6 @@ CONFIG_SCSI_QLA_FC=m CONFIG_SCSI_QLA_ISCSI=m CONFIG_QEDI=m CONFIG_QEDF=m -CONFIG_SPFC=m CONFIG_SCSI_HUAWEI_FC=m CONFIG_SCSI_FC_HIFC=m CONFIG_SCSI_LPFC=m diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig index 64afe7021c45..cd9aaba18fa4 100644 --- a/arch/x86/configs/openeuler_defconfig +++ b/arch/x86/configs/openeuler_defconfig @@ -2367,7 +2367,6 @@ CONFIG_SCSI_QLA_FC=m CONFIG_SCSI_QLA_ISCSI=m CONFIG_QEDI=m CONFIG_QEDF=m -CONFIG_SPFC=m CONFIG_SCSI_HUAWEI_FC=m CONFIG_SCSI_FC_HIFC=m CONFIG_SCSI_LPFC=m diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 170d59df48d1..0fbe4edeccd0 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -1151,7 +1151,6 @@ source "drivers/scsi/qla2xxx/Kconfig" source "drivers/scsi/qla4xxx/Kconfig" source "drivers/scsi/qedi/Kconfig" source "drivers/scsi/qedf/Kconfig" -source "drivers/scsi/spfc/Kconfig" source "drivers/scsi/huawei/Kconfig"
config SCSI_LPFC diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 299d3318fac8..78a3c832394c 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -85,7 +85,6 @@ obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/ obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/ -obj-$(CONFIG_SPFC) += spfc/ obj-$(CONFIG_SCSI_LPFC) += lpfc/ obj-$(CONFIG_SCSI_HUAWEI_FC) += huawei/ obj-$(CONFIG_SCSI_BFA_FC) += bfa/ diff --git a/drivers/scsi/spfc/Kconfig b/drivers/scsi/spfc/Kconfig deleted file mode 100644 index 9d4566d90809..000000000000 --- a/drivers/scsi/spfc/Kconfig +++ /dev/null @@ -1,16 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -# -# Ramaxel SPFC driver configuration -# - -config SPFC - tristate "Ramaxel Fabric Channel Host Adapter Support" - default m - depends on PCI && SCSI - depends on SCSI_FC_ATTRS - depends on ARM64 || X86_64 - help - This driver supports Ramaxel Fabric Channel PCIe host adapter. - To compile this driver as part of the kernel, choose Y here. - If unsure, choose N. - The default is M. diff --git a/drivers/scsi/spfc/Makefile b/drivers/scsi/spfc/Makefile deleted file mode 100644 index 849b730ac733..000000000000 --- a/drivers/scsi/spfc/Makefile +++ /dev/null @@ -1,47 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_SPFC) += spfc.o - -subdir-ccflags-y += -I$(srctree)/$(src)/../../net/ethernet/ramaxel/spnic/hw -subdir-ccflags-y += -I$(srctree)/$(src)/hw -subdir-ccflags-y += -I$(srctree)/$(src)/common - -spfc-objs := common/unf_init.o \ - common/unf_event.o \ - common/unf_exchg.o \ - common/unf_exchg_abort.o \ - common/unf_io.o \ - common/unf_io_abnormal.o \ - common/unf_lport.o \ - common/unf_npiv.o \ - common/unf_npiv_portman.o \ - common/unf_disc.o \ - common/unf_rport.o \ - common/unf_service.o \ - common/unf_ls.o \ - common/unf_gs.o \ - common/unf_portman.o \ - common/unf_scsi.o \ - hw/spfc_utils.o \ - hw/spfc_lld.o \ - hw/spfc_io.o \ - hw/spfc_wqe.o \ - hw/spfc_service.o \ - hw/spfc_chipitf.o \ - hw/spfc_queue.o \ - hw/spfc_hba.o \ - hw/spfc_cqm_bat_cla.o \ - hw/spfc_cqm_bitmap_table.o \ - hw/spfc_cqm_main.o \ - hw/spfc_cqm_object.o \ - sphw_hwdev.o \ - sphw_hw_cfg.o \ - sphw_hw_comm.o \ - sphw_prof_adap.o \ - sphw_common.o \ - sphw_hwif.o \ - sphw_wq.o \ - sphw_cmdq.o \ - sphw_eqs.o \ - sphw_mbox.o \ - sphw_mgmt.o \ - sphw_api_cmd.o diff --git a/drivers/scsi/spfc/common/unf_common.h b/drivers/scsi/spfc/common/unf_common.h deleted file mode 100644 index 9613649308bf..000000000000 --- a/drivers/scsi/spfc/common/unf_common.h +++ /dev/null @@ -1,1753 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_COMMON_H -#define UNF_COMMON_H - -#include "unf_type.h" -#include "unf_fcstruct.h" - -/* version num */ -#define SPFC_DRV_VERSION "B101" -#define SPFC_DRV_DESC "Ramaxel Memory Technology Fibre Channel Driver" - -#define UNF_MAX_SECTORS 0xffff -#define UNF_PKG_FREE_OXID 0x0 -#define UNF_PKG_FREE_RXID 0x1 - -#define UNF_SPFC_MAXRPORT_NUM (2048) -#define SPFC_DEFAULT_RPORT_INDEX (UNF_SPFC_MAXRPORT_NUM - 1) - -/* session use sq num */ -#define UNF_SQ_NUM_PER_SESSION 3 - -extern atomic_t fc_mem_ref; -extern u32 unf_dgb_level; -extern u32 spfc_dif_type; -extern u32 spfc_dif_enable; -extern u8 spfc_guard; -extern int link_lose_tmo; - -/* define bits */ -#define UNF_BIT(n) (0x1UL << (n)) -#define UNF_BIT_0 UNF_BIT(0) -#define UNF_BIT_1 UNF_BIT(1) -#define UNF_BIT_2 UNF_BIT(2) -#define UNF_BIT_3 UNF_BIT(3) 
-#define UNF_BIT_4 UNF_BIT(4) -#define UNF_BIT_5 UNF_BIT(5) - -#define UNF_BITS_PER_BYTE 8 - -#define UNF_NOTIFY_UP_CLEAN_FLASH 2 - -/* Echo macro define */ -#define ECHO_MG_VERSION_LOCAL 1 -#define ECHO_MG_VERSION_REMOTE 2 - -#define SPFC_WIN_NPIV_NUM 32 - -#define UNF_GET_NAME_HIGH_WORD(name) (((name) >> 32) & 0xffffffff) -#define UNF_GET_NAME_LOW_WORD(name) ((name) & 0xffffffff) - -#define UNF_FIRST_LPORT_ID_MASK 0xffffff00 -#define UNF_PORT_ID_MASK 0x000000ff -#define UNF_FIRST_LPORT_ID 0x00000000 -#define UNF_SECOND_LPORT_ID 0x00000001 -#define UNF_EIGHTH_LPORT_ID 0x00000007 -#define SPFC_MAX_COUNTER_TYPE 128 - -#define UNF_EVENT_ASYN 0 -#define UNF_EVENT_SYN 1 -#define UNF_GLOBAL_EVENT_ASYN 2 -#define UNF_GLOBAL_EVENT_SYN 3 - -#define UNF_GET_SLOT_ID_BY_PORTID(port_id) (((port_id) & 0x001f00) >> 8) -#define UNF_GET_FUNC_ID_BY_PORTID(port_id) ((port_id) & 0x0000ff) -#define UNF_GET_BOARD_TYPE_AND_SLOT_ID_BY_PORTID(port_id) \ - (((port_id) & 0x00FF00) >> 8) - -#define UNF_FC_SERVER_BOARD_8_G 13 /* 8G mode */ -#define UNF_FC_SERVER_BOARD_16_G 7 /* 16G mode */ -#define UNF_FC_SERVER_BOARD_32_G 6 /* 32G mode */ - -#define UNF_PORT_TYPE_FC_QSFP 1 -#define UNF_PORT_TYPE_FC_SFP 0 -#define UNF_PORT_UNGRADE_FW_RESET_ACTIVE 0 -#define UNF_PORT_UNGRADE_FW_RESET_INACTIVE 1 - -enum unf_rport_qos_level { - UNF_QOS_LEVEL_DEFAULT = 0, - UNF_QOS_LEVEL_MIDDLE, - UNF_QOS_LEVEL_HIGH, - UNF_QOS_LEVEL_BUTT -}; - -struct buff_list { - u8 *vaddr; - dma_addr_t paddr; -}; - -struct buf_describe { - struct buff_list *buflist; - u32 buf_size; - u32 buf_num; -}; - -#define IO_STATICS -struct unf_port_info { - u32 local_nport_id; - u32 nport_id; - u32 rport_index; - u64 port_name; - enum unf_rport_qos_level qos_level; - u8 cs_ctrl; - u8 rsvd0[3]; - u32 sqn_base; -}; - -struct unf_cfg_item { - char *puc_name; - u32 min_value; - u32 default_value; - u32 max_value; -}; - -struct unf_port_param { - u32 ra_tov; - u32 ed_tov; -}; - -/* get wwpn adn wwnn */ -struct unf_get_chip_info_argout { - u8 board_type; - u64 wwpn; - u64 wwnn; - u64 sys_mac; -}; - -/* get sfp info: present and speed */ -struct unf_get_port_info_argout { - u8 sfp_speed; - u8 present; - u8 rsvd[2]; -}; - -/* SFF-8436(QSFP+) Rev 4.7 */ -struct unf_sfp_plus_field_a0 { - u8 identifier; - /* offset 1~2 */ - struct { - u8 reserved; - u8 status; - } status_indicator; - /* offset 3~21 */ - struct { - u8 rx_tx_los; - u8 tx_fault; - u8 all_resv; - - u8 ini_complete : 1; - u8 bit_resv : 3; - u8 temp_low_warn : 1; - u8 temp_high_warn : 1; - u8 temp_low_alarm : 1; - u8 temp_high_alarm : 1; - - u8 resv : 4; - u8 vcc_low_warn : 1; - u8 vcc_high_warn : 1; - u8 vcc_low_alarm : 1; - u8 vcc_high_alarm : 1; - - u8 resv8; - u8 rx_pow[2]; - u8 tx_bias[2]; - u8 reserved[6]; - u8 vendor_specifics[3]; - } interrupt_flag; - /* offset 22~33 */ - struct { - u8 temp[2]; - u8 reserved[2]; - u8 supply_vol[2]; - u8 reserveds[2]; - u8 vendor_specific[4]; - } module_monitors; - /* offset 34~81 */ - struct { - u8 rx_pow[8]; - u8 tx_bias[8]; - u8 reserved[16]; - u8 vendor_specific[16]; - } channel_monitor_val; - - /* offset 82~85 */ - u8 reserved[4]; - - /* offset 86~97 */ - struct { - /* 86~88 */ - u8 tx_disable; - u8 rx_rate_select; - u8 tx_rate_select; - - /* 89~92 */ - u8 rx4_app_select; - u8 rx3_app_select; - u8 rx2_app_select; - u8 rx1_app_select; - /* 93 */ - u8 power_override : 1; - u8 power_set : 1; - u8 reserved : 6; - - /* 94~97 */ - u8 tx4_app_select; - u8 tx3_app_select; - u8 tx2_app_select; - u8 tx1_app_select; - /* 98~99 */ - u8 reserved2[2]; - } control; - /* 100~106 */ 
- struct { - /* 100 */ - u8 m_rx1_los : 1; - u8 m_rx2_los : 1; - u8 m_rx3_los : 1; - u8 m_rx4_los : 1; - u8 m_tx1_los : 1; - u8 m_tx2_los : 1; - u8 m_tx3_los : 1; - u8 m_tx4_los : 1; - /* 101 */ - u8 m_tx1_fault : 1; - u8 m_tx2_fault : 1; - u8 m_tx3_fault : 1; - u8 m_tx4_fault : 1; - u8 reserved : 4; - /* 102 */ - u8 reserved1; - /* 103 */ - u8 mini_cmp_flag : 1; - u8 rsv : 3; - u8 m_temp_low_warn : 1; - u8 m_temp_high_warn : 1; - u8 m_temp_low_alarm : 1; - u8 m_temp_high_alarm : 1; - /* 104 */ - u8 rsv1 : 4; - u8 m_vcc_low_warn : 1; - u8 m_vcc_high_warn : 1; - u8 m_vcc_low_alarm : 1; - u8 m_vcc_high_alarm : 1; - /* 105~106 */ - u8 vendor_specific[2]; - } module_channel_mask_bit; - /* 107~118 */ - u8 resv[12]; - /* 119~126 */ - u8 password_reserved[8]; - /* 127 */ - u8 page_select; -}; - -/* page 00 */ -struct unf_sfp_plus_field_00 { - /* 128~191 */ - struct { - u8 id; - u8 id_ext; - u8 connector; - u8 speci_com[6]; - u8 mode; - u8 speed; - u8 encoding; - u8 br_nominal; - u8 ext_rate_select_com; - u8 length_smf; - u8 length_om3; - u8 length_om2; - u8 length_om1; - u8 length_copper; - u8 device_tech; - u8 vendor_name[16]; - u8 ex_module; - u8 vendor_oui[3]; - u8 vendor_pn[16]; - u8 vendor_rev[2]; - /* Wave length or Copper cable Attenuation*/ - u8 wave_or_copper_attenuation[2]; - u8 wave_length_toler[2]; /* Wavelength tolerance */ - u8 max_temp; - u8 cc_base; - } base_id_fields; - /* 192~223 */ - struct { - u8 options[4]; - u8 vendor_sn[16]; - u8 date_code[8]; - u8 diagn_monit_type; - u8 enhance_opt; - u8 reserved; - u8 ccext; - } ext_id_fields; - /* 224~255 */ - u8 vendor_spec_eeprom[32]; -}; - -/* page 01 */ -struct unf_sfp_plus_field_01 { - u8 optional01[128]; -}; - -/* page 02 */ -struct unf_sfp_plus_field_02 { - u8 optional02[128]; -}; - -/* page 03 */ -struct unf_sfp_plus_field_03 { - u8 temp_high_alarm[2]; - u8 temp_low_alarm[2]; - u8 temp_high_warn[2]; - u8 temp_low_warn[2]; - - u8 reserved1[8]; - - u8 vcc_high_alarm[2]; - u8 vcc_low_alarm[2]; - u8 vcc_high_warn[2]; - u8 vcc_low_warn[2]; - - u8 reserved2[8]; - u8 vendor_specific1[16]; - - u8 pow_high_alarm[2]; - u8 pow_low_alarm[2]; - u8 pow_high_warn[2]; - u8 pow_low_warn[2]; - - u8 bias_high_alarm[2]; - u8 bias_low_alarm[2]; - u8 bias_high_warn[2]; - u8 bias_low_warn[2]; - - u8 tx_power_high_alarm[2]; - u8 tx_power_low_alarm[2]; - u8 reserved3[4]; - - u8 reserved4[8]; - - u8 vendor_specific2[16]; - u8 reserved5[2]; - u8 vendor_specific3[12]; - u8 rx_ampl[2]; - u8 rx_tx_sq_disable; - u8 rx_output_disable; - u8 chan_monit_mask[12]; - u8 reserved6[2]; -}; - -struct unf_sfp_plus_info { - struct unf_sfp_plus_field_a0 sfp_plus_info_a0; - struct unf_sfp_plus_field_00 sfp_plus_info_00; - struct unf_sfp_plus_field_01 sfp_plus_info_01; - struct unf_sfp_plus_field_02 sfp_plus_info_02; - struct unf_sfp_plus_field_03 sfp_plus_info_03; -}; - -struct unf_sfp_data_field_a0 { - /* Offset 0~63 */ - struct { - u8 id; - u8 id_ext; - u8 connector; - u8 transceiver[8]; - u8 encoding; - u8 br_nominal; /* Nominal signalling rate, units of 100MBd. 
*/ - u8 rate_identifier; /* Type of rate select functionality */ - /* Link length supported for single mode fiber, units of km */ - u8 length_smk_km; - /* Link length supported for single mode fiber, - *units of 100 m - */ - u8 length_smf; - /* Link length supported for 50 um OM2 fiber,units of 10 m */ - u8 length_smf_om2; - /* Link length supported for 62.5 um OM1 fiber, units of 10 m */ - u8 length_smf_om1; - /*Link length supported for copper/direct attach cable, - *units of m - */ - u8 length_cable; - /* Link length supported for 50 um OM3 fiber, units of 10m */ - u8 length_om3; - u8 vendor_name[16]; /* ASCII */ - /* Code for electronic or optical compatibility*/ - u8 transceiver2; - u8 vendor_oui[3]; /* SFP vendor IEEE company ID */ - u8 vendor_pn[16]; /* Part number provided by SFP vendor (ASCII) - */ - /* Revision level for part number provided by vendor (ASCII) */ - u8 vendor_rev[4]; - /* Laser wavelength (Passive/Active Cable - *Specification Compliance) - */ - u8 wave_length[2]; - u8 unallocated; - /* Check code for Base ID Fields (addresses 0 to 62)*/ - u8 cc_base; - } base_id_fields; - - /* Offset 64~95 */ - struct { - u8 options[2]; - u8 br_max; - u8 br_min; - u8 vendor_sn[16]; - u8 date_code[8]; - u8 diag_monitoring_type; - u8 enhanced_options; - u8 sff8472_compliance; - u8 cc_ext; - } ext_id_fields; - - /* Offset 96~255 */ - struct { - u8 vendor_spec_eeprom[32]; - u8 rsvd[128]; - } vendor_spec_id_fields; -}; - -struct unf_sfp_data_field_a2 { - /* Offset 0~119 */ - struct { - /* 0~39 */ - struct { - u8 temp_alarm_high[2]; - u8 temp_alarm_low[2]; - u8 temp_warning_high[2]; - u8 temp_warning_low[2]; - - u8 vcc_alarm_high[2]; - u8 vcc_alarm_low[2]; - u8 vcc_warning_high[2]; - u8 vcc_warning_low[2]; - - u8 bias_alarm_high[2]; - u8 bias_alarm_low[2]; - u8 bias_warning_high[2]; - u8 bias_warning_low[2]; - - u8 tx_alarm_high[2]; - u8 tx_alarm_low[2]; - u8 tx_warning_high[2]; - u8 tx_warning_low[2]; - - u8 rx_alarm_high[2]; - u8 rx_alarm_low[2]; - u8 rx_warning_high[2]; - u8 rx_warning_low[2]; - } alarm_warn_th; - - u8 unallocated0[16]; - u8 ext_cal_constants[36]; - u8 unallocated1[3]; - u8 cc_dmi; - - /* 96~105 */ - struct { - u8 temp[2]; - u8 vcc[2]; - u8 tx_bias[2]; - u8 tx_power[2]; - u8 rx_power[2]; - } diag; - - u8 unallocated2[4]; - - struct { - u8 data_rdy_bar_state : 1; - u8 rx_los : 1; - u8 tx_fault_state : 1; - u8 soft_rate_select_state : 1; - u8 rate_select_state : 1; - u8 rs_state : 1; - u8 soft_tx_disable_select : 1; - u8 tx_disable_state : 1; - } status_ctrl; - u8 rsvd; - - /* 112~113 */ - struct { - /* 112 */ - u8 tx_alarm_low : 1; - u8 tx_alarm_high : 1; - u8 tx_bias_alarm_low : 1; - u8 tx_bias_alarm_high : 1; - u8 vcc_alarm_low : 1; - u8 vcc_alarm_high : 1; - u8 temp_alarm_low : 1; - u8 temp_alarm_high : 1; - - /* 113 */ - u8 rsvd : 6; - u8 rx_alarm_low : 1; - u8 rx_alarm_high : 1; - } alarm; - - u8 unallocated3[2]; - - /* 116~117 */ - struct { - /* 116 */ - u8 tx_warn_lo : 1; - u8 tx_warn_hi : 1; - u8 bias_warn_lo : 1; - u8 bias_warn_hi : 1; - u8 vcc_warn_lo : 1; - u8 vcc_warn_hi : 1; - u8 temp_warn_lo : 1; - u8 temp_warn_hi : 1; - - /* 117 */ - u8 rsvd : 6; - u8 rx_warn_lo : 1; - u8 rx_warn_hi : 1; - } warning; - - u8 ext_status_and_ctrl[2]; - } diag; - - /* Offset 120~255 */ - struct { - u8 vendor_spec[8]; - u8 user_eeprom[120]; - u8 vendor_ctrl[8]; - } general_use_fields; -}; - -struct unf_sfp_info { - struct unf_sfp_data_field_a0 sfp_info_a0; - struct unf_sfp_data_field_a2 sfp_info_a2; -}; - -struct unf_sfp_err_rome_info { - struct unf_sfp_info sfp_info; - 
struct unf_sfp_plus_info sfp_plus_info; -}; - -struct unf_err_code { - u32 loss_of_signal_count; - u32 bad_rx_char_count; - u32 loss_of_sync_count; - u32 link_fail_count; - u32 rx_eof_a_count; - u32 dis_frame_count; - u32 bad_crc_count; - u32 proto_error_count; -}; - -/* config file */ -enum unf_port_mode { - UNF_PORT_MODE_UNKNOWN = 0x00, - UNF_PORT_MODE_TGT = 0x10, - UNF_PORT_MODE_INI = 0x20, - UNF_PORT_MODE_BOTH = 0x30 -}; - -enum unf_port_upgrade { - UNF_PORT_UNSUPPORT_UPGRADE_REPORT = 0x00, - UNF_PORT_SUPPORT_UPGRADE_REPORT = 0x01, - UNF_PORT_UPGRADE_BUTT -}; - -#define UNF_BYTES_OF_DWORD 0x4 -static inline void __attribute__((unused)) unf_big_end_to_cpu(u8 *buffer, u32 size) -{ - u32 *buf = NULL; - u32 word_sum = 0; - u32 index = 0; - - if (!buffer) - return; - - buf = (u32 *)buffer; - - /* byte to word */ - if (size % UNF_BYTES_OF_DWORD == 0) - word_sum = size / UNF_BYTES_OF_DWORD; - else - return; - - /* word to byte */ - while (index < word_sum) { - *buf = be32_to_cpu(*buf); - buf++; - index++; - } -} - -static inline void __attribute__((unused)) unf_cpu_to_big_end(void *buffer, u32 size) -{ -#define DWORD_BIT 32 -#define BYTE_BIT 8 - u32 *buf = NULL; - u32 word_sum = 0; - u32 index = 0; - u32 tmp = 0; - - if (!buffer) - return; - - buf = (u32 *)buffer; - - /* byte to dword */ - word_sum = size / UNF_BYTES_OF_DWORD; - - /* dword to byte */ - while (index < word_sum) { - *buf = cpu_to_be32(*buf); - buf++; - index++; - } - - if (size % UNF_BYTES_OF_DWORD) { - tmp = cpu_to_be32(*buf); - tmp = - tmp >> (DWORD_BIT - (size % UNF_BYTES_OF_DWORD) * BYTE_BIT); - memcpy(buf, &tmp, (size % UNF_BYTES_OF_DWORD)); - } -} - -#define UNF_TOP_AUTO_MASK 0x0f -#define UNF_TOP_UNKNOWN 0xff -#define SPFC_TOP_AUTO 0x0 - -#define UNF_NORMAL_MODE 0 -#define UNF_SET_NOMAL_MODE(mode) ((mode) = UNF_NORMAL_MODE) - -/* - * * SCSI status - */ -#define SCSI_GOOD 0x00 -#define SCSI_CHECK_CONDITION 0x02 -#define SCSI_CONDITION_MET 0x04 -#define SCSI_BUSY 0x08 -#define SCSI_INTERMEDIATE 0x10 -#define SCSI_INTERMEDIATE_COND_MET 0x14 -#define SCSI_RESERVATION_CONFLICT 0x18 -#define SCSI_TASK_SET_FULL 0x28 -#define SCSI_ACA_ACTIVE 0x30 -#define SCSI_TASK_ABORTED 0x40 - -enum unf_act_topo { - UNF_ACT_TOP_PUBLIC_LOOP = 0x1, - UNF_ACT_TOP_PRIVATE_LOOP = 0x2, - UNF_ACT_TOP_P2P_DIRECT = 0x4, - UNF_ACT_TOP_P2P_FABRIC = 0x8, - UNF_TOP_LOOP_MASK = 0x03, - UNF_TOP_P2P_MASK = 0x0c, - UNF_TOP_FCOE_MASK = 0x30, - UNF_ACT_TOP_UNKNOWN -}; - -#define UNF_FL_PORT_LOOP_ADDR 0x00 -#define UNF_INVALID_LOOP_ADDR 0xff - -#define UNF_LOOP_ROLE_MASTER_OR_SLAVE 0x0 -#define UNF_LOOP_ROLE_ONLY_SLAVE 0x1 - -#define UNF_TOU16_CHECK(dest, src, over_action) \ - do { \ - if (unlikely(0xFFFF < (src))) { \ - FC_DRV_PRINT(UNF_LOG_REG_ATT, \ - UNF_ERR, "ToU16 error, src 0x%x ", \ - (src)); \ - over_action; \ - } \ - ((dest) = (u16)(src)); \ - } while (0) - -#define UNF_PORT_SPEED_AUTO 0 -#define UNF_PORT_SPEED_2_G 2 -#define UNF_PORT_SPEED_4_G 4 -#define UNF_PORT_SPEED_8_G 8 -#define UNF_PORT_SPEED_10_G 10 -#define UNF_PORT_SPEED_16_G 16 -#define UNF_PORT_SPEED_32_G 32 - -#define UNF_PORT_SPEED_UNKNOWN (~0) -#define UNF_PORT_SFP_SPEED_ERR 0xFF - -#define UNF_OP_DEBUG_DUMP 0x0001 -#define UNF_OP_FCPORT_INFO 0x0002 -#define UNF_OP_FCPORT_LINK_CMD_TEST 0x0003 -#define UNF_OP_TEST_MBX 0x0004 - -/* max frame size */ -#define UNF_MAX_FRAME_SIZE 2112 - -/* default */ -#define UNF_DEFAULT_FRAME_SIZE 2048 -#define UNF_DEFAULT_EDTOV 2000 -#define UNF_DEFAULT_RATOV 10000 -#define UNF_DEFAULT_FABRIC_RATOV 10000 -#define UNF_MAX_RETRY_COUNT 3 -#define 
UNF_RRQ_MIN_TIMEOUT_INTERVAL 30000 -#define UNF_LOGO_TIMEOUT_INTERVAL 3000 -#define UNF_SFS_MIN_TIMEOUT_INTERVAL 15000 -#define UNF_WRITE_RRQ_SENDERR_INTERVAL 3000 -#define UNF_REC_TOV 3000 - -#define UNF_WAIT_SEM_TIMEOUT (5000UL) -#define UNF_WAIT_ABTS_RSP_TIMEOUT (20000UL) -#define UNF_MAX_ABTS_WAIT_INTERVAL ((UNF_WAIT_SEM_TIMEOUT - 500) / 1000) - -#define UNF_TGT_RRQ_REDUNDANT_TIME 2000 -#define UNF_INI_RRQ_REDUNDANT_TIME 500 -#define UNF_INI_ELS_REDUNDANT_TIME 2000 - -/* ELS command values */ -#define UNF_ELS_CMND_HIGH_MASK 0xff000000 -#define UNF_ELS_CMND_RJT 0x01000000 -#define UNF_ELS_CMND_ACC 0x02000000 -#define UNF_ELS_CMND_PLOGI 0x03000000 -#define UNF_ELS_CMND_FLOGI 0x04000000 -#define UNF_ELS_CMND_LOGO 0x05000000 -#define UNF_ELS_CMND_RLS 0x0F000000 -#define UNF_ELS_CMND_ECHO 0x10000000 -#define UNF_ELS_CMND_REC 0x13000000 -#define UNF_ELS_CMND_RRQ 0x12000000 -#define UNF_ELS_CMND_PRLI 0x20000000 -#define UNF_ELS_CMND_PRLO 0x21000000 -#define UNF_ELS_CMND_PDISC 0x50000000 -#define UNF_ELS_CMND_FDISC 0x51000000 -#define UNF_ELS_CMND_ADISC 0x52000000 -#define UNF_ELS_CMND_FAN 0x60000000 -#define UNF_ELS_CMND_RSCN 0x61000000 -#define UNF_FCP_CMND_SRR 0x14000000 -#define UNF_GS_CMND_SCR 0x62000000 - -#define UNF_PLOGI_VERSION_UPPER 0x20 -#define UNF_PLOGI_VERSION_LOWER 0x20 -#define UNF_PLOGI_CONCURRENT_SEQ 0x00FF -#define UNF_PLOGI_RO_CATEGORY 0x00FE -#define UNF_PLOGI_SEQ_PER_XCHG 0x0001 -#define UNF_LGN_INFRAMESIZE 2048 - -/* CT_IU pream defines */ -#define UNF_REV_NPORTID_INIT 0x01000000 -#define UNF_FSTYPE_OPT_INIT 0xfc020000 -#define UNF_FSTYPE_RFT_ID 0x02170000 -#define UNF_FSTYPE_GID_PT 0x01A10000 -#define UNF_FSTYPE_GID_FT 0x01710000 -#define UNF_FSTYPE_RFF_ID 0x021F0000 -#define UNF_FSTYPE_GFF_ID 0x011F0000 -#define UNF_FSTYPE_GNN_ID 0x01130000 -#define UNF_FSTYPE_GPN_ID 0x01120000 - -#define UNF_CT_IU_RSP_MASK 0xffff0000 -#define UNF_CT_IU_REASON_MASK 0x00ff0000 -#define UNF_CT_IU_EXPLAN_MASK 0x0000ff00 -#define UNF_CT_IU_REJECT 0x80010000 -#define UNF_CT_IU_ACCEPT 0x80020000 - -#define UNF_FABRIC_FULL_REG 0x00000003 - -#define UNF_FC4_SCSI_BIT8 0x00000100 -#define UNF_FC4_FCP_TYPE 0x00000008 -#define UNF_FRAG_REASON_VENDOR 0 - -/* GID_PT, GID_FT */ -#define UNF_GID_PT_TYPE 0x7F000000 -#define UNF_GID_FT_TYPE 0x00000008 - -/* - *FC4 defines - */ -#define UNF_FC4_FRAME_PAGE_SIZE 0x10 -#define UNF_FC4_FRAME_PAGE_SIZE_SHIFT 16 - -#define UNF_FC4_FRAME_PARM_0_FCP 0x08000000 -#define UNF_FC4_FRAME_PARM_0_I_PAIR 0x00002000 -#define UNF_FC4_FRAME_PARM_0_GOOD_RSP_CODE 0x00000100 -#define UNF_FC4_FRAME_PARM_0_MASK \ - (UNF_FC4_FRAME_PARM_0_FCP | UNF_FC4_FRAME_PARM_0_I_PAIR | \ - UNF_FC4_FRAME_PARM_0_GOOD_RSP_CODE) -#define UNF_FC4_FRAME_PARM_3_INI 0x00000020 -#define UNF_FC4_FRAME_PARM_3_TGT 0x00000010 -#define UNF_FC4_FRAME_PARM_3_BOTH \ - (UNF_FC4_FRAME_PARM_3_INI | UNF_FC4_FRAME_PARM_3_TGT) -#define UNF_FC4_FRAME_PARM_3_R_XFER_DIS 0x00000002 -#define UNF_FC4_FRAME_PARM_3_W_XFER_DIS 0x00000001 -#define UNF_FC4_FRAME_PARM_3_REC_SUPPORT 0x00000400 /* bit 10 */ -#define UNF_FC4_FRAME_PARM_3_TASK_RETRY_ID_SUPPORT 0x00000200 /* bit 9 */ -#define UNF_FC4_FRAME_PARM_3_RETRY_SUPPORT 0x00000100 /* bit 8 */ -#define UNF_FC4_FRAME_PARM_3_CONF_ALLOW 0x00000080 /* bit 7 */ - -#define UNF_FC4_FRAME_PARM_3_MASK \ - (UNF_FC4_FRAME_PARM_3_INI | UNF_FC4_FRAME_PARM_3_TGT | \ - UNF_FC4_FRAME_PARM_3_R_XFER_DIS) - -#define UNF_FC4_TYPE_SHIFT 24 -#define UNF_FC4_TYPE_MASK 0xff -/* FC4 feature we support */ -#define UNF_GFF_ACC_MASK 0xFF000000 - -/* Reject CT_IU Reason Codes */ -#define 
UNF_CTIU_RJT_MASK 0xffff0000 -#define UNF_CTIU_RJT_INVALID_COMMAND 0x00010000 -#define UNF_CTIU_RJT_INVALID_VERSION 0x00020000 -#define UNF_CTIU_RJT_LOGIC_ERR 0x00030000 -#define UNF_CTIU_RJT_INVALID_SIZE 0x00040000 -#define UNF_CTIU_RJT_LOGIC_BUSY 0x00050000 -#define UNF_CTIU_RJT_PROTOCOL_ERR 0x00070000 -#define UNF_CTIU_RJT_UNABLE_PERFORM 0x00090000 -#define UNF_CTIU_RJT_NOT_SUPPORTED 0x000B0000 - -/* FS_RJT Reason code explanations, FC-GS-2 6.5 */ -#define UNF_CTIU_RJT_EXP_MASK 0x0000FF00 -#define UNF_CTIU_RJT_EXP_NO_ADDTION 0x00000000 -#define UNF_CTIU_RJT_EXP_PORTID_NO_REG 0x00000100 -#define UNF_CTIU_RJT_EXP_PORTNAME_NO_REG 0x00000200 -#define UNF_CTIU_RJT_EXP_NODENAME_NO_REG 0x00000300 -#define UNF_CTIU_RJT_EXP_FC4TYPE_NO_REG 0x00000700 -#define UNF_CTIU_RJT_EXP_PORTTYPE_NO_REG 0x00000A00 - -/* - * LS_RJT defines - */ -#define UNF_FC_LS_RJT_REASON_MASK 0x00ff0000 - -/* - * LS_RJT reason code defines - */ -#define UNF_LS_OK 0x00000000 -#define UNF_LS_RJT_INVALID_COMMAND 0x00010000 -#define UNF_LS_RJT_LOGICAL_ERROR 0x00030000 -#define UNF_LS_RJT_BUSY 0x00050000 -#define UNF_LS_RJT_PROTOCOL_ERROR 0x00070000 -#define UNF_LS_RJT_REQUEST_DENIED 0x00090000 -#define UNF_LS_RJT_NOT_SUPPORTED 0x000b0000 -#define UNF_LS_RJT_CLASS_ERROR 0x000c0000 - -/* - * LS_RJT code explanation - */ -#define UNF_LS_RJT_NO_ADDITIONAL_INFO 0x00000000 -#define UNF_LS_RJT_INV_DATA_FIELD_SIZE 0x00000700 -#define UNF_LS_RJT_INV_COMMON_SERV_PARAM 0x00000F00 -#define UNF_LS_RJT_INVALID_OXID_RXID 0x00001700 -#define UNF_LS_RJT_COMMAND_IN_PROGRESS 0x00001900 -#define UNF_LS_RJT_INSUFFICIENT_RESOURCES 0x00002900 -#define UNF_LS_RJT_COMMAND_NOT_SUPPORTED 0x00002C00 -#define UNF_LS_RJT_UNABLE_TO_SUPLY_REQ_DATA 0x00002A00 -#define UNF_LS_RJT_INVALID_PAYLOAD_LENGTH 0x00002D00 - -#define UNF_P2P_LOCAL_NPORT_ID 0x000000EF -#define UNF_P2P_REMOTE_NPORT_ID 0x000000D6 - -#define UNF_BBCREDIT_MANAGE_NFPORT 0 -#define UNF_BBCREDIT_MANAGE_LPORT 1 -#define UNF_BBCREDIT_LPORT 0 -#define UNF_CONTIN_INCREASE_SUPPORT 1 -#define UNF_CLASS_VALID 1 -#define UNF_CLASS_INVALID 0 -#define UNF_NOT_MEANINGFUL 0 -#define UNF_NO_SERVICE_PARAMS 0 -#define UNF_CLEAN_ADDRESS_DEFAULT 0 -#define UNF_PRIORITY_ENABLE 1 -#define UNF_PRIORITY_DISABLE 0 -#define UNF_SEQUEN_DELIVERY_REQ 1 /* Sequential delivery requested */ - -#define UNF_FC_PROTOCOL_CLASS_3 0x0 -#define UNF_FC_PROTOCOL_CLASS_2 0x1 -#define UNF_FC_PROTOCOL_CLASS_1 0x2 -#define UNF_FC_PROTOCOL_CLASS_F 0x3 -#define UNF_FC_PROTOCOL_CLASS_OTHER 0x4 - -#define UNF_RSCN_PORT_ADDR 0x0 -#define UNF_RSCN_AREA_ADDR_GROUP 0x1 -#define UNF_RSCN_DOMAIN_ADDR_GROUP 0x2 -#define UNF_RSCN_FABRIC_ADDR_GROUP 0x3 - -#define UNF_GET_RSCN_PLD_LEN(cmnd) ((cmnd) & 0x0000ffff) -#define UNF_RSCN_PAGE_LEN 0x4 - -#define UNF_PORT_LINK_UP 0x0000 -#define UNF_PORT_LINK_DOWN 0x0001 -#define UNF_PORT_RESET_START 0x0002 -#define UNF_PORT_RESET_END 0x0003 -#define UNF_PORT_LINK_UNKNOWN 0x0004 -#define UNF_PORT_NOP 0x0005 -#define UNF_PORT_CORE_FATAL_ERROR 0x0006 -#define UNF_PORT_CORE_UNRECOVERABLE_ERROR 0x0007 -#define UNF_PORT_CORE_RECOVERABLE_ERROR 0x0008 -#define UNF_PORT_LOGOUT 0x0009 -#define UNF_PORT_CLEAR_VLINK 0x000a -#define UNF_PORT_UPDATE_PROCESS 0x000b -#define UNF_PORT_DEBUG_DUMP 0x000c -#define UNF_PORT_GET_FWLOG 0x000d -#define UNF_PORT_CLEAN_DONE 0x000e -#define UNF_PORT_BEGIN_REMOVE 0x000f -#define UNF_PORT_RELEASE_RPORT_INDEX 0x0010 -#define UNF_PORT_ABNORMAL_RESET 0x0012 - -/* - * SCSI begin - */ -#define SCSIOPC_TEST_UNIT_READY 0x00 -#define SCSIOPC_INQUIRY 0x12 -#define SCSIOPC_MODE_SENSE_6 0x1A 
-#define SCSIOPC_MODE_SENSE_10 0x5A -#define SCSIOPC_MODE_SELECT_6 0x15 -#define SCSIOPC_RESERVE 0x16 -#define SCSIOPC_RELEASE 0x17 -#define SCSIOPC_START_STOP_UNIT 0x1B -#define SCSIOPC_READ_CAPACITY_10 0x25 -#define SCSIOPC_READ_CAPACITY_16 0x9E -#define SCSIOPC_READ_6 0x08 -#define SCSIOPC_READ_10 0x28 -#define SCSIOPC_READ_12 0xA8 -#define SCSIOPC_READ_16 0x88 -#define SCSIOPC_WRITE_6 0x0A -#define SCSIOPC_WRITE_10 0x2A -#define SCSIOPC_WRITE_12 0xAA -#define SCSIOPC_WRITE_16 0x8A -#define SCSIOPC_WRITE_VERIFY 0x2E -#define SCSIOPC_VERIFY_10 0x2F -#define SCSIOPC_VERIFY_12 0xAF -#define SCSIOPC_VERIFY_16 0x8F -#define SCSIOPC_REQUEST_SENSE 0x03 -#define SCSIOPC_REPORT_LUN 0xA0 -#define SCSIOPC_FORMAT_UNIT 0x04 -#define SCSIOPC_SEND_DIAGNOSTIC 0x1D -#define SCSIOPC_WRITE_SAME_10 0x41 -#define SCSIOPC_WRITE_SAME_16 0x93 -#define SCSIOPC_READ_BUFFER 0x3C -#define SCSIOPC_WRITE_BUFFER 0x3B - -#define SCSIOPC_LOG_SENSE 0x4D -#define SCSIOPC_MODE_SELECT_10 0x55 -#define SCSIOPC_SYNCHRONIZE_CACHE_10 0x35 -#define SCSIOPC_SYNCHRONIZE_CACHE_16 0x91 -#define SCSIOPC_WRITE_AND_VERIFY_10 0x2E -#define SCSIOPC_WRITE_AND_VERIFY_12 0xAE -#define SCSIOPC_WRITE_AND_VERIFY_16 0x8E -#define SCSIOPC_READ_MEDIA_SERIAL_NUMBER 0xAB -#define SCSIOPC_REASSIGN_BLOCKS 0x07 -#define SCSIOPC_ATA_PASSTHROUGH_16 0x85 -#define SCSIOPC_ATA_PASSTHROUGH_12 0xa1 - -/* - * SCSI end - */ -#define IS_READ_COMMAND(opcode) \ - ((opcode) == SCSIOPC_READ_6 || (opcode) == SCSIOPC_READ_10 || \ - (opcode) == SCSIOPC_READ_12 || (opcode) == SCSIOPC_READ_16) -#define IS_WRITE_COMMAND(opcode) \ - ((opcode) == SCSIOPC_WRITE_6 || (opcode) == SCSIOPC_WRITE_10 || \ - (opcode) == SCSIOPC_WRITE_12 || (opcode) == SCSIOPC_WRITE_16) - -#define IS_VERIFY_COMMAND(opcode) \ - ((opcode) == SCSIOPC_VERIFY_10 || (opcode) == SCSIOPC_VERIFY_12 || \ - (opcode) == SCSIOPC_VERIFY_16) - -#define FCP_RSP_LEN_VALID_MASK 0x1 -#define FCP_SNS_LEN_VALID_MASK 0x2 -#define FCP_RESID_OVER_MASK 0x4 -#define FCP_RESID_UNDER_MASK 0x8 -#define FCP_CONF_REQ_MASK 0x10 -#define FCP_SCSI_STATUS_GOOD 0x0 - -#define UNF_DELAYED_WORK_SYNC(ret, port_id, work, work_symb) \ - do { \ - if (!cancel_delayed_work_sync(work)) { \ - FC_DRV_PRINT(UNF_LOG_REG_ATT, \ - UNF_INFO, \ - "[info]LPort or RPort(0x%x) %s worker " \ - "can't destroy, or no " \ - "worker", \ - port_id, work_symb); \ - ret = UNF_RETURN_ERROR; \ - } else { \ - ret = RETURN_OK; \ - } \ - } while (0) - -#define UNF_GET_SFS_ENTRY(pkg) ((union unf_sfs_u *)(void *)(((struct unf_frame_pkg *)(pkg)) \ - ->unf_cmnd_pload_bl.buffer_ptr)) -/* FLOGI */ -#define UNF_GET_FLOGI_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->flogi.flogi_payload)) -#define UNF_FLOGI_PAYLOAD_LEN sizeof(struct unf_flogi_fdisc_payload) - -/* FLOGI ACC */ -#define UNF_GET_FLOGI_ACC_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg))) \ - ->flogi_acc.flogi_payload)) -#define UNF_FLOGI_ACC_PAYLOAD_LEN sizeof(struct unf_flogi_fdisc_payload) - -/* FDISC */ -#define UNF_FDISC_PAYLOAD_LEN UNF_FLOGI_PAYLOAD_LEN -#define UNF_FDISC_ACC_PAYLOAD_LEN UNF_FLOGI_ACC_PAYLOAD_LEN - -/* PLOGI */ -#define UNF_GET_PLOGI_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->plogi.payload)) -#define UNF_PLOGI_PAYLOAD_LEN sizeof(struct unf_plogi_payload) - -/* PLOGI ACC */ -#define UNF_GET_PLOGI_ACC_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->plogi_acc.payload)) -#define UNF_PLOGI_ACC_PAYLOAD_LEN sizeof(struct unf_plogi_payload) - -/* LOGO */ -#define UNF_GET_LOGO_PAYLOAD(pkg) \ - (&(((union unf_sfs_u 
*)(UNF_GET_SFS_ENTRY(pkg)))->logo.payload)) -#define UNF_LOGO_PAYLOAD_LEN sizeof(struct unf_logo_payload) - -/* ECHO */ -#define UNF_GET_ECHO_PAYLOAD(pkg) \ - (((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->echo.echo_pld) - -/* ECHO PHYADDR */ -#define UNF_GET_ECHO_PAYLOAD_PHYADDR(pkg) \ - (((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->echo.phy_echo_addr) - -#define UNF_ECHO_PAYLOAD_LEN sizeof(struct unf_echo_payload) - -/* REC */ -#define UNF_GET_REC_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->rec.rec_pld)) - -#define UNF_REC_PAYLOAD_LEN sizeof(struct unf_rec_pld) - -/* ECHO ACC */ -#define UNF_GET_ECHO_ACC_PAYLOAD(pkg) \ - (((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->echo_acc.echo_pld) -#define UNF_ECHO_ACC_PAYLOAD_LEN sizeof(struct unf_echo_payload) - -/* RRQ */ -#define UNF_GET_RRQ_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->rrq.cmnd)) -#define UNF_RRQ_PAYLOAD_LEN \ - (sizeof(struct unf_rrq) - sizeof(struct unf_fc_head)) - -/* PRLI */ -#define UNF_GET_PRLI_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->prli.payload)) -#define UNF_PRLI_PAYLOAD_LEN sizeof(struct unf_prli_payload) - -/* PRLI ACC */ -#define UNF_GET_PRLI_ACC_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->prli_acc.payload)) -#define UNF_PRLI_ACC_PAYLOAD_LEN sizeof(struct unf_prli_payload) - -/* PRLO */ -#define UNF_GET_PRLO_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->prlo.payload)) -#define UNF_PRLO_PAYLOAD_LEN sizeof(struct unf_prli_payload) - -#define UNF_GET_PRLO_ACC_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->prlo_acc.payload)) -#define UNF_PRLO_ACC_PAYLOAD_LEN sizeof(struct unf_prli_payload) - -/* PDISC */ -#define UNF_GET_PDISC_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->pdisc.payload)) -#define UNF_PDISC_PAYLOAD_LEN sizeof(struct unf_plogi_payload) - -/* PDISC ACC */ -#define UNF_GET_PDISC_ACC_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->pdisc_acc.payload)) -#define UNF_PDISC_ACC_PAYLOAD_LEN sizeof(struct unf_plogi_payload) - -/* ADISC */ -#define UNF_GET_ADISC_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->adisc.adisc_payl)) -#define UNF_ADISC_PAYLOAD_LEN sizeof(struct unf_adisc_payload) - -/* ADISC ACC */ -#define UNF_GET_ADISC_ACC_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->adisc_acc.adisc_payl)) -#define UNF_ADISC_ACC_PAYLOAD_LEN sizeof(struct unf_adisc_payload) - -/* RSCN ACC */ -#define UNF_GET_RSCN_ACC_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->els_acc.cmnd)) -#define UNF_RSCN_ACC_PAYLOAD_LEN \ - (sizeof(struct unf_els_acc) - sizeof(struct unf_fc_head)) - -/* LOGO ACC */ -#define UNF_GET_LOGO_ACC_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->els_acc.cmnd)) -#define UNF_LOGO_ACC_PAYLOAD_LEN \ - (sizeof(struct unf_els_acc) - sizeof(struct unf_fc_head)) - -/* RRQ ACC */ -#define UNF_GET_RRQ_ACC_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->els_acc.cmnd)) -#define UNF_RRQ_ACC_PAYLOAD_LEN \ - (sizeof(struct unf_els_acc) - sizeof(struct unf_fc_head)) - -/* REC ACC */ -#define UNF_GET_REC_ACC_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)(UNF_GET_SFS_ENTRY(pkg)))->els_acc.cmnd)) -#define UNF_REC_ACC_PAYLOAD_LEN \ - (sizeof(struct unf_els_acc) - sizeof(struct unf_fc_head)) - -/* GPN_ID */ -#define UNF_GET_GPNID_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg))->gpn_id.ctiu_pream)) -#define UNF_GPNID_PAYLOAD_LEN \ - 
(sizeof(struct unf_gpnid) - sizeof(struct unf_fc_head)) - -#define UNF_GET_GPNID_RSP_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg))->gpn_id_rsp.ctiu_pream)) -#define UNF_GPNID_RSP_PAYLOAD_LEN \ - (sizeof(struct unf_gpnid_rsp) - sizeof(struct unf_fc_head)) - -/* GNN_ID */ -#define UNF_GET_GNNID_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg))->gnn_id.ctiu_pream)) -#define UNF_GNNID_PAYLOAD_LEN \ - (sizeof(struct unf_gnnid) - sizeof(struct unf_fc_head)) - -#define UNF_GET_GNNID_RSP_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg))->gnn_id_rsp.ctiu_pream)) -#define UNF_GNNID_RSP_PAYLOAD_LEN \ - (sizeof(struct unf_gnnid_rsp) - sizeof(struct unf_fc_head)) - -/* GFF_ID */ -#define UNF_GET_GFFID_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg))->gff_id.ctiu_pream)) -#define UNF_GFFID_PAYLOAD_LEN \ - (sizeof(struct unf_gffid) - sizeof(struct unf_fc_head)) - -#define UNF_GET_GFFID_RSP_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg))->gff_id_rsp.ctiu_pream)) -#define UNF_GFFID_RSP_PAYLOAD_LEN \ - (sizeof(struct unf_gffid_rsp) - sizeof(struct unf_fc_head)) - -/* GID_FT/GID_PT */ -#define UNF_GET_GID_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg)) \ - ->get_id.gid_req.ctiu_pream)) - -#define UNF_GID_PAYLOAD_LEN (sizeof(struct unf_ctiu_prem) + sizeof(u32)) -#define UNF_GET_GID_ACC_PAYLOAD(pkg) \ - (((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg)) \ - ->get_id.gid_rsp.gid_acc_pld) -#define UNF_GID_ACC_PAYLOAD_LEN sizeof(struct unf_gid_acc_pld) - -/* RFT_ID */ -#define UNF_GET_RFTID_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg))->rft_id.ctiu_pream)) -#define UNF_RFTID_PAYLOAD_LEN \ - (sizeof(struct unf_rftid) - sizeof(struct unf_fc_head)) - -#define UNF_GET_RFTID_RSP_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg))->rft_id_rsp.ctiu_pream)) -#define UNF_RFTID_RSP_PAYLOAD_LEN sizeof(struct unf_ctiu_prem) - -/* RFF_ID */ -#define UNF_GET_RFFID_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg))->rff_id.ctiu_pream)) -#define UNF_RFFID_PAYLOAD_LEN \ - (sizeof(struct unf_rffid) - sizeof(struct unf_fc_head)) - -#define UNF_GET_RFFID_RSP_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg))->rff_id_rsp.ctiu_pream)) -#define UNF_RFFID_RSP_PAYLOAD_LEN sizeof(struct unf_ctiu_prem) - -/* ACC&RJT */ -#define UNF_GET_ELS_ACC_RJT_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg))->els_rjt.cmnd)) -#define UNF_ELS_ACC_RJT_LEN \ - (sizeof(struct unf_els_rjt) - sizeof(struct unf_fc_head)) - -/* SCR */ -#define UNF_SCR_PAYLOAD(pkg) \ - (((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg))->scr.payload) -#define UNF_SCR_PAYLOAD_LEN \ - (sizeof(struct unf_scr) - sizeof(struct unf_fc_head)) - -#define UNF_SCR_RSP_PAYLOAD(pkg) \ - (&(((union unf_sfs_u *)UNF_GET_SFS_ENTRY(pkg))->els_acc.cmnd)) -#define UNF_SCR_RSP_PAYLOAD_LEN \ - (sizeof(struct unf_els_acc) - sizeof(struct unf_fc_head)) - -#define UNF_GS_RSP_PAYLOAD_LEN \ - (sizeof(union unf_sfs_u) - sizeof(struct unf_fc_head)) - -#define UNF_GET_XCHG_TAG(pkg) \ - (((struct unf_frame_pkg *)(pkg)) \ - ->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]) -#define UNF_GET_ABTS_XCHG_TAG(pkg) \ - ((u16)(((pkg)->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]) >> 16)) -#define UNF_GET_IO_XCHG_TAG(pkg) \ - ((u16)((pkg)->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX])) - -#define UNF_GET_HOTPOOL_TAG(pkg) \ - (((struct unf_frame_pkg *)(pkg)) \ - ->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]) -#define UNF_GET_SID(pkg) \ - (((struct unf_frame_pkg 
*)(pkg))->frame_head.csctl_sid & \ - UNF_NPORTID_MASK) -#define UNF_GET_DID(pkg) \ - (((struct unf_frame_pkg *)(pkg))->frame_head.rctl_did & \ - UNF_NPORTID_MASK) -#define UNF_GET_OXID(pkg) \ - (((struct unf_frame_pkg *)(pkg))->frame_head.oxid_rxid >> 16) -#define UNF_GET_RXID(pkg) \ - ((u16)((struct unf_frame_pkg *)(pkg))->frame_head.oxid_rxid) -#define UNF_GET_XID_RELEASE_TIMER(pkg) \ - (((struct unf_frame_pkg *)(pkg))->release_task_id_timer) -#define UNF_GETXCHGALLOCTIME(pkg) \ - (((struct unf_frame_pkg *)(pkg)) \ - ->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME]) - -#define UNF_SET_XCHG_ALLOC_TIME(pkg, xchg) \ - (((struct unf_frame_pkg *)(pkg)) \ - ->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = \ - (((struct unf_xchg *)(xchg)) \ - ->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME])) -#define UNF_SET_ABORT_INFO_IOTYPE(pkg, xchg) \ - (((struct unf_frame_pkg *)(pkg)) \ - ->private_data[PKG_PRIVATE_XCHG_ABORT_INFO] |= \ - (((u8)(((struct unf_xchg *)(xchg))->data_direction & 0x7)) \ - << 2)) - -#define UNF_CHECK_NPORT_FPORT_BIT(els_payload) \ - (((struct unf_flogi_fdisc_payload *)(els_payload)) \ - ->fabric_parms.co_parms.nport) - -#define UNF_GET_RSP_BUF(pkg) \ - ((void *)(((struct unf_frame_pkg *)(pkg))->unf_rsp_pload_bl.buffer_ptr)) -#define UNF_GET_RSP_LEN(pkg) \ - (((struct unf_frame_pkg *)(pkg))->unf_rsp_pload_bl.length) - -#define UNF_N_PORT 0 -#define UNF_F_PORT 1 - -#define UNF_GET_RA_TOV_FROM_PARAMS(pfcparams) \ - (((struct unf_fabric_parm *)(pfcparams))->co_parms.r_a_tov) -#define UNF_GET_RT_TOV_FROM_PARAMS(pfcparams) \ - (((struct unf_fabric_parm *)(pfcparams))->co_parms.r_t_tov) -#define UNF_GET_E_D_TOV_FROM_PARAMS(pfcparams) \ - (((struct unf_fabric_parm *)(pfcparams))->co_parms.e_d_tov) -#define UNF_GET_E_D_TOV_RESOLUTION_FROM_PARAMS(pfcparams) \ - (((struct unf_fabric_parm *)(pfcparams))->co_parms.e_d_tov_resolution) -#define UNF_GET_BB_SC_N_FROM_PARAMS(pfcparams) \ - (((struct unf_fabric_parm *)(pfcparams))->co_parms.bbscn) -#define UNF_GET_BB_CREDIT_FROM_PARAMS(pfcparams) \ - (((struct unf_fabric_parm *)(pfcparams))->co_parms.bb_credit) - -enum unf_pcie_error_code { - UNF_PCIE_ERROR_NONE = 0, - UNF_PCIE_DATAPARITYDETECTED = 1, - UNF_PCIE_SIGNALTARGETABORT, - UNF_PCIE_RECEIVEDTARGETABORT, - UNF_PCIE_RECEIVEDMASTERABORT, - UNF_PCIE_SIGNALEDSYSTEMERROR, - UNF_PCIE_DETECTEDPARITYERROR, - UNF_PCIE_CORRECTABLEERRORDETECTED, - UNF_PCIE_NONFATALERRORDETECTED, - UNF_PCIE_FATALERRORDETECTED, - UNF_PCIE_UNSUPPORTEDREQUESTDETECTED, - UNF_PCIE_AUXILIARYPOWERDETECTED, - UNF_PCIE_TRANSACTIONSPENDING, - - UNF_PCIE_UNCORRECTINTERERRSTATUS, - UNF_PCIE_UNSUPPORTREQERRSTATUS, - UNF_PCIE_ECRCERRORSTATUS, - UNF_PCIE_MALFORMEDTLPSTATUS, - UNF_PCIE_RECEIVEROVERFLOWSTATUS, - UNF_PCIE_UNEXPECTCOMPLETESTATUS, - UNF_PCIE_COMPLETERABORTSTATUS, - UNF_PCIE_COMPLETIONTIMEOUTSTATUS, - UNF_PCIE_FLOWCTRLPROTOCOLERRSTATUS, - UNF_PCIE_POISONEDTLPSTATUS, - UNF_PCIE_SURPRISEDOWNERRORSTATUS, - UNF_PCIE_DATALINKPROTOCOLERRSTATUS, - UNF_PCIE_ADVISORYNONFATALERRSTATUS, - UNF_PCIE_REPLAYTIMERTIMEOUTSTATUS, - UNF_PCIE_REPLAYNUMROLLOVERSTATUS, - UNF_PCIE_BADDLLPSTATUS, - UNF_PCIE_BADTLPSTATUS, - UNF_PCIE_RECEIVERERRORSTATUS, - - UNF_PCIE_BUTT -}; - -#define UNF_DMA_HI32(a) (((a) >> 32) & 0xffffffff) -#define UNF_DMA_LO32(a) ((a) & 0xffffffff) - -#define UNF_WWN_LEN 8 -#define UNF_MAC_LEN 6 - -/* send BLS/ELS/BLS REPLY/ELS REPLY/GS/ */ -/* rcvd BLS/ELS/REQ DONE/REPLY DONE */ -#define UNF_PKG_BLS_REQ 0x0100 -#define UNF_PKG_BLS_REQ_DONE 0x0101 -#define UNF_PKG_BLS_REPLY 0x0102 -#define UNF_PKG_BLS_REPLY_DONE 0x0103 - -#define 
UNF_PKG_ELS_REQ 0x0200 -#define UNF_PKG_ELS_REQ_DONE 0x0201 - -#define UNF_PKG_ELS_REPLY 0x0202 -#define UNF_PKG_ELS_REPLY_DONE 0x0203 - -#define UNF_PKG_GS_REQ 0x0300 -#define UNF_PKG_GS_REQ_DONE 0x0301 - -#define UNF_PKG_TGT_XFER 0x0400 -#define UNF_PKG_TGT_RSP 0x0401 -#define UNF_PKG_TGT_RSP_NOSGL 0x0402 -#define UNF_PKG_TGT_RSP_STATUS 0x0403 - -#define UNF_PKG_INI_IO 0x0500 -#define UNF_PKG_INI_RCV_TGT_RSP 0x0507 - -/* external sgl struct start */ -struct unf_esgl_page { - u64 page_address; - dma_addr_t esgl_phy_addr; - u32 page_size; -}; - -/* external sgl struct end */ -struct unf_esgl { - struct list_head entry_esgl; - struct unf_esgl_page page; -}; - -#define UNF_RESPONE_DATA_LEN 8 -struct unf_frame_payld { - u8 *buffer_ptr; - dma_addr_t buf_dma_addr; - u32 length; -}; - -enum pkg_private_index { - PKG_PRIVATE_LOWLEVEL_XCHG_ADD = 0, - PKG_PRIVATE_XCHG_HOT_POOL_INDEX = 1, /* Hot Pool Index */ - PKG_PRIVATE_XCHG_RPORT_INDEX = 2, /* RPort index */ - PKG_PRIVATE_XCHG_VP_INDEX = 3, /* VPort index */ - PKG_PRIVATE_XCHG_SSQ_INDEX, - PKG_PRIVATE_RPORT_RX_SIZE, - PKG_PRIVATE_XCHG_TIMEER, - PKG_PRIVATE_XCHG_ALLOC_TIME, - PKG_PRIVATE_XCHG_ABORT_INFO, - PKG_PRIVATE_ECHO_CMD_SND_TIME, /* local send echo cmd time stamp */ - PKG_PRIVATE_ECHO_ACC_RCV_TIME, /* local receive echo acc time stamp */ - PKG_PRIVATE_ECHO_CMD_RCV_TIME, /* remote receive echo cmd time stamp */ - PKG_PRIVATE_ECHO_RSP_SND_TIME, /* remote send echo rsp time stamp */ - PKG_MAX_PRIVATE_DATA_SIZE -}; - -extern u32 dix_flag; -extern u32 dif_sgl_mode; -extern u32 dif_app_esc_check; -extern u32 dif_ref_esc_check; - -#define UNF_DIF_ACTION_NONE 0 - -enum unf_adm_dif_mode_E { - UNF_SWITCH_DIF_DIX = 0, - UNF_APP_REF_ESCAPE, - ALL_DIF_MODE = 20, -}; - -#define UNF_DIF_CRC_ERR 0x1001 -#define UNF_DIF_APP_ERR 0x1002 -#define UNF_DIF_LBA_ERR 0x1003 - -#define UNF_VERIFY_CRC_MASK (1 << 1) -#define UNF_VERIFY_APP_MASK (1 << 2) -#define UNF_VERIFY_LBA_MASK (1 << 3) - -#define UNF_REPLACE_CRC_MASK (1 << 8) -#define UNF_REPLACE_APP_MASK (1 << 9) -#define UNF_REPLACE_LBA_MASK (1 << 10) - -#define UNF_DIF_ACTION_MASK (0xff << 16) -#define UNF_DIF_ACTION_INSERT (0x1 << 16) -#define UNF_DIF_ACTION_VERIFY_AND_DELETE (0x2 << 16) -#define UNF_DIF_ACTION_VERIFY_AND_FORWARD (0x3 << 16) -#define UNF_DIF_ACTION_VERIFY_AND_REPLACE (0x4 << 16) - -#define UNF_DIF_ACTION_NO_INCREASE_REFTAG (0x1 << 24) - -#define UNF_DEFAULT_CRC_GUARD_SEED (0) -#define UNF_CAL_512_BLOCK_CNT(data_len) ((data_len) >> 9) -#define UNF_CAL_BLOCK_CNT(data_len, sector_size) ((data_len) / (sector_size)) -#define UNF_CAL_CRC_BLK_CNT(crc_data_len, sector_size) \ - ((crc_data_len) / ((sector_size) + 8)) - -#define UNF_DIF_DOUBLE_SGL (1 << 1) -#define UNF_DIF_SECTSIZE_4KB (1 << 2) -#define UNF_DIF_SECTSIZE_512 (0 << 2) -#define UNF_DIF_LBA_NONE_INCREASE (1 << 3) -#define UNF_DIF_TYPE3 (1 << 4) - -#define SECTOR_SIZE_512 512 -#define SECTOR_SIZE_4096 4096 -#define SPFC_DIF_APP_REF_ESC_NOT_CHECK 1 -#define SPFC_DIF_APP_REF_ESC_CHECK 0 - -struct unf_dif { - u16 crc; - u16 app_tag; - u32 lba; -}; - -enum unf_io_state { UNF_INI_IO = 0, UNF_TGT_XFER = 1, UNF_TGT_RSP = 2 }; - -#define UNF_PKG_LAST_RESPONSE 0 -#define UNF_PKG_NOT_LAST_RESPONSE 1 - -struct unf_frame_pkg { - /* pkt type:BLS/ELS/FC4LS/CMND/XFER/RSP */ - u32 type; - u32 last_pkg_flag; - u32 fcp_conf_flag; - -#define UNF_FCP_RESPONSE_VALID 0x01 -#define UNF_FCP_SENSE_VALID 0x02 - u32 response_and_sense_valid_flag; /* resp and sense vailed flag */ - u32 cmnd; - struct unf_fc_head frame_head; - u32 entry_count; - void *xchg_contex; - 
u32 transfer_len; - u32 residus_len; - u32 status; - u32 status_sub_code; - enum unf_io_state io_state; - u32 qos_level; - u32 private_data[PKG_MAX_PRIVATE_DATA_SIZE]; - struct unf_fcp_cmnd *fcp_cmnd; - struct unf_dif_control_info dif_control; - struct unf_frame_payld unf_cmnd_pload_bl; - struct unf_frame_payld unf_rsp_pload_bl; - struct unf_frame_payld unf_sense_pload_bl; - void *upper_cmd; - u32 abts_maker_status; - u32 release_task_id_timer; - u8 byte_orders; - u8 rx_or_ox_id; - u8 class_mode; - u8 rsvd; - u8 *peresp; - u32 rcvrsp_len; - ulong timeout; - u32 origin_hottag; - u32 origin_magicnum; -}; - -#define UNF_MAX_SFS_XCHG 2048 -#define UNF_RESERVE_SFS_XCHG 128 /* times on exchange mgr num */ - -struct unf_lport_cfg_item { - u32 port_id; - u32 port_mode; /* INI(0x20), TGT(0x10), BOTH(0x30) */ - u32 port_topology; /* 0x3:loop , 0xc:p2p ,0xf:auto */ - u32 max_queue_depth; - u32 max_io; /* Recommended Value 512-4096 */ - u32 max_login; - u32 max_sfs_xchg; - u32 port_speed; /* 0:auto 1:1Gbps 2:2Gbps 4:4Gbps 8:8Gbps 16:16Gbps */ - u32 tape_support; /* ape support */ - u32 fcp_conf; /* fcp confirm support */ - u32 bbscn; -}; - -struct unf_port_dynamic_info { - u32 sfp_posion; - u32 sfp_valid; - u32 phy_link; - u32 firmware_state; - u32 cur_speed; - u32 mailbox_timeout_cnt; -}; - -struct unf_port_intr_coalsec { - u32 delay_timer; - u32 depth; -}; - -struct unf_port_topo { - u32 topo_cfg; - enum unf_act_topo topo_act; -}; - -struct unf_port_transfer_para { - u32 type; - u32 value; -}; - -struct unf_buf { - u8 *buf; - u32 buf_len; -}; - -/* get ucode & up ver */ -#define SPFC_VER_LEN (16) -#define SPFC_COMPILE_TIME_LEN (20) -struct unf_fw_version { - u32 message_type; - u8 fw_version[SPFC_VER_LEN]; -}; - -struct unf_port_wwn { - u64 sys_port_wwn; - u64 sys_node_name; -}; - -enum unf_port_config_set_op { - UNF_PORT_CFG_SET_SPEED, - UNF_PORT_CFG_SET_PORT_SWITCH, - UNF_PORT_CFG_SET_POWER_STATE, - UNF_PORT_CFG_SET_PORT_STATE, - UNF_PORT_CFG_UPDATE_WWN, - UNF_PORT_CFG_TEST_FLASH, - UNF_PORT_CFG_UPDATE_FABRIC_PARAM, - UNF_PORT_CFG_UPDATE_PLOGI_PARAM, - UNF_PORT_CFG_SET_BUTT -}; - -enum unf_port_cfg_get_op { - UNF_PORT_CFG_GET_TOPO_ACT, - UNF_PORT_CFG_GET_LOOP_MAP, - UNF_PORT_CFG_GET_SFP_PRESENT, - UNF_PORT_CFG_GET_FW_VER, - UNF_PORT_CFG_GET_HW_VER, - UNF_PORT_CFG_GET_WORKBALE_BBCREDIT, - UNF_PORT_CFG_GET_WORKBALE_BBSCN, - UNF_PORT_CFG_GET_FC_SERDES, - UNF_PORT_CFG_GET_LOOP_ALPA, - UNF_PORT_CFG_GET_MAC_ADDR, - UNF_PORT_CFG_GET_SFP_VER, - UNF_PORT_CFG_GET_SFP_SUPPORT_UPDATE, - UNF_PORT_CFG_GET_SFP_LOG, - UNF_PORT_CFG_GET_PCIE_LINK_STATE, - UNF_PORT_CFG_GET_FLASH_DATA_INFO, - UNF_PORT_CFG_GET_BUTT, -}; - -enum unf_port_config_state { - UNF_PORT_CONFIG_STATE_START, - UNF_PORT_CONFIG_STATE_STOP, - UNF_PORT_CONFIG_STATE_RESET, - UNF_PORT_CONFIG_STATE_STOP_INTR, - UNF_PORT_CONFIG_STATE_BUTT -}; - -enum unf_port_config_update { - UNF_PORT_CONFIG_UPDATE_FW_MINIMUM, - UNF_PORT_CONFIG_UPDATE_FW_ALL, - UNF_PORT_CONFIG_UPDATE_BUTT -}; - -enum unf_disable_vp_mode { - UNF_DISABLE_VP_MODE_ONLY = 0x8, - UNF_DISABLE_VP_MODE_REINIT_LINK = 0x9, - UNF_DISABLE_VP_MODE_NOFAB_LOGO = 0xA, - UNF_DISABLE_VP_MODE_LOGO_ALL = 0xB -}; - -struct unf_vport_info { - u16 vp_index; - u64 node_name; - u64 port_name; - u32 port_mode; /* INI, TGT or both */ - enum unf_disable_vp_mode disable_mode; - u32 nport_id; /* maybe acquired by lowlevel and update to common */ - void *vport; -}; - -struct unf_port_login_parms { - enum unf_act_topo act_topo; - - u32 rport_index; - u32 seq_cnt : 1; - u32 ed_tov : 1; - u32 reserved : 14; - u32 
tx_mfs : 16; - u32 ed_tov_timer_val; - - u8 remote_rttov_tag; - u8 remote_edtov_tag; - u16 remote_bb_credit; - u16 compared_bbscn; - u32 compared_edtov_val; - u32 compared_ratov_val; - u32 els_cmnd_code; -}; - -struct unf_mbox_head_info { - /* mbox header */ - u8 cmnd_type; - u8 length; - u8 port_id; - u8 pad0; - - /* operation */ - u32 opcode : 4; - u32 pad1 : 28; -}; - -struct unf_mbox_head_sts { - /* mbox header */ - u8 cmnd_type; - u8 length; - u8 port_id; - u8 pad0; - - /* operation */ - u16 pad1; - u8 pad2; - u8 status; -}; - -struct unf_low_level_service_op { - u32 (*unf_ls_gs_send)(void *hba, struct unf_frame_pkg *pkg); - u32 (*unf_bls_send)(void *hba, struct unf_frame_pkg *pkg); - u32 (*unf_cmnd_send)(void *hba, struct unf_frame_pkg *pkg); - u32 (*unf_rsp_send)(void *handle, struct unf_frame_pkg *pkg); - u32 (*unf_release_rport_res)(void *handle, struct unf_port_info *rport_info); - u32 (*unf_flush_ini_resp_que)(void *handle); - u32 (*unf_alloc_rport_res)(void *handle, struct unf_port_info *rport_info); - u32 (*ll_release_xid)(void *handle, struct unf_frame_pkg *pkg); - u32 (*unf_xfer_send)(void *handle, struct unf_frame_pkg *pkg); -}; - -struct unf_low_level_port_mgr_op { - /* fcport/opcode/input parameter */ - u32 (*ll_port_config_set)(void *fc_port, enum unf_port_config_set_op opcode, void *para_in); - - /* fcport/opcode/output parameter */ - u32 (*ll_port_config_get)(void *fc_port, enum unf_port_cfg_get_op opcode, void *para_out); -}; - -struct unf_chip_info { - u8 chip_type; - u8 chip_work_mode; - u8 disable_err_flag; -}; - -struct unf_low_level_functioon_op { - struct unf_chip_info chip_info; - /* low level type */ - u32 low_level_type; - const char *name; - struct pci_dev *dev; - u64 sys_node_name; - u64 sys_port_name; - struct unf_lport_cfg_item lport_cfg_items; -#define UNF_LOW_LEVEL_MGR_TYPE_ACTIVE 0 -#define UNF_LOW_LEVEL_MGR_TYPE_PASSTIVE 1 - const u32 xchg_mgr_type; - -#define UNF_NO_EXTRA_ABTS_XCHG 0x0 -#define UNF_LL_IOC_ABTS_XCHG 0x1 - const u32 abts_xchg; - -#define UNF_CM_RPORT_SET_QUALIFIER 0x0 -#define UNF_CM_RPORT_SET_QUALIFIER_REUSE 0x1 -#define UNF_CM_RPORT_SET_QUALIFIER_SPFC 0x2 - - /* low level pass-through flag. 
*/ -#define UNF_LOW_LEVEL_PASS_THROUGH_FIP 0x0 -#define UNF_LOW_LEVEL_PASS_THROUGH_FABRIC_LOGIN 0x1 -#define UNF_LOW_LEVEL_PASS_THROUGH_PORT_LOGIN 0x2 - u32 passthrough_flag; - - /* low level parameter */ - u32 support_max_npiv_num; - u32 support_max_ssq_num; - u32 support_max_speed; - u32 support_min_speed; - u32 fc_ser_max_speed; - - u32 support_max_rport; - - u32 support_max_hot_tag_range; - u32 sfp_type; - u32 update_fw_reset_active; - u32 support_upgrade_report; - u32 multi_conf_support; - u32 port_type; -#define UNF_LOW_LEVEL_RELEASE_RPORT_SYNC 0x0 -#define UNF_LOW_LEVEL_RELEASE_RPORT_ASYNC 0x1 - u8 rport_release_type; -#define UNF_LOW_LEVEL_SIRT_PAGE_MODE_FIXED 0x0 -#define UNF_LOW_LEVEL_SIRT_PAGE_MODE_XCHG 0x1 - u8 sirt_page_mode; - u8 sfp_speed; - - /* IO reference */ - struct unf_low_level_service_op service_op; - - /* Port Mgr reference */ - struct unf_low_level_port_mgr_op port_mgr_op; - - u8 chip_id; -}; - -struct unf_cm_handle_op { - /* return:L_Port */ - void *(*unf_alloc_local_port)(void *private_data, - struct unf_low_level_functioon_op *low_level_op); - - /* input para:L_Port */ - u32 (*unf_release_local_port)(void *lport); - - /* input para:L_Port, FRAME_PKG_S */ - u32 (*unf_receive_ls_gs_pkg)(void *lport, struct unf_frame_pkg *pkg); - - /* input para:L_Port, FRAME_PKG_S */ - u32 (*unf_receive_bls_pkg)(void *lport, struct unf_frame_pkg *pkg); - /* input para:L_Port, FRAME_PKG_S */ - u32 (*unf_send_els_done)(void *lport, struct unf_frame_pkg *pkg); - - /* input para:L_Port, FRAME_PKG_S */ - u32 (*unf_receive_marker_status)(void *lport, struct unf_frame_pkg *pkg); - u32 (*unf_receive_abts_marker_status)(void *lport, struct unf_frame_pkg *pkg); - /* input para:L_Port, FRAME_PKG_S */ - u32 (*unf_receive_ini_response)(void *lport, struct unf_frame_pkg *pkg); - - int (*unf_get_cfg_parms)(char *section_name, - struct unf_cfg_item *cfg_parm, u32 *cfg_value, - u32 item_num); - - /* TGT IO interface */ - u32 (*unf_process_fcp_cmnd)(void *lport, struct unf_frame_pkg *pkg); - - /* TGT IO Done */ - u32 (*unf_tgt_cmnd_xfer_or_rsp_echo)(void *lport, struct unf_frame_pkg *pkg); - - u32 (*unf_cm_get_sgl_entry)(void *pkg, char **buf, u32 *buf_len); - u32 (*unf_cm_get_dif_sgl_entry)(void *pkg, char **buf, u32 *buf_len); - - struct unf_esgl_page *(*unf_get_one_free_esgl_page)(void *lport, struct unf_frame_pkg *pkg); - - /* input para:L_Port, EVENT */ - u32 (*unf_fc_port_event)(void *lport, u32 events, void *input); - - int (*unf_drv_start_work)(void *lport); - - void (*unf_card_rport_chip_err)(struct pci_dev const *pci_dev); -}; - -u32 unf_get_cm_handle_ops(struct unf_cm_handle_op *cm_handle); -int unf_common_init(void); -void unf_common_exit(void); - -#endif diff --git a/drivers/scsi/spfc/common/unf_disc.c b/drivers/scsi/spfc/common/unf_disc.c deleted file mode 100644 index c48d0ba670d4..000000000000 --- a/drivers/scsi/spfc/common/unf_disc.c +++ /dev/null @@ -1,1276 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_disc.h" -#include "unf_log.h" -#include "unf_common.h" -#include "unf_event.h" -#include "unf_lport.h" -#include "unf_rport.h" -#include "unf_exchg.h" -#include "unf_ls.h" -#include "unf_gs.h" -#include "unf_portman.h" - -#define UNF_LIST_RSCN_PAGE_CNT 2560 -#define UNF_MAX_PORTS_PRI_LOOP 2 -#define UNF_MAX_GS_SEND_NUM 8 -#define UNF_OS_REMOVE_CARD_TIMEOUT (60 * 1000) - -static void unf_set_disc_state(struct unf_disc *disc, - enum unf_disc_state states) -{ - FC_CHECK_RETURN_VOID(disc); - - if (states != 
disc->states) { - /* Reset disc retry count */ - disc->retry_count = 0; - } - - disc->states = states; -} - -static inline u32 unf_get_loop_map(struct unf_lport *lport, u8 loop_map[], u32 loop_map_size) -{ - struct unf_buf buf = {0}; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(lport->low_level_func.port_mgr_op.ll_port_config_get, - UNF_RETURN_ERROR); - - buf.buf = loop_map; - buf.buf_len = loop_map_size; - - ret = lport->low_level_func.port_mgr_op.ll_port_config_get(lport->fc_port, - UNF_PORT_CFG_GET_LOOP_MAP, - (void *)&buf); - return ret; -} - -static void unf_login_with_loop_node(struct unf_lport *lport, u32 alpa) -{ - /* Only used for Private Loop LOGIN */ - struct unf_rport *unf_rport = NULL; - ulong rport_flag = 0; - u32 port_feature = 0; - u32 ret; - - /* Check AL_PA validity */ - if (lport->nport_id == alpa) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) is the same as RPort with AL_PA(0x%x), do nothing", - lport->port_id, alpa); - return; - } - - if (alpa == 0) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) RPort(0x%x) is fabric, do nothing", - lport->port_id, alpa); - return; - } - - /* Get & set R_Port: reuse only */ - unf_rport = unf_get_rport_by_nport_id(lport, alpa); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x_0x%x) RPort(0x%x_0x%p) login with private loop", - lport->port_id, lport->nport_id, alpa, unf_rport); - - unf_rport = unf_get_safe_rport(lport, unf_rport, UNF_RPORT_REUSE_ONLY, alpa); - if (!unf_rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) allocate new RPort(0x%x) failed", - lport->port_id, lport->nport_id, alpa); - return; - } - - /* Update R_Port state & N_Port_ID */ - spin_lock_irqsave(&unf_rport->rport_state_lock, rport_flag); - unf_rport->nport_id = alpa; - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_ENTER_PLOGI); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, rport_flag); - - /* Private Loop: check whether need delay to send PLOGI or not */ - port_feature = unf_rport->options; - - /* check Rport and Lport feature */ - if (port_feature == UNF_PORT_MODE_UNKNOWN && - lport->options == UNF_PORT_MODE_INI) { - /* Start to send PLOGI */ - ret = unf_send_plogi(lport, unf_rport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) send PLOGI to RPort(0x%x) failed", - lport->port_id, lport->nport_id, unf_rport->nport_id); - - unf_rport_error_recovery(unf_rport); - } - } else { - unf_check_rport_need_delay_plogi(lport, unf_rport, port_feature); - } -} - -static int unf_discover_private_loop(void *arg_in, void *arg_out) -{ - struct unf_lport *unf_lport = (struct unf_lport *)arg_in; - u32 ret = UNF_RETURN_ERROR; - u32 i = 0; - u8 loop_id = 0; - u32 alpa_index = 0; - u8 loop_map[UNF_LOOPMAP_COUNT]; - - FC_CHECK_RETURN_VALUE(unf_lport, UNF_RETURN_ERROR); - memset(loop_map, 0x0, UNF_LOOPMAP_COUNT); - - /* Get Port Loop Map */ - ret = unf_get_loop_map(unf_lport, loop_map, UNF_LOOPMAP_COUNT); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) get loop map failed", unf_lport->port_id); - - return UNF_RETURN_ERROR; - } - - /* Check Loop Map Ports Count */ - if (loop_map[ARRAY_INDEX_0] > UNF_MAX_PORTS_PRI_LOOP) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) has more than %d ports(%u) in private loop", - unf_lport->port_id, UNF_MAX_PORTS_PRI_LOOP, loop_map[ARRAY_INDEX_0]); - - return UNF_RETURN_ERROR; - } - - /* AL_PA = 0 means Public Loop */ - if 
(loop_map[ARRAY_INDEX_1] == UNF_FL_PORT_LOOP_ADDR || - loop_map[ARRAY_INDEX_2] == UNF_FL_PORT_LOOP_ADDR) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) one or more AL_PA is 0x00, indicate it's FL_Port", - unf_lport->port_id); - - return UNF_RETURN_ERROR; - } - - /* Discovery Private Loop Ports */ - for (i = 0; i < loop_map[ARRAY_INDEX_0]; i++) { - alpa_index = i + 1; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) start to disc(0x%x) with count(0x%x)", - unf_lport->port_id, loop_map[alpa_index], i); - - /* Check whether need delay to send PLOGI or not */ - loop_id = loop_map[alpa_index]; - unf_login_with_loop_node(unf_lport, (u32)loop_id); - } - - return RETURN_OK; -} - -u32 unf_disc_start(void *lport) -{ - /* - * Call by: - * 1. Enter Private Loop Login - * 2. Analysis RSCN payload - * 3. SCR callback - * * - * Doing: - * Fabric/Public Loop: Send GID_PT - * Private Loop: (delay to) send PLOGI or send LOGO immediately - * P2P: do nothing - */ - struct unf_lport *unf_lport = (struct unf_lport *)lport; - struct unf_rport *unf_rport = NULL; - struct unf_disc *disc = NULL; - struct unf_cm_event_report *event = NULL; - u32 ret = RETURN_OK; - ulong flag = 0; - enum unf_act_topo act_topo = UNF_ACT_TOP_UNKNOWN; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - act_topo = unf_lport->act_topo; - disc = &unf_lport->disc; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]LOGIN: Port(0x%x) with topo(0x%x) begin to discovery", - unf_lport->port_id, act_topo); - - if (act_topo == UNF_ACT_TOP_P2P_FABRIC || - act_topo == UNF_ACT_TOP_PUBLIC_LOOP) { - /* 1. Fabric or Public Loop Topology: for directory server */ - unf_rport = unf_get_rport_by_nport_id(unf_lport, - UNF_FC_FID_DIR_SERV); /* 0xfffffc */ - if (!unf_rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) unable to get SNS RPort(0xfffffc)", - unf_lport->port_id); - - unf_rport = unf_rport_get_free_and_init(unf_lport, UNF_PORT_TYPE_FC, - UNF_FC_FID_DIR_SERV); - if (!unf_rport) - return UNF_RETURN_ERROR; - - unf_rport->nport_id = UNF_FC_FID_DIR_SERV; - } - - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - unf_set_disc_state(disc, UNF_DISC_ST_START); /* disc start */ - unf_disc_state_ma(unf_lport, UNF_EVENT_DISC_NORMAL_ENTER); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - /* - * NOTE: Send GID_PT - * The Name Server shall, when it receives a GID_PT request, - * return all Port Identifiers having registered support for the - * specified Port Type. One or more Port Identifiers, having - * registered as the specified Port Type, are returned. 
- */ - ret = unf_send_gid_pt(unf_lport, unf_rport); - if (ret != RETURN_OK) - unf_disc_error_recovery(unf_lport); - } else if (act_topo == UNF_ACT_TOP_PRIVATE_LOOP) { - /* Private Loop: to thread process */ - event = unf_get_one_event_node(unf_lport); - FC_CHECK_RETURN_VALUE(event, UNF_RETURN_ERROR); - - event->lport = unf_lport; - event->event_asy_flag = UNF_EVENT_ASYN; - event->unf_event_task = unf_discover_private_loop; - event->para_in = (void *)unf_lport; - - unf_post_one_event_node(unf_lport, event); - } else { - /* P2P toplogy mode: Do nothing */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) with topo(0x%x) need do nothing", - unf_lport->port_id, act_topo); - } - - return ret; -} - -static u32 unf_disc_stop(void *lport) -{ - /* Call by GID_ACC processer */ - struct unf_lport *unf_lport = NULL; - struct unf_lport *root_lport = NULL; - struct unf_rport *sns_port = NULL; - struct unf_disc_rport *disc_rport = NULL; - struct unf_disc *disc = NULL; - struct unf_disc *root_disc = NULL; - struct list_head *node = NULL; - ulong flag = 0; - u32 ret = RETURN_OK; - u32 nport_id = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - unf_lport = (struct unf_lport *)lport; - disc = &unf_lport->disc; - root_lport = (struct unf_lport *)unf_lport->root_lport; - root_disc = &root_lport->disc; - - /* Get R_Port for Directory server */ - sns_port = unf_get_rport_by_nport_id(unf_lport, UNF_FC_FID_DIR_SERV); - if (!sns_port) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) find fabric RPort(0xfffffc) failed", - unf_lport->port_id); - - return UNF_RETURN_ERROR; - } - - /* for R_Port from disc pool busy list */ - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - if (list_empty(&disc->disc_rport_mgr.list_disc_rports_busy)) { - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - return RETURN_OK; - } - - node = UNF_OS_LIST_NEXT(&disc->disc_rport_mgr.list_disc_rports_busy); - do { - /* Delete from Disc busy list */ - disc_rport = list_entry(node, struct unf_disc_rport, entry_rport); - nport_id = disc_rport->nport_id; - list_del_init(node); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - /* Add back to (free) Disc R_Port pool (list) */ - spin_lock_irqsave(&root_disc->rport_busy_pool_lock, flag); - list_add_tail(node, &root_disc->disc_rport_mgr.list_disc_rports_pool); - spin_unlock_irqrestore(&root_disc->rport_busy_pool_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "Port(0x%x_0x%x) remove nportid:0x%x from rportbusy list", - unf_lport->port_id, unf_lport->nport_id, disc_rport->nport_id); - /* Send GNN_ID to Name Server */ - ret = unf_get_and_post_disc_event(unf_lport, sns_port, nport_id, - UNF_DISC_GET_NODE_NAME); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", - unf_lport->nport_id, UNF_DISC_GET_NODE_NAME, nport_id); - - /* NOTE: go to next stage */ - unf_rcv_gnn_id_rsp_unknown(unf_lport, sns_port, nport_id); - } - - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - node = UNF_OS_LIST_NEXT(&disc->disc_rport_mgr.list_disc_rports_busy); - } while (node != &disc->disc_rport_mgr.list_disc_rports_busy); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - return ret; -} - -static u32 unf_init_rport_pool(struct unf_lport *lport) -{ - struct unf_rport_pool *rport_pool = NULL; - struct unf_rport *unf_rport = NULL; - u32 ret = RETURN_OK; - u32 i = 0; - u32 bitmap_cnt = 0; - ulong flag = 0; - u32 max_login = 0; - - 
FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - /* Init RPort Pool info */ - rport_pool = &lport->rport_pool; - max_login = lport->low_level_func.lport_cfg_items.max_login; - rport_pool->rport_pool_completion = NULL; - rport_pool->rport_pool_count = max_login; - spin_lock_init(&rport_pool->rport_free_pool_lock); - INIT_LIST_HEAD(&rport_pool->list_rports_pool); /* free RPort pool */ - - /* 1. Alloc RPort Pool buffer/resource (memory) */ - rport_pool->rport_pool_add = vmalloc((size_t)(max_login * sizeof(struct unf_rport))); - if (!rport_pool->rport_pool_add) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) allocate RPort(s) resource failed", lport->port_id); - - return UNF_RETURN_ERROR; - } - memset(rport_pool->rport_pool_add, 0, (max_login * sizeof(struct unf_rport))); - - /* 2. Alloc R_Port Pool bitmap */ - bitmap_cnt = (lport->low_level_func.support_max_rport) / BITS_PER_LONG + 1; - rport_pool->rpi_bitmap = vmalloc((size_t)(bitmap_cnt * sizeof(ulong))); - if (!rport_pool->rpi_bitmap) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) allocate RPort Bitmap failed", lport->port_id); - - vfree(rport_pool->rport_pool_add); - rport_pool->rport_pool_add = NULL; - return UNF_RETURN_ERROR; - } - memset(rport_pool->rpi_bitmap, 0, (bitmap_cnt * sizeof(ulong))); - - /* 3. Rport resource Management: Add Rports (buffer) to Rport Pool List - */ - unf_rport = (struct unf_rport *)(rport_pool->rport_pool_add); - spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); - for (i = 0; i < rport_pool->rport_pool_count; i++) { - spin_lock_init(&unf_rport->rport_state_lock); - list_add_tail(&unf_rport->entry_rport, &rport_pool->list_rports_pool); - sema_init(&unf_rport->task_sema, 0); - unf_rport++; - } - spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag); - - return ret; -} - -static void unf_free_rport_pool(struct unf_lport *lport) -{ - struct unf_rport_pool *rport_pool = NULL; - bool wait = false; - ulong flag = 0; - u32 remain = 0; - u64 timeout = 0; - u32 max_login = 0; - u32 i; - struct unf_rport *unf_rport = NULL; - struct completion rport_pool_completion; - - init_completion(&rport_pool_completion); - FC_CHECK_RETURN_VOID(lport); - - rport_pool = &lport->rport_pool; - max_login = lport->low_level_func.lport_cfg_items.max_login; - - spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); - if (rport_pool->rport_pool_count != max_login) { - rport_pool->rport_pool_completion = &rport_pool_completion; - remain = max_login - rport_pool->rport_pool_count; - wait = true; - } - spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag); - - if (wait) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) begin to wait for RPort pool completion, remain(0x%x)", - lport->port_id, remain); - - unf_show_all_rport(lport); - - timeout = wait_for_completion_timeout(rport_pool->rport_pool_completion, - msecs_to_jiffies(UNF_OS_REMOVE_CARD_TIMEOUT)); - if (timeout == 0) - unf_cm_mark_dirty_mem(lport, UNF_LPORT_DIRTY_FLAG_RPORT_POOL_DIRTY); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) wait for RPort pool completion end", - lport->port_id); - - spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); - rport_pool->rport_pool_completion = NULL; - spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag); - } - - unf_rport = (struct unf_rport *)(rport_pool->rport_pool_add); - for (i = 0; i < rport_pool->rport_pool_count; i++) { - if (!unf_rport) - break; - unf_rport++; - } - - if ((lport->dirty_flag & 
UNF_LPORT_DIRTY_FLAG_RPORT_POOL_DIRTY) == 0) { - vfree(rport_pool->rport_pool_add); - rport_pool->rport_pool_add = NULL; - vfree(rport_pool->rpi_bitmap); - rport_pool->rpi_bitmap = NULL; - } -} - -static void unf_init_rscn_node(struct unf_port_id_page *port_id_page) -{ - FC_CHECK_RETURN_VOID(port_id_page); - - port_id_page->addr_format = 0; - port_id_page->event_qualifier = 0; - port_id_page->reserved = 0; - port_id_page->port_id_area = 0; - port_id_page->port_id_domain = 0; - port_id_page->port_id_port = 0; -} - -struct unf_port_id_page *unf_get_free_rscn_node(void *rscn_mg) -{ - /* Call by Save RSCN Port_ID */ - struct unf_rscn_mgr *rscn_mgr = NULL; - struct unf_port_id_page *port_id_node = NULL; - struct list_head *list_node = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(rscn_mg, NULL); - rscn_mgr = (struct unf_rscn_mgr *)rscn_mg; - - spin_lock_irqsave(&rscn_mgr->rscn_id_list_lock, flag); - if (list_empty(&rscn_mgr->list_free_rscn_page)) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_WARN, - "[warn]No RSCN node anymore"); - - spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag); - return NULL; - } - - /* Get from list_free_RSCN_page */ - list_node = UNF_OS_LIST_NEXT(&rscn_mgr->list_free_rscn_page); - list_del(list_node); - rscn_mgr->free_rscn_count--; - port_id_node = list_entry(list_node, struct unf_port_id_page, list_node_rscn); - unf_init_rscn_node(port_id_node); - spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag); - - return port_id_node; -} - -static void unf_release_rscn_node(void *rscn_mg, void *port_id_node) -{ - /* Call by RSCN GID_ACC */ - struct unf_rscn_mgr *rscn_mgr = NULL; - struct unf_port_id_page *port_id_page = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(rscn_mg); - FC_CHECK_RETURN_VOID(port_id_node); - rscn_mgr = (struct unf_rscn_mgr *)rscn_mg; - port_id_page = (struct unf_port_id_page *)port_id_node; - - /* Back to list_free_RSCN_page */ - spin_lock_irqsave(&rscn_mgr->rscn_id_list_lock, flag); - rscn_mgr->free_rscn_count++; - unf_init_rscn_node(port_id_page); - list_add_tail(&port_id_page->list_node_rscn, &rscn_mgr->list_free_rscn_page); - spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag); -} - -static u32 unf_init_rscn_pool(struct unf_lport *lport) -{ - struct unf_rscn_mgr *rscn_mgr = NULL; - struct unf_port_id_page *port_id_page = NULL; - u32 ret = RETURN_OK; - u32 i = 0; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - rscn_mgr = &lport->disc.rscn_mgr; - - /* Get RSCN Pool buffer */ - rscn_mgr->rscn_pool_add = vmalloc(UNF_LIST_RSCN_PAGE_CNT * sizeof(struct unf_port_id_page)); - if (!rscn_mgr->rscn_pool_add) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Port(0x%x) allocate RSCN pool failed", lport->port_id); - - return UNF_RETURN_ERROR; - } - memset(rscn_mgr->rscn_pool_add, 0, - UNF_LIST_RSCN_PAGE_CNT * sizeof(struct unf_port_id_page)); - - spin_lock_irqsave(&rscn_mgr->rscn_id_list_lock, flag); - port_id_page = (struct unf_port_id_page *)(rscn_mgr->rscn_pool_add); - for (i = 0; i < UNF_LIST_RSCN_PAGE_CNT; i++) { - /* Add tail to list_free_RSCN_page */ - list_add_tail(&port_id_page->list_node_rscn, &rscn_mgr->list_free_rscn_page); - - rscn_mgr->free_rscn_count++; - port_id_page++; - } - spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag); - - return ret; -} - -static void unf_freerscn_pool(struct unf_lport *lport) -{ - struct unf_disc *disc = NULL; - - FC_CHECK_RETURN_VOID(lport); - - disc = &lport->disc; - if (disc->rscn_mgr.rscn_pool_add) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_INFO, 
"[info]Port(0x%x) free RSCN pool", lport->nport_id); - - vfree(disc->rscn_mgr.rscn_pool_add); - disc->rscn_mgr.rscn_pool_add = NULL; - } -} - -static u32 unf_init_rscn_mgr(struct unf_lport *lport) -{ - struct unf_rscn_mgr *rscn_mgr = NULL; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - rscn_mgr = &lport->disc.rscn_mgr; - - INIT_LIST_HEAD(&rscn_mgr->list_free_rscn_page); /* free RSCN page list */ - INIT_LIST_HEAD(&rscn_mgr->list_using_rscn_page); /* busy RSCN page list */ - spin_lock_init(&rscn_mgr->rscn_id_list_lock); - rscn_mgr->free_rscn_count = 0; - rscn_mgr->unf_get_free_rscn_node = unf_get_free_rscn_node; - rscn_mgr->unf_release_rscn_node = unf_release_rscn_node; - - ret = unf_init_rscn_pool(lport); - return ret; -} - -static void unf_destroy_rscn_mngr(struct unf_lport *lport) -{ - struct unf_rscn_mgr *rscn_mgr = NULL; - - FC_CHECK_RETURN_VOID(lport); - rscn_mgr = &lport->disc.rscn_mgr; - - rscn_mgr->free_rscn_count = 0; - rscn_mgr->unf_get_free_rscn_node = NULL; - rscn_mgr->unf_release_rscn_node = NULL; - - unf_freerscn_pool(lport); -} - -static u32 unf_init_disc_rport_pool(struct unf_lport *lport) -{ - struct unf_disc_rport_mg *disc_mgr = NULL; - struct unf_disc_rport *disc_rport = NULL; - u32 i = 0; - u32 max_log_in = 0; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - max_log_in = lport->low_level_func.lport_cfg_items.max_login; - disc_mgr = &lport->disc.disc_rport_mgr; - - /* Alloc R_Port Disc Pool buffer */ - disc_mgr->disc_pool_add = - vmalloc(max_log_in * sizeof(struct unf_disc_rport)); - if (!disc_mgr->disc_pool_add) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Port(0x%x) allocate disc RPort pool failed", lport->port_id); - - return UNF_RETURN_ERROR; - } - memset(disc_mgr->disc_pool_add, 0, (max_log_in * sizeof(struct unf_disc_rport))); - - /* Add R_Port to (free) DISC R_Port Pool */ - spin_lock_irqsave(&lport->disc.rport_busy_pool_lock, flag); - disc_rport = (struct unf_disc_rport *)(disc_mgr->disc_pool_add); - for (i = 0; i < max_log_in; i++) { - /* Add tail to list_disc_Rport_pool */ - list_add_tail(&disc_rport->entry_rport, &disc_mgr->list_disc_rports_pool); - - disc_rport++; - } - spin_unlock_irqrestore(&lport->disc.rport_busy_pool_lock, flag); - - return RETURN_OK; -} - -static void unf_free_disc_rport_pool(struct unf_lport *lport) -{ - struct unf_disc *disc = NULL; - - FC_CHECK_RETURN_VOID(lport); - - disc = &lport->disc; - if (disc->disc_rport_mgr.disc_pool_add) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_INFO, "[info]Port(0x%x) free disc RPort pool", lport->port_id); - - vfree(disc->disc_rport_mgr.disc_pool_add); - disc->disc_rport_mgr.disc_pool_add = NULL; - } -} - -int unf_discover_port_info(void *arg_in) -{ - struct unf_disc_gs_event_info *disc_gs_info = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - - FC_CHECK_RETURN_VALUE(arg_in, UNF_RETURN_ERROR); - - disc_gs_info = (struct unf_disc_gs_event_info *)arg_in; - unf_lport = (struct unf_lport *)disc_gs_info->lport; - unf_rport = (struct unf_rport *)disc_gs_info->rport; - - switch (disc_gs_info->type) { - case UNF_DISC_GET_PORT_NAME: - ret = unf_send_gpn_id(unf_lport, unf_rport, disc_gs_info->rport_id); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) send GPN_ID failed RPort(0x%x)", - unf_lport->nport_id, disc_gs_info->rport_id); - unf_rcv_gpn_id_rsp_unknown(unf_lport, disc_gs_info->rport_id); - } - break; - case UNF_DISC_GET_FEATURE: - ret = 
unf_send_gff_id(unf_lport, unf_rport, disc_gs_info->rport_id); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) send GFF_ID failed to get RPort(0x%x)'s feature", - unf_lport->port_id, disc_gs_info->rport_id); - - unf_rcv_gff_id_rsp_unknown(unf_lport, disc_gs_info->rport_id); - } - break; - case UNF_DISC_GET_NODE_NAME: - ret = unf_send_gnn_id(unf_lport, unf_rport, disc_gs_info->rport_id); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) GNN_ID send failed with NPort ID(0x%x)", - unf_lport->port_id, disc_gs_info->rport_id); - - /* NOTE: Continue to next stage */ - unf_rcv_gnn_id_rsp_unknown(unf_lport, unf_rport, disc_gs_info->rport_id); - } - break; - default: - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ERR, - "[err]Send GS packet type(0x%x) is unknown", disc_gs_info->type); - } - - kfree(disc_gs_info); - - return (int)ret; -} - -u32 unf_get_and_post_disc_event(void *lport, void *sns_port, u32 nport_id, - enum unf_disc_type type) -{ - struct unf_disc_gs_event_info *disc_gs_info = NULL; - ulong flag = 0; - struct unf_lport *root_lport = NULL; - struct unf_lport *unf_lport = NULL; - struct unf_disc_manage_info *disc_info = NULL; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(sns_port, UNF_RETURN_ERROR); - - unf_lport = (struct unf_lport *)lport; - - if (unf_lport->link_up == UNF_PORT_LINK_DOWN) - return RETURN_OK; - - root_lport = unf_lport->root_lport; - disc_info = &root_lport->disc.disc_thread_info; - - if (disc_info->thread_exit) - return RETURN_OK; - - disc_gs_info = kmalloc(sizeof(struct unf_disc_gs_event_info), GFP_ATOMIC); - if (!disc_gs_info) - return UNF_RETURN_ERROR; - - disc_gs_info->type = type; - disc_gs_info->lport = unf_lport; - disc_gs_info->rport = sns_port; - disc_gs_info->rport_id = nport_id; - - INIT_LIST_HEAD(&disc_gs_info->list_entry); - - spin_lock_irqsave(&disc_info->disc_event_list_lock, flag); - list_add_tail(&disc_gs_info->list_entry, &disc_info->list_head); - spin_unlock_irqrestore(&disc_info->disc_event_list_lock, flag); - wake_up_process(disc_info->thread); - return RETURN_OK; -} - -static int unf_disc_event_process(void *arg) -{ - struct list_head *node = NULL; - struct unf_disc_gs_event_info *disc_gs_info = NULL; - ulong flags = 0; - struct unf_disc *disc = (struct unf_disc *)arg; - struct unf_disc_manage_info *disc_info = &disc->disc_thread_info; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "Port(0x%x) enter discovery thread.", disc->lport->port_id); - - while (!kthread_should_stop()) { - if (disc_info->thread_exit) - break; - - spin_lock_irqsave(&disc_info->disc_event_list_lock, flags); - if ((list_empty(&disc_info->list_head)) || - (atomic_read(&disc_info->disc_contrl_size) == 0)) { - spin_unlock_irqrestore(&disc_info->disc_event_list_lock, flags); - - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout((long)msecs_to_jiffies(UNF_S_TO_MS)); - } else { - node = UNF_OS_LIST_NEXT(&disc_info->list_head); - list_del_init(node); - disc_gs_info = list_entry(node, struct unf_disc_gs_event_info, list_entry); - spin_unlock_irqrestore(&disc_info->disc_event_list_lock, flags); - unf_discover_port_info(disc_gs_info); - } - } - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_MAJOR, - "Port(0x%x) discovery thread over.", disc->lport->port_id); - - return RETURN_OK; -} - -void unf_flush_disc_event(void *disc, void *vport) -{ - struct unf_disc *unf_disc = (struct unf_disc *)disc; - struct unf_disc_manage_info *disc_info = NULL; - struct list_head *list = NULL; - struct list_head 
*list_tmp = NULL; - struct unf_disc_gs_event_info *disc_gs_info = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(disc); - - disc_info = &unf_disc->disc_thread_info; - - spin_lock_irqsave(&disc_info->disc_event_list_lock, flag); - list_for_each_safe(list, list_tmp, &disc_info->list_head) { - disc_gs_info = list_entry(list, struct unf_disc_gs_event_info, list_entry); - - if (!vport || disc_gs_info->lport == vport) { - list_del_init(&disc_gs_info->list_entry); - kfree(disc_gs_info); - } - } - - if (!vport) - atomic_set(&disc_info->disc_contrl_size, UNF_MAX_GS_SEND_NUM); - spin_unlock_irqrestore(&disc_info->disc_event_list_lock, flag); -} - -void unf_disc_ctrl_size_inc(void *lport, u32 cmnd) -{ - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VOID(lport); - - unf_lport = (struct unf_lport *)lport; - unf_lport = unf_lport->root_lport; - FC_CHECK_RETURN_VOID(unf_lport); - - if (atomic_read(&unf_lport->disc.disc_thread_info.disc_contrl_size) == - UNF_MAX_GS_SEND_NUM) - return; - - if (cmnd == NS_GPN_ID || cmnd == NS_GNN_ID || cmnd == NS_GFF_ID) - atomic_inc(&unf_lport->disc.disc_thread_info.disc_contrl_size); -} - -void unf_destroy_disc_thread(void *disc) -{ - struct unf_disc_manage_info *disc_info = NULL; - struct unf_disc *unf_disc = (struct unf_disc *)disc; - - FC_CHECK_RETURN_VOID(unf_disc); - - disc_info = &unf_disc->disc_thread_info; - - disc_info->thread_exit = true; - unf_flush_disc_event(unf_disc, NULL); - - wake_up_process(disc_info->thread); - kthread_stop(disc_info->thread); - disc_info->thread = NULL; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Port(0x%x) destroy discovery thread succeed.", - unf_disc->lport->port_id); -} - -u32 unf_crerate_disc_thread(void *disc) -{ - struct unf_disc_manage_info *disc_info = NULL; - struct unf_disc *unf_disc = (struct unf_disc *)disc; - - FC_CHECK_RETURN_VALUE(unf_disc, UNF_RETURN_ERROR); - - /* If the thread cannot be found, apply for a new thread. 
*/ - disc_info = &unf_disc->disc_thread_info; - - memset(disc_info, 0, sizeof(struct unf_disc_manage_info)); - - INIT_LIST_HEAD(&disc_info->list_head); - spin_lock_init(&disc_info->disc_event_list_lock); - atomic_set(&disc_info->disc_contrl_size, UNF_MAX_GS_SEND_NUM); - - disc_info->thread_exit = false; - disc_info->thread = kthread_create(unf_disc_event_process, unf_disc, "%x_DiscT", - unf_disc->lport->port_id); - - if (IS_ERR(disc_info->thread) || !disc_info->thread) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Port(0x%x) creat discovery thread(0x%p) unsuccessful.", - unf_disc->lport->port_id, disc_info->thread); - - return UNF_RETURN_ERROR; - } - - wake_up_process(disc_info->thread); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "Port(0x%x) creat discovery thread succeed.", unf_disc->lport->port_id); - - return RETURN_OK; -} - -void unf_disc_ref_cnt_dec(struct unf_disc *disc) -{ - ulong flag = 0; - - FC_CHECK_RETURN_VOID(disc); - - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - if (atomic_dec_and_test(&disc->disc_ref_cnt)) { - if (disc->disc_completion) - complete(disc->disc_completion); - } - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); -} - -void unf_wait_disc_complete(struct unf_lport *lport) -{ - struct unf_disc *disc = NULL; - bool wait = false; - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - u64 time_out = 0; - - struct completion disc_completion; - - init_completion(&disc_completion); - disc = &lport->disc; - - UNF_DELAYED_WORK_SYNC(ret, (lport->port_id), (&disc->disc_work), - "Disc_work"); - if (ret == RETURN_OK) - unf_disc_ref_cnt_dec(disc); - - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - if (atomic_read(&disc->disc_ref_cnt) != 0) { - disc->disc_completion = &disc_completion; - wait = true; - } - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - if (wait) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) begin to wait for discover completion", - lport->port_id); - - time_out = - wait_for_completion_timeout(disc->disc_completion, - msecs_to_jiffies(UNF_OS_REMOVE_CARD_TIMEOUT)); - if (time_out == 0) - unf_cm_mark_dirty_mem(lport, UNF_LPORT_DIRTY_FLAG_DISC_DIRTY); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) wait for discover completion end", lport->port_id); - - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - disc->disc_completion = NULL; - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - } -} - -void unf_disc_mgr_destroy(void *lport) -{ - struct unf_disc *disc = NULL; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VOID(lport); - unf_lport = (struct unf_lport *)lport; - - disc = &unf_lport->disc; - disc->retry_count = 0; - disc->disc_temp.unf_disc_start = NULL; - disc->disc_temp.unf_disc_stop = NULL; - disc->disc_temp.unf_disc_callback = NULL; - - unf_free_disc_rport_pool(unf_lport); - unf_destroy_rscn_mngr(unf_lport); - unf_wait_disc_complete(unf_lport); - - if (unf_lport->root_lport != unf_lport) - return; - - unf_destroy_disc_thread(disc); - unf_free_rport_pool(unf_lport); - unf_lport->destroy_step = UNF_LPORT_DESTROY_STEP_6_DESTROY_DISC_MGR; -} - -void unf_disc_error_recovery(void *lport) -{ - struct unf_rport *unf_rport = NULL; - struct unf_disc *disc = NULL; - ulong delay = 0; - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VOID(lport); - - unf_lport = (struct unf_lport *)lport; - disc = &unf_lport->disc; - - unf_rport = unf_get_rport_by_nport_id(unf_lport, UNF_FC_FID_DIR_SERV); - if (!unf_rport) { - 
FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_WARN, "[warn]Port(0x%x) find RPort failed", unf_lport->port_id); - return; - } - - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - - /* Delay work is pending */ - if (delayed_work_pending(&disc->disc_work)) { - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) disc_work is running and do nothing", - unf_lport->port_id); - return; - } - - /* Continue to retry */ - if (disc->retry_count < disc->max_retry_count) { - disc->retry_count++; - delay = (ulong)unf_lport->ed_tov; - if (queue_delayed_work(unf_wq, &disc->disc_work, - (ulong)msecs_to_jiffies((u32)delay))) - atomic_inc(&disc->disc_ref_cnt); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - } else { - /* Go to next stage */ - if (disc->states == UNF_DISC_ST_GIDPT_WAIT) { - /* GID_PT_WAIT --->>> Send GID_FT */ - unf_disc_state_ma(unf_lport, UNF_EVENT_DISC_RETRY_TIMEOUT); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - while ((ret != RETURN_OK) && - (disc->retry_count < disc->max_retry_count)) { - ret = unf_send_gid_ft(unf_lport, unf_rport); - disc->retry_count++; - } - } else if (disc->states == UNF_DISC_ST_GIDFT_WAIT) { - /* GID_FT_WAIT --->>> Send LOGO */ - unf_disc_state_ma(unf_lport, UNF_EVENT_DISC_RETRY_TIMEOUT); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - } else { - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - } - } -} - -enum unf_disc_state unf_disc_stat_start(enum unf_disc_state old_state, - enum unf_disc_event event) -{ - enum unf_disc_state next_state = UNF_DISC_ST_END; - - if (event == UNF_EVENT_DISC_NORMAL_ENTER) - next_state = UNF_DISC_ST_GIDPT_WAIT; - else - next_state = old_state; - - return next_state; -} - -enum unf_disc_state unf_disc_stat_gid_pt_wait(enum unf_disc_state old_state, - enum unf_disc_event event) -{ - enum unf_disc_state next_state = UNF_DISC_ST_END; - - switch (event) { - case UNF_EVENT_DISC_FAILED: - next_state = UNF_DISC_ST_GIDPT_WAIT; - break; - - case UNF_EVENT_DISC_RETRY_TIMEOUT: - next_state = UNF_DISC_ST_GIDFT_WAIT; - break; - - case UNF_EVENT_DISC_SUCCESS: - next_state = UNF_DISC_ST_END; - break; - - case UNF_EVENT_DISC_LINKDOWN: - next_state = UNF_DISC_ST_START; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -enum unf_disc_state unf_disc_stat_gid_ft_wait(enum unf_disc_state old_state, - enum unf_disc_event event) -{ - enum unf_disc_state next_state = UNF_DISC_ST_END; - - switch (event) { - case UNF_EVENT_DISC_FAILED: - next_state = UNF_DISC_ST_GIDFT_WAIT; - break; - - case UNF_EVENT_DISC_RETRY_TIMEOUT: - next_state = UNF_DISC_ST_END; - break; - - case UNF_EVENT_DISC_LINKDOWN: - next_state = UNF_DISC_ST_START; - break; - - case UNF_EVENT_DISC_SUCCESS: - next_state = UNF_DISC_ST_END; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -enum unf_disc_state unf_disc_stat_end(enum unf_disc_state old_state, enum unf_disc_event event) -{ - enum unf_disc_state next_state = UNF_DISC_ST_END; - - if (event == UNF_EVENT_DISC_LINKDOWN) - next_state = UNF_DISC_ST_START; - else - next_state = old_state; - - return next_state; -} - -void unf_disc_state_ma(struct unf_lport *lport, enum unf_disc_event event) -{ - struct unf_disc *disc = NULL; - enum unf_disc_state old_state = UNF_DISC_ST_START; - enum unf_disc_state next_state = UNF_DISC_ST_START; - - FC_CHECK_RETURN_VOID(lport); - - disc = &lport->disc; - old_state = disc->states; - - switch 
(disc->states) { - case UNF_DISC_ST_START: - next_state = unf_disc_stat_start(old_state, event); - break; - - case UNF_DISC_ST_GIDPT_WAIT: - next_state = unf_disc_stat_gid_pt_wait(old_state, event); - break; - - case UNF_DISC_ST_GIDFT_WAIT: - next_state = unf_disc_stat_gid_ft_wait(old_state, event); - break; - - case UNF_DISC_ST_END: - next_state = unf_disc_stat_end(old_state, event); - break; - - default: - next_state = old_state; - break; - } - - unf_set_disc_state(disc, next_state); -} - -static void unf_lport_disc_timeout(struct work_struct *work) -{ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_disc *disc = NULL; - enum unf_disc_state state = UNF_DISC_ST_END; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(work); - - disc = container_of(work, struct unf_disc, disc_work.work); - if (!disc) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_WARN, "[warn]Get discover pointer failed"); - - return; - } - - unf_lport = disc->lport; - if (!unf_lport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Find Port by discovery work failed"); - - unf_disc_ref_cnt_dec(disc); - return; - } - - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - state = disc->states; - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - unf_rport = unf_get_rport_by_nport_id(unf_lport, UNF_FC_FID_DIR_SERV); /* 0xfffffc */ - if (!unf_rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) find fabric RPort failed", unf_lport->port_id); - - unf_disc_ref_cnt_dec(disc); - return; - } - - switch (state) { - case UNF_DISC_ST_START: - break; - - case UNF_DISC_ST_GIDPT_WAIT: - (void)unf_send_gid_pt(unf_lport, unf_rport); - break; - - case UNF_DISC_ST_GIDFT_WAIT: - (void)unf_send_gid_ft(unf_lport, unf_rport); - break; - - case UNF_DISC_ST_END: - break; - - default: - break; - } - - unf_disc_ref_cnt_dec(disc); -} - -u32 unf_init_disc_mgr(struct unf_lport *lport) -{ - struct unf_disc *disc = NULL; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - disc = &lport->disc; - disc->max_retry_count = UNF_DISC_RETRY_TIMES; - disc->retry_count = 0; - disc->disc_flag = UNF_DISC_NONE; - INIT_LIST_HEAD(&disc->list_busy_rports); - INIT_LIST_HEAD(&disc->list_delete_rports); - INIT_LIST_HEAD(&disc->list_destroy_rports); - spin_lock_init(&disc->rport_busy_pool_lock); - - disc->disc_rport_mgr.disc_pool_add = NULL; - INIT_LIST_HEAD(&disc->disc_rport_mgr.list_disc_rports_pool); - INIT_LIST_HEAD(&disc->disc_rport_mgr.list_disc_rports_busy); - - disc->disc_completion = NULL; - disc->lport = lport; - INIT_DELAYED_WORK(&disc->disc_work, unf_lport_disc_timeout); - disc->disc_temp.unf_disc_start = unf_disc_start; - disc->disc_temp.unf_disc_stop = unf_disc_stop; - disc->disc_temp.unf_disc_callback = NULL; - atomic_set(&disc->disc_ref_cnt, 0); - - /* Init RSCN Manager */ - ret = unf_init_rscn_mgr(lport); - if (ret != RETURN_OK) - return UNF_RETURN_ERROR; - - if (lport->root_lport != lport) - return ret; - - ret = unf_crerate_disc_thread(disc); - if (ret != RETURN_OK) { - unf_destroy_rscn_mngr(lport); - - return UNF_RETURN_ERROR; - } - - /* Init R_Port free Pool */ - ret = unf_init_rport_pool(lport); - if (ret != RETURN_OK) { - unf_destroy_disc_thread(disc); - unf_destroy_rscn_mngr(lport); - - return UNF_RETURN_ERROR; - } - - /* Init R_Port free disc Pool */ - ret = unf_init_disc_rport_pool(lport); - if (ret != RETURN_OK) { - unf_destroy_disc_thread(disc); - unf_free_rport_pool(lport); - unf_destroy_rscn_mngr(lport); - - return UNF_RETURN_ERROR; - } - - return 
ret; -} diff --git a/drivers/scsi/spfc/common/unf_disc.h b/drivers/scsi/spfc/common/unf_disc.h deleted file mode 100644 index 7ecad3eec424..000000000000 --- a/drivers/scsi/spfc/common/unf_disc.h +++ /dev/null @@ -1,51 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_DISC_H -#define UNF_DISC_H - -#include "unf_type.h" - -#define UNF_DISC_RETRY_TIMES 3 -#define UNF_DISC_NONE 0 -#define UNF_DISC_FABRIC 1 -#define UNF_DISC_LOOP 2 - -enum unf_disc_state { - UNF_DISC_ST_START = 0x3000, - UNF_DISC_ST_GIDPT_WAIT, - UNF_DISC_ST_GIDFT_WAIT, - UNF_DISC_ST_END -}; - -enum unf_disc_event { - UNF_EVENT_DISC_NORMAL_ENTER = 0x8000, - UNF_EVENT_DISC_FAILED = 0x8001, - UNF_EVENT_DISC_SUCCESS = 0x8002, - UNF_EVENT_DISC_RETRY_TIMEOUT = 0x8003, - UNF_EVENT_DISC_LINKDOWN = 0x8004 -}; - -enum unf_disc_type { - UNF_DISC_GET_PORT_NAME = 0, - UNF_DISC_GET_NODE_NAME, - UNF_DISC_GET_FEATURE -}; - -struct unf_disc_gs_event_info { - void *lport; - void *rport; - u32 rport_id; - enum unf_disc_type type; - struct list_head list_entry; -}; - -u32 unf_get_and_post_disc_event(void *lport, void *sns_port, u32 nport_id, - enum unf_disc_type type); - -void unf_flush_disc_event(void *disc, void *vport); -void unf_disc_ctrl_size_inc(void *lport, u32 cmnd); -void unf_disc_error_recovery(void *lport); -void unf_disc_mgr_destroy(void *lport); - -#endif diff --git a/drivers/scsi/spfc/common/unf_event.c b/drivers/scsi/spfc/common/unf_event.c deleted file mode 100644 index cf51c31ca4a3..000000000000 --- a/drivers/scsi/spfc/common/unf_event.c +++ /dev/null @@ -1,517 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_event.h" -#include "unf_log.h" -#include "unf_common.h" -#include "unf_lport.h" - -struct unf_event_list fc_event_list; -struct unf_global_event_queue global_event_queue; - -/* Max global event node */ -#define UNF_MAX_GLOBAL_ENENT_NODE 24 - -u32 unf_init_event_msg(struct unf_lport *lport) -{ - struct unf_event_mgr *event_mgr = NULL; - struct unf_cm_event_report *event_node = NULL; - u32 ret = RETURN_OK; - u32 index = 0; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - event_mgr = &lport->event_mgr; - - /* Get and Initial Event Node resource */ - event_mgr->mem_add = vmalloc((size_t)event_mgr->free_event_count * - sizeof(struct unf_cm_event_report)); - if (!event_mgr->mem_add) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Port(0x%x) allocate event manager failed", - lport->port_id); - - return UNF_RETURN_ERROR; - } - memset(event_mgr->mem_add, 0, - ((size_t)event_mgr->free_event_count * sizeof(struct unf_cm_event_report))); - - event_node = (struct unf_cm_event_report *)(event_mgr->mem_add); - - spin_lock_irqsave(&event_mgr->port_event_lock, flag); - for (index = 0; index < event_mgr->free_event_count; index++) { - INIT_LIST_HEAD(&event_node->list_entry); - list_add_tail(&event_node->list_entry, &event_mgr->list_free_event); - event_node++; - } - spin_unlock_irqrestore(&event_mgr->port_event_lock, flag); - - return ret; -} - -static void unf_del_event_center_fun_op(struct unf_lport *lport) -{ - struct unf_event_mgr *event_mgr = NULL; - - FC_CHECK_RETURN_VOID(lport); - - event_mgr = &lport->event_mgr; - event_mgr->unf_get_free_event_func = NULL; - event_mgr->unf_release_event = NULL; - event_mgr->unf_post_event_func = NULL; -} - -void unf_init_event_node(struct unf_cm_event_report *event_node) -{ - FC_CHECK_RETURN_VOID(event_node); - - event_node->event = 
UNF_EVENT_TYPE_REQUIRE; - event_node->event_asy_flag = UNF_EVENT_ASYN; - event_node->delay_times = 0; - event_node->para_in = NULL; - event_node->para_out = NULL; - event_node->result = 0; - event_node->lport = NULL; - event_node->unf_event_task = NULL; -} - -struct unf_cm_event_report *unf_get_free_event_node(void *lport) -{ - struct unf_event_mgr *event_mgr = NULL; - struct unf_cm_event_report *event_node = NULL; - struct list_head *list_node = NULL; - struct unf_lport *unf_lport = NULL; - ulong flags = 0; - - FC_CHECK_RETURN_VALUE(lport, NULL); - unf_lport = (struct unf_lport *)lport; - unf_lport = unf_lport->root_lport; - - if (unlikely(atomic_read(&unf_lport->lport_no_operate_flag) == UNF_LPORT_NOP)) - return NULL; - - event_mgr = &unf_lport->event_mgr; - - spin_lock_irqsave(&event_mgr->port_event_lock, flags); - if (list_empty(&event_mgr->list_free_event)) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Port(0x%x) have no event node anymore", - unf_lport->port_id); - - spin_unlock_irqrestore(&event_mgr->port_event_lock, flags); - return NULL; - } - - list_node = UNF_OS_LIST_NEXT(&event_mgr->list_free_event); - list_del(list_node); - event_mgr->free_event_count--; - event_node = list_entry(list_node, struct unf_cm_event_report, list_entry); - - unf_init_event_node(event_node); - spin_unlock_irqrestore(&event_mgr->port_event_lock, flags); - - return event_node; -} - -void unf_post_event(void *lport, void *event_node) -{ - struct unf_cm_event_report *cm_event_node = NULL; - struct unf_chip_manage_info *card_thread_info = NULL; - struct unf_lport *unf_lport = NULL; - ulong flags = 0; - - FC_CHECK_RETURN_VOID(event_node); - cm_event_node = (struct unf_cm_event_report *)event_node; - - /* If null, post to global event center */ - if (!lport) { - spin_lock_irqsave(&fc_event_list.fc_event_list_lock, flags); - fc_event_list.list_num++; - list_add_tail(&cm_event_node->list_entry, &fc_event_list.list_head); - spin_unlock_irqrestore(&fc_event_list.fc_event_list_lock, flags); - - wake_up_process(event_task_thread); - } else { - unf_lport = (struct unf_lport *)lport; - unf_lport = unf_lport->root_lport; - card_thread_info = unf_lport->chip_info; - - /* Post to global event center */ - if (!card_thread_info) { - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_WARN, - "[warn]Port(0x%x) has strange event with type(0x%x)", - unf_lport->nport_id, cm_event_node->event); - - spin_lock_irqsave(&fc_event_list.fc_event_list_lock, flags); - fc_event_list.list_num++; - list_add_tail(&cm_event_node->list_entry, &fc_event_list.list_head); - spin_unlock_irqrestore(&fc_event_list.fc_event_list_lock, flags); - - wake_up_process(event_task_thread); - } else { - spin_lock_irqsave(&card_thread_info->chip_event_list_lock, flags); - card_thread_info->list_num++; - list_add_tail(&cm_event_node->list_entry, &card_thread_info->list_head); - spin_unlock_irqrestore(&card_thread_info->chip_event_list_lock, flags); - - wake_up_process(card_thread_info->thread); - } - } -} - -void unf_check_event_mgr_status(struct unf_event_mgr *event_mgr) -{ - ulong flag = 0; - - FC_CHECK_RETURN_VOID(event_mgr); - - spin_lock_irqsave(&event_mgr->port_event_lock, flag); - if (event_mgr->emg_completion && event_mgr->free_event_count == UNF_MAX_EVENT_NODE) - complete(event_mgr->emg_completion); - - spin_unlock_irqrestore(&event_mgr->port_event_lock, flag); -} - -void unf_release_event(void *lport, void *event_node) -{ - struct unf_event_mgr *event_mgr = NULL; - struct unf_lport *unf_lport = NULL; - struct unf_cm_event_report *cm_event_node = NULL; - ulong 
flags = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(event_node); - - cm_event_node = (struct unf_cm_event_report *)event_node; - unf_lport = (struct unf_lport *)lport; - unf_lport = unf_lport->root_lport; - event_mgr = &unf_lport->event_mgr; - - spin_lock_irqsave(&event_mgr->port_event_lock, flags); - event_mgr->free_event_count++; - unf_init_event_node(cm_event_node); - list_add_tail(&cm_event_node->list_entry, &event_mgr->list_free_event); - spin_unlock_irqrestore(&event_mgr->port_event_lock, flags); - - unf_check_event_mgr_status(event_mgr); -} - -void unf_release_global_event(void *event_node) -{ - ulong flag = 0; - struct unf_cm_event_report *cm_event_node = NULL; - - FC_CHECK_RETURN_VOID(event_node); - cm_event_node = (struct unf_cm_event_report *)event_node; - - unf_init_event_node(cm_event_node); - - spin_lock_irqsave(&global_event_queue.global_event_list_lock, flag); - global_event_queue.list_number++; - list_add_tail(&cm_event_node->list_entry, &global_event_queue.global_event_list); - spin_unlock_irqrestore(&global_event_queue.global_event_list_lock, flag); -} - -u32 unf_init_event_center(void *lport) -{ - struct unf_event_mgr *event_mgr = NULL; - u32 ret = RETURN_OK; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - unf_lport = (struct unf_lport *)lport; - - /* Initial Disc manager */ - event_mgr = &unf_lport->event_mgr; - event_mgr->free_event_count = UNF_MAX_EVENT_NODE; - event_mgr->unf_get_free_event_func = unf_get_free_event_node; - event_mgr->unf_release_event = unf_release_event; - event_mgr->unf_post_event_func = unf_post_event; - - INIT_LIST_HEAD(&event_mgr->list_free_event); - spin_lock_init(&event_mgr->port_event_lock); - event_mgr->emg_completion = NULL; - - ret = unf_init_event_msg(unf_lport); - - return ret; -} - -void unf_wait_event_mgr_complete(struct unf_event_mgr *event_mgr) -{ - struct unf_event_mgr *event_mgr_temp = NULL; - bool wait = false; - ulong mg_flag = 0; - - struct completion fc_event_completion; - - init_completion(&fc_event_completion); - FC_CHECK_RETURN_VOID(event_mgr); - event_mgr_temp = event_mgr; - - spin_lock_irqsave(&event_mgr_temp->port_event_lock, mg_flag); - if (event_mgr_temp->free_event_count != UNF_MAX_EVENT_NODE) { - event_mgr_temp->emg_completion = &fc_event_completion; - wait = true; - } - spin_unlock_irqrestore(&event_mgr_temp->port_event_lock, mg_flag); - - if (wait) - wait_for_completion(event_mgr_temp->emg_completion); - - spin_lock_irqsave(&event_mgr_temp->port_event_lock, mg_flag); - event_mgr_temp->emg_completion = NULL; - spin_unlock_irqrestore(&event_mgr_temp->port_event_lock, mg_flag); -} - -u32 unf_event_center_destroy(void *lport) -{ - struct unf_event_mgr *event_mgr = NULL; - struct list_head *list = NULL; - struct list_head *list_tmp = NULL; - struct unf_cm_event_report *event_node = NULL; - u32 ret = RETURN_OK; - ulong flag = 0; - ulong list_lock_flag = 0; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - unf_lport = (struct unf_lport *)lport; - event_mgr = &unf_lport->event_mgr; - - spin_lock_irqsave(&fc_event_list.fc_event_list_lock, list_lock_flag); - if (!list_empty(&fc_event_list.list_head)) { - list_for_each_safe(list, list_tmp, &fc_event_list.list_head) { - event_node = list_entry(list, struct unf_cm_event_report, list_entry); - - if (event_node->lport == unf_lport) { - list_del_init(&event_node->list_entry); - if (event_node->event_asy_flag == UNF_EVENT_SYN) { - event_node->result = UNF_RETURN_ERROR; - 
complete(&event_node->event_comp); - } - - spin_lock_irqsave(&event_mgr->port_event_lock, flag); - event_mgr->free_event_count++; - list_add_tail(&event_node->list_entry, - &event_mgr->list_free_event); - spin_unlock_irqrestore(&event_mgr->port_event_lock, flag); - } - } - } - spin_unlock_irqrestore(&fc_event_list.fc_event_list_lock, list_lock_flag); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) begin to wait event", - unf_lport->port_id); - - unf_wait_event_mgr_complete(event_mgr); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) wait event process end", - unf_lport->port_id); - - unf_del_event_center_fun_op(unf_lport); - - vfree(event_mgr->mem_add); - event_mgr->mem_add = NULL; - unf_lport->destroy_step = UNF_LPORT_DESTROY_STEP_3_DESTROY_EVENT_CENTER; - - return ret; -} - -static void unf_procee_asyn_event(struct unf_cm_event_report *event_node) -{ - struct unf_lport *lport = NULL; - u32 ret = UNF_RETURN_ERROR; - - lport = (struct unf_lport *)event_node->lport; - - FC_CHECK_RETURN_VOID(lport); - if (event_node->unf_event_task) { - ret = (u32)event_node->unf_event_task(event_node->para_in, - event_node->para_out); - } - - if (lport->event_mgr.unf_release_event) - lport->event_mgr.unf_release_event(lport, event_node); - - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_WARN, - "[warn]Port(0x%x) handle event(0x%x) failed", - lport->port_id, event_node->event); - } -} - -void unf_handle_event(struct unf_cm_event_report *event_node) -{ - u32 ret = UNF_RETURN_ERROR; - u32 event = 0; - u32 event_asy_flag = UNF_EVENT_ASYN; - - FC_CHECK_RETURN_VOID(event_node); - - event = event_node->event; - event_asy_flag = event_node->event_asy_flag; - - switch (event_asy_flag) { - case UNF_EVENT_SYN: /* synchronous event node */ - case UNF_GLOBAL_EVENT_SYN: - if (event_node->unf_event_task) - ret = (u32)event_node->unf_event_task(event_node->para_in, - event_node->para_out); - - event_node->result = ret; - complete(&event_node->event_comp); - break; - - case UNF_EVENT_ASYN: /* asynchronous event node */ - unf_procee_asyn_event(event_node); - break; - - case UNF_GLOBAL_EVENT_ASYN: - if (event_node->unf_event_task) { - ret = (u32)event_node->unf_event_task(event_node->para_in, - event_node->para_out); - } - - unf_release_global_event(event_node); - - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_WARN, - "[warn]handle global event(0x%x) failed", event); - } - break; - - default: - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_WARN, - "[warn]Unknown event(0x%x)", event); - break; - } -} - -u32 unf_init_global_event_msg(void) -{ - struct unf_cm_event_report *event_node = NULL; - u32 ret = RETURN_OK; - u32 index = 0; - ulong flag = 0; - - INIT_LIST_HEAD(&global_event_queue.global_event_list); - spin_lock_init(&global_event_queue.global_event_list_lock); - global_event_queue.list_number = 0; - - global_event_queue.global_event_add = vmalloc(UNF_MAX_GLOBAL_ENENT_NODE * - sizeof(struct unf_cm_event_report)); - if (!global_event_queue.global_event_add) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Can't allocate global event queue"); - - return UNF_RETURN_ERROR; - } - memset(global_event_queue.global_event_add, 0, - (UNF_MAX_GLOBAL_ENENT_NODE * sizeof(struct unf_cm_event_report))); - - event_node = (struct unf_cm_event_report *)(global_event_queue.global_event_add); - - spin_lock_irqsave(&global_event_queue.global_event_list_lock, flag); - for (index = 0; index < UNF_MAX_GLOBAL_ENENT_NODE; index++) { - INIT_LIST_HEAD(&event_node->list_entry); - 
list_add_tail(&event_node->list_entry, &global_event_queue.global_event_list); - - global_event_queue.list_number++; - event_node++; - } - spin_unlock_irqrestore(&global_event_queue.global_event_list_lock, flag); - - return ret; -} - -void unf_destroy_global_event_msg(void) -{ - if (global_event_queue.list_number != UNF_MAX_GLOBAL_ENENT_NODE) { - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_CRITICAL, - "[warn]Global event release not complete with remain nodes(0x%x)", - global_event_queue.list_number); - } - - vfree(global_event_queue.global_event_add); -} - -u32 unf_schedule_global_event(void *para_in, u32 event_asy_flag, - int (*unf_event_task)(void *arg_in, void *arg_out)) -{ - struct list_head *list_node = NULL; - struct unf_cm_event_report *event_node = NULL; - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - spinlock_t *event_list_lock = NULL; - - FC_CHECK_RETURN_VALUE(unf_event_task, UNF_RETURN_ERROR); - - if (event_asy_flag != UNF_GLOBAL_EVENT_ASYN && event_asy_flag != UNF_GLOBAL_EVENT_SYN) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Event async flag(0x%x) abnormity", - event_asy_flag); - - return UNF_RETURN_ERROR; - } - - event_list_lock = &global_event_queue.global_event_list_lock; - spin_lock_irqsave(event_list_lock, flag); - if (list_empty(&global_event_queue.global_event_list)) { - spin_unlock_irqrestore(event_list_lock, flag); - - return UNF_RETURN_ERROR; - } - - list_node = UNF_OS_LIST_NEXT(&global_event_queue.global_event_list); - list_del_init(list_node); - global_event_queue.list_number--; - event_node = list_entry(list_node, struct unf_cm_event_report, list_entry); - spin_unlock_irqrestore(event_list_lock, flag); - - /* Initial global event */ - unf_init_event_node(event_node); - init_completion(&event_node->event_comp); - event_node->event_asy_flag = event_asy_flag; - event_node->unf_event_task = unf_event_task; - event_node->para_in = (void *)para_in; - event_node->para_out = NULL; - - unf_post_event(NULL, event_node); - - if (event_asy_flag == UNF_GLOBAL_EVENT_SYN) { - /* must wait for complete */ - wait_for_completion(&event_node->event_comp); - ret = event_node->result; - unf_release_global_event(event_node); - } else { - ret = RETURN_OK; - } - - return ret; -} - -struct unf_cm_event_report *unf_get_one_event_node(void *lport) -{ - struct unf_lport *unf_lport = (struct unf_lport *)lport; - - FC_CHECK_RETURN_VALUE(lport, NULL); - FC_CHECK_RETURN_VALUE(unf_lport->event_mgr.unf_get_free_event_func, NULL); - - return unf_lport->event_mgr.unf_get_free_event_func((void *)unf_lport); -} - -void unf_post_one_event_node(void *lport, struct unf_cm_event_report *event) -{ - struct unf_lport *unf_lport = (struct unf_lport *)lport; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(event); - - FC_CHECK_RETURN_VOID(unf_lport->event_mgr.unf_post_event_func); - FC_CHECK_RETURN_VOID(event); - - unf_lport->event_mgr.unf_post_event_func((void *)unf_lport, event); -} diff --git a/drivers/scsi/spfc/common/unf_event.h b/drivers/scsi/spfc/common/unf_event.h deleted file mode 100644 index 3fbd72bff8d7..000000000000 --- a/drivers/scsi/spfc/common/unf_event.h +++ /dev/null @@ -1,83 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_EVENT_H -#define UNF_EVENT_H - -#include "unf_type.h" - -#define UNF_MAX_EVENT_NODE 256 - -enum unf_event_type { - UNF_EVENT_TYPE_ALARM = 0, /* Alarm */ - UNF_EVENT_TYPE_REQUIRE, /* Require */ - UNF_EVENT_TYPE_RECOVERY, /* Recovery */ - UNF_EVENT_TYPE_BUTT -}; - -struct unf_cm_event_report { - /* 
event type */ - u32 event; - - /* ASY flag */ - u32 event_asy_flag; - - /* Delay times,must be async event */ - u32 delay_times; - - struct list_head list_entry; - - void *lport; - - /* parameter */ - void *para_in; - void *para_out; - u32 result; - - /* recovery strategy */ - int (*unf_event_task)(void *arg_in, void *arg_out); - - struct completion event_comp; -}; - -struct unf_event_mgr { - spinlock_t port_event_lock; - u32 free_event_count; - - struct list_head list_free_event; - - struct completion *emg_completion; - - void *mem_add; - struct unf_cm_event_report *(*unf_get_free_event_func)(void *lport); - void (*unf_release_event)(void *lport, void *event_node); - void (*unf_post_event_func)(void *lport, void *event_node); -}; - -struct unf_global_event_queue { - void *global_event_add; - u32 list_number; - struct list_head global_event_list; - spinlock_t global_event_list_lock; -}; - -struct unf_event_list { - struct list_head list_head; - spinlock_t fc_event_list_lock; - u32 list_num; /* list node number */ -}; - -void unf_handle_event(struct unf_cm_event_report *event_node); -u32 unf_init_global_event_msg(void); -void unf_destroy_global_event_msg(void); -u32 unf_schedule_global_event(void *para_in, u32 event_asy_flag, - int (*unf_event_task)(void *arg_in, void *arg_out)); -struct unf_cm_event_report *unf_get_one_event_node(void *lport); -void unf_post_one_event_node(void *lport, struct unf_cm_event_report *event); -u32 unf_event_center_destroy(void *lport); -u32 unf_init_event_center(void *lport); - -extern struct task_struct *event_task_thread; -extern struct unf_global_event_queue global_event_queue; -extern struct unf_event_list fc_event_list; -#endif diff --git a/drivers/scsi/spfc/common/unf_exchg.c b/drivers/scsi/spfc/common/unf_exchg.c deleted file mode 100644 index ab35cc318b6f..000000000000 --- a/drivers/scsi/spfc/common/unf_exchg.c +++ /dev/null @@ -1,2317 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_exchg.h" -#include "unf_log.h" -#include "unf_common.h" -#include "unf_rport.h" -#include "unf_service.h" -#include "unf_io.h" -#include "unf_exchg_abort.h" - -#define SPFC_XCHG_TYPE_MASK 0xFFFF -#define UNF_DEL_XCHG_TIMER_SAFE(xchg) \ - do { \ - if (cancel_delayed_work(&((xchg)->timeout_work))) { \ - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, \ - "Exchange(0x%p) is free, but timer is pending.", \ - xchg); \ - } else { \ - FC_DRV_PRINT(UNF_LOG_IO_ATT, \ - UNF_CRITICAL, \ - "Exchange(0x%p) is free, but timer is running.", \ - xchg); \ - } \ - } while (0) - -static struct unf_io_flow_id io_stage_table[] = { - {"XCHG_ALLOC"}, {"TGT_RECEIVE_ABTS"}, - {"TGT_ABTS_DONE"}, {"TGT_IO_SRR"}, - {"SFS_RESPONSE"}, {"SFS_TIMEOUT"}, - {"INI_SEND_CMND"}, {"INI_RESPONSE_DONE"}, - {"INI_EH_ABORT"}, {"INI_EH_DEVICE_RESET"}, - {"INI_EH_BLS_DONE"}, {"INI_IO_TIMEOUT"}, - {"INI_REQ_TIMEOUT"}, {"XCHG_CANCEL_TIMER"}, - {"XCHG_FREE_XCHG"}, {"SEND_ELS"}, - {"IO_XCHG_WAIT"}, -}; - -static void unf_init_xchg_attribute(struct unf_xchg *xchg); -static void unf_delay_work_del_syn(struct unf_xchg *xchg); -static void unf_free_lport_sfs_xchg(struct unf_xchg_mgr *xchg_mgr, - bool done_ini_flag); -static void unf_free_lport_destroy_xchg(struct unf_xchg_mgr *xchg_mgr); - -void unf_wake_up_scsi_task_cmnd(struct unf_lport *lport) -{ - struct list_head *node = NULL; - struct list_head *next_node = NULL; - struct unf_xchg *xchg = NULL; - ulong hot_pool_lock_flags = 0; - ulong xchg_flag = 0; - struct unf_xchg_mgr *xchg_mgrs = NULL; - u32 i; - - 
FC_CHECK_RETURN_VOID(lport); - - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - xchg_mgrs = unf_get_xchg_mgr_by_lport(lport, i); - - if (!xchg_mgrs) { - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_MINOR, - "Can't find LPort(0x%x) MgrIdx %u exchange manager.", - lport->port_id, i); - continue; - } - - spin_lock_irqsave(&xchg_mgrs->hot_pool->xchg_hotpool_lock, hot_pool_lock_flags); - list_for_each_safe(node, next_node, - (&xchg_mgrs->hot_pool->ini_busylist)) { - xchg = list_entry(node, struct unf_xchg, list_xchg_entry); - - spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag); - if (INI_IO_STATE_UPTASK & xchg->io_state && - (atomic_read(&xchg->ref_cnt) > 0)) { - UNF_SET_SCSI_CMND_RESULT(xchg, UNF_IO_SUCCESS); - up(&xchg->task_sema); - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_MINOR, - "Wake up task command exchange(0x%p), Hot Pool Tag(0x%x).", - xchg, xchg->hotpooltag); - } - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flag); - } - - spin_unlock_irqrestore(&xchg_mgrs->hot_pool->xchg_hotpool_lock, - hot_pool_lock_flags); - } -} - -void *unf_cm_get_free_xchg(void *lport, u32 xchg_type) -{ - struct unf_lport *unf_lport = NULL; - struct unf_cm_xchg_mgr_template *xchg_mgr_temp = NULL; - - FC_CHECK_RETURN_VALUE(unlikely(lport), NULL); - - unf_lport = (struct unf_lport *)lport; - xchg_mgr_temp = &unf_lport->xchg_mgr_temp; - - /* Find the corresponding Lport Xchg management template. */ - FC_CHECK_RETURN_VALUE(unlikely(xchg_mgr_temp->unf_xchg_get_free_and_init), NULL); - - return xchg_mgr_temp->unf_xchg_get_free_and_init(unf_lport, xchg_type); -} - -void unf_cm_free_xchg(void *lport, void *xchg) -{ - struct unf_lport *unf_lport = NULL; - struct unf_cm_xchg_mgr_template *xchg_mgr_temp = NULL; - - FC_CHECK_RETURN_VOID(unlikely(lport)); - FC_CHECK_RETURN_VOID(unlikely(xchg)); - - unf_lport = (struct unf_lport *)lport; - xchg_mgr_temp = &unf_lport->xchg_mgr_temp; - FC_CHECK_RETURN_VOID(unlikely(xchg_mgr_temp->unf_xchg_release)); - - /* - * unf_cm_free_xchg --->>> unf_free_xchg - * --->>> unf_xchg_ref_dec --->>> unf_free_fcp_xchg --->>> - * unf_done_ini_xchg - */ - xchg_mgr_temp->unf_xchg_release(lport, xchg); -} - -void *unf_cm_lookup_xchg_by_tag(void *lport, u16 hot_pool_tag) -{ - struct unf_lport *unf_lport = NULL; - struct unf_cm_xchg_mgr_template *xchg_mgr_temp = NULL; - - FC_CHECK_RETURN_VALUE(unlikely(lport), NULL); - - /* Find the corresponding Lport Xchg management template */ - unf_lport = (struct unf_lport *)lport; - xchg_mgr_temp = &unf_lport->xchg_mgr_temp; - - FC_CHECK_RETURN_VALUE(unlikely(xchg_mgr_temp->unf_look_up_xchg_by_tag), NULL); - - return xchg_mgr_temp->unf_look_up_xchg_by_tag(lport, hot_pool_tag); -} - -void *unf_cm_lookup_xchg_by_id(void *lport, u16 ox_id, u32 oid) -{ - struct unf_lport *unf_lport = NULL; - struct unf_cm_xchg_mgr_template *xchg_mgr_temp = NULL; - - FC_CHECK_RETURN_VALUE(unlikely(lport), NULL); - - unf_lport = (struct unf_lport *)lport; - xchg_mgr_temp = &unf_lport->xchg_mgr_temp; - - /* Find the corresponding Lport Xchg management template */ - FC_CHECK_RETURN_VALUE(unlikely(xchg_mgr_temp->unf_look_up_xchg_by_id), NULL); - - return xchg_mgr_temp->unf_look_up_xchg_by_id(lport, ox_id, oid); -} - -struct unf_xchg *unf_cm_lookup_xchg_by_cmnd_sn(void *lport, u64 command_sn, - u32 world_id, void *pinitiator) -{ - struct unf_lport *unf_lport = NULL; - struct unf_cm_xchg_mgr_template *xchg_mgr_temp = NULL; - struct unf_xchg *xchg = NULL; - - FC_CHECK_RETURN_VALUE(unlikely(lport), NULL); - - unf_lport = (struct unf_lport *)lport; - xchg_mgr_temp = &unf_lport->xchg_mgr_temp; - - 
FC_CHECK_RETURN_VALUE(unlikely(xchg_mgr_temp->unf_look_up_xchg_by_cmnd_sn), NULL); - - xchg = (struct unf_xchg *)xchg_mgr_temp->unf_look_up_xchg_by_cmnd_sn(unf_lport, - command_sn, - world_id, - pinitiator); - - return xchg; -} - -static u32 unf_init_xchg(struct unf_lport *lport, struct unf_xchg_mgr *xchg_mgr, - u32 xchg_sum, u32 sfs_sum) -{ - struct unf_xchg *xchg_mem = NULL; - union unf_sfs_u *sfs_mm_start = NULL; - dma_addr_t sfs_dma_addr; - struct unf_xchg *xchg = NULL; - struct unf_xchg_free_pool *free_pool = NULL; - ulong flags = 0; - u32 i = 0; - - FC_CHECK_RETURN_VALUE((sfs_sum <= xchg_sum), UNF_RETURN_ERROR); - - free_pool = &xchg_mgr->free_pool; - xchg_mem = xchg_mgr->fcp_mm_start; - xchg = xchg_mem; - - sfs_mm_start = (union unf_sfs_u *)xchg_mgr->sfs_mm_start; - sfs_dma_addr = xchg_mgr->sfs_phy_addr; - /* 1. Allocate the SFS UNION memory to each SFS XCHG - * and mount the SFS XCHG to the corresponding FREE linked list - */ - free_pool->total_sfs_xchg = 0; - free_pool->sfs_xchg_sum = sfs_sum; - for (i = 0; i < sfs_sum; i++) { - INIT_LIST_HEAD(&xchg->list_xchg_entry); - INIT_LIST_HEAD(&xchg->list_esgls); - spin_lock_init(&xchg->xchg_state_lock); - sema_init(&xchg->task_sema, 0); - sema_init(&xchg->echo_info.echo_sync_sema, 0); - - spin_lock_irqsave(&free_pool->xchg_freepool_lock, flags); - xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr = sfs_mm_start; - xchg->fcp_sfs_union.sfs_entry.sfs_buff_phy_addr = sfs_dma_addr; - xchg->fcp_sfs_union.sfs_entry.sfs_buff_len = sizeof(*sfs_mm_start); - list_add_tail(&xchg->list_xchg_entry, &free_pool->list_sfs_xchg_list); - free_pool->total_sfs_xchg++; - spin_unlock_irqrestore(&free_pool->xchg_freepool_lock, flags); - - sfs_mm_start++; - sfs_dma_addr = sfs_dma_addr + sizeof(union unf_sfs_u); - xchg++; - } - - free_pool->total_fcp_xchg = 0; - - for (i = 0; (i < xchg_sum - sfs_sum); i++) { - INIT_LIST_HEAD(&xchg->list_xchg_entry); - - INIT_LIST_HEAD(&xchg->list_esgls); - spin_lock_init(&xchg->xchg_state_lock); - sema_init(&xchg->task_sema, 0); - sema_init(&xchg->echo_info.echo_sync_sema, 0); - - /* alloc dma buffer for fcp_rsp_iu */ - spin_lock_irqsave(&free_pool->xchg_freepool_lock, flags); - list_add_tail(&xchg->list_xchg_entry, &free_pool->list_free_xchg_list); - free_pool->total_fcp_xchg++; - spin_unlock_irqrestore(&free_pool->xchg_freepool_lock, flags); - - xchg++; - } - - free_pool->fcp_xchg_sum = free_pool->total_fcp_xchg; - - return RETURN_OK; -} - -static u32 unf_get_xchg_config_sum(struct unf_lport *lport, u32 *xchg_sum) -{ - struct unf_lport_cfg_item *lport_cfg_items = NULL; - - lport_cfg_items = &lport->low_level_func.lport_cfg_items; - - /* It has been checked at the bottom layer. Don't need to check it - * again. 
- */ - *xchg_sum = lport_cfg_items->max_sfs_xchg + lport_cfg_items->max_io; - if ((*xchg_sum / UNF_EXCHG_MGR_NUM) == 0 || - lport_cfg_items->max_sfs_xchg / UNF_EXCHG_MGR_NUM == 0) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) Xchgsum(%u) or SfsXchg(%u) is less than ExchangeMgrNum(%u).", - lport->port_id, *xchg_sum, lport_cfg_items->max_sfs_xchg, - UNF_EXCHG_MGR_NUM); - return UNF_RETURN_ERROR; - } - - if (*xchg_sum > (INVALID_VALUE16 - 1)) { - /* If the format of ox_id/rx_id is exceeded, this function is - * not supported - */ - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Port(0x%x) Exchange num(0x%x) is Too Big.", - lport->port_id, *xchg_sum); - - return UNF_RETURN_ERROR; - } - - return RETURN_OK; -} - -static void unf_xchg_cancel_timer(void *xchg) -{ - struct unf_xchg *tmp_xchg = NULL; - bool need_dec_xchg_ref = false; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(xchg); - tmp_xchg = (struct unf_xchg *)xchg; - - spin_lock_irqsave(&tmp_xchg->xchg_state_lock, flag); - if (cancel_delayed_work(&tmp_xchg->timeout_work)) - need_dec_xchg_ref = true; - - spin_unlock_irqrestore(&tmp_xchg->xchg_state_lock, flag); - - if (need_dec_xchg_ref) - unf_xchg_ref_dec(xchg, XCHG_CANCEL_TIMER); -} - -void unf_show_all_xchg(struct unf_lport *lport, struct unf_xchg_mgr *xchg_mgr) -{ - struct unf_lport *unf_lport = NULL; - struct unf_xchg *xchg = NULL; - struct list_head *xchg_node = NULL; - struct list_head *next_xchg_node = NULL; - ulong flags = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(xchg_mgr); - - unf_lport = lport; - - /* hot Xchg */ - spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hotpool_lock, flags); - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_WARN, "INI busy :"); - list_for_each_safe(xchg_node, next_xchg_node, &xchg_mgr->hot_pool->ini_busylist) { - xchg = list_entry(xchg_node, struct unf_xchg, list_xchg_entry); - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_MAJOR, - "0x%p---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.", - xchg, (u32)xchg->hotpooltag, (u32)xchg->xchg_type, - (u32)xchg->oxid, (u32)xchg->rxid, (u32)xchg->sid, (u32)xchg->did, - atomic_read(&xchg->ref_cnt), (u32)xchg->io_state, xchg->alloc_jif); - } - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_WARN, "SFS :"); - list_for_each_safe(xchg_node, next_xchg_node, &xchg_mgr->hot_pool->sfs_busylist) { - xchg = list_entry(xchg_node, struct unf_xchg, list_xchg_entry); - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_WARN, - "0x%p---0x%x---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.", - xchg, xchg->cmnd_code, (u32)xchg->hotpooltag, - (u32)xchg->xchg_type, (u32)xchg->oxid, (u32)xchg->rxid, (u32)xchg->sid, - (u32)xchg->did, atomic_read(&xchg->ref_cnt), - (u32)xchg->io_state, xchg->alloc_jif); - } - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_WARN, "Destroy list."); - list_for_each_safe(xchg_node, next_xchg_node, &xchg_mgr->hot_pool->list_destroy_xchg) { - xchg = list_entry(xchg_node, struct unf_xchg, list_xchg_entry); - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_WARN, - "0x%p---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.", - xchg, (u32)xchg->hotpooltag, (u32)xchg->xchg_type, - (u32)xchg->oxid, (u32)xchg->rxid, (u32)xchg->sid, (u32)xchg->did, - atomic_read(&xchg->ref_cnt), (u32)xchg->io_state, xchg->alloc_jif); - } - spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hotpool_lock, flags); -} - -static u32 unf_free_lport_xchg(struct unf_lport *lport, struct unf_xchg_mgr *xchg_mgr) -{ -#define UNF_OS_WAITIO_TIMEOUT (10 * 1000) - - ulong free_pool_lock_flags = 0; - bool wait = false; - u32 total_xchg = 0; - u32 
total_xchg_sum = 0; - u32 ret = RETURN_OK; - u64 time_out = 0; - struct completion xchg_mgr_completion; - - init_completion(&xchg_mgr_completion); - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg_mgr, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg_mgr->hot_pool, UNF_RETURN_ERROR); - - unf_free_lport_sfs_xchg(xchg_mgr, false); - - /* free INI Mode exchanges belong to L_Port */ - unf_free_lport_ini_xchg(xchg_mgr, false); - - spin_lock_irqsave(&xchg_mgr->free_pool.xchg_freepool_lock, free_pool_lock_flags); - total_xchg = xchg_mgr->free_pool.total_fcp_xchg + xchg_mgr->free_pool.total_sfs_xchg; - total_xchg_sum = xchg_mgr->free_pool.fcp_xchg_sum + xchg_mgr->free_pool.sfs_xchg_sum; - if (total_xchg != total_xchg_sum) { - xchg_mgr->free_pool.xchg_mgr_completion = &xchg_mgr_completion; - wait = true; - } - spin_unlock_irqrestore(&xchg_mgr->free_pool.xchg_freepool_lock, free_pool_lock_flags); - - if (wait) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) begin to wait for exchange manager completion (0x%x:0x%x)", - lport->port_id, total_xchg, total_xchg_sum); - - unf_show_all_xchg(lport, xchg_mgr); - - time_out = wait_for_completion_timeout(xchg_mgr->free_pool.xchg_mgr_completion, - msecs_to_jiffies(UNF_OS_WAITIO_TIMEOUT)); - if (time_out == 0) - unf_free_lport_destroy_xchg(xchg_mgr); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) wait for exchange manager completion end", - lport->port_id); - - spin_lock_irqsave(&xchg_mgr->free_pool.xchg_freepool_lock, free_pool_lock_flags); - xchg_mgr->free_pool.xchg_mgr_completion = NULL; - spin_unlock_irqrestore(&xchg_mgr->free_pool.xchg_freepool_lock, - free_pool_lock_flags); - } - - return ret; -} - -void unf_free_lport_all_xchg(struct unf_lport *lport) -{ - struct unf_xchg_mgr *xchg_mgr = NULL; - u32 i; - - FC_CHECK_RETURN_VOID(lport); - - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - xchg_mgr = unf_get_xchg_mgr_by_lport(lport, i); - ; - if (unlikely(!xchg_mgr)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) hot pool is NULL", - lport->port_id); - - continue; - } - unf_free_lport_sfs_xchg(xchg_mgr, false); - - /* free INI Mode exchanges belong to L_Port */ - unf_free_lport_ini_xchg(xchg_mgr, false); - - unf_free_lport_destroy_xchg(xchg_mgr); - } -} - -static void unf_delay_work_del_syn(struct unf_xchg *xchg) -{ - FC_CHECK_RETURN_VOID(xchg); - - /* synchronous release timer */ - if (!cancel_delayed_work_sync(&xchg->timeout_work)) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Exchange(0x%p), State(0x%x) can't delete work timer, timer is running or no timer.", - xchg, xchg->io_state); - } else { - /* The reference count cannot be directly subtracted. - * This prevents the XCHG from being moved to the Free linked - * list when the card is unloaded. - */ - unf_cm_free_xchg(xchg->lport, xchg); - } -} - -static void unf_free_lport_sfs_xchg(struct unf_xchg_mgr *xchg_mgr, bool done_ini_flag) -{ - struct list_head *list = NULL; - struct unf_xchg *xchg = NULL; - ulong hot_pool_lock_flags = 0; - - FC_CHECK_RETURN_VOID(xchg_mgr); - FC_CHECK_RETURN_VOID(xchg_mgr->hot_pool); - - spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hotpool_lock, hot_pool_lock_flags); - while (!list_empty(&xchg_mgr->hot_pool->sfs_busylist)) { - list = UNF_OS_LIST_NEXT(&xchg_mgr->hot_pool->sfs_busylist); - list_del_init(list); - - /* Prevent the xchg of the sfs from being accessed repeatedly. - * The xchg is first mounted to the destroy linked list. 
- */ - list_add_tail(list, &xchg_mgr->hot_pool->list_destroy_xchg); - - xchg = list_entry(list, struct unf_xchg, list_xchg_entry); - spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hotpool_lock, hot_pool_lock_flags); - unf_delay_work_del_syn(xchg); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Free SFS Exchange(0x%p), State(0x%x), Reference count(%d), Start time(%llu).", - xchg, xchg->io_state, atomic_read(&xchg->ref_cnt), xchg->alloc_jif); - - unf_cm_free_xchg(xchg->lport, xchg); - - spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hotpool_lock, hot_pool_lock_flags); - } - spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hotpool_lock, hot_pool_lock_flags); -} - -void unf_free_lport_ini_xchg(struct unf_xchg_mgr *xchg_mgr, bool done_ini_flag) -{ - struct list_head *list = NULL; - struct unf_xchg *xchg = NULL; - ulong hot_pool_lock_flags = 0; - u32 up_status = 0; - - FC_CHECK_RETURN_VOID(xchg_mgr); - FC_CHECK_RETURN_VOID(xchg_mgr->hot_pool); - - spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hotpool_lock, hot_pool_lock_flags); - while (!list_empty(&xchg_mgr->hot_pool->ini_busylist)) { - /* for each INI busy_list (exchange) node */ - list = UNF_OS_LIST_NEXT(&xchg_mgr->hot_pool->ini_busylist); - - /* Put exchange node to destroy_list, prevent done repeatly */ - list_del_init(list); - list_add_tail(list, &xchg_mgr->hot_pool->list_destroy_xchg); - xchg = list_entry(list, struct unf_xchg, list_xchg_entry); - if (atomic_read(&xchg->ref_cnt) <= 0) - continue; - - spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hotpool_lock, - hot_pool_lock_flags); - unf_delay_work_del_syn(xchg); - - /* In the case of INI done, the command should be set to fail to - * prevent data inconsistency caused by the return of OK - */ - up_status = unf_get_up_level_cmnd_errcode(xchg->scsi_cmnd_info.err_code_table, - xchg->scsi_cmnd_info.err_code_table_cout, - UNF_IO_PORT_LOGOUT); - - if (INI_IO_STATE_UPABORT & xchg->io_state) { - /* - * About L_Port destroy: - * UP_ABORT ---to--->>> ABORT_Port_Removing - */ - up_status = UNF_IO_ABORT_PORT_REMOVING; - } - - xchg->scsi_cmnd_info.result = up_status; - up(&xchg->task_sema); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Free INI exchange(0x%p) state(0x%x) reference count(%d) start time(%llu)", - xchg, xchg->io_state, atomic_read(&xchg->ref_cnt), xchg->alloc_jif); - - unf_cm_free_xchg(xchg->lport, xchg); - - /* go to next INI busy_list (exchange) node */ - spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hotpool_lock, hot_pool_lock_flags); - } - spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hotpool_lock, hot_pool_lock_flags); -} - -static void unf_free_lport_destroy_xchg(struct unf_xchg_mgr *xchg_mgr) -{ -#define UNF_WAIT_DESTROY_EMPTY_STEP_MS 1000 -#define UNF_WAIT_IO_STATE_TGT_FRONT_MS (10 * 1000) - - struct unf_xchg *xchg = NULL; - struct list_head *next_xchg_node = NULL; - ulong hot_pool_lock_flags = 0; - ulong xchg_flag = 0; - - FC_CHECK_RETURN_VOID(xchg_mgr); - FC_CHECK_RETURN_VOID(xchg_mgr->hot_pool); - - /* In this case, the timer on the destroy linked list is deleted. - * You only need to check whether the timer is released at the end of - * the tgt. 
- */ - spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hotpool_lock, hot_pool_lock_flags); - while (!list_empty(&xchg_mgr->hot_pool->list_destroy_xchg)) { - next_xchg_node = UNF_OS_LIST_NEXT(&xchg_mgr->hot_pool->list_destroy_xchg); - xchg = list_entry(next_xchg_node, struct unf_xchg, list_xchg_entry); - - spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Free Exchange(0x%p), Type(0x%x), State(0x%x), Reference count(%d), Start time(%llu)", - xchg, xchg->xchg_type, xchg->io_state, - atomic_read(&xchg->ref_cnt), xchg->alloc_jif); - - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flag); - spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hotpool_lock, hot_pool_lock_flags); - - /* This interface can be invoked to ensure that the timer is - * successfully canceled or wait until the timer execution is - * complete - */ - unf_delay_work_del_syn(xchg); - - /* - * If the timer is canceled successfully, delete Xchg - * If the timer has burst, the Xchg may have been released,In - * this case, deleting the Xchg will be failed - */ - unf_cm_free_xchg(xchg->lport, xchg); - - spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hotpool_lock, hot_pool_lock_flags); - }; - - spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hotpool_lock, hot_pool_lock_flags); -} - -static void unf_free_all_big_sfs(struct unf_xchg_mgr *xchg_mgr) -{ - struct unf_big_sfs *big_sfs = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flag = 0; - u32 i; - - FC_CHECK_RETURN_VOID(xchg_mgr); - - /* Release the free resources in the busy state */ - spin_lock_irqsave(&xchg_mgr->big_sfs_pool.big_sfs_pool_lock, flag); - list_for_each_safe(node, next_node, &xchg_mgr->big_sfs_pool.list_busypool) { - list_del(node); - list_add_tail(node, &xchg_mgr->big_sfs_pool.list_freepool); - } - - list_for_each_safe(node, next_node, &xchg_mgr->big_sfs_pool.list_freepool) { - list_del(node); - big_sfs = list_entry(node, struct unf_big_sfs, entry_bigsfs); - if (big_sfs->addr) - big_sfs->addr = NULL; - } - spin_unlock_irqrestore(&xchg_mgr->big_sfs_pool.big_sfs_pool_lock, flag); - - if (xchg_mgr->big_sfs_buf_list.buflist) { - for (i = 0; i < xchg_mgr->big_sfs_buf_list.buf_num; i++) { - kfree(xchg_mgr->big_sfs_buf_list.buflist[i].vaddr); - xchg_mgr->big_sfs_buf_list.buflist[i].vaddr = NULL; - } - - kfree(xchg_mgr->big_sfs_buf_list.buflist); - xchg_mgr->big_sfs_buf_list.buflist = NULL; - } -} - -static void unf_free_big_sfs_pool(struct unf_xchg_mgr *xchg_mgr) -{ - FC_CHECK_RETURN_VOID(xchg_mgr); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "Free Big SFS Pool, Count(0x%x).", - xchg_mgr->big_sfs_pool.free_count); - - unf_free_all_big_sfs(xchg_mgr); - xchg_mgr->big_sfs_pool.free_count = 0; - - if (xchg_mgr->big_sfs_pool.big_sfs_pool) { - vfree(xchg_mgr->big_sfs_pool.big_sfs_pool); - xchg_mgr->big_sfs_pool.big_sfs_pool = NULL; - } -} - -static void unf_free_xchg_mgr_mem(struct unf_lport *lport, struct unf_xchg_mgr *xchg_mgr) -{ - struct unf_xchg *xchg = NULL; - u32 i = 0; - u32 xchg_sum = 0; - struct unf_xchg_free_pool *free_pool = NULL; - - FC_CHECK_RETURN_VOID(xchg_mgr); - - unf_free_big_sfs_pool(xchg_mgr); - - /* The sfs is released first, and the XchgMgr is allocated by the get - * free page. 
Therefore, the XchgMgr is compared with the '0' - */ - if (xchg_mgr->sfs_mm_start != 0) { - dma_free_coherent(&lport->low_level_func.dev->dev, xchg_mgr->sfs_mem_size, - xchg_mgr->sfs_mm_start, xchg_mgr->sfs_phy_addr); - xchg_mgr->sfs_mm_start = 0; - } - - /* Release Xchg first */ - if (xchg_mgr->fcp_mm_start) { - unf_get_xchg_config_sum(lport, &xchg_sum); - xchg_sum = xchg_sum / UNF_EXCHG_MGR_NUM; - - xchg = xchg_mgr->fcp_mm_start; - for (i = 0; i < xchg_sum; i++) { - if (!xchg) - break; - xchg++; - } - - vfree(xchg_mgr->fcp_mm_start); - xchg_mgr->fcp_mm_start = NULL; - } - - /* release the hot pool */ - if (xchg_mgr->hot_pool) { - vfree(xchg_mgr->hot_pool); - xchg_mgr->hot_pool = NULL; - } - - free_pool = &xchg_mgr->free_pool; - - vfree(xchg_mgr); -} - -static void unf_free_xchg_mgr(struct unf_lport *lport, struct unf_xchg_mgr *xchg_mgr) -{ - ulong flags = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(xchg_mgr); - - /* 1. At first, free exchanges for this Exch_Mgr */ - ret = unf_free_lport_xchg(lport, xchg_mgr); - - /* 2. Delete this Exch_Mgr entry */ - spin_lock_irqsave(&lport->xchg_mgr_lock, flags); - list_del_init(&xchg_mgr->xchg_mgr_entry); - spin_unlock_irqrestore(&lport->xchg_mgr_lock, flags); - - /* 3. free Exch_Mgr memory if necessary */ - if (ret == RETURN_OK) { - /* free memory directly */ - unf_free_xchg_mgr_mem(lport, xchg_mgr); - } else { - /* Add it to Dirty list */ - spin_lock_irqsave(&lport->xchg_mgr_lock, flags); - list_add_tail(&xchg_mgr->xchg_mgr_entry, &lport->list_drty_xchg_mgr_head); - spin_unlock_irqrestore(&lport->xchg_mgr_lock, flags); - - /* Mark dirty flag */ - unf_cm_mark_dirty_mem(lport, UNF_LPORT_DIRTY_FLAG_XCHGMGR_DIRTY); - } -} - -void unf_free_all_xchg_mgr(struct unf_lport *lport) -{ - struct unf_xchg_mgr *xchg_mgr = NULL; - ulong flags = 0; - u32 i = 0; - - FC_CHECK_RETURN_VOID(lport); - - /* for each L_Port->Exch_Mgr_List */ - spin_lock_irqsave(&lport->xchg_mgr_lock, flags); - while (!list_empty(&lport->list_xchg_mgr_head)) { - spin_unlock_irqrestore(&lport->xchg_mgr_lock, flags); - - xchg_mgr = unf_get_xchg_mgr_by_lport(lport, i); - unf_free_xchg_mgr(lport, xchg_mgr); - if (i < UNF_EXCHG_MGR_NUM) - lport->xchg_mgr[i] = NULL; - - i++; - - /* go to next */ - spin_lock_irqsave(&lport->xchg_mgr_lock, flags); - } - spin_unlock_irqrestore(&lport->xchg_mgr_lock, flags); - - lport->destroy_step = UNF_LPORT_DESTROY_STEP_4_DESTROY_EXCH_MGR; -} - -static u32 unf_init_xchg_mgr(struct unf_xchg_mgr *xchg_mgr) -{ - FC_CHECK_RETURN_VALUE(xchg_mgr, UNF_RETURN_ERROR); - - memset(xchg_mgr, 0, sizeof(struct unf_xchg_mgr)); - - INIT_LIST_HEAD(&xchg_mgr->xchg_mgr_entry); - xchg_mgr->fcp_mm_start = NULL; - xchg_mgr->mem_szie = sizeof(struct unf_xchg_mgr); - - return RETURN_OK; -} - -static u32 unf_init_xchg_mgr_free_pool(struct unf_xchg_mgr *xchg_mgr) -{ - struct unf_xchg_free_pool *free_pool = NULL; - - FC_CHECK_RETURN_VALUE(xchg_mgr, UNF_RETURN_ERROR); - - free_pool = &xchg_mgr->free_pool; - INIT_LIST_HEAD(&free_pool->list_free_xchg_list); - INIT_LIST_HEAD(&free_pool->list_sfs_xchg_list); - spin_lock_init(&free_pool->xchg_freepool_lock); - free_pool->fcp_xchg_sum = 0; - free_pool->xchg_mgr_completion = NULL; - - return RETURN_OK; -} - -static u32 unf_init_xchg_hot_pool(struct unf_lport *lport, struct unf_xchg_hot_pool *hot_pool, - u32 xchg_sum) -{ - FC_CHECK_RETURN_VALUE(hot_pool, UNF_RETURN_ERROR); - - INIT_LIST_HEAD(&hot_pool->sfs_busylist); - INIT_LIST_HEAD(&hot_pool->ini_busylist); - 
spin_lock_init(&hot_pool->xchg_hotpool_lock); - INIT_LIST_HEAD(&hot_pool->list_destroy_xchg); - hot_pool->total_xchges = 0; - hot_pool->wait_state = false; - hot_pool->lport = lport; - - /* Slab Pool Index */ - hot_pool->slab_next_index = 0; - UNF_TOU16_CHECK(hot_pool->slab_total_sum, xchg_sum, return UNF_RETURN_ERROR); - - return RETURN_OK; -} - -static u32 unf_alloc_and_init_big_sfs_pool(struct unf_lport *lport, struct unf_xchg_mgr *xchg_mgr) -{ -#define UNF_MAX_RESOURCE_RESERVED_FOR_RSCN 20 -#define UNF_BIG_SFS_POOL_TYPES 6 - u32 i = 0; - u32 size = 0; - u32 align_size = 0; - u32 npiv_cnt = 0; - struct unf_big_sfs_pool *big_sfs_pool = NULL; - struct unf_big_sfs *big_sfs_buff = NULL; - u32 buf_total_size; - u32 buf_num; - u32 buf_cnt_per_huge_buf; - u32 alloc_idx; - u32 cur_buf_idx = 0; - u32 cur_buf_offset = 0; - - FC_CHECK_RETURN_VALUE(xchg_mgr, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - big_sfs_pool = &xchg_mgr->big_sfs_pool; - - INIT_LIST_HEAD(&big_sfs_pool->list_freepool); - INIT_LIST_HEAD(&big_sfs_pool->list_busypool); - spin_lock_init(&big_sfs_pool->big_sfs_pool_lock); - npiv_cnt = lport->low_level_func.support_max_npiv_num; - - /* - * The value*6 indicates GID_PT/GID_FT, RSCN, and ECHO - * Another command is received when a command is being responded - * A maximum of 20 resources are reserved for the RSCN. During the test, - * multiple rscn are found. As a result, the resources are insufficient - * and the disc fails. - */ - big_sfs_pool->free_count = (npiv_cnt + 1) * UNF_BIG_SFS_POOL_TYPES + - UNF_MAX_RESOURCE_RESERVED_FOR_RSCN; - big_sfs_buff = - (struct unf_big_sfs *)vmalloc(big_sfs_pool->free_count * sizeof(struct unf_big_sfs)); - if (!big_sfs_buff) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Allocate Big SFS buf fail."); - - return UNF_RETURN_ERROR; - } - memset(big_sfs_buff, 0, big_sfs_pool->free_count * sizeof(struct unf_big_sfs)); - xchg_mgr->mem_szie += (u32)(big_sfs_pool->free_count * sizeof(struct unf_big_sfs)); - big_sfs_pool->big_sfs_pool = (void *)big_sfs_buff; - - /* - * Use the larger value of sizeof (struct unf_gid_acc_pld) and sizeof - * (struct unf_rscn_pld) to avoid the icp error.Therefore, the value is - * directly assigned instead of being compared. - */ - size = sizeof(struct unf_gid_acc_pld); - align_size = ALIGN(size, PAGE_SIZE); - - buf_total_size = align_size * big_sfs_pool->free_count; - xchg_mgr->big_sfs_buf_list.buf_size = - buf_total_size > BUF_LIST_PAGE_SIZE ? BUF_LIST_PAGE_SIZE - : buf_total_size; - - buf_cnt_per_huge_buf = xchg_mgr->big_sfs_buf_list.buf_size / align_size; - buf_num = big_sfs_pool->free_count % buf_cnt_per_huge_buf - ? 
big_sfs_pool->free_count / buf_cnt_per_huge_buf + 1 - : big_sfs_pool->free_count / buf_cnt_per_huge_buf; - - xchg_mgr->big_sfs_buf_list.buflist = (struct buff_list *)kmalloc(buf_num * - sizeof(struct buff_list), GFP_KERNEL); - xchg_mgr->big_sfs_buf_list.buf_num = buf_num; - - if (!xchg_mgr->big_sfs_buf_list.buflist) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Allocate BigSfs pool buf list failed out of memory"); - goto free_buff; - } - memset(xchg_mgr->big_sfs_buf_list.buflist, 0, buf_num * sizeof(struct buff_list)); - for (alloc_idx = 0; alloc_idx < buf_num; alloc_idx++) { - xchg_mgr->big_sfs_buf_list.buflist[alloc_idx].vaddr = - kmalloc(xchg_mgr->big_sfs_buf_list.buf_size, GFP_ATOMIC); - if (xchg_mgr->big_sfs_buf_list.buflist[alloc_idx].vaddr == - NULL) { - goto free_buff; - } - memset(xchg_mgr->big_sfs_buf_list.buflist[alloc_idx].vaddr, 0, - xchg_mgr->big_sfs_buf_list.buf_size); - } - - for (i = 0; i < big_sfs_pool->free_count; i++) { - if (i != 0 && !(i % buf_cnt_per_huge_buf)) - cur_buf_idx++; - - cur_buf_offset = align_size * (i % buf_cnt_per_huge_buf); - big_sfs_buff->addr = xchg_mgr->big_sfs_buf_list.buflist[cur_buf_idx].vaddr + - cur_buf_offset; - big_sfs_buff->size = size; - xchg_mgr->mem_szie += size; - list_add_tail(&big_sfs_buff->entry_bigsfs, &big_sfs_pool->list_freepool); - big_sfs_buff++; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[EVENT]Allocate BigSfs pool size:%d,align_size:%d,buf_num:%u,buf_size:%u", - size, align_size, xchg_mgr->big_sfs_buf_list.buf_num, - xchg_mgr->big_sfs_buf_list.buf_size); - return RETURN_OK; -free_buff: - unf_free_all_big_sfs(xchg_mgr); - vfree(big_sfs_buff); - big_sfs_pool->big_sfs_pool = NULL; - return UNF_RETURN_ERROR; -} - -static void unf_free_one_big_sfs(struct unf_xchg *xchg) -{ - ulong flag = 0; - struct unf_xchg_mgr *xchg_mgr = NULL; - - FC_CHECK_RETURN_VOID(xchg); - xchg_mgr = xchg->xchg_mgr; - FC_CHECK_RETURN_VOID(xchg_mgr); - if (!xchg->big_sfs_buf) - return; - - if (xchg->cmnd_code != NS_GID_PT && xchg->cmnd_code != NS_GID_FT && - xchg->cmnd_code != ELS_ECHO && - xchg->cmnd_code != (UNF_SET_ELS_ACC_TYPE(ELS_ECHO)) && xchg->cmnd_code != ELS_RSCN && - xchg->cmnd_code != (UNF_SET_ELS_ACC_TYPE(ELS_RSCN))) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "Exchange(0x%p), Command(0x%x) big SFS buf is not NULL.", - xchg, xchg->cmnd_code); - } - - spin_lock_irqsave(&xchg_mgr->big_sfs_pool.big_sfs_pool_lock, flag); - list_del(&xchg->big_sfs_buf->entry_bigsfs); - list_add_tail(&xchg->big_sfs_buf->entry_bigsfs, - &xchg_mgr->big_sfs_pool.list_freepool); - xchg_mgr->big_sfs_pool.free_count++; - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "Free one big SFS buf(0x%p), Count(0x%x), Exchange(0x%p), Command(0x%x).", - xchg->big_sfs_buf->addr, xchg_mgr->big_sfs_pool.free_count, - xchg, xchg->cmnd_code); - spin_unlock_irqrestore(&xchg_mgr->big_sfs_pool.big_sfs_pool_lock, flag); -} - -static void unf_free_exchg_mgr_info(struct unf_lport *lport) -{ - u32 i; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flags = 0; - struct unf_xchg_mgr *xchg_mgr = NULL; - - spin_lock_irqsave(&lport->xchg_mgr_lock, flags); - list_for_each_safe(node, next_node, &lport->list_xchg_mgr_head) { - list_del(node); - xchg_mgr = list_entry(node, struct unf_xchg_mgr, xchg_mgr_entry); - } - spin_unlock_irqrestore(&lport->xchg_mgr_lock, flags); - - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - xchg_mgr = lport->xchg_mgr[i]; - - if (xchg_mgr) { - unf_free_big_sfs_pool(xchg_mgr); - - if (xchg_mgr->sfs_mm_start) { - 
dma_free_coherent(&lport->low_level_func.dev->dev, - xchg_mgr->sfs_mem_size, xchg_mgr->sfs_mm_start, - xchg_mgr->sfs_phy_addr); - xchg_mgr->sfs_mm_start = 0; - } - - if (xchg_mgr->fcp_mm_start) { - vfree(xchg_mgr->fcp_mm_start); - xchg_mgr->fcp_mm_start = NULL; - } - - if (xchg_mgr->hot_pool) { - vfree(xchg_mgr->hot_pool); - xchg_mgr->hot_pool = NULL; - } - - vfree(xchg_mgr); - lport->xchg_mgr[i] = NULL; - } - } -} - -static u32 unf_alloc_and_init_xchg_mgr(struct unf_lport *lport) -{ - struct unf_xchg_mgr *xchg_mgr = NULL; - struct unf_xchg_hot_pool *hot_pool = NULL; - struct unf_xchg *xchg_mem = NULL; - void *sfs_mm_start = 0; - dma_addr_t sfs_phy_addr = 0; - u32 xchg_sum = 0; - u32 sfs_xchg_sum = 0; - ulong flags = 0; - u32 ret = UNF_RETURN_ERROR; - u32 slab_num = 0; - u32 i = 0; - - ret = unf_get_xchg_config_sum(lport, &xchg_sum); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Port(0x%x) can't get Exchange.", lport->port_id); - - return UNF_RETURN_ERROR; - } - - /* SFS Exchange Sum */ - sfs_xchg_sum = lport->low_level_func.lport_cfg_items.max_sfs_xchg / - UNF_EXCHG_MGR_NUM; - xchg_sum = xchg_sum / UNF_EXCHG_MGR_NUM; - slab_num = lport->low_level_func.support_max_hot_tag_range / UNF_EXCHG_MGR_NUM; - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - /* Alloc Exchange Manager */ - xchg_mgr = (struct unf_xchg_mgr *)vmalloc(sizeof(struct unf_xchg_mgr)); - if (!xchg_mgr) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Port(0x%x) allocate Exchange Manager Memory Fail.", - lport->port_id); - goto exit; - } - - /* Init Exchange Manager */ - ret = unf_init_xchg_mgr(xchg_mgr); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Port(0x%x) initialization Exchange Manager unsuccessful.", - lport->port_id); - goto free_xchg_mgr; - } - - /* Initialize the Exchange Free Pool resource */ - ret = unf_init_xchg_mgr_free_pool(xchg_mgr); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Port(0x%x) initialization Exchange Manager Free Pool unsuccessful.", - lport->port_id); - goto free_xchg_mgr; - } - - /* Allocate memory for Hot Pool and Xchg slab */ - hot_pool = vmalloc(sizeof(struct unf_xchg_hot_pool) + - sizeof(struct unf_xchg *) * slab_num); - if (!hot_pool) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Port(0x%x) allocate Hot Pool Memory Fail.", - lport->port_id); - goto free_xchg_mgr; - } - memset(hot_pool, 0, - sizeof(struct unf_xchg_hot_pool) + sizeof(struct unf_xchg *) * slab_num); - - xchg_mgr->mem_szie += (u32)(sizeof(struct unf_xchg_hot_pool) + - sizeof(struct unf_xchg *) * slab_num); - /* Initialize the Exchange Hot Pool resource */ - ret = unf_init_xchg_hot_pool(lport, hot_pool, slab_num); - if (ret != RETURN_OK) - goto free_hot_pool; - - hot_pool->base += (u16)(i * slab_num); - /* Allocate the memory of all Xchg (IO/SFS) */ - xchg_mem = vmalloc(sizeof(struct unf_xchg) * xchg_sum); - if (!xchg_mem) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Port(0x%x) allocate Exchange Memory Fail.", - lport->port_id); - goto free_hot_pool; - } - memset(xchg_mem, 0, sizeof(struct unf_xchg) * xchg_sum); - - xchg_mgr->mem_szie += (u32)(sizeof(struct unf_xchg) * xchg_sum); - xchg_mgr->hot_pool = hot_pool; - xchg_mgr->fcp_mm_start = xchg_mem; - /* Allocate the memory used by the SFS Xchg to carry the - * ELS/BLS/GS command and response - */ - xchg_mgr->sfs_mem_size = (u32)(sizeof(union unf_sfs_u) * sfs_xchg_sum); - - /* Apply for the DMA space for sending sfs frames. 
- * If the value of DMA32 is less than 4 GB, cross-4G problems - * will not occur - */ - sfs_mm_start = dma_alloc_coherent(&lport->low_level_func.dev->dev, - xchg_mgr->sfs_mem_size, - &sfs_phy_addr, GFP_KERNEL); - if (!sfs_mm_start) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Port(0x%x) Get Free Pagers Fail .", - lport->port_id); - goto free_xchg_mem; - } - memset(sfs_mm_start, 0, sizeof(union unf_sfs_u) * sfs_xchg_sum); - - xchg_mgr->mem_szie += xchg_mgr->sfs_mem_size; - xchg_mgr->sfs_mm_start = sfs_mm_start; - xchg_mgr->sfs_phy_addr = sfs_phy_addr; - /* The Xchg is initialized and mounted to the Free Pool */ - ret = unf_init_xchg(lport, xchg_mgr, xchg_sum, sfs_xchg_sum); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Port(0x%x) initialization Exchange unsuccessful, Exchange Number(%d), SFS Exchange number(%d).", - lport->port_id, xchg_sum, sfs_xchg_sum); - dma_free_coherent(&lport->low_level_func.dev->dev, xchg_mgr->sfs_mem_size, - xchg_mgr->sfs_mm_start, xchg_mgr->sfs_phy_addr); - xchg_mgr->sfs_mm_start = 0; - goto free_xchg_mem; - } - - /* Apply for the memory used by GID_PT, GID_FT, and RSCN */ - ret = unf_alloc_and_init_big_sfs_pool(lport, xchg_mgr); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Port(0x%x) allocate big SFS fail", lport->port_id); - dma_free_coherent(&lport->low_level_func.dev->dev, xchg_mgr->sfs_mem_size, - xchg_mgr->sfs_mm_start, xchg_mgr->sfs_phy_addr); - xchg_mgr->sfs_mm_start = 0; - goto free_xchg_mem; - } - - spin_lock_irqsave(&lport->xchg_mgr_lock, flags); - lport->xchg_mgr[i] = (void *)xchg_mgr; - list_add_tail(&xchg_mgr->xchg_mgr_entry, &lport->list_xchg_mgr_head); - spin_unlock_irqrestore(&lport->xchg_mgr_lock, flags); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) ExchangeMgr:(0x%p),Base:(0x%x).", - lport->port_id, lport->xchg_mgr[i], hot_pool->base); - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "Port(0x%x) allocate Exchange Manager size(0x%x).", - lport->port_id, xchg_mgr->mem_szie); - return RETURN_OK; -free_xchg_mem: - vfree(xchg_mem); -free_hot_pool: - vfree(hot_pool); -free_xchg_mgr: - vfree(xchg_mgr); -exit: - unf_free_exchg_mgr_info(lport); - return UNF_RETURN_ERROR; -} - -void unf_xchg_mgr_destroy(struct unf_lport *lport) -{ - FC_CHECK_RETURN_VOID(lport); - - unf_free_all_xchg_mgr(lport); -} - -u32 unf_alloc_xchg_resource(struct unf_lport *lport) -{ - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - INIT_LIST_HEAD(&lport->list_drty_xchg_mgr_head); - INIT_LIST_HEAD(&lport->list_xchg_mgr_head); - spin_lock_init(&lport->xchg_mgr_lock); - - /* LPort Xchg Management Unit Alloc */ - if (unf_alloc_and_init_xchg_mgr(lport) != RETURN_OK) - return UNF_RETURN_ERROR; - - return RETURN_OK; -} - -void unf_destroy_dirty_xchg(struct unf_lport *lport, bool show_only) -{ - u32 dirty_xchg = 0; - struct unf_xchg_mgr *xchg_mgr = NULL; - ulong flags = 0; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - - FC_CHECK_RETURN_VOID(lport); - - if (lport->dirty_flag & UNF_LPORT_DIRTY_FLAG_XCHGMGR_DIRTY) { - spin_lock_irqsave(&lport->xchg_mgr_lock, flags); - list_for_each_safe(node, next_node, &lport->list_drty_xchg_mgr_head) { - xchg_mgr = list_entry(node, struct unf_xchg_mgr, xchg_mgr_entry); - spin_unlock_irqrestore(&lport->xchg_mgr_lock, flags); - if (xchg_mgr) { - dirty_xchg = (xchg_mgr->free_pool.total_fcp_xchg + - xchg_mgr->free_pool.total_sfs_xchg); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) has %u dirty exchange(s)", - lport->port_id, dirty_xchg); - - 
unf_show_all_xchg(lport, xchg_mgr); - - if (!show_only) { - /* Delete Dirty Exchange Mgr entry */ - spin_lock_irqsave(&lport->xchg_mgr_lock, flags); - list_del_init(&xchg_mgr->xchg_mgr_entry); - spin_unlock_irqrestore(&lport->xchg_mgr_lock, flags); - - /* Free Dirty Exchange Mgr memory */ - unf_free_xchg_mgr_mem(lport, xchg_mgr); - } - } - spin_lock_irqsave(&lport->xchg_mgr_lock, flags); - } - spin_unlock_irqrestore(&lport->xchg_mgr_lock, flags); - } -} - -struct unf_xchg_mgr *unf_get_xchg_mgr_by_lport(struct unf_lport *lport, u32 idx) -{ - struct unf_xchg_mgr *xchg_mgr = NULL; - ulong flags = 0; - - FC_CHECK_RETURN_VALUE(lport, NULL); - FC_CHECK_RETURN_VALUE((idx < UNF_EXCHG_MGR_NUM), NULL); - - spin_lock_irqsave(&lport->xchg_mgr_lock, flags); - xchg_mgr = lport->xchg_mgr[idx]; - spin_unlock_irqrestore(&lport->xchg_mgr_lock, flags); - - return xchg_mgr; -} - -struct unf_xchg_hot_pool *unf_get_hot_pool_by_lport(struct unf_lport *lport, - u32 mgr_idx) -{ - struct unf_xchg_mgr *xchg_mgr = NULL; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VALUE(lport, NULL); - - unf_lport = (struct unf_lport *)(lport->root_lport); - - FC_CHECK_RETURN_VALUE(unf_lport, NULL); - - /* Get Xchg Manager */ - xchg_mgr = unf_get_xchg_mgr_by_lport(unf_lport, mgr_idx); - if (!xchg_mgr) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "Port(0x%x) Exchange Manager is NULL.", - unf_lport->port_id); - - return NULL; - } - - /* Get Xchg Manager Hot Pool */ - return xchg_mgr->hot_pool; -} - -static inline void unf_hot_pool_slab_set(struct unf_xchg_hot_pool *hot_pool, - u16 slab_index, struct unf_xchg *xchg) -{ - FC_CHECK_RETURN_VOID(hot_pool); - - hot_pool->xchg_slab[slab_index] = xchg; -} - -static inline struct unf_xchg *unf_get_xchg_by_xchg_tag(struct unf_xchg_hot_pool *hot_pool, - u16 slab_index) -{ - FC_CHECK_RETURN_VALUE(hot_pool, NULL); - - return hot_pool->xchg_slab[slab_index]; -} - -static void *unf_look_up_xchg_by_tag(void *lport, u16 hot_pool_tag) -{ - struct unf_lport *unf_lport = NULL; - struct unf_xchg_hot_pool *hot_pool = NULL; - struct unf_xchg *xchg = NULL; - ulong flags = 0; - u32 exchg_mgr_idx = 0; - struct unf_xchg_mgr *xchg_mgr = NULL; - - FC_CHECK_RETURN_VALUE(lport, NULL); - - /* In the case of NPIV, lport is the Vport pointer, - * the share uses the ExchMgr of RootLport - */ - unf_lport = ((struct unf_lport *)lport)->root_lport; - FC_CHECK_RETURN_VALUE(unf_lport, NULL); - - exchg_mgr_idx = (hot_pool_tag * UNF_EXCHG_MGR_NUM) / - unf_lport->low_level_func.support_max_hot_tag_range; - if (unlikely(exchg_mgr_idx >= UNF_EXCHG_MGR_NUM)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) Get ExchgMgr %u err", - unf_lport->port_id, exchg_mgr_idx); - - return NULL; - } - - xchg_mgr = unf_lport->xchg_mgr[exchg_mgr_idx]; - - if (unlikely(!xchg_mgr)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) ExchgMgr %u is null", - unf_lport->port_id, exchg_mgr_idx); - - return NULL; - } - - hot_pool = xchg_mgr->hot_pool; - - if (unlikely(!hot_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "Port(0x%x) Hot Pool is NULL.", - unf_lport->port_id); - - return NULL; - } - - if (unlikely(hot_pool_tag >= (hot_pool->slab_total_sum + hot_pool->base))) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]LPort(0x%x) can't Input Tag(0x%x), Max(0x%x).", - unf_lport->port_id, hot_pool_tag, - (hot_pool->slab_total_sum + hot_pool->base)); - - return NULL; - } - - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, flags); - xchg = unf_get_xchg_by_xchg_tag(hot_pool, hot_pool_tag - hot_pool->base); - 
spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags); - - return (void *)xchg; -} - -static void *unf_find_xchg_by_ox_id(void *lport, u16 ox_id, u32 oid) -{ - struct unf_xchg_hot_pool *hot_pool = NULL; - struct unf_xchg *xchg = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - struct unf_lport *unf_lport = NULL; - ulong flags = 0; - ulong xchg_flags = 0; - u32 i = 0; - - FC_CHECK_RETURN_VALUE(lport, NULL); - - /* In the case of NPIV, the lport is the Vport pointer, - * and the share uses the ExchMgr of the RootLport - */ - unf_lport = ((struct unf_lport *)lport)->root_lport; - FC_CHECK_RETURN_VALUE(unf_lport, NULL); - - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - hot_pool = unf_get_hot_pool_by_lport(unf_lport, i); - if (unlikely(!hot_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "Port(0x%x) MgrIdex %u Hot Pool is NULL.", - unf_lport->port_id, i); - continue; - } - - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, flags); - - /* 1. Traverse sfs_busy list */ - list_for_each_safe(node, next_node, &hot_pool->sfs_busylist) { - xchg = list_entry(node, struct unf_xchg, list_xchg_entry); - spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flags); - if (unf_check_oxid_matched(ox_id, oid, xchg)) { - atomic_inc(&xchg->ref_cnt); - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flags); - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags); - return xchg; - } - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flags); - } - - /* 2. Traverse INI_Busy List */ - list_for_each_safe(node, next_node, &hot_pool->ini_busylist) { - xchg = list_entry(node, struct unf_xchg, list_xchg_entry); - spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flags); - if (unf_check_oxid_matched(ox_id, oid, xchg)) { - atomic_inc(&xchg->ref_cnt); - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flags); - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags); - return xchg; - } - spin_unlock_irqrestore(&xchg->xchg_state_lock, - xchg_flags); - } - - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags); - } - - return NULL; -} - -static inline bool unf_check_xchg_matched(struct unf_xchg *xchg, u64 command_sn, - u32 world_id, void *pinitiator) -{ - bool matched = false; - - matched = (command_sn == xchg->cmnd_sn); - if (matched && (atomic_read(&xchg->ref_cnt) > 0)) - return true; - else - return false; -} - -static void *unf_look_up_xchg_by_cmnd_sn(void *lport, u64 command_sn, - u32 world_id, void *pinitiator) -{ - struct unf_lport *unf_lport = NULL; - struct unf_xchg_hot_pool *hot_pool = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - struct unf_xchg *xchg = NULL; - ulong flags = 0; - u32 i; - - FC_CHECK_RETURN_VALUE(lport, NULL); - - /* In NPIV, lport is a Vport pointer, and idle resources are shared by - * ExchMgr of RootLport. However, busy resources are mounted on each - * vport. Therefore, vport needs to be used. 
- */ - unf_lport = (struct unf_lport *)lport; - FC_CHECK_RETURN_VALUE(unf_lport, NULL); - - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - hot_pool = unf_get_hot_pool_by_lport(unf_lport, i); - if (unlikely(!hot_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) hot pool is NULL", - unf_lport->port_id); - - continue; - } - - /* from busy_list */ - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, flags); - list_for_each_safe(node, next_node, &hot_pool->ini_busylist) { - xchg = list_entry(node, struct unf_xchg, list_xchg_entry); - if (unf_check_xchg_matched(xchg, command_sn, world_id, pinitiator)) { - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags); - - return xchg; - } - } - - /* vport: from destroy_list */ - if (unf_lport != unf_lport->root_lport) { - list_for_each_safe(node, next_node, &hot_pool->list_destroy_xchg) { - xchg = list_entry(node, struct unf_xchg, list_xchg_entry); - if (unf_check_xchg_matched(xchg, command_sn, world_id, - pinitiator)) { - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]Port(0x%x) lookup exchange from destroy list", - unf_lport->port_id); - - return xchg; - } - } - } - - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags); - } - - return NULL; -} - -static inline u32 unf_alloc_hot_pool_slab(struct unf_xchg_hot_pool *hot_pool, struct unf_xchg *xchg) -{ - u16 slab_index = 0; - - FC_CHECK_RETURN_VALUE(hot_pool, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - /* Check whether the hotpool tag is in the specified range sirt. - * If yes, set up the management relationship. If no, handle the problem - * according to the normal IO. If the sirt digitmap is used but the tag - * is occupied, it indicates that the I/O is discarded. 
- */ - - hot_pool->slab_next_index = (u16)hot_pool->slab_next_index; - slab_index = hot_pool->slab_next_index; - while (unf_get_xchg_by_xchg_tag(hot_pool, slab_index)) { - slab_index++; - slab_index = slab_index % hot_pool->slab_total_sum; - - /* Rewind occurs */ - if (slab_index == hot_pool->slab_next_index) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_MAJOR, - "There is No Slab At Hot Pool(0x%p) for xchg(0x%p).", - hot_pool, xchg); - - return UNF_RETURN_ERROR; - } - } - - unf_hot_pool_slab_set(hot_pool, slab_index, xchg); - xchg->hotpooltag = slab_index + hot_pool->base; - slab_index++; - hot_pool->slab_next_index = slab_index % hot_pool->slab_total_sum; - - return RETURN_OK; -} - -struct unf_esgl_page * -unf_get_and_add_one_free_esgl_page(struct unf_lport *lport, struct unf_xchg *xchg) -{ - struct unf_esgl *esgl = NULL; - struct list_head *list_head = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, NULL); - FC_CHECK_RETURN_VALUE(xchg, NULL); - - /* Obtain a new Esgl from the EsglPool and add it to the list_esgls of - * the Xchg - */ - spin_lock_irqsave(&lport->esgl_pool.esgl_pool_lock, flag); - if (!list_empty(&lport->esgl_pool.list_esgl_pool)) { - list_head = UNF_OS_LIST_NEXT(&lport->esgl_pool.list_esgl_pool); - list_del(list_head); - lport->esgl_pool.esgl_pool_count--; - list_add_tail(list_head, &xchg->list_esgls); - - esgl = list_entry(list_head, struct unf_esgl, entry_esgl); - atomic_inc(&xchg->esgl_cnt); - spin_unlock_irqrestore(&lport->esgl_pool.esgl_pool_lock, flag); - } else { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) esgl pool is empty", - lport->nport_id); - - spin_unlock_irqrestore(&lport->esgl_pool.esgl_pool_lock, flag); - return NULL; - } - - return &esgl->page; -} - -void unf_release_esgls(struct unf_xchg *xchg) -{ - struct unf_lport *unf_lport = NULL; - struct list_head *list = NULL; - struct list_head *list_tmp = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(xchg); - FC_CHECK_RETURN_VOID(xchg->lport); - - if (atomic_read(&xchg->esgl_cnt) <= 0) - return; - - /* In the case of NPIV, the Vport pointer is saved in v_pstExch, - * and the EsglPool of RootLport is shared. 
- */ - unf_lport = (xchg->lport)->root_lport; - FC_CHECK_RETURN_VOID(unf_lport); - - spin_lock_irqsave(&unf_lport->esgl_pool.esgl_pool_lock, flag); - if (!list_empty(&xchg->list_esgls)) { - list_for_each_safe(list, list_tmp, &xchg->list_esgls) { - list_del(list); - list_add_tail(list, &unf_lport->esgl_pool.list_esgl_pool); - unf_lport->esgl_pool.esgl_pool_count++; - atomic_dec(&xchg->esgl_cnt); - } - } - spin_unlock_irqrestore(&unf_lport->esgl_pool.esgl_pool_lock, flag); -} - -static void unf_add_back_to_fcp_list(struct unf_xchg_free_pool *free_pool, struct unf_xchg *xchg) -{ - ulong flags = 0; - - FC_CHECK_RETURN_VOID(free_pool); - FC_CHECK_RETURN_VOID(xchg); - - unf_init_xchg_attribute(xchg); - - /* The released I/O resources are added to the queue tail to facilitate - * fault locating - */ - spin_lock_irqsave(&free_pool->xchg_freepool_lock, flags); - list_add_tail(&xchg->list_xchg_entry, &free_pool->list_free_xchg_list); - free_pool->total_fcp_xchg++; - spin_unlock_irqrestore(&free_pool->xchg_freepool_lock, flags); -} - -static void unf_check_xchg_mgr_status(struct unf_xchg_mgr *xchg_mgr) -{ - ulong flags = 0; - u32 total_xchg = 0; - u32 total_xchg_sum = 0; - - FC_CHECK_RETURN_VOID(xchg_mgr); - - spin_lock_irqsave(&xchg_mgr->free_pool.xchg_freepool_lock, flags); - - total_xchg = xchg_mgr->free_pool.total_fcp_xchg + xchg_mgr->free_pool.total_sfs_xchg; - total_xchg_sum = xchg_mgr->free_pool.fcp_xchg_sum + xchg_mgr->free_pool.sfs_xchg_sum; - - if (xchg_mgr->free_pool.xchg_mgr_completion && total_xchg == total_xchg_sum) - complete(xchg_mgr->free_pool.xchg_mgr_completion); - - spin_unlock_irqrestore(&xchg_mgr->free_pool.xchg_freepool_lock, flags); -} - -static void unf_free_fcp_xchg(struct unf_xchg *xchg) -{ - struct unf_xchg_free_pool *free_pool = NULL; - struct unf_xchg_mgr *xchg_mgr = NULL; - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - - FC_CHECK_RETURN_VOID(xchg); - - /* Releasing a Specified INI I/O and Invoking the scsi_done Process */ - unf_done_ini_xchg(xchg); - free_pool = xchg->free_pool; - xchg_mgr = xchg->xchg_mgr; - unf_lport = xchg->lport; - unf_rport = xchg->rport; - - atomic_dec(&unf_rport->pending_io_cnt); - /* Release the Esgls in the Xchg structure and return it to the EsglPool - * of the Lport - */ - unf_release_esgls(xchg); - - if (unlikely(xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu)) { - kfree(xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu); - xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu = NULL; - } - - /* Mount I/O resources to the FCP Free linked list */ - unf_add_back_to_fcp_list(free_pool, xchg); - - /* The Xchg is released synchronously and then forcibly released to - * prevent the Xchg from accessing the Xchg in the normal I/O process - */ - if (unlikely(unf_lport->port_removing)) - unf_check_xchg_mgr_status(xchg_mgr); -} - -static void unf_init_io_xchg_param(struct unf_xchg *xchg, struct unf_lport *lport, - struct unf_xchg_mgr *xchg_mgr) -{ - static atomic64_t exhd_id; - - xchg->start_jif = atomic64_inc_return(&exhd_id); - xchg->xchg_mgr = xchg_mgr; - xchg->free_pool = &xchg_mgr->free_pool; - xchg->hot_pool = xchg_mgr->hot_pool; - xchg->lport = lport; - xchg->xchg_type = UNF_XCHG_TYPE_INI; - xchg->free_xchg = unf_free_fcp_xchg; - xchg->scsi_or_tgt_cmnd_func = NULL; - xchg->io_state = UNF_IO_STATE_NEW; - xchg->io_send_stage = TGT_IO_SEND_STAGE_NONE; - xchg->io_send_result = TGT_IO_SEND_RESULT_INVALID; - xchg->io_send_abort = false; - xchg->io_abort_result = false; - xchg->oxid = INVALID_VALUE16; - xchg->abort_oxid = INVALID_VALUE16; 
- xchg->rxid = INVALID_VALUE16; - xchg->sid = INVALID_VALUE32; - xchg->did = INVALID_VALUE32; - xchg->oid = INVALID_VALUE32; - xchg->seq_id = INVALID_VALUE8; - xchg->cmnd_code = INVALID_VALUE32; - xchg->data_len = 0; - xchg->resid_len = 0; - xchg->data_direction = DMA_NONE; - xchg->may_consume_res_cnt = 0; - xchg->fast_consume_res_cnt = 0; - xchg->io_front_jif = 0; - xchg->tmf_state = 0; - xchg->ucode_abts_state = INVALID_VALUE32; - xchg->abts_state = 0; - xchg->rport_bind_jifs = INVALID_VALUE64; - xchg->scsi_id = INVALID_VALUE32; - xchg->qos_level = 0; - xchg->world_id = INVALID_VALUE32; - - memset(&xchg->dif_control, 0, sizeof(struct unf_dif_control_info)); - memset(&xchg->req_sgl_info, 0, sizeof(struct unf_req_sgl_info)); - memset(&xchg->dif_sgl_info, 0, sizeof(struct unf_req_sgl_info)); - xchg->scsi_cmnd_info.result = 0; - - xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = - (u32)atomic64_inc_return(&((struct unf_lport *)lport->root_lport)->exchg_index); - - if (xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] == INVALID_VALUE32) { - xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = - (u32)atomic64_inc_return(&((struct unf_lport *)lport->root_lport)->exchg_index); - } - - if (xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] == 0) { - xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = - (u32)atomic64_inc_return(&((struct unf_lport *)lport->root_lport)->exchg_index); - } - - atomic_set(&xchg->ref_cnt, 0); - atomic_set(&xchg->delay_flag, 0); - - if (delayed_work_pending(&xchg->timeout_work)) - UNF_DEL_XCHG_TIMER_SAFE(xchg); - - INIT_DELAYED_WORK(&xchg->timeout_work, unf_fc_ini_io_xchg_time_out); -} - -static struct unf_xchg *unf_alloc_io_xchg(struct unf_lport *lport, - struct unf_xchg_mgr *xchg_mgr) -{ - struct unf_xchg *xchg = NULL; - struct list_head *list_node = NULL; - struct unf_xchg_free_pool *free_pool = NULL; - struct unf_xchg_hot_pool *hot_pool = NULL; - ulong flags = 0; - - FC_CHECK_RETURN_VALUE(xchg_mgr, NULL); - FC_CHECK_RETURN_VALUE(lport, NULL); - - free_pool = &xchg_mgr->free_pool; - hot_pool = xchg_mgr->hot_pool; - FC_CHECK_RETURN_VALUE(free_pool, NULL); - FC_CHECK_RETURN_VALUE(hot_pool, NULL); - - /* 1. Free Pool */ - spin_lock_irqsave(&free_pool->xchg_freepool_lock, flags); - if (unlikely(list_empty(&free_pool->list_free_xchg_list))) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "Port(0x%x) have no Exchange anymore.", - lport->port_id); - spin_unlock_irqrestore(&free_pool->xchg_freepool_lock, flags); - return NULL; - } - - /* Select an idle node from free pool */ - list_node = UNF_OS_LIST_NEXT(&free_pool->list_free_xchg_list); - list_del(list_node); - free_pool->total_fcp_xchg--; - spin_unlock_irqrestore(&free_pool->xchg_freepool_lock, flags); - - xchg = list_entry(list_node, struct unf_xchg, list_xchg_entry); - /* - * Hot Pool: - * When xchg is mounted to Hot Pool, the mount mode and release mode - * of Xchg must be specified and stored in the sfs linked list. - */ - flags = 0; - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, flags); - if (unf_alloc_hot_pool_slab(hot_pool, xchg) != RETURN_OK) { - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags); - unf_add_back_to_fcp_list(free_pool, xchg); - if (unlikely(lport->port_removing)) - unf_check_xchg_mgr_status(xchg_mgr); - - return NULL; - } - list_add_tail(&xchg->list_xchg_entry, &hot_pool->ini_busylist); - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags); - - /* 3. 
Exchange State */ - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - unf_init_io_xchg_param(xchg, lport, xchg_mgr); - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - return xchg; -} - -static void unf_add_back_to_sfs_list(struct unf_xchg_free_pool *free_pool, - struct unf_xchg *xchg) -{ - ulong flags = 0; - - FC_CHECK_RETURN_VOID(free_pool); - FC_CHECK_RETURN_VOID(xchg); - - unf_init_xchg_attribute(xchg); - - spin_lock_irqsave(&free_pool->xchg_freepool_lock, flags); - - list_add_tail(&xchg->list_xchg_entry, &free_pool->list_sfs_xchg_list); - free_pool->total_sfs_xchg++; - spin_unlock_irqrestore(&free_pool->xchg_freepool_lock, flags); -} - -static void unf_free_sfs_xchg(struct unf_xchg *xchg) -{ - struct unf_xchg_free_pool *free_pool = NULL; - struct unf_xchg_mgr *xchg_mgr = NULL; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VOID(xchg); - - free_pool = xchg->free_pool; - unf_lport = xchg->lport; - xchg_mgr = xchg->xchg_mgr; - - /* The memory is applied for when the GID_PT/GID_FT is sent. - * If no response is received, the GID_PT/GID_FT needs to be forcibly - * released. - */ - - unf_free_one_big_sfs(xchg); - - unf_add_back_to_sfs_list(free_pool, xchg); - - if (unlikely(unf_lport->port_removing)) - unf_check_xchg_mgr_status(xchg_mgr); -} - -static void unf_fc_xchg_add_timer(void *xchg, ulong time_ms, - enum unf_timer_type time_type) -{ - ulong flag = 0; - struct unf_xchg *unf_xchg = NULL; - ulong times_ms = time_ms; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VOID(xchg); - unf_xchg = (struct unf_xchg *)xchg; - unf_lport = unf_xchg->lport; - FC_CHECK_RETURN_VOID(unf_lport); - - /* update timeout */ - switch (time_type) { - /* The processing of TGT RRQ timeout is the same as that of TGT IO - * timeout. The timeout period is different. - */ - case UNF_TIMER_TYPE_TGT_RRQ: - times_ms = times_ms + UNF_TGT_RRQ_REDUNDANT_TIME; - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "TGT RRQ Timer set."); - break; - - case UNF_TIMER_TYPE_INI_RRQ: - times_ms = times_ms - UNF_INI_RRQ_REDUNDANT_TIME; - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "INI RRQ Timer set."); - break; - - case UNF_TIMER_TYPE_SFS: - times_ms = times_ms + UNF_INI_ELS_REDUNDANT_TIME; - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "INI ELS Timer set."); - break; - default: - break; - } - - /* The xchg of the timer must be valid. 
If the reference count of xchg - * is 0, the timer must not be added - */ - if (atomic_read(&unf_xchg->ref_cnt) <= 0) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_KEVENT, - "[warn]Abnormal Exchange(0x%p), Reference count(0x%x), Can't add timer.", - unf_xchg, atomic_read(&unf_xchg->ref_cnt)); - return; - } - - /* Delay Work: Hold for timer */ - spin_lock_irqsave(&unf_xchg->xchg_state_lock, flag); - if (queue_delayed_work(unf_lport->xchg_wq, &unf_xchg->timeout_work, - (ulong)msecs_to_jiffies((u32)times_ms))) { - /* hold for timer */ - atomic_inc(&unf_xchg->ref_cnt); - } - spin_unlock_irqrestore(&unf_xchg->xchg_state_lock, flag); -} - -static void unf_init_sfs_xchg_param(struct unf_xchg *xchg, - struct unf_lport *lport, - struct unf_xchg_mgr *xchg_mgr) -{ - xchg->free_pool = &xchg_mgr->free_pool; - xchg->hot_pool = xchg_mgr->hot_pool; - xchg->lport = lport; - xchg->xchg_mgr = xchg_mgr; - xchg->free_xchg = unf_free_sfs_xchg; - xchg->xchg_type = UNF_XCHG_TYPE_SFS; - xchg->io_state = UNF_IO_STATE_NEW; - xchg->scsi_cmnd_info.result = 0; - xchg->ob_callback_sts = UNF_IO_SUCCESS; - - xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = - (u32)atomic64_inc_return(&((struct unf_lport *)lport->root_lport)->exchg_index); - - if (xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] == - INVALID_VALUE32) { - xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = - (u32)atomic64_inc_return(&((struct unf_lport *)lport->root_lport)->exchg_index); - } - - if (xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] == 0) { - xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = - (u32)atomic64_inc_return(&((struct unf_lport *)lport->root_lport)->exchg_index); - } - - if (delayed_work_pending(&xchg->timeout_work)) - UNF_DEL_XCHG_TIMER_SAFE(xchg); - - INIT_DELAYED_WORK(&xchg->timeout_work, unf_sfs_xchg_time_out); -} - -static struct unf_xchg *unf_alloc_sfs_xchg(struct unf_lport *lport, - struct unf_xchg_mgr *xchg_mgr) -{ - struct unf_xchg *xchg = NULL; - struct list_head *list_node = NULL; - struct unf_xchg_free_pool *free_pool = NULL; - struct unf_xchg_hot_pool *hot_pool = NULL; - ulong flags = 0; - - FC_CHECK_RETURN_VALUE(lport, NULL); - FC_CHECK_RETURN_VALUE(xchg_mgr, NULL); - free_pool = &xchg_mgr->free_pool; - hot_pool = xchg_mgr->hot_pool; - FC_CHECK_RETURN_VALUE(free_pool, NULL); - FC_CHECK_RETURN_VALUE(hot_pool, NULL); - - /* Select an idle node from free pool */ - spin_lock_irqsave(&free_pool->xchg_freepool_lock, flags); - if (list_empty(&free_pool->list_sfs_xchg_list)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "Port(0x%x) have no Exchange anymore.", - lport->port_id); - spin_unlock_irqrestore(&free_pool->xchg_freepool_lock, flags); - return NULL; - } - - list_node = UNF_OS_LIST_NEXT(&free_pool->list_sfs_xchg_list); - list_del(list_node); - free_pool->total_sfs_xchg--; - spin_unlock_irqrestore(&free_pool->xchg_freepool_lock, flags); - - xchg = list_entry(list_node, struct unf_xchg, list_xchg_entry); - /* - * The xchg is mounted to the Hot Pool. - * The mount mode and release mode of the xchg must be specified - * and stored in the sfs linked list. 
- */ - flags = 0; - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, flags); - if (unf_alloc_hot_pool_slab(hot_pool, xchg) != RETURN_OK) { - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags); - unf_add_back_to_sfs_list(free_pool, xchg); - if (unlikely(lport->port_removing)) - unf_check_xchg_mgr_status(xchg_mgr); - - return NULL; - } - - list_add_tail(&xchg->list_xchg_entry, &hot_pool->sfs_busylist); - hot_pool->total_xchges++; - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags); - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - unf_init_sfs_xchg_param(xchg, lport, xchg_mgr); - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - return xchg; -} - -static void *unf_get_new_xchg(void *lport, u32 xchg_type) -{ - struct unf_lport *unf_lport = NULL; - struct unf_xchg_mgr *xchg_mgr = NULL; - struct unf_xchg *xchg = NULL; - u32 exchg_type = 0; - u16 xchg_mgr_type; - u32 rtry_cnt = 0; - u32 last_exchg_mgr_idx; - - xchg_mgr_type = (xchg_type >> UNF_SHIFT_16); - exchg_type = xchg_type & SPFC_XCHG_TYPE_MASK; - FC_CHECK_RETURN_VALUE(lport, NULL); - - /* In the case of NPIV, the lport is the Vport pointer, - * and the share uses the ExchMgr of the RootLport. - */ - unf_lport = ((struct unf_lport *)lport)->root_lport; - FC_CHECK_RETURN_VALUE(unf_lport, NULL); - - if (unlikely((atomic_read(&unf_lport->lport_no_operate_flag) == UNF_LPORT_NOP) || - (atomic_read(&((struct unf_lport *)lport)->lport_no_operate_flag) == - UNF_LPORT_NOP))) { - return NULL; - } - - last_exchg_mgr_idx = (u32)atomic64_inc_return(&unf_lport->last_exchg_mgr_idx); -try_next_mgr: - rtry_cnt++; - if (unlikely(rtry_cnt > UNF_EXCHG_MGR_NUM)) - return NULL; - - /* If Fixed mode,only use XchgMgr 0 */ - if (unlikely(xchg_mgr_type == UNF_XCHG_MGR_TYPE_FIXED)) { - xchg_mgr = (struct unf_xchg_mgr *)unf_lport->xchg_mgr[ARRAY_INDEX_0]; - } else { - xchg_mgr = (struct unf_xchg_mgr *)unf_lport - ->xchg_mgr[last_exchg_mgr_idx % UNF_EXCHG_MGR_NUM]; - } - if (unlikely(!xchg_mgr)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) get exchangemgr %u is null.", - unf_lport->port_id, last_exchg_mgr_idx % UNF_EXCHG_MGR_NUM); - return NULL; - } - last_exchg_mgr_idx++; - - /* Allocate entries based on the Exchange type */ - switch (exchg_type) { - case UNF_XCHG_TYPE_SFS: - xchg = unf_alloc_sfs_xchg(lport, xchg_mgr); - break; - case UNF_XCHG_TYPE_INI: - xchg = unf_alloc_io_xchg(lport, xchg_mgr); - break; - - default: - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "Port(0x%x) unwonted, Exchange type(0x%x).", - unf_lport->port_id, exchg_type); - break; - } - - if (likely(xchg)) { - xchg->oxid = INVALID_VALUE16; - xchg->abort_oxid = INVALID_VALUE16; - xchg->rxid = INVALID_VALUE16; - xchg->debug_hook = false; - xchg->alloc_jif = jiffies; - - atomic_set(&xchg->ref_cnt, 1); - atomic_set(&xchg->esgl_cnt, 0); - } else { - goto try_next_mgr; - } - - return xchg; -} - -static void unf_free_xchg(void *lport, void *xchg) -{ - struct unf_xchg *unf_xchg = NULL; - - FC_CHECK_RETURN_VOID(xchg); - - unf_xchg = (struct unf_xchg *)xchg; - unf_xchg_ref_dec(unf_xchg, XCHG_FREE_XCHG); -} - -u32 unf_init_xchg_mgr_temp(struct unf_lport *lport) -{ - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - lport->xchg_mgr_temp.unf_xchg_get_free_and_init = unf_get_new_xchg; - lport->xchg_mgr_temp.unf_xchg_release = unf_free_xchg; - lport->xchg_mgr_temp.unf_look_up_xchg_by_tag = unf_look_up_xchg_by_tag; - lport->xchg_mgr_temp.unf_look_up_xchg_by_id = unf_find_xchg_by_ox_id; - lport->xchg_mgr_temp.unf_xchg_add_timer = unf_fc_xchg_add_timer; - 
lport->xchg_mgr_temp.unf_xchg_cancel_timer = unf_xchg_cancel_timer; - lport->xchg_mgr_temp.unf_xchg_abort_all_io = unf_xchg_abort_all_xchg; - lport->xchg_mgr_temp.unf_look_up_xchg_by_cmnd_sn = unf_look_up_xchg_by_cmnd_sn; - lport->xchg_mgr_temp.unf_xchg_abort_by_lun = unf_xchg_abort_by_lun; - lport->xchg_mgr_temp.unf_xchg_abort_by_session = unf_xchg_abort_by_session; - lport->xchg_mgr_temp.unf_xchg_mgr_io_xchg_abort = unf_xchg_mgr_io_xchg_abort; - lport->xchg_mgr_temp.unf_xchg_mgr_sfs_xchg_abort = unf_xchg_mgr_sfs_xchg_abort; - - return RETURN_OK; -} - -void unf_release_xchg_mgr_temp(struct unf_lport *lport) -{ - FC_CHECK_RETURN_VOID(lport); - - if (lport->dirty_flag & UNF_LPORT_DIRTY_FLAG_XCHGMGR_DIRTY) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Port(0x%x) has dirty exchange, Don't release exchange manager template.", - lport->port_id); - - return; - } - - memset(&lport->xchg_mgr_temp, 0, sizeof(struct unf_cm_xchg_mgr_template)); - - lport->destroy_step = UNF_LPORT_DESTROY_STEP_7_DESTROY_XCHG_MGR_TMP; -} - -void unf_set_hot_pool_wait_state(struct unf_lport *lport, bool wait_state) -{ - struct unf_xchg_hot_pool *hot_pool = NULL; - ulong pool_lock_flags = 0; - u32 i = 0; - - FC_CHECK_RETURN_VOID(lport); - - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - hot_pool = unf_get_hot_pool_by_lport(lport, i); - if (unlikely(!hot_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) hot pool is NULL", - lport->port_id); - continue; - } - - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, pool_lock_flags); - hot_pool->wait_state = wait_state; - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, pool_lock_flags); - } -} - -u32 unf_xchg_ref_inc(struct unf_xchg *xchg, enum unf_ioflow_id io_stage) -{ - struct unf_xchg_hot_pool *hot_pool = NULL; - ulong flags = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - if (unlikely(xchg->debug_hook)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]Xchg(0x%p) State(0x%x) SID_DID(0x%x_0x%x) OX_ID_RX_ID(0x%x_0x%x) AllocJiff(%llu) Refcnt(%d) Stage(%s)", - xchg, xchg->io_state, xchg->sid, xchg->did, - xchg->oxid, xchg->rxid, xchg->alloc_jif, - atomic_read(&xchg->ref_cnt), - io_stage_table[io_stage].stage); - } - - hot_pool = xchg->hot_pool; - FC_CHECK_RETURN_VALUE(hot_pool, UNF_RETURN_ERROR); - - /* Exchange -> Hot Pool Tag check */ - if (unlikely((xchg->hotpooltag >= (hot_pool->slab_total_sum + hot_pool->base)) || - xchg->hotpooltag < hot_pool->base)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Xchg(0x%p) S_ID(%xh) D_ID(0x%x) hot_pool_tag(0x%x) is bigger than slab total num(0x%x) base(0x%x)", - xchg, xchg->sid, xchg->did, xchg->hotpooltag, - hot_pool->slab_total_sum + hot_pool->base, hot_pool->base); - - return UNF_RETURN_ERROR; - } - - /* atomic read & inc */ - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - if (unlikely(atomic_read(&xchg->ref_cnt) <= 0)) { - ret = UNF_RETURN_ERROR; - } else { - if (unf_get_xchg_by_xchg_tag(hot_pool, xchg->hotpooltag - hot_pool->base) == xchg) { - atomic_inc(&xchg->ref_cnt); - ret = RETURN_OK; - } else { - ret = UNF_RETURN_ERROR; - } - } - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - return ret; -} - -void unf_xchg_ref_dec(struct unf_xchg *xchg, enum unf_ioflow_id io_stage) -{ - /* Atomic dec ref_cnt & test, free exchange if necessary (ref_cnt==0) */ - struct unf_xchg_hot_pool *hot_pool = NULL; - void (*free_xchg)(struct unf_xchg *) = NULL; - ulong flags = 0; - ulong xchg_lock_falgs = 0; - - FC_CHECK_RETURN_VOID(xchg); - - if (xchg->debug_hook) { - 
FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]Xchg(0x%p) State(0x%x) SID_DID(0x%x_0x%x) OXID_RXID(0x%x_0x%x) AllocJiff(%llu) Refcnt(%d) Statge %s", - xchg, xchg->io_state, xchg->sid, xchg->did, xchg->oxid, - xchg->rxid, xchg->alloc_jif, - atomic_read(&xchg->ref_cnt), - io_stage_table[io_stage].stage); - } - - hot_pool = xchg->hot_pool; - FC_CHECK_RETURN_VOID(hot_pool); - FC_CHECK_RETURN_VOID((xchg->hotpooltag >= hot_pool->base)); - - /* - * 1. Atomic dec & test - * 2. Free exchange if necessary (ref_cnt == 0) - */ - spin_lock_irqsave(&xchg->xchg_state_lock, xchg_lock_falgs); - if (atomic_dec_and_test(&xchg->ref_cnt)) { - free_xchg = xchg->free_xchg; - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_lock_falgs); - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, flags); - unf_hot_pool_slab_set(hot_pool, - xchg->hotpooltag - hot_pool->base, NULL); - /* Delete exchange list entry */ - list_del_init(&xchg->list_xchg_entry); - hot_pool->total_xchges--; - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags); - - /* unf_free_fcp_xchg --->>> unf_done_ini_xchg */ - if (free_xchg) - free_xchg(xchg); - } else { - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_lock_falgs); - } -} - -static void unf_init_xchg_attribute(struct unf_xchg *xchg) -{ - ulong flags = 0; - - FC_CHECK_RETURN_VOID(xchg); - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - xchg->xchg_mgr = NULL; - xchg->free_pool = NULL; - xchg->hot_pool = NULL; - xchg->lport = NULL; - xchg->rport = NULL; - xchg->disc_rport = NULL; - xchg->io_state = UNF_IO_STATE_NEW; - xchg->io_send_stage = TGT_IO_SEND_STAGE_NONE; - xchg->io_send_result = TGT_IO_SEND_RESULT_INVALID; - xchg->io_send_abort = false; - xchg->io_abort_result = false; - xchg->abts_state = 0; - xchg->oxid = INVALID_VALUE16; - xchg->abort_oxid = INVALID_VALUE16; - xchg->rxid = INVALID_VALUE16; - xchg->sid = INVALID_VALUE32; - xchg->did = INVALID_VALUE32; - xchg->oid = INVALID_VALUE32; - xchg->disc_portid = INVALID_VALUE32; - xchg->seq_id = INVALID_VALUE8; - xchg->cmnd_code = INVALID_VALUE32; - xchg->cmnd_sn = INVALID_VALUE64; - xchg->data_len = 0; - xchg->resid_len = 0; - xchg->data_direction = DMA_NONE; - xchg->hotpooltag = INVALID_VALUE16; - xchg->big_sfs_buf = NULL; - xchg->may_consume_res_cnt = 0; - xchg->fast_consume_res_cnt = 0; - xchg->io_front_jif = INVALID_VALUE64; - xchg->ob_callback_sts = UNF_IO_SUCCESS; - xchg->start_jif = 0; - xchg->rport_bind_jifs = INVALID_VALUE64; - xchg->scsi_id = INVALID_VALUE32; - xchg->qos_level = 0; - xchg->world_id = INVALID_VALUE32; - - memset(&xchg->seq, 0, sizeof(struct unf_seq)); - memset(&xchg->fcp_cmnd, 0, sizeof(struct unf_fcp_cmnd)); - memset(&xchg->scsi_cmnd_info, 0, sizeof(struct unf_scsi_cmd_info)); - memset(&xchg->dif_info, 0, sizeof(struct dif_info)); - memset(xchg->private_data, 0, (PKG_MAX_PRIVATE_DATA_SIZE * sizeof(u32))); - xchg->echo_info.echo_result = UNF_ELS_ECHO_RESULT_OK; - xchg->echo_info.response_time = 0; - - if (xchg->xchg_type == UNF_XCHG_TYPE_SFS) { - if (xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) { - memset(xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr, 0, - sizeof(union unf_sfs_u)); - xchg->fcp_sfs_union.sfs_entry.cur_offset = 0; - } - } else if (xchg->xchg_type != UNF_XCHG_TYPE_INI) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "Exchange Type(0x%x) SFS Union uninited.", - xchg->xchg_type); - } - xchg->xchg_type = UNF_XCHG_TYPE_INVALID; - xchg->xfer_or_rsp_echo = NULL; - xchg->scsi_or_tgt_cmnd_func = NULL; - xchg->ob_callback = NULL; - xchg->callback = NULL; - xchg->free_xchg = NULL; - - 
atomic_set(&xchg->ref_cnt, 0); - atomic_set(&xchg->esgl_cnt, 0); - atomic_set(&xchg->delay_flag, 0); - - if (delayed_work_pending(&xchg->timeout_work)) - UNF_DEL_XCHG_TIMER_SAFE(xchg); - - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); -} - -bool unf_busy_io_completed(struct unf_lport *lport) -{ - struct unf_xchg_mgr *xchg_mgr = NULL; - ulong pool_lock_flags = 0; - u32 i; - - FC_CHECK_RETURN_VALUE(lport, true); - - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - xchg_mgr = unf_get_xchg_mgr_by_lport(lport, i); - if (unlikely(!xchg_mgr)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) Exchange Manager is NULL", - lport->port_id); - continue; - } - - spin_lock_irqsave(&xchg_mgr->hot_pool->xchg_hotpool_lock, - pool_lock_flags); - if (!list_empty(&xchg_mgr->hot_pool->ini_busylist)) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[info]Port(0x%x) ini busylist is not empty.", - lport->port_id); - spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hotpool_lock, - pool_lock_flags); - return false; - } - spin_unlock_irqrestore(&xchg_mgr->hot_pool->xchg_hotpool_lock, - pool_lock_flags); - } - return true; -} diff --git a/drivers/scsi/spfc/common/unf_exchg.h b/drivers/scsi/spfc/common/unf_exchg.h deleted file mode 100644 index 0a48be31b971..000000000000 --- a/drivers/scsi/spfc/common/unf_exchg.h +++ /dev/null @@ -1,436 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_EXCHG_H -#define UNF_EXCHG_H - -#include "unf_type.h" -#include "unf_fcstruct.h" -#include "unf_lport.h" -#include "unf_scsi_common.h" - -enum unf_ioflow_id { - XCHG_ALLOC = 0, - TGT_RECEIVE_ABTS, - TGT_ABTS_DONE, - TGT_IO_SRR, - SFS_RESPONSE, - SFS_TIMEOUT, - INI_SEND_CMND, - INI_RESPONSE_DONE, - INI_EH_ABORT, - INI_EH_DEVICE_RESET, - INI_EH_BLS_DONE, - INI_IO_TIMEOUT, - INI_REQ_TIMEOUT, - XCHG_CANCEL_TIMER, - XCHG_FREE_XCHG, - SEND_ELS, - IO_XCHG_WAIT, - XCHG_BUTT -}; - -enum unf_xchg_type { - UNF_XCHG_TYPE_INI = 0, /* INI IO */ - UNF_XCHG_TYPE_SFS = 1, - UNF_XCHG_TYPE_INVALID -}; - -enum unf_xchg_mgr_type { - UNF_XCHG_MGR_TYPE_RANDOM = 0, - UNF_XCHG_MGR_TYPE_FIXED = 1, - UNF_XCHG_MGR_TYPE_INVALID -}; - -enum tgt_io_send_stage { - TGT_IO_SEND_STAGE_NONE = 0, - TGT_IO_SEND_STAGE_DOING = 1, /* xfer/rsp into queue */ - TGT_IO_SEND_STAGE_DONE = 2, /* xfer/rsp into queue complete */ - TGT_IO_SEND_STAGE_ECHO = 3, /* driver handled TSTS */ - TGT_IO_SEND_STAGE_INVALID -}; - -enum tgt_io_send_result { - TGT_IO_SEND_RESULT_OK = 0, /* xfer/rsp enqueue succeed */ - TGT_IO_SEND_RESULT_FAIL = 1, /* xfer/rsp enqueue fail */ - TGT_IO_SEND_RESULT_INVALID -}; - -struct unf_io_flow_id { - char *stage; -}; - -#define unf_check_oxid_matched(ox_id, oid, xchg) \ - (((ox_id) == (xchg)->oxid) && ((oid) == (xchg)->oid) && \ - (atomic_read(&(xchg)->ref_cnt) > 0)) - -#define UNF_CHECK_ALLOCTIME_VALID(lport, xchg_tag, exchg, pkg_alloc_time, \ - xchg_alloc_time) \ - do { \ - if (unlikely(((pkg_alloc_time) != 0) && \ - ((pkg_alloc_time) != (xchg_alloc_time)))) { \ - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ERR, \ - "Lport(0x%x_0x%x_0x%x_0x%p) AllocTime is not " \ - "equal,PKG " \ - "AllocTime:0x%x,Exhg AllocTime:0x%x", \ - (lport)->port_id, (lport)->nport_id, xchg_tag, \ - exchg, pkg_alloc_time, xchg_alloc_time); \ - return UNF_RETURN_ERROR; \ - }; \ - if (unlikely((pkg_alloc_time) == 0)) { \ - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_MAJOR, \ - "Lport(0x%x_0x%x_0x%x_0x%p) pkgtime err,PKG " \ - "AllocTime:0x%x,Exhg AllocTime:0x%x", \ - (lport)->port_id, (lport)->nport_id, xchg_tag, \ - exchg, 
pkg_alloc_time, xchg_alloc_time); \ - }; \ - } while (0) - -#define UNF_SET_SCSI_CMND_RESULT(xchg, cmnd_result) \ - ((xchg)->scsi_cmnd_info.result = (cmnd_result)) - -#define UNF_GET_GS_SFS_XCHG_TIMER(lport) (3 * (ulong)(lport)->ra_tov) - -#define UNF_GET_BLS_SFS_XCHG_TIMER(lport) (2 * (ulong)(lport)->ra_tov) - -#define UNF_GET_ELS_SFS_XCHG_TIMER(lport) (2 * (ulong)(lport)->ra_tov) - -#define UNF_ELS_ECHO_RESULT_OK 0 -#define UNF_ELS_ECHO_RESULT_FAIL 1 - -struct unf_xchg; -/* Xchg hot pool, busy IO lookup Xchg */ -struct unf_xchg_hot_pool { - /* Xchg sum, in hot pool */ - u16 total_xchges; - bool wait_state; - - /* pool lock */ - spinlock_t xchg_hotpool_lock; - - /* Xchg posiontion list */ - struct list_head sfs_busylist; - struct list_head ini_busylist; - struct list_head list_destroy_xchg; - - /* Next free hot point */ - u16 slab_next_index; - u16 slab_total_sum; - u16 base; - - struct unf_lport *lport; - - struct unf_xchg *xchg_slab[ARRAY_INDEX_0]; -}; - -/* Xchg's FREE POOL */ -struct unf_xchg_free_pool { - spinlock_t xchg_freepool_lock; - - u32 fcp_xchg_sum; - - /* IO used Xchg */ - struct list_head list_free_xchg_list; - u32 total_fcp_xchg; - - /* SFS used Xchg */ - struct list_head list_sfs_xchg_list; - u32 total_sfs_xchg; - u32 sfs_xchg_sum; - - struct completion *xchg_mgr_completion; -}; - -struct unf_big_sfs { - struct list_head entry_bigsfs; - void *addr; - u32 size; -}; - -struct unf_big_sfs_pool { - void *big_sfs_pool; - u32 free_count; - struct list_head list_freepool; - struct list_head list_busypool; - spinlock_t big_sfs_pool_lock; -}; - -/* Xchg Manager for vport Xchg */ -struct unf_xchg_mgr { - /* MG type */ - u32 mgr_type; - - /* MG entry */ - struct list_head xchg_mgr_entry; - - /* MG attribution */ - u32 mem_szie; - - /* MG alloced resource */ - void *fcp_mm_start; - - u32 sfs_mem_size; - void *sfs_mm_start; - dma_addr_t sfs_phy_addr; - - struct unf_xchg_free_pool free_pool; - struct unf_xchg_hot_pool *hot_pool; - - struct unf_big_sfs_pool big_sfs_pool; - - struct buf_describe big_sfs_buf_list; -}; - -struct unf_seq { - /* Seq ID */ - u8 seq_id; - - /* Seq Cnt */ - u16 seq_cnt; - - /* Seq state and len,maybe used for fcoe */ - u16 seq_stat; - u32 rec_data_len; -}; - -union unf_xchg_fcp_sfs { - struct unf_sfs_entry sfs_entry; - struct unf_fcp_rsp_iu_entry fcp_rsp_entry; -}; - -#define UNF_IO_STATE_NEW 0 -#define TGT_IO_STATE_SEND_XFERRDY (1 << 2) /* succeed to send XFer rdy */ -#define TGT_IO_STATE_RSP (1 << 5) /* chip send rsp */ -#define TGT_IO_STATE_ABORT (1 << 7) - -#define INI_IO_STATE_UPTASK \ - (1 << 15) /* INI Upper-layer Task Management Commands */ -#define INI_IO_STATE_UPABORT \ - (1 << 16) /* INI Upper-layer timeout Abort flag \ - */ -#define INI_IO_STATE_DRABORT (1 << 17) /* INI driver Abort flag */ -#define INI_IO_STATE_DONE (1 << 18) /* INI complete flag */ -#define INI_IO_STATE_WAIT_RRQ (1 << 19) /* INI wait send rrq */ -#define INI_IO_STATE_UPSEND_ERR (1 << 20) /* INI send fail flag */ -/* INI only clear firmware resource flag */ -#define INI_IO_STATE_ABORT_RESOURCE (1 << 21) -/* ioc abort:INI send ABTS ,5S timeout Semaphore,than set 1 */ -#define INI_IO_STATE_ABORT_TIMEOUT (1 << 22) -#define INI_IO_STATE_RRQSEND_ERR (1 << 23) /* INI send RRQ fail flag */ -#define INI_IO_STATE_LOGO (1 << 24) /* INI busy IO session logo status */ -#define INI_IO_STATE_TMF_ABORT (1 << 25) /* INI TMF ABORT IO flag */ -#define INI_IO_STATE_REC_TIMEOUT_WAIT (1 << 26) /* INI REC TIMEOUT WAIT */ -#define INI_IO_STATE_REC_TIMEOUT (1 << 27) /* INI REC TIMEOUT */ - -#define 
TMF_RESPONSE_RECEIVED (1 << 0) -#define MARKER_STS_RECEIVED (1 << 1) -#define ABTS_RESPONSE_RECEIVED (1 << 2) - -struct unf_scsi_cmd_info { - ulong time_out; - ulong abort_time_out; - void *scsi_cmnd; - void (*done)(struct unf_scsi_cmnd *scsi_cmd); - ini_get_sgl_entry_buf unf_get_sgl_entry_buf; - struct unf_ini_error_code *err_code_table; /* error code table */ - char *sense_buf; - u32 err_code_table_cout; /* Size of the error code table */ - u32 buf_len; - u32 entry_cnt; - u32 result; /* Stores command execution results */ - u32 port_id; -/* Re-search for rport based on scsiid during retry. Otherwise, - *data inconsistency will occur - */ - u32 scsi_id; - void *sgl; - uplevel_cmd_done uplevel_done; -}; - -struct unf_req_sgl_info { - void *sgl; - void *sgl_start; - u32 req_index; - u32 entry_index; -}; - -struct unf_els_echo_info { - u64 response_time; - struct semaphore echo_sync_sema; - u32 echo_result; -}; - -struct unf_xchg { - /* Mg resource relative */ - /* list delete from HotPool */ - struct unf_xchg_hot_pool *hot_pool; - - /* attach to FreePool */ - struct unf_xchg_free_pool *free_pool; - struct unf_xchg_mgr *xchg_mgr; - struct unf_lport *lport; /* Local LPort/VLPort */ - struct unf_rport *rport; /* Rmote Port */ - struct unf_rport *disc_rport; /* Discover Rmote Port */ - struct list_head list_xchg_entry; - struct list_head list_abort_xchg_entry; - spinlock_t xchg_state_lock; - - /* Xchg reference */ - atomic_t ref_cnt; - atomic_t esgl_cnt; - bool debug_hook; - /* Xchg attribution */ - u16 hotpooltag; - u16 abort_oxid; - u32 xchg_type; /* LS,TGT CMND ,REQ,or SCSI Cmnd */ - u16 oxid; - u16 rxid; - u32 sid; - u32 did; - u32 oid; /* ID of the exchange initiator */ - u32 disc_portid; /* Send GNN_ID/GFF_ID NPortId */ - u8 seq_id; - u8 byte_orders; /* Byte order */ - struct unf_seq seq; - - u32 cmnd_code; - u32 world_id; - /* Dif control */ - struct unf_dif_control_info dif_control; - struct dif_info dif_info; - /* IO status Abort,timer out */ - u32 io_state; /* TGT_IO_STATE_E */ - u32 tmf_state; /* TMF STATE */ - u32 ucode_abts_state; - u32 abts_state; - - /* IO Enqueuing */ - enum tgt_io_send_stage io_send_stage; /* tgt_io_send_stage */ - /* IO Enqueuing result, success or failure */ - enum tgt_io_send_result io_send_result; /* tgt_io_send_result */ - - u8 io_send_abort; /* is or not send io abort */ - /*result of io abort cmd(succ:true; fail:false)*/ - u8 io_abort_result; - /* for INI,Indicates the length of the data transmitted over the PCI - * link - */ - u32 data_len; - /* ResidLen,greater than 0 UnderFlow or Less than Overflow */ - int resid_len; - /* +++++++++++++++++IO Special++++++++++++++++++++ */ - /* point to tgt cmnd/req/scsi cmnd */ - /* Fcp cmnd */ - struct unf_fcp_cmnd fcp_cmnd; - - struct unf_scsi_cmd_info scsi_cmnd_info; - - struct unf_req_sgl_info req_sgl_info; - - struct unf_req_sgl_info dif_sgl_info; - - u64 cmnd_sn; - void *pinitiator; - - /* timestamp */ - u64 start_jif; - u64 alloc_jif; - - u64 io_front_jif; - - u32 may_consume_res_cnt; - u32 fast_consume_res_cnt; - - /* scsi req info */ - u32 data_direction; - - struct unf_big_sfs *big_sfs_buf; - - /* scsi cmnd sense_buffer pointer */ - union unf_xchg_fcp_sfs fcp_sfs_union; - - /* One exchange may use several External Sgls */ - struct list_head list_esgls; - struct unf_els_echo_info echo_info; - struct semaphore task_sema; - - /* for RRQ ,IO Xchg add to SFS Xchg */ - void *io_xchg; - - /* Xchg delay work */ - struct delayed_work timeout_work; - - void (*xfer_or_rsp_echo)(struct unf_xchg *xchg, u32 status); - - 
/* wait list XCHG send function */ - int (*scsi_or_tgt_cmnd_func)(struct unf_xchg *xchg); - - /* send result callback */ - void (*ob_callback)(struct unf_xchg *xchg); - - /* Response IO callback */ - void (*callback)(void *lport, void *rport, void *xchg); - - /* Xchg release function */ - void (*free_xchg)(struct unf_xchg *xchg); - - /* +++++++++++++++++low level Special++++++++++++++++++++ */ - /* private data,provide for low level */ - u32 private_data[PKG_MAX_PRIVATE_DATA_SIZE]; - - u64 rport_bind_jifs; - - /* sfs exchg ob callback status */ - u32 ob_callback_sts; - u32 scsi_id; - u32 qos_level; - void *ls_rsp_addr; - void *ls_req; - u32 status; - atomic_t delay_flag; - void *upper_ct; -}; - -struct unf_esgl_page * -unf_get_and_add_one_free_esgl_page(struct unf_lport *lport, - struct unf_xchg *xchg); -void unf_release_xchg_mgr_temp(struct unf_lport *lport); -u32 unf_init_xchg_mgr_temp(struct unf_lport *lport); -u32 unf_alloc_xchg_resource(struct unf_lport *lport); -void unf_free_all_xchg_mgr(struct unf_lport *lport); -void unf_xchg_mgr_destroy(struct unf_lport *lport); -u32 unf_xchg_ref_inc(struct unf_xchg *xchg, enum unf_ioflow_id io_stage); -void unf_xchg_ref_dec(struct unf_xchg *xchg, enum unf_ioflow_id io_stage); -struct unf_xchg_mgr *unf_get_xchg_mgr_by_lport(struct unf_lport *lport, - u32 mgr_idx); -struct unf_xchg_hot_pool *unf_get_hot_pool_by_lport(struct unf_lport *lport, - u32 mgr_idx); -void unf_free_lport_ini_xchg(struct unf_xchg_mgr *xchg_mgr, bool done_ini_flag); -struct unf_xchg *unf_cm_lookup_xchg_by_cmnd_sn(void *lport, u64 command_sn, - u32 world_id, void *pinitiator); -void *unf_cm_lookup_xchg_by_id(void *lport, u16 ox_id, u32 oid); -void unf_cm_xchg_abort_by_lun(struct unf_lport *lport, struct unf_rport *rport, - u64 lun_id, void *tm_xchg, - bool abort_all_lun_flag); -void unf_cm_xchg_abort_by_session(struct unf_lport *lport, - struct unf_rport *rport); - -void unf_cm_xchg_mgr_abort_io_by_id(struct unf_lport *lport, - struct unf_rport *rport, u32 sid, u32 did, - u32 extra_io_stat); -void unf_cm_xchg_mgr_abort_sfs_by_id(struct unf_lport *lport, - struct unf_rport *rport, u32 sid, u32 did); -void unf_cm_free_xchg(void *lport, void *xchg); -void *unf_cm_get_free_xchg(void *lport, u32 xchg_type); -void *unf_cm_lookup_xchg_by_tag(void *lport, u16 hot_pool_tag); -void unf_release_esgls(struct unf_xchg *xchg); -void unf_show_all_xchg(struct unf_lport *lport, struct unf_xchg_mgr *xchg_mgr); -void unf_destroy_dirty_xchg(struct unf_lport *lport, bool show_only); -void unf_wake_up_scsi_task_cmnd(struct unf_lport *lport); -void unf_set_hot_pool_wait_state(struct unf_lport *lport, bool wait_state); -void unf_free_lport_all_xchg(struct unf_lport *lport); -extern u32 unf_get_up_level_cmnd_errcode(struct unf_ini_error_code *err_table, - u32 err_table_count, u32 drv_err_code); -bool unf_busy_io_completed(struct unf_lport *lport); - -#endif diff --git a/drivers/scsi/spfc/common/unf_exchg_abort.c b/drivers/scsi/spfc/common/unf_exchg_abort.c deleted file mode 100644 index 68f751be04aa..000000000000 --- a/drivers/scsi/spfc/common/unf_exchg_abort.c +++ /dev/null @@ -1,825 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_exchg_abort.h" -#include "unf_log.h" -#include "unf_common.h" -#include "unf_rport.h" -#include "unf_service.h" -#include "unf_ls.h" -#include "unf_io.h" - -void unf_cm_xchg_mgr_abort_io_by_id(struct unf_lport *lport, struct unf_rport *rport, u32 sid, - u32 did, u32 extra_io_state) -{ - /* - * for target 
session: set ABORT - * 1. R_Port remove - * 2. Send PLOGI_ACC callback - * 3. RCVD PLOGI - * 4. RCVD LOGO - */ - FC_CHECK_RETURN_VOID(lport); - - if (lport->xchg_mgr_temp.unf_xchg_mgr_io_xchg_abort) { - /* The SID/DID of the Xchg is in reverse direction in different - * phases. Therefore, the reverse direction needs to be - * considered - */ - lport->xchg_mgr_temp.unf_xchg_mgr_io_xchg_abort(lport, rport, sid, did, - extra_io_state); - lport->xchg_mgr_temp.unf_xchg_mgr_io_xchg_abort(lport, rport, did, sid, - extra_io_state); - } -} - -void unf_cm_xchg_mgr_abort_sfs_by_id(struct unf_lport *lport, - struct unf_rport *rport, u32 sid, u32 did) -{ - FC_CHECK_RETURN_VOID(lport); - - if (lport->xchg_mgr_temp.unf_xchg_mgr_sfs_xchg_abort) { - /* The SID/DID of the Xchg is in reverse direction in different - * phases, therefore, the reverse direction needs to be - * considered - */ - lport->xchg_mgr_temp.unf_xchg_mgr_sfs_xchg_abort(lport, rport, sid, did); - lport->xchg_mgr_temp.unf_xchg_mgr_sfs_xchg_abort(lport, rport, did, sid); - } -} - -void unf_cm_xchg_abort_by_lun(struct unf_lport *lport, struct unf_rport *rport, - u64 lun_id, void *xchg, bool abort_all_lun_flag) -{ - /* - * LUN Reset: set UP_ABORT tag, with: - * INI_Busy_list, IO_Wait_list, - * IO_Delay_list, IO_Delay_transfer_list - */ - void (*unf_xchg_abort_by_lun)(void *, void *, u64, void *, bool) = NULL; - - FC_CHECK_RETURN_VOID(lport); - - unf_xchg_abort_by_lun = lport->xchg_mgr_temp.unf_xchg_abort_by_lun; - if (unf_xchg_abort_by_lun) - unf_xchg_abort_by_lun((void *)lport, (void *)rport, lun_id, - xchg, abort_all_lun_flag); -} - -void unf_cm_xchg_abort_by_session(struct unf_lport *lport, struct unf_rport *rport) -{ - void (*unf_xchg_abort_by_session)(void *, void *) = NULL; - - FC_CHECK_RETURN_VOID(lport); - - unf_xchg_abort_by_session = lport->xchg_mgr_temp.unf_xchg_abort_by_session; - if (unf_xchg_abort_by_session) - unf_xchg_abort_by_session((void *)lport, (void *)rport); -} - -static void unf_xchg_abort_all_sfs_xchg(struct unf_lport *lport, bool clean) -{ - struct unf_xchg_hot_pool *hot_pool = NULL; - struct list_head *xchg_node = NULL; - struct list_head *next_xchg_node = NULL; - struct unf_xchg *xchg = NULL; - ulong pool_lock_falgs = 0; - ulong xchg_lock_flags = 0; - u32 i = 0; - - FC_CHECK_RETURN_VOID(lport); - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - hot_pool = unf_get_hot_pool_by_lport(lport, i); - if (unlikely(!hot_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, - UNF_MAJOR, "Port(0x%x) Hot Pool is NULL.", lport->port_id); - - continue; - } - - if (!clean) { - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, pool_lock_falgs); - - /* Clearing the SFS_Busy_list Exchange Resource */ - list_for_each_safe(xchg_node, next_xchg_node, &hot_pool->sfs_busylist) { - xchg = list_entry(xchg_node, struct unf_xchg, list_xchg_entry); - spin_lock_irqsave(&xchg->xchg_state_lock, xchg_lock_flags); - if (atomic_read(&xchg->ref_cnt) > 0) - xchg->io_state |= TGT_IO_STATE_ABORT; - - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_lock_flags); - } - - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, pool_lock_falgs); - } else { - continue; - } - } -} - -static void unf_xchg_abort_ini_io_xchg(struct unf_lport *lport, bool clean) -{ - /* Clean L_Port/V_Port Link Down I/O: Abort */ - struct unf_xchg_hot_pool *hot_pool = NULL; - struct list_head *xchg_node = NULL; - struct list_head *next_xchg_node = NULL; - struct unf_xchg *xchg = NULL; - ulong pool_lock_falgs = 0; - ulong xchg_lock_flags = 0; - u32 io_state = 0; - u32 i = 0; - - 
FC_CHECK_RETURN_VOID(lport); - - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - hot_pool = unf_get_hot_pool_by_lport(lport, i); - if (unlikely(!hot_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) hot pool is NULL", - lport->port_id); - - continue; - } - - if (!clean) { - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, pool_lock_falgs); - - /* 1. Abort INI_Busy_List IO */ - list_for_each_safe(xchg_node, next_xchg_node, &hot_pool->ini_busylist) { - xchg = list_entry(xchg_node, struct unf_xchg, list_xchg_entry); - spin_lock_irqsave(&xchg->xchg_state_lock, xchg_lock_flags); - if (atomic_read(&xchg->ref_cnt) > 0) - xchg->io_state |= INI_IO_STATE_DRABORT | io_state; - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_lock_flags); - } - - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, pool_lock_falgs); - } else { - /* Do nothing, just return */ - continue; - } - } -} - -void unf_xchg_abort_all_xchg(void *lport, u32 xchg_type, bool clean) -{ - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VOID(lport); - unf_lport = (struct unf_lport *)lport; - - switch (xchg_type) { - case UNF_XCHG_TYPE_SFS: - unf_xchg_abort_all_sfs_xchg(unf_lport, clean); - break; - /* Clean L_Port/V_Port Link Down I/O: Abort */ - case UNF_XCHG_TYPE_INI: - unf_xchg_abort_ini_io_xchg(unf_lport, clean); - break; - default: - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) unknown exch type(0x%x)", - unf_lport->port_id, xchg_type); - break; - } -} - -static void unf_xchg_abort_ini_send_tm_cmd(void *lport, void *rport, u64 lun_id) -{ - /* - * LUN Reset: set UP_ABORT tag, with: - * INI_Busy_list, IO_Wait_list, - * IO_Delay_list, IO_Delay_transfer_list - */ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_xchg_hot_pool *hot_pool = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - struct unf_xchg *xchg = NULL; - ulong flags = 0; - ulong xchg_flag = 0; - u32 i = 0; - u64 raw_lun_id = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - unf_lport = ((struct unf_lport *)lport)->root_lport; - FC_CHECK_RETURN_VOID(unf_lport); - unf_rport = (struct unf_rport *)rport; - - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - hot_pool = unf_get_hot_pool_by_lport(unf_lport, i); - if (unlikely(!hot_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) hot pool is NULL", - unf_lport->port_id); - continue; - } - - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, flags); - - /* 1. 
for each exchange from busy list */ - list_for_each_safe(node, next_node, &hot_pool->ini_busylist) { - xchg = list_entry(node, struct unf_xchg, list_xchg_entry); - - raw_lun_id = *(u64 *)(xchg->fcp_cmnd.lun) >> UNF_SHIFT_16 & - UNF_RAW_LUN_ID_MASK; - if (lun_id == raw_lun_id && unf_rport == xchg->rport) { - spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag); - xchg->io_state |= INI_IO_STATE_TMF_ABORT; - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flag); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]Exchange(%p) state(0x%x) S_ID(0x%x) D_ID(0x%x) tag(0x%x) abort by TMF CMD", - xchg, xchg->io_state, - ((struct unf_lport *)lport)->nport_id, - unf_rport->nport_id, xchg->hotpooltag); - } - } - - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags); - } -} - -static void unf_xchg_abort_ini_tmf_target_reset(void *lport, void *rport) -{ - /* - * LUN Reset: set UP_ABORT tag, with: - * INI_Busy_list, IO_Wait_list, - * IO_Delay_list, IO_Delay_transfer_list - */ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_xchg_hot_pool *hot_pool = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - struct unf_xchg *xchg = NULL; - ulong flags = 0; - ulong xchg_flag = 0; - u32 i = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - unf_lport = ((struct unf_lport *)lport)->root_lport; - FC_CHECK_RETURN_VOID(unf_lport); - unf_rport = (struct unf_rport *)rport; - - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - hot_pool = unf_get_hot_pool_by_lport(unf_lport, i); - if (unlikely(!hot_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) hot pool is NULL", - unf_lport->port_id); - continue; - } - - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, flags); - - /* 1. 
for each exchange from busy_list */ - list_for_each_safe(node, next_node, &hot_pool->ini_busylist) { - xchg = list_entry(node, struct unf_xchg, list_xchg_entry); - if (unf_rport == xchg->rport) { - spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag); - xchg->io_state |= INI_IO_STATE_TMF_ABORT; - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flag); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]Exchange(%p) state(0x%x) S_ID(0x%x) D_ID(0x%x) tag(0x%x) abort by TMF CMD", - xchg, xchg->io_state, unf_lport->nport_id, - unf_rport->nport_id, xchg->hotpooltag); - } - } - - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags); - } -} - -void unf_xchg_abort_by_lun(void *lport, void *rport, u64 lun_id, void *xchg, - bool abort_all_lun_flag) -{ - /* ABORT: set UP_ABORT tag for target LUN I/O */ - struct unf_xchg *tm_xchg = (struct unf_xchg *)xchg; - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[event]Port(0x%x) LUN_ID(0x%llx) TM_EXCH(0x%p) flag(%d)", - ((struct unf_lport *)lport)->port_id, lun_id, xchg, - abort_all_lun_flag); - - /* for INI Mode */ - if (!tm_xchg) { - /* - * LUN Reset: set UP_ABORT tag, with: - * INI_Busy_list, IO_Wait_list, - * IO_Delay_list, IO_Delay_transfer_list - */ - unf_xchg_abort_ini_send_tm_cmd(lport, rport, lun_id); - - return; - } -} - -void unf_xchg_abort_by_session(void *lport, void *rport) -{ - /* - * LUN Reset: set UP_ABORT tag, with: - * INI_Busy_list, IO_Wait_list, - * IO_Delay_list, IO_Delay_transfer_list - */ - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[event]Port(0x%x) Rport(0x%x) start session reset with TMF", - ((struct unf_lport *)lport)->port_id, ((struct unf_rport *)rport)->nport_id); - - unf_xchg_abort_ini_tmf_target_reset(lport, rport); -} - -void unf_xchg_up_abort_io_by_scsi_id(void *lport, u32 scsi_id) -{ - struct unf_lport *unf_lport = NULL; - struct unf_xchg_hot_pool *hot_pool = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - struct unf_xchg *xchg = NULL; - ulong flags = 0; - ulong xchg_flag = 0; - u32 i; - u32 io_abort_flag = INI_IO_STATE_UPABORT | INI_IO_STATE_UPSEND_ERR | - INI_IO_STATE_TMF_ABORT; - - FC_CHECK_RETURN_VOID(lport); - - unf_lport = ((struct unf_lport *)lport)->root_lport; - FC_CHECK_RETURN_VOID(unf_lport); - - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - hot_pool = unf_get_hot_pool_by_lport(unf_lport, i); - if (unlikely(!hot_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) hot pool is NULL", - unf_lport->port_id); - continue; - } - - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, flags); - - /* 1. for each exchange from busy_list */ - list_for_each_safe(node, next_node, &hot_pool->ini_busylist) { - xchg = list_entry(node, struct unf_xchg, list_xchg_entry); - spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag); - if (lport == xchg->lport && scsi_id == xchg->scsi_id && - !(xchg->io_state & io_abort_flag)) { - xchg->io_state |= INI_IO_STATE_UPABORT; - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flag); - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]Exchange(%p) scsi_cmd(0x%p) state(0x%x) scsi_id(0x%x) tag(0x%x) upabort by scsi id", - xchg, xchg->scsi_cmnd_info.scsi_cmnd, - xchg->io_state, scsi_id, xchg->hotpooltag); - } else { - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flag); - } - } - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags); - } -} - -static void unf_ini_busy_io_xchg_abort(void *xchg_hot_pool, void *rport, - u32 sid, u32 did, u32 extra_io_state) -{ - /* - * for target session: Set (DRV) ABORT - * 1. R_Port remove - * 2. 
Send PLOGI_ACC callback - * 3. RCVD PLOGI - * 4. RCVD LOGO - */ - struct unf_xchg_hot_pool *hot_pool = NULL; - struct unf_xchg *xchg = NULL; - struct list_head *xchg_node = NULL; - struct list_head *next_xchg_node = NULL; - struct unf_rport *unf_rport = NULL; - ulong xchg_lock_flags = 0; - - unf_rport = (struct unf_rport *)rport; - hot_pool = (struct unf_xchg_hot_pool *)xchg_hot_pool; - - /* ABORT INI IO: INI_BUSY_LIST */ - list_for_each_safe(xchg_node, next_xchg_node, &hot_pool->ini_busylist) { - xchg = list_entry(xchg_node, struct unf_xchg, list_xchg_entry); - - spin_lock_irqsave(&xchg->xchg_state_lock, xchg_lock_flags); - if (did == xchg->did && sid == xchg->sid && - unf_rport == xchg->rport && - (atomic_read(&xchg->ref_cnt) > 0)) { - xchg->scsi_cmnd_info.result = UNF_SCSI_HOST(DID_IMM_RETRY); - xchg->io_state |= INI_IO_STATE_DRABORT; - xchg->io_state |= extra_io_state; - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]Abort INI:0x%p---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.", - xchg, (u32)xchg->hotpooltag, (u32)xchg->xchg_type, - (u32)xchg->oxid, (u32)xchg->rxid, - (u32)xchg->sid, (u32)xchg->did, (u32)xchg->io_state, - atomic_read(&xchg->ref_cnt), xchg->alloc_jif); - } - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_lock_flags); - } -} - -void unf_xchg_mgr_io_xchg_abort(void *lport, void *rport, u32 sid, u32 did, u32 extra_io_state) -{ - /* - * for target session: set ABORT - * 1. R_Port remove - * 2. Send PLOGI_ACC callback - * 3. RCVD PLOGI - * 4. RCVD LOGO - */ - struct unf_xchg_hot_pool *hot_pool = NULL; - struct unf_lport *unf_lport = NULL; - ulong pool_lock_falgs = 0; - u32 i = 0; - - FC_CHECK_RETURN_VOID(lport); - unf_lport = ((struct unf_lport *)lport)->root_lport; - FC_CHECK_RETURN_VOID(unf_lport); - - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - hot_pool = unf_get_hot_pool_by_lport(unf_lport, i); - if (unlikely(!hot_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) hot pool is NULL", - unf_lport->port_id); - - continue; - } - - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, pool_lock_falgs); - - /* 1. 
Clear INI (session) IO: INI Mode */ - unf_ini_busy_io_xchg_abort(hot_pool, rport, sid, did, extra_io_state); - - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, pool_lock_falgs); - } -} - -void unf_xchg_mgr_sfs_xchg_abort(void *lport, void *rport, u32 sid, u32 did) -{ - struct unf_xchg_hot_pool *hot_pool = NULL; - struct list_head *xchg_node = NULL; - struct list_head *next_xchg_node = NULL; - struct unf_xchg *xchg = NULL; - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - ulong pool_lock_falgs = 0; - ulong xchg_lock_flags = 0; - u32 i = 0; - - FC_CHECK_RETURN_VOID(lport); - - unf_lport = ((struct unf_lport *)lport)->root_lport; - FC_CHECK_RETURN_VOID(unf_lport); - - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - hot_pool = unf_get_hot_pool_by_lport(unf_lport, i); - if (!hot_pool) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, - UNF_MAJOR, "Port(0x%x) Hot Pool is NULL.", - unf_lport->port_id); - - continue; - } - - unf_rport = (struct unf_rport *)rport; - - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, pool_lock_falgs); - - /* Clear the SFS exchange of the corresponding connection */ - list_for_each_safe(xchg_node, next_xchg_node, &hot_pool->sfs_busylist) { - xchg = list_entry(xchg_node, struct unf_xchg, list_xchg_entry); - - spin_lock_irqsave(&xchg->xchg_state_lock, xchg_lock_flags); - if (did == xchg->did && sid == xchg->sid && - unf_rport == xchg->rport && (atomic_read(&xchg->ref_cnt) > 0)) { - xchg->io_state |= TGT_IO_STATE_ABORT; - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "Abort SFS:0x%p---0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----0x%x----%llu.", - xchg, (u32)xchg->hotpooltag, (u32)xchg->xchg_type, - (u32)xchg->oxid, (u32)xchg->rxid, (u32)xchg->sid, - (u32)xchg->did, (u32)xchg->io_state, - atomic_read(&xchg->ref_cnt), xchg->alloc_jif); - } - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_lock_flags); - } - - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, pool_lock_falgs); - } -} - -static void unf_fc_wait_abts_complete(struct unf_lport *lport, struct unf_xchg *xchg) -{ - struct unf_lport *unf_lport = lport; - struct unf_scsi_cmnd scsi_cmnd = {0}; - ulong flag = 0; - u32 time_out_value = 2000; - struct unf_rport_scsi_id_image *scsi_image_table = NULL; - u32 io_result; - - scsi_cmnd.scsi_id = xchg->scsi_cmnd_info.scsi_id; - scsi_cmnd.upper_cmnd = xchg->scsi_cmnd_info.scsi_cmnd; - scsi_cmnd.done = xchg->scsi_cmnd_info.done; - scsi_image_table = &unf_lport->rport_scsi_table; - - if (down_timeout(&xchg->task_sema, (s64)msecs_to_jiffies(time_out_value))) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) recv abts marker timeout,Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x)", - unf_lport->port_id, xchg, xchg->oxid, xchg->rxid); - goto ABTS_FIAILED; - } - - spin_lock_irqsave(&xchg->xchg_state_lock, flag); - if (xchg->ucode_abts_state == UNF_IO_SUCCESS || - xchg->scsi_cmnd_info.result == UNF_IO_ABORT_PORT_REMOVING) { - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]Port(0x%x) Send ABTS succeed and recv marker Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x) marker status(0x%x)", - unf_lport->port_id, xchg, xchg->oxid, xchg->rxid, - xchg->ucode_abts_state); - io_result = DID_BUS_BUSY; - UNF_IO_RESULT_CNT(scsi_image_table, scsi_cmnd.scsi_id, io_result); - unf_complete_cmnd(&scsi_cmnd, io_result << UNF_SHIFT_16); - return; - } - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Port(0x%x) send ABTS failed. 
Exch(0x%p) hot_tag(0x%x) ret(0x%x) xchg->io_state (0x%x)", - unf_lport->port_id, xchg, xchg->hotpooltag, - xchg->scsi_cmnd_info.result, xchg->io_state); - goto ABTS_FIAILED; - -ABTS_FIAILED: - unf_lport->xchg_mgr_temp.unf_xchg_cancel_timer((void *)xchg); - spin_lock_irqsave(&xchg->xchg_state_lock, flag); - xchg->io_state &= ~INI_IO_STATE_UPABORT; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); -} - -void unf_fc_abort_time_out_cmnd(struct unf_lport *lport, struct unf_xchg *xchg) -{ - struct unf_lport *unf_lport = lport; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(xchg); - - spin_lock_irqsave(&xchg->xchg_state_lock, flag); - if (xchg->io_state & INI_IO_STATE_UPABORT) { - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "LPort(0x%x) xchange(0x%p) OX_ID(0x%x), RX_ID(0x%x) Cmdsn(0x%lx) has been aborted.", - unf_lport->port_id, xchg, xchg->oxid, - xchg->rxid, (ulong)xchg->cmnd_sn); - return; - } - xchg->io_state |= INI_IO_STATE_UPABORT; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_KEVENT, - "LPort(0x%x) exchg(0x%p) OX_ID(0x%x) RX_ID(0x%x) Cmdsn(0x%lx) timeout abort it", - unf_lport->port_id, xchg, xchg->oxid, xchg->rxid, (ulong)xchg->cmnd_sn); - - unf_lport->xchg_mgr_temp.unf_xchg_add_timer((void *)xchg, - (ulong)UNF_WAIT_ABTS_RSP_TIMEOUT, UNF_TIMER_TYPE_INI_ABTS); - - sema_init(&xchg->task_sema, 0); - - if (unf_send_abts(unf_lport, xchg) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "LPort(0x%x) send ABTS, Send ABTS unsuccessful. Exchange OX_ID(0x%x), RX_ID(0x%x).", - unf_lport->port_id, xchg->oxid, xchg->rxid); - unf_lport->xchg_mgr_temp.unf_xchg_cancel_timer((void *)xchg); - spin_lock_irqsave(&xchg->xchg_state_lock, flag); - xchg->io_state &= ~INI_IO_STATE_UPABORT; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); - return; - } - unf_fc_wait_abts_complete(unf_lport, xchg); -} - -static void unf_fc_ini_io_rec_wait_time_out(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ - ulong time_out = 0; - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) RPort(0x%x) Exch(0x%p) Rec timeout exchange OX_ID(0x%x) RX_ID(0x%x) state(0x%x)", - lport->port_id, rport->nport_id, xchg, xchg->oxid, - xchg->rxid, xchg->io_state); - - if (xchg->rport_bind_jifs == rport->rport_alloc_jifs) { - unf_send_rec(lport, rport, xchg); - - if (xchg->scsi_cmnd_info.abort_time_out > 0) { - time_out = (xchg->scsi_cmnd_info.abort_time_out > UNF_REC_TOV) ? 
- (xchg->scsi_cmnd_info.abort_time_out - UNF_REC_TOV) : 0; - if (time_out > 0) { - lport->xchg_mgr_temp.unf_xchg_add_timer((void *)xchg, time_out, - UNF_TIMER_TYPE_REQ_IO); - } else { - unf_fc_abort_time_out_cmnd(lport, xchg); - } - } - } -} - -static void unf_fc_ini_send_abts_time_out(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ - if (xchg->rport_bind_jifs == rport->rport_alloc_jifs && - xchg->rport_bind_jifs != INVALID_VALUE64) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) RPort(0x%x) Exch(0x%p) first time to send abts timeout, retry again OX_ID(0x%x) RX_ID(0x%x) HotTag(0x%x) state(0x%x)", - lport->port_id, rport->nport_id, xchg, xchg->oxid, - xchg->rxid, xchg->hotpooltag, xchg->io_state); - - lport->xchg_mgr_temp.unf_xchg_add_timer((void *)xchg, - (ulong)UNF_WAIT_ABTS_RSP_TIMEOUT, UNF_TIMER_TYPE_INI_ABTS); - - if (unf_send_abts(lport, xchg) != RETURN_OK) { - lport->xchg_mgr_temp.unf_xchg_cancel_timer((void *)xchg); - - unf_abts_timeout_recovery_default(rport, xchg); - - unf_cm_free_xchg(lport, xchg); - } - } else { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) RPort(0x%x) Exch(0x%p) rport is invalid, exchg rport jiff(0x%llx 0x%llx), free exchange OX_ID(0x%x) RX_ID(0x%x) state(0x%x)", - lport->port_id, rport->nport_id, xchg, - xchg->rport_bind_jifs, rport->rport_alloc_jifs, - xchg->oxid, xchg->rxid, xchg->io_state); - - unf_cm_free_xchg(lport, xchg); - } -} - -void unf_fc_ini_io_xchg_time_out(struct work_struct *work) -{ - struct unf_xchg *xchg = NULL; - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - ulong flags = 0; - u32 ret = UNF_RETURN_ERROR; - u32 port_valid_flag = 0; - - xchg = container_of(work, struct unf_xchg, timeout_work.work); - FC_CHECK_RETURN_VOID(xchg); - - ret = unf_xchg_ref_inc(xchg, INI_IO_TIMEOUT); - FC_CHECK_RETURN_VOID(ret == RETURN_OK); - - unf_lport = xchg->lport; - unf_rport = xchg->rport; - - port_valid_flag = (!unf_lport) || (!unf_rport); - if (port_valid_flag) { - unf_xchg_ref_dec(xchg, INI_IO_TIMEOUT); - unf_xchg_ref_dec(xchg, INI_IO_TIMEOUT); - return; - } - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - /* 1. 
for Send RRQ failed Timer timeout */ - if (INI_IO_STATE_RRQSEND_ERR & xchg->io_state) { - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[info]LPort(0x%x) RPort(0x%x) Exch(0x%p) had wait enough time for RRQ send failed OX_ID(0x%x) RX_ID(0x%x) state(0x%x)", - unf_lport->port_id, unf_rport->nport_id, xchg, - xchg->oxid, xchg->rxid, xchg->io_state); - unf_notify_chip_free_xid(xchg); - unf_cm_free_xchg(unf_lport, xchg); - } - /* Second ABTS timeout and enter LOGO process */ - else if ((INI_IO_STATE_ABORT_TIMEOUT & xchg->io_state) && - (!(ABTS_RESPONSE_RECEIVED & xchg->abts_state))) { - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) RPort(0x%x) Exch(0x%p) had wait enough time for second abts send OX_ID(0x%x) RX_ID(0x%x) state(0x%x)", - unf_lport->port_id, unf_rport->nport_id, xchg, - xchg->oxid, xchg->rxid, xchg->io_state); - unf_abts_timeout_recovery_default(unf_rport, xchg); - unf_cm_free_xchg(unf_lport, xchg); - } - /* First time to send ABTS, timeout and retry to send ABTS again */ - else if ((INI_IO_STATE_UPABORT & xchg->io_state) && - (!(ABTS_RESPONSE_RECEIVED & xchg->abts_state))) { - xchg->io_state |= INI_IO_STATE_ABORT_TIMEOUT; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - unf_fc_ini_send_abts_time_out(unf_lport, unf_rport, xchg); - } - /* 3. IO_DONE */ - else if ((INI_IO_STATE_DONE & xchg->io_state) && - (ABTS_RESPONSE_RECEIVED & xchg->abts_state)) { - /* - * for IO_DONE: - * 1. INI ABTS first timer time out - * 2. INI RCVD ABTS Response - * 3. Normal case for I/O Done - */ - /* Send ABTS & RCVD RSP & no timeout */ - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - if (unf_send_rrq(unf_lport, unf_rport, xchg) == RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]LPort(0x%x) send RRQ succeed to RPort(0x%x) Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x) state(0x%x)", - unf_lport->port_id, unf_rport->nport_id, xchg, - xchg->oxid, xchg->rxid, xchg->io_state); - } else { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]LPort(0x%x) can't send RRQ to RPort(0x%x) Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x) state(0x%x)", - unf_lport->port_id, unf_rport->nport_id, xchg, - xchg->oxid, xchg->rxid, xchg->io_state); - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - xchg->io_state |= INI_IO_STATE_RRQSEND_ERR; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - unf_lport->xchg_mgr_temp.unf_xchg_add_timer((void *)xchg, - (ulong)UNF_WRITE_RRQ_SENDERR_INTERVAL, UNF_TIMER_TYPE_INI_IO); - } - } else if (INI_IO_STATE_REC_TIMEOUT_WAIT & xchg->io_state) { - xchg->io_state &= ~INI_IO_STATE_REC_TIMEOUT_WAIT; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - unf_fc_ini_io_rec_wait_time_out(unf_lport, unf_rport, xchg); - } else { - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - unf_fc_abort_time_out_cmnd(unf_lport, xchg); - } - - unf_xchg_ref_dec(xchg, INI_IO_TIMEOUT); - unf_xchg_ref_dec(xchg, INI_IO_TIMEOUT); -} - -void unf_sfs_xchg_time_out(struct work_struct *work) -{ - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - ulong flags = 0; - - FC_CHECK_RETURN_VOID(work); - xchg = container_of(work, struct unf_xchg, timeout_work.work); - FC_CHECK_RETURN_VOID(xchg); - - ret = unf_xchg_ref_inc(xchg, SFS_TIMEOUT); - FC_CHECK_RETURN_VOID(ret == RETURN_OK); - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - unf_lport = xchg->lport; - unf_rport = xchg->rport; - 
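The ABTS path above arms xchg->task_sema with sema_init(..., 0), sends the abort, and then blocks in down_timeout() until (presumably) the marker callback releases the semaphore or the window expires. A reduced sketch of that handshake; the demo_* names and the timeout value are illustrative:

    #include <linux/semaphore.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    /* Arm the semaphore, issue the request elsewhere, then wait with a bound. */
    static int demo_send_and_wait(struct semaphore *done, unsigned int timeout_ms)
    {
        sema_init(done, 0);                     /* completion not signalled yet */

        /* ... hand the request to hardware; its callback is expected to call up(done) ... */

        if (down_timeout(done, msecs_to_jiffies(timeout_ms)))
            return -ETIMEDOUT;                  /* no marker within the window */
        return 0;
    }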
spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - unf_xchg_ref_dec(xchg, SFS_TIMEOUT); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]SFS Exch(%p) Cmnd(0x%x) IO Exch(0x%p) Sid_Did(0x%x:0x%x) HotTag(0x%x) State(0x%x) Timeout.", - xchg, xchg->cmnd_code, xchg->io_xchg, xchg->sid, xchg->did, - xchg->hotpooltag, xchg->io_state); - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - if ((xchg->io_state & TGT_IO_STATE_ABORT) && - xchg->cmnd_code != ELS_RRQ && xchg->cmnd_code != ELS_LOGO) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "SFS Exch(0x%p) Cmnd(0x%x) Hot Pool Tag(0x%x) timeout, but aborted, no need to handle.", - xchg, xchg->cmnd_code, xchg->hotpooltag); - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - unf_xchg_ref_dec(xchg, SFS_TIMEOUT); - unf_xchg_ref_dec(xchg, SFS_TIMEOUT); - - return; - } - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - /* The sfs times out. If the sfs is ELS reply, - * go to UNF_RPortErrorRecovery/unf_lport_error_recovery. - * Otherwise, go to the corresponding obCallback. - */ - if (UNF_XCHG_IS_ELS_REPLY(xchg) && unf_rport) { - if (unf_rport->nport_id >= UNF_FC_FID_DOM_MGR) - unf_lport_error_recovery(unf_lport); - else - unf_rport_error_recovery(unf_rport); - - } else if (xchg->ob_callback) { - xchg->ob_callback(xchg); - } else { - /* Do nothing */ - } - unf_notify_chip_free_xid(xchg); - unf_xchg_ref_dec(xchg, SFS_TIMEOUT); - unf_xchg_ref_dec(xchg, SFS_TIMEOUT); -} diff --git a/drivers/scsi/spfc/common/unf_exchg_abort.h b/drivers/scsi/spfc/common/unf_exchg_abort.h deleted file mode 100644 index 75b5a1bab733..000000000000 --- a/drivers/scsi/spfc/common/unf_exchg_abort.h +++ /dev/null @@ -1,23 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_EXCHG_ABORT_H -#define UNF_EXCHG_ABORT_H - -#include "unf_type.h" -#include "unf_exchg.h" - -#define UNF_RAW_LUN_ID_MASK 0x000000000000ffff - -void unf_xchg_abort_by_lun(void *lport, void *rport, u64 lun_id, void *tm_xchg, - bool abort_all_lun_flag); -void unf_xchg_abort_by_session(void *lport, void *rport); -void unf_xchg_mgr_io_xchg_abort(void *lport, void *rport, u32 sid, u32 did, - u32 extra_io_state); -void unf_xchg_mgr_sfs_xchg_abort(void *lport, void *rport, u32 sid, u32 did); -void unf_xchg_abort_all_xchg(void *lport, u32 xchg_type, bool clean); -void unf_fc_abort_time_out_cmnd(struct unf_lport *lport, struct unf_xchg *xchg); -void unf_fc_ini_io_xchg_time_out(struct work_struct *work); -void unf_sfs_xchg_time_out(struct work_struct *work); -void unf_xchg_up_abort_io_by_scsi_id(void *lport, u32 scsi_id); -#endif diff --git a/drivers/scsi/spfc/common/unf_fcstruct.h b/drivers/scsi/spfc/common/unf_fcstruct.h deleted file mode 100644 index d6eb8592994b..000000000000 --- a/drivers/scsi/spfc/common/unf_fcstruct.h +++ /dev/null @@ -1,459 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_FCSTRUCT_H -#define UNF_FCSTRUCT_H - -#include "unf_type.h" -#include "unf_scsi_common.h" - -#define FC_RCTL_BLS 0x80000000 - -/* - * * R_CTL Basic Link Data defines - */ - -#define FC_RCTL_BLS_ACC (FC_RCTL_BLS | 0x04000000) -#define FC_RCTL_BLS_RJT (FC_RCTL_BLS | 0x05000000) - -/* - * * BA_RJT reason code defines - */ -#define FCXLS_BA_RJT_LOGICAL_ERROR 0x00030000 - -/* - * * BA_RJT code explanation - */ - -#define FCXLS_LS_RJT_INVALID_OXID_RXID 0x00001700 - -/* - * * ELS ACC - */ -struct unf_els_acc { - struct unf_fc_head frame_hdr; - u32 cmnd; -}; - -/* - * * ELS 
RJT - */ -struct unf_els_rjt { - struct unf_fc_head frame_hdr; - u32 cmnd; - u32 reason_code; -}; - -/* - * * FLOGI payload, - * * FC-LS-2 FLOGI, PLOGI, FDISC or LS_ACC Payload - */ -struct unf_flogi_fdisc_payload { - u32 cmnd; - struct unf_fabric_parm fabric_parms; -}; - -/* - * * Flogi and Flogi accept frames. They are the same structure - */ -struct unf_flogi_fdisc_acc { - struct unf_fc_head frame_hdr; - struct unf_flogi_fdisc_payload flogi_payload; -}; - -/* - * * Fdisc and Fdisc accept frames. They are the same structure - */ - -struct unf_fdisc_acc { - struct unf_fc_head frame_hdr; - struct unf_flogi_fdisc_payload fdisc_payload; -}; - -/* - * * PLOGI payload - */ -struct unf_plogi_payload { - u32 cmnd; - struct unf_lgn_parm stparms; -}; - -/* - *Plogi, Plogi accept, Pdisc and Pdisc accept frames. They are all the same - *structure. - */ -struct unf_plogi_pdisc { - struct unf_fc_head frame_hdr; - struct unf_plogi_payload payload; -}; - -/* - * * LOGO logout link service requests invalidation of service parameters and - * * port name. - * * see FC-PH 4.3 Section 21.4.8 - */ -struct unf_logo_payload { - u32 cmnd; - u32 nport_id; - u32 high_port_name; - u32 low_port_name; -}; - -/* - * * payload to hold LOGO command - */ -struct unf_logo { - struct unf_fc_head frame_hdr; - struct unf_logo_payload payload; -}; - -/* - * * payload for ECHO command, refer to FC-LS-2 4.2.4 - */ -struct unf_echo_payload { - u32 cmnd; -#define UNF_FC_ECHO_PAYLOAD_LENGTH 255 /* Length in words */ - u32 data[UNF_FC_ECHO_PAYLOAD_LENGTH]; -}; - -struct unf_echo { - struct unf_fc_head frame_hdr; - struct unf_echo_payload *echo_pld; - dma_addr_t phy_echo_addr; -}; - -#define UNF_PRLI_SIRT_EXTRA_SIZE 12 - -/* - * * payload for PRLI and PRLO - */ -struct unf_prli_payload { - u32 cmnd; -#define UNF_FC_PRLI_PAYLOAD_LENGTH 7 /* Length in words */ - u32 parms[UNF_FC_PRLI_PAYLOAD_LENGTH]; -}; - -/* - * * FCHS structure with payload - */ -struct unf_prli_prlo { - struct unf_fc_head frame_hdr; - struct unf_prli_payload payload; -}; - -struct unf_adisc_payload { - u32 cmnd; - u32 hard_address; - u32 high_port_name; - u32 low_port_name; - u32 high_node_name; - u32 low_node_name; - u32 nport_id; -}; - -/* - * * FCHS structure with payload - */ -struct unf_adisc { - struct unf_fc_head frame_hdr; /* FCHS structure */ - struct unf_adisc_payload - adisc_payl; /* Payload data containing ADISC info - */ -}; - -/* - * * RLS payload - */ -struct unf_rls_payload { - u32 cmnd; - u32 nport_id; /* in litle endian format */ -}; - -/* - * * RLS - */ -struct unf_rls { - struct unf_fc_head frame_hdr; /* FCHS structure */ - struct unf_rls_payload rls; /* payload data containing the RLS info */ -}; - -/* - * * RLS accept payload - */ -struct unf_rls_acc_payload { - u32 cmnd; - u32 link_failure_count; - u32 loss_of_sync_count; - u32 loss_of_signal_count; - u32 primitive_seq_count; - u32 invalid_trans_word_count; - u32 invalid_crc_count; -}; - -/* - * * RLS accept - */ -struct unf_rls_acc { - struct unf_fc_head frame_hdr; /* FCHS structure */ - struct unf_rls_acc_payload - rls; /* payload data containing the RLS ACC info - */ -}; - -/* - * * FCHS structure with payload - */ -struct unf_rrq { - struct unf_fc_head frame_hdr; - u32 cmnd; - u32 sid; - u32 oxid_rxid; -}; - -#define UNF_SCR_PAYLOAD_CNT 2 -struct unf_scr { - struct unf_fc_head frame_hdr; - u32 payload[UNF_SCR_PAYLOAD_CNT]; -}; - -struct unf_ctiu_prem { - u32 rev_inid; - u32 gstype_gssub_options; - u32 cmnd_rsp_size; - u32 frag_reason_exp_vend; -}; - -#define UNF_FC4TYPE_CNT 8 -struct 
unf_rftid { - struct unf_fc_head frame_hdr; - struct unf_ctiu_prem ctiu_pream; - u32 nport_id; - u32 fc4_types[UNF_FC4TYPE_CNT]; -}; - -struct unf_rffid { - struct unf_fc_head frame_hdr; - struct unf_ctiu_prem ctiu_pream; - u32 nport_id; - u32 fc4_feature; -}; - -struct unf_rffid_rsp { - struct unf_fc_head frame_hdr; - struct unf_ctiu_prem ctiu_pream; -}; - -struct unf_gffid { - struct unf_fc_head frame_hdr; - struct unf_ctiu_prem ctiu_pream; - u32 nport_id; -}; - -struct unf_gffid_rsp { - struct unf_fc_head frame_hdr; - struct unf_ctiu_prem ctiu_pream; - u32 fc4_feature[32]; -}; - -struct unf_gnnid { - struct unf_fc_head frame_hdr; - struct unf_ctiu_prem ctiu_pream; - u32 nport_id; -}; - -struct unf_gnnid_rsp { - struct unf_fc_head frame_hdr; - struct unf_ctiu_prem ctiu_pream; - u32 node_name[2]; -}; - -struct unf_gpnid { - struct unf_fc_head frame_hdr; - struct unf_ctiu_prem ctiu_pream; - u32 nport_id; -}; - -struct unf_gpnid_rsp { - struct unf_fc_head frame_hdr; - struct unf_ctiu_prem ctiu_pream; - u32 port_name[2]; -}; - -struct unf_rft_rsp { - struct unf_fc_head frame_hdr; - struct unf_ctiu_prem ctiu_pream; -}; - -struct unf_ls_rjt_pld { - u32 srr_op; /* 01000000h */ - u8 vandor; - u8 reason_exp; - u8 reason; - u8 reserved; -}; - -struct unf_ls_rjt { - struct unf_fc_head frame_hdr; - struct unf_ls_rjt_pld pld; -}; - -struct unf_rec_pld { - u32 rec_cmnd; - u32 xchg_org_sid; /* bit0-bit23 */ - u16 rx_id; - u16 ox_id; -}; - -struct unf_rec { - struct unf_fc_head frame_hdr; - struct unf_rec_pld rec_pld; -}; - -struct unf_rec_acc_pld { - u32 cmnd; - u16 rx_id; - u16 ox_id; - u32 org_addr_id; /* bit0-bit23 */ - u32 rsp_addr_id; /* bit0-bit23 */ -}; - -struct unf_rec_acc { - struct unf_fc_head frame_hdr; - struct unf_rec_acc_pld payload; -}; - -struct unf_gid { - struct unf_ctiu_prem ctiu_pream; - u32 scope_type; -}; - -struct unf_gid_acc { - struct unf_fc_head frame_hdr; - struct unf_ctiu_prem ctiu_pream; -}; - -#define UNF_LOOPMAP_COUNT 128 -struct unf_loop_init { - struct unf_fc_head frame_hdr; - u32 cmnd; -#define UNF_FC_ALPA_BIT_MAP_SIZE 4 - u32 alpha_bit_map[UNF_FC_ALPA_BIT_MAP_SIZE]; -}; - -struct unf_loop_map { - struct unf_fc_head frame_hdr; - u32 cmnd; - u32 loop_map[32]; -}; - -struct unf_ctiu_rjt { - struct unf_fc_head frame_hdr; - struct unf_ctiu_prem ctiu_pream; -}; - -struct unf_gid_acc_pld { - struct unf_ctiu_prem ctiu_pream; - - u32 gid_port_id[UNF_GID_PORT_CNT]; -}; - -struct unf_gid_rsp { - struct unf_gid_acc_pld *gid_acc_pld; -}; - -struct unf_gid_req_rsp { - struct unf_fc_head frame_hdr; - struct unf_gid gid_req; - struct unf_gid_rsp gid_rsp; -}; - -/* FC-LS-2 Table 31 RSCN Payload */ -struct unf_rscn_port_id_page { - u8 port_id_port; - u8 port_id_area; - u8 port_id_domain; - - u8 addr_format : 2; - u8 event_qualifier : 4; - u8 reserved : 2; -}; - -struct unf_rscn_pld { - u32 cmnd; - struct unf_rscn_port_id_page port_id_page[UNF_RSCN_PAGE_SUM]; -}; - -struct unf_rscn { - struct unf_fc_head frame_hdr; - struct unf_rscn_pld *rscn_pld; -}; - -union unf_sfs_u { - struct { - struct unf_fc_head frame_head; - u8 data[0]; - } sfs_common; - struct unf_els_acc els_acc; - struct unf_els_rjt els_rjt; - struct unf_plogi_pdisc plogi; - struct unf_logo logo; - struct unf_echo echo; - struct unf_echo echo_acc; - struct unf_prli_prlo prli; - struct unf_prli_prlo prlo; - struct unf_rls rls; - struct unf_rls_acc rls_acc; - struct unf_plogi_pdisc pdisc; - struct unf_adisc adisc; - struct unf_rrq rrq; - struct unf_flogi_fdisc_acc flogi; - struct unf_fdisc_acc fdisc; - struct unf_scr scr; - 
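The RSCN port-ID page defined above carries the affected address as three separate bytes (port, area, domain); consumers rebuild the 24-bit N_Port_ID as (domain << 16) | (area << 8) | port, matching the page-to-N_Port_ID macro defined later in unf_gs.c. A standalone sketch with illustrative names:

    #include <linux/types.h>

    struct demo_port_id_page {
        u8 port;
        u8 area;
        u8 domain;
    };

    static u32 demo_page_to_nport_id(const struct demo_port_id_page *page)
    {
        return ((u32)page->domain << 16) | ((u32)page->area << 8) | page->port;
    }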
struct unf_rec rec; - struct unf_rec_acc rec_acc; - struct unf_ls_rjt ls_rjt; - struct unf_rscn rscn; - struct unf_gid_req_rsp get_id; - struct unf_rftid rft_id; - struct unf_rft_rsp rft_id_rsp; - struct unf_rffid rff_id; - struct unf_rffid_rsp rff_id_rsp; - struct unf_gffid gff_id; - struct unf_gffid_rsp gff_id_rsp; - struct unf_gnnid gnn_id; - struct unf_gnnid_rsp gnn_id_rsp; - struct unf_gpnid gpn_id; - struct unf_gpnid_rsp gpn_id_rsp; - struct unf_plogi_pdisc plogi_acc; - struct unf_plogi_pdisc pdisc_acc; - struct unf_adisc adisc_acc; - struct unf_prli_prlo prli_acc; - struct unf_prli_prlo prlo_acc; - struct unf_flogi_fdisc_acc flogi_acc; - struct unf_fdisc_acc fdisc_acc; - struct unf_loop_init lpi; - struct unf_loop_map loop_map; - struct unf_ctiu_rjt ctiu_rjt; -}; - -struct unf_sfs_entry { - union unf_sfs_u *fc_sfs_entry_ptr; /* Virtual addr of SFS buffer */ - u64 sfs_buff_phy_addr; /* Physical addr of SFS buffer */ - u32 sfs_buff_len; /* Length of bytes in SFS buffer */ - u32 cur_offset; -}; - -struct unf_fcp_rsp_iu_entry { - u8 *fcp_rsp_iu; - u32 fcp_sense_len; -}; - -struct unf_rjt_info { - u32 els_cmnd_code; - u32 reason_code; - u32 reason_explanation; - u8 class_mode; - u8 ucrsvd[3]; -}; - -#endif diff --git a/drivers/scsi/spfc/common/unf_gs.c b/drivers/scsi/spfc/common/unf_gs.c deleted file mode 100644 index cb5fc1a5d246..000000000000 --- a/drivers/scsi/spfc/common/unf_gs.c +++ /dev/null @@ -1,2521 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_gs.h" -#include "unf_log.h" -#include "unf_exchg.h" -#include "unf_rport.h" -#include "unf_service.h" -#include "unf_portman.h" -#include "unf_ls.h" - -static void unf_gpn_id_callback(void *lport, void *sns_port, void *xchg); -static void unf_gpn_id_ob_callback(struct unf_xchg *xchg); -static void unf_gnn_id_ob_callback(struct unf_xchg *xchg); -static void unf_scr_callback(void *lport, void *rport, void *xchg); -static void unf_scr_ob_callback(struct unf_xchg *xchg); -static void unf_gff_id_ob_callback(struct unf_xchg *xchg); -static void unf_gff_id_callback(void *lport, void *sns_port, void *xchg); -static void unf_gnn_id_callback(void *lport, void *sns_port, void *xchg); -static void unf_gid_ft_ob_callback(struct unf_xchg *xchg); -static void unf_gid_ft_callback(void *lport, void *rport, void *xchg); -static void unf_gid_pt_ob_callback(struct unf_xchg *xchg); -static void unf_gid_pt_callback(void *lport, void *rport, void *xchg); -static void unf_rft_id_ob_callback(struct unf_xchg *xchg); -static void unf_rft_id_callback(void *lport, void *rport, void *xchg); -static void unf_rff_id_callback(void *lport, void *rport, void *xchg); -static void unf_rff_id_ob_callback(struct unf_xchg *xchg); - -#define UNF_GET_DOMAIN_ID(x) (((x) & 0xFF0000) >> 16) -#define UNF_GET_AREA_ID(x) (((x) & 0x00FF00) >> 8) - -#define UNF_GID_LAST_PORT_ID 0x80 -#define UNF_GID_CONTROL(nport_id) ((nport_id) >> 24) -#define UNF_GET_PORT_OPTIONS(fc_4feature) ((fc_4feature) >> 20) - -#define UNF_SERVICE_GET_NPORTID_FORM_GID_PAGE(port_id_page) \ - (((u32)(port_id_page)->port_id_domain << 16) | \ - ((u32)(port_id_page)->port_id_area << 8) | \ - ((u32)(port_id_page)->port_id_port)) - -#define UNF_GNN_GFF_ID_RJT_REASON(rjt_reason) \ - ((UNF_CTIU_RJT_UNABLE_PERFORM == \ - ((rjt_reason) & UNF_CTIU_RJT_MASK)) && \ - ((UNF_CTIU_RJT_EXP_PORTID_NO_REG == \ - ((rjt_reason) & UNF_CTIU_RJT_EXP_MASK)) || \ - (UNF_CTIU_RJT_EXP_PORTNAME_NO_REG == \ - ((rjt_reason) & UNF_CTIU_RJT_EXP_MASK)) || \ - 
(UNF_CTIU_RJT_EXP_NODENAME_NO_REG == \ - ((rjt_reason) & UNF_CTIU_RJT_EXP_MASK)))) - -u32 unf_send_scr(struct unf_lport *lport, struct unf_rport *rport) -{ - /* after RCVD RFF_ID ACC */ - struct unf_scr *scr = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg = {0}; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, NULL, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for SCR", - lport->port_id); - - return ret; - } - - xchg->cmnd_code = ELS_SCR; - - xchg->callback = unf_scr_callback; - xchg->ob_callback = unf_scr_ob_callback; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REQ; - - scr = &fc_entry->scr; - memset(scr, 0, sizeof(struct unf_scr)); - scr->payload[ARRAY_INDEX_0] = (UNF_GS_CMND_SCR); /* SCR is 0x62 */ - scr->payload[ARRAY_INDEX_1] = (UNF_FABRIC_FULL_REG); /* Full registration */ - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: SCR send %s. Port(0x%x_0x%x)--->RPort(0x%x) with hottag(0x%x)", - (ret != RETURN_OK) ? "failed" : "succeed", lport->port_id, - lport->nport_id, rport->nport_id, xchg->hotpooltag); - - return ret; -} - -static void unf_fill_gff_id_pld(struct unf_gffid *gff_id, u32 nport_id) -{ - FC_CHECK_RETURN_VOID(gff_id); - - gff_id->ctiu_pream.rev_inid = (UNF_REV_NPORTID_INIT); - gff_id->ctiu_pream.gstype_gssub_options = (UNF_FSTYPE_OPT_INIT); - gff_id->ctiu_pream.cmnd_rsp_size = (UNF_FSTYPE_GFF_ID); - gff_id->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; - gff_id->nport_id = nport_id; -} - -static void unf_ctpass_thru_callback(void *lport, void *rport, void *xchg) -{ - struct unf_lport *unf_lport = NULL; - struct unf_gid_acc_pld *gid_acc_pld = NULL; - struct unf_xchg *unf_xchg = NULL; - union unf_sfs_u *sfs = NULL; - u32 cmnd_rsp_size = 0; - - struct send_com_trans_out *out_send = NULL; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(xchg); - - unf_lport = (struct unf_lport *)lport; - unf_xchg = (struct unf_xchg *)xchg; - sfs = unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - - gid_acc_pld = sfs->get_id.gid_rsp.gid_acc_pld; - if (!gid_acc_pld) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) CT PassThru response payload is NULL", - unf_lport->port_id); - - return; - } - - out_send = (struct send_com_trans_out *)unf_xchg->upper_ct; - - cmnd_rsp_size = (gid_acc_pld->ctiu_pream.cmnd_rsp_size); - if (UNF_CT_IU_ACCEPT == (cmnd_rsp_size & UNF_CT_IU_RSP_MASK)) { - out_send->hba_status = 0; /* HBA_STATUS_OK 0 */ - out_send->total_resp_buffer_cnt = unf_xchg->fcp_sfs_union.sfs_entry.cur_offset; - out_send->actual_resp_buffer_cnt = unf_xchg->fcp_sfs_union.sfs_entry.cur_offset; - unf_cpu_to_big_end(out_send->resp_buffer, (u32)out_send->total_resp_buffer_cnt); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x_0x%x) CT PassThru was receive len is(0x%0x)", - unf_lport->port_id, unf_lport->nport_id, - out_send->total_resp_buffer_cnt); - } else if (UNF_CT_IU_REJECT == (cmnd_rsp_size & UNF_CT_IU_RSP_MASK)) { - out_send->hba_status = 13; /* HBA_STATUS_ERROR_ELS_REJECT 13 */ - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) CT PassThru was rejected", 
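CT IU payloads travel big-endian on the wire, which is why the pass-through code around here byte-swaps the buffers with unf_cpu_to_big_end(). A minimal kernel-style equivalent of such a helper (the demo_* name is illustrative; the driver's helper converts in place rather than copying):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* Copy a CT buffer of 32-bit words into wire (big-endian) order. */
    static void demo_ct_to_wire(__be32 *dst, const u32 *src, size_t words)
    {
        size_t i;

        for (i = 0; i < words; i++)
            dst[i] = cpu_to_be32(src[i]);
    }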
- unf_lport->port_id, unf_lport->nport_id); - } else { - out_send->hba_status = 1; /* HBA_STATUS_ERROR 1 */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) CT PassThru was UNKNOWN", - unf_lport->port_id, unf_lport->nport_id); - } - - up(&unf_lport->wmi_task_sema); -} - -u32 unf_send_ctpass_thru(struct unf_lport *lport, void *buffer, u32 bufflen) -{ - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_rport *sns_port = NULL; - struct send_com_trans_in *in_send = (struct send_com_trans_in *)buffer; - struct send_com_trans_out *out_send = - (struct send_com_trans_out *)buffer; - struct unf_ctiu_prem *ctiu_pream = NULL; - struct unf_gid *gs_pld = NULL; - struct unf_frame_pkg pkg = {0}; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(buffer, UNF_RETURN_ERROR); - - ctiu_pream = (struct unf_ctiu_prem *)in_send->req_buffer; - unf_cpu_to_big_end(ctiu_pream, sizeof(struct unf_gid)); - - if (ctiu_pream->cmnd_rsp_size >> UNF_SHIFT_16 == NS_GIEL) { - sns_port = unf_get_rport_by_nport_id(lport, UNF_FC_FID_MGMT_SERV); - if (!sns_port) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) can't find SNS port", - lport->port_id); - - return UNF_RETURN_ERROR; - } - } else if (ctiu_pream->cmnd_rsp_size >> UNF_SHIFT_16 == NS_GA_NXT) { - sns_port = unf_get_rport_by_nport_id(lport, UNF_FC_FID_DIR_SERV); - if (!sns_port) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) can't find SNS port", - lport->port_id); - - return UNF_RETURN_ERROR; - } - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[info]%s cmnd(0x%x) is error:", __func__, - ctiu_pream->cmnd_rsp_size >> UNF_SHIFT_16); - - return UNF_RETURN_ERROR; - } - - xchg = unf_get_sfs_free_xchg_and_init(lport, sns_port->nport_id, sns_port, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for GFF_ID", - lport->port_id); - - return UNF_RETURN_ERROR; - } - - xchg->cmnd_code = ctiu_pream->cmnd_rsp_size >> UNF_SHIFT_16; - xchg->upper_ct = buffer; - xchg->ob_callback = NULL; - xchg->callback = unf_ctpass_thru_callback; - xchg->oxid = xchg->hotpooltag; - unf_fill_package(&pkg, xchg, sns_port); - pkg.type = UNF_PKG_GS_REQ; - xchg->fcp_sfs_union.sfs_entry.sfs_buff_len = bufflen; - gs_pld = &fc_entry->get_id.gid_req; /* GID req payload */ - memset(gs_pld, 0, sizeof(struct unf_gid)); - memcpy(gs_pld, (struct unf_gid *)in_send->req_buffer, sizeof(struct unf_gid)); - fc_entry->get_id.gid_rsp.gid_acc_pld = (struct unf_gid_acc_pld *)out_send->resp_buffer; - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - - return ret; -} - -u32 unf_send_gff_id(struct unf_lport *lport, struct unf_rport *sns_port, - u32 nport_id) -{ - struct unf_gffid *gff_id = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - - struct unf_frame_pkg pkg; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VALUE(sns_port, UNF_RETURN_ERROR); - - if (unf_is_lport_valid(lport) != RETURN_OK) - /* Lport is invalid, no retry or handle required, return ok */ - return RETURN_OK; - - unf_lport = (struct unf_lport *)lport->root_lport; - - memset(&pkg, 0, sizeof(struct unf_frame_pkg)); - - xchg = unf_get_sfs_free_xchg_and_init(lport, sns_port->nport_id, sns_port, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for GFF_ID", - lport->port_id); - - return unf_get_and_post_disc_event(lport, sns_port, 
nport_id, UNF_DISC_GET_FEATURE); - } - - xchg->cmnd_code = NS_GFF_ID; - xchg->disc_portid = nport_id; - - xchg->ob_callback = unf_gff_id_ob_callback; - xchg->callback = unf_gff_id_callback; - - unf_fill_package(&pkg, xchg, sns_port); - pkg.type = UNF_PKG_GS_REQ; - - gff_id = &fc_entry->gff_id; - memset(gff_id, 0, sizeof(struct unf_gffid)); - unf_fill_gff_id_pld(gff_id, nport_id); - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - else - atomic_dec(&unf_lport->disc.disc_thread_info.disc_contrl_size); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: GFF_ID send %s. Port(0x%x)--->RPort(0x%x). Inquire RPort(0x%x)", - (ret != RETURN_OK) ? "failed" : "succeed", lport->port_id, - sns_port->nport_id, nport_id); - - return ret; -} - -static void unf_fill_gnnid_pld(struct unf_gnnid *gnnid_pld, u32 nport_id) -{ - /* Inquiry R_Port node name from SW */ - FC_CHECK_RETURN_VOID(gnnid_pld); - - gnnid_pld->ctiu_pream.rev_inid = (UNF_REV_NPORTID_INIT); - gnnid_pld->ctiu_pream.gstype_gssub_options = (UNF_FSTYPE_OPT_INIT); - gnnid_pld->ctiu_pream.cmnd_rsp_size = (UNF_FSTYPE_GNN_ID); - gnnid_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; - - gnnid_pld->nport_id = nport_id; -} - -u32 unf_send_gnn_id(struct unf_lport *lport, struct unf_rport *sns_port, - u32 nport_id) -{ - /* from DISC stop/re-login */ - struct unf_gnnid *unf_gnnid = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VALUE(sns_port, UNF_RETURN_ERROR); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "Port(0x%x_0x%x) send gnnid to 0x%x.", lport->port_id, - lport->nport_id, nport_id); - - if (unf_is_lport_valid(lport) != RETURN_OK) - /* Lport is invalid, no retry or handle required, return ok */ - return RETURN_OK; - - unf_lport = (struct unf_lport *)lport->root_lport; - - memset(&pkg, 0, sizeof(struct unf_frame_pkg)); - - xchg = unf_get_sfs_free_xchg_and_init(lport, sns_port->nport_id, - sns_port, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) exchange can't be NULL for GNN_ID", - lport->port_id); - - return unf_get_and_post_disc_event(lport, sns_port, nport_id, - UNF_DISC_GET_NODE_NAME); - } - - xchg->cmnd_code = NS_GNN_ID; - xchg->disc_portid = nport_id; - - xchg->ob_callback = unf_gnn_id_ob_callback; - xchg->callback = unf_gnn_id_callback; - - unf_fill_package(&pkg, xchg, sns_port); - pkg.type = UNF_PKG_GS_REQ; - - unf_gnnid = &fc_entry->gnn_id; /* GNNID payload */ - memset(unf_gnnid, 0, sizeof(struct unf_gnnid)); - unf_fill_gnnid_pld(unf_gnnid, nport_id); - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - else - atomic_dec(&unf_lport->disc.disc_thread_info.disc_contrl_size); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: GNN_ID send %s. Port(0x%x_0x%x)--->RPort(0x%x) inquire Nportid(0x%x)", - (ret != RETURN_OK) ? 
"failed" : "succeed", lport->port_id, - lport->nport_id, sns_port->nport_id, nport_id); - - return ret; -} - -static void unf_fill_gpnid_pld(struct unf_gpnid *gpnid_pld, u32 nport_id) -{ - FC_CHECK_RETURN_VOID(gpnid_pld); - - gpnid_pld->ctiu_pream.rev_inid = (UNF_REV_NPORTID_INIT); - gpnid_pld->ctiu_pream.gstype_gssub_options = (UNF_FSTYPE_OPT_INIT); - gpnid_pld->ctiu_pream.cmnd_rsp_size = (UNF_FSTYPE_GPN_ID); - gpnid_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; - - /* Inquiry WWN from SW */ - gpnid_pld->nport_id = nport_id; -} - -u32 unf_send_gpn_id(struct unf_lport *lport, struct unf_rport *sns_port, - u32 nport_id) -{ - struct unf_gpnid *gpnid_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VALUE(sns_port, UNF_RETURN_ERROR); - - if (unf_is_lport_valid(lport) != RETURN_OK) - /* Lport is invalid, no retry or handle required, return ok */ - return RETURN_OK; - - unf_lport = (struct unf_lport *)lport->root_lport; - - memset(&pkg, 0, sizeof(struct unf_frame_pkg)); - - xchg = unf_get_sfs_free_xchg_and_init(lport, sns_port->nport_id, - sns_port, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for GPN_ID", - lport->port_id); - - return unf_get_and_post_disc_event(lport, sns_port, nport_id, - UNF_DISC_GET_PORT_NAME); - } - - xchg->cmnd_code = NS_GPN_ID; - xchg->disc_portid = nport_id; - - xchg->callback = unf_gpn_id_callback; - xchg->ob_callback = unf_gpn_id_ob_callback; - - unf_fill_package(&pkg, xchg, sns_port); - pkg.type = UNF_PKG_GS_REQ; - - gpnid_pld = &fc_entry->gpn_id; - memset(gpnid_pld, 0, sizeof(struct unf_gpnid)); - unf_fill_gpnid_pld(gpnid_pld, nport_id); - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - else - atomic_dec(&unf_lport->disc.disc_thread_info.disc_contrl_size); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: GPN_ID send %s. Port(0x%x)--->RPort(0x%x). Inquire RPort(0x%x)", - (ret != RETURN_OK) ? 
"failed" : "succeed", lport->port_id, - sns_port->nport_id, nport_id); - - return ret; -} - -static void unf_fill_gid_ft_pld(struct unf_gid *gid_pld) -{ - FC_CHECK_RETURN_VOID(gid_pld); - - gid_pld->ctiu_pream.rev_inid = (UNF_REV_NPORTID_INIT); - gid_pld->ctiu_pream.gstype_gssub_options = (UNF_FSTYPE_OPT_INIT); - gid_pld->ctiu_pream.cmnd_rsp_size = (UNF_FSTYPE_GID_FT); - gid_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; - - gid_pld->scope_type = (UNF_GID_FT_TYPE); -} - -u32 unf_send_gid_ft(struct unf_lport *lport, struct unf_rport *rport) -{ - struct unf_gid *gid_pld = NULL; - struct unf_gid_rsp *gid_rsp = NULL; - struct unf_gid_acc_pld *gid_acc_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg = {0}; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, - rport, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for GID_FT", - lport->port_id); - - return ret; - } - - xchg->cmnd_code = NS_GID_FT; - - xchg->ob_callback = unf_gid_ft_ob_callback; - xchg->callback = unf_gid_ft_callback; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_GS_REQ; - - gid_pld = &fc_entry->get_id.gid_req; /* GID req payload */ - unf_fill_gid_ft_pld(gid_pld); - gid_rsp = &fc_entry->get_id.gid_rsp; /* GID rsp payload */ - - gid_acc_pld = (struct unf_gid_acc_pld *)unf_get_one_big_sfs_buf(xchg); - if (!gid_acc_pld) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) allocate GID_FT response buffer failed", - lport->port_id); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - memset(gid_acc_pld, 0, sizeof(struct unf_gid_acc_pld)); - gid_rsp->gid_acc_pld = gid_acc_pld; - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: GID_FT send %s. Port(0x%x)--->RPort(0x%x)", - (ret != RETURN_OK) ? 
"failed" : "succeed", lport->port_id, - rport->nport_id); - - return ret; -} - -static void unf_fill_gid_pt_pld(struct unf_gid *gid_pld, - struct unf_lport *lport) -{ - FC_CHECK_RETURN_VOID(gid_pld); - FC_CHECK_RETURN_VOID(lport); - - gid_pld->ctiu_pream.rev_inid = (UNF_REV_NPORTID_INIT); - gid_pld->ctiu_pream.gstype_gssub_options = (UNF_FSTYPE_OPT_INIT); - gid_pld->ctiu_pream.cmnd_rsp_size = (UNF_FSTYPE_GID_PT); - gid_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; - - /* 0x7F000000 means NX_Port */ - gid_pld->scope_type = (UNF_GID_PT_TYPE); - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, gid_pld, - sizeof(struct unf_gid)); -} - -u32 unf_send_gid_pt(struct unf_lport *lport, struct unf_rport *rport) -{ - /* from DISC start */ - struct unf_gid *gid_pld = NULL; - struct unf_gid_rsp *gid_rsp = NULL; - struct unf_gid_acc_pld *gid_acc_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg = {0}; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, - rport, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for GID_PT", - lport->port_id); - - return ret; - } - - xchg->cmnd_code = NS_GID_PT; - - xchg->ob_callback = unf_gid_pt_ob_callback; - xchg->callback = unf_gid_pt_callback; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_GS_REQ; - - gid_pld = &fc_entry->get_id.gid_req; /* GID req payload */ - unf_fill_gid_pt_pld(gid_pld, lport); - gid_rsp = &fc_entry->get_id.gid_rsp; /* GID rsp payload */ - - gid_acc_pld = (struct unf_gid_acc_pld *)unf_get_one_big_sfs_buf(xchg); - if (!gid_acc_pld) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%0x) Allocate GID_PT response buffer failed", - lport->port_id); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - memset(gid_acc_pld, 0, sizeof(struct unf_gid_acc_pld)); - gid_rsp->gid_acc_pld = gid_acc_pld; - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: GID_PT send %s. Port(0x%x_0x%x)--->RPort(0x%x)", - (ret != RETURN_OK) ? 
"failed" : "succeed", lport->port_id, - lport->nport_id, rport->nport_id); - - return ret; -} - -static void unf_fill_rft_id_pld(struct unf_rftid *rftid_pld, - struct unf_lport *lport) -{ - u32 index = 1; - - FC_CHECK_RETURN_VOID(rftid_pld); - FC_CHECK_RETURN_VOID(lport); - - rftid_pld->ctiu_pream.rev_inid = (UNF_REV_NPORTID_INIT); - rftid_pld->ctiu_pream.gstype_gssub_options = (UNF_FSTYPE_OPT_INIT); - rftid_pld->ctiu_pream.cmnd_rsp_size = (UNF_FSTYPE_RFT_ID); - rftid_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; - rftid_pld->nport_id = (lport->nport_id); - rftid_pld->fc4_types[ARRAY_INDEX_0] = (UNF_FC4_SCSI_BIT8); - - for (index = ARRAY_INDEX_2; index < UNF_FC4TYPE_CNT; index++) - rftid_pld->fc4_types[index] = 0; -} - -u32 unf_send_rft_id(struct unf_lport *lport, struct unf_rport *rport) -{ - /* After PLOGI process */ - struct unf_rftid *rft_id = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - memset(&pkg, 0, sizeof(struct unf_frame_pkg)); - - xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, - rport, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for RFT_ID", - lport->port_id); - - return ret; - } - - xchg->cmnd_code = NS_RFT_ID; - - xchg->callback = unf_rft_id_callback; - xchg->ob_callback = unf_rft_id_ob_callback; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_GS_REQ; - - rft_id = &fc_entry->rft_id; - memset(rft_id, 0, sizeof(struct unf_rftid)); - unf_fill_rft_id_pld(rft_id, lport); - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: RFT_ID send %s. Port(0x%x_0x%x)--->RPort(0x%x). rport(0x%p) wwpn(0x%llx) ", - (ret != RETURN_OK) ? 
"failed" : "succeed", lport->port_id, - lport->nport_id, rport->nport_id, rport, rport->port_name); - - return ret; -} - -static void unf_fill_rff_id_pld(struct unf_rffid *rffid_pld, - struct unf_lport *lport, u32 fc4_type) -{ - FC_CHECK_RETURN_VOID(rffid_pld); - FC_CHECK_RETURN_VOID(lport); - - rffid_pld->ctiu_pream.rev_inid = (UNF_REV_NPORTID_INIT); - rffid_pld->ctiu_pream.gstype_gssub_options = (UNF_FSTYPE_OPT_INIT); - rffid_pld->ctiu_pream.cmnd_rsp_size = (UNF_FSTYPE_RFF_ID); - rffid_pld->ctiu_pream.frag_reason_exp_vend = UNF_FRAG_REASON_VENDOR; - rffid_pld->nport_id = (lport->nport_id); - rffid_pld->fc4_feature = (fc4_type | (lport->options << UNF_SHIFT_4)); -} - -u32 unf_send_rff_id(struct unf_lport *lport, struct unf_rport *rport, - u32 fc4_type) -{ - /* from RFT_ID, then Send SCR */ - struct unf_rffid *rff_id = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg = {0}; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_INFO, "%s Enter", __func__); - - xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, - rport, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for RFF_ID", - lport->port_id); - - return ret; - } - - xchg->cmnd_code = NS_RFF_ID; - - xchg->callback = unf_rff_id_callback; - xchg->ob_callback = unf_rff_id_ob_callback; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_GS_REQ; - - rff_id = &fc_entry->rff_id; - memset(rff_id, 0, sizeof(struct unf_rffid)); - unf_fill_rff_id_pld(rff_id, lport, fc4_type); - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: RFF_ID feature 0x%x(10:TGT,20:INI,30:COM) send %s. Port(0x%x_0x%x)--->RPortid(0x%x) rport(0x%p)", - lport->options, (ret != RETURN_OK) ? "failed" : "succeed", - lport->port_id, lport->nport_id, rport->nport_id, rport); - - return ret; -} - -void unf_handle_init_gid_acc(struct unf_gid_acc_pld *gid_acc_pld, - struct unf_lport *lport) -{ - /* - * from SCR ACC callback - * NOTE: inquiry disc R_Port used for NPIV - */ - struct unf_disc_rport *disc_rport = NULL; - struct unf_disc *disc = NULL; - u32 ret = UNF_RETURN_ERROR; - u32 gid_port_id = 0; - u32 nport_id = 0; - u32 index = 0; - u8 control = 0; - - FC_CHECK_RETURN_VOID(gid_acc_pld); - FC_CHECK_RETURN_VOID(lport); - - /* - * 1. 
Find & Check & Get (new) R_Port from list_disc_rports_pool - * then, Add to R_Port Disc_busy_list - */ - while (index < UNF_GID_PORT_CNT) { - gid_port_id = (gid_acc_pld->gid_port_id[index]); - nport_id = UNF_NPORTID_MASK & gid_port_id; - control = UNF_GID_CONTROL(gid_port_id); - - /* for each N_Port_ID from GID_ACC payload */ - if (lport->nport_id != nport_id && nport_id != 0 && - (!unf_lookup_lport_by_nportid(lport, nport_id))) { - /* for New Port, not L_Port */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x_0x%x) get nportid(0x%x) from GID_ACC", - lport->port_id, lport->nport_id, nport_id); - - /* Get R_Port from list of RPort Disc Pool */ - disc_rport = unf_rport_get_free_and_init(lport, - UNF_PORT_TYPE_DISC, nport_id); - if (!disc_rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) can't allocate new rport(0x%x) from disc pool", - lport->port_id, lport->nport_id, - nport_id); - - index++; - continue; - } - } - - if (UNF_GID_LAST_PORT_ID == (UNF_GID_LAST_PORT_ID & control)) - break; - - index++; - } - - /* - * 2. Do port disc stop operation: - * NOTE: Do DISC & release R_Port from busy_list back to - * list_disc_rports_pool - */ - disc = &lport->disc; - if (!disc->disc_temp.unf_disc_stop) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) disc stop function is NULL", - lport->port_id, lport->nport_id); - - return; - } - - ret = disc->disc_temp.unf_disc_stop(lport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) do disc stop failed", - lport->port_id, lport->nport_id); - } -} - -u32 unf_rport_relogin(struct unf_lport *lport, u32 nport_id) -{ - /* Send GNN_ID */ - struct unf_rport *sns_port = NULL; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - /* Get SNS R_Port */ - sns_port = unf_get_rport_by_nport_id(lport, UNF_FC_FID_DIR_SERV); - if (!sns_port) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) can't find fabric Port", lport->nport_id); - - return UNF_RETURN_ERROR; - } - - /* Send GNN_ID now to SW */ - ret = unf_get_and_post_disc_event(lport, sns_port, nport_id, - UNF_DISC_GET_NODE_NAME); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", - lport->nport_id, UNF_DISC_GET_NODE_NAME, nport_id); - - /* NOTE: Continue to next stage */ - unf_rcv_gnn_id_rsp_unknown(lport, sns_port, nport_id); - } - - return ret; -} - -u32 unf_rport_check_wwn(struct unf_lport *lport, struct unf_rport *rport) -{ - /* Send GPN_ID */ - struct unf_rport *sns_port = NULL; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - /* Get SNS R_Port */ - sns_port = unf_get_rport_by_nport_id(lport, UNF_FC_FID_DIR_SERV); - if (!sns_port) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) can't find fabric Port", lport->nport_id); - - return UNF_RETURN_ERROR; - } - - /* Send GPN_ID to SW */ - ret = unf_get_and_post_disc_event(lport, sns_port, rport->nport_id, - UNF_DISC_GET_PORT_NAME); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", - lport->nport_id, UNF_DISC_GET_PORT_NAME, - rport->nport_id); - - unf_rcv_gpn_id_rsp_unknown(lport, rport->nport_id); - } - - return ret; -} - -u32 unf_handle_rscn_port_not_indisc(struct unf_lport *lport, u32 rscn_nport_id) -{ - /* RSCN Port_ID not in GID_ACC payload table: Link Down 
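Each 32-bit entry in the GID_ACC payload walked above holds a 24-bit N_Port_ID plus a control byte whose 0x80 bit flags the final entry. A self-contained sketch of that walk; the constants and the callback are illustrative, and the 24-bit mask value is an assumption:

    #include <linux/types.h>

    #define DEMO_NPORTID_MASK  0x00FFFFFFU
    #define DEMO_LAST_PORT_BIT 0x80U

    static void demo_walk_gid_acc(const u32 *entries, u32 max_entries,
                                  void (*found)(u32 nport_id))
    {
        u32 i;

        for (i = 0; i < max_entries; i++) {
            u32 nport_id = entries[i] & DEMO_NPORTID_MASK;
            u8 control = entries[i] >> 24;

            if (nport_id)
                found(nport_id);            /* e.g. allocate a disc R_Port */
            if (control & DEMO_LAST_PORT_BIT)
                break;                      /* last entry reached */
        }
    }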
*/ - struct unf_rport *unf_rport = NULL; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - /* from R_Port busy list by N_Port_ID */ - unf_rport = unf_get_rport_by_nport_id(lport, rscn_nport_id); - if (unf_rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[info]Port(0x%x) RPort(0x%x) wwpn(0x%llx) has been removed and link down it", - lport->port_id, rscn_nport_id, unf_rport->port_name); - - unf_rport_linkdown(lport, unf_rport); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) has no RPort(0x%x) and do nothing", - lport->nport_id, rscn_nport_id); - } - - return ret; -} - -u32 unf_handle_rscn_port_indisc(struct unf_lport *lport, u32 rscn_nport_id) -{ - /* Send GPN_ID or re-login(GNN_ID) */ - struct unf_rport *unf_rport = NULL; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - /* from R_Port busy list by N_Port_ID */ - unf_rport = unf_get_rport_by_nport_id(lport, rscn_nport_id); - if (unf_rport) { - /* R_Port exist: send GPN_ID */ - ret = unf_rport_check_wwn(lport, unf_rport); - } else { - if (UNF_PORT_MODE_INI == (lport->options & UNF_PORT_MODE_INI)) - /* Re-LOGIN with INI mode: Send GNN_ID */ - ret = unf_rport_relogin(lport, rscn_nport_id); - else - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) with no INI feature. Do nothing", - lport->nport_id); - } - - return ret; -} - -static u32 unf_handle_rscn_port_addr(struct unf_port_id_page *portid_page, - struct unf_gid_acc_pld *gid_acc_pld, - struct unf_lport *lport) -{ - /* - * Input parameters: - * 1. Port_ID_page: saved from RSCN payload - * 2. GID_ACC_payload: back from GID_ACC (GID_PT or GID_FT) - * * - * Do work: check whether RSCN Port_ID within GID_ACC payload or not - * then, re-login or link down rport - */ - u32 rscn_nport_id = 0; - u32 gid_port_id = 0; - u32 nport_id = 0; - u32 index = 0; - u8 control = 0; - u32 ret = RETURN_OK; - bool have_same_id = false; - - FC_CHECK_RETURN_VALUE(portid_page, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(gid_acc_pld, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - /* 1. get RSCN_NPort_ID from (L_Port->Disc->RSCN_Mgr)->RSCN_Port_ID_Page - */ - rscn_nport_id = UNF_SERVICE_GET_NPORTID_FORM_GID_PAGE(portid_page); - - /* - * 2. for RSCN_NPort_ID - * check whether RSCN_NPort_ID within GID_ACC_Payload or not - */ - while (index < UNF_GID_PORT_CNT) { - gid_port_id = (gid_acc_pld->gid_port_id[index]); - nport_id = UNF_NPORTID_MASK & gid_port_id; - control = UNF_GID_CONTROL(gid_port_id); - - if (lport->nport_id != nport_id && nport_id != 0) { - /* is not L_Port */ - if (nport_id == rscn_nport_id) { - /* RSCN Port_ID within GID_ACC payload */ - have_same_id = true; - break; - } - } - - if (UNF_GID_LAST_PORT_ID == (UNF_GID_LAST_PORT_ID & control)) - break; - - index++; - } - - /* 3. RSCN_Port_ID not within GID_ACC payload table */ - if (!have_same_id) { - /* rport has been removed */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[warn]Port(0x%x_0x%x) find RSCN N_Port_ID(0x%x) in GID_ACC table failed", - lport->port_id, lport->nport_id, rscn_nport_id); - - /* Link down rport */ - ret = unf_handle_rscn_port_not_indisc(lport, rscn_nport_id); - - } else { /* 4. 
RSCN_Port_ID within GID_ACC payload table */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x_0x%x) find RSCN N_Port_ID(0x%x) in GID_ACC table succeed", - lport->port_id, lport->nport_id, rscn_nport_id); - - /* Re-login with INI mode */ - ret = unf_handle_rscn_port_indisc(lport, rscn_nport_id); - } - - return ret; -} - -void unf_check_rport_rscn_process(struct unf_rport *rport, - struct unf_port_id_page *portid_page) -{ - struct unf_rport *unf_rport = rport; - struct unf_port_id_page *unf_portid_page = portid_page; - u8 addr_format = unf_portid_page->addr_format; - - switch (addr_format) { - /* domain+area */ - case UNF_RSCN_AREA_ADDR_GROUP: - if (UNF_GET_DOMAIN_ID(unf_rport->nport_id) == unf_portid_page->port_id_domain && - UNF_GET_AREA_ID(unf_rport->nport_id) == unf_portid_page->port_id_area) - unf_rport->rscn_position = UNF_RPORT_NEED_PROCESS; - - break; - /* domain */ - case UNF_RSCN_DOMAIN_ADDR_GROUP: - if (UNF_GET_DOMAIN_ID(unf_rport->nport_id) == unf_portid_page->port_id_domain) - unf_rport->rscn_position = UNF_RPORT_NEED_PROCESS; - - break; - /* all */ - case UNF_RSCN_FABRIC_ADDR_GROUP: - unf_rport->rscn_position = UNF_RPORT_NEED_PROCESS; - break; - default: - break; - } -} - -static void unf_set_rport_rscn_position(struct unf_lport *lport, - struct unf_port_id_page *portid_page) -{ - struct unf_rport *unf_rport = NULL; - struct list_head *list_node = NULL; - struct list_head *list_nextnode = NULL; - struct unf_disc *disc = NULL; - ulong disc_flag = 0; - ulong rport_flag = 0; - - FC_CHECK_RETURN_VOID(lport); - disc = &lport->disc; - - spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_flag); - list_for_each_safe(list_node, list_nextnode, &disc->list_busy_rports) { - unf_rport = list_entry(list_node, struct unf_rport, entry_rport); - spin_lock_irqsave(&unf_rport->rport_state_lock, rport_flag); - - if (unf_rport->nport_id < UNF_FC_FID_DOM_MGR) { - if (unf_rport->rscn_position == UNF_RPORT_NOT_NEED_PROCESS) - unf_check_rport_rscn_process(unf_rport, portid_page); - } else { - unf_rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; - } - - spin_unlock_irqrestore(&unf_rport->rport_state_lock, rport_flag); - } - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); -} - -static void unf_set_rport_rscn_position_local(struct unf_lport *lport) -{ - struct unf_rport *unf_rport = NULL; - struct list_head *list_node = NULL; - struct list_head *list_nextnode = NULL; - struct unf_disc *disc = NULL; - ulong disc_flag = 0; - ulong rport_flag = 0; - - FC_CHECK_RETURN_VOID(lport); - disc = &lport->disc; - - spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_flag); - list_for_each_safe(list_node, list_nextnode, &disc->list_busy_rports) { - unf_rport = list_entry(list_node, struct unf_rport, entry_rport); - spin_lock_irqsave(&unf_rport->rport_state_lock, rport_flag); - - if (unf_rport->nport_id < UNF_FC_FID_DOM_MGR) { - if (unf_rport->rscn_position == UNF_RPORT_NEED_PROCESS) - unf_rport->rscn_position = UNF_RPORT_ONLY_IN_LOCAL_PROCESS; - } else { - unf_rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; - } - - spin_unlock_irqrestore(&unf_rport->rport_state_lock, rport_flag); - } - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); -} - -static void unf_reset_rport_rscn_setting(struct unf_lport *lport) -{ - struct unf_rport *rport = NULL; - struct list_head *list_node = NULL; - struct list_head *list_nextnode = NULL; - struct unf_disc *disc = NULL; - ulong rport_flag = 0; - - FC_CHECK_RETURN_VOID(lport); - disc = &lport->disc; - - list_for_each_safe(list_node, 
list_nextnode, &disc->list_busy_rports) { - rport = list_entry(list_node, struct unf_rport, entry_rport); - spin_lock_irqsave(&rport->rport_state_lock, rport_flag); - rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; - spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); - } -} - -void unf_compare_nport_id_with_rport_list(struct unf_lport *lport, u32 nport_id, - struct unf_port_id_page *portid_page) -{ - struct unf_rport *rport = NULL; - ulong rport_flag = 0; - u8 addr_format = portid_page->addr_format; - - FC_CHECK_RETURN_VOID(lport); - - switch (addr_format) { - /* domain+area */ - case UNF_RSCN_AREA_ADDR_GROUP: - if ((UNF_GET_DOMAIN_ID(nport_id) != portid_page->port_id_domain) || - (UNF_GET_AREA_ID(nport_id) != portid_page->port_id_area)) - return; - - break; - /* domain */ - case UNF_RSCN_DOMAIN_ADDR_GROUP: - if (UNF_GET_DOMAIN_ID(nport_id) != portid_page->port_id_domain) - return; - - break; - /* all */ - case UNF_RSCN_FABRIC_ADDR_GROUP: - break; - /* can't enter this branch guarantee by outer */ - default: - break; - } - - rport = unf_get_rport_by_nport_id(lport, nport_id); - - if (!rport) { - if (UNF_PORT_MODE_INI == (lport->options & UNF_PORT_MODE_INI)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[event]Port(0x%x) Find Rport(0x%x) by RSCN", - lport->nport_id, nport_id); - unf_rport_relogin(lport, nport_id); - } - } else { - spin_lock_irqsave(&rport->rport_state_lock, rport_flag); - if (rport->rscn_position == UNF_RPORT_NEED_PROCESS) - rport->rscn_position = UNF_RPORT_IN_DISC_AND_LOCAL_PROCESS; - - spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); - } -} - -static void unf_compare_disc_with_local_rport(struct unf_lport *lport, - struct unf_gid_acc_pld *pld, - struct unf_port_id_page *page) -{ - u32 gid_port_id = 0; - u32 nport_id = 0; - u32 index = 0; - u8 control = 0; - - FC_CHECK_RETURN_VOID(pld); - FC_CHECK_RETURN_VOID(lport); - - while (index < UNF_GID_PORT_CNT) { - gid_port_id = (pld->gid_port_id[index]); - nport_id = UNF_NPORTID_MASK & gid_port_id; - control = UNF_GID_CONTROL(gid_port_id); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_INFO, "[info]Port(0x%x) DISC N_Port_ID(0x%x)", - lport->nport_id, nport_id); - - if (nport_id != 0 && - (!unf_lookup_lport_by_nportid(lport, nport_id))) - unf_compare_nport_id_with_rport_list(lport, nport_id, page); - - if (UNF_GID_LAST_PORT_ID == (UNF_GID_LAST_PORT_ID & control)) - break; - - index++; - } - - unf_set_rport_rscn_position_local(lport); -} - -static u32 unf_process_each_rport_after_rscn(struct unf_lport *lport, - struct unf_rport *sns_port, - struct unf_rport *rport) -{ - ulong rport_flag = 0; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(sns_port, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(sns_port, UNF_RETURN_ERROR); - - spin_lock_irqsave(&rport->rport_state_lock, rport_flag); - - if (rport->rscn_position == UNF_RPORT_IN_DISC_AND_LOCAL_PROCESS) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[info]Port(0x%x_0x%x) RPort(0x%x) rescan position(0x%x), check wwpn", - lport->port_id, lport->nport_id, rport->nport_id, - rport->rscn_position); - rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; - spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); - ret = unf_rport_check_wwn(lport, rport); - } else if (rport->rscn_position == UNF_RPORT_ONLY_IN_LOCAL_PROCESS) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[event]Port(0x%x_0x%x) RPort(0x%x) rescan position(0x%x), linkdown it", - lport->port_id, lport->nport_id, rport->nport_id, - 
rport->rscn_position); - rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; - spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); - unf_rport_linkdown(lport, rport); - } else { - spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); - } - - return ret; -} - -static u32 unf_process_local_rport_after_rscn(struct unf_lport *lport, - struct unf_rport *sns_port) -{ - struct unf_rport *unf_rport = NULL; - struct list_head *list_node = NULL; - struct unf_disc *disc = NULL; - ulong disc_flag = 0; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(sns_port, UNF_RETURN_ERROR); - disc = &lport->disc; - - spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_flag); - if (list_empty(&disc->list_busy_rports)) { - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); - - return UNF_RETURN_ERROR; - } - - list_node = UNF_OS_LIST_NEXT(&disc->list_busy_rports); - - do { - unf_rport = list_entry(list_node, struct unf_rport, entry_rport); - - if (unf_rport->rscn_position == UNF_RPORT_NOT_NEED_PROCESS) { - list_node = UNF_OS_LIST_NEXT(list_node); - continue; - } else { - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); - ret = unf_process_each_rport_after_rscn(lport, sns_port, unf_rport); - spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_flag); - list_node = UNF_OS_LIST_NEXT(&disc->list_busy_rports); - } - } while (list_node != &disc->list_busy_rports); - - unf_reset_rport_rscn_setting(lport); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); - - return ret; -} - -static u32 unf_handle_rscn_group_addr(struct unf_port_id_page *portid_page, - struct unf_gid_acc_pld *gid_acc_pld, - struct unf_lport *lport) -{ - struct unf_rport *sns_port = NULL; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(portid_page, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(gid_acc_pld, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - sns_port = unf_get_rport_by_nport_id(lport, UNF_FC_FID_DIR_SERV); - if (!sns_port) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) find fabric port failed", lport->port_id); - - return UNF_RETURN_ERROR; - } - - unf_set_rport_rscn_position(lport, portid_page); - unf_compare_disc_with_local_rport(lport, gid_acc_pld, portid_page); - - ret = unf_process_local_rport_after_rscn(lport, sns_port); - - return ret; -} - -static void unf_handle_rscn_gid_acc(struct unf_gid_acc_pld *gid_acc_pid, - struct unf_lport *lport) -{ - /* for N_Port_ID table return from RSCN */ - struct unf_port_id_page *port_id_page = NULL; - struct unf_rscn_mgr *rscn_mgr = NULL; - struct list_head *list_node = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(gid_acc_pid); - FC_CHECK_RETURN_VOID(lport); - rscn_mgr = &lport->disc.rscn_mgr; - - spin_lock_irqsave(&rscn_mgr->rscn_id_list_lock, flag); - while (!list_empty(&rscn_mgr->list_using_rscn_page)) { - /* - * for each RSCN_Using_Page(NPortID) - * for each - * L_Port->Disc->RSCN_Mgr->RSCN_Using_Page(Port_ID_Page) - * * NOTE: - * check using_page_port_id whether within GID_ACC payload or - * not - */ - list_node = UNF_OS_LIST_NEXT(&rscn_mgr->list_using_rscn_page); - port_id_page = list_entry(list_node, struct unf_port_id_page, list_node_rscn); - list_del(list_node); /* NOTE: here delete node (from RSCN using Page) */ - spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag); - - switch (port_id_page->addr_format) { - /* each page of RSNC corresponding one of N_Port_ID */ - case UNF_RSCN_PORT_ADDR: - (void)unf_handle_rscn_port_addr(port_id_page, 
gid_acc_pid, lport); - break; - - /* each page of RSNC corresponding address group */ - case UNF_RSCN_AREA_ADDR_GROUP: - case UNF_RSCN_DOMAIN_ADDR_GROUP: - case UNF_RSCN_FABRIC_ADDR_GROUP: - (void)unf_handle_rscn_group_addr(port_id_page, gid_acc_pid, lport); - break; - - default: - break; - } - - /* NOTE: release this RSCN_Node */ - rscn_mgr->unf_release_rscn_node(rscn_mgr, port_id_page); - - /* go to next */ - spin_lock_irqsave(&rscn_mgr->rscn_id_list_lock, flag); - } - - spin_unlock_irqrestore(&rscn_mgr->rscn_id_list_lock, flag); -} - -static void unf_gid_acc_handle(struct unf_gid_acc_pld *gid_acc_pid, - struct unf_lport *lport) -{ -#define UNF_NONE_DISC 0X0 /* before enter DISC */ - struct unf_disc *disc = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(gid_acc_pid); - FC_CHECK_RETURN_VOID(lport); - disc = &lport->disc; - - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - switch (disc->disc_option) { - case UNF_INIT_DISC: /* from SCR callback with INI mode */ - disc->disc_option = UNF_NONE_DISC; - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - unf_handle_init_gid_acc(gid_acc_pid, lport); /* R_Port from Disc_list */ - break; - - case UNF_RSCN_DISC: /* from RSCN payload parse(analysis) */ - disc->disc_option = UNF_NONE_DISC; - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - unf_handle_rscn_gid_acc(gid_acc_pid, lport); /* R_Port from busy_list */ - break; - - default: - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x)'s disc option(0x%x) is abnormal", - lport->port_id, lport->nport_id, disc->disc_option); - break; - } -} - -static void unf_gid_ft_ob_callback(struct unf_xchg *xchg) -{ - /* Do recovery */ - struct unf_lport *lport = NULL; - union unf_sfs_u *sfs_ptr = NULL; - struct unf_disc *disc = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(xchg); - - sfs_ptr = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!sfs_ptr) - return; - - spin_lock_irqsave(&xchg->xchg_state_lock, flag); - lport = xchg->lport; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); - if (!lport) - return; - - disc = &lport->disc; - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - unf_disc_state_ma(lport, UNF_EVENT_DISC_FAILED); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - /* Do DISC recovery operation */ - unf_disc_error_recovery(lport); -} - -static void unf_gid_ft_callback(void *lport, void *rport, void *xchg) -{ - struct unf_lport *unf_lport = NULL; - struct unf_disc *disc = NULL; - struct unf_gid_acc_pld *gid_acc_pld = NULL; - struct unf_xchg *unf_xchg = NULL; - union unf_sfs_u *sfs_ptr = NULL; - u32 cmnd_rsp_size = 0; - u32 rjt_reason = 0; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(xchg); - - unf_lport = (struct unf_lport *)lport; - unf_xchg = (struct unf_xchg *)xchg; - disc = &unf_lport->disc; - - sfs_ptr = unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - gid_acc_pld = sfs_ptr->get_id.gid_rsp.gid_acc_pld; - if (!gid_acc_pld) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) GID_FT response payload is NULL", - unf_lport->port_id); - - return; - } - - cmnd_rsp_size = gid_acc_pld->ctiu_pream.cmnd_rsp_size; - if (UNF_CT_IU_ACCEPT == (cmnd_rsp_size & UNF_CT_IU_RSP_MASK)) { - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - unf_disc_state_ma(unf_lport, UNF_EVENT_DISC_SUCCESS); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - /* Process GID_FT ACC */ 
- unf_gid_acc_handle(gid_acc_pld, unf_lport); - } else if (UNF_CT_IU_REJECT == (cmnd_rsp_size & UNF_CT_IU_RSP_MASK)) { - rjt_reason = (gid_acc_pld->ctiu_pream.frag_reason_exp_vend); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) GID_FT was rejected with reason code(0x%x)", - unf_lport->port_id, rjt_reason); - - if (UNF_CTIU_RJT_EXP_FC4TYPE_NO_REG == - (rjt_reason & UNF_CTIU_RJT_EXP_MASK)) { - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - unf_disc_state_ma(unf_lport, UNF_EVENT_DISC_SUCCESS); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - unf_gid_acc_handle(gid_acc_pld, unf_lport); - } else { - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - unf_disc_state_ma(unf_lport, UNF_EVENT_DISC_SUCCESS); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - } - } else { - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - unf_disc_state_ma(unf_lport, UNF_EVENT_DISC_FAILED); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - /* Do DISC recovery operation */ - unf_disc_error_recovery(unf_lport); - } -} - -static void unf_gid_pt_ob_callback(struct unf_xchg *xchg) -{ - /* Do recovery */ - struct unf_lport *lport = NULL; - union unf_sfs_u *sfs_ptr = NULL; - struct unf_disc *disc = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(xchg); - - sfs_ptr = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!sfs_ptr) - return; - - spin_lock_irqsave(&xchg->xchg_state_lock, flag); - lport = xchg->lport; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); - if (!lport) - return; - - disc = &lport->disc; - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - unf_disc_state_ma(lport, UNF_EVENT_DISC_FAILED); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - /* Do DISC recovery operation */ - unf_disc_error_recovery(lport); -} - -static void unf_gid_pt_callback(void *lport, void *rport, void *xchg) -{ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_disc *disc = NULL; - struct unf_gid_acc_pld *gid_acc_pld = NULL; - struct unf_xchg *unf_xchg = NULL; - union unf_sfs_u *sfs_ptr = NULL; - u32 cmnd_rsp_size = 0; - u32 rjt_reason = 0; - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(xchg); - - unf_lport = (struct unf_lport *)lport; - unf_rport = (struct unf_rport *)rport; - disc = &unf_lport->disc; - unf_xchg = (struct unf_xchg *)xchg; - sfs_ptr = unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - - gid_acc_pld = sfs_ptr->get_id.gid_rsp.gid_acc_pld; - if (!gid_acc_pld) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) GID_PT response payload is NULL", - unf_lport->port_id); - return; - } - - cmnd_rsp_size = (gid_acc_pld->ctiu_pream.cmnd_rsp_size); - if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) { - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - unf_disc_state_ma(unf_lport, UNF_EVENT_DISC_SUCCESS); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - unf_gid_acc_handle(gid_acc_pld, unf_lport); - } else if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_REJECT) { - rjt_reason = (gid_acc_pld->ctiu_pream.frag_reason_exp_vend); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) GID_PT was rejected with reason code(0x%x)", - unf_lport->port_id, unf_lport->nport_id, rjt_reason); - - if ((rjt_reason & UNF_CTIU_RJT_EXP_MASK) == - UNF_CTIU_RJT_EXP_PORTTYPE_NO_REG) { - 
spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - unf_disc_state_ma(unf_lport, UNF_EVENT_DISC_SUCCESS); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - unf_gid_acc_handle(gid_acc_pld, unf_lport); - } else { - ret = unf_send_gid_ft(unf_lport, unf_rport); - if (ret != RETURN_OK) - goto SEND_GID_PT_FT_FAILED; - } - } else { - goto SEND_GID_PT_FT_FAILED; - } - - return; -SEND_GID_PT_FT_FAILED: - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - unf_disc_state_ma(unf_lport, UNF_EVENT_DISC_FAILED); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - unf_disc_error_recovery(unf_lport); -} - -static void unf_gnn_id_ob_callback(struct unf_xchg *xchg) -{ - /* Send GFF_ID */ - struct unf_lport *lport = NULL; - struct unf_rport *sns_port = NULL; - u32 ret = UNF_RETURN_ERROR; - u32 nport_id = 0; - struct unf_lport *root_lport = NULL; - - FC_CHECK_RETURN_VOID(xchg); - lport = xchg->lport; - FC_CHECK_RETURN_VOID(lport); - sns_port = xchg->rport; - FC_CHECK_RETURN_VOID(sns_port); - nport_id = xchg->disc_portid; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) send GNN_ID failed to inquire RPort(0x%x)", - lport->port_id, nport_id); - - root_lport = (struct unf_lport *)lport->root_lport; - atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); - wake_up_process(root_lport->disc.disc_thread_info.thread); - - /* NOTE: continue next stage */ - ret = unf_get_and_post_disc_event(lport, sns_port, nport_id, UNF_DISC_GET_FEATURE); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", - lport->port_id, UNF_DISC_GET_FEATURE, nport_id); - - unf_rcv_gff_id_rsp_unknown(lport, nport_id); - } -} - -static void unf_rcv_gnn_id_acc(struct unf_lport *lport, - struct unf_rport *sns_port, - struct unf_gnnid_rsp *gnnid_rsp_pld, - u32 nport_id) -{ - /* Send GFF_ID or Link down immediately */ - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_sns_port = sns_port; - struct unf_gnnid_rsp *unf_gnnid_rsp_pld = gnnid_rsp_pld; - struct unf_rport *rport = NULL; - u64 node_name = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(sns_port); - FC_CHECK_RETURN_VOID(gnnid_rsp_pld); - - node_name = ((u64)(unf_gnnid_rsp_pld->node_name[ARRAY_INDEX_0]) << UNF_SHIFT_32) | - ((u64)(unf_gnnid_rsp_pld->node_name[ARRAY_INDEX_1])); - - if (unf_lport->node_name == node_name) { - /* R_Port & L_Port with same Node Name */ - rport = unf_get_rport_by_nport_id(unf_lport, nport_id); - if (rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[info]Port(0x%x) has the same node name(0x%llx) with RPort(0x%x), linkdown it", - unf_lport->port_id, node_name, nport_id); - - /* Destroy immediately */ - unf_rport_immediate_link_down(unf_lport, rport); - } - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x) got RPort(0x%x) with node name(0x%llx) by GNN_ID", - unf_lport->port_id, nport_id, node_name); - - /* Start to Send GFF_ID */ - ret = unf_get_and_post_disc_event(unf_lport, unf_sns_port, - nport_id, UNF_DISC_GET_FEATURE); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", - unf_lport->port_id, UNF_DISC_GET_FEATURE, nport_id); - - unf_rcv_gff_id_rsp_unknown(unf_lport, nport_id); - } - } -} - -static void unf_rcv_gnn_id_rjt(struct unf_lport *lport, - struct unf_rport *sns_port, - struct unf_gnnid_rsp *gnnid_rsp_pld, - u32 nport_id) -{ - /* Send 
GFF_ID */ - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_sns_port = sns_port; - struct unf_gnnid_rsp *unf_gnnid_rsp_pld = gnnid_rsp_pld; - u32 rjt_reason = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(sns_port); - FC_CHECK_RETURN_VOID(gnnid_rsp_pld); - - rjt_reason = (unf_gnnid_rsp_pld->ctiu_pream.frag_reason_exp_vend); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) GNN_ID was rejected with reason code(0x%x)", - unf_lport->port_id, unf_lport->nport_id, rjt_reason); - - if (!UNF_GNN_GFF_ID_RJT_REASON(rjt_reason)) { - /* Node existence: Continue next stage */ - ret = unf_get_and_post_disc_event(unf_lport, unf_sns_port, - nport_id, UNF_DISC_GET_FEATURE); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", - unf_lport->port_id, UNF_DISC_GET_FEATURE, nport_id); - - unf_rcv_gff_id_rsp_unknown(unf_lport, nport_id); - } - } -} - -void unf_rcv_gnn_id_rsp_unknown(struct unf_lport *lport, - struct unf_rport *sns_port, u32 nport_id) -{ - /* Send GFF_ID */ - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_sns_port = sns_port; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(sns_port); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) Rportid(0x%x) GNN_ID response is unknown. Sending GFF_ID", - unf_lport->port_id, unf_lport->nport_id, nport_id); - - ret = unf_get_and_post_disc_event(unf_lport, unf_sns_port, nport_id, UNF_DISC_GET_FEATURE); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", - unf_lport->port_id, UNF_DISC_GET_FEATURE, - nport_id); - - /* NOTE: go to next stage */ - unf_rcv_gff_id_rsp_unknown(unf_lport, nport_id); - } -} - -static void unf_gnn_id_callback(void *lport, void *sns_port, void *xchg) -{ - struct unf_lport *unf_lport = (struct unf_lport *)lport; - struct unf_rport *unf_sns_port = (struct unf_rport *)sns_port; - struct unf_xchg *unf_xchg = (struct unf_xchg *)xchg; - struct unf_gnnid_rsp *gnnid_rsp_pld = NULL; - u32 cmnd_rsp_size = 0; - u32 nport_id = 0; - struct unf_lport *root_lport = NULL; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(sns_port); - FC_CHECK_RETURN_VOID(xchg); - - nport_id = unf_xchg->disc_portid; - gnnid_rsp_pld = &unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->gnn_id_rsp; - cmnd_rsp_size = gnnid_rsp_pld->ctiu_pream.cmnd_rsp_size; - - root_lport = (struct unf_lport *)unf_lport->root_lport; - atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); - wake_up_process(root_lport->disc.disc_thread_info.thread); - - if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) { - /* Case ACC: send GFF_ID or Link down immediately */ - unf_rcv_gnn_id_acc(unf_lport, unf_sns_port, gnnid_rsp_pld, nport_id); - } else if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_REJECT) { - /* Case RJT: send GFF_ID */ - unf_rcv_gnn_id_rjt(unf_lport, unf_sns_port, gnnid_rsp_pld, nport_id); - } else { /* NOTE: continue next stage */ - /* Case unknown: send GFF_ID */ - unf_rcv_gnn_id_rsp_unknown(unf_lport, unf_sns_port, nport_id); - } -} - -static void unf_gff_id_ob_callback(struct unf_xchg *xchg) -{ - /* Send PLOGI */ - struct unf_lport *lport = NULL; - struct unf_lport *root_lport = NULL; - struct unf_rport *rport = NULL; - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - u32 nport_id = 0; - - FC_CHECK_RETURN_VOID(xchg); - - 
spin_lock_irqsave(&xchg->xchg_state_lock, flag); - lport = xchg->lport; - nport_id = xchg->disc_portid; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); - - FC_CHECK_RETURN_VOID(lport); - - root_lport = (struct unf_lport *)lport->root_lport; - atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); - wake_up_process(root_lport->disc.disc_thread_info.thread); - - /* Get (safe) R_Port */ - rport = unf_get_rport_by_nport_id(lport, nport_id); - rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_ONLY, nport_id); - if (!rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) can't allocate new RPort(0x%x)", - lport->port_id, nport_id); - return; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) send GFF_ID(0x%x_0x%x) to RPort(0x%x_0x%x) abnormal", - lport->port_id, lport->nport_id, xchg->oxid, xchg->rxid, - rport->rport_index, rport->nport_id); - - /* Update R_Port state: PLOGI_WAIT */ - spin_lock_irqsave(&rport->rport_state_lock, flag); - rport->nport_id = nport_id; - unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - /* NOTE: Start to send PLOGI */ - ret = unf_send_plogi(lport, rport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) send PLOGI failed, enter recovry", - lport->port_id); - - /* Do R_Port recovery */ - unf_rport_error_recovery(rport); - } -} - -void unf_rcv_gff_id_acc(struct unf_lport *lport, - struct unf_gffid_rsp *gffid_rsp_pld, u32 nport_id) -{ - /* Delay to LOGIN */ - struct unf_lport *unf_lport = lport; - struct unf_rport *rport = NULL; - struct unf_gffid_rsp *unf_gffid_rsp_pld = gffid_rsp_pld; - u32 fc_4feacture = 0; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(gffid_rsp_pld); - - fc_4feacture = unf_gffid_rsp_pld->fc4_feature[ARRAY_INDEX_1]; - if ((UNF_GFF_ACC_MASK & fc_4feacture) == 0) - fc_4feacture = be32_to_cpu(unf_gffid_rsp_pld->fc4_feature[ARRAY_INDEX_1]); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x_0x%x) RPort(0x%x) received GFF_ID ACC. 
FC4 feature is 0x%x(1:TGT,2:INI,3:COM)", - unf_lport->port_id, unf_lport->nport_id, nport_id, fc_4feacture); - - /* Check (& Get new) R_Port */ - rport = unf_get_rport_by_nport_id(unf_lport, nport_id); - if (rport) - rport = unf_find_rport(unf_lport, nport_id, rport->port_name); - - if (rport || (UNF_GET_PORT_OPTIONS(fc_4feacture) != UNF_PORT_MODE_INI)) { - rport = unf_get_safe_rport(unf_lport, rport, UNF_RPORT_REUSE_ONLY, nport_id); - FC_CHECK_RETURN_VOID(rport); - } else { - return; - } - - if ((fc_4feacture & UNF_GFF_ACC_MASK) != 0) { - spin_lock_irqsave(&rport->rport_state_lock, flag); - rport->options = UNF_GET_PORT_OPTIONS(fc_4feacture); - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - } else if (rport->port_name != INVALID_WWPN) { - spin_lock_irqsave(&rport->rport_state_lock, flag); - rport->options = unf_get_port_feature(rport->port_name); - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - } - - /* NOTE: Send PLOGI if necessary */ - unf_check_rport_need_delay_plogi(unf_lport, rport, rport->options); -} - -void unf_rcv_gff_id_rjt(struct unf_lport *lport, - struct unf_gffid_rsp *gffid_rsp_pld, u32 nport_id) -{ - /* Delay LOGIN or LOGO */ - struct unf_lport *unf_lport = lport; - struct unf_rport *rport = NULL; - struct unf_gffid_rsp *unf_gffid_rsp_pld = gffid_rsp_pld; - u32 rjt_reason = 0; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(gffid_rsp_pld); - - /* Check (& Get new) R_Port */ - rport = unf_get_rport_by_nport_id(unf_lport, nport_id); - if (rport) - rport = unf_find_rport(unf_lport, nport_id, rport->port_name); - - if (!rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) get RPort by N_Port_ID(0x%x) failed and alloc new", - unf_lport->port_id, nport_id); - - rport = unf_rport_get_free_and_init(unf_lport, UNF_PORT_TYPE_FC, nport_id); - FC_CHECK_RETURN_VOID(rport); - - spin_lock_irqsave(&rport->rport_state_lock, flag); - rport->nport_id = nport_id; - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - } - - rjt_reason = unf_gffid_rsp_pld->ctiu_pream.frag_reason_exp_vend; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) send GFF_ID for RPort(0x%x) but was rejected. 
Reason code(0x%x)", - unf_lport->port_id, nport_id, rjt_reason); - - if (!UNF_GNN_GFF_ID_RJT_REASON(rjt_reason)) { - rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_ONLY, nport_id); - FC_CHECK_RETURN_VOID(rport); - - /* Update R_Port state: PLOGI_WAIT */ - spin_lock_irqsave(&rport->rport_state_lock, flag); - rport->nport_id = nport_id; - unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - /* Delay to send PLOGI */ - unf_rport_delay_login(rport); - } else { - spin_lock_irqsave(&rport->rport_state_lock, flag); - if (rport->rp_state == UNF_RPORT_ST_INIT) { - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - /* Enter closing state */ - unf_rport_enter_logo(unf_lport, rport); - } else { - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - } - } -} - -void unf_rcv_gff_id_rsp_unknown(struct unf_lport *lport, u32 nport_id) -{ - /* Send PLOGI */ - struct unf_lport *unf_lport = lport; - struct unf_rport *rport = NULL; - ulong flag = 0; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VOID(lport); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) send GFF_ID for RPort(0x%x) but response is unknown", - unf_lport->port_id, nport_id); - - /* Get (Safe) R_Port & Set State */ - rport = unf_get_rport_by_nport_id(unf_lport, nport_id); - if (rport) - rport = unf_find_rport(unf_lport, nport_id, rport->port_name); - - if (!rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) can't get RPort by NPort ID(0x%x), allocate new RPort", - unf_lport->port_id, unf_lport->nport_id, nport_id); - - rport = unf_rport_get_free_and_init(unf_lport, UNF_PORT_TYPE_FC, nport_id); - FC_CHECK_RETURN_VOID(rport); - - spin_lock_irqsave(&rport->rport_state_lock, flag); - rport->nport_id = nport_id; - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - } - - rport = unf_get_safe_rport(unf_lport, rport, UNF_RPORT_REUSE_ONLY, nport_id); - FC_CHECK_RETURN_VOID(rport); - - /* Update R_Port state: PLOGI_WAIT */ - spin_lock_irqsave(&rport->rport_state_lock, flag); - rport->nport_id = nport_id; - unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - /* Start to send PLOGI */ - ret = unf_send_plogi(unf_lport, rport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) can not send PLOGI for RPort(0x%x), enter recovery", - unf_lport->port_id, nport_id); - - unf_rport_error_recovery(rport); - } -} - -static void unf_gff_id_callback(void *lport, void *sns_port, void *xchg) -{ - struct unf_lport *unf_lport = (struct unf_lport *)lport; - struct unf_lport *root_lport = NULL; - struct unf_xchg *unf_xchg = (struct unf_xchg *)xchg; - struct unf_gffid_rsp *gffid_rsp_pld = NULL; - u32 cmnd_rsp_size = 0; - u32 nport_id = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(sns_port); - FC_CHECK_RETURN_VOID(xchg); - - nport_id = unf_xchg->disc_portid; - - gffid_rsp_pld = &unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->gff_id_rsp; - cmnd_rsp_size = (gffid_rsp_pld->ctiu_pream.cmnd_rsp_size); - - root_lport = (struct unf_lport *)unf_lport->root_lport; - atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); - wake_up_process(root_lport->disc.disc_thread_info.thread); - - if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) { - /* Case for GFF_ID ACC: (Delay)PLOGI */ - unf_rcv_gff_id_acc(unf_lport, gffid_rsp_pld, nport_id); - } else if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == 
UNF_CT_IU_REJECT) { - /* Case for GFF_ID RJT: Delay PLOGI or LOGO directly */ - unf_rcv_gff_id_rjt(unf_lport, gffid_rsp_pld, nport_id); - } else { - /* Send PLOGI */ - unf_rcv_gff_id_rsp_unknown(unf_lport, nport_id); - } -} - -static void unf_rcv_gpn_id_acc(struct unf_lport *lport, - u32 nport_id, u64 port_name) -{ - /* then PLOGI or re-login */ - struct unf_lport *unf_lport = lport; - struct unf_rport *rport = NULL; - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - - rport = unf_find_valid_rport(unf_lport, port_name, nport_id); - if (rport) { - /* R_Port with TGT mode & L_Port with INI mode: - * send PLOGI with INIT state - */ - if ((rport->options & UNF_PORT_MODE_TGT) == UNF_PORT_MODE_TGT) { - rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_INIT, nport_id); - FC_CHECK_RETURN_VOID(rport); - - /* Update R_Port state: PLOGI_WAIT */ - spin_lock_irqsave(&rport->rport_state_lock, flag); - rport->nport_id = nport_id; - unf_rport_state_ma(rport, UNF_EVENT_RPORT_ENTER_PLOGI); - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - /* Start to send PLOGI */ - ret = unf_send_plogi(unf_lport, rport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) send PLOGI failed for 0x%x, enter recovry", - unf_lport->port_id, unf_lport->nport_id, nport_id); - - unf_rport_error_recovery(rport); - } - } else { - spin_lock_irqsave(&rport->rport_state_lock, flag); - if (rport->rp_state != UNF_RPORT_ST_PLOGI_WAIT && - rport->rp_state != UNF_RPORT_ST_PRLI_WAIT && - rport->rp_state != UNF_RPORT_ST_READY) { - unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO); - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - /* Do LOGO operation */ - unf_rport_enter_logo(unf_lport, rport); - } else { - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - } - } - } else { - /* Send GNN_ID */ - (void)unf_rport_relogin(unf_lport, nport_id); - } -} - -static void unf_rcv_gpn_id_rjt(struct unf_lport *lport, u32 nport_id) -{ - struct unf_lport *unf_lport = lport; - struct unf_rport *rport = NULL; - - FC_CHECK_RETURN_VOID(lport); - - rport = unf_get_rport_by_nport_id(unf_lport, nport_id); - if (rport) - /* Do R_Port Link down */ - unf_rport_linkdown(unf_lport, rport); -} - -void unf_rcv_gpn_id_rsp_unknown(struct unf_lport *lport, u32 nport_id) -{ - struct unf_lport *unf_lport = lport; - - FC_CHECK_RETURN_VOID(lport); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) wrong response of GPN_ID with RPort(0x%x)", - unf_lport->port_id, nport_id); - - /* NOTE: go to next stage */ - (void)unf_rport_relogin(unf_lport, nport_id); -} - -static void unf_gpn_id_ob_callback(struct unf_xchg *xchg) -{ - struct unf_lport *lport = NULL; - u32 nport_id = 0; - struct unf_lport *root_lport = NULL; - - FC_CHECK_RETURN_VOID(xchg); - - lport = xchg->lport; - nport_id = xchg->disc_portid; - FC_CHECK_RETURN_VOID(lport); - - root_lport = (struct unf_lport *)lport->root_lport; - atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); - wake_up_process(root_lport->disc.disc_thread_info.thread); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) send GPN_ID failed to inquire RPort(0x%x)", - lport->port_id, nport_id); - - /* NOTE: go to next stage */ - (void)unf_rport_relogin(lport, nport_id); -} - -static void unf_gpn_id_callback(void *lport, void *sns_port, void *xchg) -{ - struct unf_lport *unf_lport = NULL; - struct unf_xchg *unf_xchg = NULL; - struct unf_gpnid_rsp *gpnid_rsp_pld = NULL; - u64 port_name = 0; - u32 cmnd_rsp_size = 0; - 
u32 nport_id = 0; - struct unf_lport *root_lport = NULL; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(sns_port); - FC_CHECK_RETURN_VOID(xchg); - - unf_lport = (struct unf_lport *)lport; - unf_xchg = (struct unf_xchg *)xchg; - nport_id = unf_xchg->disc_portid; - - root_lport = (struct unf_lport *)unf_lport->root_lport; - atomic_inc(&root_lport->disc.disc_thread_info.disc_contrl_size); - wake_up_process(root_lport->disc.disc_thread_info.thread); - - gpnid_rsp_pld = &unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->gpn_id_rsp; - cmnd_rsp_size = gpnid_rsp_pld->ctiu_pream.cmnd_rsp_size; - if (UNF_CT_IU_ACCEPT == (cmnd_rsp_size & UNF_CT_IU_RSP_MASK)) { - /* GPN_ID ACC */ - port_name = ((u64)(gpnid_rsp_pld->port_name[ARRAY_INDEX_0]) - << UNF_SHIFT_32) | - ((u64)(gpnid_rsp_pld->port_name[ARRAY_INDEX_1])); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x) GPN_ID ACC with WWN(0x%llx) RPort NPort ID(0x%x)", - unf_lport->port_id, port_name, nport_id); - - /* Send PLOGI or LOGO or GNN_ID */ - unf_rcv_gpn_id_acc(unf_lport, nport_id, port_name); - } else if (UNF_CT_IU_REJECT == (cmnd_rsp_size & UNF_CT_IU_RSP_MASK)) { - /* GPN_ID RJT: Link Down */ - unf_rcv_gpn_id_rjt(unf_lport, nport_id); - } else { - /* GPN_ID response type unknown: Send GNN_ID */ - unf_rcv_gpn_id_rsp_unknown(unf_lport, nport_id); - } -} - -static void unf_rff_id_ob_callback(struct unf_xchg *xchg) -{ - /* Do recovery */ - struct unf_lport *lport = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(xchg); - - spin_lock_irqsave(&xchg->xchg_state_lock, flag); - lport = xchg->lport; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); - - FC_CHECK_RETURN_VOID(lport); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) send RFF_ID failed", - lport->port_id, lport->nport_id); - - unf_lport_error_recovery(lport); -} - -static void unf_rff_id_callback(void *lport, void *rport, void *xchg) -{ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_xchg *unf_xchg = NULL; - struct unf_ctiu_prem *ctiu_prem = NULL; - u32 ret = UNF_RETURN_ERROR; - u32 cmnd_rsp_size = 0; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(xchg); - - unf_lport = (struct unf_lport *)lport; - unf_xchg = (struct unf_xchg *)xchg; - if (unlikely(!unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr)) - return; - - unf_rport = unf_get_rport_by_nport_id(unf_lport, UNF_FC_FID_FCTRL); - unf_rport = unf_get_safe_rport(unf_lport, unf_rport, - UNF_RPORT_REUSE_ONLY, UNF_FC_FID_FCTRL); - if (unlikely(!unf_rport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) can't allocate RPort(0x%x)", - unf_lport->port_id, UNF_FC_FID_FCTRL); - return; - } - - unf_rport->nport_id = UNF_FC_FID_FCTRL; - ctiu_prem = &unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rff_id_rsp.ctiu_pream; - cmnd_rsp_size = ctiu_prem->cmnd_rsp_size; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]LOGIN: Port(0x%x_0x%x) RFF_ID rsp is (0x%x)", - unf_lport->port_id, unf_lport->nport_id, - (cmnd_rsp_size & UNF_CT_IU_RSP_MASK)); - - /* RSP Type check: some SW not support RFF_ID, go to next stage also */ - if ((cmnd_rsp_size & UNF_CT_IU_RSP_MASK) == UNF_CT_IU_ACCEPT) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x_0x%x) receive RFF ACC(0x%x) in state(0x%x)", - unf_lport->port_id, unf_lport->nport_id, - (cmnd_rsp_size & UNF_CT_IU_RSP_MASK), unf_lport->states); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - 
"[warn]LOGIN: Port(0x%x_0x%x) receive RFF RJT(0x%x) in state(0x%x) with RJT reason code(0x%x) explanation(0x%x)", - unf_lport->port_id, unf_lport->nport_id, - (cmnd_rsp_size & UNF_CT_IU_RSP_MASK), unf_lport->states, - (ctiu_prem->frag_reason_exp_vend) & UNF_CT_IU_REASON_MASK, - (ctiu_prem->frag_reason_exp_vend) & UNF_CT_IU_EXPLAN_MASK); - } - - /* L_Port state check */ - spin_lock_irqsave(&unf_lport->lport_state_lock, flag); - if (unf_lport->states != UNF_LPORT_ST_RFF_ID_WAIT) { - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) receive RFF reply in state(0x%x)", - unf_lport->port_id, unf_lport->nport_id, unf_lport->states); - - return; - } - /* LPort: RFF_ID_WAIT --> SCR_WAIT */ - unf_lport_state_ma(unf_lport, UNF_EVENT_LPORT_REMOTE_ACC); - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - ret = unf_send_scr(unf_lport, unf_rport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) send SCR failed", - unf_lport->port_id, unf_lport->nport_id); - unf_lport_error_recovery(unf_lport); - } -} - -static void unf_rft_id_ob_callback(struct unf_xchg *xchg) -{ - struct unf_lport *lport = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(xchg); - spin_lock_irqsave(&xchg->xchg_state_lock, flag); - lport = xchg->lport; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); - FC_CHECK_RETURN_VOID(lport); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) send RFT_ID failed", - lport->port_id, lport->nport_id); - unf_lport_error_recovery(lport); -} - -static void unf_rft_id_callback(void *lport, void *rport, void *xchg) -{ - /* RFT_ID --->>> RFF_ID */ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_xchg *unf_xchg = NULL; - struct unf_ctiu_prem *ctiu_prem = NULL; - u32 ret = UNF_RETURN_ERROR; - u32 cmnd_rsp_size = 0; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(xchg); - - unf_lport = (struct unf_lport *)lport; - unf_rport = (struct unf_rport *)rport; - unf_xchg = (struct unf_xchg *)xchg; - - if (!unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) SFS entry is NULL with state(0x%x)", - unf_lport->port_id, unf_lport->states); - return; - } - - ctiu_prem = &unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr - ->rft_id_rsp.ctiu_pream; - cmnd_rsp_size = (ctiu_prem->cmnd_rsp_size); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x_0x%x) RFT_ID response is (0x%x)", - (cmnd_rsp_size & UNF_CT_IU_RSP_MASK), unf_lport->port_id, - unf_lport->nport_id); - - if (UNF_CT_IU_ACCEPT == (cmnd_rsp_size & UNF_CT_IU_RSP_MASK)) { - /* Case for RFT_ID ACC: send RFF_ID */ - spin_lock_irqsave(&unf_lport->lport_state_lock, flag); - if (unf_lport->states != UNF_LPORT_ST_RFT_ID_WAIT) { - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x_0x%x) receive RFT_ID ACC in state(0x%x)", - unf_lport->port_id, unf_lport->nport_id, - unf_lport->states); - - return; - } - - /* LPort: RFT_ID_WAIT --> RFF_ID_WAIT */ - unf_lport_state_ma(unf_lport, UNF_EVENT_LPORT_REMOTE_ACC); - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - /* Start to send RFF_ID GS command */ - ret = unf_send_rff_id(unf_lport, unf_rport, UNF_FC4_FCP_TYPE); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - 
"[warn]LOGIN: Port(0x%x_0x%x) send RFF_ID failed", - unf_lport->port_id, unf_lport->nport_id); - unf_lport_error_recovery(unf_lport); - } - } else { - /* Case for RFT_ID RJT: do recovery */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) receive RFT_ID RJT with reason_code(0x%x) explanation(0x%x)", - unf_lport->port_id, unf_lport->nport_id, - (ctiu_prem->frag_reason_exp_vend) & UNF_CT_IU_REASON_MASK, - (ctiu_prem->frag_reason_exp_vend) & UNF_CT_IU_EXPLAN_MASK); - - /* Do L_Port recovery */ - unf_lport_error_recovery(unf_lport); - } -} - -static void unf_scr_ob_callback(struct unf_xchg *xchg) -{ - /* Callback fucnion for exception: Do L_Port error recovery */ - struct unf_lport *lport = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(xchg); - - spin_lock_irqsave(&xchg->xchg_state_lock, flag); - lport = xchg->lport; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); - - FC_CHECK_RETURN_VOID(lport); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) send SCR failed and do port recovery", - lport->port_id); - - unf_lport_error_recovery(lport); -} - -static void unf_scr_callback(void *lport, void *rport, void *xchg) -{ - /* Callback function for SCR response: Send GID_PT with INI mode */ - struct unf_lport *unf_lport = NULL; - struct unf_disc *disc = NULL; - struct unf_xchg *unf_xchg = NULL; - struct unf_els_acc *els_acc = NULL; - u32 ret = UNF_RETURN_ERROR; - ulong port_flag = 0; - ulong disc_flag = 0; - u32 cmnd = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(xchg); - - unf_lport = (struct unf_lport *)lport; - unf_xchg = (struct unf_xchg *)xchg; - disc = &unf_lport->disc; - - if (!unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) - return; - - els_acc = &unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->els_acc; - if (unf_xchg->byte_orders & UNF_BIT_2) - cmnd = be32_to_cpu(els_acc->cmnd); - else - cmnd = (els_acc->cmnd); - - if ((cmnd & UNF_ELS_CMND_HIGH_MASK) == UNF_ELS_CMND_ACC) { - spin_lock_irqsave(&unf_lport->lport_state_lock, port_flag); - if (unf_lport->states != UNF_LPORT_ST_SCR_WAIT) { - spin_unlock_irqrestore(&unf_lport->lport_state_lock, - port_flag); - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) receive SCR ACC with error state(0x%x)", - unf_lport->port_id, unf_lport->nport_id, - unf_lport->states); - return; - } - - /* LPort: SCR_WAIT --> READY */ - unf_lport_state_ma(unf_lport, UNF_EVENT_LPORT_REMOTE_ACC); - if (unf_lport->states == UNF_LPORT_ST_READY) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x_0x%x) enter READY state when received SCR response", - unf_lport->port_id, unf_lport->nport_id); - } - - /* Start to Discovery with INI mode: GID_PT */ - if ((unf_lport->options & UNF_PORT_MODE_INI) == - UNF_PORT_MODE_INI) { - spin_unlock_irqrestore(&unf_lport->lport_state_lock, - port_flag); - - if (unf_lport->disc.disc_temp.unf_disc_start) { - spin_lock_irqsave(&disc->rport_busy_pool_lock, - disc_flag); - unf_lport->disc.disc_option = UNF_INIT_DISC; - disc->last_disc_jiff = jiffies; - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); - - ret = unf_lport->disc.disc_temp.unf_disc_start(unf_lport); - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]LOGIN: Port(0x%x) DISC %s with INI mode", - unf_lport->port_id, - (ret != RETURN_OK) ? 
"failed" : "succeed"); - } - return; - } - - spin_unlock_irqrestore(&unf_lport->lport_state_lock, port_flag); - /* NOTE: set state with UNF_DISC_ST_END used for - * RSCN process - */ - spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_flag); - unf_lport->disc.states = UNF_DISC_ST_END; - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) is TGT mode, no need to discovery", - unf_lport->port_id); - - return; - } - unf_lport_error_recovery(unf_lport); -} - -void unf_check_rport_need_delay_plogi(struct unf_lport *lport, - struct unf_rport *rport, u32 port_feature) -{ - /* - * Called by: - * 1. Private loop - * 2. RCVD GFF_ID ACC - */ - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_rport = rport; - ulong flag = 0; - u32 nport_id = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - nport_id = unf_rport->nport_id; - - /* - * Send GFF_ID means L_Port has INI attribute - * * - * When to send PLOGI: - * 1. R_Port has TGT mode (COM or TGT), send PLOGI immediately - * 2. R_Port only with INI, send LOGO immediately - * 3. R_Port with unknown attribute, delay to send PLOGI - */ - if ((UNF_PORT_MODE_TGT & port_feature) || - (UNF_LPORT_ENHANCED_FEATURE_ENHANCED_GFF & - unf_lport->enhanced_features)) { - /* R_Port has TGT mode: send PLOGI immediately */ - unf_rport = unf_get_safe_rport(lport, unf_rport, UNF_RPORT_REUSE_ONLY, nport_id); - FC_CHECK_RETURN_VOID(unf_rport); - - /* Update R_Port state: PLOGI_WAIT */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport->nport_id = nport_id; - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_ENTER_PLOGI); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - /* Start to send PLOGI */ - ret = unf_send_plogi(unf_lport, unf_rport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) send PLOGI to RPort(0x%x) failed", - unf_lport->port_id, unf_lport->nport_id, - nport_id); - - unf_rport_error_recovery(unf_rport); - } - } else if (port_feature == UNF_PORT_MODE_INI) { - /* R_Port only with INI mode: can't send PLOGI - * --->>> LOGO/nothing - */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - if (unf_rport->rp_state == UNF_RPORT_ST_INIT) { - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) send LOGO to RPort(0x%x) which only with INI mode", - unf_lport->port_id, unf_lport->nport_id, nport_id); - - /* Enter Closing state */ - unf_rport_enter_logo(unf_lport, unf_rport); - } else { - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - } - } else { - /* Unknown R_Port attribute: Delay to send PLOGI */ - unf_rport = unf_get_safe_rport(lport, unf_rport, UNF_RPORT_REUSE_ONLY, nport_id); - FC_CHECK_RETURN_VOID(unf_rport); - - /* Update R_Port state: PLOGI_WAIT */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport->nport_id = nport_id; - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_ENTER_PLOGI); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - unf_rport_delay_login(unf_rport); - } -} diff --git a/drivers/scsi/spfc/common/unf_gs.h b/drivers/scsi/spfc/common/unf_gs.h deleted file mode 100644 index d9856133b3cd..000000000000 --- a/drivers/scsi/spfc/common/unf_gs.h +++ /dev/null @@ -1,58 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_GS_H -#define 
UNF_GS_H - -#include "unf_type.h" -#include "unf_lport.h" - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -u32 unf_send_scr(struct unf_lport *lport, - struct unf_rport *rport); -u32 unf_send_ctpass_thru(struct unf_lport *lport, - void *buffer, u32 bufflen); - -u32 unf_send_gid_ft(struct unf_lport *lport, - struct unf_rport *rport); -u32 unf_send_gid_pt(struct unf_lport *lport, - struct unf_rport *rport); -u32 unf_send_gpn_id(struct unf_lport *lport, - struct unf_rport *sns_port, u32 nport_id); -u32 unf_send_gnn_id(struct unf_lport *lport, - struct unf_rport *sns_port, u32 nport_id); -u32 unf_send_gff_id(struct unf_lport *lport, - struct unf_rport *sns_port, u32 nport_id); - -u32 unf_send_rff_id(struct unf_lport *lport, - struct unf_rport *rport, u32 fc4_type); -u32 unf_send_rft_id(struct unf_lport *lport, - struct unf_rport *rport); -void unf_rcv_gnn_id_rsp_unknown(struct unf_lport *lport, - struct unf_rport *sns_port, u32 nport_id); -void unf_rcv_gpn_id_rsp_unknown(struct unf_lport *lport, u32 nport_id); -void unf_rcv_gff_id_rsp_unknown(struct unf_lport *lport, u32 nport_id); -void unf_check_rport_need_delay_plogi(struct unf_lport *lport, - struct unf_rport *rport, u32 port_feature); - -struct send_com_trans_in { - unsigned char port_wwn[8]; - u32 req_buffer_count; - unsigned char req_buffer[ARRAY_INDEX_1]; -}; - -struct send_com_trans_out { - u32 hba_status; - u32 total_resp_buffer_cnt; - u32 actual_resp_buffer_cnt; - unsigned char resp_buffer[ARRAY_INDEX_1]; -}; - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -#endif diff --git a/drivers/scsi/spfc/common/unf_init.c b/drivers/scsi/spfc/common/unf_init.c deleted file mode 100644 index 7e6f98d16977..000000000000 --- a/drivers/scsi/spfc/common/unf_init.c +++ /dev/null @@ -1,353 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_type.h" -#include "unf_log.h" -#include "unf_scsi_common.h" -#include "unf_event.h" -#include "unf_exchg.h" -#include "unf_portman.h" -#include "unf_rport.h" -#include "unf_service.h" -#include "unf_io.h" -#include "unf_io_abnormal.h" - -#define UNF_PID 12 -#define MY_PID UNF_PID - -#define RPORT_FEATURE_POOL_SIZE 4096 -struct task_struct *event_task_thread; -struct workqueue_struct *unf_wq; - -atomic_t fc_mem_ref; - -struct unf_global_card_thread card_thread_mgr; -u32 unf_dgb_level = UNF_MAJOR; -u32 log_print_level = UNF_INFO; -u32 log_limited_times = UNF_LOGIN_ATT_PRINT_TIMES; - -static struct unf_esgl_page *unf_get_one_free_esgl_page - (void *lport, struct unf_frame_pkg *pkg) -{ - struct unf_lport *unf_lport = NULL; - struct unf_xchg *unf_xchg = NULL; - - FC_CHECK_RETURN_VALUE(lport, NULL); - FC_CHECK_RETURN_VALUE(pkg, NULL); - - unf_lport = (struct unf_lport *)lport; - unf_xchg = (struct unf_xchg *)pkg->xchg_contex; - - return unf_get_and_add_one_free_esgl_page(unf_lport, unf_xchg); -} - -static int unf_get_cfg_parms(char *section_name, struct unf_cfg_item *cfg_itm, - u32 *cfg_value, u32 itemnum) -{ - /* Maximum length of a configuration item value, including the end - * character - */ -#define UNF_MAX_ITEM_VALUE_LEN (256) - - u32 *unf_cfg_value = NULL; - struct unf_cfg_item *unf_cfg_itm = NULL; - u32 i = 0; - - unf_cfg_itm = cfg_itm; - unf_cfg_value = cfg_value; - - for (i = 0; i < itemnum; i++) { - if (!unf_cfg_itm || !unf_cfg_value) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, - UNF_ERR, - "[err]Config name or value is NULL"); - - return UNF_RETURN_ERROR; - } - - if (strcmp("End", unf_cfg_itm->puc_name) == 0x0) - break; - - if 
(strcmp("fw_path", unf_cfg_itm->puc_name) == 0x0) { - unf_cfg_itm++; - unf_cfg_value += UNF_MAX_ITEM_VALUE_LEN / sizeof(u32); - continue; - } - - *unf_cfg_value = unf_cfg_itm->default_value; - unf_cfg_itm++; - unf_cfg_value++; - } - - return RETURN_OK; -} - -struct unf_cm_handle_op unf_cm_handle_ops = { - .unf_alloc_local_port = unf_lport_create_and_init, - .unf_release_local_port = unf_release_local_port, - .unf_receive_ls_gs_pkg = unf_receive_ls_gs_pkg, - .unf_receive_bls_pkg = unf_receive_bls_pkg, - .unf_send_els_done = unf_send_els_done, - .unf_receive_ini_response = unf_ini_scsi_completed, - .unf_get_cfg_parms = unf_get_cfg_parms, - .unf_receive_marker_status = unf_recv_tmf_marker_status, - .unf_receive_abts_marker_status = unf_recv_abts_marker_status, - - .unf_process_fcp_cmnd = NULL, - .unf_tgt_cmnd_xfer_or_rsp_echo = NULL, - .unf_cm_get_sgl_entry = unf_ini_get_sgl_entry, - .unf_cm_get_dif_sgl_entry = unf_ini_get_dif_sgl_entry, - .unf_get_one_free_esgl_page = unf_get_one_free_esgl_page, - .unf_fc_port_event = unf_fc_port_link_event, -}; - -u32 unf_get_cm_handle_ops(struct unf_cm_handle_op *cm_handle) -{ - FC_CHECK_RETURN_VALUE(cm_handle, UNF_RETURN_ERROR); - - memcpy(cm_handle, &unf_cm_handle_ops, sizeof(struct unf_cm_handle_op)); - - return RETURN_OK; -} - -static void unf_deinit_cm_handle_ops(void) -{ - memset(&unf_cm_handle_ops, 0, sizeof(struct unf_cm_handle_op)); -} - -int unf_event_process(void *worker_ptr) -{ - struct list_head *event_list = NULL; - struct unf_cm_event_report *event_node = NULL; - struct completion *create_done = (struct completion *)worker_ptr; - ulong flags = 0; - - set_user_nice(current, UNF_OS_THRD_PRI_LOW); - recalc_sigpending(); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[event]Enter event thread"); - - if (create_done) - complete(create_done); - - do { - spin_lock_irqsave(&fc_event_list.fc_event_list_lock, flags); - if (list_empty(&fc_event_list.list_head)) { - spin_unlock_irqrestore(&fc_event_list.fc_event_list_lock, flags); - - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout((long)msecs_to_jiffies(UNF_S_TO_MS)); - } else { - event_list = UNF_OS_LIST_NEXT(&fc_event_list.list_head); - list_del_init(event_list); - fc_event_list.list_num--; - event_node = list_entry(event_list, - struct unf_cm_event_report, - list_entry); - spin_unlock_irqrestore(&fc_event_list.fc_event_list_lock, flags); - - /* Process event node */ - unf_handle_event(event_node); - } - } while (!kthread_should_stop()); - - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_MAJOR, - "[event]Event thread exit"); - - return RETURN_OK; -} - -static int unf_creat_event_center(void) -{ - struct completion create_done; - - init_completion(&create_done); - INIT_LIST_HEAD(&fc_event_list.list_head); - fc_event_list.list_num = 0; - spin_lock_init(&fc_event_list.fc_event_list_lock); - - event_task_thread = kthread_run(unf_event_process, &create_done, "spfc_event"); - if (IS_ERR(event_task_thread)) { - complete_and_exit(&create_done, 0); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Create event thread failed(0x%p)", - event_task_thread); - - return UNF_RETURN_ERROR; - } - wait_for_completion(&create_done); - return RETURN_OK; -} - -static void unf_cm_event_thread_exit(void) -{ - if (event_task_thread) - kthread_stop(event_task_thread); -} - -static void unf_init_card_mgr_list(void) -{ - /* So far, do not care */ - INIT_LIST_HEAD(&card_thread_mgr.card_list_head); - - spin_lock_init(&card_thread_mgr.global_card_list_lock); - - card_thread_mgr.card_num = 0; -} - -int unf_port_feature_pool_init(void) 
-{ - u32 index = 0; - u32 rport_feature_pool_size = 0; - struct unf_rport_feature_recard *rport_feature = NULL; - unsigned long flags = 0; - - rport_feature_pool_size = sizeof(struct unf_rport_feature_pool); - port_feature_pool = vmalloc(rport_feature_pool_size); - if (!port_feature_pool) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]cannot allocate rport feature pool"); - - return UNF_RETURN_ERROR; - } - memset(port_feature_pool, 0, rport_feature_pool_size); - spin_lock_init(&port_feature_pool->port_fea_pool_lock); - INIT_LIST_HEAD(&port_feature_pool->list_busy_head); - INIT_LIST_HEAD(&port_feature_pool->list_free_head); - - port_feature_pool->port_feature_pool_addr = - vmalloc((size_t)(RPORT_FEATURE_POOL_SIZE * sizeof(struct unf_rport_feature_recard))); - if (!port_feature_pool->port_feature_pool_addr) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]cannot allocate rport feature pool address"); - - vfree(port_feature_pool); - port_feature_pool = NULL; - - return UNF_RETURN_ERROR; - } - - memset(port_feature_pool->port_feature_pool_addr, 0, - RPORT_FEATURE_POOL_SIZE * sizeof(struct unf_rport_feature_recard)); - rport_feature = (struct unf_rport_feature_recard *) - port_feature_pool->port_feature_pool_addr; - - spin_lock_irqsave(&port_feature_pool->port_fea_pool_lock, flags); - for (index = 0; index < RPORT_FEATURE_POOL_SIZE; index++) { - list_add_tail(&rport_feature->entry_feature, &port_feature_pool->list_free_head); - rport_feature++; - } - spin_unlock_irqrestore(&port_feature_pool->port_fea_pool_lock, flags); - - return RETURN_OK; -} - -void unf_free_port_feature_pool(void) -{ - if (port_feature_pool->port_feature_pool_addr) { - vfree(port_feature_pool->port_feature_pool_addr); - port_feature_pool->port_feature_pool_addr = NULL; - } - - vfree(port_feature_pool); - port_feature_pool = NULL; -} - -int unf_common_init(void) -{ - int ret = RETURN_OK; - - unf_dgb_level = UNF_MAJOR; - log_print_level = UNF_KEVENT; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "UNF Driver Version:%s.", SPFC_DRV_VERSION); - - atomic_set(&fc_mem_ref, 0); - ret = unf_port_feature_pool_init(); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port Feature Pool init failed"); - return ret; - } - - ret = (int)unf_register_ini_transport(); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]INI interface init failed"); - goto REG_INITRANSPORT_FAIL; - } - - unf_port_mgmt_init(); - unf_init_card_mgr_list(); - ret = (int)unf_init_global_event_msg(); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Create global event center failed"); - goto CREAT_GLBEVENTMSG_FAIL; - } - - ret = (int)unf_creat_event_center(); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Create event center (thread) failed"); - goto CREAT_EVENTCENTER_FAIL; - } - - unf_wq = create_workqueue("unf_wq"); - if (!unf_wq) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Create work queue failed"); - goto CREAT_WORKQUEUE_FAIL; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Init common layer succeed"); - return ret; -CREAT_WORKQUEUE_FAIL: - unf_cm_event_thread_exit(); -CREAT_EVENTCENTER_FAIL: - unf_destroy_global_event_msg(); -CREAT_GLBEVENTMSG_FAIL: - unf_unregister_ini_transport(); -REG_INITRANSPORT_FAIL: - unf_free_port_feature_pool(); - return UNF_RETURN_ERROR; -} - -static void unf_destroy_dirty_port(void) -{ - u32 ditry_port_num = 0; - - unf_show_dirty_port(false, &ditry_port_num); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - 
"[info]Sys has %u dirty L_Port(s)", ditry_port_num); -} - -void unf_common_exit(void) -{ - unf_free_port_feature_pool(); - - unf_destroy_dirty_port(); - - flush_workqueue(unf_wq); - destroy_workqueue(unf_wq); - unf_wq = NULL; - - unf_cm_event_thread_exit(); - - unf_destroy_global_event_msg(); - - unf_deinit_cm_handle_ops(); - - unf_port_mgmt_deinit(); - - unf_unregister_ini_transport(); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "[info]SPFC module remove succeed, memory reference count is %d", - atomic_read(&fc_mem_ref)); -} diff --git a/drivers/scsi/spfc/common/unf_io.c b/drivers/scsi/spfc/common/unf_io.c deleted file mode 100644 index 5de69f8ddc6d..000000000000 --- a/drivers/scsi/spfc/common/unf_io.c +++ /dev/null @@ -1,1219 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_io.h" -#include "unf_log.h" -#include "unf_portman.h" -#include "unf_service.h" -#include "unf_io_abnormal.h" - -u32 sector_size_flag; - -#define UNF_GET_FCP_CTL(pkg) ((((pkg)->status) >> UNF_SHIFT_8) & 0xFF) -#define UNF_GET_SCSI_STATUS(pkg) (((pkg)->status) & 0xFF) - -static u32 unf_io_success_handler(struct unf_xchg *xchg, - struct unf_frame_pkg *pkg, u32 up_status); -static u32 unf_ini_error_default_handler(struct unf_xchg *xchg, - struct unf_frame_pkg *pkg, - u32 up_status); -static u32 unf_io_underflow_handler(struct unf_xchg *xchg, - struct unf_frame_pkg *pkg, u32 up_status); -static u32 unf_ini_dif_error_handler(struct unf_xchg *xchg, - struct unf_frame_pkg *pkg, u32 up_status); - -struct unf_ini_error_handler_s { - u32 ini_error_code; - u32 (*unf_ini_error_handler)(struct unf_xchg *xchg, - struct unf_frame_pkg *pkg, u32 up_status); -}; - -struct unf_ini_error_handler_s ini_error_handler_table[] = { - {UNF_IO_SUCCESS, unf_io_success_handler}, - {UNF_IO_ABORTED, unf_ini_error_default_handler}, - {UNF_IO_FAILED, unf_ini_error_default_handler}, - {UNF_IO_ABORT_ABTS, unf_ini_error_default_handler}, - {UNF_IO_ABORT_LOGIN, unf_ini_error_default_handler}, - {UNF_IO_ABORT_REET, unf_ini_error_default_handler}, - {UNF_IO_ABORT_FAILED, unf_ini_error_default_handler}, - {UNF_IO_OUTOF_ORDER, unf_ini_error_default_handler}, - {UNF_IO_FTO, unf_ini_error_default_handler}, - {UNF_IO_LINK_FAILURE, unf_ini_error_default_handler}, - {UNF_IO_OVER_FLOW, unf_ini_error_default_handler}, - {UNF_IO_RSP_OVER, unf_ini_error_default_handler}, - {UNF_IO_LOST_FRAME, unf_ini_error_default_handler}, - {UNF_IO_UNDER_FLOW, unf_io_underflow_handler}, - {UNF_IO_HOST_PROG_ERROR, unf_ini_error_default_handler}, - {UNF_IO_SEST_PROG_ERROR, unf_ini_error_default_handler}, - {UNF_IO_INVALID_ENTRY, unf_ini_error_default_handler}, - {UNF_IO_ABORT_SEQ_NOT, unf_ini_error_default_handler}, - {UNF_IO_REJECT, unf_ini_error_default_handler}, - {UNF_IO_EDC_IN_ERROR, unf_ini_error_default_handler}, - {UNF_IO_EDC_OUT_ERROR, unf_ini_error_default_handler}, - {UNF_IO_UNINIT_KEK_ERR, unf_ini_error_default_handler}, - {UNF_IO_DEK_OUTOF_RANGE, unf_ini_error_default_handler}, - {UNF_IO_KEY_UNWRAP_ERR, unf_ini_error_default_handler}, - {UNF_IO_KEY_TAG_ERR, unf_ini_error_default_handler}, - {UNF_IO_KEY_ECC_ERR, unf_ini_error_default_handler}, - {UNF_IO_BLOCK_SIZE_ERROR, unf_ini_error_default_handler}, - {UNF_IO_ILLEGAL_CIPHER_MODE, unf_ini_error_default_handler}, - {UNF_IO_CLEAN_UP, unf_ini_error_default_handler}, - {UNF_IO_ABORTED_BY_TARGET, unf_ini_error_default_handler}, - {UNF_IO_TRANSPORT_ERROR, unf_ini_error_default_handler}, - {UNF_IO_LINK_FLASH, unf_ini_error_default_handler}, - 
{UNF_IO_TIMEOUT, unf_ini_error_default_handler}, - {UNF_IO_DMA_ERROR, unf_ini_error_default_handler}, - {UNF_IO_DIF_ERROR, unf_ini_dif_error_handler}, - {UNF_IO_INCOMPLETE, unf_ini_error_default_handler}, - {UNF_IO_DIF_REF_ERROR, unf_ini_dif_error_handler}, - {UNF_IO_DIF_GEN_ERROR, unf_ini_dif_error_handler}, - {UNF_IO_NO_XCHG, unf_ini_error_default_handler} - }; - -void unf_done_ini_xchg(struct unf_xchg *xchg) -{ - /* - * About I/O Done - * 1. normal case - * 2. Send ABTS & RCVD RSP - * 3. Send ABTS & timer timeout - */ - struct unf_scsi_cmnd scsi_cmd = {0}; - ulong flags = 0; - struct unf_scsi_cmd_info *scsi_cmnd_info = NULL; - struct unf_rport_scsi_id_image *scsi_image_table = NULL; - u32 scsi_id = 0; - - FC_CHECK_RETURN_VOID(xchg); - - if (unlikely(!xchg->scsi_cmnd_info.scsi_cmnd)) - return; - - /* 1. Free RX_ID for INI SIRT: Do not care */ - - /* - * 2. set & check exchange state - * * - * for Set UP_ABORT Tag: - * 1) L_Port destroy - * 2) LUN reset - * 3) Target/Session reset - * 4) SCSI send Abort(ABTS) - */ - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - xchg->io_state |= INI_IO_STATE_DONE; - if (unlikely(xchg->io_state & - (INI_IO_STATE_UPABORT | INI_IO_STATE_UPSEND_ERR | INI_IO_STATE_TMF_ABORT))) { - /* - * a. UPABORT: scsi have send ABTS - * --->>> do not call SCSI_Done, return directly - * b. UPSEND_ERR: error happened duiring LLDD send SCSI_CMD - * --->>> do not call SCSI_Done, scsi need retry - */ - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_KEVENT, - "[event]Exchange(0x%p) Cmdsn:0x%lx upCmd:%p hottag(0x%x) with state(0x%x) has been aborted or send error", - xchg, (ulong)xchg->cmnd_sn, xchg->scsi_cmnd_info.scsi_cmnd, - xchg->hotpooltag, xchg->io_state); - - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - return; - } - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - scsi_cmnd_info = &xchg->scsi_cmnd_info; - - /* - * 3. Set: - * scsi_cmnd; - * cmnd_done_func; - * cmnd up_level_done; - * sense_buff_addr; - * resid_length; - * cmnd_result; - * dif_info - * ** - * UNF_SCSI_CMND <<-- UNF_SCSI_CMND_INFO - */ - UNF_SET_HOST_CMND((&scsi_cmd), scsi_cmnd_info->scsi_cmnd); - UNF_SER_CMND_DONE_FUNC((&scsi_cmd), scsi_cmnd_info->done); - UNF_SET_UP_LEVEL_CMND_DONE_FUNC(&scsi_cmd, scsi_cmnd_info->uplevel_done); - scsi_cmd.drv_private = xchg->lport; - if (unlikely((UNF_SCSI_STATUS(xchg->scsi_cmnd_info.result)) & FCP_SNS_LEN_VALID_MASK)) { - unf_save_sense_data(scsi_cmd.upper_cmnd, - (char *)xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu, - (int)xchg->fcp_sfs_union.fcp_rsp_entry.fcp_sense_len); - } - UNF_SET_RESID((&scsi_cmd), (u32)xchg->resid_len); - UNF_SET_CMND_RESULT((&scsi_cmd), scsi_cmnd_info->result); - memcpy(&scsi_cmd.dif_info, &xchg->dif_info, sizeof(struct dif_info)); - - scsi_id = scsi_cmnd_info->scsi_id; - - UNF_DONE_SCSI_CMND((&scsi_cmd)); - - /* 4. 
Update IO result CNT */ - if (likely(xchg->lport)) { - scsi_image_table = &xchg->lport->rport_scsi_table; - UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, - (scsi_cmnd_info->result >> UNF_SHIFT_16)); - } -} - -static inline u32 unf_ini_get_sgl_entry_buf(ini_get_sgl_entry_buf ini_get_sgl, - void *cmnd, void *driver_sgl, - void **upper_sgl, u32 *req_index, - u32 *index, char **buf, - u32 *buf_len) -{ - if (unlikely(!ini_get_sgl)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "Command(0x%p) Get sgl Entry func Null.", cmnd); - - return UNF_RETURN_ERROR; - } - - return ini_get_sgl(cmnd, driver_sgl, upper_sgl, req_index, index, buf, buf_len); -} - -u32 unf_ini_get_sgl_entry(void *pkg, char **buf, u32 *buf_len) -{ - struct unf_frame_pkg *unf_pkg = (struct unf_frame_pkg *)pkg; - struct unf_xchg *unf_xchg = NULL; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(buf, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(buf_len, UNF_RETURN_ERROR); - - unf_xchg = (struct unf_xchg *)unf_pkg->xchg_contex; - FC_CHECK_RETURN_VALUE(unf_xchg, UNF_RETURN_ERROR); - - /* Get SGL Entry buffer for INI Mode */ - ret = unf_ini_get_sgl_entry_buf(unf_xchg->scsi_cmnd_info.unf_get_sgl_entry_buf, - unf_xchg->scsi_cmnd_info.scsi_cmnd, NULL, - &unf_xchg->req_sgl_info.sgl, - &unf_xchg->scsi_cmnd_info.port_id, - &((unf_xchg->req_sgl_info).entry_index), buf, buf_len); - - return ret; -} - -u32 unf_ini_get_dif_sgl_entry(void *pkg, char **buf, u32 *buf_len) -{ - struct unf_frame_pkg *unf_pkg = (struct unf_frame_pkg *)pkg; - struct unf_xchg *unf_xchg = NULL; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(buf, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(buf_len, UNF_RETURN_ERROR); - - unf_xchg = (struct unf_xchg *)unf_pkg->xchg_contex; - FC_CHECK_RETURN_VALUE(unf_xchg, UNF_RETURN_ERROR); - - /* Get SGL Entry buffer for INI Mode */ - ret = unf_ini_get_sgl_entry_buf(unf_xchg->scsi_cmnd_info.unf_get_sgl_entry_buf, - unf_xchg->scsi_cmnd_info.scsi_cmnd, NULL, - &unf_xchg->dif_sgl_info.sgl, - &unf_xchg->scsi_cmnd_info.port_id, - &((unf_xchg->dif_sgl_info).entry_index), buf, buf_len); - return ret; -} - -u32 unf_get_up_level_cmnd_errcode(struct unf_ini_error_code *err_table, - u32 err_table_count, u32 drv_err_code) -{ - u32 loop = 0; - - /* fail return UNF_RETURN_ERROR,adjust by up level */ - if (unlikely(!err_table)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "Error Code Table is Null, Error Code(0x%x).", drv_err_code); - - return (u32)UNF_SCSI_HOST(DID_ERROR); - } - - for (loop = 0; loop < err_table_count; loop++) { - if (err_table[loop].drv_errcode == drv_err_code) - return err_table[loop].ap_errcode; - } - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Unsupported Ap Error code by Error Code(0x%x).", drv_err_code); - - return (u32)UNF_SCSI_HOST(DID_ERROR); -} - -static u32 unf_ini_status_handle(struct unf_xchg *xchg, - struct unf_frame_pkg *pkg) -{ - u32 loop = 0; - u32 ret = UNF_RETURN_ERROR; - u32 up_status = 0; - - for (loop = 0; loop < sizeof(ini_error_handler_table) / - sizeof(struct unf_ini_error_handler_s); loop++) { - if (UNF_GET_LL_ERR(pkg) == ini_error_handler_table[loop].ini_error_code) { - up_status = - unf_get_up_level_cmnd_errcode(xchg->scsi_cmnd_info.err_code_table, - xchg->scsi_cmnd_info.err_code_table_cout, - UNF_GET_LL_ERR(pkg)); - - if (ini_error_handler_table[loop].unf_ini_error_handler) { - ret = ini_error_handler_table[loop] - .unf_ini_error_handler(xchg, pkg, up_status); - } else { - /* set exchange->result 
---to--->>>scsi_result */ - ret = unf_ini_error_default_handler(xchg, pkg, up_status); - } - - return ret; - } - } - - up_status = unf_get_up_level_cmnd_errcode(xchg->scsi_cmnd_info.err_code_table, - xchg->scsi_cmnd_info.err_code_table_cout, - UNF_IO_SOFT_ERR); - - ret = unf_ini_error_default_handler(xchg, pkg, up_status); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Can not find com status, SID(0x%x) exchange(0x%p) com_status(0x%x) DID(0x%x) hot_pool_tag(0x%x)", - xchg->sid, xchg, pkg->status, xchg->did, xchg->hotpooltag); - - return ret; -} - -static void unf_analysis_response_info(struct unf_xchg *xchg, - struct unf_frame_pkg *pkg, - u32 *up_status) -{ - u8 *resp_buf = NULL; - - /* LL_Driver use Little End, and copy RSP_INFO to COM_Driver */ - if (unlikely(pkg->unf_rsp_pload_bl.length > UNF_RESPONE_DATA_LEN)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Receive FCP response resp buffer len is invalid 0x%x", - pkg->unf_rsp_pload_bl.length); - return; - } - - resp_buf = (u8 *)pkg->unf_rsp_pload_bl.buffer_ptr; - if (resp_buf) { - /* If chip use Little End, then change it to Big End */ - if ((pkg->byte_orders & UNF_BIT_3) == 0) - unf_cpu_to_big_end(resp_buf, pkg->unf_rsp_pload_bl.length); - - /* Chip DAM data with Big End */ - if (resp_buf[ARRAY_INDEX_3] != UNF_FCP_TM_RSP_COMPLETE) { - *up_status = UNF_SCSI_HOST(DID_BUS_BUSY); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%p) DID bus busy, scsi_status(0x%x)", - xchg->lport, UNF_GET_SCSI_STATUS(pkg)); - } - } else { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Receive FCP response, resp buffer is NULL resp buffer len is 0x%x", - pkg->unf_rsp_pload_bl.length); - } -} - -static void unf_analysis_sense_info(struct unf_xchg *xchg, - struct unf_frame_pkg *pkg, u32 *up_status) -{ - u32 length = 0; - - /* 4 bytes Align */ - length = MIN(SCSI_SENSE_DATA_LEN, pkg->unf_sense_pload_bl.length); - - if (unlikely(pkg->unf_sense_pload_bl.length > SCSI_SENSE_DATA_LEN)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[info]Receive FCP response resp buffer len is 0x%x", - pkg->unf_sense_pload_bl.length); - } - /* - * If have sense info then copy directly - * else, the chip has been dma the data to sense buffer - */ - - if (length != 0 && pkg->unf_rsp_pload_bl.buffer_ptr) { - /* has been dma to exchange buffer */ - if (unlikely(pkg->unf_rsp_pload_bl.length > UNF_RESPONE_DATA_LEN)) { - *up_status = UNF_SCSI_HOST(DID_ERROR); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Receive FCP response resp buffer len is invalid 0x%x", - pkg->unf_rsp_pload_bl.length); - - return; - } - - xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu = (u8 *)kmalloc(length, GFP_ATOMIC); - if (!xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Alloc FCP sense buffer failed"); - return; - } - - memcpy(xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu, - ((u8 *)(pkg->unf_rsp_pload_bl.buffer_ptr)) + - pkg->unf_rsp_pload_bl.length, length); - - xchg->fcp_sfs_union.fcp_rsp_entry.fcp_sense_len = length; - } else { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Receive FCP response, sense buffer is NULL sense buffer len is 0x%x", - length); - } -} - -static u32 unf_io_success_handler(struct unf_xchg *xchg, - struct unf_frame_pkg *pkg, u32 up_status) -{ - u8 scsi_status = 0; - u8 control = 0; - u32 status = up_status; - - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - - control = UNF_GET_FCP_CTL(pkg); - scsi_status = UNF_GET_SCSI_STATUS(pkg); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, 
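
The ini_error_handler_table / unf_ini_status_handle() pair above is plain table-driven dispatch: scan an array of {code, handler} entries for the low-level completion status and fall back to the default handler when nothing matches. A self-contained sketch of the idiom (the codes and handlers here are made up, not the driver's):

    #include <stddef.h>
    #include <stdio.h>

    typedef unsigned int u32;

    struct err_handler {
        u32 code;
        u32 (*handler)(u32 code);
    };

    static u32 handle_success(u32 code) { (void)code; return 0; }
    static u32 handle_default(u32 code) { printf("default handler for 0x%x\n", code); return 1; }

    static const struct err_handler table[] = {
        { 0x0, handle_success },
        { 0x5, handle_default },
    };

    static u32 dispatch(u32 code)
    {
        size_t i;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
            if (table[i].code == code)
                return table[i].handler(code);
        }
        /* unknown completion code: same kind of fallback the removed code uses */
        return handle_default(code);
    }

    int main(void)
    {
        return (int)dispatch(0x7);      /* not in the table, takes the default path */
    }

Keeping the default path as an explicit fallback rather than a table entry is what lets unf_ini_status_handle() also log the unrecognised status before completing the command.
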
UNF_INFO, - "[info]Port(0x%p), Exchange(0x%p) Completed, Control(0x%x), Scsi Status(0x%x)", - xchg->lport, xchg, control, scsi_status); - - if (control & FCP_SNS_LEN_VALID_MASK) { - /* has sense info */ - if (scsi_status == FCP_SCSI_STATUS_GOOD) - scsi_status = SCSI_CHECK_CONDITION; - - unf_analysis_sense_info(xchg, pkg, &status); - } else { - /* - * When the FCP_RSP_LEN_VALID bit is set to one, - * the content of the SCSI STATUS CODE field is not reliable - * and shall be ignored by the application client. - */ - if (control & FCP_RSP_LEN_VALID_MASK) - unf_analysis_response_info(xchg, pkg, &status); - } - - xchg->scsi_cmnd_info.result = status | UNF_SCSI_STATUS(scsi_status); - - return RETURN_OK; -} - -static u32 unf_ini_error_default_handler(struct unf_xchg *xchg, - struct unf_frame_pkg *pkg, - u32 up_status) -{ - /* set exchange->result ---to--->>> scsi_cmnd->result */ - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - - FC_DRV_PRINT(UNF_LOG_ABNORMAL, UNF_WARN, - "[warn]SID(0x%x) exchange(0x%p) com_status(0x%x) up_status(0x%x) DID(0x%x) hot_pool_tag(0x%x) response_len(0x%x)", - xchg->sid, xchg, pkg->status, up_status, xchg->did, - xchg->hotpooltag, pkg->residus_len); - - xchg->scsi_cmnd_info.result = - up_status | UNF_SCSI_STATUS(UNF_GET_SCSI_STATUS(pkg)); - - return RETURN_OK; -} - -static u32 unf_ini_dif_error_handler(struct unf_xchg *xchg, - struct unf_frame_pkg *pkg, u32 up_status) -{ - u8 *sense_data = NULL; - u16 sense_code = 0; - - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - - /* - * According to DIF scheme - * drive set check condition(0x2) when dif error occurs, - * and returns the values base on the upper-layer verification resule - * Check sequence: crc,Lba,App, - * if CRC error is found, the subsequent check is not performed - */ - xchg->scsi_cmnd_info.result = UNF_SCSI_STATUS(SCSI_CHECK_CONDITION); - - sense_code = (u16)pkg->status_sub_code; - sense_data = (u8 *)kmalloc(SCSI_SENSE_DATA_LEN, GFP_ATOMIC); - if (!sense_data) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Alloc FCP sense buffer failed"); - - return UNF_RETURN_ERROR; - } - memset(sense_data, 0, SCSI_SENSE_DATA_LEN); - sense_data[ARRAY_INDEX_0] = SENSE_DATA_RESPONSE_CODE; /* response code:0x70 */ - sense_data[ARRAY_INDEX_2] = ILLEGAL_REQUEST; /* sense key:0x05; */ - sense_data[ARRAY_INDEX_7] = ADDITINONAL_SENSE_LEN; /* additional sense length:0x7 */ - sense_data[ARRAY_INDEX_12] = (u8)(sense_code >> UNF_SHIFT_8); - sense_data[ARRAY_INDEX_13] = (u8)sense_code; - - xchg->fcp_sfs_union.fcp_rsp_entry.fcp_rsp_iu = sense_data; - xchg->fcp_sfs_union.fcp_rsp_entry.fcp_sense_len = SCSI_SENSE_DATA_LEN; - - /* valid sense data length snscode[13] */ - return RETURN_OK; -} - -static u32 unf_io_underflow_handler(struct unf_xchg *xchg, - struct unf_frame_pkg *pkg, u32 up_status) -{ - /* under flow: residlen > 0 */ - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - - if (xchg->fcp_cmnd.cdb[ARRAY_INDEX_0] != SCSIOPC_REPORT_LUN && - xchg->fcp_cmnd.cdb[ARRAY_INDEX_0] != SCSIOPC_INQUIRY) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[info]IO under flow: SID(0x%x) exchange(0x%p) com status(0x%x) up_status(0x%x) DID(0x%x) hot_pool_tag(0x%x) response SID(0x%x)", - xchg->sid, xchg, pkg->status, up_status, - xchg->did, xchg->hotpooltag, pkg->residus_len); - } - - xchg->resid_len = (int)pkg->residus_len; - (void)unf_io_success_handler(xchg, pkg, up_status); - - return RETURN_OK; -} - -void 
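
unf_ini_dif_error_handler() above reports a DIF failure by synthesizing a fixed-format SCSI sense buffer by hand: byte 0 carries the 0x70 response code, byte 2 the ILLEGAL REQUEST sense key (0x05), byte 7 the additional sense length, and bytes 12/13 (ASC/ASCQ in fixed-format sense) come from the status sub-code. A compilable sketch of that layout; the 96-byte buffer size and the sub-code value are chosen only for illustration:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define DEMO_SENSE_LEN 96               /* illustrative buffer size */

    static void build_dif_sense(uint8_t *sense, uint16_t sub_code)
    {
        memset(sense, 0, DEMO_SENSE_LEN);
        sense[0]  = 0x70;                   /* fixed format, current errors */
        sense[2]  = 0x05;                   /* sense key: ILLEGAL REQUEST */
        sense[7]  = 0x07;                   /* additional sense length */
        sense[12] = (uint8_t)(sub_code >> 8);   /* ASC */
        sense[13] = (uint8_t)sub_code;          /* ASCQ */
    }

    int main(void)
    {
        uint8_t sense[DEMO_SENSE_LEN];

        build_dif_sense(sense, 0x1001);     /* hypothetical sub-code */
        printf("ASC/ASCQ: %02x/%02x\n", sense[12], sense[13]);
        return 0;
    }
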
unf_complete_cmnd(struct unf_scsi_cmnd *scsi_cmnd, u32 result_size) -{ - /* - * Exception during process Que_CMND - * 1. L_Port == NULL; - * 2. L_Port == removing; - * 3. R_Port == NULL; - * 4. Xchg == NULL. - */ - FC_CHECK_RETURN_VOID((UNF_GET_CMND_DONE_FUNC(scsi_cmnd))); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[info]Command(0x%p), Result(0x%x).", scsi_cmnd, result_size); - - UNF_SET_CMND_RESULT(scsi_cmnd, result_size); - - UNF_DONE_SCSI_CMND(scsi_cmnd); -} - -static inline void unf_bind_xchg_scsi_cmd(struct unf_xchg *xchg, - struct unf_scsi_cmnd *scsi_cmnd) -{ - struct unf_scsi_cmd_info *scsi_cmnd_info = NULL; - - scsi_cmnd_info = &xchg->scsi_cmnd_info; - - /* UNF_SCSI_CMND_INFO <<-- UNF_SCSI_CMND */ - scsi_cmnd_info->err_code_table = UNF_GET_ERR_CODE_TABLE(scsi_cmnd); - scsi_cmnd_info->err_code_table_cout = UNF_GET_ERR_CODE_TABLE_COUNT(scsi_cmnd); - scsi_cmnd_info->done = UNF_GET_CMND_DONE_FUNC(scsi_cmnd); - scsi_cmnd_info->scsi_cmnd = UNF_GET_HOST_CMND(scsi_cmnd); - scsi_cmnd_info->sense_buf = (char *)UNF_GET_SENSE_BUF_ADDR(scsi_cmnd); - scsi_cmnd_info->uplevel_done = UNF_GET_UP_LEVEL_CMND_DONE(scsi_cmnd); - scsi_cmnd_info->unf_get_sgl_entry_buf = UNF_GET_SGL_ENTRY_BUF_FUNC(scsi_cmnd); - scsi_cmnd_info->sgl = UNF_GET_CMND_SGL(scsi_cmnd); - scsi_cmnd_info->time_out = scsi_cmnd->time_out; - scsi_cmnd_info->entry_cnt = scsi_cmnd->entry_count; - scsi_cmnd_info->port_id = (u32)scsi_cmnd->port_id; - scsi_cmnd_info->scsi_id = UNF_GET_SCSI_ID_BY_CMND(scsi_cmnd); -} - -u32 unf_ini_scsi_completed(void *lport, struct unf_frame_pkg *pkg) -{ - struct unf_lport *unf_lport = NULL; - struct unf_xchg *unf_xchg = NULL; - struct unf_fcp_cmnd *fcp_cmnd = NULL; - u32 control = 0; - u16 xchg_tag = 0x0ffff; - u32 ret = UNF_RETURN_ERROR; - ulong xchg_flag = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - - unf_lport = (struct unf_lport *)lport; - xchg_tag = (u16)pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]; - - /* 1. Find Exchange Context */ - unf_xchg = unf_cm_lookup_xchg_by_tag(lport, (u16)xchg_tag); - if (unlikely(!unf_xchg)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) can not find exchange by tag(0x%x)", - unf_lport->port_id, unf_lport->nport_id, xchg_tag); - - /* NOTE: return directly */ - return UNF_RETURN_ERROR; - } - - /* 2. Consistency check */ - UNF_CHECK_ALLOCTIME_VALID(unf_lport, xchg_tag, unf_xchg, - pkg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME], - unf_xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME]); - - /* 3. Increase ref_cnt for exchange protecting */ - ret = unf_xchg_ref_inc(unf_xchg, INI_RESPONSE_DONE); /* hold */ - FC_CHECK_RETURN_VALUE((ret == RETURN_OK), UNF_RETURN_ERROR); - - fcp_cmnd = &unf_xchg->fcp_cmnd; - control = fcp_cmnd->control; - control = UNF_GET_TASK_MGMT_FLAGS(control); - - /* 4. Cancel timer if necessary */ - if (unf_xchg->scsi_cmnd_info.time_out != 0) - unf_lport->xchg_mgr_temp.unf_xchg_cancel_timer(unf_xchg); - - /* 5. process scsi TMF if necessary */ - if (control != 0) { - unf_process_scsi_mgmt_result(pkg, unf_xchg); - unf_xchg_ref_dec(unf_xchg, INI_RESPONSE_DONE); /* cancel hold */ - - /* NOTE: return directly */ - return RETURN_OK; - } - - /* 6. 
Xchg Abort state check */ - spin_lock_irqsave(&unf_xchg->xchg_state_lock, xchg_flag); - unf_xchg->oxid = UNF_GET_OXID(pkg); - unf_xchg->rxid = UNF_GET_RXID(pkg); - if (INI_IO_STATE_UPABORT & unf_xchg->io_state) { - spin_unlock_irqrestore(&unf_xchg->xchg_state_lock, xchg_flag); - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_WARN, - "[warn]Port(0x%x) find exchange(%p) state(0x%x) has been aborted", - unf_lport->port_id, unf_xchg, unf_xchg->io_state); - - /* NOTE: release exchange during SCSI ABORT(ABTS) */ - unf_xchg_ref_dec(unf_xchg, INI_RESPONSE_DONE); /* cancel hold */ - - return ret; - } - spin_unlock_irqrestore(&unf_xchg->xchg_state_lock, xchg_flag); - - /* - * 7. INI SCSI CMND Status process - * set exchange->result ---to--->>> scsi_result - */ - ret = unf_ini_status_handle(unf_xchg, pkg); - - /* 8. release exchangenecessary */ - unf_cm_free_xchg(unf_lport, unf_xchg); - - /* 9. dec exch ref_cnt */ - unf_xchg_ref_dec(unf_xchg, INI_RESPONSE_DONE); /* cancel hold: release resource now */ - - return ret; -} - -u32 unf_hardware_start_io(struct unf_lport *lport, struct unf_frame_pkg *pkg) -{ - if (unlikely(!lport->low_level_func.service_op.unf_cmnd_send)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) low level send scsi function is NULL", - lport->port_id); - - return UNF_RETURN_ERROR; - } - - return lport->low_level_func.service_op.unf_cmnd_send(lport->fc_port, pkg); -} - -struct unf_rport *unf_find_rport_by_scsi_id(struct unf_lport *lport, - struct unf_ini_error_code *err_code_table, - u32 err_code_table_cout, u32 scsi_id, u32 *scsi_result) -{ - struct unf_rport_scsi_id_image *scsi_image_table = NULL; - struct unf_wwpn_rport_info *wwpn_rport_info = NULL; - struct unf_rport *unf_rport = NULL; - ulong flags = 0; - - /* scsi_table -> session_table ->image_table */ - scsi_image_table = &lport->rport_scsi_table; - - /* 1. Scsi_Id validity check */ - if (unlikely(scsi_id >= scsi_image_table->max_scsi_id)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Input scsi_id(0x%x) bigger than max_scsi_id(0x%x).", - scsi_id, scsi_image_table->max_scsi_id); - - *scsi_result = unf_get_up_level_cmnd_errcode(err_code_table, err_code_table_cout, - UNF_IO_SOFT_ERR); /* did_soft_error */ - - return NULL; - } - - /* 2. 
GetR_Port_Info/R_Port: use Scsi_Id find from L_Port's - * Rport_Scsi_Table (image table) - */ - spin_lock_irqsave(&scsi_image_table->scsi_image_table_lock, flags); - wwpn_rport_info = &scsi_image_table->wwn_rport_info_table[scsi_id]; - unf_rport = wwpn_rport_info->rport; - spin_unlock_irqrestore(&scsi_image_table->scsi_image_table_lock, flags); - - if (unlikely(!unf_rport)) { - *scsi_result = unf_get_up_level_cmnd_errcode(err_code_table, - err_code_table_cout, - UNF_IO_PORT_LOGOUT); - - return NULL; - } - - return unf_rport; -} - -static u32 unf_build_xchg_fcpcmnd(struct unf_fcp_cmnd *fcp_cmnd, - struct unf_scsi_cmnd *scsi_cmnd) -{ - memcpy(fcp_cmnd->cdb, &UNF_GET_FCP_CMND(scsi_cmnd), scsi_cmnd->cmnd_len); - - if ((fcp_cmnd->control == UNF_FCP_WR_DATA && - (IS_READ_COMMAND(fcp_cmnd->cdb[ARRAY_INDEX_0]))) || - (fcp_cmnd->control == UNF_FCP_RD_DATA && - (IS_WRITE_COMMAND(fcp_cmnd->cdb[ARRAY_INDEX_0])))) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MINOR, - "Scsi command direction inconsistent, CDB[ARRAY_INDEX_0](0x%x), direction(0x%x).", - fcp_cmnd->cdb[ARRAY_INDEX_0], fcp_cmnd->control); - - return UNF_RETURN_ERROR; - } - - memcpy(fcp_cmnd->lun, scsi_cmnd->lun_id, sizeof(fcp_cmnd->lun)); - - unf_big_end_to_cpu((void *)fcp_cmnd->cdb, sizeof(fcp_cmnd->cdb)); - fcp_cmnd->data_length = UNF_GET_DATA_LEN(scsi_cmnd); - - return RETURN_OK; -} - -static void unf_adjust_xchg_len(struct unf_xchg *xchg, u32 scsi_cmnd) -{ - switch (scsi_cmnd) { - case SCSIOPC_REQUEST_SENSE: /* requires different buffer */ - xchg->data_len = UNF_SCSI_SENSE_BUFFERSIZE; - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MINOR, "Request Sense new."); - break; - - case SCSIOPC_TEST_UNIT_READY: - case SCSIOPC_RESERVE: - case SCSIOPC_RELEASE: - case SCSIOPC_START_STOP_UNIT: - xchg->data_len = 0; - break; - - default: - break; - } -} - -static void unf_copy_dif_control(struct unf_dif_control_info *dif_control, - struct unf_scsi_cmnd *scsi_cmnd) -{ - dif_control->fcp_dl = scsi_cmnd->dif_control.fcp_dl; - dif_control->protect_opcode = scsi_cmnd->dif_control.protect_opcode; - dif_control->start_lba = scsi_cmnd->dif_control.start_lba; - dif_control->app_tag = scsi_cmnd->dif_control.app_tag; - - dif_control->flags = scsi_cmnd->dif_control.flags; - dif_control->dif_sge_count = scsi_cmnd->dif_control.dif_sge_count; - dif_control->dif_sgl = scsi_cmnd->dif_control.dif_sgl; -} - -static void unf_adjust_dif_pci_transfer_len(struct unf_xchg *xchg, u32 direction) -{ - struct unf_dif_control_info *dif_control = NULL; - u32 sector_size = 0; - - dif_control = &xchg->dif_control; - - if (dif_control->protect_opcode == UNF_DIF_ACTION_NONE) - return; - if ((dif_control->flags & UNF_DIF_SECTSIZE_4KB) == 0) - sector_size = SECTOR_SIZE_512; - else - sector_size = SECTOR_SIZE_4096; - switch (dif_control->protect_opcode & UNF_DIF_ACTION_MASK) { - case UNF_DIF_ACTION_INSERT: - if (direction == DMA_TO_DEVICE) { - /* write IO,insert,Indicates that data with DIF is - * transmitted over the link. - */ - dif_control->fcp_dl = xchg->data_len + - UNF_CAL_BLOCK_CNT(xchg->data_len, sector_size) * UNF_DIF_AREA_SIZE; - } else { - /* read IO,insert,Indicates that the internal DIf is - * carried, and the link does not carry the DIf. - */ - dif_control->fcp_dl = xchg->data_len; - } - break; - - case UNF_DIF_ACTION_VERIFY_AND_DELETE: - if (direction == DMA_TO_DEVICE) { - /* write IO,Delete,Indicates that the internal DIf is - * carried, and the link does not carry the DIf. 
- */ - dif_control->fcp_dl = xchg->data_len; - } else { - /* read IO,Delete,Indicates that data with DIF is - * carried on the link and does not contain DIF on - * internal. - */ - dif_control->fcp_dl = xchg->data_len + - UNF_CAL_BLOCK_CNT(xchg->data_len, sector_size) * UNF_DIF_AREA_SIZE; - } - break; - - case UNF_DIF_ACTION_VERIFY_AND_FORWARD: - dif_control->fcp_dl = xchg->data_len + - UNF_CAL_BLOCK_CNT(xchg->data_len, sector_size) * UNF_DIF_AREA_SIZE; - break; - - default: - dif_control->fcp_dl = xchg->data_len; - break; - } - - xchg->fcp_cmnd.data_length = dif_control->fcp_dl; -} - -static void unf_get_dma_direction(struct unf_fcp_cmnd *fcp_cmnd, - struct unf_scsi_cmnd *scsi_cmnd) -{ - if (UNF_GET_DATA_DIRECTION(scsi_cmnd) == DMA_TO_DEVICE) { - fcp_cmnd->control = UNF_FCP_WR_DATA; - } else if (UNF_GET_DATA_DIRECTION(scsi_cmnd) == DMA_FROM_DEVICE) { - fcp_cmnd->control = UNF_FCP_RD_DATA; - } else { - /* DMA Direction None */ - fcp_cmnd->control = 0; - } -} - -static int unf_save_scsi_cmnd_to_xchg(struct unf_lport *lport, - struct unf_rport *rport, - struct unf_xchg *xchg, - struct unf_scsi_cmnd *scsi_cmnd) -{ - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_rport = rport; - struct unf_xchg *unf_xchg = xchg; - u32 result_size = 0; - - scsi_cmnd->driver_scribble = (void *)unf_xchg->start_jif; - unf_xchg->rport = unf_rport; - unf_xchg->rport_bind_jifs = unf_rport->rport_alloc_jifs; - - /* Build Xchg SCSI_CMND info */ - unf_bind_xchg_scsi_cmd(unf_xchg, scsi_cmnd); - - unf_xchg->data_len = UNF_GET_DATA_LEN(scsi_cmnd); - unf_xchg->data_direction = UNF_GET_DATA_DIRECTION(scsi_cmnd); - unf_xchg->sid = unf_lport->nport_id; - unf_xchg->did = unf_rport->nport_id; - unf_xchg->private_data[PKG_PRIVATE_XCHG_RPORT_INDEX] = unf_rport->rport_index; - unf_xchg->world_id = scsi_cmnd->world_id; - unf_xchg->cmnd_sn = scsi_cmnd->cmnd_sn; - unf_xchg->pinitiator = scsi_cmnd->pinitiator; - unf_xchg->scsi_id = scsi_cmnd->scsi_id; - if (scsi_cmnd->qos_level == UNF_QOS_LEVEL_DEFAULT) - unf_xchg->qos_level = unf_rport->qos_level; - else - unf_xchg->qos_level = scsi_cmnd->qos_level; - - unf_get_dma_direction(&unf_xchg->fcp_cmnd, scsi_cmnd); - result_size = unf_build_xchg_fcpcmnd(&unf_xchg->fcp_cmnd, scsi_cmnd); - if (unlikely(result_size != RETURN_OK)) - return UNF_RETURN_ERROR; - - unf_adjust_xchg_len(unf_xchg, UNF_GET_FCP_CMND(scsi_cmnd)); - - unf_adjust_xchg_len(unf_xchg, UNF_GET_FCP_CMND(scsi_cmnd)); - - /* Dif (control) info */ - unf_copy_dif_control(&unf_xchg->dif_control, scsi_cmnd); - memcpy(&unf_xchg->dif_info, &scsi_cmnd->dif_info, sizeof(struct dif_info)); - unf_adjust_dif_pci_transfer_len(unf_xchg, UNF_GET_DATA_DIRECTION(scsi_cmnd)); - - /* single sgl info */ - if (unf_xchg->data_direction != DMA_NONE && UNF_GET_CMND_SGL(scsi_cmnd)) { - unf_xchg->req_sgl_info.sgl = UNF_GET_CMND_SGL(scsi_cmnd); - unf_xchg->req_sgl_info.sgl_start = unf_xchg->req_sgl_info.sgl; - /* Save the sgl header for easy - * location and printing. 
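
unf_adjust_dif_pci_transfer_len() above grows fcp_dl by one 8-byte protection tuple per logical block whenever the DIF data actually travels on the link (INSERT on writes, VERIFY_AND_DELETE on reads, VERIFY_AND_FORWARD in both directions); otherwise fcp_dl stays equal to the plain data length. Assuming UNF_CAL_BLOCK_CNT() rounds the transfer up to whole sectors and UNF_DIF_AREA_SIZE is that 8-byte tuple, the arithmetic works out as in this sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define DIF_TUPLE_SIZE 8u   /* bytes of protection info per logical block (assumed) */

    /* fcp_dl when the DIF tuples are carried on the wire */
    static uint32_t fcp_dl_with_dif(uint32_t data_len, uint32_t sector_size)
    {
        uint32_t blocks = (data_len + sector_size - 1) / sector_size;

        return data_len + blocks * DIF_TUPLE_SIZE;
    }

    int main(void)
    {
        /* 4 KiB of data over 512-byte sectors: 8 blocks -> 4096 + 64 = 4160 */
        printf("%u\n", (unsigned)fcp_dl_with_dif(4096, 512));
        /* same data over 4 KiB sectors: 1 block -> 4096 + 8 = 4104 */
        printf("%u\n", (unsigned)fcp_dl_with_dif(4096, 4096));
        return 0;
    }
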
- */ - unf_xchg->req_sgl_info.req_index = 0; - unf_xchg->req_sgl_info.entry_index = 0; - } - - if (scsi_cmnd->dif_control.dif_sgl) { - unf_xchg->dif_sgl_info.sgl = UNF_INI_GET_DIF_SGL(scsi_cmnd); - unf_xchg->dif_sgl_info.entry_index = 0; - unf_xchg->dif_sgl_info.req_index = 0; - unf_xchg->dif_sgl_info.sgl_start = unf_xchg->dif_sgl_info.sgl; - } - - return RETURN_OK; -} - -static int unf_send_fcpcmnd(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ -#define UNF_MAX_PENDING_IO_CNT 3 - struct unf_scsi_cmd_info *scsi_cmnd_info = NULL; - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_rport = rport; - struct unf_xchg *unf_xchg = xchg; - struct unf_frame_pkg pkg = {0}; - u32 result_size = 0; - ulong flags = 0; - - memcpy(&pkg.dif_control, &unf_xchg->dif_control, sizeof(struct unf_dif_control_info)); - pkg.dif_control.fcp_dl = unf_xchg->dif_control.fcp_dl; - pkg.transfer_len = unf_xchg->data_len; /* Pcie data transfer length */ - pkg.xchg_contex = unf_xchg; - pkg.qos_level = unf_xchg->qos_level; - scsi_cmnd_info = &xchg->scsi_cmnd_info; - pkg.entry_count = unf_xchg->scsi_cmnd_info.entry_cnt; - if (unf_xchg->data_direction == DMA_NONE || !scsi_cmnd_info->sgl) - pkg.entry_count = 0; - - pkg.private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = - unf_xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME]; - pkg.private_data[PKG_PRIVATE_XCHG_VP_INDEX] = unf_lport->vp_index; - pkg.private_data[PKG_PRIVATE_XCHG_RPORT_INDEX] = unf_rport->rport_index; - pkg.private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = unf_xchg->hotpooltag; - - unf_select_sq(unf_xchg, &pkg); - pkg.fcp_cmnd = &unf_xchg->fcp_cmnd; - pkg.frame_head.csctl_sid = unf_lport->nport_id; - pkg.frame_head.rctl_did = unf_rport->nport_id; - pkg.upper_cmd = unf_xchg->scsi_cmnd_info.scsi_cmnd; - - /* exch->fcp_rsp_id --->>> pkg->buffer_ptr */ - pkg.frame_head.oxid_rxid = ((u32)unf_xchg->oxid << (u32)UNF_SHIFT_16 | unf_xchg->rxid); - - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_INFO, - "[info]LPort (0x%p), Nport ID(0x%x) RPort ID(0x%x) direction(0x%x) magic number(0x%x) IO to entry count(0x%x) hottag(0x%x)", - unf_lport, unf_lport->nport_id, unf_rport->nport_id, - xchg->data_direction, pkg.private_data[PKG_PRIVATE_XCHG_ALLOC_TIME], - pkg.entry_count, unf_xchg->hotpooltag); - - atomic_inc(&unf_rport->pending_io_cnt); - if (unf_rport->tape_support_needed && - (atomic_read(&unf_rport->pending_io_cnt) <= UNF_MAX_PENDING_IO_CNT)) { - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - unf_xchg->io_state |= INI_IO_STATE_REC_TIMEOUT_WAIT; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - scsi_cmnd_info->abort_time_out = scsi_cmnd_info->time_out; - scsi_cmnd_info->time_out = UNF_REC_TOV; - } - /* 3. add INI I/O timer if necessary */ - if (scsi_cmnd_info->time_out != 0) { - /* I/O inner timer, do not used at this time */ - unf_lport->xchg_mgr_temp.unf_xchg_add_timer(unf_xchg, - scsi_cmnd_info->time_out, UNF_TIMER_TYPE_REQ_IO); - } - - /* 4. 
R_Port state check */ - if (unlikely(unf_rport->lport_ini_state != UNF_PORT_STATE_LINKUP || - unf_rport->rp_state > UNF_RPORT_ST_READY)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[info]Port(0x%x) RPort(0x%p) NPortId(0x%x) inistate(0x%x): RPort state(0x%x) pUpperCmd(0x%p) is not ready", - unf_lport->port_id, unf_rport, unf_rport->nport_id, - unf_rport->lport_ini_state, unf_rport->rp_state, pkg.upper_cmd); - - result_size = unf_get_up_level_cmnd_errcode(scsi_cmnd_info->err_code_table, - scsi_cmnd_info->err_code_table_cout, - UNF_IO_INCOMPLETE); - scsi_cmnd_info->result = result_size; - - if (scsi_cmnd_info->time_out != 0) - unf_lport->xchg_mgr_temp.unf_xchg_cancel_timer(unf_xchg); - - unf_cm_free_xchg(unf_lport, unf_xchg); - - /* DID_IMM_RETRY */ - return RETURN_OK; - } else if (unf_rport->rp_state < UNF_RPORT_ST_READY) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[info]Port(0x%x) RPort(0x%p) NPortId(0x%x) inistate(0x%x): RPort state(0x%x) pUpperCmd(0x%p) is not ready", - unf_lport->port_id, unf_rport, unf_rport->nport_id, - unf_rport->lport_ini_state, unf_rport->rp_state, pkg.upper_cmd); - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - unf_xchg->io_state |= INI_IO_STATE_UPSEND_ERR; /* need retry */ - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - if (unlikely(scsi_cmnd_info->time_out != 0)) - unf_lport->xchg_mgr_temp.unf_xchg_cancel_timer((void *)unf_xchg); - - /* Host busy & need scsi retry */ - return UNF_RETURN_ERROR; - } - - /* 5. send scsi_cmnd to FC_LL Driver */ - if (unf_hardware_start_io(unf_lport, &pkg) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port (0x%x) pUpperCmd(0x%p) Hardware Send IO failed.", - unf_lport->port_id, pkg.upper_cmd); - - unf_release_esgls(unf_xchg); - - result_size = unf_get_up_level_cmnd_errcode(scsi_cmnd_info->err_code_table, - scsi_cmnd_info->err_code_table_cout, - UNF_IO_INCOMPLETE); - scsi_cmnd_info->result = result_size; - - if (scsi_cmnd_info->time_out != 0) - unf_lport->xchg_mgr_temp.unf_xchg_cancel_timer(unf_xchg); - - unf_cm_free_xchg(unf_lport, unf_xchg); - - /* SCSI_DONE */ - return RETURN_OK; - } - - return RETURN_OK; -} - -int unf_prefer_to_send_scsi_cmnd(struct unf_xchg *xchg) -{ - /* - * About INI_IO_STATE_DRABORT: - * 1. Set ABORT tag: Clean L_Port/V_Port Link Down I/O - * with: INI_busy_list, delay_list, delay_transfer_list, wait_list - * * - * 2. Set ABORT tag: for target session: - * with: INI_busy_list, delay_list, delay_transfer_list, wait_list - * a. R_Port remove - * b. Send PLOGI_ACC callback - * c. RCVD PLOGI - * d. RCVD LOGO - * * - * 3. if set ABORT: prevent send scsi_cmnd to target - */ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - int ret = RETURN_OK; - ulong flags = 0; - - unf_lport = xchg->lport; - - unf_rport = xchg->rport; - if (unlikely(!unf_lport || !unf_rport)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%p) or RPort(0x%p) is NULL", unf_lport, unf_rport); - - /* if happened (never happen): need retry */ - return UNF_RETURN_ERROR; - } - - /* 1. 
inc ref_cnt to protect exchange */ - ret = (int)unf_xchg_ref_inc(xchg, INI_SEND_CMND); - if (unlikely(ret != RETURN_OK)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) exhg(%p) exception ref(%d) ", unf_lport->port_id, - xchg, atomic_read(&xchg->ref_cnt)); - /* exchange exception, need retry */ - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - xchg->io_state |= INI_IO_STATE_UPSEND_ERR; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - /* INI_IO_STATE_UPSEND_ERR: Host busy --->>> need retry */ - return UNF_RETURN_ERROR; - } - - /* 2. Xchg Abort state check: Free EXCH if necessary */ - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - if (unlikely((xchg->io_state & INI_IO_STATE_UPABORT) || - (xchg->io_state & INI_IO_STATE_DRABORT))) { - /* Prevent to send: UP_ABORT/DRV_ABORT */ - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - xchg->scsi_cmnd_info.result = UNF_SCSI_HOST(DID_IMM_RETRY); - ret = RETURN_OK; - - unf_xchg_ref_dec(xchg, INI_SEND_CMND); - unf_cm_free_xchg(unf_lport, xchg); - - /* - * Release exchange & return directly: - * 1. FC LLDD rcvd ABTS before scsi_cmnd: do nothing - * 2. INI_IO_STATE_UPABORT/INI_IO_STATE_DRABORT: discard this - * cmnd directly - */ - return ret; - } - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - /* 3. Send FCP_CMND to FC_LL Driver */ - ret = unf_send_fcpcmnd(unf_lport, unf_rport, xchg); - if (unlikely(ret != RETURN_OK)) { - /* exchange exception, need retry */ - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) send exhg(%p) hottag(0x%x) to Rport(%p) NPortID(0x%x) state(0x%x) scsi_id(0x%x) failed", - unf_lport->port_id, xchg, xchg->hotpooltag, unf_rport, - unf_rport->nport_id, unf_rport->rp_state, unf_rport->scsi_id); - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - - xchg->io_state |= INI_IO_STATE_UPSEND_ERR; - /* need retry */ - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - /* INI_IO_STATE_UPSEND_ERR: Host busy --->>> need retry */ - unf_cm_free_xchg(unf_lport, xchg); - } - - /* 4. dec ref_cnt */ - unf_xchg_ref_dec(xchg, INI_SEND_CMND); - - return ret; -} - -struct unf_lport *unf_find_lport_by_scsi_cmd(struct unf_scsi_cmnd *scsi_cmnd) -{ - struct unf_lport *unf_lport = NULL; - - /* cmd -->> L_Port */ - unf_lport = (struct unf_lport *)UNF_GET_HOST_PORT_BY_CMND(scsi_cmnd); - if (unlikely(!unf_lport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Find Port by scsi_cmnd(0x%p) failed", scsi_cmnd); - - /* cmnd -->> scsi_host_id -->> L_Port */ - unf_lport = unf_find_lport_by_scsi_hostid(UNF_GET_SCSI_HOST_ID_BY_CMND(scsi_cmnd)); - } - - return unf_lport; -} - -int unf_cm_queue_command(struct unf_scsi_cmnd *scsi_cmnd) -{ - /* SCSI Command --->>> FC FCP Command */ - struct unf_lport *unf_lport = NULL; - struct unf_xchg *unf_xchg = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_rport_scsi_id_image *scsi_image_table = NULL; - u32 cmnd_result = 0; - int ret = RETURN_OK; - ulong flags = 0; - u32 scsi_id = 0; - u32 exhg_mgr_type = UNF_XCHG_MGR_TYPE_RANDOM; - - /* 1. Get L_Port */ - unf_lport = unf_find_lport_by_scsi_cmd(scsi_cmnd); - - /* - * corresponds to the insertion or removal scenario or the remove card - * scenario. This method is used to search for LPort information based - * on SCSI_HOST_ID. The Slave alloc is not invoked when LUNs are not - * scanned. Therefore, the Lport cannot be obtained. You need to obtain - * the Lport from the Lport linked list. - * * - * FC After Link Up, the first SCSI command is inquiry. - * Before inquiry, SCSI delivers slave_alloc. 
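
unf_prefer_to_send_scsi_cmnd() above brackets every use of the exchange with unf_xchg_ref_inc()/unf_xchg_ref_dec() so the object cannot disappear while the send path is still touching it. The stock kernel building block for that discipline is struct kref; the sketch below is a generic illustration of the inc-use-dec pattern, not the removed driver's own exchange code:

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct demo_xchg {
        struct kref ref;
        /* ... exchange payload ... */
    };

    static void demo_xchg_release(struct kref *kref)
    {
        kfree(container_of(kref, struct demo_xchg, ref));
    }

    static struct demo_xchg *demo_xchg_alloc(void)
    {
        struct demo_xchg *x = kzalloc(sizeof(*x), GFP_KERNEL);

        if (x)
            kref_init(&x->ref);             /* refcount starts at 1 */
        return x;
    }

    static void demo_send(struct demo_xchg *x)
    {
        kref_get(&x->ref);                  /* pin while we use it */
        /* ... hand the exchange to the hardware here ... */
        kref_put(&x->ref, demo_xchg_release);   /* drop the pin */
    }

The removed code additionally tags each inc/dec with a reason (INI_SEND_CMND, INI_EH_ABORT, ...), which makes a leaked reference much easier to attribute.
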
- */ - if (!unf_lport) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Find Port by scsi cmd(0x%p) failed", scsi_cmnd); - - /* find from ini_error_code_table1 */ - cmnd_result = unf_get_up_level_cmnd_errcode(scsi_cmnd->err_code_table, - scsi_cmnd->err_code_table_cout, - UNF_IO_NO_LPORT); - - /* DID_NOT_CONNECT & SCSI_DONE & RETURN_OK(0) & I/O error */ - unf_complete_cmnd(scsi_cmnd, cmnd_result); - return RETURN_OK; - } - - /* Get Local SCSI_Image_table & SCSI_ID */ - scsi_image_table = &unf_lport->rport_scsi_table; - scsi_id = scsi_cmnd->scsi_id; - - /* 2. L_Port State check */ - if (unlikely(unf_lport->port_removing || unf_lport->pcie_link_down)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) is removing(%d) or pcielinkdown(%d) and return with scsi_id(0x%x)", - unf_lport->port_id, unf_lport->port_removing, - unf_lport->pcie_link_down, UNF_GET_SCSI_ID_BY_CMND(scsi_cmnd)); - - cmnd_result = unf_get_up_level_cmnd_errcode(scsi_cmnd->err_code_table, - scsi_cmnd->err_code_table_cout, - UNF_IO_NO_LPORT); - - UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, (cmnd_result >> UNF_SHIFT_16)); - - /* DID_NOT_CONNECT & SCSI_DONE & RETURN_OK(0) & I/O error */ - unf_complete_cmnd(scsi_cmnd, cmnd_result); - return RETURN_OK; - } - - /* 3. Get R_Port */ - unf_rport = unf_find_rport_by_scsi_id(unf_lport, scsi_cmnd->err_code_table, - scsi_cmnd->err_code_table_cout, - UNF_GET_SCSI_ID_BY_CMND(scsi_cmnd), &cmnd_result); - if (unlikely(!unf_rport)) { - /* never happen: do not care */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) find RPort by scsi_id(0x%x) failed", - unf_lport->port_id, UNF_GET_SCSI_ID_BY_CMND(scsi_cmnd)); - - UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, (cmnd_result >> UNF_SHIFT_16)); - - /* DID_NOT_CONNECT/DID_SOFT_ERROR & SCSI_DONE & RETURN_OK(0) & - * I/O error - */ - unf_complete_cmnd(scsi_cmnd, cmnd_result); - return RETURN_OK; - } - - /* 4. Can't get exchange & return host busy, retry by uplevel */ - unf_xchg = (struct unf_xchg *)unf_cm_get_free_xchg(unf_lport, - exhg_mgr_type << UNF_SHIFT_16 | UNF_XCHG_TYPE_INI); - if (unlikely(!unf_xchg)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[err]Port(0x%x) get free exchange for INI IO(0x%x) failed", - unf_lport->port_id, UNF_GET_SCSI_ID_BY_CMND(scsi_cmnd)); - - /* NOTE: need scsi retry */ - return UNF_RETURN_ERROR; - } - - unf_xchg->scsi_cmnd_info.result = UNF_SCSI_HOST(DID_ERROR); - - /* 5. Save the SCSI CMND information in advance. */ - ret = unf_save_scsi_cmnd_to_xchg(unf_lport, unf_rport, unf_xchg, scsi_cmnd); - if (unlikely(ret != RETURN_OK)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[err]Port(0x%x) save scsi_cmnd info(0x%x) to exchange failed", - unf_lport->port_id, UNF_GET_SCSI_ID_BY_CMND(scsi_cmnd)); - - spin_lock_irqsave(&unf_xchg->xchg_state_lock, flags); - unf_xchg->io_state |= INI_IO_STATE_UPSEND_ERR; - spin_unlock_irqrestore(&unf_xchg->xchg_state_lock, flags); - - /* INI_IO_STATE_UPSEND_ERR: Don't Do SCSI_DONE, need retry I/O */ - unf_cm_free_xchg(unf_lport, unf_xchg); - - /* NOTE: need scsi retry */ - return UNF_RETURN_ERROR; - } - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[info]Get exchange(0x%p) hottag(0x%x) for Pcmd:%p,Cmdsn:0x%lx,WorldId:%d", - unf_xchg, unf_xchg->hotpooltag, scsi_cmnd->upper_cmnd, - (ulong)scsi_cmnd->cmnd_sn, scsi_cmnd->world_id); - /* 6. 
Send SCSI CMND */ - ret = unf_prefer_to_send_scsi_cmnd(unf_xchg); - - return ret; -} diff --git a/drivers/scsi/spfc/common/unf_io.h b/drivers/scsi/spfc/common/unf_io.h deleted file mode 100644 index d8e50eb8035e..000000000000 --- a/drivers/scsi/spfc/common/unf_io.h +++ /dev/null @@ -1,96 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_IO_H -#define UNF_IO_H - -#include "unf_type.h" -#include "unf_scsi_common.h" -#include "unf_exchg.h" -#include "unf_rport.h" - -#define UNF_MAX_TARGET_NUMBER 2048 -#define UNF_DEFAULT_MAX_LUN 0xFFFF -#define UNF_MAX_DMA_SEGS 0x400 -#define UNF_MAX_SCSI_CMND_LEN 16 -#define UNF_MAX_BUS_CHANNEL 0 -#define UNF_DMA_BOUNDARY 0xffffffffffffffff -#define UNF_MAX_CMND_PER_LUN 64 /* LUN max command */ -#define UNF_CHECK_LUN_ID_MATCH(lun_id, raw_lun_id, scsi_id, xchg) \ - (((lun_id) == (raw_lun_id) || (lun_id) == INVALID_VALUE64) && \ - ((scsi_id) == (xchg)->scsi_id)) - -#define NO_SENSE 0x00 -#define RECOVERED_ERROR 0x01 -#define NOT_READY 0x02 -#define MEDIUM_ERROR 0x03 -#define HARDWARE_ERROR 0x04 -#define ILLEGAL_REQUEST 0x05 -#define UNIT_ATTENTION 0x06 -#define DATA_PROTECT 0x07 -#define BLANK_CHECK 0x08 -#define COPY_ABORTED 0x0a -#define ABORTED_COMMAND 0x0b -#define VOLUME_OVERFLOW 0x0d -#define MISCOMPARE 0x0e - -#define SENSE_DATA_RESPONSE_CODE 0x70 -#define ADDITINONAL_SENSE_LEN 0x7 - -extern u32 sector_size_flag; - -#define UNF_GET_SCSI_HOST_ID_BY_CMND(cmd) ((cmd)->scsi_host_id) -#define UNF_GET_SCSI_ID_BY_CMND(cmd) ((cmd)->scsi_id) -#define UNF_GET_HOST_PORT_BY_CMND(cmd) ((cmd)->drv_private) -#define UNF_GET_FCP_CMND(cmd) ((cmd)->pcmnd[ARRAY_INDEX_0]) -#define UNF_GET_DATA_LEN(cmd) ((cmd)->transfer_len) -#define UNF_GET_DATA_DIRECTION(cmd) ((cmd)->data_direction) - -#define UNF_GET_HOST_CMND(cmd) ((cmd)->upper_cmnd) -#define UNF_GET_CMND_DONE_FUNC(cmd) ((cmd)->done) -#define UNF_GET_UP_LEVEL_CMND_DONE(cmd) ((cmd)->uplevel_done) -#define UNF_GET_SGL_ENTRY_BUF_FUNC(cmd) ((cmd)->unf_ini_get_sgl_entry) -#define UNF_GET_SENSE_BUF_ADDR(cmd) ((cmd)->sense_buf) -#define UNF_GET_ERR_CODE_TABLE(cmd) ((cmd)->err_code_table) -#define UNF_GET_ERR_CODE_TABLE_COUNT(cmd) ((cmd)->err_code_table_cout) - -#define UNF_SET_HOST_CMND(cmd, host_cmd) ((cmd)->upper_cmnd = (host_cmd)) -#define UNF_SER_CMND_DONE_FUNC(cmd, pfn) ((cmd)->done = (pfn)) -#define UNF_SET_UP_LEVEL_CMND_DONE_FUNC(cmd, pfn) ((cmd)->uplevel_done = (pfn)) - -#define UNF_SET_RESID(cmd, uiresid) ((cmd)->resid = (uiresid)) -#define UNF_SET_CMND_RESULT(cmd, uiresult) ((cmd)->result = ((int)(uiresult))) - -#define UNF_DONE_SCSI_CMND(cmd) ((cmd)->done(cmd)) - -#define UNF_GET_CMND_SGL(cmd) ((cmd)->sgl) -#define UNF_INI_GET_DIF_SGL(cmd) ((cmd)->dif_control.dif_sgl) - -u32 unf_ini_scsi_completed(void *lport, struct unf_frame_pkg *pkg); -u32 unf_ini_get_sgl_entry(void *pkg, char **buf, u32 *buf_len); -u32 unf_ini_get_dif_sgl_entry(void *pkg, char **buf, u32 *buf_len); -void unf_complete_cmnd(struct unf_scsi_cmnd *scsi_cmnd, u32 result_size); -void unf_done_ini_xchg(struct unf_xchg *xchg); -u32 unf_tmf_timeout_recovery_special(void *rport, void *xchg); -u32 unf_tmf_timeout_recovery_default(void *rport, void *xchg); -void unf_abts_timeout_recovery_default(void *rport, void *xchg); -int unf_cm_queue_command(struct unf_scsi_cmnd *scsi_cmnd); -int unf_cm_eh_abort_handler(struct unf_scsi_cmnd *scsi_cmnd); -int unf_cm_eh_device_reset_handler(struct unf_scsi_cmnd *scsi_cmnd); -int unf_cm_target_reset_handler(struct unf_scsi_cmnd *scsi_cmnd); -int 
unf_cm_bus_reset_handler(struct unf_scsi_cmnd *scsi_cmnd); -int unf_cm_virtual_reset_handler(struct unf_scsi_cmnd *scsi_cmnd); -struct unf_rport *unf_find_rport_by_scsi_id(struct unf_lport *lport, - struct unf_ini_error_code *errcode_table, - u32 errcode_table_count, - u32 scsi_id, u32 *scsi_result); -u32 UNF_IOExchgDelayProcess(struct unf_lport *lport, struct unf_xchg *xchg); -struct unf_lport *unf_find_lport_by_scsi_cmd(struct unf_scsi_cmnd *scsi_cmnd); -int unf_send_scsi_mgmt_cmnd(struct unf_xchg *xchg, struct unf_lport *lport, - struct unf_rport *rport, - struct unf_scsi_cmnd *scsi_cmnd, - enum unf_task_mgmt_cmd task_mgnt_cmd_type); -void unf_tmf_abnormal_recovery(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg); - -#endif diff --git a/drivers/scsi/spfc/common/unf_io_abnormal.c b/drivers/scsi/spfc/common/unf_io_abnormal.c deleted file mode 100644 index 4e268ac026ca..000000000000 --- a/drivers/scsi/spfc/common/unf_io_abnormal.c +++ /dev/null @@ -1,986 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_io_abnormal.h" -#include "unf_log.h" -#include "unf_scsi_common.h" -#include "unf_rport.h" -#include "unf_io.h" -#include "unf_portman.h" -#include "unf_service.h" - -static int unf_send_abts_success(struct unf_lport *lport, struct unf_xchg *xchg, - struct unf_scsi_cmnd *scsi_cmnd, - u32 time_out_value) -{ - bool need_wait_marker = true; - struct unf_rport_scsi_id_image *scsi_image_table = NULL; - u32 scsi_id = 0; - u32 return_value = 0; - ulong xchg_flag = 0; - - spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag); - need_wait_marker = (xchg->abts_state & MARKER_STS_RECEIVED) ? false : true; - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flag); - - if (need_wait_marker) { - if (down_timeout(&xchg->task_sema, (s64)msecs_to_jiffies(time_out_value))) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) recv abts marker timeout,Exch(0x%p) OX_ID(0x%x 0x%x) RX_ID(0x%x)", - lport->port_id, xchg, xchg->oxid, - xchg->hotpooltag, xchg->rxid); - - /* Cancel abts rsp timer when sema timeout */ - lport->xchg_mgr_temp.unf_xchg_cancel_timer((void *)xchg); - - /* Cnacel the flag of INI_IO_STATE_UPABORT and process - * the io in TMF - */ - spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag); - xchg->io_state &= ~INI_IO_STATE_UPABORT; - xchg->io_state |= INI_IO_STATE_TMF_ABORT; - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flag); - - return UNF_SCSI_ABORT_FAIL; - } - } else { - xchg->ucode_abts_state = UNF_IO_SUCCESS; - } - - scsi_image_table = &lport->rport_scsi_table; - scsi_id = scsi_cmnd->scsi_id; - - spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag); - if (xchg->ucode_abts_state == UNF_IO_SUCCESS || - xchg->scsi_cmnd_info.result == UNF_IO_ABORT_PORT_REMOVING) { - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flag); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]Port(0x%x) Send ABTS succeed and recv marker Exch(0x%p) OX_ID(0x%x) RX_ID(0x%x) marker status(0x%x)", - lport->port_id, xchg, xchg->oxid, xchg->rxid, xchg->ucode_abts_state); - return_value = DID_RESET; - UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, return_value); - unf_complete_cmnd(scsi_cmnd, DID_RESET << UNF_SHIFT_16); - return UNF_SCSI_ABORT_SUCCESS; - } - - xchg->io_state &= ~INI_IO_STATE_UPABORT; - xchg->io_state |= INI_IO_STATE_TMF_ABORT; - - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flag); - - /* Cancel abts rsp timer when sema timeout */ - lport->xchg_mgr_temp.unf_xchg_cancel_timer((void 
*)xchg); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Port(0x%x) send ABTS failed. Exch(0x%p) oxid(0x%x) hot_tag(0x%x) ret(0x%x) xchg->io_state (0x%x)", - lport->port_id, xchg, xchg->oxid, xchg->hotpooltag, - xchg->scsi_cmnd_info.result, xchg->io_state); - - /* return fail and then enter TMF */ - return UNF_SCSI_ABORT_FAIL; -} - -static int unf_ini_abort_cmnd(struct unf_lport *lport, struct unf_xchg *xchg, - struct unf_scsi_cmnd *scsi_cmnd) -{ - /* - * About INI_IO_STATE_UPABORT: - * * - * 1. Check: L_Port destroy - * 2. Check: I/O XCHG timeout - * 3. Set ABORT: send ABTS - * 4. Set ABORT: LUN reset - * 5. Set ABORT: Target reset - * 6. Check: Prevent to send I/O to target - * (unf_prefer_to_send_scsi_cmnd) - * 7. Check: Done INI XCHG --->>> do not call scsi_done, return directly - * 8. Check: INI SCSI Complete --->>> do not call scsi_done, return - * directly - */ -#define UNF_RPORT_NOTREADY_WAIT_SEM_TIMEOUT (2000) - - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - ulong rport_flag = 0; - ulong xchg_flag = 0; - struct unf_rport_scsi_id_image *scsi_image_table = NULL; - u32 scsi_id = 0; - u32 time_out_value = (u32)UNF_WAIT_SEM_TIMEOUT; - u32 return_value = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_SCSI_ABORT_FAIL); - unf_lport = lport; - - /* 1. Xchg State Set: INI_IO_STATE_UPABORT */ - spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag); - xchg->io_state |= INI_IO_STATE_UPABORT; - unf_rport = xchg->rport; - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flag); - - /* 2. R_Port check */ - if (unlikely(!unf_rport)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) send ABTS but no RPort, OX_ID(0x%x) RX_ID(0x%x)", - unf_lport->port_id, xchg->oxid, xchg->rxid); - - return UNF_SCSI_ABORT_SUCCESS; - } - - spin_lock_irqsave(&unf_rport->rport_state_lock, rport_flag); - if (unlikely(unf_rport->rp_state != UNF_RPORT_ST_READY)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) find RPort's state(0x%x) is not ready but send ABTS also, exchange(0x%p) tag(0x%x)", - unf_lport->port_id, unf_rport->rp_state, xchg, xchg->hotpooltag); - - /* - * Important: Send ABTS also & update timer - * Purpose: only used for release chip (uCode) resource, - * continue - */ - time_out_value = UNF_RPORT_NOTREADY_WAIT_SEM_TIMEOUT; - } - spin_unlock_irqrestore(&unf_rport->rport_state_lock, rport_flag); - - /* 3. 
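
unf_send_abts_success() above parks on xchg->task_sema with down_timeout() until the ABTS marker arrives (the marker-status path is expected to wake it with up()); on timeout the abort is downgraded to the TMF path. A minimal sketch of that handshake, with the 2-second timeout chosen only for illustration:

    #include <linux/semaphore.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    struct demo_abort {
        struct semaphore marker_sema;
    };

    /* completion/interrupt path: the marker for the aborted exchange arrived */
    static void demo_marker_arrived(struct demo_abort *ab)
    {
        up(&ab->marker_sema);
    }

    /* abort path */
    static int demo_wait_for_marker(struct demo_abort *ab)
    {
        sema_init(&ab->marker_sema, 0);     /* must happen before the ABTS is issued */

        /* ... issue the ABTS to the hardware here ... */

        if (down_timeout(&ab->marker_sema, msecs_to_jiffies(2000)))
            return -ETIMEDOUT;              /* caller falls back to TMF recovery */
        return 0;
    }
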
L_Port State check */ - if (unlikely(unf_lport->port_removing)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) is removing", unf_lport->port_id); - - xchg->io_state &= ~INI_IO_STATE_UPABORT; - - return UNF_SCSI_ABORT_FAIL; - } - - scsi_image_table = &unf_lport->rport_scsi_table; - scsi_id = scsi_cmnd->scsi_id; - - /* If pcie linkdown, complete this io and flush all io */ - if (unlikely(unf_lport->pcie_link_down)) { - return_value = DID_RESET; - UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, return_value); - unf_complete_cmnd(scsi_cmnd, DID_RESET << UNF_SHIFT_16); - unf_free_lport_all_xchg(lport); - return UNF_SCSI_ABORT_SUCCESS; - } - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_KEVENT, - "[abort]Port(0x%x) Exchg(0x%p) delay(%llu) SID(0x%x) DID(0x%x) wwpn(0x%llx) hottag(0x%x) scsi_id(0x%x) lun_id(0x%x) cmdsn(0x%llx) Ini:%p", - unf_lport->port_id, xchg, - (u64)jiffies_to_msecs(jiffies) - (u64)jiffies_to_msecs(xchg->alloc_jif), - xchg->sid, xchg->did, unf_rport->port_name, xchg->hotpooltag, - scsi_cmnd->scsi_id, (u32)scsi_cmnd->raw_lun_id, scsi_cmnd->cmnd_sn, - scsi_cmnd->pinitiator); - - /* Init abts marker semaphore */ - sema_init(&xchg->task_sema, 0); - - if (xchg->scsi_cmnd_info.time_out != 0) - unf_lport->xchg_mgr_temp.unf_xchg_cancel_timer(xchg); - - lport->xchg_mgr_temp.unf_xchg_add_timer((void *)xchg, (ulong)UNF_WAIT_ABTS_RSP_TIMEOUT, - UNF_TIMER_TYPE_INI_ABTS); - - /* 4. Send INI ABTS CMND */ - if (unf_send_abts(unf_lport, xchg) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) Send ABTS failed. Exch(0x%p) hottag(0x%x)", - unf_lport->port_id, xchg, xchg->hotpooltag); - - lport->xchg_mgr_temp.unf_xchg_cancel_timer((void *)xchg); - - spin_lock_irqsave(&xchg->xchg_state_lock, xchg_flag); - xchg->io_state &= ~INI_IO_STATE_UPABORT; - xchg->io_state |= INI_IO_STATE_TMF_ABORT; - - spin_unlock_irqrestore(&xchg->xchg_state_lock, xchg_flag); - - return UNF_SCSI_ABORT_FAIL; - } - - return unf_send_abts_success(unf_lport, xchg, scsi_cmnd, time_out_value); -} - -static void unf_flush_ini_resp_que(struct unf_lport *lport) -{ - FC_CHECK_RETURN_VOID(lport); - - if (lport->low_level_func.service_op.unf_flush_ini_resp_que) - (void)lport->low_level_func.service_op.unf_flush_ini_resp_que(lport->fc_port); -} - -int unf_cm_eh_abort_handler(struct unf_scsi_cmnd *scsi_cmnd) -{ - /* - * SCSI ABORT Command --->>> FC ABTS Command - * If return ABORT_FAIL, then enter TMF process - */ - struct unf_lport *unf_lport = NULL; - struct unf_xchg *unf_xchg = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_lport *xchg_lport = NULL; - int ret = UNF_SCSI_ABORT_SUCCESS; - ulong flag = 0; - - /* 1. Get L_Port: Point to Scsi_host */ - unf_lport = unf_find_lport_by_scsi_cmd(scsi_cmnd); - if (!unf_lport) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Can't find port by scsi host id(0x%x)", - UNF_GET_SCSI_HOST_ID_BY_CMND(scsi_cmnd)); - return UNF_SCSI_ABORT_FAIL; - } - - /* 2. find target Xchg for INI Abort CMND */ - unf_xchg = unf_cm_lookup_xchg_by_cmnd_sn(unf_lport, scsi_cmnd->cmnd_sn, - scsi_cmnd->world_id, - scsi_cmnd->pinitiator); - if (unlikely(!unf_xchg)) { - FC_DRV_PRINT(UNF_LOG_ABNORMAL, UNF_WARN, - "[warn]Port(0x%x) can't find exchange by Cmdsn(0x%lx),Ini:%p", - unf_lport->port_id, (ulong)scsi_cmnd->cmnd_sn, - scsi_cmnd->pinitiator); - - unf_flush_ini_resp_que(unf_lport); - - return UNF_SCSI_ABORT_SUCCESS; - } - - /* 3. 
increase ref_cnt to protect exchange */ - ret = (int)unf_xchg_ref_inc(unf_xchg, INI_EH_ABORT); - if (unlikely(ret != RETURN_OK)) { - unf_flush_ini_resp_que(unf_lport); - - return UNF_SCSI_ABORT_SUCCESS; - } - - scsi_cmnd->upper_cmnd = unf_xchg->scsi_cmnd_info.scsi_cmnd; - unf_xchg->debug_hook = true; - - /* 4. Exchang L_Port/R_Port Get & check */ - spin_lock_irqsave(&unf_xchg->xchg_state_lock, flag); - xchg_lport = unf_xchg->lport; - unf_rport = unf_xchg->rport; - spin_unlock_irqrestore(&unf_xchg->xchg_state_lock, flag); - - if (unlikely(!xchg_lport || !unf_rport)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Exchange(0x%p)'s L_Port or R_Port is NULL, state(0x%x)", - unf_xchg, unf_xchg->io_state); - - unf_xchg_ref_dec(unf_xchg, INI_EH_ABORT); - - if (!xchg_lport) - /* for L_Port */ - return UNF_SCSI_ABORT_FAIL; - /* for R_Port */ - return UNF_SCSI_ABORT_SUCCESS; - } - - /* 5. Send INI Abort Cmnd */ - ret = unf_ini_abort_cmnd(xchg_lport, unf_xchg, scsi_cmnd); - - /* 6. decrease exchange ref_cnt */ - unf_xchg_ref_dec(unf_xchg, INI_EH_ABORT); - - return ret; -} - -u32 unf_tmf_timeout_recovery_default(void *rport, void *xchg) -{ - struct unf_lport *unf_lport = NULL; - ulong flag = 0; - struct unf_xchg *unf_xchg = (struct unf_xchg *)xchg; - struct unf_rport *unf_rport = (struct unf_rport *)rport; - - unf_lport = unf_xchg->lport; - FC_CHECK_RETURN_VALUE(unf_lport, UNF_RETURN_ERROR); - - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_LOGO); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - unf_rport_enter_logo(unf_lport, unf_rport); - - return RETURN_OK; -} - -void unf_abts_timeout_recovery_default(void *rport, void *xchg) -{ - struct unf_lport *unf_lport = NULL; - ulong flag = 0; - ulong flags = 0; - struct unf_xchg *unf_xchg = (struct unf_xchg *)xchg; - struct unf_rport *unf_rport = (struct unf_rport *)rport; - - unf_lport = unf_xchg->lport; - FC_CHECK_RETURN_VOID(unf_lport); - - spin_lock_irqsave(&unf_xchg->xchg_state_lock, flags); - if (INI_IO_STATE_DONE & unf_xchg->io_state) { - spin_unlock_irqrestore(&unf_xchg->xchg_state_lock, flags); - - return; - } - spin_unlock_irqrestore(&unf_xchg->xchg_state_lock, flags); - - if (unf_xchg->rport_bind_jifs != unf_rport->rport_alloc_jifs) - return; - - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_LOGO); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - unf_rport_enter_logo(unf_lport, unf_rport); -} - -u32 unf_tmf_timeout_recovery_special(void *rport, void *xchg) -{ - /* Do port reset or R_Port LOGO */ - int ret = UNF_RETURN_ERROR; - struct unf_lport *unf_lport = NULL; - struct unf_xchg *unf_xchg = (struct unf_xchg *)xchg; - struct unf_rport *unf_rport = (struct unf_rport *)rport; - - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(unf_xchg->lport, UNF_RETURN_ERROR); - - unf_lport = unf_xchg->lport->root_lport; - FC_CHECK_RETURN_VALUE(unf_lport, UNF_RETURN_ERROR); - - /* 1. 
TMF response timeout & Marker STS timeout */ - if (!(unf_xchg->tmf_state & - (MARKER_STS_RECEIVED | TMF_RESPONSE_RECEIVED))) { - /* TMF timeout & marker timeout */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) receive marker status timeout and do recovery", - unf_lport->port_id); - - /* Do port reset */ - ret = unf_cm_reset_port(unf_lport->port_id); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) do reset failed", - unf_lport->port_id); - - return UNF_RETURN_ERROR; - } - - return RETURN_OK; - } - - /* 2. default case: Do LOGO process */ - unf_tmf_timeout_recovery_default(unf_rport, unf_xchg); - - return RETURN_OK; -} - -void unf_tmf_abnormal_recovery(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ - /* - * for device(lun)/target(session) reset: - * Do port reset or R_Port LOGO - */ - if (lport->unf_tmf_abnormal_recovery) - lport->unf_tmf_abnormal_recovery((void *)rport, (void *)xchg); -} - -int unf_cm_eh_device_reset_handler(struct unf_scsi_cmnd *scsi_cmnd) -{ - /* SCSI Device/LUN Reset Command --->>> FC LUN/Device Reset Command */ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_xchg *unf_xchg = NULL; - u32 cmnd_result = 0; - int ret = SUCCESS; - - FC_CHECK_RETURN_VALUE(scsi_cmnd, FAILED); - FC_CHECK_RETURN_VALUE(scsi_cmnd->lun_id, FAILED); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[event]Enter device/LUN reset handler"); - - /* 1. Get L_Port */ - unf_lport = unf_find_lport_by_scsi_cmd(scsi_cmnd); - if (!unf_lport) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Can't find port by scsi_host_id(0x%x)", - UNF_GET_SCSI_HOST_ID_BY_CMND(scsi_cmnd)); - - return FAILED; - } - - /* 2. L_Port State checking */ - if (unlikely(unf_lport->port_removing)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%p) is removing", unf_lport); - - return FAILED; - } - - /* - * 3. Get R_Port: no rport is found or rport is not ready,return ok - * from: L_Port -->> rport_scsi_table (image table) -->> - * rport_info_table - */ - unf_rport = unf_find_rport_by_scsi_id(unf_lport, scsi_cmnd->err_code_table, - scsi_cmnd->err_code_table_cout, - UNF_GET_SCSI_ID_BY_CMND(scsi_cmnd), &cmnd_result); - if (unlikely(!unf_rport)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) Can't find rport by scsi_id(0x%x)", - unf_lport->port_id, UNF_GET_SCSI_ID_BY_CMND(scsi_cmnd)); - - return SUCCESS; - } - - /* - * 4. Set the I/O of the corresponding LUN to abort. - * * - * LUN Reset: set UP_ABORT tag, with: - * INI_Busy_list, IO_Wait_list, - * IO_Delay_list, IO_Delay_transfer_list - */ - unf_cm_xchg_abort_by_lun(unf_lport, unf_rport, *((u64 *)scsi_cmnd->lun_id), NULL, false); - - /* 5. R_Port state check */ - if (unlikely(unf_rport->rp_state != UNF_RPORT_ST_READY)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) RPort(0x%x) state(0x%x) SCSI Command(0x%p), rport is not ready", - unf_lport->port_id, unf_rport->nport_id, - unf_rport->rp_state, scsi_cmnd); - - return SUCCESS; - } - - /* 6. Get & inc ref_cnt free Xchg for Device reset */ - unf_xchg = (struct unf_xchg *)unf_cm_get_free_xchg(unf_lport, UNF_XCHG_TYPE_INI); - if (unlikely(!unf_xchg)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%p) can't get free exchange", unf_lport); - - return FAILED; - } - - /* increase ref_cnt for protecting exchange */ - ret = (int)unf_xchg_ref_inc(unf_xchg, INI_EH_DEVICE_RESET); - FC_CHECK_RETURN_VALUE((ret == RETURN_OK), FAILED); - - /* 7. 
Send Device/LUN Reset to Low level */ - ret = unf_send_scsi_mgmt_cmnd(unf_xchg, unf_lport, unf_rport, scsi_cmnd, - UNF_FCP_TM_LOGICAL_UNIT_RESET); - if (unlikely(ret == FAILED)) { - /* - * Do port reset or R_Port LOGO: - * 1. FAILED: send failed - * 2. FAILED: semaphore timeout - * 3. SUCCESS: rcvd rsp & semaphore has been waken up - */ - unf_tmf_abnormal_recovery(unf_lport, unf_rport, unf_xchg); - } - - /* - * 8. Release resource immediately if necessary - * NOTE: here, semaphore timeout or rcvd rsp(semaphore has been waken - * up) - */ - if (likely(!unf_lport->port_removing || unf_lport->root_lport != unf_lport)) - unf_cm_free_xchg(unf_xchg->lport, unf_xchg); - - /* decrease ref_cnt */ - unf_xchg_ref_dec(unf_xchg, INI_EH_DEVICE_RESET); - - return SUCCESS; -} - -int unf_cm_target_reset_handler(struct unf_scsi_cmnd *scsi_cmnd) -{ - /* SCSI Target Reset Command --->>> FC Session Reset/Delete Command */ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_xchg *unf_xchg = NULL; - u32 cmnd_result = 0; - int ret = SUCCESS; - - FC_CHECK_RETURN_VALUE(scsi_cmnd, FAILED); - FC_CHECK_RETURN_VALUE(scsi_cmnd->lun_id, FAILED); - - /* 1. Get L_Port */ - unf_lport = unf_find_lport_by_scsi_cmd(scsi_cmnd); - if (!unf_lport) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Can't find port by scsi_host_id(0x%x)", - UNF_GET_SCSI_HOST_ID_BY_CMND(scsi_cmnd)); - - return FAILED; - } - - /* 2. L_Port State check */ - if (unlikely(unf_lport->port_removing)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%p) is removing", unf_lport); - - return FAILED; - } - - /* - * 3. Get R_Port: no rport is found or rport is not ready,return ok - * from: L_Port -->> rport_scsi_table (image table) -->> - * rport_info_table - */ - unf_rport = unf_find_rport_by_scsi_id(unf_lport, scsi_cmnd->err_code_table, - scsi_cmnd->err_code_table_cout, - UNF_GET_SCSI_ID_BY_CMND(scsi_cmnd), &cmnd_result); - if (unlikely(!unf_rport)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Can't find rport by scsi_id(0x%x)", - UNF_GET_SCSI_ID_BY_CMND(scsi_cmnd)); - - return SUCCESS; - } - - /* - * 4. set UP_ABORT on Target IO and Session IO - * * - * LUN Reset: set UP_ABORT tag, with: - * INI_Busy_list, IO_Wait_list, - * IO_Delay_list, IO_Delay_transfer_list - */ - unf_cm_xchg_abort_by_session(unf_lport, unf_rport); - - /* 5. R_Port state check */ - if (unlikely(unf_rport->rp_state != UNF_RPORT_ST_READY)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) RPort(0x%x) state(0x%x) is not ready, SCSI Command(0x%p)", - unf_lport->port_id, unf_rport->nport_id, - unf_rport->rp_state, scsi_cmnd); - - return SUCCESS; - } - - /* 6. Get free Xchg for Target Reset CMND */ - unf_xchg = (struct unf_xchg *)unf_cm_get_free_xchg(unf_lport, UNF_XCHG_TYPE_INI); - if (unlikely(!unf_xchg)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%p) can't get free exchange", unf_lport); - - return FAILED; - } - - /* increase ref_cnt to protect exchange */ - ret = (int)unf_xchg_ref_inc(unf_xchg, INI_EH_DEVICE_RESET); - FC_CHECK_RETURN_VALUE((ret == RETURN_OK), FAILED); - - /* 7. Send Target Reset Cmnd to low-level */ - ret = unf_send_scsi_mgmt_cmnd(unf_xchg, unf_lport, unf_rport, scsi_cmnd, - UNF_FCP_TM_TARGET_RESET); - if (unlikely(ret == FAILED)) { - /* - * Do port reset or R_Port LOGO: - * 1. FAILED: send failed - * 2. FAILED: semaphore timeout - * 3. SUCCESS: rcvd rsp & semaphore has been waken up - */ - unf_tmf_abnormal_recovery(unf_lport, unf_rport, unf_xchg); - } - - /* - * 8. 
Release resource immediately if necessary - * NOTE: here, semaphore timeout or rcvd rsp(semaphore has been waken - * up) - */ - if (likely(!unf_lport->port_removing || unf_lport->root_lport != unf_lport)) - unf_cm_free_xchg(unf_xchg->lport, unf_xchg); - - /* decrease exchange ref_cnt */ - unf_xchg_ref_dec(unf_xchg, INI_EH_DEVICE_RESET); - - return SUCCESS; -} - -int unf_cm_bus_reset_handler(struct unf_scsi_cmnd *scsi_cmnd) -{ - /* SCSI BUS Reset Command --->>> FC Port Reset Command */ - struct unf_lport *unf_lport = NULL; - int cmnd_result = 0; - - /* 1. Get L_Port */ - unf_lport = unf_find_lport_by_scsi_cmd(scsi_cmnd); - if (!unf_lport) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Can't find port by scsi_host_id(0x%x)", - UNF_GET_SCSI_HOST_ID_BY_CMND(scsi_cmnd)); - - return FAILED; - } - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_KEVENT, - "[event]Do port reset with scsi_bus_reset"); - - cmnd_result = unf_cm_reset_port(unf_lport->port_id); - if (unlikely(cmnd_result == UNF_RETURN_ERROR)) - return FAILED; - else - return SUCCESS; -} - -void unf_process_scsi_mgmt_result(struct unf_frame_pkg *pkg, - struct unf_xchg *xchg) -{ - u8 *rsp_info = NULL; - u8 rsp_code = 0; - u32 code_index = 0; - - /* - * LLT found that:RSP_CODE is the third byte of - * FCP_RSP_INFO, on Little endian should be byte 0, For - * detail FCP_4 Table 26 FCP_RSP_INFO field format - * * - * 1. state setting - * 2. wake up semaphore - */ - FC_CHECK_RETURN_VOID(pkg); - FC_CHECK_RETURN_VOID(xchg); - - xchg->tmf_state |= TMF_RESPONSE_RECEIVED; - - if (UNF_GET_LL_ERR(pkg) != UNF_IO_SUCCESS || - pkg->unf_rsp_pload_bl.length > UNF_RESPONE_DATA_LEN) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Send scsi manage command failed with error code(0x%x) resp len(0x%x)", - UNF_GET_LL_ERR(pkg), pkg->unf_rsp_pload_bl.length); - - xchg->scsi_cmnd_info.result = UNF_IO_FAILED; - - /* wakeup semaphore & return */ - up(&xchg->task_sema); - - return; - } - - rsp_info = pkg->unf_rsp_pload_bl.buffer_ptr; - if (rsp_info && pkg->unf_rsp_pload_bl.length != 0) { - /* change to little end if necessary */ - if (pkg->byte_orders & UNF_BIT_3) - unf_big_end_to_cpu(rsp_info, pkg->unf_rsp_pload_bl.length); - } - - if (!rsp_info) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]FCP response data pointer is NULL with Xchg TAG(0x%x)", - xchg->hotpooltag); - - xchg->scsi_cmnd_info.result = UNF_IO_SUCCESS; - - /* wakeup semaphore & return */ - up(&xchg->task_sema); - - return; - } - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]FCP response data length(0x%x), RSP_CODE(0x%x:%x:%x:%x:%x:%x:%x:%x)", - pkg->unf_rsp_pload_bl.length, rsp_info[ARRAY_INDEX_0], - rsp_info[ARRAY_INDEX_1], rsp_info[ARRAY_INDEX_2], - rsp_info[ARRAY_INDEX_3], rsp_info[ARRAY_INDEX_4], - rsp_info[ARRAY_INDEX_5], rsp_info[ARRAY_INDEX_6], - rsp_info[ARRAY_INDEX_7]); - - rsp_code = rsp_info[code_index]; - if (rsp_code == UNF_FCP_TM_RSP_COMPLETE || rsp_code == UNF_FCP_TM_RSP_SUCCEED) - xchg->scsi_cmnd_info.result = UNF_IO_SUCCESS; - else - xchg->scsi_cmnd_info.result = UNF_IO_FAILED; - - /* wakeup semaphore & return */ - up(&xchg->task_sema); -} - -static void unf_build_task_mgmt_fcp_cmnd(struct unf_fcp_cmnd *fcp_cmnd, - struct unf_scsi_cmnd *scsi_cmnd, - enum unf_task_mgmt_cmd task_mgmt) -{ - FC_CHECK_RETURN_VOID(fcp_cmnd); - FC_CHECK_RETURN_VOID(scsi_cmnd); - - unf_big_end_to_cpu((void *)scsi_cmnd->lun_id, UNF_FCP_LUNID_LEN_8); - (*(u64 *)(scsi_cmnd->lun_id)) >>= UNF_SHIFT_8; - memcpy(fcp_cmnd->lun, scsi_cmnd->lun_id, sizeof(fcp_cmnd->lun)); - - /* - * If the TASK MANAGEMENT FLAGS 
field is set to a nonzero value, - * the FCP_CDB field, the FCP_DL field, the TASK ATTRIBUTE field, - * the RDDATA bit, and the WRDATA bit shall be ignored and the - * FCP_BIDIRECTIONAL_READ_DL field shall not be included in the FCP_CMND - * IU payload - */ - fcp_cmnd->control = UNF_SET_TASK_MGMT_FLAGS((u32)(task_mgmt)); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "SCSI cmnd(0x%x) is task mgmt cmnd. ntrl Flag(LITTLE END) is 0x%x.", - task_mgmt, fcp_cmnd->control); -} - -int unf_send_scsi_mgmt_cmnd(struct unf_xchg *xchg, struct unf_lport *lport, - struct unf_rport *rport, - struct unf_scsi_cmnd *scsi_cmnd, - enum unf_task_mgmt_cmd task_mgnt_cmd_type) -{ - /* - * 1. Device/LUN reset - * 2. Target/Session reset - */ - struct unf_xchg *unf_xchg = NULL; - int ret = SUCCESS; - struct unf_frame_pkg pkg = {0}; - ulong xchg_flag = 0; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, FAILED); - FC_CHECK_RETURN_VALUE(rport, FAILED); - FC_CHECK_RETURN_VALUE(xchg, FAILED); - FC_CHECK_RETURN_VALUE(scsi_cmnd, FAILED); - FC_CHECK_RETURN_VALUE(task_mgnt_cmd_type <= UNF_FCP_TM_TERMINATE_TASK && - task_mgnt_cmd_type >= UNF_FCP_TM_QUERY_TASK_SET, FAILED); - - unf_xchg = xchg; - unf_xchg->lport = lport; - unf_xchg->rport = rport; - - /* 1. State: Up_Task */ - spin_lock_irqsave(&unf_xchg->xchg_state_lock, xchg_flag); - unf_xchg->io_state |= INI_IO_STATE_UPTASK; - spin_unlock_irqrestore(&unf_xchg->xchg_state_lock, xchg_flag); - pkg.frame_head.oxid_rxid = ((u32)unf_xchg->oxid << (u32)UNF_SHIFT_16) | unf_xchg->rxid; - - /* 2. Set TASK MANAGEMENT FLAGS of FCP_CMND to the corresponding task - * management command - */ - unf_build_task_mgmt_fcp_cmnd(&unf_xchg->fcp_cmnd, scsi_cmnd, task_mgnt_cmd_type); - - pkg.xchg_contex = unf_xchg; - pkg.private_data[PKG_PRIVATE_XCHG_RPORT_INDEX] = rport->rport_index; - pkg.fcp_cmnd = &unf_xchg->fcp_cmnd; - pkg.private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = unf_xchg->hotpooltag; - pkg.frame_head.csctl_sid = lport->nport_id; - pkg.frame_head.rctl_did = rport->nport_id; - - pkg.private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = - xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME]; - - if (unlikely(lport->pcie_link_down)) { - unf_free_lport_all_xchg(lport); - return SUCCESS; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "[event]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) Hottag(0x%x) lunid(0x%llx)", - lport->port_id, task_mgnt_cmd_type, rport->nport_id, - unf_xchg->hotpooltag, *((u64 *)scsi_cmnd->lun_id)); - - /* 3. Init exchange task semaphore */ - sema_init(&unf_xchg->task_sema, 0); - - /* 4. Send Mgmt Task to low-level */ - if (unf_hardware_start_io(lport, &pkg) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) failed", - lport->port_id, task_mgnt_cmd_type, rport->nport_id); - - return FAILED; - } - - /* - * semaphore timeout - ** - * Code review: The second input parameter needs to be converted to - jiffies. - * set semaphore after the message is sent successfully.The semaphore is - returned when the semaphore times out or is woken up. - ** - * 5. The semaphore is cleared and counted when the Mgmt Task message is - sent, and is Wake Up when the RSP message is received. - * If the semaphore is not Wake Up, the semaphore is triggered after - timeout. That is, no RSP message is received within the timeout period. 
- */ - if (down_timeout(&unf_xchg->task_sema, (s64)msecs_to_jiffies((u32)UNF_WAIT_SEM_TIMEOUT))) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) timeout scsi id(0x%x) lun id(0x%x)", - lport->nport_id, task_mgnt_cmd_type, - rport->nport_id, scsi_cmnd->scsi_id, - (u32)scsi_cmnd->raw_lun_id); - unf_notify_chip_free_xid(unf_xchg); - /* semaphore timeout */ - ret = FAILED; - spin_lock_irqsave(&lport->lport_state_lock, flag); - if (lport->states == UNF_LPORT_ST_RESET) - ret = SUCCESS; - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - return ret; - } - - /* - * 6. NOTE: no timeout (has been waken up) - * Do Scsi_Cmnd(Mgmt Task) result checking - * * - * FAILED: with error code or RSP is error - * SUCCESS: others - */ - if (unf_xchg->scsi_cmnd_info.result == UNF_IO_SUCCESS) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) and receive rsp succeed", - lport->nport_id, task_mgnt_cmd_type, rport->nport_id); - - ret = SUCCESS; - } else { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) send task_cmnd(0x%x) to RPort(0x%x) and receive rsp failed scsi id(0x%x) lun id(0x%x)", - lport->nport_id, task_mgnt_cmd_type, rport->nport_id, - scsi_cmnd->scsi_id, (u32)scsi_cmnd->raw_lun_id); - - ret = FAILED; - } - - return ret; -} - -u32 unf_recv_tmf_marker_status(void *lport, struct unf_frame_pkg *pkg) -{ - struct unf_lport *unf_lport = NULL; - u32 uret = RETURN_OK; - struct unf_xchg *unf_xchg = NULL; - u16 hot_pool_tag = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - unf_lport = (struct unf_lport *)lport; - - /* Find exchange which point to marker sts */ - if (!unf_lport->xchg_mgr_temp.unf_look_up_xchg_by_tag) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) tag function is NULL", unf_lport->port_id); - - return UNF_RETURN_ERROR; - } - - hot_pool_tag = - (u16)(pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]); - - unf_xchg = - (struct unf_xchg *)(unf_lport->xchg_mgr_temp - .unf_look_up_xchg_by_tag((void *)unf_lport, hot_pool_tag)); - if (!unf_xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) find exchange by tag(0x%x) failed", - unf_lport->port_id, unf_lport->nport_id, hot_pool_tag); - - return UNF_RETURN_ERROR; - } - - /* - * NOTE: set exchange TMF state with MARKER_STS_RECEIVED - * * - * About TMF state - * 1. STS received - * 2. Response received - * 3. 
Do check if necessary - */ - unf_xchg->tmf_state |= MARKER_STS_RECEIVED; - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]Marker STS: D_ID(0x%x) S_ID(0x%x) OX_ID(0x%x) RX_ID(0x%x), EXCH: D_ID(0x%x) S_ID(0x%x) OX_ID(0x%x) RX_ID(0x%x)", - pkg->frame_head.rctl_did & UNF_NPORTID_MASK, - pkg->frame_head.csctl_sid & UNF_NPORTID_MASK, - (u16)(pkg->frame_head.oxid_rxid >> UNF_SHIFT_16), - (u16)(pkg->frame_head.oxid_rxid), unf_xchg->did, unf_xchg->sid, - unf_xchg->oxid, unf_xchg->rxid); - - return uret; -} - -u32 unf_recv_abts_marker_status(void *lport, struct unf_frame_pkg *pkg) -{ - struct unf_lport *unf_lport = NULL; - struct unf_xchg *unf_xchg = NULL; - u16 hot_pool_tag = 0; - ulong flags = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - unf_lport = (struct unf_lport *)lport; - - /* Find exchange by tag */ - if (!unf_lport->xchg_mgr_temp.unf_look_up_xchg_by_tag) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) tag function is NULL", unf_lport->port_id); - - return UNF_RETURN_ERROR; - } - - hot_pool_tag = (u16)(pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]); - - unf_xchg = - (struct unf_xchg *)(unf_lport->xchg_mgr_temp.unf_look_up_xchg_by_tag((void *)unf_lport, - hot_pool_tag)); - if (!unf_xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) find exchange by tag(0x%x) failed", - unf_lport->port_id, unf_lport->nport_id, hot_pool_tag); - - return UNF_RETURN_ERROR; - } - - /* - * NOTE: set exchange ABTS state with MARKER_STS_RECEIVED - * * - * About exchange ABTS state - * 1. STS received - * 2. Response received - * 3. Do check if necessary - * * - * About Exchange status get from low level - * 1. Set: when RCVD ABTS Marker - * 2. Set: when RCVD ABTS Req Done - * 3. 
value: set value with pkg->status - */ - spin_lock_irqsave(&unf_xchg->xchg_state_lock, flags); - unf_xchg->ucode_abts_state = pkg->status; - unf_xchg->abts_state |= MARKER_STS_RECEIVED; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[info]Port(0x%x) wake up SEMA for Abts marker exchange(0x%p) oxid(0x%x 0x%x) hottag(0x%x) status(0x%x)", - unf_lport->port_id, unf_xchg, unf_xchg->oxid, unf_xchg->rxid, - unf_xchg->hotpooltag, pkg->abts_maker_status); - - /* - * NOTE: Second time for ABTS marker received, or - * ABTS response have been received, no need to wake up sema - */ - if ((INI_IO_STATE_ABORT_TIMEOUT & unf_xchg->io_state) || - (ABTS_RESPONSE_RECEIVED & unf_xchg->abts_state)) { - spin_unlock_irqrestore(&unf_xchg->xchg_state_lock, flags); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[info]Port(0x%x) no need to wake up SEMA for Abts marker ABTS_STATE(0x%x) IO_STATE(0x%x)", - unf_lport->port_id, unf_xchg->abts_state, unf_xchg->io_state); - - return RETURN_OK; - } - - if (unf_xchg->io_state & INI_IO_STATE_TMF_ABORT) { - spin_unlock_irqrestore(&unf_xchg->xchg_state_lock, flags); - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[info]Port(0x%x) receive Abts marker, exchange(%p) state(0x%x) free it", - unf_lport->port_id, unf_xchg, unf_xchg->io_state); - - unf_cm_free_xchg(unf_lport, unf_xchg); - } else { - spin_unlock_irqrestore(&unf_xchg->xchg_state_lock, flags); - up(&unf_xchg->task_sema); - } - - return RETURN_OK; -} diff --git a/drivers/scsi/spfc/common/unf_io_abnormal.h b/drivers/scsi/spfc/common/unf_io_abnormal.h deleted file mode 100644 index 31cc8e30e51a..000000000000 --- a/drivers/scsi/spfc/common/unf_io_abnormal.h +++ /dev/null @@ -1,19 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_IO_ABNORMAL_H -#define UNF_IO_ABNORMAL_H - -#include "unf_type.h" -#include "unf_lport.h" -#include "unf_exchg.h" - -#define UNF_GET_LL_ERR(pkg) (((pkg)->status) >> 16) - -void unf_process_scsi_mgmt_result(struct unf_frame_pkg *pkg, - struct unf_xchg *xchg); -u32 unf_hardware_start_io(struct unf_lport *lport, struct unf_frame_pkg *pkg); -u32 unf_recv_abts_marker_status(void *lport, struct unf_frame_pkg *pkg); -u32 unf_recv_tmf_marker_status(void *lport, struct unf_frame_pkg *pkg); - -#endif diff --git a/drivers/scsi/spfc/common/unf_log.h b/drivers/scsi/spfc/common/unf_log.h deleted file mode 100644 index 801e23ac0829..000000000000 --- a/drivers/scsi/spfc/common/unf_log.h +++ /dev/null @@ -1,178 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_LOG_H -#define UNF_LOG_H -#include "unf_type.h" - -#define UNF_CRITICAL 1 -#define UNF_ERR 2 -#define UNF_WARN 3 -#define UNF_KEVENT 4 -#define UNF_MAJOR 5 -#define UNF_MINOR 6 -#define UNF_INFO 7 -#define UNF_DATA 7 -#define UNF_ALL 7 - -enum unf_debug_type { - UNF_DEBUG_TYPE_MML = 0, - UNF_DEBUG_TYPE_DIAGNOSE = 1, - UNF_DEBUG_TYPE_MESSAGE = 2, - UNF_DEBUG_TYPE_BUTT -}; - -enum unf_log_attr { - UNF_LOG_LOGIN_ATT = 0x1, - UNF_LOG_IO_ATT = 0x2, - UNF_LOG_EQUIP_ATT = 0x4, - UNF_LOG_REG_ATT = 0x8, - UNF_LOG_REG_MML_TEST = 0x10, - UNF_LOG_EVENT = 0x20, - UNF_LOG_NORMAL = 0x40, - UNF_LOG_ABNORMAL = 0X80, - UNF_LOG_BUTT -}; - -enum event_log { - UNF_EVTLOG_DRIVER_SUC = 0, - UNF_EVTLOG_DRIVER_INFO, - UNF_EVTLOG_DRIVER_WARN, - UNF_EVTLOG_DRIVER_ERR, - UNF_EVTLOG_LINK_SUC, - UNF_EVTLOG_LINK_INFO, - UNF_EVTLOG_LINK_WARN, - UNF_EVTLOG_LINK_ERR, - UNF_EVTLOG_IO_SUC, - UNF_EVTLOG_IO_INFO, - UNF_EVTLOG_IO_WARN, - UNF_EVTLOG_IO_ERR, 
- UNF_EVTLOG_TOOL_SUC, - UNF_EVTLOG_TOOL_INFO, - UNF_EVTLOG_TOOL_WARN, - UNF_EVTLOG_TOOL_ERR, - UNF_EVTLOG_BUT -}; - -#define UNF_IO_ATT_PRINT_TIMES 2 -#define UNF_LOGIN_ATT_PRINT_TIMES 100 - -#define UNF_IO_ATT_PRINT_LIMIT msecs_to_jiffies(2 * 1000) - -extern u32 unf_dgb_level; -extern u32 log_print_level; -extern u32 log_limited_times; - -#define DRV_LOG_LIMIT(module_id, log_level, log_att, format, ...) \ - do { \ - static unsigned long pre; \ - static int should_print = UNF_LOGIN_ATT_PRINT_TIMES; \ - if (time_after_eq(jiffies, pre + (UNF_IO_ATT_PRINT_LIMIT))) { \ - if (log_att == UNF_LOG_ABNORMAL) { \ - should_print = UNF_IO_ATT_PRINT_TIMES; \ - } else { \ - should_print = log_limited_times; \ - } \ - } \ - if (should_print < 0) { \ - if (log_att != UNF_LOG_ABNORMAL) \ - pre = jiffies; \ - break; \ - } \ - if (should_print-- > 0) { \ - printk(log_level "[%d][FC_UNF]" format "[%s][%-5d]\n", \ - smp_processor_id(), ##__VA_ARGS__, __func__, \ - __LINE__); \ - } \ - if (should_print == 0) { \ - printk(log_level "[FC_UNF]log is limited[%s][%-5d]\n", \ - __func__, __LINE__); \ - } \ - pre = jiffies; \ - } while (0) - -#define FC_CHECK_RETURN_VALUE(condition, ret) \ - do { \ - if (unlikely(!(condition))) { \ - FC_DRV_PRINT(UNF_LOG_REG_ATT, \ - UNF_ERR, "Para check(%s) invalid", \ - #condition); \ - return ret; \ - } \ - } while (0) - -#define FC_CHECK_RETURN_VOID(condition) \ - do { \ - if (unlikely(!(condition))) { \ - FC_DRV_PRINT(UNF_LOG_REG_ATT, \ - UNF_ERR, "Para check(%s) invalid", \ - #condition); \ - return; \ - } \ - } while (0) - -#define FC_DRV_PRINT(log_att, log_level, format, ...) \ - do { \ - if (unlikely((log_level) <= log_print_level)) { \ - if (log_level == UNF_CRITICAL) { \ - DRV_LOG_LIMIT(UNF_PID, KERN_CRIT, \ - log_att, format, ##__VA_ARGS__); \ - } else if (log_level == UNF_WARN) { \ - DRV_LOG_LIMIT(UNF_PID, KERN_WARNING, \ - log_att, format, ##__VA_ARGS__); \ - } else if (log_level == UNF_ERR) { \ - DRV_LOG_LIMIT(UNF_PID, KERN_ERR, \ - log_att, format, ##__VA_ARGS__); \ - } else if (log_level == UNF_MAJOR || \ - log_level == UNF_MINOR || \ - log_level == UNF_KEVENT) { \ - DRV_LOG_LIMIT(UNF_PID, KERN_NOTICE, \ - log_att, format, ##__VA_ARGS__); \ - } else if (log_level == UNF_INFO || \ - log_level == UNF_DATA) { \ - DRV_LOG_LIMIT(UNF_PID, KERN_INFO, \ - log_att, format, ##__VA_ARGS__); \ - } \ - } \ - } while (0) - -#define UNF_PRINT_SFS(dbg_level, portid, data, size) \ - do { \ - if ((dbg_level) <= log_print_level) { \ - u32 cnt = 0; \ - printk(KERN_INFO "[INFO]Port(0x%x) sfs:0x", (portid)); \ - for (cnt = 0; cnt < (size) / 4; cnt++) { \ - printk(KERN_INFO "%08x ", \ - ((u32 *)(data))[cnt]); \ - } \ - printk(KERN_INFO "[FC_UNF][%s]\n", __func__); \ - } \ - } while (0) - -#define UNF_PRINT_SFS_LIMIT(dbg_level, portid, data, size) \ - do { \ - if ((dbg_level) <= log_print_level) { \ - static ulong pre; \ - static int should_print = UNF_LOGIN_ATT_PRINT_TIMES; \ - if (time_after_eq( \ - jiffies, pre + UNF_IO_ATT_PRINT_LIMIT)) { \ - should_print = log_limited_times; \ - } \ - if (should_print < 0) { \ - pre = jiffies; \ - break; \ - } \ - if (should_print-- > 0) { \ - UNF_PRINT_SFS(dbg_level, portid, data, size); \ - } \ - if (should_print == 0) { \ - printk( \ - KERN_INFO \ - "[FC_UNF]sfs log is limited[%s][%-5d]\n", \ - __func__, __LINE__); \ - } \ - pre = jiffies; \ - } \ - } while (0) - -#endif diff --git a/drivers/scsi/spfc/common/unf_lport.c b/drivers/scsi/spfc/common/unf_lport.c deleted file mode 100644 index 66d3ac14d676..000000000000 --- 
a/drivers/scsi/spfc/common/unf_lport.c +++ /dev/null @@ -1,1008 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_lport.h" -#include "unf_log.h" -#include "unf_rport.h" -#include "unf_exchg.h" -#include "unf_service.h" -#include "unf_ls.h" -#include "unf_gs.h" -#include "unf_portman.h" - -static void unf_lport_config(struct unf_lport *lport); -void unf_cm_mark_dirty_mem(struct unf_lport *lport, enum unf_lport_dirty_flag type) -{ - FC_CHECK_RETURN_VOID((lport)); - - lport->dirty_flag |= (u32)type; -} - -u32 unf_init_lport_route(struct unf_lport *lport) -{ - u32 ret = RETURN_OK; - int ret_val = 0; - - FC_CHECK_RETURN_VALUE((lport), UNF_RETURN_ERROR); - - /* Init L_Port route work */ - INIT_DELAYED_WORK(&lport->route_timer_work, unf_lport_route_work); - - /* Delay route work */ - ret_val = queue_delayed_work(unf_wq, &lport->route_timer_work, - (ulong)msecs_to_jiffies(UNF_LPORT_POLL_TIMER)); - if (unlikely((!(bool)(ret_val)))) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_WARN, - "[warn]Port(0x%x) schedule route work failed", - lport->port_id); - - return UNF_RETURN_ERROR; - } - - ret = unf_lport_ref_inc(lport); - return ret; -} - -void unf_destroy_lport_route(struct unf_lport *lport) -{ - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(lport); - - /* Cancel (route timer) delay work */ - UNF_DELAYED_WORK_SYNC(ret, (lport->port_id), (&lport->route_timer_work), - "Route Timer work"); - if (ret == RETURN_OK) - /* Corresponding to ADD operation */ - unf_lport_ref_dec(lport); - - lport->destroy_step = UNF_LPORT_DESTROY_STEP_2_CLOSE_ROUTE; -} - -void unf_init_port_parms(struct unf_lport *lport) -{ - INIT_LIST_HEAD(&lport->list_vports_head); - INIT_LIST_HEAD(&lport->list_intergrad_vports); - INIT_LIST_HEAD(&lport->list_destroy_vports); - INIT_LIST_HEAD(&lport->entry_lport); - INIT_LIST_HEAD(&lport->list_qos_head); - - spin_lock_init(&lport->qos_mgr_lock); - spin_lock_init(&lport->lport_state_lock); - - lport->max_frame_size = max_frame_size; - lport->ed_tov = UNF_DEFAULT_EDTOV; - lport->ra_tov = UNF_DEFAULT_RATOV; - lport->fabric_node_name = 0; - lport->qos_level = UNF_QOS_LEVEL_DEFAULT; - lport->qos_cs_ctrl = false; - lport->priority = (bool)UNF_PRIORITY_DISABLE; - lport->port_dirt_exchange = false; - - unf_lport_config(lport); - - unf_set_lport_state(lport, UNF_LPORT_ST_ONLINE); - - lport->link_up = UNF_PORT_LINK_DOWN; - lport->port_removing = false; - lport->lport_free_completion = NULL; - lport->last_tx_fault_jif = 0; - lport->enhanced_features = 0; - lport->destroy_step = INVALID_VALUE32; - lport->dirty_flag = 0; - lport->switch_state = false; - lport->bbscn_support = false; - lport->loop_back_test_mode = false; - lport->start_work_state = UNF_START_WORK_STOP; - lport->sfp_power_fault_count = 0; - lport->sfp_9545_fault_count = 0; - - atomic_set(&lport->lport_no_operate_flag, UNF_LPORT_NORMAL); - atomic_set(&lport->port_ref_cnt, 0); - atomic_set(&lport->scsi_session_add_success, 0); - atomic_set(&lport->scsi_session_add_failed, 0); - atomic_set(&lport->scsi_session_del_success, 0); - atomic_set(&lport->scsi_session_del_failed, 0); - atomic_set(&lport->add_start_work_failed, 0); - atomic_set(&lport->add_closing_work_failed, 0); - atomic_set(&lport->alloc_scsi_id, 0); - atomic_set(&lport->resume_scsi_id, 0); - atomic_set(&lport->reuse_scsi_id, 0); - atomic_set(&lport->device_alloc, 0); - atomic_set(&lport->device_destroy, 0); - atomic_set(&lport->session_loss_tmo, 0); - atomic_set(&lport->host_no, 0); - 
atomic64_set(&lport->exchg_index, 0x1000); - atomic_inc(&lport->port_ref_cnt); - - memset(&lport->port_dynamic_info, 0, sizeof(struct unf_port_dynamic_info)); - memset(&lport->link_service_info, 0, sizeof(struct unf_link_service_collect)); - memset(&lport->err_code_sum, 0, sizeof(struct unf_err_code)); -} - -void unf_reset_lport_params(struct unf_lport *lport) -{ - struct unf_lport *unf_lport = lport; - - FC_CHECK_RETURN_VOID(lport); - - unf_lport->link_up = UNF_PORT_LINK_DOWN; - unf_lport->nport_id = 0; - unf_lport->max_frame_size = max_frame_size; - unf_lport->ed_tov = UNF_DEFAULT_EDTOV; - unf_lport->ra_tov = UNF_DEFAULT_RATOV; - unf_lport->fabric_node_name = 0; -} - -static enum unf_lport_login_state -unf_lport_state_online(enum unf_lport_login_state old_state, - enum unf_lport_event lport_event) -{ - enum unf_lport_login_state next_state = UNF_LPORT_ST_ONLINE; - - switch (lport_event) { - case UNF_EVENT_LPORT_LINK_UP: - next_state = UNF_LPORT_ST_LINK_UP; - break; - - case UNF_EVENT_LPORT_NORMAL_ENTER: - next_state = UNF_LPORT_ST_INITIAL; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_lport_login_state unf_lport_state_initial(enum unf_lport_login_state old_state, - enum unf_lport_event lport_event) -{ - enum unf_lport_login_state next_state = UNF_LPORT_ST_ONLINE; - - switch (lport_event) { - case UNF_EVENT_LPORT_LINK_UP: - next_state = UNF_LPORT_ST_LINK_UP; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_lport_login_state unf_lport_state_linkup(enum unf_lport_login_state old_state, - enum unf_lport_event lport_event) -{ - enum unf_lport_login_state next_state = UNF_LPORT_ST_ONLINE; - - switch (lport_event) { - case UNF_EVENT_LPORT_NORMAL_ENTER: - next_state = UNF_LPORT_ST_FLOGI_WAIT; - break; - - case UNF_EVENT_LPORT_READY: - next_state = UNF_LPORT_ST_READY; - break; - - case UNF_EVENT_LPORT_LINK_DOWN: - next_state = UNF_LPORT_ST_INITIAL; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_lport_login_state unf_lport_state_flogi_wait(enum unf_lport_login_state old_state, - enum unf_lport_event lport_event) -{ - enum unf_lport_login_state next_state = UNF_LPORT_ST_ONLINE; - - switch (lport_event) { - case UNF_EVENT_LPORT_REMOTE_ACC: - next_state = UNF_LPORT_ST_PLOGI_WAIT; - break; - - case UNF_EVENT_LPORT_READY: - next_state = UNF_LPORT_ST_READY; - break; - - case UNF_EVENT_LPORT_REMOTE_TIMEOUT: - next_state = UNF_LPORT_ST_LOGO; - break; - - case UNF_EVENT_LPORT_LINK_DOWN: - next_state = UNF_LPORT_ST_INITIAL; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_lport_login_state unf_lport_state_plogi_wait(enum unf_lport_login_state old_state, - enum unf_lport_event lport_event) -{ - enum unf_lport_login_state next_state = UNF_LPORT_ST_ONLINE; - - switch (lport_event) { - case UNF_EVENT_LPORT_REMOTE_ACC: - next_state = UNF_LPORT_ST_RFT_ID_WAIT; - break; - - case UNF_EVENT_LPORT_REMOTE_TIMEOUT: - next_state = UNF_LPORT_ST_LOGO; - break; - - case UNF_EVENT_LPORT_LINK_DOWN: - next_state = UNF_LPORT_ST_INITIAL; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_lport_login_state -unf_lport_state_rftid_wait(enum unf_lport_login_state old_state, - enum unf_lport_event lport_event) -{ - enum unf_lport_login_state next_state = UNF_LPORT_ST_ONLINE; - - switch (lport_event) { - case UNF_EVENT_LPORT_REMOTE_ACC: - next_state = 
UNF_LPORT_ST_RFF_ID_WAIT; - break; - - case UNF_EVENT_LPORT_REMOTE_TIMEOUT: - next_state = UNF_LPORT_ST_LOGO; - break; - - case UNF_EVENT_LPORT_LINK_DOWN: - next_state = UNF_LPORT_ST_INITIAL; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_lport_login_state unf_lport_state_rffid_wait(enum unf_lport_login_state old_state, - enum unf_lport_event lport_event) -{ - enum unf_lport_login_state next_state = UNF_LPORT_ST_ONLINE; - - switch (lport_event) { - case UNF_EVENT_LPORT_REMOTE_ACC: - next_state = UNF_LPORT_ST_SCR_WAIT; - break; - - case UNF_EVENT_LPORT_REMOTE_TIMEOUT: - next_state = UNF_LPORT_ST_LOGO; - break; - - case UNF_EVENT_LPORT_LINK_DOWN: - next_state = UNF_LPORT_ST_INITIAL; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_lport_login_state unf_lport_state_scr_wait(enum unf_lport_login_state old_state, - enum unf_lport_event lport_event) -{ - enum unf_lport_login_state next_state = UNF_LPORT_ST_ONLINE; - - switch (lport_event) { - case UNF_EVENT_LPORT_REMOTE_ACC: - next_state = UNF_LPORT_ST_READY; - break; - - case UNF_EVENT_LPORT_REMOTE_TIMEOUT: - next_state = UNF_LPORT_ST_LOGO; - break; - - case UNF_EVENT_LPORT_LINK_DOWN: - next_state = UNF_LPORT_ST_INITIAL; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_lport_login_state -unf_lport_state_logo(enum unf_lport_login_state old_state, - enum unf_lport_event lport_event) -{ - enum unf_lport_login_state next_state = UNF_LPORT_ST_ONLINE; - - switch (lport_event) { - case UNF_EVENT_LPORT_NORMAL_ENTER: - next_state = UNF_LPORT_ST_OFFLINE; - break; - - case UNF_EVENT_LPORT_LINK_DOWN: - next_state = UNF_LPORT_ST_INITIAL; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_lport_login_state unf_lport_state_offline(enum unf_lport_login_state old_state, - enum unf_lport_event lport_event) -{ - enum unf_lport_login_state next_state = UNF_LPORT_ST_ONLINE; - - switch (lport_event) { - case UNF_EVENT_LPORT_ONLINE: - next_state = UNF_LPORT_ST_ONLINE; - break; - - case UNF_EVENT_LPORT_RESET: - next_state = UNF_LPORT_ST_RESET; - break; - - case UNF_EVENT_LPORT_LINK_DOWN: - next_state = UNF_LPORT_ST_INITIAL; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_lport_login_state unf_lport_state_reset(enum unf_lport_login_state old_state, - enum unf_lport_event lport_event) -{ - enum unf_lport_login_state next_state = UNF_LPORT_ST_ONLINE; - - switch (lport_event) { - case UNF_EVENT_LPORT_NORMAL_ENTER: - next_state = UNF_LPORT_ST_INITIAL; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_lport_login_state unf_lport_state_ready(enum unf_lport_login_state old_state, - enum unf_lport_event lport_event) -{ - enum unf_lport_login_state next_state = UNF_LPORT_ST_ONLINE; - - switch (lport_event) { - case UNF_EVENT_LPORT_LINK_DOWN: - next_state = UNF_LPORT_ST_INITIAL; - break; - - case UNF_EVENT_LPORT_RESET: - next_state = UNF_LPORT_ST_RESET; - break; - - case UNF_EVENT_LPORT_OFFLINE: - next_state = UNF_LPORT_ST_LOGO; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static struct unf_lport_state_ma lport_state[] = { - {UNF_LPORT_ST_ONLINE, unf_lport_state_online}, - {UNF_LPORT_ST_INITIAL, unf_lport_state_initial}, - {UNF_LPORT_ST_LINK_UP, unf_lport_state_linkup}, - {UNF_LPORT_ST_FLOGI_WAIT, 
unf_lport_state_flogi_wait}, - {UNF_LPORT_ST_PLOGI_WAIT, unf_lport_state_plogi_wait}, - {UNF_LPORT_ST_RFT_ID_WAIT, unf_lport_state_rftid_wait}, - {UNF_LPORT_ST_RFF_ID_WAIT, unf_lport_state_rffid_wait}, - {UNF_LPORT_ST_SCR_WAIT, unf_lport_state_scr_wait}, - {UNF_LPORT_ST_LOGO, unf_lport_state_logo}, - {UNF_LPORT_ST_OFFLINE, unf_lport_state_offline}, - {UNF_LPORT_ST_RESET, unf_lport_state_reset}, - {UNF_LPORT_ST_READY, unf_lport_state_ready}, -}; - -void unf_lport_state_ma(struct unf_lport *lport, - enum unf_lport_event lport_event) -{ - enum unf_lport_login_state old_state = UNF_LPORT_ST_ONLINE; - enum unf_lport_login_state next_state = UNF_LPORT_ST_ONLINE; - u32 index = 0; - - FC_CHECK_RETURN_VOID(lport); - - old_state = lport->states; - - while (index < (sizeof(lport_state) / sizeof(struct unf_lport_state_ma))) { - if (lport->states == lport_state[index].lport_state) { - next_state = lport_state[index].lport_state_ma(old_state, lport_event); - break; - } - index++; - } - - if (index >= (sizeof(lport_state) / sizeof(struct unf_lport_state_ma))) { - next_state = old_state; - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_MAJOR, "[info]Port(0x%x) hold state(0x%x)", - lport->port_id, lport->states); - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) with old state(0x%x) event(0x%x) next state(0x%x)", - lport->port_id, old_state, lport_event, next_state); - - unf_set_lport_state(lport, next_state); -} - -u32 unf_lport_retry_flogi(struct unf_lport *lport) -{ - struct unf_rport *unf_rport = NULL; - u32 ret = UNF_RETURN_ERROR; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - /* Get (new) R_Port */ - unf_rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_FLOGI); - unf_rport = unf_get_safe_rport(lport, unf_rport, UNF_RPORT_REUSE_ONLY, UNF_FC_FID_FLOGI); - if (unlikely(!unf_rport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_WARN, "[warn]Port(0x%x) allocate RPort failed", - lport->port_id); - - return UNF_RETURN_ERROR; - } - - /* Check L_Port state */ - spin_lock_irqsave(&lport->lport_state_lock, flag); - if (lport->states != UNF_LPORT_ST_FLOGI_WAIT) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) no need to retry FLOGI with state(0x%x)", - lport->port_id, lport->states); - - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - return RETURN_OK; - } - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport->nport_id = UNF_FC_FID_FLOGI; - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - /* Send FLOGI or FDISC */ - if (lport->root_lport != lport) { - ret = unf_send_fdisc(lport, unf_rport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) send FDISC failed", lport->port_id); - - /* Do L_Port recovery */ - unf_lport_error_recovery(lport); - } - } else { - ret = unf_send_flogi(lport, unf_rport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) send FLOGI failed\n", lport->port_id); - - /* Do L_Port recovery */ - unf_lport_error_recovery(lport); - } - } - - return ret; -} - -u32 unf_lport_name_server_register(struct unf_lport *lport, - enum unf_lport_login_state state) -{ - struct unf_rport *unf_rport = NULL; - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - u32 fabric_id = UNF_FC_FID_DIR_SERV; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - if (state == UNF_LPORT_ST_SCR_WAIT) - fabric_id = UNF_FC_FID_FCTRL; - - /* Get (safe) R_Port */ - unf_rport = - 
unf_get_rport_by_nport_id(lport, fabric_id); - unf_rport = unf_get_safe_rport(lport, unf_rport, UNF_RPORT_REUSE_ONLY, - fabric_id); - if (!unf_rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_WARN, "[warn]Port(0x%x) allocate RPort failed", - lport->port_id); - - return UNF_RETURN_ERROR; - } - - /* Update R_Port & L_Port state */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport->nport_id = fabric_id; - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - spin_lock_irqsave(&lport->lport_state_lock, flag); - unf_lport_state_ma(lport, UNF_EVENT_LPORT_NORMAL_ENTER); - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - switch (state) { - /* RFT_ID */ - case UNF_LPORT_ST_RFT_ID_WAIT: - ret = unf_send_rft_id(lport, unf_rport); - break; - /* RFF_ID */ - case UNF_LPORT_ST_RFF_ID_WAIT: - ret = unf_send_rff_id(lport, unf_rport, UNF_FC4_FCP_TYPE); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) register SCSI FC4Type to fabric(0xfffffc) failed", - lport->nport_id); - unf_lport_error_recovery(lport); - } - break; - - /* SCR */ - case UNF_LPORT_ST_SCR_WAIT: - ret = unf_send_scr(lport, unf_rport); - break; - - /* PLOGI */ - case UNF_LPORT_ST_PLOGI_WAIT: - default: - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_ENTER_PLOGI); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - ret = unf_send_plogi(lport, unf_rport); - break; - } - - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) register fabric(0xfffffc) failed", - lport->nport_id); - - /* Do L_Port recovery */ - unf_lport_error_recovery(lport); - } - - return ret; -} - -u32 unf_lport_enter_sns_logo(struct unf_lport *lport, struct unf_rport *rport) -{ - struct unf_rport *unf_rport = NULL; - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - if (!rport) - unf_rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_DIR_SERV); - else - unf_rport = rport; - - if (!unf_rport) { - spin_lock_irqsave(&lport->lport_state_lock, flag); - unf_lport_state_ma(lport, UNF_EVENT_LPORT_NORMAL_ENTER); - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - return RETURN_OK; - } - - /* Update L_Port & R_Port state */ - spin_lock_irqsave(&lport->lport_state_lock, flag); - unf_lport_state_ma(lport, UNF_EVENT_LPORT_NORMAL_ENTER); - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_LOGO); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - /* Do R_Port LOGO state */ - unf_rport_enter_logo(lport, unf_rport); - - return ret; -} - -void unf_lport_enter_sns_plogi(struct unf_lport *lport) -{ - /* Fabric or Public Loop Mode: Login with Name server */ - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_rport = NULL; - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(lport); - - /* Get (safe) R_Port */ - unf_rport = unf_get_rport_by_nport_id(unf_lport, UNF_FC_FID_DIR_SERV); - if (unf_rport) { - /* for port swap: Delete old R_Port if necessary */ - if (unf_rport->local_nport_id != lport->nport_id) { - unf_rport_immediate_link_down(lport, unf_rport); - unf_rport = NULL; - } - } - - unf_rport = unf_get_safe_rport(lport, unf_rport, UNF_RPORT_REUSE_ONLY, - UNF_FC_FID_DIR_SERV); - if (!unf_rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_WARN, "[warn]Port(0x%x) allocate RPort 
failed", - lport->port_id); - - unf_lport_error_recovery(unf_lport); - return; - } - - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport->nport_id = UNF_FC_FID_DIR_SERV; - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - /* Send PLOGI to Fabric(0xfffffc) */ - ret = unf_send_plogi(unf_lport, unf_rport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) send PLOGI to name server failed", - lport->port_id); - - unf_lport_error_recovery(unf_lport); - } -} - -int unf_get_port_params(void *arg_in, void *arg_out) -{ - struct unf_lport *unf_lport = (struct unf_lport *)arg_in; - struct unf_low_level_port_mgr_op *port_mgr = NULL; - struct unf_port_param port_params = {0}; - - FC_CHECK_RETURN_VALUE(arg_in, UNF_RETURN_ERROR); - - port_mgr = &unf_lport->low_level_func.port_mgr_op; - if (!port_mgr->ll_port_config_get) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_WARN, - "[warn]Port(0x%x) low level port_config_get function is NULL", - unf_lport->port_id); - - return UNF_RETURN_ERROR; - } - - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_INFO, - "[warn]Port(0x%x) get parameters with default:R_A_TOV(%d) E_D_TOV(%d)", - unf_lport->port_id, UNF_DEFAULT_FABRIC_RATOV, - UNF_DEFAULT_EDTOV); - - port_params.ra_tov = UNF_DEFAULT_FABRIC_RATOV; - port_params.ed_tov = UNF_DEFAULT_EDTOV; - - /* Update parameters with Fabric mode */ - if (unf_lport->act_topo == UNF_ACT_TOP_PUBLIC_LOOP || - unf_lport->act_topo == UNF_ACT_TOP_P2P_FABRIC) { - unf_lport->ra_tov = port_params.ra_tov; - unf_lport->ed_tov = port_params.ed_tov; - } - - return RETURN_OK; -} - -u32 unf_lport_enter_flogi(struct unf_lport *lport) -{ - struct unf_rport *unf_rport = NULL; - struct unf_cm_event_report *event = NULL; - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - u32 nport_id = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - /* Get (safe) R_Port */ - nport_id = UNF_FC_FID_FLOGI; - unf_rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_FLOGI); - - unf_rport = unf_get_safe_rport(lport, unf_rport, UNF_RPORT_REUSE_ONLY, nport_id); - if (!unf_rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) allocate RPort failed", - lport->port_id); - - return UNF_RETURN_ERROR; - } - - /* Updtae L_Port state */ - spin_lock_irqsave(&lport->lport_state_lock, flag); - unf_lport_state_ma(lport, UNF_EVENT_LPORT_NORMAL_ENTER); - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - /* Update R_Port N_Port_ID */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport->nport_id = UNF_FC_FID_FLOGI; - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - event = unf_get_one_event_node(lport); - if (event) { - event->lport = lport; - event->event_asy_flag = UNF_EVENT_ASYN; - event->unf_event_task = unf_get_port_params; - event->para_in = (void *)lport; - unf_post_one_event_node(lport, event); - } - - if (lport->root_lport != lport) { - /* for NPIV */ - ret = unf_send_fdisc(lport, unf_rport); - if (ret != RETURN_OK) - unf_lport_error_recovery(lport); - } else { - /* for Physical Port */ - ret = unf_send_flogi(lport, unf_rport); - if (ret != RETURN_OK) - unf_lport_error_recovery(lport); - } - - return ret; -} - -void unf_set_lport_state(struct unf_lport *lport, enum unf_lport_login_state state) -{ - FC_CHECK_RETURN_VOID(lport); - if (lport->states != state) - lport->retries = 0; - - lport->states = state; -} - -static void unf_lport_timeout(struct work_struct *work) -{ - struct unf_lport *unf_lport = NULL; - enum unf_lport_login_state state = 
UNF_LPORT_ST_READY; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(work); - unf_lport = container_of(work, struct unf_lport, retry_work.work); - spin_lock_irqsave(&unf_lport->lport_state_lock, flag); - state = unf_lport->states; - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) is timeout with state(0x%x)", - unf_lport->port_id, state); - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - switch (state) { - /* FLOGI retry */ - case UNF_LPORT_ST_FLOGI_WAIT: - (void)unf_lport_retry_flogi(unf_lport); - break; - - case UNF_LPORT_ST_PLOGI_WAIT: - case UNF_LPORT_ST_RFT_ID_WAIT: - case UNF_LPORT_ST_RFF_ID_WAIT: - case UNF_LPORT_ST_SCR_WAIT: - (void)unf_lport_name_server_register(unf_lport, state); - break; - - /* Send LOGO External */ - case UNF_LPORT_ST_LOGO: - break; - - /* Do nothing */ - case UNF_LPORT_ST_OFFLINE: - case UNF_LPORT_ST_READY: - case UNF_LPORT_ST_RESET: - case UNF_LPORT_ST_ONLINE: - case UNF_LPORT_ST_INITIAL: - case UNF_LPORT_ST_LINK_UP: - - unf_lport->retries = 0; - break; - default: - break; - } - - unf_lport_ref_dec_to_destroy(unf_lport); -} - -static void unf_lport_config(struct unf_lport *lport) -{ - FC_CHECK_RETURN_VOID(lport); - - INIT_DELAYED_WORK(&lport->retry_work, unf_lport_timeout); - - lport->max_retry_count = UNF_MAX_RETRY_COUNT; - lport->retries = 0; -} - -void unf_lport_error_recovery(struct unf_lport *lport) -{ - ulong delay = 0; - ulong flag = 0; - int ret_val = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(lport); - - ret = unf_lport_ref_inc(lport); - if (unlikely(ret != RETURN_OK)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) is removing and no need process", - lport->port_id); - return; - } - - spin_lock_irqsave(&lport->lport_state_lock, flag); - - /* Port State: removing */ - if (lport->port_removing) { - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) is removing and no need process", - lport->port_id); - - unf_lport_ref_dec_to_destroy(lport); - return; - } - - /* Port State: offline */ - if (lport->states == UNF_LPORT_ST_OFFLINE) { - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) is offline and no need process", - lport->port_id); - - unf_lport_ref_dec_to_destroy(lport); - return; - } - - /* Queue work state check */ - if (delayed_work_pending(&lport->retry_work)) { - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - unf_lport_ref_dec_to_destroy(lport); - return; - } - - /* Do retry operation */ - if (lport->retries < lport->max_retry_count) { - lport->retries++; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x_0x%x) enter recovery and retry %u times", - lport->port_id, lport->nport_id, lport->retries); - - delay = (ulong)lport->ed_tov; - ret_val = queue_delayed_work(unf_wq, &lport->retry_work, - (ulong)msecs_to_jiffies((u32)delay)); - if (ret_val != 0) { - atomic_inc(&lport->port_ref_cnt); - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) queue work success and reference count is %d", - lport->port_id, - atomic_read(&lport->port_ref_cnt)); - } - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - } else { - unf_lport_state_ma(lport, UNF_EVENT_LPORT_REMOTE_TIMEOUT); - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) register operation timeout and do LOGO", - lport->port_id); - - (void)unf_lport_enter_sns_logo(lport, NULL); - } - - 
unf_lport_ref_dec_to_destroy(lport); -} - -struct unf_lport *unf_cm_lookup_vport_by_vp_index(struct unf_lport *lport, u16 vp_index) -{ - FC_CHECK_RETURN_VALUE(lport, NULL); - - if (vp_index == 0) - return lport; - - if (!lport->lport_mgr_temp.unf_look_up_vport_by_index) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) function do look up vport by index is NULL", - lport->port_id); - - return NULL; - } - - return lport->lport_mgr_temp.unf_look_up_vport_by_index(lport, vp_index); -} - -struct unf_lport *unf_cm_lookup_vport_by_did(struct unf_lport *lport, u32 did) -{ - FC_CHECK_RETURN_VALUE(lport, NULL); - - if (!lport->lport_mgr_temp.unf_look_up_vport_by_did) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) function do look up vport by D_ID is NULL", - lport->port_id); - - return NULL; - } - - return lport->lport_mgr_temp.unf_look_up_vport_by_did(lport, did); -} - -struct unf_lport *unf_cm_lookup_vport_by_wwpn(struct unf_lport *lport, u64 wwpn) -{ - FC_CHECK_RETURN_VALUE(lport, NULL); - - if (!lport->lport_mgr_temp.unf_look_up_vport_by_wwpn) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) function do look up vport by WWPN is NULL", - lport->port_id); - - return NULL; - } - - return lport->lport_mgr_temp.unf_look_up_vport_by_wwpn(lport, wwpn); -} - -void unf_cm_vport_remove(struct unf_lport *vport) -{ - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VOID(vport); - unf_lport = vport->root_lport; - FC_CHECK_RETURN_VOID(unf_lport); - - if (!unf_lport->lport_mgr_temp.unf_vport_remove) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) function do vport remove is NULL", - unf_lport->port_id); - return; - } - - unf_lport->lport_mgr_temp.unf_vport_remove(vport); -} diff --git a/drivers/scsi/spfc/common/unf_lport.h b/drivers/scsi/spfc/common/unf_lport.h deleted file mode 100644 index dbd531f15b13..000000000000 --- a/drivers/scsi/spfc/common/unf_lport.h +++ /dev/null @@ -1,519 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_LPORT_H -#define UNF_LPORT_H - -#include "unf_type.h" -#include "unf_disc.h" -#include "unf_event.h" -#include "unf_common.h" - -#define UNF_PORT_TYPE_FC 0 -#define UNF_PORT_TYPE_DISC 1 -#define UNF_FW_UPDATE_PATH_LEN_MAX 255 -#define UNF_EXCHG_MGR_NUM (4) -#define UNF_ERR_CODE_PRINT_TIME 10 /* error code print times */ -#define UNF_MAX_IO_TYPE_STAT_NUM 48 /* IO abnormal max counter */ -#define UNF_MAX_IO_RETURN_VALUE 0x12 -#define UNF_MAX_SCSI_CMD 0xFF -#define UNF_MAX_LPRT_SCSI_ID_MAP 2048 - -enum unf_scsi_error_handle_type { - UNF_SCSI_ABORT_IO_TYPE = 0, - UNF_SCSI_DEVICE_RESET_TYPE, - UNF_SCSI_TARGET_RESET_TYPE, - UNF_SCSI_BUS_RESET_TYPE, - UNF_SCSI_HOST_RESET_TYPE, - UNF_SCSI_VIRTUAL_RESET_TYPE, - UNF_SCSI_ERROR_HANDLE_BUTT -}; - -enum unf_lport_destroy_step { - UNF_LPORT_DESTROY_STEP_0_SET_REMOVING = 0, - UNF_LPORT_DESTROY_STEP_1_REPORT_PORT_OUT, - UNF_LPORT_DESTROY_STEP_2_CLOSE_ROUTE, - UNF_LPORT_DESTROY_STEP_3_DESTROY_EVENT_CENTER, - UNF_LPORT_DESTROY_STEP_4_DESTROY_EXCH_MGR, - UNF_LPORT_DESTROY_STEP_5_DESTROY_ESGL_POOL, - UNF_LPORT_DESTROY_STEP_6_DESTROY_DISC_MGR, - UNF_LPORT_DESTROY_STEP_7_DESTROY_XCHG_MGR_TMP, - UNF_LPORT_DESTROY_STEP_8_DESTROY_RPORT_MG_TMP, - UNF_LPORT_DESTROY_STEP_9_DESTROY_LPORT_MG_TMP, - UNF_LPORT_DESTROY_STEP_10_DESTROY_SCSI_TABLE, - UNF_LPORT_DESTROY_STEP_11_UNREG_TGT_HOST, - UNF_LPORT_DESTROY_STEP_12_UNREG_SCSI_HOST, - UNF_LPORT_DESTROY_STEP_13_DESTROY_LW_INTERFACE, - UNF_LPORT_DESTROY_STEP_BUTT -}; - -enum 
unf_lport_enhanced_feature { - /* Enhance GFF feature connect even if fail to get GFF feature */ - UNF_LPORT_ENHANCED_FEATURE_ENHANCED_GFF = 0x0001, - UNF_LPORT_ENHANCED_FEATURE_IO_TRANSFERLIST = 0x0002, /* Enhance IO balance */ - UNF_LPORT_ENHANCED_FEATURE_IO_CHECKPOINT = 0x0004, /* Enhance IO check */ - UNF_LPORT_ENHANCED_FEATURE_CLOSE_FW_ROUTE = 0x0008, /* Close FW ROUTE */ - /* lowest frequency read SFP information */ - UNF_LPORT_ENHANCED_FEATURE_READ_SFP_ONCE = 0x0010, - UNF_LPORT_ENHANCED_FEATURE_BUTT -}; - -enum unf_lport_login_state { - UNF_LPORT_ST_ONLINE = 0x2000, /* uninitialized */ - UNF_LPORT_ST_INITIAL, /* initialized and LinkDown */ - UNF_LPORT_ST_LINK_UP, /* initialized and Link UP */ - UNF_LPORT_ST_FLOGI_WAIT, /* waiting for FLOGI completion */ - UNF_LPORT_ST_PLOGI_WAIT, /* waiting for PLOGI completion */ - UNF_LPORT_ST_RNN_ID_WAIT, /* waiting for RNN_ID completion */ - UNF_LPORT_ST_RSNN_NN_WAIT, /* waiting for RSNN_NN completion */ - UNF_LPORT_ST_RSPN_ID_WAIT, /* waiting for RSPN_ID completion */ - UNF_LPORT_ST_RPN_ID_WAIT, /* waiting for RPN_ID completion */ - UNF_LPORT_ST_RFT_ID_WAIT, /* waiting for RFT_ID completion */ - UNF_LPORT_ST_RFF_ID_WAIT, /* waiting for RFF_ID completion */ - UNF_LPORT_ST_SCR_WAIT, /* waiting for SCR completion */ - UNF_LPORT_ST_READY, /* ready for use */ - UNF_LPORT_ST_LOGO, /* waiting for LOGO completion */ - UNF_LPORT_ST_RESET, /* being reset and will restart */ - UNF_LPORT_ST_OFFLINE, /* offline */ - UNF_LPORT_ST_BUTT -}; - -enum unf_lport_event { - UNF_EVENT_LPORT_NORMAL_ENTER = 0x8000, /* next state enter */ - UNF_EVENT_LPORT_ONLINE = 0x8001, /* LPort link up */ - UNF_EVENT_LPORT_LINK_UP = 0x8002, /* LPort link up */ - UNF_EVENT_LPORT_LINK_DOWN = 0x8003, /* LPort link down */ - UNF_EVENT_LPORT_OFFLINE = 0x8004, /* lPort bing stopped */ - UNF_EVENT_LPORT_RESET = 0x8005, - UNF_EVENT_LPORT_REMOTE_ACC = 0x8006, /* next state enter */ - UNF_EVENT_LPORT_REMOTE_RJT = 0x8007, /* rport reject */ - UNF_EVENT_LPORT_REMOTE_TIMEOUT = 0x8008, /* rport time out */ - UNF_EVENT_LPORT_READY = 0x8009, - UNF_EVENT_LPORT_REMOTE_BUTT -}; - -struct unf_cm_disc_mg_template { - /* start input:L_Port,return:ok/fail */ - u32 (*unf_disc_start)(void *lport); - /* stop input: L_Port,return:ok/fail */ - u32 (*unf_disc_stop)(void *lport); - - /* Callback after disc complete[with event:ok/fail]. 
*/ - void (*unf_disc_callback)(void *lport, u32 result); -}; - -struct unf_chip_manage_info { - struct list_head list_chip_thread_entry; - struct list_head list_head; - spinlock_t chip_event_list_lock; - struct task_struct *thread; - u32 list_num; - u32 slot_id; - u8 chip_id; - u8 rsv; - u8 sfp_9545_fault; - u8 sfp_power_fault; - atomic_t ref_cnt; - u32 thread_exit; - struct unf_chip_info chip_info; - atomic_t card_loop_test_flag; - spinlock_t card_loop_back_state_lock; - char update_path[UNF_FW_UPDATE_PATH_LEN_MAX]; -}; - -enum unf_timer_type { - UNF_TIMER_TYPE_TGT_IO, - UNF_TIMER_TYPE_INI_IO, - UNF_TIMER_TYPE_REQ_IO, - UNF_TIMER_TYPE_TGT_RRQ, - UNF_TIMER_TYPE_INI_RRQ, - UNF_TIMER_TYPE_SFS, - UNF_TIMER_TYPE_INI_ABTS -}; - -struct unf_cm_xchg_mgr_template { - void *(*unf_xchg_get_free_and_init)(void *lport, u32 xchg_type); - void *(*unf_look_up_xchg_by_id)(void *lport, u16 ox_id, u32 oid); - void *(*unf_look_up_xchg_by_tag)(void *lport, u16 hot_pool_tag); - void (*unf_xchg_release)(void *lport, void *xchg); - void (*unf_xchg_mgr_io_xchg_abort)(void *lport, void *rport, u32 sid, u32 did, - u32 extra_io_state); - void (*unf_xchg_mgr_sfs_xchg_abort)(void *lport, void *rport, u32 sid, u32 did); - void (*unf_xchg_add_timer)(void *xchg, ulong time_ms, enum unf_timer_type time_type); - void (*unf_xchg_cancel_timer)(void *xchg); - void (*unf_xchg_abort_all_io)(void *lport, u32 xchg_type, bool clean); - void *(*unf_look_up_xchg_by_cmnd_sn)(void *lport, u64 command_sn, - u32 world_id, void *pinitiator); - void (*unf_xchg_abort_by_lun)(void *lport, void *rport, u64 lun_id, void *xchg, - bool abort_all_lun_flag); - - void (*unf_xchg_abort_by_session)(void *lport, void *rport); -}; - -struct unf_cm_lport_template { - void *(*unf_look_up_vport_by_index)(void *lport, u16 vp_index); - void *(*unf_look_up_vport_by_port_id)(void *lport, u32 port_id); - void *(*unf_look_up_vport_by_wwpn)(void *lport, u64 wwpn); - void *(*unf_look_up_vport_by_did)(void *lport, u32 did); - void (*unf_vport_remove)(void *vport); -}; - -struct unf_lport_state_ma { - enum unf_lport_login_state lport_state; - enum unf_lport_login_state (*lport_state_ma)(enum unf_lport_login_state old_state, - enum unf_lport_event event); -}; - -struct unf_rport_pool { - u32 rport_pool_count; - void *rport_pool_add; - struct list_head list_rports_pool; - spinlock_t rport_free_pool_lock; - /* for synchronous reuse RPort POOL completion */ - struct completion *rport_pool_completion; - ulong *rpi_bitmap; -}; - -struct unf_vport_pool { - u16 vport_pool_count; - void *vport_pool_addr; - struct list_head list_vport_pool; - spinlock_t vport_pool_lock; - struct completion *vport_pool_completion; - u16 slab_next_index; /* Next free vport */ - u16 slab_total_sum; /* Total Vport num */ - struct unf_lport *vport_slab[ARRAY_INDEX_0]; -}; - -struct unf_esgl_pool { - u32 esgl_pool_count; - void *esgl_pool_addr; - struct list_head list_esgl_pool; - spinlock_t esgl_pool_lock; - struct buf_describe esgl_buff_list; -}; - -/* little endium */ -struct unf_port_id_page { - struct list_head list_node_rscn; - u8 port_id_port; - u8 port_id_area; - u8 port_id_domain; - u8 addr_format : 2; - u8 event_qualifier : 4; - u8 reserved : 2; -}; - -struct unf_rscn_mgr { - spinlock_t rscn_id_list_lock; - u32 free_rscn_count; - struct list_head list_free_rscn_page; - struct list_head list_using_rscn_page; - void *rscn_pool_add; - struct unf_port_id_page *(*unf_get_free_rscn_node)(void *rscn_mg); - void (*unf_release_rscn_node)(void *rscn_mg, void *rscn_node); -}; - -struct 
unf_disc_rport_mg { - void *disc_pool_add; - struct list_head list_disc_rports_pool; - struct list_head list_disc_rports_busy; -}; - -struct unf_disc_manage_info { - struct list_head list_head; - spinlock_t disc_event_list_lock; - atomic_t disc_contrl_size; - - u32 thread_exit; - struct task_struct *thread; -}; - -struct unf_disc { - u32 retry_count; - u32 max_retry_count; - u32 disc_flag; - - struct completion *disc_completion; - atomic_t disc_ref_cnt; - - struct list_head list_busy_rports; - struct list_head list_delete_rports; - struct list_head list_destroy_rports; - - spinlock_t rport_busy_pool_lock; - - struct unf_lport *lport; - enum unf_disc_state states; - struct delayed_work disc_work; - - /* Disc operation template */ - struct unf_cm_disc_mg_template disc_temp; - - /* UNF_INIT_DISC/UNF_RSCN_DISC */ - u32 disc_option; - - /* RSCN list */ - struct unf_rscn_mgr rscn_mgr; - struct unf_disc_rport_mg disc_rport_mgr; - struct unf_disc_manage_info disc_thread_info; - - u64 last_disc_jiff; -}; - -enum unf_service_item { - UNF_SERVICE_ITEM_FLOGI = 0, - UNF_SERVICE_ITEM_PLOGI, - UNF_SERVICE_ITEM_PRLI, - UNF_SERVICE_ITEM_RSCN, - UNF_SERVICE_ITEM_ABTS, - UNF_SERVICE_ITEM_PDISC, - UNF_SERVICE_ITEM_ADISC, - UNF_SERVICE_ITEM_LOGO, - UNF_SERVICE_ITEM_SRR, - UNF_SERVICE_ITEM_RRQ, - UNF_SERVICE_ITEM_ECHO, - UNF_SERVICE_BUTT -}; - -/* Link service counter */ -struct unf_link_service_collect { - u64 service_cnt[UNF_SERVICE_BUTT]; -}; - -struct unf_pcie_error_count { - u32 pcie_error_count[UNF_PCIE_BUTT]; -}; - -#define INVALID_WWPN 0 - -enum unf_device_scsi_state { - UNF_SCSI_ST_INIT = 0, - UNF_SCSI_ST_OFFLINE, - UNF_SCSI_ST_ONLINE, - UNF_SCSI_ST_DEAD, - UNF_SCSI_ST_BUTT -}; - -struct unf_wwpn_dfx_counter_info { - atomic64_t io_done_cnt[UNF_MAX_IO_RETURN_VALUE]; - atomic64_t scsi_cmd_cnt[UNF_MAX_SCSI_CMD]; - atomic64_t target_busy; - atomic64_t host_busy; - atomic_t error_handle[UNF_SCSI_ERROR_HANDLE_BUTT]; - atomic_t error_handle_result[UNF_SCSI_ERROR_HANDLE_BUTT]; - atomic_t device_alloc; - atomic_t device_destroy; -}; - -#define UNF_MAX_LUN_PER_TARGET 256 -struct unf_wwpn_rport_info { - u64 wwpn; - struct unf_rport *rport; /* Rport which linkup */ - void *lport; /* Lport */ - u32 target_id; /* target_id distribute by scsi */ - u32 las_ten_scsi_state; - atomic_t scsi_state; - struct unf_wwpn_dfx_counter_info *dfx_counter; - struct delayed_work loss_tmo_work; - bool need_scan; - struct list_head fc_lun_list; - u8 *lun_qos_level; -}; - -struct unf_rport_scsi_id_image { - spinlock_t scsi_image_table_lock; - struct unf_wwpn_rport_info - *wwn_rport_info_table; - u32 max_scsi_id; -}; - -enum unf_lport_dirty_flag { - UNF_LPORT_DIRTY_FLAG_NONE = 0, - UNF_LPORT_DIRTY_FLAG_XCHGMGR_DIRTY = 0x100, - UNF_LPORT_DIRTY_FLAG_RPORT_POOL_DIRTY = 0x200, - UNF_LPORT_DIRTY_FLAG_DISC_DIRTY = 0x400, - UNF_LPORT_DIRTY_FLAG_BUTT -}; - -typedef struct unf_rport *(*unf_rport_set_qualifier)(struct unf_lport *lport, - struct unf_rport *rport_by_nport_id, - struct unf_rport *rport_by_wwpn, - u64 wwpn, u32 sid); - -typedef u32 (*unf_tmf_status_recovery)(void *rport, void *xchg); - -enum unf_start_work_state { - UNF_START_WORK_STOP, - UNF_START_WORK_BEGIN, - UNF_START_WORK_COMPLETE -}; - -struct unf_qos_info { - u64 wwpn; - u32 nport_id; - enum unf_rport_qos_level qos_level; - struct list_head entry_qos_info; -}; - -struct unf_ini_private_info { - u32 driver_type; /* Driver Type */ - void *lower; /* driver private pointer */ -}; - -struct unf_product_host_info { - void *tgt_host; - struct Scsi_Host *host; - struct 
unf_ini_private_info drv_private_info; - struct Scsi_Host scsihost; -}; - -struct unf_lport { - u32 port_type; /* Port Type, fc or fcoe */ - atomic_t port_ref_cnt; /* LPort reference counter */ - void *fc_port; /* hard adapter hba pointer */ - void *rport, *drport; /* Used for SCSI interface */ - void *vport; - ulong system_io_bus_num; - - struct unf_product_host_info host_info; /* scsi host mg */ - struct unf_rport_scsi_id_image rport_scsi_table; - bool port_removing; - bool io_allowed; - bool port_dirt_exchange; - - spinlock_t xchg_mgr_lock; - struct list_head list_xchg_mgr_head; - struct list_head list_drty_xchg_mgr_head; - void *xchg_mgr[UNF_EXCHG_MGR_NUM]; - bool qos_cs_ctrl; - bool priority; - enum unf_rport_qos_level qos_level; - spinlock_t qos_mgr_lock; - struct list_head list_qos_head; - struct list_head list_vports_head; /* Vport Mg */ - struct list_head list_intergrad_vports; /* Vport intergrad list */ - struct list_head list_destroy_vports; /* Vport destroy list */ - - struct list_head entry_vport; /* VPort entry, hook in list_vports_head */ - - struct list_head entry_lport; /* LPort entry */ - spinlock_t lport_state_lock; /* UL Port Lock */ - struct unf_disc disc; /* Disc and rport Mg */ - struct unf_rport_pool rport_pool; /* rport pool,Vport share Lport pool */ - struct unf_esgl_pool esgl_pool; /* external sgl pool */ - u32 port_id; /* Port Management ,0x11000 etc. */ - enum unf_lport_login_state states; - u32 link_up; - u32 speed; - - u64 node_name; - u64 port_name; - u64 fabric_node_name; - u32 nport_id; - u32 max_frame_size; - u32 ed_tov; - u32 ra_tov; - u32 class_of_service; - u32 options; /* ini or tgt */ - u32 retries; - u32 max_retry_count; - enum unf_act_topo act_topo; - bool switch_state; /* TRUE---->ON,false---->OFF */ - bool last_switch_state; /* TRUE---->ON,false---->OFF */ - bool bbscn_support; /* TRUE---->ON,false---->OFF */ - - enum unf_start_work_state start_work_state; - struct unf_cm_xchg_mgr_template xchg_mgr_temp; /* Xchg Mg operation template */ - struct unf_cm_lport_template lport_mgr_temp; /* Xchg LPort operation template */ - struct unf_low_level_functioon_op low_level_func; - struct unf_event_mgr event_mgr; /* Disc and rport Mg */ - struct delayed_work retry_work; /* poll work or delay work */ - - struct workqueue_struct *link_event_wq; - struct workqueue_struct *xchg_wq; - atomic64_t io_stat[UNF_MAX_IO_TYPE_STAT_NUM]; - struct unf_err_code err_code_sum; /* Error code counter */ - struct unf_port_dynamic_info port_dynamic_info; - struct unf_link_service_collect link_service_info; - struct unf_pcie_error_count pcie_error_cnt; - unf_rport_set_qualifier unf_qualify_rport; /* Qualify Rport */ - - unf_tmf_status_recovery unf_tmf_abnormal_recovery; /* tmf marker recovery */ - - struct delayed_work route_timer_work; /* L_Port timer route */ - - u16 vp_index; /* Vport Index, Lport:0 */ - u16 path_id; - struct unf_vport_pool *vport_pool; /* Only for Lport */ - void *lport_mgr[UNF_MAX_LPRT_SCSI_ID_MAP]; - bool vport_remove_flags; - - void *root_lport; /* Point to physic Lport */ - - struct completion *lport_free_completion; /* Free LPort Completion */ - -#define UNF_LPORT_NOP 1 -#define UNF_LPORT_NORMAL 0 - - atomic_t lport_no_operate_flag; - - bool loop_back_test_mode; - bool switch_state_before_test_mode; /* TRUE---->ON,false---->OFF */ - u32 enhanced_features; /* Enhanced Features */ - - u32 destroy_step; - u32 dirty_flag; - struct unf_chip_manage_info *chip_info; - - u8 unique_position; - u8 sfp_power_fault_count; - u8 sfp_9545_fault_count; - u64 
last_tx_fault_jif; /* SFP last tx fault jiffies */ - u32 target_cnt; - /* Server card: UNF_FC_SERVER_BOARD_32_G(6) for 32G mode, - * UNF_FC_SERVER_BOARD_16_G(7) for 16G mode - */ - u32 card_type; - atomic_t scsi_session_add_success; - atomic_t scsi_session_add_failed; - atomic_t scsi_session_del_success; - atomic_t scsi_session_del_failed; - atomic_t add_start_work_failed; - atomic_t add_closing_work_failed; - atomic_t device_alloc; - atomic_t device_destroy; - atomic_t session_loss_tmo; - atomic_t alloc_scsi_id; - atomic_t resume_scsi_id; - atomic_t reuse_scsi_id; - atomic64_t last_exchg_mgr_idx; - atomic_t host_no; - atomic64_t exchg_index; - int scan_world_id; - struct semaphore wmi_task_sema; - bool ready_to_remove; - u32 pcie_link_down_cnt; - bool pcie_link_down; - u8 fw_version[SPFC_VER_LEN]; - atomic_t link_lose_tmo; - u32 max_ssq_num; -}; - -void unf_lport_state_ma(struct unf_lport *lport, enum unf_lport_event lport_event); -void unf_lport_error_recovery(struct unf_lport *lport); -void unf_set_lport_state(struct unf_lport *lport, enum unf_lport_login_state state); -void unf_init_port_parms(struct unf_lport *lport); -u32 unf_lport_enter_flogi(struct unf_lport *lport); -void unf_lport_enter_sns_plogi(struct unf_lport *lport); -u32 unf_init_disc_mgr(struct unf_lport *lport); -u32 unf_init_lport_route(struct unf_lport *lport); -void unf_destroy_lport_route(struct unf_lport *lport); -void unf_reset_lport_params(struct unf_lport *lport); -void unf_cm_mark_dirty_mem(struct unf_lport *lport, enum unf_lport_dirty_flag type); -struct unf_lport *unf_cm_lookup_vport_by_vp_index(struct unf_lport *lport, u16 vp_index); -struct unf_lport *unf_cm_lookup_vport_by_did(struct unf_lport *lport, u32 did); -struct unf_lport *unf_cm_lookup_vport_by_wwpn(struct unf_lport *lport, u64 wwpn); -void unf_cm_vport_remove(struct unf_lport *vport); - -#endif diff --git a/drivers/scsi/spfc/common/unf_ls.c b/drivers/scsi/spfc/common/unf_ls.c deleted file mode 100644 index 6a2e1fd1872f..000000000000 --- a/drivers/scsi/spfc/common/unf_ls.c +++ /dev/null @@ -1,4883 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_ls.h" -#include "unf_log.h" -#include "unf_service.h" -#include "unf_portman.h" -#include "unf_gs.h" -#include "unf_npiv.h" - -static void unf_flogi_acc_ob_callback(struct unf_xchg *xchg); -static void unf_plogi_acc_ob_callback(struct unf_xchg *xchg); -static void unf_prli_acc_ob_callback(struct unf_xchg *xchg); -static void unf_rscn_acc_ob_callback(struct unf_xchg *xchg); -static void unf_pdisc_acc_ob_callback(struct unf_xchg *xchg); -static void unf_adisc_acc_ob_callback(struct unf_xchg *xchg); -static void unf_logo_acc_ob_callback(struct unf_xchg *xchg); -static void unf_logo_ob_callback(struct unf_xchg *xchg); -static void unf_logo_callback(void *lport, void *rport, void *xchg); -static void unf_rrq_callback(void *lport, void *rport, void *xchg); -static void unf_rrq_ob_callback(struct unf_xchg *xchg); -static void unf_lport_update_nport_id(struct unf_lport *lport, u32 nport_id); -static void -unf_lport_update_time_params(struct unf_lport *lport, - struct unf_flogi_fdisc_payload *flogi_payload); - -static void unf_login_with_rport_in_n2n(struct unf_lport *lport, - u64 remote_port_name, - u64 remote_node_name); -#define UNF_LOWLEVEL_BBCREDIT 0x6 -#define UNF_DEFAULT_BB_SC_N 0 - -#define UNF_ECHO_REQ_SIZE 0 -#define UNF_ECHO_WAIT_SEM_TIMEOUT(lport) (2 * (ulong)(lport)->ra_tov) - -#define UNF_SERVICE_COLLECT(service_collect, item) \ - do { 
\ - if ((item) < UNF_SERVICE_BUTT) { \ - (service_collect).service_cnt[(item)]++; \ - } \ - } while (0) - -static void unf_check_rport_need_delay_prli(struct unf_lport *lport, - struct unf_rport *rport, - u32 port_feature) -{ - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - port_feature &= UNF_PORT_MODE_BOTH; - - /* Used for: L_Port has INI mode & R_Port is not SW */ - if (rport->nport_id < UNF_FC_FID_DOM_MGR) { - /* - * 1. immediately: R_Port only with TGT, or - * L_Port only with INI & R_Port has TGT mode, send PRLI - * immediately - */ - if ((port_feature == UNF_PORT_MODE_TGT || - lport->act_topo == UNF_ACT_TOP_P2P_DIRECT) || - (UNF_PORT_MODE_TGT == (port_feature & UNF_PORT_MODE_TGT))) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_MAJOR, - "[info]LOGIN: Port(0x%x_0x%x) Rport(0x%x) with feature(0x%x) send PRLI", - lport->port_id, lport->nport_id, - rport->nport_id, port_feature); - ret = unf_send_prli(lport, rport, ELS_PRLI); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) Rport(0x%x) with feature(0x%x) send PRLI failed", - lport->port_id, lport->nport_id, - rport->nport_id, port_feature); - - unf_rport_error_recovery(rport); - } - } - /* 2. R_Port has BOTH mode or unknown, Delay to send PRLI */ - else if (port_feature != UNF_PORT_MODE_INI) { - /* Prevent: PRLI done before PLOGI */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x_0x%x) Rport(0x%x) with feature(0x%x) delay to send PRLI", - lport->port_id, lport->nport_id, - rport->nport_id, port_feature); - - /* Delay to send PRLI to R_Port */ - unf_rport_delay_login(rport); - } else { - /* 3. R_Port only with INI mode: wait for R_Port's PRLI: - * Do not care - */ - /* Cancel recovery(timer) work */ - if (delayed_work_pending(&rport->recovery_work)) { - if (cancel_delayed_work(&rport->recovery_work)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]LOGIN: Port(0x%x_0x%x) Rport(0x%x) with feature(0x%x) is pure INI", - lport->port_id, - lport->nport_id, - rport->nport_id, - port_feature); - - unf_rport_ref_dec(rport); - } - } - - /* Server: R_Port only support INI, do not care this - * case - */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x_0x%x) Rport(0x%x) with feature(0x%x) wait for PRLI", - lport->port_id, lport->nport_id, - rport->nport_id, port_feature); - } - } -} - -static u32 unf_low_level_bb_credit(struct unf_lport *lport) -{ - struct unf_lport *unf_lport = NULL; - u32 ret = UNF_RETURN_ERROR; - u32 bb_credit = UNF_LOWLEVEL_BBCREDIT; - - if (unlikely(!lport)) - return bb_credit; - - unf_lport = lport; - - if (unlikely(!unf_lport->low_level_func.port_mgr_op.ll_port_config_get)) - return bb_credit; - - ret = unf_lport->low_level_func.port_mgr_op.ll_port_config_get((void *)unf_lport->fc_port, - UNF_PORT_CFG_GET_WORKBALE_BBCREDIT, - (void *)&bb_credit); - if (unlikely(ret != RETURN_OK)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[warn]Port(0x%x) get BB_Credit failed, use default value(%d)", - unf_lport->port_id, UNF_LOWLEVEL_BBCREDIT); - - bb_credit = UNF_LOWLEVEL_BBCREDIT; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) with BB_Credit(%u)", unf_lport->port_id, - bb_credit); - - return bb_credit; -} - -u32 unf_low_level_bb_scn(struct unf_lport *lport) -{ - struct unf_lport *unf_lport = NULL; - struct unf_low_level_port_mgr_op *port_mgr = NULL; - u32 ret = UNF_RETURN_ERROR; - u32 bb_scn = UNF_DEFAULT_BB_SC_N; - - if (unlikely(!lport)) - return bb_scn; - - 
unf_lport = lport; - port_mgr = &unf_lport->low_level_func.port_mgr_op; - - if (unlikely(!port_mgr->ll_port_config_get)) - return bb_scn; - - ret = port_mgr->ll_port_config_get((void *)unf_lport->fc_port, - UNF_PORT_CFG_GET_WORKBALE_BBSCN, - (void *)&bb_scn); - if (unlikely(ret != RETURN_OK)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[warn]Port(0x%x) get bbscn failed, use default value(%d)", - unf_lport->port_id, UNF_DEFAULT_BB_SC_N); - - bb_scn = UNF_DEFAULT_BB_SC_N; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x)'s bbscn(%d)", unf_lport->port_id, bb_scn); - - return bb_scn; -} - -static void unf_fill_rec_pld(struct unf_rec_pld *rec_pld, u32 sid) -{ - FC_CHECK_RETURN_VOID(rec_pld); - - rec_pld->rec_cmnd = (UNF_ELS_CMND_REC); - rec_pld->xchg_org_sid = sid; - rec_pld->ox_id = INVALID_VALUE16; - rec_pld->rx_id = INVALID_VALUE16; -} - -u32 unf_send_rec(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *io_xchg) -{ - struct unf_rec_pld *rec_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(io_xchg, UNF_RETURN_ERROR); - - memset(&pkg, 0, sizeof(struct unf_frame_pkg)); - - xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, rport, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for PLOGI", - lport->port_id); - - return ret; - } - - xchg->cmnd_code = ELS_REC; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REQ; - pkg.origin_hottag = io_xchg->hotpooltag; - pkg.origin_magicnum = io_xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME]; - rec_pld = &fc_entry->rec.rec_pld; - memset(rec_pld, 0, sizeof(struct unf_rec_pld)); - - unf_fill_rec_pld(rec_pld, lport->nport_id); - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[info]LOGIN: Send REC %s. Port(0x%x_0x%x_0x%llx)--->RPort(0x%x_0x%llx) with hottag(0x%x)", - (ret != RETURN_OK) ? 
"failed" : "succeed", lport->port_id, - lport->nport_id, lport->port_name, rport->nport_id, - rport->port_name, xchg->hotpooltag); - - return ret; -} - -static void unf_fill_flogi_pld(struct unf_flogi_fdisc_payload *flogi_pld, - struct unf_lport *lport) -{ - struct unf_fabric_parm *fabric_parms = NULL; - - FC_CHECK_RETURN_VOID(flogi_pld); - FC_CHECK_RETURN_VOID(lport); - - fabric_parms = &flogi_pld->fabric_parms; - if (lport->act_topo == UNF_ACT_TOP_P2P_FABRIC || - lport->act_topo == UNF_ACT_TOP_P2P_DIRECT || - lport->act_topo == UNF_TOP_P2P_MASK) { - /* Fabric or P2P or FCoE VN2VN topology */ - fabric_parms->co_parms.bb_credit = unf_low_level_bb_credit(lport); - fabric_parms->co_parms.lowest_version = UNF_PLOGI_VERSION_LOWER; - fabric_parms->co_parms.highest_version = UNF_PLOGI_VERSION_UPPER; - fabric_parms->co_parms.bb_receive_data_field_size = (lport->max_frame_size); - fabric_parms->co_parms.bbscn = unf_low_level_bb_scn(lport); - } else { - /* Loop topology here */ - fabric_parms->co_parms.clean_address = UNF_CLEAN_ADDRESS_DEFAULT; - fabric_parms->co_parms.bb_credit = UNF_BBCREDIT_LPORT; - fabric_parms->co_parms.lowest_version = UNF_PLOGI_VERSION_LOWER; - fabric_parms->co_parms.highest_version = UNF_PLOGI_VERSION_UPPER; - fabric_parms->co_parms.alternate_bb_credit_mgmt = UNF_BBCREDIT_MANAGE_LPORT; - fabric_parms->co_parms.bb_receive_data_field_size = (lport->max_frame_size); - } - - if (lport->low_level_func.support_max_npiv_num != 0) - /* support NPIV */ - fabric_parms->co_parms.clean_address = 1; - - fabric_parms->cl_parms[ARRAY_INDEX_2].valid = UNF_CLASS_VALID; - - /* according the user value to set the priority */ - if (lport->qos_cs_ctrl) - fabric_parms->cl_parms[ARRAY_INDEX_2].priority = UNF_PRIORITY_ENABLE; - else - fabric_parms->cl_parms[ARRAY_INDEX_2].priority = UNF_PRIORITY_DISABLE; - - fabric_parms->cl_parms[ARRAY_INDEX_2].sequential_delivery = UNF_SEQUEN_DELIVERY_REQ; - fabric_parms->cl_parms[ARRAY_INDEX_2].received_data_field_size = (lport->max_frame_size); - - fabric_parms->high_node_name = UNF_GET_NAME_HIGH_WORD(lport->node_name); - fabric_parms->low_node_name = UNF_GET_NAME_LOW_WORD(lport->node_name); - fabric_parms->high_port_name = UNF_GET_NAME_HIGH_WORD(lport->port_name); - fabric_parms->low_port_name = UNF_GET_NAME_LOW_WORD(lport->port_name); -} - -u32 unf_send_flogi(struct unf_lport *lport, struct unf_rport *rport) -{ - struct unf_xchg *xchg = NULL; - struct unf_flogi_fdisc_payload *flogi_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg = {0}; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, rport, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for FLOGI", - lport->port_id); - - return ret; - } - - /* FLOGI */ - xchg->cmnd_code = ELS_FLOGI; - - /* for rcvd flogi acc/rjt processer */ - xchg->callback = unf_flogi_callback; - /* for send flogi failed processer */ - xchg->ob_callback = unf_flogi_ob_callback; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REQ; - - flogi_pld = &fc_entry->flogi.flogi_payload; - memset(flogi_pld, 0, sizeof(struct unf_flogi_fdisc_payload)); - unf_fill_flogi_pld(flogi_pld, lport); - flogi_pld->cmnd = (UNF_ELS_CMND_FLOGI); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Begin to send FLOGI. 
Port(0x%x)--->RPort(0x%x) with hottag(0x%x)", - lport->port_id, rport->nport_id, xchg->hotpooltag); - - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, flogi_pld, - sizeof(struct unf_flogi_fdisc_payload)); - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[warn]LOGIN: Send FLOGI failed. Port(0x%x)--->RPort(0x%x)", - lport->port_id, rport->nport_id); - - unf_cm_free_xchg((void *)lport, (void *)xchg); - } - - return ret; -} - -u32 unf_send_fdisc(struct unf_lport *lport, struct unf_rport *rport) -{ - struct unf_xchg *exch = NULL; - struct unf_flogi_fdisc_payload *fdisc_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg = {0}; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - exch = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, rport, &fc_entry); - if (!exch) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for FDISC", - lport->port_id); - - return ret; - } - - exch->cmnd_code = ELS_FDISC; - - exch->callback = unf_fdisc_callback; - exch->ob_callback = unf_fdisc_ob_callback; - - unf_fill_package(&pkg, exch, rport); - pkg.type = UNF_PKG_ELS_REQ; - - fdisc_pld = &fc_entry->fdisc.fdisc_payload; - memset(fdisc_pld, 0, sizeof(struct unf_flogi_fdisc_payload)); - unf_fill_flogi_pld(fdisc_pld, lport); - fdisc_pld->cmnd = UNF_ELS_CMND_FDISC; - - ret = unf_ls_gs_cmnd_send(lport, &pkg, exch); - - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)exch); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: FDISC send %s. Port(0x%x)--->RPort(0x%x) with hottag(0x%x)", - (ret != RETURN_OK) ? "failed" : "succeed", lport->port_id, - rport->nport_id, exch->hotpooltag); - - return ret; -} - -static void unf_fill_plogi_pld(struct unf_plogi_payload *plogi_pld, - struct unf_lport *lport) -{ - struct unf_lgn_parm *login_parms = NULL; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VOID(plogi_pld); - FC_CHECK_RETURN_VOID(lport); - - unf_lport = lport->root_lport; - plogi_pld->cmnd = (UNF_ELS_CMND_PLOGI); - login_parms = &plogi_pld->stparms; - - if (lport->act_topo == UNF_ACT_TOP_P2P_FABRIC || - lport->act_topo == UNF_ACT_TOP_P2P_DIRECT) { - /* P2P or Fabric mode or FCoE VN2VN */ - login_parms->co_parms.bb_credit = (unf_low_level_bb_credit(lport)); - login_parms->co_parms.alternate_bb_credit_mgmt = UNF_BBCREDIT_MANAGE_NFPORT; - login_parms->co_parms.bbscn = - (lport->act_topo == UNF_ACT_TOP_P2P_FABRIC) - ? 
0 - : unf_low_level_bb_scn(lport); - } else { - /* Public loop & Private loop mode */ - login_parms->co_parms.bb_credit = UNF_BBCREDIT_LPORT; - login_parms->co_parms.alternate_bb_credit_mgmt = UNF_BBCREDIT_MANAGE_LPORT; - } - - login_parms->co_parms.lowest_version = UNF_PLOGI_VERSION_LOWER; - login_parms->co_parms.highest_version = UNF_PLOGI_VERSION_UPPER; - login_parms->co_parms.continuously_increasing = UNF_CONTIN_INCREASE_SUPPORT; - login_parms->co_parms.bb_receive_data_field_size = (lport->max_frame_size); - login_parms->co_parms.nport_total_concurrent_sequences = (UNF_PLOGI_CONCURRENT_SEQ); - login_parms->co_parms.relative_offset = (UNF_PLOGI_RO_CATEGORY); - login_parms->co_parms.e_d_tov = UNF_DEFAULT_EDTOV; - if (unf_lport->priority == UNF_PRIORITY_ENABLE) { - login_parms->cl_parms[ARRAY_INDEX_2].priority = - UNF_PRIORITY_ENABLE; - } else { - login_parms->cl_parms[ARRAY_INDEX_2].priority = - UNF_PRIORITY_DISABLE; - } - - /* for class_3 */ - login_parms->cl_parms[ARRAY_INDEX_2].valid = UNF_CLASS_VALID; - login_parms->cl_parms[ARRAY_INDEX_2].received_data_field_size = (lport->max_frame_size); - login_parms->cl_parms[ARRAY_INDEX_2].concurrent_sequences = (UNF_PLOGI_CONCURRENT_SEQ); - login_parms->cl_parms[ARRAY_INDEX_2].open_sequence_per_exchange = (UNF_PLOGI_SEQ_PER_XCHG); - - login_parms->high_node_name = UNF_GET_NAME_HIGH_WORD(lport->node_name); - login_parms->low_node_name = UNF_GET_NAME_LOW_WORD(lport->node_name); - login_parms->high_port_name = UNF_GET_NAME_HIGH_WORD(lport->port_name); - login_parms->low_port_name = UNF_GET_NAME_LOW_WORD(lport->port_name); - - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, plogi_pld, sizeof(struct unf_plogi_payload)); -} - -u32 unf_send_plogi(struct unf_lport *lport, struct unf_rport *rport) -{ - struct unf_plogi_payload *plogi_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - memset(&pkg, 0, sizeof(struct unf_frame_pkg)); - - xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, rport, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for PLOGI", - lport->port_id); - - return ret; - } - - xchg->cmnd_code = ELS_PLOGI; - - xchg->callback = unf_plogi_callback; - xchg->ob_callback = unf_plogi_ob_callback; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REQ; - unf_cm_xchg_mgr_abort_io_by_id(lport, rport, xchg->sid, xchg->did, 0); - - plogi_pld = &fc_entry->plogi.payload; - memset(plogi_pld, 0, sizeof(struct unf_plogi_payload)); - unf_fill_plogi_pld(plogi_pld, lport); - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Send PLOGI %s. Port(0x%x_0x%x_0x%llx)--->RPort(0x%x_0x%llx) with hottag(0x%x)", - (ret != RETURN_OK) ? 
"failed" : "succeed", lport->port_id, - lport->nport_id, lport->port_name, rport->nport_id, - rport->port_name, xchg->hotpooltag); - - return ret; -} - -static void unf_fill_logo_pld(struct unf_logo_payload *logo_pld, - struct unf_lport *lport) -{ - FC_CHECK_RETURN_VOID(logo_pld); - FC_CHECK_RETURN_VOID(lport); - - logo_pld->cmnd = (UNF_ELS_CMND_LOGO); - logo_pld->nport_id = (lport->nport_id); - logo_pld->high_port_name = UNF_GET_NAME_HIGH_WORD(lport->port_name); - logo_pld->low_port_name = UNF_GET_NAME_LOW_WORD(lport->port_name); - - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, logo_pld, sizeof(struct unf_logo_payload)); -} - -u32 unf_send_logo(struct unf_lport *lport, struct unf_rport *rport) -{ - struct unf_logo_payload *logo_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - struct unf_frame_pkg pkg = {0}; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, rport, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for LOGO", - lport->port_id); - - return ret; - } - - xchg->cmnd_code = ELS_LOGO; - /* retry or link down immediately */ - xchg->callback = unf_logo_callback; - /* do nothing */ - xchg->ob_callback = unf_logo_ob_callback; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REQ; - - logo_pld = &fc_entry->logo.payload; - memset(logo_pld, 0, sizeof(struct unf_logo_payload)); - unf_fill_logo_pld(logo_pld, lport); - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - rport->logo_retries++; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[info]LOGIN: LOGO send %s. Port(0x%x)--->RPort(0x%x) hottag(0x%x) Retries(%d)", - (ret != RETURN_OK) ? "failed" : "succeed", lport->port_id, - rport->nport_id, xchg->hotpooltag, rport->logo_retries); - - return ret; -} - -u32 unf_send_logo_by_did(struct unf_lport *lport, u32 did) -{ - struct unf_logo_payload *logo_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - struct unf_frame_pkg pkg = {0}; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - xchg = unf_get_sfs_free_xchg_and_init(lport, did, NULL, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for LOGO", - lport->port_id); - - return ret; - } - - xchg->cmnd_code = ELS_LOGO; - - unf_fill_package(&pkg, xchg, NULL); - pkg.type = UNF_PKG_ELS_REQ; - - logo_pld = &fc_entry->logo.payload; - memset(logo_pld, 0, sizeof(struct unf_logo_payload)); - unf_fill_logo_pld(logo_pld, lport); - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: LOGO send %s. Port(0x%x)--->RPort(0x%x) with hottag(0x%x)", - (ret != RETURN_OK) ? 
"failed" : "succeed", lport->port_id, - did, xchg->hotpooltag); - - return ret; -} - -static void unf_echo_callback(void *lport, void *rport, void *xchg) -{ - struct unf_lport *unf_lport = (struct unf_lport *)lport; - struct unf_rport *unf_rport = (struct unf_rport *)rport; - struct unf_xchg *unf_xchg = NULL; - struct unf_echo_payload *echo_rsp_pld = NULL; - u32 cmnd = 0; - u32 mag_ver_local = 0; - u32 mag_ver_remote = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(xchg); - - unf_xchg = (struct unf_xchg *)xchg; - if (!unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) - return; - - echo_rsp_pld = unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo_acc.echo_pld; - FC_CHECK_RETURN_VOID(echo_rsp_pld); - - if (unf_xchg->byte_orders & UNF_BIT_2) { - unf_big_end_to_cpu((u8 *)echo_rsp_pld, sizeof(struct unf_echo_payload)); - cmnd = echo_rsp_pld->cmnd; - } else { - cmnd = echo_rsp_pld->cmnd; - } - - mag_ver_local = echo_rsp_pld->data[ARRAY_INDEX_0]; - mag_ver_remote = echo_rsp_pld->data[ARRAY_INDEX_1]; - - if (UNF_ELS_CMND_ACC == (cmnd & UNF_ELS_CMND_HIGH_MASK)) { - if (mag_ver_local == ECHO_MG_VERSION_LOCAL && - mag_ver_remote == ECHO_MG_VERSION_REMOTE) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "LPort(0x%x) send ECHO to RPort(0x%x), received ACC. local snd echo:(0x%x), remote rcv echo:(0x%x), remote snd echo acc:(0x%x), local rcv echo acc:(0x%x)", - unf_lport->port_id, unf_rport->nport_id, - unf_xchg->private_data[PKG_PRIVATE_ECHO_CMD_SND_TIME], - unf_xchg->private_data[PKG_PRIVATE_ECHO_CMD_RCV_TIME], - unf_xchg->private_data[PKG_PRIVATE_ECHO_RSP_SND_TIME], - unf_xchg->private_data[PKG_PRIVATE_ECHO_ACC_RCV_TIME]); - } else if ((mag_ver_local == ECHO_MG_VERSION_LOCAL) && - (mag_ver_remote != ECHO_MG_VERSION_REMOTE)) { - /* the peer don't supprt smartping, only local snd and - * rcv rsp time stamp - */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "LPort(0x%x) send ECHO to RPort(0x%x), received ACC. local snd echo:(0x%x), local rcv echo acc:(0x%x)", - unf_lport->port_id, unf_rport->nport_id, - unf_xchg->private_data[PKG_PRIVATE_ECHO_CMD_SND_TIME], - unf_xchg->private_data[PKG_PRIVATE_ECHO_ACC_RCV_TIME]); - } else if ((mag_ver_local != ECHO_MG_VERSION_LOCAL) && - (mag_ver_remote != ECHO_MG_VERSION_REMOTE)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_MAJOR, - "LPort(0x%x) send ECHO to RPort(0x%x), received ACC. 
local and remote is not FC HBA", - unf_lport->port_id, unf_rport->nport_id); - } - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) send ECHO to RPort(0x%x) and received RJT", - unf_lport->port_id, unf_rport->nport_id); - } - - unf_xchg->echo_info.echo_result = UNF_ELS_ECHO_RESULT_OK; - unf_xchg->echo_info.response_time = jiffies - unf_xchg->echo_info.response_time; - - /* wake up semaphore */ - up(&unf_xchg->echo_info.echo_sync_sema); -} - -static void unf_echo_ob_callback(struct unf_xchg *xchg) -{ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - - FC_CHECK_RETURN_VOID(xchg); - unf_lport = xchg->lport; - FC_CHECK_RETURN_VOID(unf_lport); - unf_rport = xchg->rport; - FC_CHECK_RETURN_VOID(unf_rport); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) send ECHO to RPort(0x%x) but timeout", - unf_lport->port_id, unf_rport->nport_id); - - xchg->echo_info.echo_result = UNF_ELS_ECHO_RESULT_FAIL; - - /* wake up semaphore */ - up(&xchg->echo_info.echo_sync_sema); -} - -u32 unf_send_echo(struct unf_lport *lport, struct unf_rport *rport, u32 *time) -{ - struct unf_echo_payload *echo_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - struct unf_frame_pkg pkg = {0}; - u32 ret = UNF_RETURN_ERROR; - ulong delay = 0; - dma_addr_t phy_echo_addr; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(time, UNF_RETURN_ERROR); - - delay = UNF_ECHO_WAIT_SEM_TIMEOUT(lport); - xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, rport, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for ECHO", - lport->port_id); - - return ret; - } - - /* ECHO */ - xchg->cmnd_code = ELS_ECHO; - xchg->fcp_sfs_union.sfs_entry.cur_offset = UNF_ECHO_REQ_SIZE; - - /* Set callback function, wake up semaphore */ - xchg->callback = unf_echo_callback; - /* wake up semaphore */ - xchg->ob_callback = unf_echo_ob_callback; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REQ; - - echo_pld = (struct unf_echo_payload *)unf_get_one_big_sfs_buf(xchg); - if (!echo_pld) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) can't allocate buffer for ECHO", - lport->port_id); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - - fc_entry->echo.echo_pld = echo_pld; - phy_echo_addr = pci_map_single(lport->low_level_func.dev, echo_pld, - UNF_ECHO_PAYLOAD_LEN, - DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(lport->low_level_func.dev, phy_echo_addr)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_WARN, "[warn]Port(0x%x) pci map err", lport->port_id); - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - fc_entry->echo.phy_echo_addr = phy_echo_addr; - memset(echo_pld, 0, sizeof(struct unf_echo_payload)); - echo_pld->cmnd = (UNF_ELS_CMND_ECHO); - echo_pld->data[ARRAY_INDEX_0] = ECHO_MG_VERSION_LOCAL; - - ret = unf_xchg_ref_inc(xchg, SEND_ELS); - FC_CHECK_RETURN_VALUE((ret == RETURN_OK), UNF_RETURN_ERROR); - - xchg->echo_info.response_time = jiffies; - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) { - unf_cm_free_xchg((void *)lport, (void *)xchg); - } else { - if (down_timeout(&xchg->echo_info.echo_sync_sema, - (long)msecs_to_jiffies((u32)delay))) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]ECHO send %s. Port(0x%x)--->RPort(0x%x) but response timeout ", - (ret != RETURN_OK) ? 
"failed" : "succeed", - lport->port_id, rport->nport_id); - - xchg->echo_info.echo_result = UNF_ELS_ECHO_RESULT_FAIL; - } - - if (xchg->echo_info.echo_result == UNF_ELS_ECHO_RESULT_FAIL) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_MAJOR, "Echo send fail or timeout"); - - ret = UNF_RETURN_ERROR; - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "echo acc rsp,echo_cmd_snd(0x%xus)-->echo_cmd_rcv(0x%xus)-->echo_acc_ snd(0x%xus)-->echo_acc_rcv(0x%xus).", - xchg->private_data[PKG_PRIVATE_ECHO_CMD_SND_TIME], - xchg->private_data[PKG_PRIVATE_ECHO_CMD_RCV_TIME], - xchg->private_data[PKG_PRIVATE_ECHO_RSP_SND_TIME], - xchg->private_data[PKG_PRIVATE_ECHO_ACC_RCV_TIME]); - - *time = - (xchg->private_data[PKG_PRIVATE_ECHO_ACC_RCV_TIME] - - xchg->private_data[PKG_PRIVATE_ECHO_CMD_SND_TIME]) - - (xchg->private_data[PKG_PRIVATE_ECHO_RSP_SND_TIME] - - xchg->private_data[PKG_PRIVATE_ECHO_CMD_RCV_TIME]); - } - } - - pci_unmap_single(lport->low_level_func.dev, phy_echo_addr, - UNF_ECHO_PAYLOAD_LEN, DMA_BIDIRECTIONAL); - fc_entry->echo.phy_echo_addr = 0; - unf_xchg_ref_dec(xchg, SEND_ELS); - - return ret; -} - -static void unf_fill_prli_pld(struct unf_prli_payload *prli_pld, - struct unf_lport *lport) -{ - u32 pld_len = 0; - - FC_CHECK_RETURN_VOID(prli_pld); - FC_CHECK_RETURN_VOID(lport); - - pld_len = sizeof(struct unf_prli_payload) - UNF_PRLI_SIRT_EXTRA_SIZE; - prli_pld->cmnd = - (UNF_ELS_CMND_PRLI | - ((u32)UNF_FC4_FRAME_PAGE_SIZE << UNF_FC4_FRAME_PAGE_SIZE_SHIFT) | - ((u32)pld_len)); - - prli_pld->parms[ARRAY_INDEX_0] = (UNF_FC4_FRAME_PARM_0_FCP | UNF_FC4_FRAME_PARM_0_I_PAIR); - prli_pld->parms[ARRAY_INDEX_1] = UNF_NOT_MEANINGFUL; - prli_pld->parms[ARRAY_INDEX_2] = UNF_NOT_MEANINGFUL; - - /* About Read Xfer_rdy disable */ - prli_pld->parms[ARRAY_INDEX_3] = (UNF_FC4_FRAME_PARM_3_R_XFER_DIS | lport->options); - - /* About FCP confirm */ - if (lport->low_level_func.lport_cfg_items.fcp_conf) - prli_pld->parms[ARRAY_INDEX_3] |= UNF_FC4_FRAME_PARM_3_CONF_ALLOW; - - /* About Tape support */ - if (lport->low_level_func.lport_cfg_items.tape_support) - prli_pld->parms[ARRAY_INDEX_3] |= - (UNF_FC4_FRAME_PARM_3_REC_SUPPORT | - UNF_FC4_FRAME_PARM_3_RETRY_SUPPORT | - UNF_FC4_FRAME_PARM_3_TASK_RETRY_ID_SUPPORT | - UNF_FC4_FRAME_PARM_3_CONF_ALLOW); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x)'s PRLI payload: options(0x%x) parameter-3(0x%x)", - lport->port_id, lport->options, - prli_pld->parms[ARRAY_INDEX_3]); - - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, prli_pld, sizeof(struct unf_prli_payload)); -} - -u32 unf_send_prli(struct unf_lport *lport, struct unf_rport *rport, - u32 cmnd_code) -{ - struct unf_prli_payload *prli_pal = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg = {0}; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, rport, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for PRLI", - lport->port_id); - - return ret; - } - - xchg->cmnd_code = cmnd_code; - - /* for rcvd prli acc/rjt processer */ - xchg->callback = unf_prli_callback; - /* for send prli failed processer */ - xchg->ob_callback = unf_prli_ob_callback; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REQ; - - prli_pal = &fc_entry->prli.payload; - memset(prli_pal, 0, sizeof(struct unf_prli_payload)); - unf_fill_prli_pld(prli_pal, lport); - - 
ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: PRLI send %s. Port(0x%x)--->RPort(0x%x)", - (ret != RETURN_OK) ? "failed" : "succeed", lport->port_id, - rport->nport_id); - - return ret; -} - -static void unf_fill_prlo_pld(struct unf_prli_payload *prlo_pld, - struct unf_lport *lport) -{ - FC_CHECK_RETURN_VOID(prlo_pld); - FC_CHECK_RETURN_VOID(lport); - - prlo_pld->cmnd = (UNF_ELS_CMND_PRLO); - prlo_pld->parms[ARRAY_INDEX_0] = (UNF_FC4_FRAME_PARM_0_FCP); - prlo_pld->parms[ARRAY_INDEX_1] = UNF_NOT_MEANINGFUL; - prlo_pld->parms[ARRAY_INDEX_2] = UNF_NOT_MEANINGFUL; - prlo_pld->parms[ARRAY_INDEX_3] = UNF_NO_SERVICE_PARAMS; - - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, prlo_pld, sizeof(struct unf_prli_payload)); -} - -u32 unf_send_prlo(struct unf_lport *lport, struct unf_rport *rport) -{ - struct unf_prli_payload *prlo_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - memset(&pkg, 0, sizeof(struct unf_frame_pkg)); - - xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, rport, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for PRLO", lport->port_id); - - return ret; - } - - xchg->cmnd_code = ELS_PRLO; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REQ; - - prlo_pld = &fc_entry->prlo.payload; - memset(prlo_pld, 0, sizeof(struct unf_prli_payload)); - unf_fill_prlo_pld(prlo_pld, lport); - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: PRLO send %s. Port(0x%x)--->RPort(0x%x)", - (ret != RETURN_OK) ? "failed" : "succeed", lport->port_id, - rport->nport_id); - - return ret; -} - -static void unf_fill_pdisc_pld(struct unf_plogi_payload *pdisc_pld, - struct unf_lport *lport) -{ - struct unf_lgn_parm *login_parms = NULL; - - FC_CHECK_RETURN_VOID(pdisc_pld); - FC_CHECK_RETURN_VOID(lport); - - pdisc_pld->cmnd = (UNF_ELS_CMND_PDISC); - login_parms = &pdisc_pld->stparms; - - if (lport->act_topo == UNF_ACT_TOP_P2P_FABRIC || - lport->act_topo == UNF_ACT_TOP_P2P_DIRECT) { - /* P2P & Fabric */ - login_parms->co_parms.bb_credit = (unf_low_level_bb_credit(lport)); - login_parms->co_parms.alternate_bb_credit_mgmt = UNF_BBCREDIT_MANAGE_NFPORT; - login_parms->co_parms.bbscn = - (lport->act_topo == UNF_ACT_TOP_P2P_FABRIC) - ? 
0 - : unf_low_level_bb_scn(lport); - } else { - /* Public loop & Private loop */ - login_parms->co_parms.bb_credit = UNF_BBCREDIT_LPORT; - /* :1 */ - login_parms->co_parms.alternate_bb_credit_mgmt = UNF_BBCREDIT_MANAGE_LPORT; - } - - login_parms->co_parms.lowest_version = UNF_PLOGI_VERSION_LOWER; - login_parms->co_parms.highest_version = UNF_PLOGI_VERSION_UPPER; - login_parms->co_parms.continuously_increasing = UNF_CONTIN_INCREASE_SUPPORT; - login_parms->co_parms.bb_receive_data_field_size = (lport->max_frame_size); - login_parms->co_parms.nport_total_concurrent_sequences = (UNF_PLOGI_CONCURRENT_SEQ); - login_parms->co_parms.relative_offset = (UNF_PLOGI_RO_CATEGORY); - login_parms->co_parms.e_d_tov = (lport->ed_tov); - - login_parms->high_node_name = UNF_GET_NAME_HIGH_WORD(lport->node_name); - login_parms->low_node_name = UNF_GET_NAME_LOW_WORD(lport->node_name); - login_parms->high_port_name = UNF_GET_NAME_HIGH_WORD(lport->port_name); - login_parms->low_port_name = UNF_GET_NAME_LOW_WORD(lport->port_name); - - /* class-3 */ - login_parms->cl_parms[ARRAY_INDEX_2].valid = UNF_CLASS_VALID; - login_parms->cl_parms[ARRAY_INDEX_2].received_data_field_size = (lport->max_frame_size); - login_parms->cl_parms[ARRAY_INDEX_2].concurrent_sequences = (UNF_PLOGI_CONCURRENT_SEQ); - login_parms->cl_parms[ARRAY_INDEX_2].open_sequence_per_exchange = (UNF_PLOGI_SEQ_PER_XCHG); - - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, pdisc_pld, sizeof(struct unf_plogi_payload)); -} - -u32 unf_send_pdisc(struct unf_lport *lport, struct unf_rport *rport) -{ - /* PLOGI/PDISC with same payload */ - struct unf_plogi_payload *pdisc_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg = {0}; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, rport, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for PDISC", - lport->port_id); - - return ret; - } - - xchg->cmnd_code = ELS_PDISC; - xchg->callback = NULL; - xchg->ob_callback = NULL; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REQ; - - pdisc_pld = &fc_entry->pdisc.payload; - memset(pdisc_pld, 0, sizeof(struct unf_plogi_payload)); - unf_fill_pdisc_pld(pdisc_pld, lport); - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: PDISC send %s. Port(0x%x)--->RPort(0x%x)", - (ret != RETURN_OK) ? 
"failed" : "succeed", lport->port_id, rport->nport_id); - - return ret; -} - -static void unf_fill_adisc_pld(struct unf_adisc_payload *adisc_pld, - struct unf_lport *lport) -{ - FC_CHECK_RETURN_VOID(adisc_pld); - FC_CHECK_RETURN_VOID(lport); - - adisc_pld->cmnd = (UNF_ELS_CMND_ADISC); - adisc_pld->high_node_name = UNF_GET_NAME_HIGH_WORD(lport->node_name); - adisc_pld->low_node_name = UNF_GET_NAME_LOW_WORD(lport->node_name); - adisc_pld->high_port_name = UNF_GET_NAME_HIGH_WORD(lport->port_name); - adisc_pld->low_port_name = UNF_GET_NAME_LOW_WORD(lport->port_name); - - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, adisc_pld, sizeof(struct unf_adisc_payload)); -} - -u32 unf_send_adisc(struct unf_lport *lport, struct unf_rport *rport) -{ - struct unf_adisc_payload *adisc_pal = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg = {0}; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, rport, &fc_entry); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for ADISC", lport->port_id); - - return ret; - } - - xchg->cmnd_code = ELS_ADISC; - - xchg->callback = NULL; - xchg->ob_callback = NULL; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REQ; - - adisc_pal = &fc_entry->adisc.adisc_payl; - memset(adisc_pal, 0, sizeof(struct unf_adisc_payload)); - unf_fill_adisc_pld(adisc_pal, lport); - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: ADISC send %s. Port(0x%x)--->RPort(0x%x)", - (ret != RETURN_OK) ? "failed" : "succeed", lport->port_id, - rport->nport_id); - - return ret; -} - -static void unf_fill_rrq_pld(struct unf_rrq *rrq_pld, struct unf_xchg *xchg) -{ - FC_CHECK_RETURN_VOID(rrq_pld); - FC_CHECK_RETURN_VOID(xchg); - - rrq_pld->cmnd = UNF_ELS_CMND_RRQ; - rrq_pld->sid = xchg->sid; - rrq_pld->oxid_rxid = ((u32)xchg->oxid << UNF_SHIFT_16 | xchg->rxid); -} - -u32 unf_send_rrq(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ - /* after ABTS Done */ - struct unf_rrq *rrq_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *unf_xchg = NULL; - struct unf_frame_pkg pkg = {0}; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - unf_xchg = unf_get_sfs_free_xchg_and_init(lport, rport->nport_id, rport, &fc_entry); - if (!unf_xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for RRQ", - lport->port_id); - - return ret; - } - - unf_xchg->cmnd_code = ELS_RRQ; /* RRQ */ - - unf_xchg->callback = unf_rrq_callback; /* release I/O exchange context */ - unf_xchg->ob_callback = unf_rrq_ob_callback; /* release I/O exchange context */ - unf_xchg->io_xchg = xchg; /* pointer to IO XCHG */ - - unf_fill_package(&pkg, unf_xchg, rport); - pkg.type = UNF_PKG_ELS_REQ; - rrq_pld = &fc_entry->rrq; - memset(rrq_pld, 0, sizeof(struct unf_rrq)); - unf_fill_rrq_pld(rrq_pld, xchg); - - ret = unf_ls_gs_cmnd_send(lport, &pkg, unf_xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)unf_xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]RRQ send %s. 
Port(0x%x)--->RPort(0x%x) free old exchange(0x%x)", - (ret != RETURN_OK) ? "failed" : "succeed", lport->port_id, - rport->nport_id, xchg->hotpooltag); - - return ret; -} - -u32 unf_send_flogi_acc(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ - struct unf_flogi_fdisc_payload *flogi_acc_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg = {0}; - u16 ox_id = 0; - u16 rx_id = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_FLOGI); - - xchg->did = 0; /* D_ID must be 0 */ - xchg->sid = UNF_FC_FID_FLOGI; /* S_ID must be 0xfffffe */ - xchg->oid = xchg->sid; - xchg->callback = NULL; - xchg->lport = lport; - xchg->rport = rport; - xchg->ob_callback = unf_flogi_acc_ob_callback; /* call back for sending - * FLOGI response - */ - - fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!fc_entry) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", - lport->port_id, xchg->hotpooltag); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REPLY; - - memset(fc_entry, 0, sizeof(union unf_sfs_u)); - flogi_acc_pld = &fc_entry->flogi_acc.flogi_payload; - flogi_acc_pld->cmnd = (UNF_ELS_CMND_ACC); - unf_fill_flogi_pld(flogi_acc_pld, lport); - ox_id = xchg->oxid; - rx_id = xchg->rxid; - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]LOGIN: FLOGI ACC send %s. Port(0x%x)--->RPort(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", - (ret != RETURN_OK) ? "failed" : "succeed", lport->port_id, - rport->nport_id, ox_id, rx_id); - return ret; -} - -static void unf_fill_plogi_acc_pld(struct unf_plogi_payload *plogi_acc_pld, - struct unf_lport *lport) -{ - struct unf_lgn_parm *login_parms = NULL; - - FC_CHECK_RETURN_VOID(plogi_acc_pld); - FC_CHECK_RETURN_VOID(lport); - - plogi_acc_pld->cmnd = (UNF_ELS_CMND_ACC); - login_parms = &plogi_acc_pld->stparms; - - if (lport->act_topo == UNF_ACT_TOP_P2P_FABRIC || - lport->act_topo == UNF_ACT_TOP_P2P_DIRECT) { - login_parms->co_parms.bb_credit = (unf_low_level_bb_credit(lport)); - login_parms->co_parms.alternate_bb_credit_mgmt = UNF_BBCREDIT_MANAGE_NFPORT; /* 0 */ - login_parms->co_parms.bbscn = - (lport->act_topo == UNF_ACT_TOP_P2P_FABRIC) - ? 
0 - : unf_low_level_bb_scn(lport); - } else { - login_parms->co_parms.bb_credit = UNF_BBCREDIT_LPORT; - login_parms->co_parms.alternate_bb_credit_mgmt = UNF_BBCREDIT_MANAGE_LPORT; /* 1 */ - } - - login_parms->co_parms.lowest_version = UNF_PLOGI_VERSION_LOWER; - login_parms->co_parms.highest_version = UNF_PLOGI_VERSION_UPPER; - login_parms->co_parms.continuously_increasing = UNF_CONTIN_INCREASE_SUPPORT; - login_parms->co_parms.bb_receive_data_field_size = (lport->max_frame_size); - login_parms->co_parms.nport_total_concurrent_sequences = (UNF_PLOGI_CONCURRENT_SEQ); - login_parms->co_parms.relative_offset = (UNF_PLOGI_RO_CATEGORY); - login_parms->co_parms.e_d_tov = (lport->ed_tov); - login_parms->cl_parms[ARRAY_INDEX_2].valid = UNF_CLASS_VALID; /* class-3 */ - login_parms->cl_parms[ARRAY_INDEX_2].received_data_field_size = (lport->max_frame_size); - login_parms->cl_parms[ARRAY_INDEX_2].concurrent_sequences = (UNF_PLOGI_CONCURRENT_SEQ); - login_parms->cl_parms[ARRAY_INDEX_2].open_sequence_per_exchange = (UNF_PLOGI_SEQ_PER_XCHG); - login_parms->high_node_name = UNF_GET_NAME_HIGH_WORD(lport->node_name); - login_parms->low_node_name = UNF_GET_NAME_LOW_WORD(lport->node_name); - login_parms->high_port_name = UNF_GET_NAME_HIGH_WORD(lport->port_name); - login_parms->low_port_name = UNF_GET_NAME_LOW_WORD(lport->port_name); - - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, plogi_acc_pld, - sizeof(struct unf_plogi_payload)); -} - -u32 unf_send_plogi_acc(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ - struct unf_plogi_payload *plogi_acc_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg = {0}; - u16 ox_id = 0; - u16 rx_id = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_PLOGI); - - xchg->did = rport->nport_id; - xchg->sid = lport->nport_id; - xchg->oid = xchg->sid; - xchg->callback = NULL; - xchg->lport = lport; - xchg->rport = rport; - - xchg->ob_callback = unf_plogi_acc_ob_callback; /* call back for sending PLOGI ACC */ - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REPLY; - fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!fc_entry) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", - lport->port_id, xchg->hotpooltag); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - - memset(fc_entry, 0, sizeof(union unf_sfs_u)); - plogi_acc_pld = &fc_entry->plogi_acc.payload; - unf_fill_plogi_acc_pld(plogi_acc_pld, lport); - ox_id = xchg->oxid; - rx_id = xchg->rxid; - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - if (rport->nport_id < UNF_FC_FID_DOM_MGR || - lport->act_topo == UNF_ACT_TOP_P2P_DIRECT) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: PLOGI ACC send %s. Port(0x%x_0x%x_0x%llx)--->RPort(0x%x_0x%llx) with OX_ID(0x%x) RX_ID(0x%x)", - (ret != RETURN_OK) ? 
"failed" : "succeed", - lport->port_id, lport->nport_id, lport->port_name, - rport->nport_id, rport->port_name, ox_id, rx_id); - } - - return ret; -} - -static void unf_fill_prli_acc_pld(struct unf_prli_payload *prli_acc_pld, - struct unf_lport *lport, - struct unf_rport *rport) -{ - u32 port_mode = UNF_FC4_FRAME_PARM_3_TGT; - - FC_CHECK_RETURN_VOID(prli_acc_pld); - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - prli_acc_pld->cmnd = - (UNF_ELS_CMND_ACC | - ((u32)UNF_FC4_FRAME_PAGE_SIZE << UNF_FC4_FRAME_PAGE_SIZE_SHIFT) | - ((u32)(sizeof(struct unf_prli_payload) - UNF_PRLI_SIRT_EXTRA_SIZE))); - - prli_acc_pld->parms[ARRAY_INDEX_0] = - (UNF_FC4_FRAME_PARM_0_FCP | UNF_FC4_FRAME_PARM_0_I_PAIR | - UNF_FC4_FRAME_PARM_0_GOOD_RSP_CODE); - prli_acc_pld->parms[ARRAY_INDEX_1] = UNF_NOT_MEANINGFUL; - prli_acc_pld->parms[ARRAY_INDEX_2] = UNF_NOT_MEANINGFUL; - - /* About INI/TGT mode */ - if (rport->nport_id < UNF_FC_FID_DOM_MGR) { - /* return INI (0x20): R_Port has TGT mode, L_Port has INI mode - */ - port_mode = UNF_FC4_FRAME_PARM_3_INI; - } else { - port_mode = lport->options; - } - - /* About Read xfer_rdy disable */ - prli_acc_pld->parms[ARRAY_INDEX_3] = - (UNF_FC4_FRAME_PARM_3_R_XFER_DIS | port_mode); /* 0x2 */ - - /* About Tape support */ - if (rport->tape_support_needed) { - prli_acc_pld->parms[ARRAY_INDEX_3] |= - (UNF_FC4_FRAME_PARM_3_REC_SUPPORT | - UNF_FC4_FRAME_PARM_3_RETRY_SUPPORT | - UNF_FC4_FRAME_PARM_3_TASK_RETRY_ID_SUPPORT | - UNF_FC4_FRAME_PARM_3_CONF_ALLOW); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "PRLI ACC tape support"); - } - - /* About confirm */ - if (lport->low_level_func.lport_cfg_items.fcp_conf) - prli_acc_pld->parms[ARRAY_INDEX_3] |= - UNF_FC4_FRAME_PARM_3_CONF_ALLOW; /* 0x80 */ - - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, prli_acc_pld, - sizeof(struct unf_prli_payload)); -} - -u32 unf_send_prli_acc(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ - struct unf_prli_payload *prli_acc_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg = {0}; - u16 ox_id = 0; - u16 rx_id = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_PRLI); - xchg->did = rport->nport_id; - xchg->sid = lport->nport_id; - xchg->oid = xchg->sid; - xchg->lport = lport; - xchg->rport = rport; - - xchg->callback = NULL; - xchg->ob_callback = - unf_prli_acc_ob_callback; /* callback when send succeed */ - - unf_fill_package(&pkg, xchg, rport); - - pkg.type = UNF_PKG_ELS_REPLY; - fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!fc_entry) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) entry can't be NULL with tag(0x%x)", - lport->port_id, xchg->hotpooltag); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - - memset(fc_entry, 0, sizeof(union unf_sfs_u)); - prli_acc_pld = &fc_entry->prli_acc.payload; - unf_fill_prli_acc_pld(prli_acc_pld, lport, rport); - ox_id = xchg->oxid; - rx_id = xchg->rxid; - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - if (rport->nport_id < UNF_FC_FID_DOM_MGR || - lport->act_topo == UNF_ACT_TOP_P2P_DIRECT) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: PRLI ACC send %s. Port(0x%x)--->RPort(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", - (ret != RETURN_OK) ? 
"failed" : "succeed", - lport->port_id, rport->nport_id, ox_id, rx_id); - } - - return ret; -} - -u32 unf_send_rec_acc(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ - /* Reserved */ - unf_cm_free_xchg((void *)lport, (void *)xchg); - - return RETURN_OK; -} - -static void unf_rrq_acc_ob_callback(struct unf_xchg *xchg) -{ - FC_CHECK_RETURN_VOID(xchg); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]RRQ ACC Xchg(0x%p) tag(0x%x)", xchg, - xchg->hotpooltag); -} - -static void unf_fill_els_acc_pld(struct unf_els_acc *els_acc_pld) -{ - FC_CHECK_RETURN_VOID(els_acc_pld); - - els_acc_pld->cmnd = (UNF_ELS_CMND_ACC); -} - -u32 unf_send_rscn_acc(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ - struct unf_els_acc *rscn_acc = NULL; - union unf_sfs_u *fc_entry = NULL; - u32 ret = UNF_RETURN_ERROR; - u16 ox_id = 0; - u16 rx_id = 0; - struct unf_frame_pkg pkg; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - memset(&pkg, 0, sizeof(struct unf_frame_pkg)); - xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_RSCN); - xchg->did = rport->nport_id; - xchg->sid = lport->nport_id; - xchg->oid = xchg->sid; - xchg->lport = lport; - xchg->rport = rport; - - xchg->callback = NULL; - xchg->ob_callback = unf_rscn_acc_ob_callback; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REPLY; - fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!fc_entry) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", - lport->port_id, xchg->hotpooltag); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - - memset(fc_entry, 0, sizeof(union unf_sfs_u)); - rscn_acc = &fc_entry->els_acc; - unf_fill_els_acc_pld(rscn_acc); - ox_id = xchg->oxid; - rx_id = xchg->rxid; - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: RSCN ACC send %s. Port(0x%x)--->RPort(0x%x) with OXID(0x%x) RXID(0x%x)", - (ret != RETURN_OK) ? 
"failed" : "succeed", lport->port_id, - rport->nport_id, ox_id, rx_id); - - return ret; -} - -u32 unf_send_logo_acc(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ - struct unf_els_acc *logo_acc = NULL; - union unf_sfs_u *fc_entry = NULL; - u32 ret = UNF_RETURN_ERROR; - u16 ox_id = 0; - u16 rx_id = 0; - struct unf_frame_pkg pkg; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - memset(&pkg, 0, sizeof(struct unf_frame_pkg)); - - xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_LOGO); - xchg->did = rport->nport_id; - xchg->sid = lport->nport_id; - xchg->oid = xchg->sid; - xchg->lport = lport; - xchg->rport = rport; - xchg->callback = NULL; - xchg->ob_callback = unf_logo_acc_ob_callback; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REPLY; - fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!fc_entry) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", - lport->port_id, xchg->hotpooltag); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - - memset(fc_entry, 0, sizeof(union unf_sfs_u)); - logo_acc = &fc_entry->els_acc; - unf_fill_els_acc_pld(logo_acc); - ox_id = xchg->oxid; - rx_id = xchg->rxid; - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - if (rport->nport_id < UNF_FC_FID_DOM_MGR) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: LOGO ACC send %s. Port(0x%x)--->RPort(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", - (ret != RETURN_OK) ? "failed" : "succeed", - lport->port_id, rport->nport_id, ox_id, rx_id); - } - - return ret; -} - -static u32 unf_send_rrq_acc(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ - struct unf_els_acc *rrq_acc = NULL; - union unf_sfs_u *fc_entry = NULL; - u32 ret = UNF_RETURN_ERROR; - u16 ox_id = 0; - u16 rx_id = 0; - struct unf_frame_pkg pkg = {0}; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - xchg->did = rport->nport_id; - xchg->sid = lport->nport_id; - xchg->oid = xchg->sid; - xchg->lport = lport; - xchg->rport = rport; - xchg->callback = NULL; /* do noting */ - - fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!fc_entry) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", - lport->port_id, xchg->hotpooltag); - - return UNF_RETURN_ERROR; - } - - memset(fc_entry, 0, sizeof(union unf_sfs_u)); - rrq_acc = &fc_entry->els_acc; - xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_RRQ); - xchg->ob_callback = unf_rrq_acc_ob_callback; /* do noting */ - unf_fill_els_acc_pld(rrq_acc); - ox_id = xchg->oxid; - rx_id = xchg->rxid; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REPLY; - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]RRQ ACC send %s. Port(0x%x)--->RPort(0x%x) with Xchg(0x%p) OX_ID(0x%x) RX_ID(0x%x)", - (ret != RETURN_OK) ? 
"failed" : "succeed", lport->port_id, - rport->nport_id, xchg, ox_id, rx_id); - - return ret; -} - -static void unf_fill_pdisc_acc_pld(struct unf_plogi_payload *pdisc_acc_pld, - struct unf_lport *lport) -{ - struct unf_lgn_parm *login_parms = NULL; - - FC_CHECK_RETURN_VOID(pdisc_acc_pld); - FC_CHECK_RETURN_VOID(lport); - - pdisc_acc_pld->cmnd = (UNF_ELS_CMND_ACC); - login_parms = &pdisc_acc_pld->stparms; - - if (lport->act_topo == UNF_ACT_TOP_P2P_FABRIC || - lport->act_topo == UNF_ACT_TOP_P2P_DIRECT) { - login_parms->co_parms.bb_credit = (unf_low_level_bb_credit(lport)); - login_parms->co_parms.alternate_bb_credit_mgmt = UNF_BBCREDIT_MANAGE_NFPORT; - login_parms->co_parms.bbscn = - (lport->act_topo == UNF_ACT_TOP_P2P_FABRIC) - ? 0 - : unf_low_level_bb_scn(lport); - } else { - login_parms->co_parms.bb_credit = UNF_BBCREDIT_LPORT; - login_parms->co_parms.alternate_bb_credit_mgmt = UNF_BBCREDIT_MANAGE_LPORT; - } - - login_parms->co_parms.lowest_version = UNF_PLOGI_VERSION_LOWER; - login_parms->co_parms.highest_version = UNF_PLOGI_VERSION_UPPER; - login_parms->co_parms.continuously_increasing = UNF_CONTIN_INCREASE_SUPPORT; - login_parms->co_parms.bb_receive_data_field_size = (lport->max_frame_size); - login_parms->co_parms.nport_total_concurrent_sequences = (UNF_PLOGI_CONCURRENT_SEQ); - login_parms->co_parms.relative_offset = (UNF_PLOGI_RO_CATEGORY); - login_parms->co_parms.e_d_tov = (lport->ed_tov); - - login_parms->cl_parms[ARRAY_INDEX_2].valid = UNF_CLASS_VALID; /* class-3 */ - login_parms->cl_parms[ARRAY_INDEX_2].received_data_field_size = (lport->max_frame_size); - login_parms->cl_parms[ARRAY_INDEX_2].concurrent_sequences = (UNF_PLOGI_CONCURRENT_SEQ); - login_parms->cl_parms[ARRAY_INDEX_2].open_sequence_per_exchange = (UNF_PLOGI_SEQ_PER_XCHG); - - login_parms->high_node_name = UNF_GET_NAME_HIGH_WORD(lport->node_name); - login_parms->low_node_name = UNF_GET_NAME_LOW_WORD(lport->node_name); - login_parms->high_port_name = UNF_GET_NAME_HIGH_WORD(lport->port_name); - login_parms->low_port_name = UNF_GET_NAME_LOW_WORD(lport->port_name); - - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, pdisc_acc_pld, - sizeof(struct unf_plogi_payload)); -} - -u32 unf_send_pdisc_acc(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ - struct unf_plogi_payload *pdisc_acc_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - u32 ret = UNF_RETURN_ERROR; - u16 ox_id = 0; - u16 rx_id = 0; - struct unf_frame_pkg pkg; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - memset(&pkg, 0, sizeof(struct unf_frame_pkg)); - - xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_PDISC); - xchg->did = rport->nport_id; - xchg->sid = lport->nport_id; - xchg->oid = xchg->sid; - xchg->lport = lport; - xchg->rport = rport; - - xchg->callback = NULL; - xchg->ob_callback = unf_pdisc_acc_ob_callback; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REPLY; - fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!fc_entry) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", - lport->port_id, xchg->hotpooltag); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - - memset(fc_entry, 0, sizeof(union unf_sfs_u)); - pdisc_acc_pld = &fc_entry->pdisc_acc.payload; - unf_fill_pdisc_acc_pld(pdisc_acc_pld, lport); - ox_id = xchg->oxid; - rx_id = xchg->rxid; - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - 
unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Send PDISC ACC %s. Port(0x%x)--->RPort(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", - (ret != RETURN_OK) ? "failed" : "succeed", lport->port_id, - rport->nport_id, ox_id, rx_id); - - return ret; -} - -static void unf_fill_adisc_acc_pld(struct unf_adisc_payload *adisc_acc_pld, - struct unf_lport *lport) -{ - FC_CHECK_RETURN_VOID(adisc_acc_pld); - FC_CHECK_RETURN_VOID(lport); - - adisc_acc_pld->cmnd = (UNF_ELS_CMND_ACC); - - adisc_acc_pld->hard_address = (lport->nport_id & UNF_ALPA_MASK); - adisc_acc_pld->high_node_name = UNF_GET_NAME_HIGH_WORD(lport->node_name); - adisc_acc_pld->low_node_name = UNF_GET_NAME_LOW_WORD(lport->node_name); - adisc_acc_pld->high_port_name = UNF_GET_NAME_HIGH_WORD(lport->port_name); - adisc_acc_pld->low_port_name = UNF_GET_NAME_LOW_WORD(lport->port_name); - adisc_acc_pld->nport_id = lport->nport_id; - - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, adisc_acc_pld, - sizeof(struct unf_adisc_payload)); -} - -u32 unf_send_adisc_acc(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ - struct unf_adisc_payload *adisc_acc_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg = {0}; - u16 ox_id = 0; - u16 rx_id = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_ADISC); - xchg->did = rport->nport_id; - xchg->sid = lport->nport_id; - xchg->oid = xchg->sid; - xchg->lport = lport; - xchg->rport = rport; - - xchg->callback = NULL; - xchg->ob_callback = unf_adisc_acc_ob_callback; - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REPLY; - fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!fc_entry) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", - lport->port_id, xchg->hotpooltag); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - - memset(fc_entry, 0, sizeof(union unf_sfs_u)); - adisc_acc_pld = &fc_entry->adisc_acc.adisc_payl; - unf_fill_adisc_acc_pld(adisc_acc_pld, lport); - ox_id = xchg->oxid; - rx_id = xchg->rxid; - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Send ADISC ACC %s. Port(0x%x)--->RPort(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", - (ret != RETURN_OK) ? 
"failed" : "succeed", lport->port_id, - rport->nport_id, ox_id, rx_id); - - return ret; -} - -static void unf_fill_prlo_acc_pld(struct unf_prli_prlo *prlo_acc, - struct unf_lport *lport) -{ - struct unf_prli_payload *prlo_acc_pld = NULL; - - FC_CHECK_RETURN_VOID(prlo_acc); - - prlo_acc_pld = &prlo_acc->payload; - prlo_acc_pld->cmnd = - (UNF_ELS_CMND_ACC | - ((u32)UNF_FC4_FRAME_PAGE_SIZE << UNF_FC4_FRAME_PAGE_SIZE_SHIFT) | - ((u32)sizeof(struct unf_prli_payload))); - prlo_acc_pld->parms[ARRAY_INDEX_0] = - (UNF_FC4_FRAME_PARM_0_FCP | UNF_FC4_FRAME_PARM_0_GOOD_RSP_CODE); - prlo_acc_pld->parms[ARRAY_INDEX_1] = 0; - prlo_acc_pld->parms[ARRAY_INDEX_2] = 0; - prlo_acc_pld->parms[ARRAY_INDEX_3] = 0; - - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, prlo_acc_pld, - sizeof(struct unf_prli_payload)); -} - -u32 unf_send_prlo_acc(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg) -{ - struct unf_prli_prlo *prlo_acc = NULL; - union unf_sfs_u *fc_entry = NULL; - u32 ret = UNF_RETURN_ERROR; - u16 ox_id = 0; - u16 rx_id = 0; - struct unf_frame_pkg pkg; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - memset(&pkg, 0, sizeof(struct unf_frame_pkg)); - - xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_PRLO); - xchg->did = rport->nport_id; - xchg->sid = lport->nport_id; - xchg->oid = xchg->sid; - xchg->lport = lport; - xchg->rport = rport; - - xchg->callback = NULL; - xchg->ob_callback = NULL; - - unf_fill_package(&pkg, xchg, rport); - pkg.type = UNF_PKG_ELS_REPLY; - fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!fc_entry) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", - lport->port_id, xchg->hotpooltag); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - - memset(fc_entry, 0, sizeof(union unf_sfs_u)); - prlo_acc = &fc_entry->prlo_acc; - unf_fill_prlo_acc_pld(prlo_acc, lport); - ox_id = xchg->oxid; - rx_id = xchg->rxid; - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Send PRLO ACC %s. Port(0x%x)--->RPort(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", - (ret != RETURN_OK) ? 
"failed" : "succeed", lport->port_id, - rport->nport_id, ox_id, rx_id); - - return ret; -} - -static void unf_prli_acc_ob_callback(struct unf_xchg *xchg) -{ - /* Report R_Port scsi Link Up */ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - ulong flags = 0; - enum unf_rport_login_state rport_state = UNF_RPORT_ST_INIT; - - FC_CHECK_RETURN_VOID(xchg); - unf_lport = xchg->lport; - unf_rport = xchg->rport; - FC_CHECK_RETURN_VOID(unf_lport); - FC_CHECK_RETURN_VOID(unf_rport); - - /* Update & Report Link Up */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flags); - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_READY); - rport_state = unf_rport->rp_state; - if (unf_rport->nport_id < UNF_FC_FID_DOM_MGR) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[event]LOGIN: Port(0x%x) RPort(0x%x) state(0x%x) WWN(0x%llx) prliacc", - unf_lport->port_id, unf_rport->nport_id, - unf_rport->rp_state, unf_rport->port_name); - } - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags); - - if (rport_state == UNF_RPORT_ST_READY) { - unf_rport->logo_retries = 0; - unf_update_lport_state_by_linkup_event(unf_lport, unf_rport, - unf_rport->options); - } -} - -static void unf_schedule_open_work(struct unf_lport *lport, - struct unf_rport *rport) -{ - /* Used for L_Port port only with TGT, or R_Port only with INI */ - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_rport = rport; - ulong delay = 0; - ulong flag = 0; - u32 ret = 0; - u32 port_feature = INVALID_VALUE32; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - delay = (ulong)unf_lport->ed_tov; - port_feature = unf_rport->options & UNF_PORT_MODE_BOTH; - - if (unf_lport->options == UNF_PORT_MODE_TGT || - port_feature == UNF_PORT_MODE_INI) { - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - - ret = unf_rport_ref_inc(unf_rport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) RPort(0x%x) abnormal, no need open", - unf_lport->port_id, unf_lport->nport_id, unf_rport->nport_id); - - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - return; - } - - /* Delay work pending check */ - if (delayed_work_pending(&unf_rport->open_work)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) RPort(0x%x) open work is running, no need re-open", - unf_lport->port_id, unf_lport->nport_id, - unf_rport->nport_id); - - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - unf_rport_ref_dec(unf_rport); - return; - } - - /* start open work */ - if (queue_delayed_work(unf_wq, &unf_rport->open_work, - (ulong)msecs_to_jiffies((u32)delay))) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x_0x%x) RPort(0x%x) start open work", - unf_lport->port_id, unf_lport->nport_id, unf_rport->nport_id); - - (void)unf_rport_ref_inc(unf_rport); - } - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - unf_rport_ref_dec(unf_rport); - } -} - -static void unf_plogi_acc_ob_callback(struct unf_xchg *xchg) -{ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - ulong flags = 0; - - FC_CHECK_RETURN_VOID(xchg); - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - unf_lport = xchg->lport; - unf_rport = xchg->rport; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - FC_CHECK_RETURN_VOID(unf_lport); - FC_CHECK_RETURN_VOID(unf_rport); - - /* - * 1. 
According to FC-LS 4.2.7.1: - * after RCVD PLOGI or sending PLOGI ACC, need to termitate open EXCH - */ - unf_cm_xchg_mgr_abort_io_by_id(unf_lport, unf_rport, - unf_rport->nport_id, unf_lport->nport_id, 0); - - /* 2. Send PLOGI ACC fail */ - if (xchg->ob_callback_sts != UNF_IO_SUCCESS) { - /* Do R_Port recovery */ - unf_rport_error_recovery(unf_rport); - - /* Do not care: Just used for L_Port only is TGT mode or R_Port - * only is INI mode - */ - unf_schedule_open_work(unf_lport, unf_rport); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x_0x%x) send PLOGI ACC failed(0x%x) with RPort(0x%x) feature(0x%x)", - unf_lport->port_id, unf_lport->nport_id, - unf_lport->options, xchg->ob_callback_sts, - unf_rport->nport_id, unf_rport->options); - - return; - } - - /* 3. Private Loop: check whether or not need to send PRLI */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flags); - if (unf_lport->act_topo == UNF_ACT_TOP_PRIVATE_LOOP && - (unf_rport->rp_state == UNF_RPORT_ST_PRLI_WAIT || - unf_rport->rp_state == UNF_RPORT_ST_READY)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x_0x%x) RPort(0x%x) with State(0x%x) return directly", - unf_lport->port_id, unf_lport->nport_id, - unf_rport->nport_id, unf_rport->rp_state); - - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags); - return; - } - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_ENTER_PRLI); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags); - - /* 4. Set Port Feature with BOTH: cancel */ - if (unf_rport->options == UNF_PORT_MODE_UNKNOWN && unf_rport->port_name != INVALID_WWPN) - unf_rport->options = unf_get_port_feature(unf_rport->port_name); - - /* - * 5. Check whether need to send PRLI delay - * Call by: RCVD PLOGI ACC or callback for sending PLOGI ACC succeed - */ - unf_check_rport_need_delay_prli(unf_lport, unf_rport, unf_rport->options); - - /* 6. 
Do not care: Just used for L_Port only is TGT mode or R_Port only - * is INI mode - */ - unf_schedule_open_work(unf_lport, unf_rport); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x_0x%x_0x%x) send PLOGI ACC succeed with RPort(0x%x) feature(0x%x)", - unf_lport->port_id, unf_lport->nport_id, unf_lport->options, - unf_rport->nport_id, unf_rport->options); -} - -static void unf_flogi_acc_ob_callback(struct unf_xchg *xchg) -{ - /* Callback for Sending FLOGI ACC succeed */ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - ulong flags = 0; - u64 rport_port_name = 0; - u64 rport_node_name = 0; - - FC_CHECK_RETURN_VOID(xchg); - FC_CHECK_RETURN_VOID(xchg->lport); - FC_CHECK_RETURN_VOID(xchg->rport); - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - unf_lport = xchg->lport; - unf_rport = xchg->rport; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - spin_lock_irqsave(&unf_rport->rport_state_lock, flags); - if (unf_rport->port_name == 0 && unf_rport->node_name == 0) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x_0x%x_0x%x) already send Plogi with RPort(0x%x) feature(0x%x).", - unf_lport->port_id, unf_lport->nport_id, unf_lport->options, - unf_rport->nport_id, unf_rport->options); - - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags); - return; - } - - rport_port_name = unf_rport->port_name; - rport_node_name = unf_rport->node_name; - - /* Swap case: Set WWPN & WWNN with zero */ - unf_rport->port_name = 0; - unf_rport->node_name = 0; - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags); - - /* Enter PLOGI stage: after send FLOGI ACC succeed */ - unf_login_with_rport_in_n2n(unf_lport, rport_port_name, rport_node_name); -} - -static void unf_rscn_acc_ob_callback(struct unf_xchg *xchg) -{ -} - -static void unf_logo_acc_ob_callback(struct unf_xchg *xchg) -{ -} - -static void unf_adisc_acc_ob_callback(struct unf_xchg *xchg) -{ -} - -static void unf_pdisc_acc_ob_callback(struct unf_xchg *xchg) -{ -} - -static inline u8 unf_determin_bbscn(u8 local_bbscn, u8 remote_bbscn) -{ - if (remote_bbscn == 0 || local_bbscn == 0) - local_bbscn = 0; - else - local_bbscn = local_bbscn > remote_bbscn ? local_bbscn : remote_bbscn; - - return local_bbscn; -} - -static void unf_cfg_lowlevel_fabric_params(struct unf_lport *lport, - struct unf_rport *rport, - struct unf_fabric_parm *login_parms) -{ - struct unf_port_login_parms login_co_parms = {0}; - u32 remote_edtov = 0; - u32 ret = 0; - u8 remote_edtov_resolution = 0; /* 0:ms; 1:ns */ - - if (!lport->low_level_func.port_mgr_op.ll_port_config_set) - return; - - login_co_parms.remote_rttov_tag = (u8)UNF_GET_RT_TOV_FROM_PARAMS(login_parms); - login_co_parms.remote_edtov_tag = 0; - login_co_parms.remote_bb_credit = (u16)UNF_GET_BB_CREDIT_FROM_PARAMS(login_parms); - login_co_parms.compared_bbscn = - (u32)unf_determin_bbscn((u8)lport->low_level_func.lport_cfg_items.bbscn, - (u8)UNF_GET_BB_SC_N_FROM_PARAMS(login_parms)); - - remote_edtov_resolution = (u8)UNF_GET_E_D_TOV_RESOLUTION_FROM_PARAMS(login_parms); - remote_edtov = UNF_GET_E_D_TOV_FROM_PARAMS(login_parms); - login_co_parms.compared_edtov_val = - remote_edtov_resolution ? (remote_edtov / UNF_OS_MS_TO_NS) - : remote_edtov; - - login_co_parms.compared_ratov_val = UNF_GET_RA_TOV_FROM_PARAMS(login_parms); - login_co_parms.els_cmnd_code = ELS_FLOGI; - - if (UNF_TOP_P2P_MASK & (u32)lport->act_topo) { - login_co_parms.act_topo = (login_parms->co_parms.nport == UNF_F_PORT) - ? 
UNF_ACT_TOP_P2P_FABRIC - : UNF_ACT_TOP_P2P_DIRECT; - } else { - login_co_parms.act_topo = lport->act_topo; - } - - ret = lport->low_level_func.port_mgr_op.ll_port_config_set((void *)lport->fc_port, - UNF_PORT_CFG_UPDATE_FABRIC_PARAM, (void *)&login_co_parms); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Lowlevel unsupport fabric config"); - } -} - -u32 unf_check_flogi_params(struct unf_lport *lport, struct unf_rport *rport, - struct unf_fabric_parm *fabric_parms) -{ - u32 ret = RETURN_OK; - u32 high_port_name; - u32 low_port_name; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(fabric_parms, UNF_RETURN_ERROR); - - if (fabric_parms->cl_parms[ARRAY_INDEX_2].valid == UNF_CLASS_INVALID) { - /* Discard directly */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) NPort_ID(0x%x) FLOGI not support class3", - lport->port_id, rport->nport_id); - - return UNF_RETURN_ERROR; - } - - high_port_name = UNF_GET_NAME_HIGH_WORD(lport->port_name); - low_port_name = UNF_GET_NAME_LOW_WORD(lport->port_name); - if (fabric_parms->high_port_name == high_port_name && - fabric_parms->low_port_name == low_port_name) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]The wwpn(0x%x%x) of lport(0x%x) is same as the wwpn of rport(0x%x)", - high_port_name, low_port_name, lport->port_id, rport->nport_id); - return UNF_RETURN_ERROR; - } - - return ret; -} - -static void unf_save_fabric_params(struct unf_lport *lport, - struct unf_rport *rport, - struct unf_fabric_parm *fabric_parms) -{ - u64 fabric_node_name = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(fabric_parms); - - fabric_node_name = (u64)(((u64)(fabric_parms->high_node_name) << UNF_SHIFT_32) | - ((u64)(fabric_parms->low_node_name))); - - /* R_Port for 0xfffffe is used for FLOGI, not need to save WWN */ - if (fabric_parms->co_parms.bb_receive_data_field_size > UNF_MAX_FRAME_SIZE) - rport->max_frame_size = UNF_MAX_FRAME_SIZE; /* 2112 */ - else - rport->max_frame_size = fabric_parms->co_parms.bb_receive_data_field_size; - - /* with Fabric attribute */ - if (fabric_parms->co_parms.nport == UNF_F_PORT) { - rport->ed_tov = fabric_parms->co_parms.e_d_tov; - rport->ra_tov = fabric_parms->co_parms.r_a_tov; - lport->ed_tov = fabric_parms->co_parms.e_d_tov; - lport->ra_tov = fabric_parms->co_parms.r_a_tov; - lport->fabric_node_name = fabric_node_name; - } - - /* Configure info from FLOGI to chip */ - unf_cfg_lowlevel_fabric_params(lport, rport, fabric_parms); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) Rport(0x%x) login parameter: E_D_TOV = %u. LPort E_D_TOV = %u. 
fabric nodename: 0x%x%x", - lport->port_id, rport->nport_id, (fabric_parms->co_parms.e_d_tov), - lport->ed_tov, fabric_parms->high_node_name, fabric_parms->low_node_name); -} - -u32 unf_flogi_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg) -{ - struct unf_rport *unf_rport = NULL; - struct unf_flogi_fdisc_acc *flogi_frame = NULL; - struct unf_fabric_parm *fabric_login_parms = NULL; - u32 ret = UNF_RETURN_ERROR; - ulong flag = 0; - u64 wwpn = 0; - u64 wwnn = 0; - enum unf_act_topo unf_active_topo; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x)<---RPort(0x%x) Receive FLOGI with OX_ID(0x%x)", - lport->port_id, sid, xchg->oxid); - - UNF_SERVICE_COLLECT(lport->link_service_info, UNF_SERVICE_ITEM_FLOGI); - - /* Check L_Port state: Offline */ - if (lport->states >= UNF_LPORT_ST_OFFLINE) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) with state(0x%x) not need to handle FLOGI", - lport->port_id, lport->states); - - unf_cm_free_xchg(lport, xchg); - return ret; - } - - flogi_frame = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->flogi; - fabric_login_parms = &flogi_frame->flogi_payload.fabric_parms; - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, &flogi_frame->flogi_payload, - sizeof(struct unf_flogi_fdisc_payload)); - wwpn = (u64)(((u64)(fabric_login_parms->high_port_name) << UNF_SHIFT_32) | - ((u64)fabric_login_parms->low_port_name)); - wwnn = (u64)(((u64)(fabric_login_parms->high_node_name) << UNF_SHIFT_32) | - ((u64)fabric_login_parms->low_node_name)); - - /* Get (new) R_Port: reuse only */ - unf_rport = unf_get_rport_by_nport_id(lport, UNF_FC_FID_FLOGI); - unf_rport = unf_get_safe_rport(lport, unf_rport, UNF_RPORT_REUSE_ONLY, UNF_FC_FID_FLOGI); - if (unlikely(!unf_rport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) has no RPort. do nothing", lport->port_id); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - - /* Update R_Port info */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport->port_name = wwpn; - unf_rport->node_name = wwnn; - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - /* Check RCVD FLOGI parameters: only for class-3 */ - ret = unf_check_flogi_params(lport, unf_rport, fabric_login_parms); - if (ret != RETURN_OK) { - /* Discard directly */ - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - - /* Save fabric parameters */ - unf_save_fabric_params(lport, unf_rport, fabric_login_parms); - - if ((u32)lport->act_topo & UNF_TOP_P2P_MASK) { - unf_active_topo = - (fabric_login_parms->co_parms.nport == UNF_F_PORT) - ? 
UNF_ACT_TOP_P2P_FABRIC - : UNF_ACT_TOP_P2P_DIRECT; - unf_lport_update_topo(lport, unf_active_topo); - } - /* Send ACC for FLOGI */ - ret = unf_send_flogi_acc(lport, unf_rport, xchg); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) send FLOGI ACC failed and do recover", - lport->port_id); - - /* Do L_Port recovery */ - unf_lport_error_recovery(lport); - } - - return ret; -} - -static void unf_cfg_lowlevel_port_params(struct unf_lport *lport, - struct unf_rport *rport, - struct unf_lgn_parm *login_parms, - u32 cmd_type) -{ - struct unf_port_login_parms login_co_parms = {0}; - u32 ret = 0; - - if (!lport->low_level_func.port_mgr_op.ll_port_config_set) - return; - - login_co_parms.rport_index = rport->rport_index; - login_co_parms.seq_cnt = 0; - login_co_parms.ed_tov = 0; /* ms */ - login_co_parms.ed_tov_timer_val = lport->ed_tov; - login_co_parms.tx_mfs = rport->max_frame_size; - - login_co_parms.remote_rttov_tag = (u8)UNF_GET_RT_TOV_FROM_PARAMS(login_parms); - login_co_parms.remote_edtov_tag = 0; - login_co_parms.remote_bb_credit = (u16)UNF_GET_BB_CREDIT_FROM_PARAMS(login_parms); - login_co_parms.els_cmnd_code = cmd_type; - - if (lport->act_topo == UNF_ACT_TOP_PRIVATE_LOOP) { - login_co_parms.compared_bbscn = 0; - } else { - login_co_parms.compared_bbscn = - (u32)unf_determin_bbscn((u8)lport->low_level_func.lport_cfg_items.bbscn, - (u8)UNF_GET_BB_SC_N_FROM_PARAMS(login_parms)); - } - - login_co_parms.compared_edtov_val = lport->ed_tov; - login_co_parms.compared_ratov_val = lport->ra_tov; - - ret = lport->low_level_func.port_mgr_op.ll_port_config_set((void *)lport->fc_port, - UNF_PORT_CFG_UPDATE_PLOGI_PARAM, (void *)&login_co_parms); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) Lowlevel unsupport port config", lport->port_id); - } -} - -u32 unf_check_plogi_params(struct unf_lport *lport, struct unf_rport *rport, - struct unf_lgn_parm *login_parms) -{ - u32 ret = RETURN_OK; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(login_parms, UNF_RETURN_ERROR); - - /* Parameters check: Class-type */ - if (login_parms->cl_parms[ARRAY_INDEX_2].valid == UNF_CLASS_INVALID || - login_parms->co_parms.bb_receive_data_field_size == 0) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) RPort N_Port_ID(0x%x) with PLOGI parameters invalid: class3(%u), BBReceiveDataFieldSize(0x%x), send LOGO", - lport->port_id, rport->nport_id, - login_parms->cl_parms[ARRAY_INDEX_2].valid, - login_parms->co_parms.bb_receive_data_field_size); - - spin_lock_irqsave(&rport->rport_state_lock, flag); - unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO); /* --->>> LOGO */ - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - /* Enter LOGO stage */ - unf_rport_enter_logo(lport, rport); - return UNF_RETURN_ERROR; - } - - /* 16G FC Brocade SW, Domain Controller's PLOGI both support CLASS-1 & - * CLASS-2 - */ - if (login_parms->cl_parms[ARRAY_INDEX_0].valid == UNF_CLASS_VALID || - login_parms->cl_parms[ARRAY_INDEX_1].valid == UNF_CLASS_VALID) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) get PLOGI class1(%u) class2(%u) from N_Port_ID(0x%x)", - lport->port_id, login_parms->cl_parms[ARRAY_INDEX_0].valid, - login_parms->cl_parms[ARRAY_INDEX_1].valid, rport->nport_id); - } - - return ret; -} - -static void unf_save_plogi_params(struct unf_lport *lport, - struct unf_rport *rport, - struct unf_lgn_parm *login_parms, - 
u32 cmd_code) -{ -#define UNF_DELAY_TIME 100 /* WWPN smaller delay to send PRLI with COM mode */ - - u64 wwpn = INVALID_VALUE64; - u64 wwnn = INVALID_VALUE64; - u32 ed_tov = 0; - u32 remote_edtov = 0; - - if (login_parms->co_parms.bb_receive_data_field_size > UNF_MAX_FRAME_SIZE) - rport->max_frame_size = UNF_MAX_FRAME_SIZE; /* 2112 */ - else - rport->max_frame_size = login_parms->co_parms.bb_receive_data_field_size; - - wwnn = (u64)(((u64)(login_parms->high_node_name) << UNF_SHIFT_32) | - ((u64)login_parms->low_node_name)); - wwpn = (u64)(((u64)(login_parms->high_port_name) << UNF_SHIFT_32) | - ((u64)login_parms->low_port_name)); - - remote_edtov = login_parms->co_parms.e_d_tov; - ed_tov = login_parms->co_parms.e_d_tov_resolution - ? (remote_edtov / UNF_OS_MS_TO_NS) - : remote_edtov; - - rport->port_name = wwpn; - rport->node_name = wwnn; - rport->local_nport_id = lport->nport_id; - - if (lport->act_topo == UNF_ACT_TOP_P2P_DIRECT || - lport->act_topo == UNF_ACT_TOP_PRIVATE_LOOP) { - /* P2P or Private Loop or FCoE VN2VN */ - lport->ed_tov = (lport->ed_tov > ed_tov) ? lport->ed_tov : ed_tov; - lport->ra_tov = 2 * lport->ed_tov; /* 2 * E_D_TOV */ - - if (ed_tov != 0) - rport->ed_tov = ed_tov; - else - rport->ed_tov = UNF_DEFAULT_EDTOV; - } else { - /* SAN: E_D_TOV updated by FLOGI */ - rport->ed_tov = lport->ed_tov; - } - - /* WWPN smaller: delay to send PRLI */ - if (rport->port_name > lport->port_name) - rport->ed_tov += UNF_DELAY_TIME; /* 100ms */ - - /* Configure port parameters to low level (chip) */ - unf_cfg_lowlevel_port_params(lport, rport, login_parms, cmd_code); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) RPort(0x%x) with WWPN(0x%llx) WWNN(0x%llx) login: ED_TOV(%u) Port: ED_TOV(%u)", - lport->port_id, rport->nport_id, rport->port_name, rport->node_name, - ed_tov, lport->ed_tov); -} - -static bool unf_check_bbscn_is_enabled(u8 local_bbscn, u8 remote_bbscn) -{ - return unf_determin_bbscn(local_bbscn, remote_bbscn) ? 
true : false; -} - -static u32 unf_irq_process_switch2thread(void *lport, struct unf_xchg *xchg, - unf_event_task evt_task) -{ - struct unf_cm_event_report *event = NULL; - struct unf_xchg *unf_xchg = NULL; - u32 ret = 0; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - unf_lport = lport; - unf_xchg = xchg; - - if (unlikely(!unf_lport->event_mgr.unf_get_free_event_func || - !unf_lport->event_mgr.unf_post_event_func || - !unf_lport->event_mgr.unf_release_event)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) event function is NULL", - unf_lport->port_id); - - return UNF_RETURN_ERROR; - } - - ret = unf_xchg_ref_inc(unf_xchg, SFS_RESPONSE); - FC_CHECK_RETURN_VALUE((ret == RETURN_OK), UNF_RETURN_ERROR); - - event = unf_lport->event_mgr.unf_get_free_event_func((void *)lport); - FC_CHECK_RETURN_VALUE(event, UNF_RETURN_ERROR); - - event->lport = unf_lport; - event->event_asy_flag = UNF_EVENT_ASYN; - event->unf_event_task = evt_task; - event->para_in = xchg; - unf_lport->event_mgr.unf_post_event_func(unf_lport, event); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) start to switch thread process now", - unf_lport->port_id); - - return ret; -} - -u32 unf_plogi_handler_com_process(struct unf_xchg *xchg) -{ - struct unf_xchg *unf_xchg = xchg; - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_plogi_pdisc *plogi_frame = NULL; - struct unf_lgn_parm *login_parms = NULL; - u32 ret = UNF_RETURN_ERROR; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(unf_xchg, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(unf_xchg->lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(unf_xchg->rport, UNF_RETURN_ERROR); - - unf_lport = unf_xchg->lport; - unf_rport = unf_xchg->rport; - plogi_frame = &unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->plogi; - login_parms = &plogi_frame->payload.stparms; - - unf_save_plogi_params(unf_lport, unf_rport, login_parms, ELS_PLOGI); - - /* Update state: PLOGI_WAIT */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport->nport_id = unf_xchg->sid; - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_ENTER_PLOGI); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - /* Send PLOGI ACC to remote port */ - ret = unf_send_plogi_acc(unf_lport, unf_rport, unf_xchg); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) send PLOGI ACC failed", - unf_lport->port_id); - - /* NOTE: exchange has been freed inner(before) */ - unf_rport_error_recovery(unf_rport); - return ret; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]LOGIN: Port(0x%x) send PLOGI ACC to Port(0x%x) succeed", - unf_lport->port_id, unf_rport->nport_id); - - return ret; -} - -int unf_plogi_async_handle(void *argc_in, void *argc_out) -{ - struct unf_xchg *xchg = (struct unf_xchg *)argc_in; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - ret = unf_plogi_handler_com_process(xchg); - - unf_xchg_ref_dec(xchg, SFS_RESPONSE); - - return (int)ret; -} - -u32 unf_plogi_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg) -{ - struct unf_xchg *unf_xchg = xchg; - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_rport = NULL; - struct unf_plogi_pdisc *plogi_frame = NULL; - struct unf_lgn_parm *login_parms = NULL; - struct unf_rjt_info rjt_info = {0}; - u64 wwpn = INVALID_VALUE64; - u32 ret = UNF_RETURN_ERROR; - bool bbscn_enabled = false; - bool 
switch2thread = false; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - /* 1. Maybe: PLOGI is sent by Name server */ - if (sid < UNF_FC_FID_DOM_MGR || - lport->act_topo == UNF_ACT_TOP_P2P_DIRECT) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Receive PLOGI. Port(0x%x_0x%x)<---RPort(0x%x) with OX_ID(0x%x)", - lport->port_id, lport->nport_id, sid, xchg->oxid); - } - - UNF_SERVICE_COLLECT(lport->link_service_info, UNF_SERVICE_ITEM_PLOGI); - - /* 2. State check: Offline */ - if (unf_lport->states >= UNF_LPORT_ST_OFFLINE) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) received PLOGI with state(0x%x)", - unf_lport->port_id, unf_lport->nport_id, unf_lport->states); - - unf_cm_free_xchg(unf_lport, unf_xchg); - return UNF_RETURN_ERROR; - } - - /* Get R_Port by WWpn */ - plogi_frame = &unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->plogi; - login_parms = &plogi_frame->payload.stparms; - - UNF_PRINT_SFS_LIMIT(UNF_INFO, unf_lport->port_id, &plogi_frame->payload, - sizeof(struct unf_plogi_payload)); - - wwpn = (u64)(((u64)(login_parms->high_port_name) << UNF_SHIFT_32) | - ((u64)login_parms->low_port_name)); - - /* 3. Get (new) R_Port (by wwpn) */ - unf_rport = unf_find_rport(unf_lport, sid, wwpn); - unf_rport = unf_get_safe_rport(unf_lport, unf_rport, UNF_RPORT_REUSE_ONLY, sid); - if (!unf_rport) { - memset(&rjt_info, 0, sizeof(struct unf_rjt_info)); - rjt_info.els_cmnd_code = ELS_PLOGI; - rjt_info.reason_code = UNF_LS_RJT_BUSY; - rjt_info.reason_explanation = UNF_LS_RJT_INSUFFICIENT_RESOURCES; - - /* R_Port is NULL: Send ELS RJT for PLOGI */ - (void)unf_send_els_rjt_by_did(unf_lport, unf_xchg, sid, &rjt_info); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) has no RPort and send PLOGI reject", - unf_lport->port_id); - return RETURN_OK; - } - - /* - * 4. According to FC-LS 4.2.7.1: - * After RCVD PLogi or send Plogi ACC, need to termitate open EXCH - */ - unf_cm_xchg_mgr_abort_io_by_id(unf_lport, unf_rport, sid, unf_lport->nport_id, 0); - - /* 5. Cancel recovery timer work after RCVD PLOGI */ - if (cancel_delayed_work(&unf_rport->recovery_work)) - atomic_dec(&unf_rport->rport_ref_cnt); - - /* - * 6. Plogi parameters check - * Call by: (RCVD) PLOGI handler & callback function for RCVD PLOGI_ACC - */ - ret = unf_check_plogi_params(unf_lport, unf_rport, login_parms); - if (ret != RETURN_OK) { - unf_cm_free_xchg(unf_lport, unf_xchg); - return UNF_RETURN_ERROR; - } - - unf_xchg->lport = lport; - unf_xchg->rport = unf_rport; - unf_xchg->sid = sid; - - /* 7. About bbscn for context change */ - bbscn_enabled = - unf_check_bbscn_is_enabled((u8)unf_lport->low_level_func.lport_cfg_items.bbscn, - (u8)UNF_GET_BB_SC_N_FROM_PARAMS(login_parms)); - if (unf_lport->act_topo == UNF_ACT_TOP_P2P_DIRECT && bbscn_enabled) { - switch2thread = true; - unf_lport->bbscn_support = true; - } - - /* 8. 
Process PLOGI Frame: switch to thread if necessary */ - if (switch2thread && unf_lport->root_lport == unf_lport) { - /* Wait for LR complete sync */ - ret = unf_irq_process_switch2thread(unf_lport, unf_xchg, unf_plogi_async_handle); - } else { - ret = unf_plogi_handler_com_process(unf_xchg); - } - - return ret; -} - -static void unf_obtain_tape_capacity(struct unf_lport *lport, - struct unf_rport *rport, u32 tape_parm) -{ - u32 rec_support = 0; - u32 task_retry_support = 0; - u32 retry_support = 0; - - rec_support = tape_parm & UNF_FC4_FRAME_PARM_3_REC_SUPPORT; - task_retry_support = - tape_parm & UNF_FC4_FRAME_PARM_3_TASK_RETRY_ID_SUPPORT; - retry_support = tape_parm & UNF_FC4_FRAME_PARM_3_RETRY_SUPPORT; - - if (lport->low_level_func.lport_cfg_items.tape_support && - rec_support && task_retry_support && retry_support) { - rport->tape_support_needed = true; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x_0x%x) FC_tape is needed for RPort(0x%x)", - lport->port_id, lport->nport_id, rport->nport_id); - } - - if ((tape_parm & UNF_FC4_FRAME_PARM_3_CONF_ALLOW) && - lport->low_level_func.lport_cfg_items.fcp_conf) { - rport->fcp_conf_needed = true; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x_0x%x) FCP confirm is needed for RPort(0x%x)", - lport->port_id, lport->nport_id, rport->nport_id); - } -} - -static u32 unf_prli_handler_com_process(struct unf_xchg *xchg) -{ - struct unf_prli_prlo *prli = NULL; - u32 ret = UNF_RETURN_ERROR; - ulong flags = 0; - u32 sid = 0; - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_xchg *unf_xchg = NULL; - - unf_xchg = xchg; - FC_CHECK_RETURN_VALUE(unf_xchg->lport, UNF_RETURN_ERROR); - unf_lport = unf_xchg->lport; - sid = xchg->sid; - - UNF_SERVICE_COLLECT(unf_lport->link_service_info, UNF_SERVICE_ITEM_PRLI); - - /* 1. Get R_Port: for each R_Port from rport_busy_list */ - unf_rport = unf_get_rport_by_nport_id(unf_lport, sid); - if (!unf_rport) { - /* non session (R_Port) existence */ - (void)unf_send_logo_by_did(unf_lport, sid); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) received PRLI but no RPort SID(0x%x) OX_ID(0x%x)", - unf_lport->port_id, unf_lport->nport_id, sid, xchg->oxid); - - unf_cm_free_xchg(unf_lport, xchg); - return ret; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]LOGIN: Receive PRLI. Port(0x%x)<---RPort(0x%x) with S_ID(0x%x)", - unf_lport->port_id, unf_rport->nport_id, sid); - - /* 2. Get PRLI info */ - prli = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->prli; - if (sid < UNF_FC_FID_DOM_MGR || unf_lport->act_topo == UNF_ACT_TOP_P2P_DIRECT) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Receive PRLI. Port(0x%x_0x%x)<---RPort(0x%x) parameter-3(0x%x) OX_ID(0x%x)", - unf_lport->port_id, unf_lport->nport_id, sid, - prli->payload.parms[ARRAY_INDEX_3], xchg->oxid); - } - - UNF_PRINT_SFS_LIMIT(UNF_INFO, unf_lport->port_id, &prli->payload, - sizeof(struct unf_prli_payload)); - - spin_lock_irqsave(&unf_rport->rport_state_lock, flags); - - /* 3. Increase R_Port ref_cnt */ - ret = unf_rport_ref_inc(unf_rport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) RPort(0x%x_0x%p) is removing and do nothing", - unf_lport->port_id, unf_rport->nport_id, unf_rport); - - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags); - - unf_cm_free_xchg(unf_lport, xchg); - return RETURN_ERROR; - } - - /* 4. 
Cancel R_Port Open work */ - if (cancel_delayed_work(&unf_rport->open_work)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x_0x%x) RPort(0x%x) cancel open work succeed", - unf_lport->port_id, unf_lport->nport_id, unf_rport->nport_id); - - /* This is not the last counter */ - atomic_dec(&unf_rport->rport_ref_cnt); - } - - /* 5. Check R_Port state */ - if (unf_rport->rp_state != UNF_RPORT_ST_PRLI_WAIT && - unf_rport->rp_state != UNF_RPORT_ST_READY) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) RPort(0x%x) with state(0x%x) when received PRLI, send LOGO", - unf_lport->port_id, unf_lport->nport_id, - unf_rport->nport_id, unf_rport->rp_state); - - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_LOGO); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags); - - /* NOTE: Start to send LOGO */ - unf_rport_enter_logo(unf_lport, unf_rport); - - unf_cm_free_xchg(unf_lport, xchg); - unf_rport_ref_dec(unf_rport); - - return RETURN_ERROR; - } - - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags); - - /* 6. Update R_Port options(INI/TGT/BOTH) */ - unf_rport->options = - prli->payload.parms[ARRAY_INDEX_3] & - (UNF_FC4_FRAME_PARM_3_TGT | UNF_FC4_FRAME_PARM_3_INI); - - unf_update_port_feature(unf_rport->port_name, unf_rport->options); - - /* for Confirm */ - unf_rport->fcp_conf_needed = false; - - unf_obtain_tape_capacity(unf_lport, unf_rport, prli->payload.parms[ARRAY_INDEX_3]); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x_0x%x) RPort(0x%x) parameter-3(0x%x) options(0x%x)", - unf_lport->port_id, unf_lport->nport_id, unf_rport->nport_id, - prli->payload.parms[ARRAY_INDEX_3], unf_rport->options); - - /* 7. Send PRLI ACC */ - ret = unf_send_prli_acc(unf_lport, unf_rport, xchg); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) RPort(0x%x) send PRLI ACC failed", - unf_lport->port_id, unf_lport->nport_id, unf_rport->nport_id); - - /* NOTE: exchange has been freed inner(before) */ - unf_rport_error_recovery(unf_rport); - } - - /* 8. 
Decrease R_Port ref_cnt */ - unf_rport_ref_dec(unf_rport); - - return ret; -} - -int unf_prli_async_handle(void *argc_in, void *argc_out) -{ - struct unf_xchg *xchg = (struct unf_xchg *)argc_in; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - ret = unf_prli_handler_com_process(xchg); - - unf_xchg_ref_dec(xchg, SFS_RESPONSE); - - return (int)ret; -} - -u32 unf_prli_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg) -{ - u32 ret = UNF_RETURN_ERROR; - bool switch2thread = false; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - xchg->sid = sid; - xchg->lport = lport; - unf_lport = lport; - - if (lport->bbscn_support && - lport->act_topo == UNF_ACT_TOP_P2P_DIRECT) - switch2thread = true; - - if (switch2thread && unf_lport->root_lport == unf_lport) { - /* Wait for LR done sync */ - ret = unf_irq_process_switch2thread(lport, xchg, unf_prli_async_handle); - } else { - ret = unf_prli_handler_com_process(xchg); - } - - return ret; -} - -static void unf_save_rscn_port_id(struct unf_rscn_mgr *rscn_mg, - struct unf_rscn_port_id_page *rscn_port_id) -{ - struct unf_port_id_page *exit_port_id_page = NULL; - struct unf_port_id_page *new_port_id_page = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flag = 0; - bool is_repeat = false; - - FC_CHECK_RETURN_VOID(rscn_mg); - FC_CHECK_RETURN_VOID(rscn_port_id); - - /* 1. check new RSCN Port_ID (RSNC_Page) whether within RSCN_Mgr or not - */ - spin_lock_irqsave(&rscn_mg->rscn_id_list_lock, flag); - if (list_empty(&rscn_mg->list_using_rscn_page)) { - is_repeat = false; - } else { - /* Check repeat: for each exist RSCN page form RSCN_Mgr Page - * list - */ - list_for_each_safe(node, next_node, &rscn_mg->list_using_rscn_page) { - exit_port_id_page = list_entry(node, struct unf_port_id_page, - list_node_rscn); - if (exit_port_id_page->port_id_port == rscn_port_id->port_id_port && - exit_port_id_page->port_id_area == rscn_port_id->port_id_area && - exit_port_id_page->port_id_domain == rscn_port_id->port_id_domain) { - is_repeat = true; - break; - } - } - } - spin_unlock_irqrestore(&rscn_mg->rscn_id_list_lock, flag); - - FC_CHECK_RETURN_VOID(rscn_mg->unf_get_free_rscn_node); - - /* 2. 
Get & add free RSNC Node --->>> RSCN_Mgr */ - if (!is_repeat) { - new_port_id_page = rscn_mg->unf_get_free_rscn_node(rscn_mg); - if (!new_port_id_page) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_ERR, "[err]Get free RSCN node failed"); - - return; - } - - new_port_id_page->addr_format = rscn_port_id->addr_format; - new_port_id_page->event_qualifier = rscn_port_id->event_qualifier; - new_port_id_page->reserved = rscn_port_id->reserved; - new_port_id_page->port_id_domain = rscn_port_id->port_id_domain; - new_port_id_page->port_id_area = rscn_port_id->port_id_area; - new_port_id_page->port_id_port = rscn_port_id->port_id_port; - - /* Add entry to list: using_rscn_page */ - spin_lock_irqsave(&rscn_mg->rscn_id_list_lock, flag); - list_add_tail(&new_port_id_page->list_node_rscn, &rscn_mg->list_using_rscn_page); - spin_unlock_irqrestore(&rscn_mg->rscn_id_list_lock, flag); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) has repeat RSCN node with domain(0x%x) area(0x%x)", - rscn_port_id->port_id_domain, rscn_port_id->port_id_area, - rscn_port_id->port_id_port); - } -} - -static u32 unf_analysis_rscn_payload(struct unf_lport *lport, - struct unf_rscn_pld *rscn_pld) -{ -#define UNF_OS_DISC_REDISC_TIME 10000 - - struct unf_rscn_port_id_page *rscn_port_id = NULL; - struct unf_disc *disc = NULL; - struct unf_rscn_mgr *rscn_mgr = NULL; - u32 index = 0; - u32 pld_len = 0; - u32 port_id_page_cnt = 0; - u32 ret = RETURN_OK; - ulong flag = 0; - bool eb_need_disc_flag = false; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rscn_pld, UNF_RETURN_ERROR); - - /* This field is the length in bytes of the entire Payload, inclusive of - * the word 0 - */ - pld_len = UNF_GET_RSCN_PLD_LEN(rscn_pld->cmnd); - pld_len -= sizeof(rscn_pld->cmnd); - port_id_page_cnt = pld_len / UNF_RSCN_PAGE_LEN; - - /* Pages within payload is nor more than 255 */ - if (port_id_page_cnt > UNF_RSCN_PAGE_SUM) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x_0x%x) page num(0x%x) exceed 255 in RSCN", - lport->port_id, lport->nport_id, port_id_page_cnt); - - return UNF_RETURN_ERROR; - } - - /* L_Port-->Disc-->Rscn_Mgr */ - disc = &lport->disc; - rscn_mgr = &disc->rscn_mgr; - - /* for each ID from RSCN_Page: check whether need to Disc or not */ - while (index < port_id_page_cnt) { - rscn_port_id = &rscn_pld->port_id_page[index]; - if (unf_lookup_lport_by_nportid(lport, *(u32 *)rscn_port_id)) { - /* Prevent to create session with L_Port which have the - * same N_Port_ID - */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) find local N_Port_ID(0x%x) within RSCN payload", - ((struct unf_lport *)(lport->root_lport))->nport_id, - *(u32 *)rscn_port_id); - } else { - /* New RSCN_Page ID find, save it to RSCN_Mgr */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x_0x%x) save RSCN N_Port_ID(0x%x)", - lport->port_id, lport->nport_id, - *(u32 *)rscn_port_id); - - /* 1. new RSCN_Page ID find, save it to RSCN_Mgr */ - unf_save_rscn_port_id(rscn_mgr, rscn_port_id); - eb_need_disc_flag = true; - } - index++; - } - - if (!eb_need_disc_flag) { - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_MAJOR, - "[info]Port(0x%x) find all N_Port_ID and do not need to disc", - ((struct unf_lport *)(lport->root_lport))->nport_id); - - return RETURN_OK; - } - - /* 2. 
Do/Start Disc: Check & do Disc (GID_PT) process */ - if (!disc->disc_temp.unf_disc_start) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) DISC start function is NULL", - lport->nport_id, lport->nport_id); - - return UNF_RETURN_ERROR; - } - - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - if (disc->states == UNF_DISC_ST_END || - ((jiffies - disc->last_disc_jiff) > msecs_to_jiffies(UNF_OS_DISC_REDISC_TIME))) { - disc->disc_option = UNF_RSCN_DISC; - disc->last_disc_jiff = jiffies; - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - ret = disc->disc_temp.unf_disc_start(lport); - } else { - FC_DRV_PRINT(UNF_LOG_ABNORMAL, UNF_INFO, - "[info]Port(0x%x_0x%x) DISC state(0x%x) with last time(%llu) and don't do DISC", - lport->port_id, lport->nport_id, disc->states, - disc->last_disc_jiff); - - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - } - - return ret; -} - -u32 unf_rscn_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg) -{ - /* - * A RSCN ELS shall be sent to registered Nx_Ports - * when an event occurs that may have affected the state of - * one or more Nx_Ports, or the ULP state within the Nx_Port. - * * - * The Payload of a RSCN Request includes a list - * containing the addresses of the affected Nx_Ports. - * * - * Each affected Port_ID page contains the ID of the Nx_Port, - * Fabric Controller, E_Port, domain, or area for which the event was - * detected. - */ - struct unf_rscn_pld *rscn_pld = NULL; - struct unf_rport *unf_rport = NULL; - u32 ret = UNF_RETURN_ERROR; - u32 pld_len = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Receive RSCN Port(0x%x_0x%x)<---RPort(0x%x) OX_ID(0x%x)", - lport->port_id, lport->nport_id, sid, xchg->oxid); - - UNF_SERVICE_COLLECT(lport->link_service_info, UNF_SERVICE_ITEM_RSCN); - - /* 1. Get R_Port by S_ID */ - unf_rport = unf_get_rport_by_nport_id(lport, sid); /* rport busy_list */ - if (!unf_rport) { - unf_rport = unf_rport_get_free_and_init(lport, UNF_PORT_TYPE_FC, sid); - if (!unf_rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) received RSCN but has no RPort(0x%x) with OX_ID(0x%x)", - lport->port_id, lport->nport_id, sid, xchg->oxid); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - - unf_rport->nport_id = sid; - } - - rscn_pld = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rscn.rscn_pld; - FC_CHECK_RETURN_VALUE(rscn_pld, UNF_RETURN_ERROR); - pld_len = UNF_GET_RSCN_PLD_LEN(rscn_pld->cmnd); - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, rscn_pld, pld_len); - - /* 2. NOTE: Analysis RSCN payload(save & disc if necessary) */ - ret = unf_analysis_rscn_payload(lport, rscn_pld); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) analysis RSCN failed", - lport->port_id, lport->nport_id); - } - - /* 3. 
send rscn_acc after analysis payload */ - ret = unf_send_rscn_acc(lport, unf_rport, xchg); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) send RSCN response failed", - lport->port_id, lport->nport_id); - } - - return ret; -} - -static void unf_analysis_pdisc_pld(struct unf_lport *lport, - struct unf_rport *rport, - struct unf_plogi_pdisc *pdisc) -{ - struct unf_lgn_parm *pdisc_params = NULL; - u64 wwpn = INVALID_VALUE64; - u64 wwnn = INVALID_VALUE64; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(pdisc); - - pdisc_params = &pdisc->payload.stparms; - if (pdisc_params->co_parms.bb_receive_data_field_size > UNF_MAX_FRAME_SIZE) - rport->max_frame_size = UNF_MAX_FRAME_SIZE; - else - rport->max_frame_size = pdisc_params->co_parms.bb_receive_data_field_size; - - wwnn = (u64)(((u64)(pdisc_params->high_node_name) << UNF_SHIFT_32) | - ((u64)pdisc_params->low_node_name)); - wwpn = (u64)(((u64)(pdisc_params->high_port_name) << UNF_SHIFT_32) | - ((u64)pdisc_params->low_port_name)); - - rport->port_name = wwpn; - rport->node_name = wwnn; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) save PDISC parameters to Rport(0x%x) WWPN(0x%llx) WWNN(0x%llx)", - lport->port_id, rport->nport_id, rport->port_name, - rport->node_name); -} - -u32 unf_send_pdisc_rjt(struct unf_lport *lport, struct unf_rport *rport, struct unf_xchg *xchg) -{ - u32 ret = UNF_RETURN_ERROR; - struct unf_rjt_info rjt_info; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - memset(&rjt_info, 0, sizeof(struct unf_rjt_info)); - rjt_info.els_cmnd_code = ELS_PDISC; - rjt_info.reason_code = UNF_LS_RJT_LOGICAL_ERROR; - rjt_info.reason_explanation = UNF_LS_RJT_NO_ADDITIONAL_INFO; - - ret = unf_send_els_rjt_by_rport(lport, xchg, rport, &rjt_info); - - return ret; -} - -u32 unf_pdisc_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg) -{ - struct unf_plogi_pdisc *pdisc = NULL; - struct unf_rport *unf_rport = NULL; - ulong flags = 0; - u32 ret = RETURN_OK; - u64 wwpn = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Receive PDISC. Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", - lport->port_id, sid, xchg->oxid); - - UNF_SERVICE_COLLECT(lport->link_service_info, UNF_SERVICE_ITEM_PDISC); - pdisc = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->pdisc; - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, &pdisc->payload, - sizeof(struct unf_plogi_payload)); - wwpn = (u64)(((u64)(pdisc->payload.stparms.high_port_name) << UNF_SHIFT_32) | - ((u64)pdisc->payload.stparms.low_port_name)); - - unf_rport = unf_find_rport(lport, sid, wwpn); - if (!unf_rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) can't find RPort by NPort ID(0x%x). 
Free exchange and send LOGO", - lport->port_id, sid); - - unf_cm_free_xchg(lport, xchg); - (void)unf_send_logo_by_did(lport, sid); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MINOR, - "[info]Port(0x%x) get exist RPort(0x%x) when receive PDISC with S_Id(0x%x)", - lport->port_id, unf_rport->nport_id, sid); - - if (sid >= UNF_FC_FID_DOM_MGR) - return unf_send_pdisc_rjt(lport, unf_rport, xchg); - - unf_analysis_pdisc_pld(lport, unf_rport, pdisc); - - /* State: READY */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flags); - if (unf_rport->rp_state == UNF_RPORT_ST_READY) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) find RPort(0x%x) state is READY when receiving PDISC", - lport->port_id, sid); - - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags); - - ret = unf_send_pdisc_acc(lport, unf_rport, xchg); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) handle PDISC failed", - lport->port_id); - - return ret; - } - - /* Report Down/Up event to scsi */ - unf_update_lport_state_by_linkup_event(lport, - unf_rport, unf_rport->options); - } else if ((unf_rport->rp_state == UNF_RPORT_ST_CLOSING) && - (unf_rport->session)) { - /* State: Closing */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving PDISC", - lport->port_id, sid, unf_rport->rp_state); - - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags); - - unf_cm_free_xchg(lport, xchg); - (void)unf_send_logo_by_did(lport, sid); - } else if (unf_rport->rp_state == UNF_RPORT_ST_PRLI_WAIT) { - /* State: PRLI_WAIT */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving PDISC", - lport->port_id, sid, unf_rport->rp_state); - - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags); - - ret = unf_send_pdisc_acc(lport, unf_rport, xchg); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) handle PDISC failed", - lport->port_id); - - return ret; - } - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving PDISC, send LOGO", - lport->port_id, sid, unf_rport->rp_state); - - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_LOGO); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags); - - unf_rport_enter_logo(lport, unf_rport); - unf_cm_free_xchg(lport, xchg); - } - } - - return ret; -} - -static void unf_analysis_adisc_pld(struct unf_lport *lport, - struct unf_rport *rport, - struct unf_adisc_payload *adisc_pld) -{ - u64 wwpn = INVALID_VALUE64; - u64 wwnn = INVALID_VALUE64; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(adisc_pld); - - wwnn = (u64)(((u64)(adisc_pld->high_node_name) << UNF_SHIFT_32) | - ((u64)adisc_pld->low_node_name)); - wwpn = (u64)(((u64)(adisc_pld->high_port_name) << UNF_SHIFT_32) | - ((u64)adisc_pld->low_port_name)); - - rport->port_name = wwpn; - rport->node_name = wwnn; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) save ADISC parameters to RPort(0x%x), WWPN(0x%llx) WWNN(0x%llx) NPort ID(0x%x)", - lport->port_id, rport->nport_id, rport->port_name, - rport->node_name, adisc_pld->nport_id); -} - -u32 unf_adisc_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg) -{ - struct unf_rport *unf_rport = NULL; - struct unf_adisc_payload *adisc_pld = NULL; - ulong flags = 0; - u64 wwpn = 0; - u32 ret = RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - 
FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Receive ADISC. Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", - lport->port_id, sid, xchg->oxid); - - UNF_SERVICE_COLLECT(lport->link_service_info, UNF_SERVICE_ITEM_ADISC); - adisc_pld = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->adisc.adisc_payl; - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, adisc_pld, sizeof(struct unf_adisc_payload)); - wwpn = (u64)(((u64)(adisc_pld->high_port_name) << UNF_SHIFT_32) | - ((u64)adisc_pld->low_port_name)); - - unf_rport = unf_find_rport(lport, sid, wwpn); - if (!unf_rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) can't find RPort by NPort ID(0x%x). Free exchange and send LOGO", - lport->port_id, sid); - - unf_cm_free_xchg(lport, xchg); - (void)unf_send_logo_by_did(lport, sid); - - return ret; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MINOR, - "[info]Port(0x%x) get exist RPort(0x%x) when receive ADISC with S_ID(0x%x)", - lport->port_id, unf_rport->nport_id, sid); - - unf_analysis_adisc_pld(lport, unf_rport, adisc_pld); - - /* State: READY */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flags); - if (unf_rport->rp_state == UNF_RPORT_ST_READY) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) find RPort(0x%x) state is READY when receiving ADISC", - lport->port_id, sid); - - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags); - - /* Return ACC directly */ - ret = unf_send_adisc_acc(lport, unf_rport, xchg); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) send ADISC ACC failed", lport->port_id); - - return ret; - } - - /* Report Down/Up event to SCSI */ - unf_update_lport_state_by_linkup_event(lport, unf_rport, unf_rport->options); - } - /* State: Closing */ - else if ((unf_rport->rp_state == UNF_RPORT_ST_CLOSING) && - (unf_rport->session)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving ADISC", - lport->port_id, sid, unf_rport->rp_state); - - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags); - - unf_rport = unf_get_safe_rport(lport, unf_rport, - UNF_RPORT_REUSE_RECOVER, - unf_rport->nport_id); - if (unf_rport) { - spin_lock_irqsave(&unf_rport->rport_state_lock, flags); - unf_rport->nport_id = sid; - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags); - - ret = unf_send_adisc_acc(lport, unf_rport, xchg); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) send ADISC ACC failed", - lport->port_id); - - return ret; - } - - unf_update_lport_state_by_linkup_event(lport, - unf_rport, unf_rport->options); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) can't find RPort by NPort_ID(0x%x). 
Free exchange and send LOGO", - lport->port_id, sid); - - unf_cm_free_xchg(lport, xchg); - (void)unf_send_logo_by_did(lport, sid); - } - } else if (unf_rport->rp_state == UNF_RPORT_ST_PRLI_WAIT) { - /* State: PRLI_WAIT */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving ADISC", - lport->port_id, sid, unf_rport->rp_state); - - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags); - - ret = unf_send_adisc_acc(lport, unf_rport, xchg); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) send ADISC ACC failed", lport->port_id); - - return ret; - } - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) find RPort(0x%x) state is 0x%x when receiving ADISC, send LOGO", - lport->port_id, sid, unf_rport->rp_state); - - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_LOGO); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags); - - unf_rport_enter_logo(lport, unf_rport); - unf_cm_free_xchg(lport, xchg); - } - - return ret; -} - -u32 unf_rec_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg) -{ - struct unf_rport *unf_rport = NULL; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x) receive REC", lport->port_id); - - /* Send rec acc */ - ret = unf_send_rec_acc(lport, unf_rport, xchg); /* discard directly */ - - return ret; -} - -u32 unf_rrq_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg) -{ - struct unf_rport *unf_rport = NULL; - struct unf_rrq *rrq = NULL; - struct unf_xchg *xchg_reused = NULL; - u32 ret = UNF_RETURN_ERROR; - u16 ox_id = 0; - u16 rx_id = 0; - u32 unf_sid = 0; - ulong flags = 0; - struct unf_rjt_info rjt_info = {0}; - struct unf_xchg_hot_pool *hot_pool = NULL; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - UNF_SERVICE_COLLECT(lport->link_service_info, UNF_SERVICE_ITEM_RRQ); - rrq = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rrq; - ox_id = (u16)(rrq->oxid_rxid >> UNF_SHIFT_16); - rx_id = (u16)(rrq->oxid_rxid); - unf_sid = rrq->sid & UNF_NPORTID_MASK; - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_KEVENT, - "[warn]Receive RRQ. 
Port(0x%x)<---RPort(0x%x) sfsXchg(0x%p) OX_ID(0x%x,0x%x) RX_ID(0x%x)", - lport->port_id, sid, xchg, ox_id, xchg->oxid, rx_id); - - /* Get R_Port */ - unf_rport = unf_get_rport_by_nport_id(lport, sid); - if (!unf_rport) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) receive RRQ but has no RPort(0x%x)", - lport->port_id, sid); - - /* NOTE: send LOGO */ - unf_send_logo_by_did(lport, unf_sid); - - unf_cm_free_xchg(lport, xchg); - return ret; - } - - /* Get Target (Abort I/O) exchange context */ - xchg_reused = unf_cm_lookup_xchg_by_id(lport, ox_id, unf_sid); /* unf_find_xchg_by_ox_id */ - if (!xchg_reused) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) cannot find exchange with OX_ID(0x%x) RX_ID(0x%x) S_ID(0x%x)", - lport->port_id, ox_id, rx_id, unf_sid); - - rjt_info.els_cmnd_code = ELS_RRQ; - rjt_info.reason_code = FCXLS_BA_RJT_LOGICAL_ERROR | FCXLS_LS_RJT_INVALID_OXID_RXID; - - /* NOTE: send ELS RJT */ - if (unf_send_els_rjt_by_rport(lport, xchg, unf_rport, &rjt_info) != RETURN_OK) { - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - - return RETURN_OK; - } - - hot_pool = xchg_reused->hot_pool; - if (unlikely(!hot_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "Port(0x%x) OxId(0x%x) Rxid(0x%x) Sid(0x%x) Hot Pool is NULL.", - lport->port_id, ox_id, rx_id, unf_sid); - - return ret; - } - - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, flags); - xchg_reused->oxid = INVALID_VALUE16; - xchg_reused->rxid = INVALID_VALUE16; - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags); - - /* NOTE: release I/O exchange context */ - unf_xchg_ref_dec(xchg_reused, SFS_RESPONSE); - - /* Send RRQ ACC */ - ret = unf_send_rrq_acc(lport, unf_rport, xchg); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) can not send RRQ rsp. Xchg(0x%p) Ioxchg(0x%p) OX_RX_ID(0x%x 0x%x) S_ID(0x%x)", - lport->port_id, xchg, xchg_reused, ox_id, rx_id, unf_sid); - - unf_cm_free_xchg(lport, xchg); - } - - return ret; -} - -u32 unf_logo_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg) -{ - struct unf_rport *unf_rport = NULL; - struct unf_rport *logo_rport = NULL; - struct unf_logo *logo = NULL; - u32 ret = UNF_RETURN_ERROR; - u32 nport_id = 0; - struct unf_rjt_info rjt_info = {0}; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - UNF_SERVICE_COLLECT(lport->link_service_info, UNF_SERVICE_ITEM_LOGO); - logo = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->logo; - nport_id = logo->payload.nport_id & UNF_NPORTID_MASK; - - if (sid < UNF_FC_FID_DOM_MGR) { - /* R_Port is not fabric port */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[info]LOGIN: Receive LOGO. Port(0x%x)<---RPort(0x%x) NPort_ID(0x%x) OXID(0x%x)", - lport->port_id, sid, nport_id, xchg->oxid); - } - - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, &logo->payload, - sizeof(struct unf_logo_payload)); - - /* - * 1. S_ID unequal to NPort_ID: - * link down Rport find by NPort_ID immediately - */ - if (sid != nport_id) { - logo_rport = unf_get_rport_by_nport_id(lport, nport_id); - if (logo_rport) - unf_rport_immediate_link_down(lport, logo_rport); - } - - /* 2. 
Get R_Port by S_ID (frame header) */ - unf_rport = unf_get_rport_by_nport_id(lport, sid); - unf_rport = unf_get_safe_rport(lport, unf_rport, UNF_RPORT_REUSE_INIT, sid); /* INIT */ - if (!unf_rport) { - memset(&rjt_info, 0, sizeof(struct unf_rjt_info)); - rjt_info.els_cmnd_code = ELS_LOGO; - rjt_info.reason_code = UNF_LS_RJT_LOGICAL_ERROR; - rjt_info.reason_explanation = UNF_LS_RJT_NO_ADDITIONAL_INFO; - ret = unf_send_els_rjt_by_did(lport, xchg, sid, &rjt_info); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) receive LOGO but has no RPort(0x%x)", - lport->port_id, sid); - - return ret; - } - - /* - * 3. I/O resource release: set ABORT tag - * * - * Call by: R_Port remove; RCVD LOGO; RCVD PLOGI; send PLOGI ACC - */ - unf_cm_xchg_mgr_abort_io_by_id(lport, unf_rport, sid, lport->nport_id, INI_IO_STATE_LOGO); - - /* 4. Send LOGO ACC */ - ret = unf_send_logo_acc(lport, unf_rport, xchg); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_WARN, "[warn]Port(0x%x) send LOGO failed", lport->port_id); - } - /* - * 5. Do same operations with RCVD LOGO/PRLO & Send LOGO: - * retry (LOGIN or LOGO) or link down immediately - */ - unf_process_rport_after_logo(lport, unf_rport); - - return ret; -} - -u32 unf_prlo_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg) -{ - struct unf_rport *unf_rport = NULL; - struct unf_prli_prlo *prlo = NULL; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Receive PRLO. Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", - lport->port_id, sid, xchg->oxid); - - UNF_SERVICE_COLLECT(lport->link_service_info, UNF_SERVICE_ITEM_LOGO); - - /* Get (new) R_Port */ - unf_rport = unf_get_rport_by_nport_id(lport, sid); - unf_rport = unf_get_safe_rport(lport, unf_rport, UNF_RPORT_REUSE_INIT, sid); /* INIT */ - if (!unf_rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) receive PRLO but has no RPort", - lport->port_id); - - /* Discard directly */ - unf_cm_free_xchg(lport, xchg); - return ret; - } - - prlo = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->prlo; - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, &prlo->payload, - sizeof(struct unf_prli_payload)); - - /* Send PRLO ACC to remote */ - ret = unf_send_prlo_acc(lport, unf_rport, xchg); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) send PRLO ACC failed", lport->port_id); - } - - /* Enter Enhanced action after LOGO (retry LOGIN or LOGO) */ - unf_process_rport_after_logo(lport, unf_rport); - - return ret; -} - -static void unf_fill_echo_acc_pld(struct unf_echo *echo_acc) -{ - struct unf_echo_payload *echo_acc_pld = NULL; - - FC_CHECK_RETURN_VOID(echo_acc); - - echo_acc_pld = echo_acc->echo_pld; - FC_CHECK_RETURN_VOID(echo_acc_pld); - - echo_acc_pld->cmnd = UNF_ELS_CMND_ACC; -} - -static void unf_echo_acc_callback(struct unf_xchg *xchg) -{ - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VOID(xchg); - - unf_lport = xchg->lport; - - FC_CHECK_RETURN_VOID(unf_lport); - if (xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo_acc.phy_echo_addr) { - pci_unmap_single(unf_lport->low_level_func.dev, - xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo_acc - .phy_echo_addr, - UNF_ECHO_PAYLOAD_LEN, DMA_BIDIRECTIONAL); - xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo_acc.phy_echo_addr = 0; - } -} - -static u32 unf_send_echo_acc(struct unf_lport *lport, u32 did, - struct unf_xchg *xchg) 
-{ - struct unf_echo *echo_acc = NULL; - union unf_sfs_u *fc_entry = NULL; - u32 ret = UNF_RETURN_ERROR; - u16 ox_id = 0; - u16 rx_id = 0; - struct unf_frame_pkg pkg; - dma_addr_t phy_echo_acc_addr; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - memset(&pkg, 0, sizeof(struct unf_frame_pkg)); - xchg->cmnd_code = UNF_SET_ELS_ACC_TYPE(ELS_ECHO); - xchg->did = did; - xchg->sid = lport->nport_id; - xchg->oid = xchg->sid; - xchg->lport = lport; - - xchg->callback = NULL; - xchg->ob_callback = unf_echo_acc_callback; - - unf_fill_package(&pkg, xchg, xchg->rport); - pkg.type = UNF_PKG_ELS_REPLY; - fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!fc_entry) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", - lport->port_id, xchg->hotpooltag); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - - echo_acc = &fc_entry->echo_acc; - unf_fill_echo_acc_pld(echo_acc); - ox_id = xchg->oxid; - rx_id = xchg->rxid; - phy_echo_acc_addr = pci_map_single(lport->low_level_func.dev, - echo_acc->echo_pld, - UNF_ECHO_PAYLOAD_LEN, - DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(lport->low_level_func.dev, phy_echo_acc_addr)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_WARN, "[warn]Port(0x%x) pci map err", - lport->port_id); - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - echo_acc->phy_echo_addr = phy_echo_acc_addr; - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) { - unf_cm_free_xchg((void *)lport, (void *)xchg); - pci_unmap_single(lport->low_level_func.dev, - phy_echo_acc_addr, UNF_ECHO_PAYLOAD_LEN, - DMA_BIDIRECTIONAL); - echo_acc->phy_echo_addr = 0; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]ECHO ACC send %s. Port(0x%x)--->RPort(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", - (ret != RETURN_OK) ? "failed" : "succeed", lport->port_id, - did, ox_id, rx_id); - - return ret; -} - -u32 unf_echo_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg) -{ - struct unf_echo_payload *echo_pld = NULL; - struct unf_rport *unf_rport = NULL; - u32 ret = UNF_RETURN_ERROR; - u32 data_len = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - data_len = xchg->fcp_sfs_union.sfs_entry.cur_offset; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Receive ECHO. Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x))", - lport->port_id, sid, xchg->oxid); - - UNF_SERVICE_COLLECT(lport->link_service_info, UNF_SERVICE_ITEM_ECHO); - echo_pld = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo.echo_pld; - UNF_PRINT_SFS_LIMIT(UNF_INFO, lport->port_id, echo_pld, data_len); - unf_rport = unf_get_rport_by_nport_id(lport, sid); - xchg->rport = unf_rport; - - ret = unf_send_echo_acc(lport, sid, xchg); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_WARN, "[warn]Port(0x%x) send ECHO ACC failed", lport->port_id); - } - - return ret; -} - -static void unf_login_with_rport_in_n2n(struct unf_lport *lport, - u64 remote_port_name, - u64 remote_node_name) -{ - /* - * Call by (P2P): - * 1. RCVD FLOGI ACC - * 2. 
Send FLOGI ACC succeed - * * - * Compare WWN, larger is master, then send PLOGI - */ - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_rport = NULL; - ulong lport_flag = 0; - ulong rport_flag = 0; - u64 port_name = 0; - u64 node_name = 0; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VOID(lport); - - spin_lock_irqsave(&unf_lport->lport_state_lock, lport_flag); - unf_lport_state_ma(unf_lport, UNF_EVENT_LPORT_READY); /* LPort: FLOGI_WAIT --> READY */ - spin_unlock_irqrestore(&unf_lport->lport_state_lock, lport_flag); - - port_name = remote_port_name; - node_name = remote_node_name; - - if (unf_lport->port_name > port_name) { - /* Master case: send PLOGI */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x)'s WWN(0x%llx) is larger than rport(0x%llx), should be master", - unf_lport->port_id, unf_lport->port_name, port_name); - - /* Update N_Port_ID now: 0xEF */ - unf_lport->nport_id = UNF_P2P_LOCAL_NPORT_ID; - - unf_rport = unf_find_valid_rport(lport, port_name, UNF_P2P_REMOTE_NPORT_ID); - unf_rport = unf_get_safe_rport(lport, unf_rport, UNF_RPORT_REUSE_ONLY, - UNF_P2P_REMOTE_NPORT_ID); - if (unf_rport) { - unf_rport->node_name = node_name; - unf_rport->port_name = port_name; - unf_rport->nport_id = UNF_P2P_REMOTE_NPORT_ID; /* 0xD6 */ - unf_rport->local_nport_id = UNF_P2P_LOCAL_NPORT_ID; /* 0xEF */ - - spin_lock_irqsave(&unf_rport->rport_state_lock, rport_flag); - if (unf_rport->rp_state == UNF_RPORT_ST_PLOGI_WAIT || - unf_rport->rp_state == UNF_RPORT_ST_PRLI_WAIT || - unf_rport->rp_state == UNF_RPORT_ST_READY) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x) Rport(0x%x) have sent PLOGI or PRLI with state(0x%x)", - unf_lport->port_id, - unf_rport->nport_id, - unf_rport->rp_state); - - spin_unlock_irqrestore(&unf_rport->rport_state_lock, - rport_flag); - return; - } - /* Update L_Port State: PLOGI_WAIT */ - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_ENTER_PLOGI); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, rport_flag); - - /* P2P with master: Start to Send PLOGI */ - ret = unf_send_plogi(unf_lport, unf_rport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) with WWN(0x%llx) send PLOGI to(0x%llx) failed", - unf_lport->port_id, - unf_lport->port_name, port_name); - - unf_rport_error_recovery(unf_rport); - } - } else { - /* Get/Alloc R_Port failed */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) with WWN(0x%llx) allocate RPort(ID:0x%x,WWPN:0x%llx) failed", - unf_lport->port_id, unf_lport->port_name, - UNF_P2P_REMOTE_NPORT_ID, port_name); - } - } else { - /* Slave case: L_Port's Port Name is smaller than R_Port */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) with WWN(0x%llx) is smaller than rport(0x%llx), do nothing", - unf_lport->port_id, unf_lport->port_name, port_name); - } -} - -void unf_lport_enter_mns_plogi(struct unf_lport *lport) -{ - /* Fabric or Public Loop Mode: Login with Name server */ - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_rport = NULL; - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - struct unf_plogi_payload *plogi_pld = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_xchg *xchg = NULL; - struct unf_frame_pkg pkg; - - FC_CHECK_RETURN_VOID(lport); - - /* Get (safe) R_Port */ - unf_rport = unf_rport_get_free_and_init(lport, UNF_PORT_TYPE_FC, UNF_FC_FID_MGMT_SERV); - if (!unf_rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) allocate RPort failed", lport->port_id); - return; - } - - 
spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport->nport_id = UNF_FC_FID_MGMT_SERV; /* 0xfffffa */ - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - memset(&pkg, 0, sizeof(struct unf_frame_pkg)); - - /* Get & Set new free exchange */ - xchg = unf_cm_get_free_xchg(lport, UNF_XCHG_TYPE_SFS); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange can't be NULL for PLOGI", lport->port_id); - - return; - } - - xchg->cmnd_code = ELS_PLOGI; /* PLOGI */ - xchg->did = unf_rport->nport_id; - xchg->sid = lport->nport_id; - xchg->oid = xchg->sid; - xchg->lport = unf_lport; - xchg->rport = unf_rport; - - /* Set callback function */ - xchg->callback = NULL; /* for rcvd plogi acc/rjt processer */ - xchg->ob_callback = NULL; /* for send plogi failed processer */ - - unf_fill_package(&pkg, xchg, unf_rport); - pkg.type = UNF_PKG_ELS_REQ; - /* Fill PLOGI payload */ - fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!fc_entry) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", - lport->port_id, xchg->hotpooltag); - - unf_cm_free_xchg(lport, xchg); - return; - } - - plogi_pld = &fc_entry->plogi.payload; - memset(plogi_pld, 0, sizeof(struct unf_plogi_payload)); - unf_fill_plogi_pld(plogi_pld, lport); - - /* Start to Send PLOGI command */ - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); -} - -static void unf_register_to_switch(struct unf_lport *lport) -{ - /* Register to Fabric, used for: FABRIC & PUBLI LOOP */ - ulong flag = 0; - - FC_CHECK_RETURN_VOID(lport); - - spin_lock_irqsave(&lport->lport_state_lock, flag); - unf_lport_state_ma(lport, UNF_EVENT_LPORT_REMOTE_ACC); - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - /* Login with Name server: PLOGI */ - unf_lport_enter_sns_plogi(lport); - - unf_lport_enter_mns_plogi(lport); - - /* Physical Port */ - if (lport->root_lport == lport && - lport->act_topo == UNF_ACT_TOP_P2P_FABRIC) { - unf_linkup_all_vports(lport); - } -} - -void unf_fdisc_ob_callback(struct unf_xchg *xchg) -{ - /* Do recovery */ - struct unf_lport *unf_lport = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(xchg); - - spin_lock_irqsave(&xchg->xchg_state_lock, flag); - unf_lport = xchg->lport; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: FDISC send failed"); - - FC_CHECK_RETURN_VOID(unf_lport); - - /* Do L_Port error recovery */ - unf_lport_error_recovery(unf_lport); -} - -void unf_fdisc_callback(void *lport, void *rport, void *exch) -{ - /* Register to Name Server or Do recovery */ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_xchg *xchg = NULL; - struct unf_flogi_fdisc_payload *fdisc_pld = NULL; - ulong flag = 0; - u32 cmd = 0; - - unf_lport = (struct unf_lport *)lport; - unf_rport = (struct unf_rport *)rport; - xchg = (struct unf_xchg *)exch; - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(exch); - FC_CHECK_RETURN_VOID(xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr); - fdisc_pld = &xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->fdisc_acc.fdisc_payload; - if (xchg->byte_orders & UNF_BIT_2) - unf_big_end_to_cpu((u8 *)fdisc_pld, sizeof(struct unf_flogi_fdisc_payload)); - - cmd = fdisc_pld->cmnd; - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: FDISC response is (0x%x). 
Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", - cmd, unf_lport->port_id, unf_rport->nport_id, xchg->oxid); - unf_rport = unf_get_rport_by_nport_id(unf_lport, UNF_FC_FID_FLOGI); - unf_rport = unf_get_safe_rport(unf_lport, unf_rport, - UNF_RPORT_REUSE_ONLY, UNF_FC_FID_FLOGI); - if (!unf_rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) has no Rport", unf_lport->port_id); - return; - } - - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport->nport_id = UNF_FC_FID_FLOGI; - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - if ((cmd & UNF_ELS_CMND_HIGH_MASK) == UNF_ELS_CMND_ACC) { - /* Case for ACC */ - spin_lock_irqsave(&unf_lport->lport_state_lock, flag); - if (unf_lport->states != UNF_LPORT_ST_FLOGI_WAIT) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) receive Flogi/Fdisc ACC in state(0x%x)", - unf_lport->port_id, unf_lport->nport_id, unf_lport->states); - - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - return; - } - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - unf_lport_update_nport_id(unf_lport, xchg->sid); - unf_lport_update_time_params(unf_lport, fdisc_pld); - unf_register_to_switch(unf_lport); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: FDISC response is (0x%x). Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", - cmd, unf_lport->port_id, unf_rport->nport_id, xchg->oxid); - - /* Case for RJT: Do L_Port recovery */ - unf_lport_error_recovery(unf_lport); - } -} - -void unf_flogi_ob_callback(struct unf_xchg *xchg) -{ - /* Send FLOGI failed & Do L_Port recovery */ - struct unf_lport *unf_lport = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(xchg); - - /* Get L_port from exchange context */ - spin_lock_irqsave(&xchg->xchg_state_lock, flag); - unf_lport = xchg->lport; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); - FC_CHECK_RETURN_VOID(unf_lport); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) send FLOGI failed", - unf_lport->port_id); - - /* Check L_Port state */ - spin_lock_irqsave(&unf_lport->lport_state_lock, flag); - if (unf_lport->states != UNF_LPORT_ST_FLOGI_WAIT) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) send FLOGI failed with state(0x%x)", - unf_lport->port_id, unf_lport->nport_id, unf_lport->states); - - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - return; - } - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - /* Do L_Port error recovery */ - unf_lport_error_recovery(unf_lport); -} - -static void unf_lport_update_nport_id(struct unf_lport *lport, u32 nport_id) -{ - ulong flag = 0; - - FC_CHECK_RETURN_VOID(lport); - - spin_lock_irqsave(&lport->lport_state_lock, flag); - lport->nport_id = nport_id; - spin_unlock_irqrestore(&lport->lport_state_lock, flag); -} - -static void -unf_lport_update_time_params(struct unf_lport *lport, - struct unf_flogi_fdisc_payload *flogi_payload) -{ - ulong flag = 0; - u32 ed_tov = 0; - u32 ra_tov = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(flogi_payload); - - ed_tov = flogi_payload->fabric_parms.co_parms.e_d_tov; - ra_tov = flogi_payload->fabric_parms.co_parms.r_a_tov; - - spin_lock_irqsave(&lport->lport_state_lock, flag); - - /* FC-FS-3: 21.3.4, 21.3.5 */ - if (lport->act_topo == UNF_ACT_TOP_P2P_FABRIC || - lport->act_topo == UNF_ACT_TOP_PUBLIC_LOOP) { - lport->ed_tov = ed_tov; - lport->ra_tov = ra_tov; - } else { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_MAJOR, - "[info]Port(0x%x_0x%x) with topo(0x%x) no need 
to save time parameters", - lport->port_id, lport->nport_id, lport->act_topo); - } - - spin_unlock_irqrestore(&lport->lport_state_lock, flag); -} - -static void unf_rcv_flogi_acc(struct unf_lport *lport, struct unf_rport *rport, - struct unf_flogi_fdisc_payload *flogi_pld, - u32 nport_id, struct unf_xchg *xchg) -{ - /* PLOGI to Name server or remote port */ - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_rport = rport; - struct unf_flogi_fdisc_payload *unf_flogi_pld = flogi_pld; - struct unf_fabric_parm *fabric_params = NULL; - u64 port_name = 0; - u64 node_name = 0; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(flogi_pld); - - /* Check L_Port state: FLOGI_WAIT */ - spin_lock_irqsave(&unf_lport->lport_state_lock, flag); - if (unf_lport->states != UNF_LPORT_ST_FLOGI_WAIT) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[info]Port(0x%x_0x%x) receive FLOGI ACC with state(0x%x)", - unf_lport->port_id, unf_lport->nport_id, unf_lport->states); - - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - return; - } - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - fabric_params = &unf_flogi_pld->fabric_parms; - node_name = - (u64)(((u64)(fabric_params->high_node_name) << UNF_SHIFT_32) | - ((u64)(fabric_params->low_node_name))); - port_name = - (u64)(((u64)(fabric_params->high_port_name) << UNF_SHIFT_32) | - ((u64)(fabric_params->low_port_name))); - - /* flogi acc pyload class 3 service priority value */ - if (unf_lport->root_lport == unf_lport && unf_lport->qos_cs_ctrl && - fabric_params->cl_parms[ARRAY_INDEX_2].priority == UNF_PRIORITY_ENABLE) - unf_lport->priority = (bool)UNF_PRIORITY_ENABLE; - else - unf_lport->priority = (bool)UNF_PRIORITY_DISABLE; - - /* Save Flogi parameters */ - unf_save_fabric_params(unf_lport, unf_rport, fabric_params); - - if (UNF_CHECK_NPORT_FPORT_BIT(unf_flogi_pld) == UNF_N_PORT) { - /* P2P Mode */ - unf_lport_update_topo(unf_lport, UNF_ACT_TOP_P2P_DIRECT); - unf_login_with_rport_in_n2n(unf_lport, port_name, node_name); - } else { - /* for: - * UNF_ACT_TOP_PUBLIC_LOOP/UNF_ACT_TOP_P2P_FABRIC - * /UNF_TOP_P2P_MASK - */ - if (unf_lport->act_topo != UNF_ACT_TOP_PUBLIC_LOOP) - unf_lport_update_topo(unf_lport, UNF_ACT_TOP_P2P_FABRIC); - - unf_lport_update_nport_id(unf_lport, nport_id); - unf_lport_update_time_params(unf_lport, unf_flogi_pld); - - /* Save process both for Public loop & Fabric */ - unf_register_to_switch(unf_lport); - } -} - -static void unf_flogi_acc_com_process(struct unf_xchg *xchg) -{ - /* Maybe within interrupt or thread context */ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_flogi_fdisc_payload *flogi_pld = NULL; - u32 nport_id = 0; - u32 cmnd = 0; - ulong flags = 0; - struct unf_xchg *unf_xchg = xchg; - - FC_CHECK_RETURN_VOID(unf_xchg); - FC_CHECK_RETURN_VOID(unf_xchg->lport); - - unf_lport = unf_xchg->lport; - unf_rport = unf_xchg->rport; - flogi_pld = &unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->flogi_acc.flogi_payload; - cmnd = flogi_pld->cmnd; - - /* Get N_Port_ID & R_Port */ - /* Others: 0xFFFFFE */ - unf_rport = unf_get_rport_by_nport_id(unf_lport, UNF_FC_FID_FLOGI); - nport_id = UNF_FC_FID_FLOGI; - - /* Get Safe R_Port: reuse only */ - unf_rport = unf_get_safe_rport(unf_lport, unf_rport, UNF_RPORT_REUSE_ONLY, nport_id); - if (!unf_rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) can not allocate new Rport", unf_lport->port_id); - - return; - } - - 
spin_lock_irqsave(&unf_rport->rport_state_lock, flags); - unf_rport->nport_id = UNF_FC_FID_FLOGI; - - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags); - - /* Process FLOGI ACC or RJT */ - if ((cmnd & UNF_ELS_CMND_HIGH_MASK) == UNF_ELS_CMND_ACC) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: FLOGI response is(0x%x). Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", - cmnd, unf_lport->port_id, unf_rport->nport_id, unf_xchg->oxid); - - /* Case for ACC */ - unf_rcv_flogi_acc(unf_lport, unf_rport, flogi_pld, unf_xchg->sid, unf_xchg); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: FLOGI response is(0x%x). Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", - cmnd, unf_lport->port_id, unf_rport->nport_id, - unf_xchg->oxid); - - /* Case for RJT: do L_Port error recovery */ - unf_lport_error_recovery(unf_lport); - } -} - -static int unf_rcv_flogi_acc_async_callback(void *argc_in, void *argc_out) -{ - struct unf_xchg *xchg = (struct unf_xchg *)argc_in; - - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - unf_flogi_acc_com_process(xchg); - - unf_xchg_ref_dec(xchg, SFS_RESPONSE); - - return RETURN_OK; -} - -void unf_flogi_callback(void *lport, void *rport, void *xchg) -{ - /* Callback function for FLOGI ACC or RJT */ - struct unf_lport *unf_lport = (struct unf_lport *)lport; - struct unf_xchg *unf_xchg = (struct unf_xchg *)xchg; - struct unf_flogi_fdisc_payload *flogi_pld = NULL; - bool bbscn_enabled = false; - enum unf_act_topo act_topo = UNF_ACT_TOP_UNKNOWN; - bool switch2thread = false; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(xchg); - FC_CHECK_RETURN_VOID(unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr); - - unf_xchg->lport = lport; - flogi_pld = &unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->flogi_acc.flogi_payload; - - if (unf_xchg->byte_orders & UNF_BIT_2) - unf_big_end_to_cpu((u8 *)flogi_pld, sizeof(struct unf_flogi_fdisc_payload)); - - if (unf_lport->act_topo != UNF_ACT_TOP_PUBLIC_LOOP && - (UNF_CHECK_NPORT_FPORT_BIT(flogi_pld) == UNF_F_PORT)) - /* Get Top Mode (P2P_F) --->>> used for BBSCN */ - act_topo = UNF_ACT_TOP_P2P_FABRIC; - - bbscn_enabled = - unf_check_bbscn_is_enabled((u8)unf_lport->low_level_func.lport_cfg_items.bbscn, - (u8)UNF_GET_BB_SC_N_FROM_PARAMS(&flogi_pld->fabric_parms)); - if (act_topo == UNF_ACT_TOP_P2P_FABRIC && bbscn_enabled) { - /* BBSCN Enable or not --->>> used for Context change */ - unf_lport->bbscn_support = true; - switch2thread = true; - } - - if (switch2thread && unf_lport->root_lport == unf_lport) { - /* Wait for LR done sync: for Root Port */ - (void)unf_irq_process_switch2thread(unf_lport, unf_xchg, - unf_rcv_flogi_acc_async_callback); - } else { - /* Process FLOGI response directly */ - unf_flogi_acc_com_process(unf_xchg); - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ALL, - "[info]Port(0x%x) process FLOGI response: switch(%d) to thread done", - unf_lport->port_id, switch2thread); -} - -void unf_plogi_ob_callback(struct unf_xchg *xchg) -{ - /* Do L_Port or R_Port recovery */ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(xchg); - - spin_lock_irqsave(&xchg->xchg_state_lock, flag); - unf_lport = xchg->lport; - unf_rport = xchg->rport; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); - - FC_CHECK_RETURN_VOID(unf_lport); - FC_CHECK_RETURN_VOID(unf_rport); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) send PLOGI(0x%x_0x%x) to RPort(%p:0x%x_0x%x) failed", - 
unf_lport->port_id, unf_lport->nport_id, xchg->oxid, - xchg->rxid, unf_rport, unf_rport->rport_index, - unf_rport->nport_id); - - /* Start to recovery */ - if (unf_rport->nport_id > UNF_FC_FID_DOM_MGR) { - /* with Name server: R_Port is fabric --->>> L_Port error - * recovery - */ - unf_lport_error_recovery(unf_lport); - } else { - /* R_Port is not fabric --->>> R_Port error recovery */ - unf_rport_error_recovery(unf_rport); - } -} - -void unf_rcv_plogi_acc(struct unf_lport *lport, struct unf_rport *rport, - struct unf_lgn_parm *login_parms) -{ - /* PLOGI ACC: PRLI(non fabric) or RFT_ID(fabric) */ - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_rport = rport; - struct unf_lgn_parm *unf_login_parms = login_parms; - u64 node_name = 0; - u64 port_name = 0; - ulong flag = 0; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(login_parms); - - node_name = (u64)(((u64)(unf_login_parms->high_node_name) << UNF_SHIFT_32) | - ((u64)(unf_login_parms->low_node_name))); - port_name = (u64)(((u64)(unf_login_parms->high_port_name) << UNF_SHIFT_32) | - ((u64)(unf_login_parms->low_port_name))); - - /* ACC & Case for: R_Port is fabric (RFT_ID) */ - if (unf_rport->nport_id >= UNF_FC_FID_DOM_MGR) { - /* Check L_Port state */ - spin_lock_irqsave(&unf_lport->lport_state_lock, flag); - if (unf_lport->states != UNF_LPORT_ST_PLOGI_WAIT) { - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) receive PLOGI ACC with error state(0x%x)", - lport->port_id, unf_lport->states); - - return; - } - unf_lport_state_ma(unf_lport, UNF_EVENT_LPORT_REMOTE_ACC); - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - /* PLOGI parameters save */ - unf_save_plogi_params(unf_lport, unf_rport, unf_login_parms, ELS_ACC); - - /* Update R_Port WWPN & WWNN */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport->node_name = node_name; - unf_rport->port_name = port_name; - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - /* Start to Send RFT_ID */ - ret = unf_send_rft_id(unf_lport, unf_rport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) send RFT_ID failed", - lport->port_id); - - unf_lport_error_recovery(unf_lport); - } - } else { - /* ACC & Case for: R_Port is not fabric */ - if (unf_rport->options == UNF_PORT_MODE_UNKNOWN && - unf_rport->port_name != INVALID_WWPN) - unf_rport->options = unf_get_port_feature(port_name); - - /* Set Port Feature with BOTH: cancel */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport->node_name = node_name; - unf_rport->port_name = port_name; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]LOGIN: Port(0x%x)<---LS_ACC(DID:0x%x SID:0x%x) for PLOGI ACC with RPort state(0x%x) NodeName(0x%llx) E_D_TOV(%u)", - unf_lport->port_id, unf_lport->nport_id, - unf_rport->nport_id, unf_rport->rp_state, - unf_rport->node_name, unf_rport->ed_tov); - - if (unf_lport->act_topo == UNF_ACT_TOP_PRIVATE_LOOP && - (unf_rport->rp_state == UNF_RPORT_ST_PRLI_WAIT || - unf_rport->rp_state == UNF_RPORT_ST_READY)) { - /* Do nothing, return directly */ - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - return; - } - - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_ENTER_PRLI); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - /* PLOGI parameters save */ - unf_save_plogi_params(unf_lport, unf_rport, unf_login_parms, ELS_ACC); - - /* - * Need Delay 
to Send PRLI or not - * Used for: L_Port with INI mode & R_Port is not Fabric - */ - unf_check_rport_need_delay_prli(unf_lport, unf_rport, unf_rport->options); - - /* Do not care: Just used for L_Port only is TGT mode or R_Port - * only is INI mode - */ - unf_schedule_open_work(unf_lport, unf_rport); - } -} - -void unf_plogi_acc_com_process(struct unf_xchg *xchg) -{ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_xchg *unf_xchg = (struct unf_xchg *)xchg; - struct unf_plogi_payload *plogi_pld = NULL; - struct unf_lgn_parm *login_parms = NULL; - ulong flag = 0; - u64 port_name = 0; - u32 rport_nport_id = 0; - u32 cmnd = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(unf_xchg); - FC_CHECK_RETURN_VOID(unf_xchg->lport); - FC_CHECK_RETURN_VOID(unf_xchg->rport); - - unf_lport = unf_xchg->lport; - unf_rport = unf_xchg->rport; - rport_nport_id = unf_rport->nport_id; - plogi_pld = &unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->plogi_acc.payload; - login_parms = &plogi_pld->stparms; - cmnd = (plogi_pld->cmnd); - - if (UNF_ELS_CMND_ACC == (cmnd & UNF_ELS_CMND_HIGH_MASK)) { - /* Case for PLOGI ACC: Go to next stage */ - port_name = - (u64)(((u64)(login_parms->high_port_name) << UNF_SHIFT_32) | - ((u64)(login_parms->low_port_name))); - - /* Get (new) R_Port: 0xfffffc has same WWN with 0xfffcxx */ - unf_rport = unf_find_rport(unf_lport, rport_nport_id, port_name); - unf_rport = unf_get_safe_rport(unf_lport, unf_rport, - UNF_RPORT_REUSE_ONLY, rport_nport_id); - if (unlikely(!unf_rport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) alloc new RPort with wwpn(0x%llx) failed", - unf_lport->port_id, unf_lport->nport_id, port_name); - return; - } - - /* PLOGI parameters check */ - ret = unf_check_plogi_params(unf_lport, unf_rport, login_parms); - if (ret != RETURN_OK) - return; - - /* Update R_Port state */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport->nport_id = rport_nport_id; - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_ENTER_PLOGI); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - /* Start to process PLOGI ACC */ - unf_rcv_plogi_acc(unf_lport, unf_rport, login_parms); - } else { - /* Case for PLOGI RJT: L_Port or R_Port recovery */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x)<---RPort(0x%p) with LS_RJT(DID:0x%x SID:0x%x) for PLOGI", - unf_lport->port_id, unf_rport, unf_lport->nport_id, - unf_rport->nport_id); - - if (unf_rport->nport_id >= UNF_FC_FID_DOM_MGR) - unf_lport_error_recovery(unf_lport); - else - unf_rport_error_recovery(unf_rport); - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: PLOGI response(0x%x). 
Port(0x%x_0x%x)<---RPort(0x%x_0x%p) wwpn(0x%llx) OX_ID(0x%x)", - cmnd, unf_lport->port_id, unf_lport->nport_id, unf_rport->nport_id, - unf_rport, port_name, unf_xchg->oxid); -} - -static int unf_rcv_plogi_acc_async_callback(void *argc_in, void *argc_out) -{ - struct unf_xchg *xchg = (struct unf_xchg *)argc_in; - - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - unf_plogi_acc_com_process(xchg); - - unf_xchg_ref_dec(xchg, SFS_RESPONSE); - - return RETURN_OK; -} - -void unf_plogi_callback(void *lport, void *rport, void *xchg) -{ - struct unf_lport *unf_lport = (struct unf_lport *)lport; - struct unf_xchg *unf_xchg = (struct unf_xchg *)xchg; - struct unf_plogi_payload *plogi_pld = NULL; - struct unf_lgn_parm *login_parms = NULL; - bool bbscn_enabled = false; - bool switch2thread = false; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(xchg); - FC_CHECK_RETURN_VOID(unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr); - - plogi_pld = &unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->plogi_acc.payload; - login_parms = &plogi_pld->stparms; - unf_xchg->lport = lport; - - if (unf_xchg->byte_orders & UNF_BIT_2) - unf_big_end_to_cpu((u8 *)plogi_pld, sizeof(struct unf_plogi_payload)); - - bbscn_enabled = - unf_check_bbscn_is_enabled((u8)unf_lport->low_level_func.lport_cfg_items.bbscn, - (u8)UNF_GET_BB_SC_N_FROM_PARAMS(login_parms)); - if ((bbscn_enabled) && - unf_lport->act_topo == UNF_ACT_TOP_P2P_DIRECT) { - switch2thread = true; - unf_lport->bbscn_support = true; - } - - if (switch2thread && unf_lport->root_lport == unf_lport) { - /* Wait for LR done sync: just for ROOT Port */ - (void)unf_irq_process_switch2thread(unf_lport, unf_xchg, - unf_rcv_plogi_acc_async_callback); - } else { - unf_plogi_acc_com_process(unf_xchg); - } -} - -static void unf_logo_ob_callback(struct unf_xchg *xchg) -{ - struct unf_lport *lport = NULL; - struct unf_rport *rport = NULL; - struct unf_rport *old_rport = NULL; - struct unf_xchg *unf_xchg = NULL; - u32 nport_id = 0; - u32 logo_retry = 0; - u32 max_frame_size = 0; - u64 port_name = 0; - - FC_CHECK_RETURN_VOID(xchg); - unf_xchg = xchg; - old_rport = unf_xchg->rport; - logo_retry = old_rport->logo_retries; - max_frame_size = old_rport->max_frame_size; - port_name = old_rport->port_name; - unf_rport_enter_closing(old_rport); - - lport = unf_xchg->lport; - if (unf_is_lport_valid(lport) != RETURN_OK) - return; - - /* Get R_Port by exchange info: Init state */ - nport_id = unf_xchg->did; - rport = unf_get_rport_by_nport_id(lport, nport_id); - rport = unf_get_safe_rport(lport, rport, UNF_RPORT_REUSE_INIT, nport_id); - if (!rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) cannot allocate RPort", lport->port_id); - return; - } - - rport->logo_retries = logo_retry; - rport->max_frame_size = max_frame_size; - rport->port_name = port_name; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[info]LOGIN: Port(0x%x) received LOGO RSP timeout topo(0x%x) retries(%u)", - lport->port_id, lport->act_topo, rport->logo_retries); - - /* RCVD LOGO/PRLO & SEND LOGO: the same process */ - if (rport->logo_retries < UNF_MAX_RETRY_COUNT) { - /* <: retry (LOGIN or LOGO) if necessary */ - unf_process_rport_after_logo(lport, rport); - } else { - /* >=: Link down */ - unf_rport_immediate_link_down(lport, rport); - } -} - -static void unf_logo_callback(void *lport, void *rport, void *xchg) -{ - /* RCVD LOGO ACC/RJT: retry(LOGIN/LOGO) or link down immediately */ - struct unf_lport *unf_lport = (struct unf_lport *)lport; - struct unf_rport 
*unf_rport = NULL; - struct unf_rport *old_rport = NULL; - struct unf_xchg *unf_xchg = NULL; - struct unf_els_rjt *els_acc_rjt = NULL; - u32 cmnd = 0; - u32 nport_id = 0; - u32 logo_retry = 0; - u32 max_frame_size = 0; - u64 port_name = 0; - - FC_CHECK_RETURN_VOID(xchg); - - unf_xchg = (struct unf_xchg *)xchg; - old_rport = unf_xchg->rport; - - logo_retry = old_rport->logo_retries; - max_frame_size = old_rport->max_frame_size; - port_name = old_rport->port_name; - unf_rport_enter_closing(old_rport); - - if (unf_is_lport_valid(lport) != RETURN_OK) - return; - - if (!unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) - return; - - /* Get R_Port by exchange info: Init state */ - els_acc_rjt = &unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->els_rjt; - nport_id = unf_xchg->did; - unf_rport = unf_get_rport_by_nport_id(unf_lport, nport_id); - unf_rport = unf_get_safe_rport(unf_lport, unf_rport, UNF_RPORT_REUSE_INIT, nport_id); - - if (!unf_rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_WARN, "[warn]Port(0x%x) cannot allocate RPort", - unf_lport->port_id); - return; - } - - unf_rport->logo_retries = logo_retry; - unf_rport->max_frame_size = max_frame_size; - unf_rport->port_name = port_name; - cmnd = be32_to_cpu(els_acc_rjt->cmnd); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x) received LOGO RSP(0x%x),topo(0x%x) Port options(0x%x) RPort options(0x%x) retries(%u)", - unf_lport->port_id, (cmnd & UNF_ELS_CMND_HIGH_MASK), - unf_lport->act_topo, unf_lport->options, unf_rport->options, - unf_rport->logo_retries); - - /* RCVD LOGO/PRLO & SEND LOGO: the same process */ - if (unf_rport->logo_retries < UNF_MAX_RETRY_COUNT) { - /* <: retry (LOGIN or LOGO) if necessary */ - unf_process_rport_after_logo(unf_lport, unf_rport); - } else { - /* >=: Link down */ - unf_rport_immediate_link_down(unf_lport, unf_rport); - } -} - -void unf_prli_ob_callback(struct unf_xchg *xchg) -{ - /* Do R_Port recovery */ - struct unf_lport *lport = NULL; - struct unf_rport *rport = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(xchg); - - spin_lock_irqsave(&xchg->xchg_state_lock, flag); - lport = xchg->lport; - rport = xchg->rport; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flag); - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x_0x%x) RPort(0x%x) send PRLI failed and do recovery", - lport->port_id, lport->nport_id, rport->nport_id); - - /* Start to do R_Port error recovery */ - unf_rport_error_recovery(rport); -} - -void unf_prli_callback(void *lport, void *rport, void *xchg) -{ - /* RCVD PRLI RSP: ACC or RJT --->>> SCSI Link Up */ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_xchg *unf_xchg = NULL; - struct unf_prli_payload *prli_acc_pld = NULL; - ulong flag = 0; - u32 cmnd = 0; - u32 options = 0; - u32 fcp_conf = 0; - u32 rec_support = 0; - u32 task_retry_support = 0; - u32 retry_support = 0; - u32 tape_support = 0; - u32 fc4_type = 0; - enum unf_rport_login_state rport_state = UNF_RPORT_ST_INIT; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(xchg); - unf_lport = (struct unf_lport *)lport; - unf_rport = (struct unf_rport *)rport; - unf_xchg = (struct unf_xchg *)xchg; - - if (!unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) exchange(%p) entry is NULL", - unf_lport->port_id, unf_xchg); - return; - } - - /* Get PRLI ACC payload */ - prli_acc_pld = 
&unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->prli_acc.payload; - if (unf_xchg->byte_orders & UNF_BIT_2) { - /* Change to little End, About INI/TGT mode & confirm info */ - options = be32_to_cpu(prli_acc_pld->parms[ARRAY_INDEX_3]) & - (UNF_FC4_FRAME_PARM_3_TGT | UNF_FC4_FRAME_PARM_3_INI); - - cmnd = be32_to_cpu(prli_acc_pld->cmnd); - fcp_conf = be32_to_cpu(prli_acc_pld->parms[ARRAY_INDEX_3]) & - UNF_FC4_FRAME_PARM_3_CONF_ALLOW; - rec_support = be32_to_cpu(prli_acc_pld->parms[ARRAY_INDEX_3]) & - UNF_FC4_FRAME_PARM_3_REC_SUPPORT; - task_retry_support = be32_to_cpu(prli_acc_pld->parms[ARRAY_INDEX_3]) & - UNF_FC4_FRAME_PARM_3_TASK_RETRY_ID_SUPPORT; - retry_support = be32_to_cpu(prli_acc_pld->parms[ARRAY_INDEX_3]) & - UNF_FC4_FRAME_PARM_3_RETRY_SUPPORT; - fc4_type = be32_to_cpu(prli_acc_pld->parms[ARRAY_INDEX_0]) >> - UNF_FC4_TYPE_SHIFT & UNF_FC4_TYPE_MASK; - } else { - options = (prli_acc_pld->parms[ARRAY_INDEX_3]) & - (UNF_FC4_FRAME_PARM_3_TGT | UNF_FC4_FRAME_PARM_3_INI); - - cmnd = (prli_acc_pld->cmnd); - fcp_conf = prli_acc_pld->parms[ARRAY_INDEX_3] & UNF_FC4_FRAME_PARM_3_CONF_ALLOW; - rec_support = prli_acc_pld->parms[ARRAY_INDEX_3] & UNF_FC4_FRAME_PARM_3_REC_SUPPORT; - task_retry_support = prli_acc_pld->parms[ARRAY_INDEX_3] & - UNF_FC4_FRAME_PARM_3_TASK_RETRY_ID_SUPPORT; - retry_support = prli_acc_pld->parms[ARRAY_INDEX_3] & - UNF_FC4_FRAME_PARM_3_RETRY_SUPPORT; - fc4_type = prli_acc_pld->parms[ARRAY_INDEX_0] >> UNF_FC4_TYPE_SHIFT; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: PRLI RSP: RPort(0x%x) parameter-3(0x%x) option(0x%x) cmd(0x%x) uiRecSupport:%u", - unf_rport->nport_id, prli_acc_pld->parms[ARRAY_INDEX_3], - options, cmnd, rec_support); - - /* PRLI ACC: R_Port READY & Report R_Port Link Up */ - if (UNF_ELS_CMND_ACC == (cmnd & UNF_ELS_CMND_HIGH_MASK)) { - /* Update R_Port options(INI/TGT/BOTH) */ - unf_rport->options = options; - - unf_update_port_feature(unf_rport->port_name, unf_rport->options); - - /* NOTE: R_Port only with INI mode, send LOGO */ - if (unf_rport->options == UNF_PORT_MODE_INI) { - /* Update R_Port state: LOGO */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_LOGO); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - /* NOTE: Start to Send LOGO */ - unf_rport_enter_logo(unf_lport, unf_rport); - return; - } - - /* About confirm */ - if (fcp_conf && unf_lport->low_level_func.lport_cfg_items.fcp_conf) { - unf_rport->fcp_conf_needed = true; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x_0x%x) FCP config is need for RPort(0x%x)", - unf_lport->port_id, unf_lport->nport_id, - unf_rport->nport_id); - } - - tape_support = (rec_support && task_retry_support && retry_support); - if (tape_support && unf_lport->low_level_func.lport_cfg_items.tape_support) { - unf_rport->tape_support_needed = true; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[info]Port(0x%x_0x%x) Rec is enabled for RPort(0x%x)", - unf_lport->port_id, unf_lport->nport_id, - unf_rport->nport_id); - } - - /* Update R_Port state: READY */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_READY); - rport_state = unf_rport->rp_state; - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - /* Report R_Port online (Link Up) event to SCSI */ - if (rport_state == UNF_RPORT_ST_READY) { - unf_rport->logo_retries = 0; - unf_update_lport_state_by_linkup_event(unf_lport, unf_rport, - unf_rport->options); - } - } else { - /* PRLI RJT: Do 
R_Port error recovery */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Port(0x%x)<---LS_RJT(DID:0x%x SID:0x%x) for PRLI. RPort(0x%p) OX_ID(0x%x)", - unf_lport->port_id, unf_lport->nport_id, - unf_rport->nport_id, unf_rport, unf_xchg->oxid); - - unf_rport_error_recovery(unf_rport); - } -} - -static void unf_rrq_callback(void *lport, void *rport, void *xchg) -{ - /* Release I/O */ - struct unf_lport *unf_lport = NULL; - struct unf_xchg *unf_xchg = NULL; - struct unf_xchg *io_xchg = NULL; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(xchg); - - unf_lport = (struct unf_lport *)lport; - unf_xchg = (struct unf_xchg *)xchg; - - if (!unf_xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) exchange(0x%p) SfsEntryPtr is NULL", - unf_lport->port_id, unf_xchg); - return; - } - - io_xchg = (struct unf_xchg *)unf_xchg->io_xchg; - if (!io_xchg) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) IO exchange is NULL. RRQ cb sfs xchg(0x%p) tag(0x%x)", - unf_lport->port_id, unf_xchg, unf_xchg->hotpooltag); - return; - } - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]Port(0x%x) release IO exch(0x%p) tag(0x%x). RRQ cb sfs xchg(0x%p) tag(0x%x)", - unf_lport->port_id, unf_xchg->io_xchg, io_xchg->hotpooltag, - unf_xchg, unf_xchg->hotpooltag); - - /* After RRQ Success, Free xid */ - unf_notify_chip_free_xid(io_xchg); - - /* NOTE: release I/O exchange resource */ - unf_xchg_ref_dec(io_xchg, XCHG_ALLOC); -} - -static void unf_rrq_ob_callback(struct unf_xchg *xchg) -{ - /* Release I/O */ - struct unf_xchg *unf_xchg = NULL; - struct unf_xchg *io_xchg = NULL; - - unf_xchg = (struct unf_xchg *)xchg; - if (!unf_xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_WARN, "[warn]Exchange can't be NULL"); - return; - } - - io_xchg = (struct unf_xchg *)unf_xchg->io_xchg; - if (!io_xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]IO exchange can't be NULL with Sfs exch(0x%p) tag(0x%x)", - unf_xchg, unf_xchg->hotpooltag); - return; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[info]send RRQ failed: SFS exch(0x%p) tag(0x%x) exch(0x%p) tag(0x%x) OXID_RXID(0x%x_0x%x) SID_DID(0x%x_0x%x)", - unf_xchg, unf_xchg->hotpooltag, io_xchg, io_xchg->hotpooltag, - io_xchg->oxid, io_xchg->rxid, io_xchg->sid, io_xchg->did); - - /* If RRQ failure or timepout, Free xid. 
*/ - unf_notify_chip_free_xid(io_xchg); - - /* NOTE: Free I/O exchange resource */ - unf_xchg_ref_dec(io_xchg, XCHG_ALLOC); -} diff --git a/drivers/scsi/spfc/common/unf_ls.h b/drivers/scsi/spfc/common/unf_ls.h deleted file mode 100644 index 5fdd9e1a258d..000000000000 --- a/drivers/scsi/spfc/common/unf_ls.h +++ /dev/null @@ -1,61 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_LS_H -#define UNF_LS_H - -#include "unf_type.h" -#include "unf_exchg.h" -#include "unf_rport.h" - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -u32 unf_send_adisc(struct unf_lport *lport, struct unf_rport *rport); -u32 unf_send_pdisc(struct unf_lport *lport, struct unf_rport *rport); -u32 unf_send_flogi(struct unf_lport *lport, struct unf_rport *rport); -u32 unf_send_fdisc(struct unf_lport *lport, struct unf_rport *rport); -u32 unf_send_plogi(struct unf_lport *lport, struct unf_rport *rport); -u32 unf_send_prli(struct unf_lport *lport, struct unf_rport *rport, - u32 cmnd_code); -u32 unf_send_prlo(struct unf_lport *lport, struct unf_rport *rport); -u32 unf_send_logo(struct unf_lport *lport, struct unf_rport *rport); -u32 unf_send_logo_by_did(struct unf_lport *lport, u32 did); -u32 unf_send_echo(struct unf_lport *lport, struct unf_rport *rport, u32 *time); -u32 unf_send_plogi_rjt_by_did(struct unf_lport *lport, u32 did); -u32 unf_send_rrq(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg); -void unf_flogi_ob_callback(struct unf_xchg *xchg); -void unf_flogi_callback(void *lport, void *rport, void *xchg); -void unf_fdisc_ob_callback(struct unf_xchg *xchg); -void unf_fdisc_callback(void *lport, void *rport, void *xchg); - -void unf_plogi_ob_callback(struct unf_xchg *xchg); -void unf_plogi_callback(void *lport, void *rport, void *xchg); -void unf_prli_ob_callback(struct unf_xchg *xchg); -void unf_prli_callback(void *lport, void *rport, void *xchg); -u32 unf_flogi_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg); -u32 unf_plogi_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg); -u32 unf_rec_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg); -u32 unf_prli_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg); -u32 unf_prlo_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg); -u32 unf_rscn_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg); -u32 unf_logo_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg); -u32 unf_echo_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg); -u32 unf_pdisc_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg); -u32 unf_send_pdisc_rjt(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *xchg); -u32 unf_adisc_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg); -u32 unf_rrq_handler(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg); -u32 unf_send_rec(struct unf_lport *lport, struct unf_rport *rport, - struct unf_xchg *io_xchg); - -u32 unf_low_level_bb_scn(struct unf_lport *lport); -typedef int (*unf_event_task)(void *arg_in, void *arg_out); - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -#endif /* __UNF_SERVICE_H__ */ diff --git a/drivers/scsi/spfc/common/unf_npiv.c b/drivers/scsi/spfc/common/unf_npiv.c deleted file mode 100644 index 0d441f1c9e06..000000000000 --- a/drivers/scsi/spfc/common/unf_npiv.c +++ /dev/null @@ -1,1005 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - 
-#include "unf_npiv.h" -#include "unf_log.h" -#include "unf_rport.h" -#include "unf_exchg.h" -#include "unf_portman.h" -#include "unf_npiv_portman.h" - -#define UNF_DELETE_VPORT_MAX_WAIT_TIME_MS 60000 - -u32 unf_init_vport_pool(struct unf_lport *lport) -{ - u32 ret = RETURN_OK; - u32 i; - u16 vport_cnt = 0; - struct unf_lport *vport = NULL; - struct unf_vport_pool *vport_pool = NULL; - u32 vport_pool_size; - ulong flags = 0; - - FC_CHECK_RETURN_VALUE(lport, RETURN_ERROR); - - UNF_TOU16_CHECK(vport_cnt, lport->low_level_func.support_max_npiv_num, - return RETURN_ERROR); - if (vport_cnt == 0) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Port(0x%x) do not support NPIV", - lport->port_id); - - return RETURN_OK; - } - - vport_pool_size = sizeof(struct unf_vport_pool) + sizeof(struct unf_lport *) * vport_cnt; - lport->vport_pool = vmalloc(vport_pool_size); - if (!lport->vport_pool) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) cannot allocate vport pool", - lport->port_id); - - return RETURN_ERROR; - } - memset(lport->vport_pool, 0, vport_pool_size); - vport_pool = lport->vport_pool; - vport_pool->vport_pool_count = vport_cnt; - vport_pool->vport_pool_completion = NULL; - spin_lock_init(&vport_pool->vport_pool_lock); - INIT_LIST_HEAD(&vport_pool->list_vport_pool); - - vport_pool->vport_pool_addr = - vmalloc((size_t)(vport_cnt * sizeof(struct unf_lport))); - if (!vport_pool->vport_pool_addr) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) cannot allocate vport pool address", - lport->port_id); - vfree(lport->vport_pool); - lport->vport_pool = NULL; - - return RETURN_ERROR; - } - - memset(vport_pool->vport_pool_addr, 0, - vport_cnt * sizeof(struct unf_lport)); - vport = (struct unf_lport *)vport_pool->vport_pool_addr; - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - for (i = 0; i < vport_cnt; i++) { - list_add_tail(&vport->entry_vport, &vport_pool->list_vport_pool); - vport++; - } - - vport_pool->slab_next_index = 0; - vport_pool->slab_total_sum = vport_cnt; - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); - - return ret; -} - -void unf_free_vport_pool(struct unf_lport *lport) -{ - struct unf_vport_pool *vport_pool = NULL; - bool wait = false; - ulong flag = 0; - u32 remain = 0; - struct completion vport_pool_completion; - - init_completion(&vport_pool_completion); - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(lport->vport_pool); - vport_pool = lport->vport_pool; - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); - - if (vport_pool->slab_total_sum != vport_pool->vport_pool_count) { - vport_pool->vport_pool_completion = &vport_pool_completion; - remain = vport_pool->slab_total_sum - vport_pool->vport_pool_count; - wait = true; - } - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - - if (wait) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) begin to wait for vport pool completion remain(0x%x)", - lport->port_id, remain); - - wait_for_completion(vport_pool->vport_pool_completion); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) wait for vport pool completion end", - lport->port_id); - spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); - vport_pool->vport_pool_completion = NULL; - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - } - - if (lport->vport_pool->vport_pool_addr) { - vfree(lport->vport_pool->vport_pool_addr); - lport->vport_pool->vport_pool_addr = NULL; - } - - vfree(lport->vport_pool); - lport->vport_pool = NULL; -} - -struct unf_lport 
*unf_get_vport_by_slab_index(struct unf_vport_pool *vport_pool, - u16 slab_index) -{ - FC_CHECK_RETURN_VALUE(vport_pool, NULL); - - return vport_pool->vport_slab[slab_index]; -} - -static inline void unf_vport_pool_slab_set(struct unf_vport_pool *vport_pool, - u16 slab_index, - struct unf_lport *vport) -{ - FC_CHECK_RETURN_VOID(vport_pool); - - vport_pool->vport_slab[slab_index] = vport; -} - -u32 unf_alloc_vp_index(struct unf_vport_pool *vport_pool, - struct unf_lport *vport, u16 vpid) -{ - u16 slab_index; - ulong flags = 0; - - FC_CHECK_RETURN_VALUE(vport_pool, RETURN_ERROR); - FC_CHECK_RETURN_VALUE(vport, RETURN_ERROR); - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - if (vpid == 0) { - slab_index = vport_pool->slab_next_index; - while (unf_get_vport_by_slab_index(vport_pool, slab_index)) { - slab_index = (slab_index + 1) % vport_pool->slab_total_sum; - - if (vport_pool->slab_next_index == slab_index) { - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]VPort pool has no slab "); - - return RETURN_ERROR; - } - } - } else { - slab_index = vpid - 1; - if (unf_get_vport_by_slab_index(vport_pool, slab_index)) { - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, - UNF_WARN, - "[warn]VPort Index(0x%x) is occupy", vpid); - - return RETURN_ERROR; - } - } - - unf_vport_pool_slab_set(vport_pool, slab_index, vport); - - vport_pool->slab_next_index = (slab_index + 1) % vport_pool->slab_total_sum; - - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); - - spin_lock_irqsave(&vport->lport_state_lock, flags); - vport->vp_index = slab_index + 1; - spin_unlock_irqrestore(&vport->lport_state_lock, flags); - - return RETURN_OK; -} - -void unf_free_vp_index(struct unf_vport_pool *vport_pool, - struct unf_lport *vport) -{ - ulong flags = 0; - - FC_CHECK_RETURN_VOID(vport_pool); - FC_CHECK_RETURN_VOID(vport); - - if (vport->vp_index == 0 || - vport->vp_index > vport_pool->slab_total_sum) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "Input vpoot index(0x%x) is beyond the normal range, min(0x1), max(0x%x).", - vport->vp_index, vport_pool->slab_total_sum); - return; - } - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - unf_vport_pool_slab_set(vport_pool, vport->vp_index - 1, - NULL); /* SlabIndex=VpIndex-1 */ - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); - - spin_lock_irqsave(&vport->lport_state_lock, flags); - vport->vp_index = INVALID_VALUE16; - spin_unlock_irqrestore(&vport->lport_state_lock, flags); -} - -struct unf_lport *unf_get_free_vport(struct unf_lport *lport) -{ - struct unf_lport *vport = NULL; - struct list_head *list_head = NULL; - struct unf_vport_pool *vport_pool = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, NULL); - FC_CHECK_RETURN_VALUE(lport->vport_pool, NULL); - - vport_pool = lport->vport_pool; - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); - if (!list_empty(&vport_pool->list_vport_pool)) { - list_head = UNF_OS_LIST_NEXT(&vport_pool->list_vport_pool); - list_del(list_head); - vport_pool->vport_pool_count--; - list_add_tail(list_head, &lport->list_vports_head); - vport = list_entry(list_head, struct unf_lport, entry_vport); - } else { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]LPort(0x%x)'s vport pool is empty", lport->port_id); - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - - return NULL; - } - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - - return vport; -} - -void 
unf_vport_back_to_pool(void *vport) -{ - struct unf_lport *unf_lport = NULL; - struct unf_lport *unf_vport = NULL; - struct list_head *list = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(vport); - unf_vport = vport; - unf_lport = (struct unf_lport *)(unf_vport->root_lport); - FC_CHECK_RETURN_VOID(unf_lport); - FC_CHECK_RETURN_VOID(unf_lport->vport_pool); - - unf_free_vp_index(unf_lport->vport_pool, unf_vport); - - spin_lock_irqsave(&unf_lport->vport_pool->vport_pool_lock, flag); - - list = &unf_vport->entry_vport; - list_del(list); - list_add_tail(list, &unf_lport->vport_pool->list_vport_pool); - unf_lport->vport_pool->vport_pool_count++; - - spin_unlock_irqrestore(&unf_lport->vport_pool->vport_pool_lock, flag); -} - -void unf_init_vport_from_lport(struct unf_lport *vport, struct unf_lport *lport) -{ - FC_CHECK_RETURN_VOID(vport); - FC_CHECK_RETURN_VOID(lport); - - vport->port_type = lport->port_type; - vport->fc_port = lport->fc_port; - vport->act_topo = lport->act_topo; - vport->root_lport = lport; - vport->unf_qualify_rport = lport->unf_qualify_rport; - vport->link_event_wq = lport->link_event_wq; - vport->xchg_wq = lport->xchg_wq; - - memcpy(&vport->xchg_mgr_temp, &lport->xchg_mgr_temp, - sizeof(struct unf_cm_xchg_mgr_template)); - - memcpy(&vport->event_mgr, &lport->event_mgr, sizeof(struct unf_event_mgr)); - - memset(&vport->lport_mgr_temp, 0, sizeof(struct unf_cm_lport_template)); - - memcpy(&vport->low_level_func, &lport->low_level_func, - sizeof(struct unf_low_level_functioon_op)); -} - -void unf_check_vport_pool_status(struct unf_lport *lport) -{ - struct unf_vport_pool *vport_pool = NULL; - ulong flags = 0; - - FC_CHECK_RETURN_VOID(lport); - vport_pool = lport->vport_pool; - FC_CHECK_RETURN_VOID(vport_pool); - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - - if (vport_pool->vport_pool_completion && - vport_pool->slab_total_sum == vport_pool->vport_pool_count) { - complete(vport_pool->vport_pool_completion); - } - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); -} - -void unf_vport_fabric_logo(struct unf_lport *vport) -{ - struct unf_rport *unf_rport = NULL; - ulong flag = 0; - - unf_rport = unf_get_rport_by_nport_id(vport, UNF_FC_FID_FLOGI); - FC_CHECK_RETURN_VOID(unf_rport); - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_LOGO); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - unf_rport_enter_logo(vport, unf_rport); -} - -void unf_vport_deinit(void *vport) -{ - struct unf_lport *unf_vport = NULL; - - FC_CHECK_RETURN_VOID(vport); - unf_vport = (struct unf_lport *)vport; - - unf_unregister_scsi_host(unf_vport); - - unf_disc_mgr_destroy(unf_vport); - - unf_release_xchg_mgr_temp(unf_vport); - - unf_release_vport_mgr_temp(unf_vport); - - unf_destroy_scsi_id_table(unf_vport); - - unf_lport_release_lw_funop(unf_vport); - unf_vport->fc_port = NULL; - unf_vport->vport = NULL; - - if (unf_vport->lport_free_completion) { - complete(unf_vport->lport_free_completion); - } else { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]VPort(0x%x) point(0x%p) completion free function is NULL", - unf_vport->port_id, unf_vport); - dump_stack(); - } -} - -void unf_vport_ref_dec(struct unf_lport *vport) -{ - FC_CHECK_RETURN_VOID(vport); - - if (atomic_dec_and_test(&vport->port_ref_cnt)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]VPort(0x%x) point(0x%p) reference count is 0 and freevport", - vport->port_id, vport); - - unf_vport_deinit(vport); - } -} - -u32 unf_vport_init(void *vport) -{ - 
struct unf_lport *unf_vport = NULL; - - FC_CHECK_RETURN_VALUE(vport, RETURN_ERROR); - unf_vport = (struct unf_lport *)vport; - - unf_vport->options = UNF_PORT_MODE_INI; - unf_vport->nport_id = 0; - - if (unf_init_scsi_id_table(unf_vport) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Vport(0x%x) can not initialize SCSI ID table", - unf_vport->port_id); - - return RETURN_ERROR; - } - - if (unf_init_disc_mgr(unf_vport) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Vport(0x%x) can not initialize discover manager", - unf_vport->port_id); - unf_destroy_scsi_id_table(unf_vport); - - return RETURN_ERROR; - } - - if (unf_register_scsi_host(unf_vport) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Vport(0x%x) vport can not register SCSI host", - unf_vport->port_id); - unf_disc_mgr_destroy(unf_vport); - unf_destroy_scsi_id_table(unf_vport); - - return RETURN_ERROR; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "[event]Vport(0x%x) Create succeed with wwpn(0x%llx)", - unf_vport->port_id, unf_vport->port_name); - - return RETURN_OK; -} - -void unf_vport_remove(void *vport) -{ - struct unf_lport *unf_vport = NULL; - struct unf_lport *unf_lport = NULL; - struct completion port_free_completion; - - init_completion(&port_free_completion); - FC_CHECK_RETURN_VOID(vport); - unf_vport = (struct unf_lport *)vport; - unf_lport = (struct unf_lport *)(unf_vport->root_lport); - unf_vport->lport_free_completion = &port_free_completion; - - unf_set_lport_removing(unf_vport); - - unf_vport_ref_dec(unf_vport); - - wait_for_completion(unf_vport->lport_free_completion); - unf_vport_back_to_pool(unf_vport); - - unf_check_vport_pool_status(unf_lport); -} - -u32 unf_npiv_conf(u32 port_id, u64 wwpn, enum unf_rport_qos_level qos_level) -{ -#define VPORT_WWN_MASK 0xff00ffffffffffff -#define VPORT_WWN_SHIFT 48 - - struct fc_vport_identifiers vid = {0}; - struct Scsi_Host *host = NULL; - struct unf_lport *unf_lport = NULL; - struct unf_lport *unf_vport = NULL; - u16 vport_id = 0; - - unf_lport = unf_find_lport_by_port_id(port_id); - if (!unf_lport) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Cannot find LPort by (0x%x).", port_id); - - return RETURN_ERROR; - } - - unf_vport = unf_cm_lookup_vport_by_wwpn(unf_lport, wwpn); - if (unf_vport) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Port(0x%x) has find vport with wwpn(0x%llx), can't create again", - unf_lport->port_id, wwpn); - - return RETURN_ERROR; - } - - unf_vport = unf_get_free_vport(unf_lport); - if (!unf_vport) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Can not get free vport from pool"); - - return RETURN_ERROR; - } - - unf_init_port_parms(unf_vport); - unf_init_vport_from_lport(unf_vport, unf_lport); - - if ((unf_lport->port_name & VPORT_WWN_MASK) == (wwpn & VPORT_WWN_MASK)) { - vport_id = (wwpn & ~VPORT_WWN_MASK) >> VPORT_WWN_SHIFT; - if (vport_id == 0) - vport_id = (unf_lport->port_name & ~VPORT_WWN_MASK) >> VPORT_WWN_SHIFT; - } - - if (unf_alloc_vp_index(unf_lport->vport_pool, unf_vport, vport_id) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Vport can not allocate vport index"); - unf_vport_back_to_pool(unf_vport); - - return RETURN_ERROR; - } - unf_vport->port_id = (((u32)unf_vport->vp_index) << PORTID_VPINDEX_SHIT) | - unf_lport->port_id; - - vid.roles = FC_PORT_ROLE_FCP_INITIATOR; - vid.vport_type = FC_PORTTYPE_NPIV; - vid.disable = false; - vid.node_name = unf_lport->node_name; - - if (wwpn) { - vid.port_name = wwpn; - } else { - if ((unf_lport->port_name & 
~VPORT_WWN_MASK) >> VPORT_WWN_SHIFT != - unf_vport->vp_index) { - vid.port_name = (unf_lport->port_name & VPORT_WWN_MASK) | - (((u64)unf_vport->vp_index) << VPORT_WWN_SHIFT); - } else { - vid.port_name = (unf_lport->port_name & VPORT_WWN_MASK); - } - } - - unf_vport->port_name = vid.port_name; - - host = unf_lport->host_info.host; - - if (!fc_vport_create(host, 0, &vid)) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) Cannot Failed to create vport wwpn=%llx", - unf_lport->port_id, vid.port_name); - - unf_vport_back_to_pool(unf_vport); - - return RETURN_ERROR; - } - - unf_vport->qos_level = qos_level; - return RETURN_OK; -} - -struct unf_lport *unf_creat_vport(struct unf_lport *lport, - struct vport_config *vport_config) -{ - u32 ret = RETURN_OK; - struct unf_lport *unf_lport = NULL; - struct unf_lport *vport = NULL; - enum unf_act_topo lport_topo; - enum unf_lport_login_state lport_state; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, NULL); - FC_CHECK_RETURN_VALUE(vport_config, NULL); - - if (vport_config->port_mode != FC_PORT_ROLE_FCP_INITIATOR) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Only support INITIATOR port mode(0x%x)", - vport_config->port_mode); - - return NULL; - } - unf_lport = lport; - - if (unf_lport->root_lport != unf_lport) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) not root port return", - unf_lport->port_id); - - return NULL; - } - - vport = unf_cm_lookup_vport_by_wwpn(unf_lport, vport_config->port_name); - if (!vport) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Port(0x%x) can not find vport with wwpn(0x%llx)", - unf_lport->port_id, vport_config->port_name); - - return NULL; - } - - ret = unf_vport_init(vport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]VPort(0x%x) can not initialize vport", - vport->port_id); - - return NULL; - } - - spin_lock_irqsave(&unf_lport->lport_state_lock, flag); - lport_topo = unf_lport->act_topo; - lport_state = unf_lport->states; - - vport_config->node_name = unf_lport->node_name; - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - vport->port_name = vport_config->port_name; - vport->node_name = vport_config->node_name; - - if (lport_topo == UNF_ACT_TOP_P2P_FABRIC && - lport_state >= UNF_LPORT_ST_PLOGI_WAIT && - lport_state <= UNF_LPORT_ST_READY) { - vport->link_up = unf_lport->link_up; - (void)unf_lport_login(vport, lport_topo); - } - - return vport; -} - -u32 unf_drop_vport(struct unf_lport *vport) -{ - u32 ret = RETURN_ERROR; - struct fc_vport *unf_vport = NULL; - - FC_CHECK_RETURN_VALUE(vport, RETURN_ERROR); - - unf_vport = vport->vport; - if (!unf_vport) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]VPort(0x%x) find vport in scsi is NULL", - vport->port_id); - - return ret; - } - - ret = fc_vport_terminate(unf_vport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]VPort(0x%x) terminate vport(%p) in scsi failed", - vport->port_id, unf_vport); - - return ret; - } - return ret; -} - -u32 unf_delete_vport(u32 port_id, u32 vp_index) -{ - struct unf_lport *unf_lport = NULL; - u16 unf_vp_index = 0; - struct unf_lport *vport = NULL; - - unf_lport = unf_find_lport_by_port_id(port_id); - if (!unf_lport) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) can not be found by portid", port_id); - - return RETURN_ERROR; - } - - if (atomic_read(&unf_lport->lport_no_operate_flag) == UNF_LPORT_NOP) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) is in NOP, destroy all vports function will be 
called", - unf_lport->port_id); - - return RETURN_OK; - } - - UNF_TOU16_CHECK(unf_vp_index, vp_index, return RETURN_ERROR); - vport = unf_cm_lookup_vport_by_vp_index(unf_lport, unf_vp_index); - if (!vport) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Can not lookup VPort by VPort index(0x%x)", - unf_vp_index); - - return RETURN_ERROR; - } - - return unf_drop_vport(vport); -} - -void unf_vport_abort_all_sfs_exch(struct unf_lport *vport) -{ - struct unf_xchg_hot_pool *hot_pool = NULL; - struct list_head *xchg_node = NULL; - struct list_head *next_xchg_node = NULL; - struct unf_xchg *exch = NULL; - ulong pool_lock_flags = 0; - ulong exch_lock_flags = 0; - u32 i; - - FC_CHECK_RETURN_VOID(vport); - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - hot_pool = unf_get_hot_pool_by_lport((struct unf_lport *)(vport->root_lport), i); - if (unlikely(!hot_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) hot pool is NULL", - ((struct unf_lport *)(vport->root_lport))->port_id); - continue; - } - - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, pool_lock_flags); - list_for_each_safe(xchg_node, next_xchg_node, &hot_pool->sfs_busylist) { - exch = list_entry(xchg_node, struct unf_xchg, list_xchg_entry); - spin_lock_irqsave(&exch->xchg_state_lock, exch_lock_flags); - if (vport == exch->lport && (atomic_read(&exch->ref_cnt) > 0)) { - exch->io_state |= TGT_IO_STATE_ABORT; - spin_unlock_irqrestore(&exch->xchg_state_lock, exch_lock_flags); - unf_disc_ctrl_size_inc(vport, exch->cmnd_code); - /* Transfer exch to destroy chain */ - list_del(xchg_node); - list_add_tail(xchg_node, &hot_pool->list_destroy_xchg); - } else { - spin_unlock_irqrestore(&exch->xchg_state_lock, exch_lock_flags); - } - } - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, pool_lock_flags); - } -} - -void unf_vport_abort_ini_io_exch(struct unf_lport *vport) -{ - struct unf_xchg_hot_pool *hot_pool = NULL; - struct list_head *xchg_node = NULL; - struct list_head *next_xchg_node = NULL; - struct unf_xchg *exch = NULL; - ulong pool_lock_flags = 0; - ulong exch_lock_flags = 0; - u32 i; - - FC_CHECK_RETURN_VOID(vport); - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - hot_pool = unf_get_hot_pool_by_lport((struct unf_lport *)(vport->root_lport), i); - if (unlikely(!hot_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) MgrIdex %u hot pool is NULL", - ((struct unf_lport *)(vport->root_lport))->port_id, i); - continue; - } - - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, pool_lock_flags); - list_for_each_safe(xchg_node, next_xchg_node, &hot_pool->ini_busylist) { - exch = list_entry(xchg_node, struct unf_xchg, list_xchg_entry); - - if (vport == exch->lport && atomic_read(&exch->ref_cnt) > 0) { - /* Transfer exch to destroy chain */ - list_del(xchg_node); - list_add_tail(xchg_node, &hot_pool->list_destroy_xchg); - - spin_lock_irqsave(&exch->xchg_state_lock, exch_lock_flags); - exch->io_state |= INI_IO_STATE_DRABORT; - spin_unlock_irqrestore(&exch->xchg_state_lock, exch_lock_flags); - } - } - - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, pool_lock_flags); - } -} - -void unf_vport_abort_exch(struct unf_lport *vport) -{ - FC_CHECK_RETURN_VOID(vport); - - unf_vport_abort_all_sfs_exch(vport); - - unf_vport_abort_ini_io_exch(vport); -} - -u32 unf_vport_wait_all_exch_removed(struct unf_lport *vport) -{ -#define UNF_WAIT_EXCH_REMOVE_ONE_TIME_MS 1000 - struct unf_xchg_hot_pool *hot_pool = NULL; - struct list_head *xchg_node = NULL; - struct list_head *next_xchg_node = NULL; - struct unf_xchg *exch = NULL; - u32 
vport_uses = 0; - ulong flags = 0; - u32 wait_timeout = 0; - u32 i = 0; - - FC_CHECK_RETURN_VALUE(vport, RETURN_ERROR); - - while (1) { - vport_uses = 0; - - for (i = 0; i < UNF_EXCHG_MGR_NUM; i++) { - hot_pool = - unf_get_hot_pool_by_lport((struct unf_lport *)(vport->root_lport), i); - if (unlikely(!hot_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) hot Pool is NULL", - ((struct unf_lport *)(vport->root_lport))->port_id); - - continue; - } - - spin_lock_irqsave(&hot_pool->xchg_hotpool_lock, flags); - list_for_each_safe(xchg_node, next_xchg_node, - &hot_pool->list_destroy_xchg) { - exch = list_entry(xchg_node, struct unf_xchg, list_xchg_entry); - - if (exch->lport != vport) - continue; - vport_uses++; - if (wait_timeout >= - UNF_DELETE_VPORT_MAX_WAIT_TIME_MS) { - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ERR, - "[error]VPort(0x%x) Abort Exch(0x%p) Type(0x%x) OxRxid(0x%x 0x%x),sid did(0x%x 0x%x) SeqId(0x%x) IOState(0x%x) Ref(0x%x)", - vport->port_id, exch, - (u32)exch->xchg_type, - (u32)exch->oxid, - (u32)exch->rxid, (u32)exch->sid, - (u32)exch->did, (u32)exch->seq_id, - (u32)exch->io_state, - atomic_read(&exch->ref_cnt)); - } - } - spin_unlock_irqrestore(&hot_pool->xchg_hotpool_lock, flags); - } - - if (vport_uses == 0) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]VPort(0x%x) has removed all exchanges it used", - vport->port_id); - break; - } - - if (wait_timeout >= UNF_DELETE_VPORT_MAX_WAIT_TIME_MS) - return RETURN_ERROR; - - msleep(UNF_WAIT_EXCH_REMOVE_ONE_TIME_MS); - wait_timeout += UNF_WAIT_EXCH_REMOVE_ONE_TIME_MS; - } - - return RETURN_OK; -} - -u32 unf_vport_wait_rports_removed(struct unf_lport *vport) -{ -#define UNF_WAIT_RPORT_REMOVE_ONE_TIME_MS 5000 - - struct unf_disc *disc = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - u32 vport_uses = 0; - ulong flags = 0; - u32 wait_timeout = 0; - struct unf_rport *unf_rport = NULL; - - FC_CHECK_RETURN_VALUE(vport, RETURN_ERROR); - disc = &vport->disc; - - while (1) { - vport_uses = 0; - spin_lock_irqsave(&disc->rport_busy_pool_lock, flags); - list_for_each_safe(node, next_node, &disc->list_delete_rports) { - unf_rport = list_entry(node, struct unf_rport, entry_rport); - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_MAJOR, - "[info]Vport(0x%x) Rport(0x%x) point(%p) is in Delete", - vport->port_id, unf_rport->nport_id, unf_rport); - vport_uses++; - } - - list_for_each_safe(node, next_node, &disc->list_destroy_rports) { - unf_rport = list_entry(node, struct unf_rport, entry_rport); - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_MAJOR, - "[info]Vport(0x%x) Rport(0x%x) point(%p) is in Destroy", - vport->port_id, unf_rport->nport_id, unf_rport); - vport_uses++; - } - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flags); - - if (vport_uses == 0) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]VPort(0x%x) has removed all RPorts it used", - vport->port_id); - break; - } - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Vport(0x%x) has %u RPorts not removed wait timeout(%u ms)", - vport->port_id, vport_uses, wait_timeout); - - if (wait_timeout >= UNF_DELETE_VPORT_MAX_WAIT_TIME_MS) - return RETURN_ERROR; - - msleep(UNF_WAIT_RPORT_REMOVE_ONE_TIME_MS); - wait_timeout += UNF_WAIT_RPORT_REMOVE_ONE_TIME_MS; - } - - return RETURN_OK; -} - -u32 unf_destroy_one_vport(struct unf_lport *vport) -{ - u32 ret; - struct unf_lport *root_port = NULL; - - FC_CHECK_RETURN_VALUE(vport, RETURN_ERROR); - - root_port = (struct unf_lport *)vport->root_lport; - - unf_vport_fabric_logo(vport); - - /* 1 set NOP */ - 
atomic_set(&vport->lport_no_operate_flag, UNF_LPORT_NOP); - vport->port_removing = true; - - /* 2 report linkdown to scsi and delele rpot */ - unf_linkdown_one_vport(vport); - - /* 3 set abort for exchange */ - unf_vport_abort_exch(vport); - - /* 4 wait exch return freepool */ - if (!root_port->port_dirt_exchange) { - ret = unf_vport_wait_all_exch_removed(vport); - if (ret != RETURN_OK) { - if (!root_port->port_removing) { - vport->port_removing = false; - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ERR, - "[err]VPort(0x%x) can not wait Exchange return freepool", - vport->port_id); - - return RETURN_ERROR; - } - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_WARN, - "[warn]Port(0x%x) is removing, there is dirty exchange, continue", - root_port->port_id); - - root_port->port_dirt_exchange = true; - } - } - - /* wait rport return rportpool */ - ret = unf_vport_wait_rports_removed(vport); - if (ret != RETURN_OK) { - vport->port_removing = false; - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ERR, - "[err]VPort(0x%x) can not wait Rport return freepool", - vport->port_id); - - return RETURN_ERROR; - } - - unf_cm_vport_remove(vport); - - return RETURN_OK; -} - -void unf_destroy_all_vports(struct unf_lport *lport) -{ - struct unf_vport_pool *vport_pool = NULL; - struct unf_lport *unf_lport = NULL; - struct unf_lport *vport = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flags = 0; - - unf_lport = lport; - FC_CHECK_RETURN_VOID(unf_lport); - - vport_pool = unf_lport->vport_pool; - if (unlikely(!vport_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Lport(0x%x) VPort pool is NULL", unf_lport->port_id); - - return; - } - - /* Transfer to the transition chain */ - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - list_for_each_safe(node, next_node, &unf_lport->list_vports_head) { - vport = list_entry(node, struct unf_lport, entry_vport); - list_del_init(&vport->entry_vport); - list_add_tail(&vport->entry_vport, &unf_lport->list_destroy_vports); - } - - list_for_each_safe(node, next_node, &unf_lport->list_intergrad_vports) { - vport = list_entry(node, struct unf_lport, entry_vport); - list_del_init(&vport->entry_vport); - list_add_tail(&vport->entry_vport, &unf_lport->list_destroy_vports); - atomic_dec(&vport->port_ref_cnt); - } - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - while (!list_empty(&unf_lport->list_destroy_vports)) { - node = UNF_OS_LIST_NEXT(&unf_lport->list_destroy_vports); - vport = list_entry(node, struct unf_lport, entry_vport); - - list_del_init(&vport->entry_vport); - list_add_tail(&vport->entry_vport, &unf_lport->list_vports_head); - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]VPort(0x%x) Destroy begin", vport->port_id); - unf_drop_vport(vport); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "[info]VPort(0x%x) Destroy end", vport->port_id); - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - } - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); -} - -u32 unf_init_vport_mgr_temp(struct unf_lport *lport) -{ - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - lport->lport_mgr_temp.unf_look_up_vport_by_index = unf_lookup_vport_by_index; - lport->lport_mgr_temp.unf_look_up_vport_by_port_id = unf_lookup_vport_by_portid; - lport->lport_mgr_temp.unf_look_up_vport_by_did = unf_lookup_vport_by_did; - lport->lport_mgr_temp.unf_look_up_vport_by_wwpn = unf_lookup_vport_by_wwpn; - 
lport->lport_mgr_temp.unf_vport_remove = unf_vport_remove; - - return RETURN_OK; -} - -void unf_release_vport_mgr_temp(struct unf_lport *lport) -{ - FC_CHECK_RETURN_VOID(lport); - - memset(&lport->lport_mgr_temp, 0, sizeof(struct unf_cm_lport_template)); - - lport->destroy_step = UNF_LPORT_DESTROY_STEP_9_DESTROY_LPORT_MG_TMP; -} diff --git a/drivers/scsi/spfc/common/unf_npiv.h b/drivers/scsi/spfc/common/unf_npiv.h deleted file mode 100644 index 6f522470f47a..000000000000 --- a/drivers/scsi/spfc/common/unf_npiv.h +++ /dev/null @@ -1,47 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_NPIV_H -#define UNF_NPIV_H - -#include "unf_type.h" -#include "unf_common.h" -#include "unf_lport.h" - -/* product VPORT configure */ -struct vport_config { - u64 node_name; - u64 port_name; - u32 port_mode; /* INI, TGT or both */ -}; - -/* product Vport function */ -#define PORTID_VPINDEX_MASK 0xff000000 -#define PORTID_VPINDEX_SHIT 24 -u32 unf_npiv_conf(u32 port_id, u64 wwpn, enum unf_rport_qos_level qos_level); -struct unf_lport *unf_creat_vport(struct unf_lport *lport, - struct vport_config *vport_config); -u32 unf_delete_vport(u32 port_id, u32 vp_index); - -/* Vport pool creat and release function */ -u32 unf_init_vport_pool(struct unf_lport *lport); -void unf_free_vport_pool(struct unf_lport *lport); - -/* Lport resigster stLPortMgTemp function */ -void unf_vport_remove(void *vport); -void unf_vport_ref_dec(struct unf_lport *vport); - -/* linkdown all Vport after receive linkdown event */ -void unf_linkdown_all_vports(void *lport); -/* Lport receive Flogi Acc linkup all Vport */ -void unf_linkup_all_vports(struct unf_lport *lport); -/* Lport remove delete all Vport */ -void unf_destroy_all_vports(struct unf_lport *lport); -void unf_vport_fabric_logo(struct unf_lport *vport); -u32 unf_destroy_one_vport(struct unf_lport *vport); -u32 unf_drop_vport(struct unf_lport *vport); -u32 unf_init_vport_mgr_temp(struct unf_lport *lport); -void unf_release_vport_mgr_temp(struct unf_lport *lport); -struct unf_lport *unf_get_vport_by_slab_index(struct unf_vport_pool *vport_pool, - u16 slab_index); -#endif diff --git a/drivers/scsi/spfc/common/unf_npiv_portman.c b/drivers/scsi/spfc/common/unf_npiv_portman.c deleted file mode 100644 index b4f393f2e732..000000000000 --- a/drivers/scsi/spfc/common/unf_npiv_portman.c +++ /dev/null @@ -1,360 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_npiv_portman.h" -#include "unf_log.h" -#include "unf_common.h" -#include "unf_rport.h" -#include "unf_npiv.h" -#include "unf_portman.h" - -void *unf_lookup_vport_by_index(void *lport, u16 vp_index) -{ - struct unf_lport *unf_lport = NULL; - struct unf_vport_pool *vport_pool = NULL; - struct unf_lport *unf_vport = NULL; - ulong flags = 0; - - FC_CHECK_RETURN_VALUE(lport, NULL); - - unf_lport = (struct unf_lport *)lport; - - vport_pool = unf_lport->vport_pool; - if (unlikely(!vport_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) vport pool is NULL", unf_lport->port_id); - - return NULL; - } - - if (vp_index == 0 || vp_index > vport_pool->slab_total_sum) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) input vport index(0x%x) is beyond the normal range(0x1~0x%x)", - unf_lport->port_id, vp_index, vport_pool->slab_total_sum); - - return NULL; - } - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - unf_vport = unf_get_vport_by_slab_index(vport_pool, vp_index - 1); - 
spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); - - return (void *)unf_vport; -} - -void *unf_lookup_vport_by_portid(void *lport, u32 port_id) -{ - struct unf_lport *unf_lport = NULL; - struct unf_vport_pool *vport_pool = NULL; - struct unf_lport *unf_vport = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, NULL); - - unf_lport = (struct unf_lport *)lport; - vport_pool = unf_lport->vport_pool; - if (unlikely(!vport_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) vport pool is NULL", unf_lport->port_id); - - return NULL; - } - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); - list_for_each_safe(node, next_node, &unf_lport->list_vports_head) { - unf_vport = list_entry(node, struct unf_lport, entry_vport); - if (unf_vport->port_id == port_id) { - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - return unf_vport; - } - } - - list_for_each_safe(node, next_node, &unf_lport->list_intergrad_vports) { - unf_vport = list_entry(node, struct unf_lport, entry_vport); - if (unf_vport->port_id == port_id) { - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - return unf_vport; - } - } - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) has no vport ID(0x%x).", - unf_lport->port_id, port_id); - return NULL; -} - -void *unf_lookup_vport_by_did(void *lport, u32 did) -{ - struct unf_lport *unf_lport = NULL; - struct unf_vport_pool *vport_pool = NULL; - struct unf_lport *unf_vport = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, NULL); - - unf_lport = (struct unf_lport *)lport; - vport_pool = unf_lport->vport_pool; - if (unlikely(!vport_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) vport pool is NULL", unf_lport->port_id); - - return NULL; - } - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); - list_for_each_safe(node, next_node, &unf_lport->list_vports_head) { - unf_vport = list_entry(node, struct unf_lport, entry_vport); - if (unf_vport->nport_id == did) { - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - - return unf_vport; - } - } - - list_for_each_safe(node, next_node, &unf_lport->list_intergrad_vports) { - unf_vport = list_entry(node, struct unf_lport, entry_vport); - if (unf_vport->nport_id == did) { - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - return unf_vport; - } - } - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) has no vport Nport ID(0x%x)", unf_lport->port_id, did); - return NULL; -} - -void *unf_lookup_vport_by_wwpn(void *lport, u64 wwpn) -{ - struct unf_lport *unf_lport = NULL; - struct unf_vport_pool *vport_pool = NULL; - struct unf_lport *unf_vport = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, NULL); - - unf_lport = (struct unf_lport *)lport; - vport_pool = unf_lport->vport_pool; - if (unlikely(!vport_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) vport pool is NULL", unf_lport->port_id); - - return NULL; - } - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); - list_for_each_safe(node, next_node, &unf_lport->list_vports_head) { - unf_vport = list_entry(node, struct unf_lport, entry_vport); - if (unf_vport->port_name == wwpn) { - 
spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - - return unf_vport; - } - } - - list_for_each_safe(node, next_node, &unf_lport->list_intergrad_vports) { - unf_vport = list_entry(node, struct unf_lport, entry_vport); - if (unf_vport->port_name == wwpn) { - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - return unf_vport; - } - } - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) has no vport WWPN(0x%llx)", - unf_lport->port_id, wwpn); - - return NULL; -} - -void unf_linkdown_one_vport(struct unf_lport *vport) -{ - ulong flag = 0; - struct unf_lport *root_lport = NULL; - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_KEVENT, - "[info]VPort(0x%x) linkdown", vport->port_id); - - spin_lock_irqsave(&vport->lport_state_lock, flag); - vport->link_up = UNF_PORT_LINK_DOWN; - vport->nport_id = 0; /* set nportid 0 before send fdisc again */ - unf_lport_state_ma(vport, UNF_EVENT_LPORT_LINK_DOWN); - spin_unlock_irqrestore(&vport->lport_state_lock, flag); - - root_lport = (struct unf_lport *)vport->root_lport; - - unf_flush_disc_event(&root_lport->disc, vport); - - unf_clean_linkdown_rport(vport); -} - -void unf_linkdown_all_vports(void *lport) -{ - struct unf_lport *unf_lport = NULL; - struct unf_vport_pool *vport_pool = NULL; - struct unf_lport *unf_vport = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flags = 0; - - FC_CHECK_RETURN_VOID(lport); - - unf_lport = (struct unf_lport *)lport; - vport_pool = unf_lport->vport_pool; - if (unlikely(!vport_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) VPort pool is NULL", unf_lport->port_id); - - return; - } - - /* Transfer to the transition chain */ - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - list_for_each_safe(node, next_node, &unf_lport->list_vports_head) { - unf_vport = list_entry(node, struct unf_lport, entry_vport); - list_del_init(&unf_vport->entry_vport); - list_add_tail(&unf_vport->entry_vport, &unf_lport->list_intergrad_vports); - (void)unf_lport_ref_inc(unf_vport); - } - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - while (!list_empty(&unf_lport->list_intergrad_vports)) { - node = UNF_OS_LIST_NEXT(&unf_lport->list_intergrad_vports); - unf_vport = list_entry(node, struct unf_lport, entry_vport); - - list_del_init(&unf_vport->entry_vport); - list_add_tail(&unf_vport->entry_vport, &unf_lport->list_vports_head); - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); - - unf_linkdown_one_vport(unf_vport); - - unf_vport_ref_dec(unf_vport); - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - } - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); -} - -int unf_process_vports_linkup(void *arg_in, void *arg_out) -{ -#define UNF_WAIT_VPORT_LOGIN_ONE_TIME_MS 100 - struct unf_vport_pool *vport_pool = NULL; - struct unf_lport *unf_lport = NULL; - struct unf_lport *unf_vport = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flags = 0; - int ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(arg_in, RETURN_ERROR); - - unf_lport = (struct unf_lport *)arg_in; - - if (atomic_read(&unf_lport->lport_no_operate_flag) == UNF_LPORT_NOP) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) is NOP don't continue", unf_lport->port_id); - - return RETURN_OK; - } - - if (unf_lport->link_up != UNF_PORT_LINK_UP) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - 
"[warn]Port(0x%x) is not linkup don't continue.", - unf_lport->port_id); - - return RETURN_OK; - } - - vport_pool = unf_lport->vport_pool; - if (unlikely(!vport_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) VPort pool is NULL.", unf_lport->port_id); - - return RETURN_OK; - } - - /* Transfer to the transition chain */ - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - list_for_each_safe(node, next_node, &unf_lport->list_vports_head) { - unf_vport = list_entry(node, struct unf_lport, entry_vport); - list_del_init(&unf_vport->entry_vport); - list_add_tail(&unf_vport->entry_vport, &unf_lport->list_intergrad_vports); - (void)unf_lport_ref_inc(unf_vport); - } - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - while (!list_empty(&unf_lport->list_intergrad_vports)) { - node = UNF_OS_LIST_NEXT(&unf_lport->list_intergrad_vports); - unf_vport = list_entry(node, struct unf_lport, entry_vport); - - list_del_init(&unf_vport->entry_vport); - list_add_tail(&unf_vport->entry_vport, &unf_lport->list_vports_head); - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); - - if (atomic_read(&unf_vport->lport_no_operate_flag) == UNF_LPORT_NOP) { - unf_vport_ref_dec(unf_vport); - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - continue; - } - - if (unf_lport->link_up == UNF_PORT_LINK_UP && - unf_lport->act_topo == UNF_ACT_TOP_P2P_FABRIC) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]Vport(0x%x) begin login", unf_vport->port_id); - - unf_vport->link_up = UNF_PORT_LINK_UP; - (void)unf_lport_login(unf_vport, unf_lport->act_topo); - - msleep(UNF_WAIT_VPORT_LOGIN_ONE_TIME_MS); - } else { - unf_linkdown_one_vport(unf_vport); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Vport(0x%x) login failed because root port linkdown", - unf_vport->port_id); - } - - unf_vport_ref_dec(unf_vport); - spin_lock_irqsave(&vport_pool->vport_pool_lock, flags); - } - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flags); - - return ret; -} - -void unf_linkup_all_vports(struct unf_lport *lport) -{ - struct unf_cm_event_report *event = NULL; - - FC_CHECK_RETURN_VOID(lport); - - if (unlikely(!lport->event_mgr.unf_get_free_event_func || - !lport->event_mgr.unf_post_event_func || - !lport->event_mgr.unf_release_event)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) Event fun is NULL", - lport->port_id); - return; - } - - event = lport->event_mgr.unf_get_free_event_func((void *)lport); - FC_CHECK_RETURN_VOID(event); - - event->lport = lport; - event->event_asy_flag = UNF_EVENT_ASYN; - event->unf_event_task = unf_process_vports_linkup; - event->para_in = (void *)lport; - - lport->event_mgr.unf_post_event_func(lport, event); -} diff --git a/drivers/scsi/spfc/common/unf_npiv_portman.h b/drivers/scsi/spfc/common/unf_npiv_portman.h deleted file mode 100644 index 284c23c9abe4..000000000000 --- a/drivers/scsi/spfc/common/unf_npiv_portman.h +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_NPIV_PORTMAN_H -#define UNF_NPIV_PORTMAN_H - -#include "unf_type.h" -#include "unf_lport.h" - -/* Lport resigster stLPortMgTemp function */ -void *unf_lookup_vport_by_index(void *lport, u16 vp_index); -void *unf_lookup_vport_by_portid(void *lport, u32 port_id); -void *unf_lookup_vport_by_did(void *lport, u32 did); -void *unf_lookup_vport_by_wwpn(void *lport, u64 wwpn); -void unf_linkdown_one_vport(struct unf_lport *vport); 
- -#endif diff --git a/drivers/scsi/spfc/common/unf_portman.c b/drivers/scsi/spfc/common/unf_portman.c deleted file mode 100644 index ef8f90eb3777..000000000000 --- a/drivers/scsi/spfc/common/unf_portman.c +++ /dev/null @@ -1,2431 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_portman.h" -#include "unf_log.h" -#include "unf_exchg.h" -#include "unf_rport.h" -#include "unf_io.h" -#include "unf_npiv.h" -#include "unf_scsi_common.h" - -#define UNF_LPORT_CHIP_ERROR(unf_lport) \ - ((unf_lport)->pcie_error_cnt.pcie_error_count[UNF_PCIE_FATALERRORDETECTED]) - -struct unf_global_lport global_lport_mgr; - -static int unf_port_switch(struct unf_lport *lport, bool switch_flag); -static u32 unf_build_lport_wwn(struct unf_lport *lport); -static int unf_lport_destroy(void *lport, void *arg_out); -static u32 unf_port_linkup(struct unf_lport *lport, void *input); -static u32 unf_port_linkdown(struct unf_lport *lport, void *input); -static u32 unf_port_abnormal_reset(struct unf_lport *lport, void *input); -static u32 unf_port_reset_start(struct unf_lport *lport, void *input); -static u32 unf_port_reset_end(struct unf_lport *lport, void *input); -static u32 unf_port_nop(struct unf_lport *lport, void *input); -static void unf_destroy_card_thread(struct unf_lport *lport); -static u32 unf_creat_card_thread(struct unf_lport *lport); -static u32 unf_find_card_thread(struct unf_lport *lport); -static u32 unf_port_begin_remove(struct unf_lport *lport, void *input); - -static struct unf_port_action g_lport_action[] = { - {UNF_PORT_LINK_UP, unf_port_linkup}, - {UNF_PORT_LINK_DOWN, unf_port_linkdown}, - {UNF_PORT_RESET_START, unf_port_reset_start}, - {UNF_PORT_RESET_END, unf_port_reset_end}, - {UNF_PORT_NOP, unf_port_nop}, - {UNF_PORT_BEGIN_REMOVE, unf_port_begin_remove}, - {UNF_PORT_RELEASE_RPORT_INDEX, unf_port_release_rport_index}, - {UNF_PORT_ABNORMAL_RESET, unf_port_abnormal_reset}, -}; - -static void unf_destroy_dirty_rport(struct unf_lport *lport, bool show_only) -{ - u32 dirty_rport = 0; - - /* for whole L_Port */ - if (lport->dirty_flag & UNF_LPORT_DIRTY_FLAG_RPORT_POOL_DIRTY) { - dirty_rport = lport->rport_pool.rport_pool_count; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) has %d dirty RPort(s)", - lport->port_id, dirty_rport); - - /* Show L_Port's R_Port(s) from busy_list & destroy_list */ - unf_show_all_rport(lport); - - /* free R_Port pool memory & bitmap */ - if (!show_only) { - vfree(lport->rport_pool.rport_pool_add); - lport->rport_pool.rport_pool_add = NULL; - vfree(lport->rport_pool.rpi_bitmap); - lport->rport_pool.rpi_bitmap = NULL; - } - } -} - -void unf_show_dirty_port(bool show_only, u32 *dirty_port_num) -{ - struct list_head *node = NULL; - struct list_head *node_next = NULL; - struct unf_lport *unf_lport = NULL; - ulong flags = 0; - u32 port_num = 0; - - FC_CHECK_RETURN_VOID(dirty_port_num); - - /* for each dirty L_Port from global L_Port list */ - spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); - list_for_each_safe(node, node_next, &global_lport_mgr.dirty_list_head) { - unf_lport = list_entry(node, struct unf_lport, entry_lport); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) has dirty data(0x%x)", - unf_lport->port_id, unf_lport->dirty_flag); - - /* Destroy dirty L_Port's exchange(s) & R_Port(s) */ - unf_destroy_dirty_xchg(unf_lport, show_only); - unf_destroy_dirty_rport(unf_lport, show_only); - - /* Delete (dirty L_Port) list entry if necessary */ - if (!show_only) 
{ - list_del_init(node); - vfree(unf_lport); - } - - port_num++; - } - spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); - - *dirty_port_num = port_num; -} - -void unf_show_all_rport(struct unf_lport *lport) -{ - struct unf_lport *unf_lport = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_disc *disc = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flag = 0; - u32 rport_cnt = 0; - u32 target_cnt = 0; - - FC_CHECK_RETURN_VOID(lport); - - unf_lport = lport; - disc = &unf_lport->disc; - - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_MAJOR, - "[info]Port(0x%x) disc state(0x%x)", unf_lport->port_id, disc->states); - - /* for each R_Port from busy_list */ - list_for_each_safe(node, next_node, &disc->list_busy_rports) { - unf_rport = list_entry(node, struct unf_rport, entry_rport); - rport_cnt++; - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_MAJOR, - "[info]Port(0x%x) busy RPorts(%u_%p) WWPN(0x%016llx) scsi_id(0x%x) local N_Port_ID(0x%x) N_Port_ID(0x%06x). State(0x%04x) options(0x%04x) index(0x%04x) ref(%d) pend:%d", - unf_lport->port_id, rport_cnt, unf_rport, - unf_rport->port_name, unf_rport->scsi_id, - unf_rport->local_nport_id, unf_rport->nport_id, - unf_rport->rp_state, unf_rport->options, - unf_rport->rport_index, - atomic_read(&unf_rport->rport_ref_cnt), - atomic_read(&unf_rport->pending_io_cnt)); - - if (unf_rport->nport_id < UNF_FC_FID_DOM_MGR) - target_cnt++; - } - - unf_lport->target_cnt = target_cnt; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) targetnum=(%u)", unf_lport->port_id, - unf_lport->target_cnt); - - /* for each R_Port from destroy_list */ - list_for_each_safe(node, next_node, &disc->list_destroy_rports) { - unf_rport = list_entry(node, struct unf_rport, entry_rport); - rport_cnt++; - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_MAJOR, - "[info]Port(0x%x) destroy RPorts(%u) WWPN(0x%016llx) N_Port_ID(0x%06x) State(0x%04x) options(0x%04x) index(0x%04x) ref(%d)", - unf_lport->port_id, rport_cnt, unf_rport->port_name, - unf_rport->nport_id, unf_rport->rp_state, - unf_rport->options, unf_rport->rport_index, - atomic_read(&unf_rport->rport_ref_cnt)); - } - - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); -} - -u32 unf_lport_ref_inc(struct unf_lport *lport) -{ - ulong lport_flags = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - spin_lock_irqsave(&lport->lport_state_lock, lport_flags); - if (atomic_read(&lport->port_ref_cnt) <= 0) { - spin_unlock_irqrestore(&lport->lport_state_lock, lport_flags); - - return UNF_RETURN_ERROR; - } - - atomic_inc(&lport->port_ref_cnt); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%p) port_id(0x%x) reference count is %d", - lport, lport->port_id, atomic_read(&lport->port_ref_cnt)); - - spin_unlock_irqrestore(&lport->lport_state_lock, lport_flags); - - return RETURN_OK; -} - -void unf_lport_ref_dec(struct unf_lport *lport) -{ - ulong flags = 0; - ulong lport_flags = 0; - - FC_CHECK_RETURN_VOID(lport); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "LPort(0x%p), port ID(0x%x), reference count is %d.", - lport, lport->port_id, atomic_read(&lport->port_ref_cnt)); - - spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); - spin_lock_irqsave(&lport->lport_state_lock, lport_flags); - if (atomic_dec_and_test(&lport->port_ref_cnt)) { - spin_unlock_irqrestore(&lport->lport_state_lock, lport_flags); - list_del(&lport->entry_lport); - global_lport_mgr.lport_sum--; - - /* attaches the lport to 
the destroy linked list for dfx */ - list_add_tail(&lport->entry_lport, &global_lport_mgr.destroy_list_head); - spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); - - (void)unf_lport_destroy(lport, NULL); - } else { - spin_unlock_irqrestore(&lport->lport_state_lock, lport_flags); - spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); - } -} - -void unf_lport_update_topo(struct unf_lport *lport, - enum unf_act_topo active_topo) -{ - ulong flag = 0; - - FC_CHECK_RETURN_VOID(lport); - - if (active_topo > UNF_ACT_TOP_UNKNOWN || active_topo < UNF_ACT_TOP_PUBLIC_LOOP) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) set invalid topology(0x%x) with current value(0x%x)", - lport->nport_id, active_topo, lport->act_topo); - - return; - } - - spin_lock_irqsave(&lport->lport_state_lock, flag); - lport->act_topo = active_topo; - spin_unlock_irqrestore(&lport->lport_state_lock, flag); -} - -void unf_set_lport_removing(struct unf_lport *lport) -{ - FC_CHECK_RETURN_VOID(lport); - - lport->fc_port = NULL; - lport->port_removing = true; - lport->destroy_step = UNF_LPORT_DESTROY_STEP_0_SET_REMOVING; -} - -u32 unf_release_local_port(void *lport) -{ - struct unf_lport *unf_lport = lport; - struct completion lport_free_completion; - - init_completion(&lport_free_completion); - FC_CHECK_RETURN_VALUE(unf_lport, UNF_RETURN_ERROR); - - unf_lport->lport_free_completion = &lport_free_completion; - unf_set_lport_removing(unf_lport); - unf_lport_ref_dec(unf_lport); - wait_for_completion(unf_lport->lport_free_completion); - /* for dirty case */ - if (unf_lport->dirty_flag == 0) - vfree(unf_lport); - - return RETURN_OK; -} - -static void unf_free_all_esgl_pages(struct unf_lport *lport) -{ - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flag = 0; - u32 i; - - FC_CHECK_RETURN_VOID(lport); - spin_lock_irqsave(&lport->esgl_pool.esgl_pool_lock, flag); - list_for_each_safe(node, next_node, &lport->esgl_pool.list_esgl_pool) { - list_del(node); - } - - spin_unlock_irqrestore(&lport->esgl_pool.esgl_pool_lock, flag); - if (lport->esgl_pool.esgl_buff_list.buflist) { - for (i = 0; i < lport->esgl_pool.esgl_buff_list.buf_num; i++) { - if (lport->esgl_pool.esgl_buff_list.buflist[i].vaddr) { - dma_free_coherent(&lport->low_level_func.dev->dev, - lport->esgl_pool.esgl_buff_list.buf_size, - lport->esgl_pool.esgl_buff_list.buflist[i].vaddr, - lport->esgl_pool.esgl_buff_list.buflist[i].paddr); - lport->esgl_pool.esgl_buff_list.buflist[i].vaddr = NULL; - } - } - kfree(lport->esgl_pool.esgl_buff_list.buflist); - lport->esgl_pool.esgl_buff_list.buflist = NULL; - } -} - -static u32 unf_init_esgl_pool(struct unf_lport *lport) -{ - struct unf_esgl *esgl = NULL; - u32 ret = RETURN_OK; - u32 index = 0; - u32 buf_total_size; - u32 buf_num; - u32 alloc_idx; - u32 curbuf_idx = 0; - u32 curbuf_offset = 0; - u32 buf_cnt_perhugebuf; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - lport->esgl_pool.esgl_pool_count = lport->low_level_func.lport_cfg_items.max_io; - spin_lock_init(&lport->esgl_pool.esgl_pool_lock); - INIT_LIST_HEAD(&lport->esgl_pool.list_esgl_pool); - - lport->esgl_pool.esgl_pool_addr = - vmalloc((size_t)((lport->esgl_pool.esgl_pool_count) * sizeof(struct unf_esgl))); - if (!lport->esgl_pool.esgl_pool_addr) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "LPort(0x%x) cannot allocate ESGL Pool.", lport->port_id); - - return UNF_RETURN_ERROR; - } - esgl = (struct unf_esgl *)lport->esgl_pool.esgl_pool_addr; - memset(esgl, 0, 
((lport->esgl_pool.esgl_pool_count) * sizeof(struct unf_esgl))); - - buf_total_size = (u32)(PAGE_SIZE * lport->esgl_pool.esgl_pool_count); - - lport->esgl_pool.esgl_buff_list.buf_size = - buf_total_size > BUF_LIST_PAGE_SIZE ? BUF_LIST_PAGE_SIZE : buf_total_size; - buf_cnt_perhugebuf = lport->esgl_pool.esgl_buff_list.buf_size / PAGE_SIZE; - buf_num = lport->esgl_pool.esgl_pool_count % buf_cnt_perhugebuf - ? lport->esgl_pool.esgl_pool_count / buf_cnt_perhugebuf + 1 - : lport->esgl_pool.esgl_pool_count / buf_cnt_perhugebuf; - lport->esgl_pool.esgl_buff_list.buflist = - (struct buff_list *)kmalloc(buf_num * sizeof(struct buff_list), GFP_KERNEL); - lport->esgl_pool.esgl_buff_list.buf_num = buf_num; - - if (!lport->esgl_pool.esgl_buff_list.buflist) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Allocate Esgl pool buf list failed out of memory"); - goto free_buff; - } - memset(lport->esgl_pool.esgl_buff_list.buflist, 0, buf_num * sizeof(struct buff_list)); - - for (alloc_idx = 0; alloc_idx < buf_num; alloc_idx++) { - lport->esgl_pool.esgl_buff_list.buflist[alloc_idx] - .vaddr = dma_alloc_coherent(&lport->low_level_func.dev->dev, - lport->esgl_pool.esgl_buff_list.buf_size, - &lport->esgl_pool.esgl_buff_list.buflist[alloc_idx].paddr, GFP_KERNEL); - if (!lport->esgl_pool.esgl_buff_list.buflist[alloc_idx].vaddr) - goto free_buff; - memset(lport->esgl_pool.esgl_buff_list.buflist[alloc_idx].vaddr, 0, - lport->esgl_pool.esgl_buff_list.buf_size); - } - - /* allocates the Esgl page, and the DMA uses the */ - for (index = 0; index < lport->esgl_pool.esgl_pool_count; index++) { - if (index != 0 && !(index % buf_cnt_perhugebuf)) - curbuf_idx++; - curbuf_offset = (u32)(PAGE_SIZE * (index % buf_cnt_perhugebuf)); - esgl->page.page_address = - (u64)lport->esgl_pool.esgl_buff_list.buflist[curbuf_idx].vaddr + curbuf_offset; - esgl->page.page_size = PAGE_SIZE; - esgl->page.esgl_phy_addr = - lport->esgl_pool.esgl_buff_list.buflist[curbuf_idx].paddr + curbuf_offset; - list_add_tail(&esgl->entry_esgl, &lport->esgl_pool.list_esgl_pool); - esgl++; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[EVENT]Allocate bufnum:%u,buf_total_size:%u", buf_num, buf_total_size); - - return ret; -free_buff: - unf_free_all_esgl_pages(lport); - vfree(lport->esgl_pool.esgl_pool_addr); - - return UNF_RETURN_ERROR; -} - -static void unf_free_esgl_pool(struct unf_lport *lport) -{ - FC_CHECK_RETURN_VOID(lport); - - unf_free_all_esgl_pages(lport); - lport->esgl_pool.esgl_pool_count = 0; - - if (lport->esgl_pool.esgl_pool_addr) { - vfree(lport->esgl_pool.esgl_pool_addr); - lport->esgl_pool.esgl_pool_addr = NULL; - } - - lport->destroy_step = UNF_LPORT_DESTROY_STEP_5_DESTROY_ESGL_POOL; -} - -struct unf_lport *unf_find_lport_by_port_id(u32 port_id) -{ - struct unf_lport *unf_lport = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flags = 0; - u32 portid = port_id & (~PORTID_VPINDEX_MASK); - u16 vport_index; - spinlock_t *lport_list_lock = NULL; - - lport_list_lock = &global_lport_mgr.global_lport_list_lock; - vport_index = (port_id & PORTID_VPINDEX_MASK) >> PORTID_VPINDEX_SHIT; - spin_lock_irqsave(lport_list_lock, flags); - - list_for_each_safe(node, next_node, &global_lport_mgr.lport_list_head) { - unf_lport = list_entry(node, struct unf_lport, entry_lport); - if (unf_lport->port_id == portid && !unf_lport->port_removing) { - spin_unlock_irqrestore(lport_list_lock, flags); - - return unf_cm_lookup_vport_by_vp_index(unf_lport, vport_index); - } - } - - list_for_each_safe(node, next_node, 
&global_lport_mgr.intergrad_head) { - unf_lport = list_entry(node, struct unf_lport, entry_lport); - if (unf_lport->port_id == portid && !unf_lport->port_removing) { - spin_unlock_irqrestore(lport_list_lock, flags); - - return unf_cm_lookup_vport_by_vp_index(unf_lport, vport_index); - } - } - - spin_unlock_irqrestore(lport_list_lock, flags); - - return NULL; -} - -u32 unf_is_vport_valid(struct unf_lport *lport, struct unf_lport *vport) -{ - struct unf_lport *unf_lport = NULL; - struct unf_vport_pool *vport_pool = NULL; - struct unf_lport *unf_vport = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flag = 0; - spinlock_t *vport_pool_lock = NULL; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(vport, UNF_RETURN_ERROR); - - unf_lport = lport; - vport_pool = unf_lport->vport_pool; - if (unlikely(!vport_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) vport pool is NULL", unf_lport->port_id); - - return UNF_RETURN_ERROR; - } - - vport_pool_lock = &vport_pool->vport_pool_lock; - spin_lock_irqsave(vport_pool_lock, flag); - list_for_each_safe(node, next_node, &unf_lport->list_vports_head) { - unf_vport = list_entry(node, struct unf_lport, entry_vport); - - if (unf_vport == vport && !unf_vport->port_removing) { - spin_unlock_irqrestore(vport_pool_lock, flag); - - return RETURN_OK; - } - } - - list_for_each_safe(node, next_node, &unf_lport->list_intergrad_vports) { - unf_vport = list_entry(node, struct unf_lport, entry_vport); - - if (unf_vport == vport && !unf_vport->port_removing) { - spin_unlock_irqrestore(vport_pool_lock, flag); - - return RETURN_OK; - } - } - spin_unlock_irqrestore(vport_pool_lock, flag); - - return UNF_RETURN_ERROR; -} - -u32 unf_is_lport_valid(struct unf_lport *lport) -{ - struct unf_lport *unf_lport = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flags = 0; - spinlock_t *lport_list_lock = NULL; - - lport_list_lock = &global_lport_mgr.global_lport_list_lock; - spin_lock_irqsave(lport_list_lock, flags); - - list_for_each_safe(node, next_node, &global_lport_mgr.lport_list_head) { - unf_lport = list_entry(node, struct unf_lport, entry_lport); - - if (unf_lport == lport && !unf_lport->port_removing) { - spin_unlock_irqrestore(lport_list_lock, flags); - - return RETURN_OK; - } - - if (unf_is_vport_valid(unf_lport, lport) == RETURN_OK) { - spin_unlock_irqrestore(lport_list_lock, flags); - - return RETURN_OK; - } - } - - list_for_each_safe(node, next_node, &global_lport_mgr.intergrad_head) { - unf_lport = list_entry(node, struct unf_lport, entry_lport); - - if (unf_lport == lport && !unf_lport->port_removing) { - spin_unlock_irqrestore(lport_list_lock, flags); - - return RETURN_OK; - } - - if (unf_is_vport_valid(unf_lport, lport) == RETURN_OK) { - spin_unlock_irqrestore(lport_list_lock, flags); - - return RETURN_OK; - } - } - - list_for_each_safe(node, next_node, &global_lport_mgr.destroy_list_head) { - unf_lport = list_entry(node, struct unf_lport, entry_lport); - - if (unf_lport == lport && !unf_lport->port_removing) { - spin_unlock_irqrestore(lport_list_lock, flags); - - return RETURN_OK; - } - - if (unf_is_vport_valid(unf_lport, lport) == RETURN_OK) { - spin_unlock_irqrestore(lport_list_lock, flags); - - return RETURN_OK; - } - } - - spin_unlock_irqrestore(lport_list_lock, flags); - - return UNF_RETURN_ERROR; -} - -static void unf_clean_linkdown_io(struct unf_lport *lport, bool clean_flag) -{ - /* Clean L_Port/V_Port Link Down I/O: Set Abort Tag */ - 
FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(lport->xchg_mgr_temp.unf_xchg_abort_all_io); - - lport->xchg_mgr_temp.unf_xchg_abort_all_io(lport, UNF_XCHG_TYPE_INI, clean_flag); - lport->xchg_mgr_temp.unf_xchg_abort_all_io(lport, UNF_XCHG_TYPE_SFS, clean_flag); -} - -u32 unf_fc_port_link_event(void *lport, u32 events, void *input) -{ - struct unf_lport *unf_lport = NULL; - u32 ret = UNF_RETURN_ERROR; - u32 index = 0; - - if (unlikely(!lport)) - return UNF_RETURN_ERROR; - unf_lport = (struct unf_lport *)lport; - - ret = unf_lport_ref_inc(unf_lport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) is removing and do nothing", - unf_lport->port_id); - - return RETURN_OK; - } - - /* process port event */ - while (index < (sizeof(g_lport_action) / sizeof(struct unf_port_action))) { - if (g_lport_action[index].action == events) { - ret = g_lport_action[index].unf_action(unf_lport, input); - - unf_lport_ref_dec_to_destroy(unf_lport); - - return ret; - } - index++; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) receive unknown event(0x%x)", - unf_lport->port_id, events); - - unf_lport_ref_dec_to_destroy(unf_lport); - - return ret; -} - -void unf_port_mgmt_init(void) -{ - memset(&global_lport_mgr, 0, sizeof(struct unf_global_lport)); - - INIT_LIST_HEAD(&global_lport_mgr.lport_list_head); - - INIT_LIST_HEAD(&global_lport_mgr.intergrad_head); - - INIT_LIST_HEAD(&global_lport_mgr.destroy_list_head); - - INIT_LIST_HEAD(&global_lport_mgr.dirty_list_head); - - spin_lock_init(&global_lport_mgr.global_lport_list_lock); - - UNF_SET_NOMAL_MODE(global_lport_mgr.dft_mode); - - global_lport_mgr.start_work = true; -} - -void unf_port_mgmt_deinit(void) -{ - if (global_lport_mgr.lport_sum != 0) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]There are %u port pool memory giveaway", - global_lport_mgr.lport_sum); - } - - memset(&global_lport_mgr, 0, sizeof(struct unf_global_lport)); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Common port manager exit succeed"); -} - -static void unf_port_register(struct unf_lport *lport) -{ - ulong flags = 0; - - FC_CHECK_RETURN_VOID(lport); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "Register LPort(0x%p), port ID(0x%x).", lport, lport->port_id); - - /* Add to the global management linked list header */ - spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); - list_add_tail(&lport->entry_lport, &global_lport_mgr.lport_list_head); - global_lport_mgr.lport_sum++; - spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); -} - -static void unf_port_unregister(struct unf_lport *lport) -{ - ulong flags = 0; - - FC_CHECK_RETURN_VOID(lport); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "Unregister LPort(0x%p), port ID(0x%x).", lport, lport->port_id); - - /* Remove from the global management linked list header */ - spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); - list_del(&lport->entry_lport); - global_lport_mgr.lport_sum--; - spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); -} - -int unf_port_start_work(struct unf_lport *lport) -{ - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - spin_lock_irqsave(&lport->lport_state_lock, flag); - if (lport->start_work_state != UNF_START_WORK_STOP) { - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - return RETURN_OK; - } - lport->start_work_state = UNF_START_WORK_COMPLETE; - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - /* switch sfp to start 
work */ - (void)unf_port_switch(lport, true); - - return RETURN_OK; -} - -static u32 -unf_lport_init_lw_funop(struct unf_lport *lport, - struct unf_low_level_functioon_op *low_level_op) -{ - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(low_level_op, UNF_RETURN_ERROR); - - lport->port_id = low_level_op->lport_cfg_items.port_id; - lport->port_name = low_level_op->sys_port_name; - lport->node_name = low_level_op->sys_node_name; - lport->options = low_level_op->lport_cfg_items.port_mode; - lport->act_topo = UNF_ACT_TOP_UNKNOWN; - lport->max_ssq_num = low_level_op->support_max_ssq_num; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "Port(0x%x) .", lport->port_id); - - memcpy(&lport->low_level_func, low_level_op, sizeof(struct unf_low_level_functioon_op)); - - return RETURN_OK; -} - -void unf_lport_release_lw_funop(struct unf_lport *lport) -{ - FC_CHECK_RETURN_VOID(lport); - - memset(&lport->low_level_func, 0, sizeof(struct unf_low_level_functioon_op)); - - lport->destroy_step = UNF_LPORT_DESTROY_STEP_13_DESTROY_LW_INTERFACE; -} - -struct unf_lport *unf_find_lport_by_scsi_hostid(u32 scsi_host_id) -{ - struct list_head *node = NULL, *next_node = NULL; - struct list_head *vp_node = NULL, *next_vp_node = NULL; - struct unf_lport *unf_lport = NULL; - struct unf_lport *unf_vport = NULL; - ulong flags = 0; - ulong pool_flags = 0; - spinlock_t *vp_pool_lock = NULL; - spinlock_t *lport_list_lock = &global_lport_mgr.global_lport_list_lock; - - spin_lock_irqsave(lport_list_lock, flags); - list_for_each_safe(node, next_node, &global_lport_mgr.lport_list_head) { - unf_lport = list_entry(node, struct unf_lport, entry_lport); - vp_pool_lock = &unf_lport->vport_pool->vport_pool_lock; - if (scsi_host_id == UNF_GET_SCSI_HOST_ID(unf_lport->host_info.host)) { - spin_unlock_irqrestore(lport_list_lock, flags); - - return unf_lport; - } - - /* support NPIV */ - if (unf_lport->vport_pool) { - spin_lock_irqsave(vp_pool_lock, pool_flags); - list_for_each_safe(vp_node, next_vp_node, &unf_lport->list_vports_head) { - unf_vport = list_entry(vp_node, struct unf_lport, entry_vport); - - if (scsi_host_id == - UNF_GET_SCSI_HOST_ID(unf_vport->host_info.host)) { - spin_unlock_irqrestore(vp_pool_lock, pool_flags); - spin_unlock_irqrestore(lport_list_lock, flags); - - return unf_vport; - } - } - spin_unlock_irqrestore(vp_pool_lock, pool_flags); - } - } - - list_for_each_safe(node, next_node, &global_lport_mgr.intergrad_head) { - unf_lport = list_entry(node, struct unf_lport, entry_lport); - vp_pool_lock = &unf_lport->vport_pool->vport_pool_lock; - if (scsi_host_id == - UNF_GET_SCSI_HOST_ID(unf_lport->host_info.host)) { - spin_unlock_irqrestore(lport_list_lock, flags); - - return unf_lport; - } - - /* support NPIV */ - if (unf_lport->vport_pool) { - spin_lock_irqsave(vp_pool_lock, pool_flags); - list_for_each_safe(vp_node, next_vp_node, &unf_lport->list_vports_head) { - unf_vport = list_entry(vp_node, struct unf_lport, entry_vport); - - if (scsi_host_id == - UNF_GET_SCSI_HOST_ID(unf_vport->host_info.host)) { - spin_unlock_irqrestore(vp_pool_lock, pool_flags); - spin_unlock_irqrestore(lport_list_lock, flags); - - return unf_vport; - } - } - spin_unlock_irqrestore(vp_pool_lock, pool_flags); - } - } - spin_unlock_irqrestore(lport_list_lock, flags); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Can not find port by scsi_host_id(0x%x), may be removing", - scsi_host_id); - - return NULL; -} - -u32 unf_init_scsi_id_table(struct unf_lport *lport) -{ - struct unf_rport_scsi_id_image *rport_scsi_id_image = 
NULL; - struct unf_wwpn_rport_info *wwpn_port_info = NULL; - u32 idx; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - rport_scsi_id_image = &lport->rport_scsi_table; - rport_scsi_id_image->max_scsi_id = UNF_MAX_SCSI_ID; - - /* If the number of remote connections supported by the L_Port is 0, an - * exception occurs - */ - if (rport_scsi_id_image->max_scsi_id == 0) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x), supported maximum login is zero.", lport->port_id); - - return UNF_RETURN_ERROR; - } - - rport_scsi_id_image->wwn_rport_info_table = - vmalloc(rport_scsi_id_image->max_scsi_id * sizeof(struct unf_wwpn_rport_info)); - if (!rport_scsi_id_image->wwn_rport_info_table) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) can't allocate SCSI ID Table(0x%x).", - lport->port_id, rport_scsi_id_image->max_scsi_id); - - return UNF_RETURN_ERROR; - } - memset(rport_scsi_id_image->wwn_rport_info_table, 0, - rport_scsi_id_image->max_scsi_id * sizeof(struct unf_wwpn_rport_info)); - - wwpn_port_info = rport_scsi_id_image->wwn_rport_info_table; - - for (idx = 0; idx < rport_scsi_id_image->max_scsi_id; idx++) { - INIT_DELAYED_WORK(&wwpn_port_info->loss_tmo_work, unf_sesion_loss_timeout); - INIT_LIST_HEAD(&wwpn_port_info->fc_lun_list); - wwpn_port_info->lport = lport; - wwpn_port_info->target_id = INVALID_VALUE32; - wwpn_port_info++; - } - - spin_lock_init(&rport_scsi_id_image->scsi_image_table_lock); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[info]Port(0x%x) supported maximum login is %u.", - lport->port_id, rport_scsi_id_image->max_scsi_id); - - return RETURN_OK; -} - -void unf_destroy_scsi_id_table(struct unf_lport *lport) -{ - struct unf_rport_scsi_id_image *rport_scsi_id_image = NULL; - struct unf_wwpn_rport_info *wwn_rport_info = NULL; - u32 i = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(lport); - - rport_scsi_id_image = &lport->rport_scsi_table; - if (rport_scsi_id_image->wwn_rport_info_table) { - for (i = 0; i < UNF_MAX_SCSI_ID; i++) { - wwn_rport_info = &rport_scsi_id_image->wwn_rport_info_table[i]; - UNF_DELAYED_WORK_SYNC(ret, (lport->port_id), - (&wwn_rport_info->loss_tmo_work), - "loss tmo Timer work"); - if (wwn_rport_info->lun_qos_level) { - vfree(wwn_rport_info->lun_qos_level); - wwn_rport_info->lun_qos_level = NULL; - } - } - - if (ret) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "Port(0x%x) cancel loss tmo work success", lport->port_id); - } - vfree(rport_scsi_id_image->wwn_rport_info_table); - rport_scsi_id_image->wwn_rport_info_table = NULL; - } - - rport_scsi_id_image->max_scsi_id = 0; - lport->destroy_step = UNF_LPORT_DESTROY_STEP_10_DESTROY_SCSI_TABLE; -} - -static u32 unf_lport_init(struct unf_lport *lport, void *private_data, - struct unf_low_level_functioon_op *low_level_op) -{ - u32 ret = RETURN_OK; - char work_queue_name[13]; - - unf_init_port_parms(lport); - - /* Associating LPort with FCPort */ - lport->fc_port = private_data; - - /* VpIndx=0 is reserved for Lport, and rootLport points to its own */ - lport->vp_index = 0; - lport->root_lport = lport; - lport->chip_info = NULL; - - /* Initialize the units related to L_Port and lw func */ - ret = unf_lport_init_lw_funop(lport, low_level_op); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "LPort(0x%x) initialize lowlevel function unsuccessful.", - lport->port_id); - - return ret; - } - - /* Init Linkevent workqueue */ - snprintf(work_queue_name, sizeof(work_queue_name), "%x_lkq", lport->port_id); - - lport->link_event_wq = 
create_singlethread_workqueue(work_queue_name); - if (!lport->link_event_wq) { - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ERR, - "[err]Port(0x%x) creat link event work queue failed", lport->port_id); - - return UNF_RETURN_ERROR; - } - snprintf(work_queue_name, sizeof(work_queue_name), "%x_xchgwq", lport->port_id); - lport->xchg_wq = create_workqueue(work_queue_name); - if (!lport->xchg_wq) { - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ERR, - "[err]Port(0x%x) creat Exchg work queue failed", - lport->port_id); - flush_workqueue(lport->link_event_wq); - destroy_workqueue(lport->link_event_wq); - lport->link_event_wq = NULL; - return UNF_RETURN_ERROR; - } - /* scsi table (R_Port) required for initializing INI - * Initialize the scsi id Table table to manage the mapping between SCSI - * ID, WWN, and Rport. - */ - - ret = unf_init_scsi_id_table(lport); - if (ret != RETURN_OK) { - flush_workqueue(lport->link_event_wq); - destroy_workqueue(lport->link_event_wq); - lport->link_event_wq = NULL; - - flush_workqueue(lport->xchg_wq); - destroy_workqueue(lport->xchg_wq); - lport->xchg_wq = NULL; - return ret; - } - - /* Initialize the EXCH resource */ - ret = unf_alloc_xchg_resource(lport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "LPort(0x%x) can't allocate exchange resource.", lport->port_id); - - flush_workqueue(lport->link_event_wq); - destroy_workqueue(lport->link_event_wq); - lport->link_event_wq = NULL; - - flush_workqueue(lport->xchg_wq); - destroy_workqueue(lport->xchg_wq); - lport->xchg_wq = NULL; - unf_destroy_scsi_id_table(lport); - - return ret; - } - - /* Initialize the ESGL resource pool used by Lport */ - ret = unf_init_esgl_pool(lport); - if (ret != RETURN_OK) { - flush_workqueue(lport->link_event_wq); - destroy_workqueue(lport->link_event_wq); - lport->link_event_wq = NULL; - - flush_workqueue(lport->xchg_wq); - destroy_workqueue(lport->xchg_wq); - lport->xchg_wq = NULL; - unf_free_all_xchg_mgr(lport); - unf_destroy_scsi_id_table(lport); - - return ret; - } - /* Initialize the disc manager under Lport */ - ret = unf_init_disc_mgr(lport); - if (ret != RETURN_OK) { - flush_workqueue(lport->link_event_wq); - destroy_workqueue(lport->link_event_wq); - lport->link_event_wq = NULL; - - flush_workqueue(lport->xchg_wq); - destroy_workqueue(lport->xchg_wq); - lport->xchg_wq = NULL; - unf_free_esgl_pool(lport); - unf_free_all_xchg_mgr(lport); - unf_destroy_scsi_id_table(lport); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "LPort(0x%x) initialize discover manager unsuccessful.", - lport->port_id); - - return ret; - } - - /* Initialize the LPort manager */ - ret = unf_init_vport_mgr_temp(lport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "LPort(0x%x) initialize RPort manager unsuccessful.", lport->port_id); - - goto RELEASE_LPORT; - } - - /* Initialize the EXCH manager */ - ret = unf_init_xchg_mgr_temp(lport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "LPort(0x%x) initialize exchange manager unsuccessful.", - lport->port_id); - goto RELEASE_LPORT; - } - /* Initialize the resources required by the event processing center */ - ret = unf_init_event_center(lport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "LPort(0x%x) initialize event center unsuccessful.", lport->port_id); - goto RELEASE_LPORT; - } - /* Initialize the initialization status of Lport */ - unf_set_lport_state(lport, UNF_LPORT_ST_INITIAL); - - /* Initialize the Lport route test case */ - ret = unf_init_lport_route(lport); - if (ret != 
RETURN_OK) { - flush_workqueue(lport->link_event_wq); - destroy_workqueue(lport->link_event_wq); - lport->link_event_wq = NULL; - - flush_workqueue(lport->xchg_wq); - destroy_workqueue(lport->xchg_wq); - lport->xchg_wq = NULL; - (void)unf_event_center_destroy(lport); - unf_disc_mgr_destroy(lport); - unf_free_esgl_pool(lport); - unf_free_all_xchg_mgr(lport); - unf_destroy_scsi_id_table(lport); - - return ret; - } - /* Supports the initialization step of the NPIV */ - ret = unf_init_vport_pool(lport); - if (ret != RETURN_OK) { - flush_workqueue(lport->link_event_wq); - destroy_workqueue(lport->link_event_wq); - lport->link_event_wq = NULL; - - flush_workqueue(lport->xchg_wq); - destroy_workqueue(lport->xchg_wq); - lport->xchg_wq = NULL; - - unf_destroy_lport_route(lport); - (void)unf_event_center_destroy(lport); - unf_disc_mgr_destroy(lport); - unf_free_esgl_pool(lport); - unf_free_all_xchg_mgr(lport); - unf_destroy_scsi_id_table(lport); - - return ret; - } - - /* qualifier rport callback */ - lport->unf_qualify_rport = unf_rport_set_qualifier_key_reuse; - lport->unf_tmf_abnormal_recovery = unf_tmf_timeout_recovery_special; - return RETURN_OK; -RELEASE_LPORT: - flush_workqueue(lport->link_event_wq); - destroy_workqueue(lport->link_event_wq); - lport->link_event_wq = NULL; - - flush_workqueue(lport->xchg_wq); - destroy_workqueue(lport->xchg_wq); - lport->xchg_wq = NULL; - - unf_disc_mgr_destroy(lport); - unf_free_esgl_pool(lport); - unf_free_all_xchg_mgr(lport); - unf_destroy_scsi_id_table(lport); - - return ret; -} - -void unf_free_qos_info(struct unf_lport *lport) -{ - struct list_head *node = NULL; - struct list_head *next_node = NULL; - struct unf_qos_info *qos_info = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(lport); - - spin_lock_irqsave(&lport->qos_mgr_lock, flag); - list_for_each_safe(node, next_node, &lport->list_qos_head) { - qos_info = (struct unf_qos_info *)list_entry(node, - struct unf_qos_info, entry_qos_info); - list_del_init(&qos_info->entry_qos_info); - kfree(qos_info); - } - - spin_unlock_irqrestore(&lport->qos_mgr_lock, flag); -} - -u32 unf_lport_deinit(struct unf_lport *lport) -{ - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - unf_free_qos_info(lport); - - unf_unregister_scsi_host(lport); - - /* If the card is unloaded normally, the thread is stopped once. The - * problem does not occur if you stop the thread again. - */ - unf_destroy_lport_route(lport); - - /* minus the reference count of the card event; the last port deletes - * the card thread - */ - unf_destroy_card_thread(lport); - flush_workqueue(lport->link_event_wq); - destroy_workqueue(lport->link_event_wq); - lport->link_event_wq = NULL; - - (void)unf_event_center_destroy(lport); - unf_free_vport_pool(lport); - unf_xchg_mgr_destroy(lport); - - unf_free_esgl_pool(lport); - - /* reliability review: Disc should release after Xchg. 
Destroy the disc - * manager - */ - unf_disc_mgr_destroy(lport); - - unf_release_xchg_mgr_temp(lport); - - unf_release_vport_mgr_temp(lport); - - unf_destroy_scsi_id_table(lport); - - flush_workqueue(lport->xchg_wq); - destroy_workqueue(lport->xchg_wq); - lport->xchg_wq = NULL; - - /* Releasing the lw Interface Template */ - unf_lport_release_lw_funop(lport); - lport->fc_port = NULL; - - return RETURN_OK; -} - -static int unf_card_event_process(void *arg) -{ - struct list_head *node = NULL; - struct unf_cm_event_report *event_node = NULL; - ulong flags = 0; - struct unf_chip_manage_info *chip_info = (struct unf_chip_manage_info *)arg; - - set_user_nice(current, UNF_OS_THRD_PRI_LOW); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "Slot(%u) chip(0x%x) enter event thread.", - chip_info->slot_id, chip_info->chip_id); - - while (!kthread_should_stop()) { - if (chip_info->thread_exit) - break; - - spin_lock_irqsave(&chip_info->chip_event_list_lock, flags); - if (list_empty(&chip_info->list_head)) { - spin_unlock_irqrestore(&chip_info->chip_event_list_lock, flags); - - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout((long)msecs_to_jiffies(UNF_S_TO_MS)); - } else { - node = UNF_OS_LIST_NEXT(&chip_info->list_head); - list_del_init(node); - chip_info->list_num--; - event_node = list_entry(node, struct unf_cm_event_report, list_entry); - spin_unlock_irqrestore(&chip_info->chip_event_list_lock, flags); - unf_handle_event(event_node); - } - } - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_MAJOR, - "Slot(%u) chip(0x%x) exit event thread.", - chip_info->slot_id, chip_info->chip_id); - - return RETURN_OK; -} - -static void unf_destroy_card_thread(struct unf_lport *lport) -{ - struct unf_event_mgr *event_mgr = NULL; - struct unf_chip_manage_info *chip_info = NULL; - struct list_head *list = NULL; - struct list_head *list_tmp = NULL; - struct unf_cm_event_report *event_node = NULL; - ulong event_lock_flag = 0; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(lport); - - /* If the thread cannot be found, apply for a new thread. */ - chip_info = lport->chip_info; - if (!chip_info) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Port(0x%x) has no event thread.", lport->port_id); - return; - } - event_mgr = &lport->event_mgr; - - spin_lock_irqsave(&chip_info->chip_event_list_lock, flag); - if (!list_empty(&chip_info->list_head)) { - list_for_each_safe(list, list_tmp, &chip_info->list_head) { - event_node = list_entry(list, struct unf_cm_event_report, list_entry); - - /* The LPort under the global event node is null. */ - if (event_node->lport == lport) { - list_del_init(&event_node->list_entry); - if (event_node->event_asy_flag == UNF_EVENT_SYN) { - event_node->result = UNF_RETURN_ERROR; - complete(&event_node->event_comp); - } - - spin_lock_irqsave(&event_mgr->port_event_lock, event_lock_flag); - event_mgr->free_event_count++; - list_add_tail(&event_node->list_entry, &event_mgr->list_free_event); - spin_unlock_irqrestore(&event_mgr->port_event_lock, - event_lock_flag); - } - } - } - spin_unlock_irqrestore(&chip_info->chip_event_list_lock, flag); - - /* If the number of events introduced by the event thread is 0, - * it indicates that no interface is used. 
In this case, thread - * resources need to be consumed - */ - if (atomic_dec_and_test(&chip_info->ref_cnt)) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Port(0x%x) destroy slot(%u) chip(0x%x) event thread succeed.", - lport->port_id, chip_info->slot_id, chip_info->chip_id); - chip_info->thread_exit = true; - wake_up_process(chip_info->thread); - kthread_stop(chip_info->thread); - chip_info->thread = NULL; - - spin_lock_irqsave(&card_thread_mgr.global_card_list_lock, flag); - list_del_init(&chip_info->list_chip_thread_entry); - card_thread_mgr.card_num--; - spin_unlock_irqrestore(&card_thread_mgr.global_card_list_lock, flag); - - vfree(chip_info); - } - - lport->chip_info = NULL; -} - -static u32 unf_creat_card_thread(struct unf_lport *lport) -{ - ulong flag = 0; - struct unf_chip_manage_info *chip_manage_info = NULL; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - /* If the thread cannot be found, apply for a new thread. */ - chip_manage_info = (struct unf_chip_manage_info *) - vmalloc(sizeof(struct unf_chip_manage_info)); - if (!chip_manage_info) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Port(0x%x) cannot allocate thread memory.", lport->port_id); - - return UNF_RETURN_ERROR; - } - memset(chip_manage_info, 0, sizeof(struct unf_chip_manage_info)); - - memcpy(&chip_manage_info->chip_info, &lport->low_level_func.chip_info, - sizeof(struct unf_chip_info)); - chip_manage_info->slot_id = UNF_GET_BOARD_TYPE_AND_SLOT_ID_BY_PORTID(lport->port_id); - chip_manage_info->chip_id = lport->low_level_func.chip_id; - chip_manage_info->list_num = 0; - chip_manage_info->sfp_9545_fault = false; - chip_manage_info->sfp_power_fault = false; - atomic_set(&chip_manage_info->ref_cnt, 1); - atomic_set(&chip_manage_info->card_loop_test_flag, false); - spin_lock_init(&chip_manage_info->card_loop_back_state_lock); - INIT_LIST_HEAD(&chip_manage_info->list_head); - spin_lock_init(&chip_manage_info->chip_event_list_lock); - - chip_manage_info->thread_exit = false; - chip_manage_info->thread = kthread_create(unf_card_event_process, - chip_manage_info, "%x_et", lport->port_id); - - if (IS_ERR(chip_manage_info->thread) || !chip_manage_info->thread) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Port(0x%x) creat event thread(0x%p) unsuccessful.", - lport->port_id, chip_manage_info->thread); - - vfree(chip_manage_info); - - return UNF_RETURN_ERROR; - } - - lport->chip_info = chip_manage_info; - wake_up_process(chip_manage_info->thread); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "Port(0x%x) creat slot(%u) chip(0x%x) event thread succeed.", - lport->port_id, chip_manage_info->slot_id, - chip_manage_info->chip_id); - - spin_lock_irqsave(&card_thread_mgr.global_card_list_lock, flag); - list_add_tail(&chip_manage_info->list_chip_thread_entry, &card_thread_mgr.card_list_head); - card_thread_mgr.card_num++; - spin_unlock_irqrestore(&card_thread_mgr.global_card_list_lock, flag); - - return RETURN_OK; -} - -static u32 unf_find_card_thread(struct unf_lport *lport) -{ - ulong flag = 0; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - struct unf_chip_manage_info *chip_info = NULL; - u32 ret = UNF_RETURN_ERROR; - - spin_lock_irqsave(&card_thread_mgr.global_card_list_lock, flag); - list_for_each_safe(node, next_node, &card_thread_mgr.card_list_head) { - chip_info = list_entry(node, struct unf_chip_manage_info, list_chip_thread_entry); - - if (chip_info->chip_id == lport->low_level_func.chip_id && - chip_info->slot_id == - UNF_GET_BOARD_TYPE_AND_SLOT_ID_BY_PORTID(lport->port_id)) { - 
atomic_inc(&chip_info->ref_cnt); - lport->chip_info = chip_info; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Port(0x%x) find card(%u) chip(0x%x) event thread succeed.", - lport->port_id, chip_info->slot_id, chip_info->chip_id); - - spin_unlock_irqrestore(&card_thread_mgr.global_card_list_lock, flag); - - return RETURN_OK; - } - } - spin_unlock_irqrestore(&card_thread_mgr.global_card_list_lock, flag); - - ret = unf_creat_card_thread(lport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "LPort(0x%x) creat event thread unsuccessful. Destroy LPort.", - lport->port_id); - - return UNF_RETURN_ERROR; - } else { - return RETURN_OK; - } -} - -void *unf_lport_create_and_init(void *private_data, struct unf_low_level_functioon_op *low_level_op) -{ - struct unf_lport *unf_lport = NULL; - u32 ret = UNF_RETURN_ERROR; - - if (!private_data) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Private Data is NULL"); - - return NULL; - } - if (!low_level_op) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "LowLevel port(0x%p) function is NULL", private_data); - - return NULL; - } - - /* 1. vmalloc & Memset L_Port */ - unf_lport = vmalloc(sizeof(struct unf_lport)); - if (!unf_lport) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Alloc LPort memory failed."); - - return NULL; - } - memset(unf_lport, 0, sizeof(struct unf_lport)); - - /* 2. L_Port Init */ - if (unf_lport_init(unf_lport, private_data, low_level_op) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "LPort initialize unsuccessful."); - - vfree(unf_lport); - - return NULL; - } - - /* 4. Get or Create Chip Thread - * Chip_ID & Slot_ID - */ - ret = unf_find_card_thread(unf_lport); - if (ret != RETURN_OK) { - (void)unf_lport_deinit(unf_lport); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "LPort(0x%x) Find Chip thread unsuccessful. Destroy LPort.", - unf_lport->port_id); - - vfree(unf_lport); - return NULL; - } - - /* 5. Registers with in the port management global linked list */ - unf_port_register(unf_lport); - /* update WWN */ - if (unf_build_lport_wwn(unf_lport) != RETURN_OK) { - unf_port_unregister(unf_lport); - (void)unf_lport_deinit(unf_lport); - vfree(unf_lport); - return NULL; - } - - // unf_init_link_lose_tmo(unf_lport);//TO DO - - /* initialize Scsi Host */ - if (unf_register_scsi_host(unf_lport) != RETURN_OK) { - unf_port_unregister(unf_lport); - (void)unf_lport_deinit(unf_lport); - vfree(unf_lport); - return NULL; - } - /* 7. Here, start work now */ - if (global_lport_mgr.start_work) { - if (unf_port_start_work(unf_lport) != RETURN_OK) { - unf_port_unregister(unf_lport); - - (void)unf_lport_deinit(unf_lport); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Port(0x%x) start work failed", unf_lport->port_id); - vfree(unf_lport); - return NULL; - } - } - - return unf_lport; -} - -static int unf_lport_destroy(void *lport, void *arg_out) -{ - struct unf_lport *unf_lport = NULL; - ulong flags = 0; - - if (!lport) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, "LPort is NULL."); - - return UNF_RETURN_ERROR; - } - - unf_lport = (struct unf_lport *)lport; - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_MAJOR, - "Destroy LPort(0x%p), ID(0x%x).", unf_lport, unf_lport->port_id); - /* NPIV Ensure that all Vport are deleted */ - unf_destroy_all_vports(unf_lport); - unf_lport->destroy_step = UNF_LPORT_DESTROY_STEP_1_REPORT_PORT_OUT; - - (void)unf_lport_deinit(lport); - - /* The port is removed from the destroy linked list. 
The next step is to - * release the memory - */ - spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); - list_del(&unf_lport->entry_lport); - - /* If the port has dirty memory, the port is mounted to the linked list - * of dirty ports - */ - if (unf_lport->dirty_flag) - list_add_tail(&unf_lport->entry_lport, &global_lport_mgr.dirty_list_head); - spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); - - if (unf_lport->lport_free_completion) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Complete LPort(0x%p), port ID(0x%x)'s Free Completion.", - unf_lport, unf_lport->port_id); - complete(unf_lport->lport_free_completion); - } else { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "LPort(0x%p), port ID(0x%x)'s Free Completion is NULL.", - unf_lport, unf_lport->port_id); - dump_stack(); - } - - return RETURN_OK; -} - -static int unf_port_switch(struct unf_lport *lport, bool switch_flag) -{ - struct unf_lport *unf_lport = lport; - int ret = UNF_RETURN_ERROR; - bool flag = false; - - FC_CHECK_RETURN_VALUE(unf_lport, UNF_RETURN_ERROR); - - if (!unf_lport->low_level_func.port_mgr_op.ll_port_config_set) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_WARN, - "[warn]Port(0x%x)'s config(switch) function is NULL", - unf_lport->port_id); - - return UNF_RETURN_ERROR; - } - - flag = switch_flag ? true : false; - - ret = (int)unf_lport->low_level_func.port_mgr_op.ll_port_config_set(unf_lport->fc_port, - UNF_PORT_CFG_SET_PORT_SWITCH, (void *)&flag); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, - UNF_WARN, "[warn]Port(0x%x) switch %s failed", - unf_lport->port_id, switch_flag ? "On" : "Off"); - - return UNF_RETURN_ERROR; - } - - unf_lport->switch_state = (bool)flag; - - return RETURN_OK; -} - -static int unf_send_event(u32 port_id, u32 syn_flag, void *argc_in, void *argc_out, - int (*func)(void *argc_in, void *argc_out)) -{ - struct unf_lport *lport = NULL; - struct unf_cm_event_report *event = NULL; - int ret = 0; - - lport = unf_find_lport_by_port_id(port_id); - if (!lport) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, - UNF_INFO, "Cannot find LPort(0x%x).", port_id); - - return UNF_RETURN_ERROR; - } - - if (unf_lport_ref_inc(lport) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "LPort(0x%x) is removing, no need process.", - lport->port_id); - - return UNF_RETURN_ERROR; - } - if (unlikely(!lport->event_mgr.unf_get_free_event_func || - !lport->event_mgr.unf_post_event_func || - !lport->event_mgr.unf_release_event)) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, - UNF_MAJOR, "Event function is NULL."); - - unf_lport_ref_dec_to_destroy(lport); - - return UNF_RETURN_ERROR; - } - - if (lport->port_removing) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "LPort(0x%x) is removing, no need process.", - lport->port_id); - - unf_lport_ref_dec_to_destroy(lport); - - return UNF_RETURN_ERROR; - } - - event = lport->event_mgr.unf_get_free_event_func((void *)lport); - if (!event) { - unf_lport_ref_dec_to_destroy(lport); - - return UNF_RETURN_ERROR; - } - - init_completion(&event->event_comp); - event->lport = lport; - event->event_asy_flag = syn_flag; - event->unf_event_task = func; - event->para_in = argc_in; - event->para_out = argc_out; - lport->event_mgr.unf_post_event_func(lport, event); - - if (event->event_asy_flag) { - /* You must wait for the other party to return. Otherwise, the - * linked list may be in disorder. 
- */ - wait_for_completion(&event->event_comp); - ret = (int)event->result; - lport->event_mgr.unf_release_event(lport, event); - } else { - ret = RETURN_OK; - } - - unf_lport_ref_dec_to_destroy(lport); - return ret; -} - -static int unf_reset_port(void *arg_in, void *arg_out) -{ - struct unf_reset_port_argin *input = (struct unf_reset_port_argin *)arg_in; - struct unf_lport *lport = NULL; - u32 ret = UNF_RETURN_ERROR; - enum unf_port_config_state port_state = UNF_PORT_CONFIG_STATE_RESET; - - FC_CHECK_RETURN_VALUE(input, UNF_RETURN_ERROR); - - lport = unf_find_lport_by_port_id(input->port_id); - if (!lport) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, - UNF_MAJOR, "Not find LPort(0x%x).", - input->port_id); - - return UNF_RETURN_ERROR; - } - - /* reset port */ - if (!lport->low_level_func.port_mgr_op.ll_port_config_set) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_MAJOR, - "Port(0x%x)'s corresponding function is NULL.", lport->port_id); - - return UNF_RETURN_ERROR; - } - - lport->act_topo = UNF_ACT_TOP_UNKNOWN; - lport->speed = UNF_PORT_SPEED_UNKNOWN; - lport->fabric_node_name = 0; - - ret = lport->low_level_func.port_mgr_op.ll_port_config_set(lport->fc_port, - UNF_PORT_CFG_SET_PORT_STATE, - (void *)&port_state); - - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, - UNF_MAJOR, "Reset port(0x%x) unsuccessful.", - lport->port_id); - - return UNF_RETURN_ERROR; - } - - return RETURN_OK; -} - -int unf_cm_reset_port(u32 port_id) -{ - int ret = UNF_RETURN_ERROR; - - ret = unf_send_event(port_id, UNF_EVENT_SYN, (void *)&port_id, - (void *)NULL, unf_reset_port); - return ret; -} - -int unf_lport_reset_port(struct unf_lport *lport, u32 flag) -{ - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - return unf_send_event(lport->port_id, flag, (void *)&lport->port_id, - (void *)NULL, unf_reset_port); -} - -static inline u32 unf_get_loop_alpa(struct unf_lport *lport, void *loop_alpa) -{ - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(lport->low_level_func.port_mgr_op.ll_port_config_get, - UNF_RETURN_ERROR); - - ret = lport->low_level_func.port_mgr_op.ll_port_config_get(lport->fc_port, - UNF_PORT_CFG_GET_LOOP_ALPA, loop_alpa); - - return ret; -} - -static u32 unf_lport_enter_private_loop_login(struct unf_lport *lport) -{ - struct unf_lport *unf_lport = lport; - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - spin_lock_irqsave(&unf_lport->lport_state_lock, flag); - unf_lport_state_ma(unf_lport, UNF_EVENT_LPORT_READY); /* LPort: LINK_UP --> READY */ - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - unf_lport_update_topo(unf_lport, UNF_ACT_TOP_PRIVATE_LOOP); - - /* NOP: check L_Port state */ - if (atomic_read(&unf_lport->lport_no_operate_flag) == UNF_LPORT_NOP) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_MAJOR, "[info]Port(0x%x) is NOP, do nothing", - unf_lport->port_id); - - return RETURN_OK; - } - - /* INI: check L_Port mode */ - if (UNF_PORT_MODE_INI != (unf_lport->options & UNF_PORT_MODE_INI)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) has no INI feature(0x%x), do nothing", - unf_lport->port_id, unf_lport->options); - - return RETURN_OK; - } - - if (unf_lport->disc.disc_temp.unf_disc_start) { - ret = unf_lport->disc.disc_temp.unf_disc_start(unf_lport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) with nportid(0x%x) start discovery failed", - unf_lport->port_id, unf_lport->nport_id); - } - } - - return ret; -} - -u32 unf_lport_login(struct unf_lport *lport, enum 
unf_act_topo act_topo) -{ - u32 loop_alpa = 0; - u32 ret = RETURN_OK; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - /* 1. Update (set) L_Port topo which get from low level */ - unf_lport_update_topo(lport, act_topo); - - spin_lock_irqsave(&lport->lport_state_lock, flag); - - /* 2. Link state check */ - if (lport->link_up != UNF_PORT_LINK_UP) { - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) with link_state(0x%x) port_state(0x%x) when login", - lport->port_id, lport->link_up, lport->states); - - return UNF_RETURN_ERROR; - } - - /* 3. Update L_Port state */ - unf_lport_state_ma(lport, UNF_EVENT_LPORT_LINK_UP); /* LPort: INITIAL --> LINK UP */ - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]LOGIN: Port(0x%x) start to login with topology(0x%x)", - lport->port_id, lport->act_topo); - - /* 4. Start logoin */ - if (act_topo == UNF_TOP_P2P_MASK || - act_topo == UNF_ACT_TOP_P2P_FABRIC || - act_topo == UNF_ACT_TOP_P2P_DIRECT) { - /* P2P or Fabric mode */ - ret = unf_lport_enter_flogi(lport); - } else if (act_topo == UNF_ACT_TOP_PUBLIC_LOOP) { - /* Public loop */ - (void)unf_get_loop_alpa(lport, &loop_alpa); - - /* Before FLOGI ALPA just low 8 bit, after FLOGI ACC, switch - * will assign complete addresses - */ - spin_lock_irqsave(&lport->lport_state_lock, flag); - lport->nport_id = loop_alpa; - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - ret = unf_lport_enter_flogi(lport); - } else if (act_topo == UNF_ACT_TOP_PRIVATE_LOOP) { - /* Private loop */ - (void)unf_get_loop_alpa(lport, &loop_alpa); - - spin_lock_irqsave(&lport->lport_state_lock, flag); - lport->nport_id = loop_alpa; - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - ret = unf_lport_enter_private_loop_login(lport); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]LOGIN: Port(0x%x) login with unknown topology(0x%x)", - lport->port_id, lport->act_topo); - } - - return ret; -} - -static u32 unf_port_linkup(struct unf_lport *lport, void *input) -{ - struct unf_lport *unf_lport = lport; - u32 ret = RETURN_OK; - enum unf_act_topo act_topo = UNF_ACT_TOP_UNKNOWN; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - /* If NOP state, stop */ - if (atomic_read(&unf_lport->lport_no_operate_flag) == UNF_LPORT_NOP) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[warn]Port(0x%x) is NOP and do nothing", unf_lport->port_id); - - return RETURN_OK; - } - - /* Update port state */ - spin_lock_irqsave(&unf_lport->lport_state_lock, flag); - unf_lport->link_up = UNF_PORT_LINK_UP; - unf_lport->speed = *((u32 *)input); - unf_set_lport_state(lport, UNF_LPORT_ST_INITIAL); /* INITIAL state */ - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - /* set hot pool wait state: so far, do not care */ - unf_set_hot_pool_wait_state(unf_lport, true); - - unf_lport->enhanced_features |= UNF_LPORT_ENHANCED_FEATURE_READ_SFP_ONCE; - - /* Get port active topopolgy (from low level) */ - if (!unf_lport->low_level_func.port_mgr_op.ll_port_config_get) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[warn]Port(0x%x) get topo function is NULL", unf_lport->port_id); - - return UNF_RETURN_ERROR; - } - ret = unf_lport->low_level_func.port_mgr_op.ll_port_config_get(unf_lport->fc_port, - UNF_PORT_CFG_GET_TOPO_ACT, (void *)&act_topo); - - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[warn]Port(0x%x) get topo from low level 
failed", - unf_lport->port_id); - - return UNF_RETURN_ERROR; - } - - /* Start Login process */ - ret = unf_lport_login(unf_lport, act_topo); - - return ret; -} - -static u32 unf_port_linkdown(struct unf_lport *lport, void *input) -{ - ulong flag = 0; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - unf_lport = lport; - - /* To prevent repeated reporting linkdown */ - spin_lock_irqsave(&unf_lport->lport_state_lock, flag); - unf_lport->speed = UNF_PORT_SPEED_UNKNOWN; - unf_lport->act_topo = UNF_ACT_TOP_UNKNOWN; - if (unf_lport->link_up == UNF_PORT_LINK_DOWN) { - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - return RETURN_OK; - } - unf_lport_state_ma(unf_lport, UNF_EVENT_LPORT_LINK_DOWN); - unf_reset_lport_params(unf_lport); - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - unf_set_hot_pool_wait_state(unf_lport, false); - - /* - * clear I/O: - * 1. INI do ABORT only, - * 2. TGT need do source clear with Wait_IO - * * - * for INI: busy/delay/delay_transfer/wait - * Clean L_Port/V_Port Link Down I/O: only set ABORT tag - */ - unf_flush_disc_event(&unf_lport->disc, NULL); - - unf_clean_linkdown_io(unf_lport, false); - - /* for L_Port's R_Ports */ - unf_clean_linkdown_rport(unf_lport); - /* for L_Port's all Vports */ - unf_linkdown_all_vports(lport); - return RETURN_OK; -} - -static u32 unf_port_abnormal_reset(struct unf_lport *lport, void *input) -{ - u32 ret = UNF_RETURN_ERROR; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - unf_lport = lport; - - ret = (u32)unf_lport_reset_port(unf_lport, UNF_EVENT_ASYN); - - return ret; -} - -static u32 unf_port_reset_start(struct unf_lport *lport, void *input) -{ - u32 ret = RETURN_OK; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - spin_lock_irqsave(&lport->lport_state_lock, flag); - unf_set_lport_state(lport, UNF_LPORT_ST_RESET); - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "Port(0x%x) begin to reset.", lport->port_id); - - return ret; -} - -static u32 unf_port_reset_end(struct unf_lport *lport, void *input) -{ - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "Port(0x%x) reset end.", lport->port_id); - - /* Task management command returns success and avoid repair measures - * case offline device - */ - unf_wake_up_scsi_task_cmnd(lport); - - spin_lock_irqsave(&lport->lport_state_lock, flag); - unf_set_lport_state(lport, UNF_LPORT_ST_INITIAL); - spin_unlock_irqrestore(&lport->lport_state_lock, flag); - - return RETURN_OK; -} - -static u32 unf_port_nop(struct unf_lport *lport, void *input) -{ - struct unf_lport *unf_lport = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - unf_lport = lport; - - atomic_set(&unf_lport->lport_no_operate_flag, UNF_LPORT_NOP); - - spin_lock_irqsave(&unf_lport->lport_state_lock, flag); - unf_lport_state_ma(unf_lport, UNF_EVENT_LPORT_LINK_DOWN); - unf_reset_lport_params(unf_lport); - spin_unlock_irqrestore(&unf_lport->lport_state_lock, flag); - - /* Set Tag prevent pending I/O to wait_list when close sfp failed */ - unf_set_hot_pool_wait_state(unf_lport, false); - - unf_flush_disc_event(&unf_lport->disc, NULL); - - /* L_Port/V_Port's I/O(s): Clean Link Down I/O: Set Abort Tag */ - unf_clean_linkdown_io(unf_lport, false); - - /* L_Port/V_Port's R_Port(s): report link down event to scsi & clear - * resource - */ - 
unf_clean_linkdown_rport(unf_lport); - unf_linkdown_all_vports(unf_lport); - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) report NOP event done", unf_lport->nport_id); - - return RETURN_OK; -} - -static u32 unf_port_begin_remove(struct unf_lport *lport, void *input) -{ - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - /* Cancel route timer delay work */ - unf_destroy_lport_route(lport); - - return RETURN_OK; -} - -static u32 unf_get_pcie_link_state(struct unf_lport *lport) -{ - struct unf_lport *unf_lport = lport; - bool linkstate = true; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(unf_lport->low_level_func.port_mgr_op.ll_port_config_get, - UNF_RETURN_ERROR); - - ret = unf_lport->low_level_func.port_mgr_op.ll_port_config_get(unf_lport->fc_port, - UNF_PORT_CFG_GET_PCIE_LINK_STATE, (void *)&linkstate); - if (ret != RETURN_OK || linkstate != true) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, - UNF_KEVENT, "[err]Can't Get Pcie Link State"); - - return UNF_RETURN_ERROR; - } - - return RETURN_OK; -} - -void unf_root_lport_ref_dec(struct unf_lport *lport) -{ - ulong flags = 0; - ulong lport_flags = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(lport); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%p) port_id(0x%x) reference count is %d", - lport, lport->port_id, atomic_read(&lport->port_ref_cnt)); - - spin_lock_irqsave(&global_lport_mgr.global_lport_list_lock, flags); - spin_lock_irqsave(&lport->lport_state_lock, lport_flags); - if (atomic_dec_and_test(&lport->port_ref_cnt)) { - spin_unlock_irqrestore(&lport->lport_state_lock, lport_flags); - - list_del(&lport->entry_lport); - global_lport_mgr.lport_sum--; - - /* Put L_Port to destroy list for debugging */ - list_add_tail(&lport->entry_lport, &global_lport_mgr.destroy_list_head); - spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); - - ret = unf_schedule_global_event((void *)lport, UNF_GLOBAL_EVENT_ASYN, - unf_lport_destroy); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_CRITICAL, - "[warn]Schedule global event failed. 
remain nodes(0x%x)", - global_event_queue.list_number); - } - } else { - spin_unlock_irqrestore(&lport->lport_state_lock, lport_flags); - spin_unlock_irqrestore(&global_lport_mgr.global_lport_list_lock, flags); - } -} - -void unf_lport_ref_dec_to_destroy(struct unf_lport *lport) -{ - if (lport->root_lport != lport) - unf_vport_ref_dec(lport); - else - unf_root_lport_ref_dec(lport); -} - -void unf_lport_route_work(struct work_struct *work) -{ -#define UNF_MAX_PCIE_LINK_DOWN_TIMES 3 - struct unf_lport *unf_lport = NULL; - int ret = 0; - - FC_CHECK_RETURN_VOID(work); - - unf_lport = container_of(work, struct unf_lport, route_timer_work.work); - if (unlikely(!unf_lport)) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, - UNF_KEVENT, "[err]LPort is NULL"); - - return; - } - - if (unlikely(unf_lport->port_removing)) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_KEVENT, - "[warn]LPort(0x%x) route work is closing.", unf_lport->port_id); - - unf_lport_ref_dec_to_destroy(unf_lport); - - return; - } - - if (unlikely(unf_get_pcie_link_state(unf_lport))) - unf_lport->pcie_link_down_cnt++; - else - unf_lport->pcie_link_down_cnt = 0; - - if (unf_lport->pcie_link_down_cnt >= UNF_MAX_PCIE_LINK_DOWN_TIMES) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_KEVENT, - "[warn]LPort(0x%x) detected pcie linkdown, closing route work", - unf_lport->port_id); - unf_lport->pcie_link_down = true; - unf_free_lport_all_xchg(unf_lport); - unf_lport_ref_dec_to_destroy(unf_lport); - return; - } - - if (unlikely(UNF_LPORT_CHIP_ERROR(unf_lport))) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_KEVENT, - "[warn]LPort(0x%x) reported chip error, closing route work. ", - unf_lport->port_id); - - unf_lport_ref_dec_to_destroy(unf_lport); - - return; - } - - if (unf_lport->enhanced_features & - UNF_LPORT_ENHANCED_FEATURE_CLOSE_FW_ROUTE) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_KEVENT, - "[warn]User close LPort(0x%x) route work. 
", unf_lport->port_id); - - unf_lport_ref_dec_to_destroy(unf_lport); - - return; - } - - /* Scheduling 1 second */ - ret = queue_delayed_work(unf_wq, &unf_lport->route_timer_work, - (ulong)msecs_to_jiffies(UNF_LPORT_POLL_TIMER)); - if (ret == 0) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_KEVENT, - "[warn]LPort(0x%x) schedule work unsuccessful.", unf_lport->port_id); - - unf_lport_ref_dec_to_destroy(unf_lport); - } -} - -static int unf_cm_get_mac_adr(void *argc_in, void *argc_out) -{ - struct unf_lport *unf_lport = NULL; - struct unf_get_chip_info_argout *chip_info = NULL; - - FC_CHECK_RETURN_VALUE(argc_in, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(argc_out, UNF_RETURN_ERROR); - - unf_lport = (struct unf_lport *)argc_in; - chip_info = (struct unf_get_chip_info_argout *)argc_out; - - if (!unf_lport) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, - UNF_MAJOR, " LPort is null."); - - return UNF_RETURN_ERROR; - } - - if (!unf_lport->low_level_func.port_mgr_op.ll_port_config_get) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Port(0x%x)'s corresponding function is NULL.", unf_lport->port_id); - - return UNF_RETURN_ERROR; - } - - if (unf_lport->low_level_func.port_mgr_op.ll_port_config_get(unf_lport->fc_port, - UNF_PORT_CFG_GET_MAC_ADDR, - chip_info) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Port(0x%x) get .", unf_lport->port_id); - - return UNF_RETURN_ERROR; - } - - return RETURN_OK; -} - -int unf_build_sys_wwn(u32 port_id, u64 *sys_port_name, u64 *sys_node_name) -{ - struct unf_get_chip_info_argout wwn = {0}; - u32 ret = UNF_RETURN_ERROR; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VALUE((sys_port_name), UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE((sys_node_name), UNF_RETURN_ERROR); - - unf_lport = unf_find_lport_by_port_id(port_id); - if (!unf_lport) - return UNF_RETURN_ERROR; - - ret = (u32)unf_send_event(unf_lport->port_id, UNF_EVENT_SYN, - (void *)unf_lport, (void *)&wwn, unf_cm_get_mac_adr); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "send event(port get mac adr) fail."); - return UNF_RETURN_ERROR; - } - - /* save card mode: UNF_FC_SERVER_BOARD_32_G(6):32G; - * UNF_FC_SERVER_BOARD_16_G(7):16G MODE - */ - unf_lport->card_type = wwn.board_type; - - /* update port max speed */ - if (wwn.board_type == UNF_FC_SERVER_BOARD_32_G) - unf_lport->low_level_func.fc_ser_max_speed = UNF_PORT_SPEED_32_G; - else if (wwn.board_type == UNF_FC_SERVER_BOARD_16_G) - unf_lport->low_level_func.fc_ser_max_speed = UNF_PORT_SPEED_16_G; - else if (wwn.board_type == UNF_FC_SERVER_BOARD_8_G) - unf_lport->low_level_func.fc_ser_max_speed = UNF_PORT_SPEED_8_G; - else - unf_lport->low_level_func.fc_ser_max_speed = UNF_PORT_SPEED_32_G; - - *sys_port_name = wwn.wwpn; - *sys_node_name = wwn.wwnn; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "Port(0x%x) Port Name(0x%llx), Node Name(0x%llx.)", - port_id, *sys_port_name, *sys_node_name); - - return RETURN_OK; -} - -static u32 unf_update_port_wwn(struct unf_lport *lport, - struct unf_port_wwn *port_wwn) -{ - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(port_wwn, UNF_RETURN_ERROR); - - /* Now notice lowlevel to update */ - if (!lport->low_level_func.port_mgr_op.ll_port_config_set) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Port(0x%x)'s corresponding function is NULL.", - lport->port_id); - - return UNF_RETURN_ERROR; - } - - if (lport->low_level_func.port_mgr_op.ll_port_config_set(lport->fc_port, - UNF_PORT_CFG_UPDATE_WWN, - port_wwn) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - 
"Port(0x%x) update WWN unsuccessful.", - lport->port_id); - - return UNF_RETURN_ERROR; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Port(0x%x) update WWN: previous(0x%llx, 0x%llx), now(0x%llx, 0x%llx).", - lport->port_id, lport->port_name, lport->node_name, - port_wwn->sys_port_wwn, port_wwn->sys_node_name); - - lport->port_name = port_wwn->sys_port_wwn; - lport->node_name = port_wwn->sys_node_name; - - return RETURN_OK; -} - -static u32 unf_build_lport_wwn(struct unf_lport *lport) -{ - struct unf_port_wwn port_wwn = {0}; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - if (unf_build_sys_wwn(lport->port_id, &port_wwn.sys_port_wwn, - &port_wwn.sys_node_name) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "Port(0x%x) build WWN unsuccessful.", lport->port_id); - - return UNF_RETURN_ERROR; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) build WWN succeed", lport->port_id); - - if (unf_update_port_wwn(lport, &port_wwn) != RETURN_OK) - return UNF_RETURN_ERROR; - - return RETURN_OK; -} - -u32 unf_port_release_rport_index(struct unf_lport *lport, void *input) -{ - u32 rport_index = INVALID_VALUE32; - ulong flag = 0; - struct unf_rport_pool *rport_pool = NULL; - struct unf_lport *unf_lport = NULL; - spinlock_t *rport_pool_lock = NULL; - - unf_lport = (struct unf_lport *)lport->root_lport; - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - if (input) { - rport_index = *(u32 *)input; - if (rport_index < lport->low_level_func.support_max_rport) { - rport_pool = &unf_lport->rport_pool; - rport_pool_lock = &rport_pool->rport_free_pool_lock; - spin_lock_irqsave(rport_pool_lock, flag); - if (test_bit((int)rport_index, rport_pool->rpi_bitmap)) { - clear_bit((int)rport_index, rport_pool->rpi_bitmap); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) try to release a free rport index(0x%x)", - lport->port_id, rport_index); - } - spin_unlock_irqrestore(rport_pool_lock, flag); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) try to release a not exist rport index(0x%x)", - lport->port_id, rport_index); - } - } - - return RETURN_OK; -} - -void *unf_lookup_lport_by_nportid(void *lport, u32 nport_id) -{ - struct unf_lport *unf_lport = NULL; - struct unf_vport_pool *vport_pool = NULL; - struct unf_lport *unf_vport = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VALUE(lport, NULL); - - unf_lport = (struct unf_lport *)lport; - unf_lport = unf_lport->root_lport; - vport_pool = unf_lport->vport_pool; - - if (unf_lport->nport_id == nport_id) - return unf_lport; - - if (unlikely(!vport_pool)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) vport pool is NULL", unf_lport->port_id); - - return NULL; - } - - spin_lock_irqsave(&vport_pool->vport_pool_lock, flag); - list_for_each_safe(node, next_node, &unf_lport->list_vports_head) { - unf_vport = list_entry(node, struct unf_lport, entry_vport); - if (unf_vport->nport_id == nport_id) { - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - return unf_vport; - } - } - - list_for_each_safe(node, next_node, &unf_lport->list_intergrad_vports) { - unf_vport = list_entry(node, struct unf_lport, entry_vport); - if (unf_vport->nport_id == nport_id) { - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - return unf_vport; - } - } - spin_unlock_irqrestore(&vport_pool->vport_pool_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "Port(0x%x) has no vport Nport ID(0x%x)", - 
unf_lport->port_id, nport_id); - - return NULL; -} - -int unf_get_link_lose_tmo(struct unf_lport *lport) -{ - u32 tmo_value = 0; - - if (!lport) - return UNF_LOSE_TMO; - - tmo_value = atomic_read(&lport->link_lose_tmo); - - if (!tmo_value) - tmo_value = UNF_LOSE_TMO; - - return (int)tmo_value; -} - -u32 unf_register_scsi_host(struct unf_lport *lport) -{ - struct unf_host_param host_param = {0}; - - struct Scsi_Host **scsi_host = NULL; - struct unf_lport_cfg_item *lport_cfg_items = NULL; - - FC_CHECK_RETURN_VALUE((lport), UNF_RETURN_ERROR); - - /* Point to -->> L_port->Scsi_host */ - scsi_host = &lport->host_info.host; - - lport_cfg_items = &lport->low_level_func.lport_cfg_items; - host_param.can_queue = (int)lport_cfg_items->max_queue_depth; - - /* Performance optimization */ - host_param.cmnd_per_lun = UNF_MAX_CMND_PER_LUN; - - host_param.sg_table_size = UNF_MAX_DMA_SEGS; - host_param.max_id = UNF_MAX_TARGET_NUMBER; - host_param.max_lun = UNF_DEFAULT_MAX_LUN; - host_param.max_channel = UNF_MAX_BUS_CHANNEL; - host_param.max_cmnd_len = UNF_MAX_SCSI_CMND_LEN; /* CDB-16 */ - host_param.dma_boundary = UNF_DMA_BOUNDARY; - host_param.max_sectors = UNF_MAX_SECTORS; - host_param.port_id = lport->port_id; - host_param.lport = lport; - host_param.pdev = &lport->low_level_func.dev->dev; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[info]Port(0x%x) allocate scsi host: can queue(%u), command performance LUN(%u), max lun(%u)", - lport->port_id, host_param.can_queue, host_param.cmnd_per_lun, - host_param.max_lun); - - if (unf_alloc_scsi_host(scsi_host, &host_param) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) allocate scsi host failed", lport->port_id); - - return UNF_RETURN_ERROR; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "[event]Port(0x%x) allocate scsi host(0x%x) succeed", - lport->port_id, UNF_GET_SCSI_HOST_ID(*scsi_host)); - - return RETURN_OK; -} - -void unf_unregister_scsi_host(struct unf_lport *lport) -{ - struct Scsi_Host *scsi_host = NULL; - u32 host_no = 0; - - FC_CHECK_RETURN_VOID(lport); - - scsi_host = lport->host_info.host; - - if (scsi_host) { - host_no = UNF_GET_SCSI_HOST_ID(scsi_host); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[event]Port(0x%x) starting unregister scsi host(0x%x)", - lport->port_id, host_no); - unf_free_scsi_host(scsi_host); - /* can`t set scsi_host for NULL, since it does`t alloc by itself */ - } else { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "[warn]Port(0x%x) unregister scsi host, invalid scsi_host ", - lport->port_id); - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[event]Port(0x%x) unregister scsi host(0x%x) succeed", - lport->port_id, host_no); - - lport->destroy_step = UNF_LPORT_DESTROY_STEP_12_UNREG_SCSI_HOST; -} diff --git a/drivers/scsi/spfc/common/unf_portman.h b/drivers/scsi/spfc/common/unf_portman.h deleted file mode 100644 index 4ad93d32bcaa..000000000000 --- a/drivers/scsi/spfc/common/unf_portman.h +++ /dev/null @@ -1,96 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_PORTMAN_H -#define UNF_PORTMAN_H - -#include "unf_type.h" -#include "unf_lport.h" - -#define UNF_LPORT_POLL_TIMER ((u32)(1 * 1000)) -#define UNF_TX_CREDIT_REG_32_G 0x2289420 -#define UNF_RX_CREDIT_REG_32_G 0x228950c -#define UNF_CREDIT_REG_16_G 0x2283418 -#define UNF_PORT_OFFSET_BASE 0x10000 -#define UNF_CREDIT_EMU_VALUE 0x20 -#define UNF_CREDIT_VALUE_32_G 0x8 -#define UNF_CREDIT_VALUE_16_G 0x8000000080008 - -struct unf_nportid_map { - u32 sid; - u32 did; - void 
*rport[1024]; - void *lport; -}; - -struct unf_global_card_thread { - struct list_head card_list_head; - spinlock_t global_card_list_lock; - u32 card_num; -}; - -/* Global L_Port MG,manage all L_Port */ -struct unf_global_lport { - struct list_head lport_list_head; - - /* Temporary list,used in hold list traverse */ - struct list_head intergrad_head; - - /* destroy list,used in card remove */ - struct list_head destroy_list_head; - - /* Dirty list,abnormal port */ - struct list_head dirty_list_head; - spinlock_t global_lport_list_lock; - u32 lport_sum; - u8 dft_mode; - bool start_work; -}; - -struct unf_port_action { - u32 action; - u32 (*unf_action)(struct unf_lport *lport, void *input); -}; - -struct unf_reset_port_argin { - u32 port_id; -}; - -extern struct unf_global_lport global_lport_mgr; -extern struct unf_global_card_thread card_thread_mgr; -extern struct workqueue_struct *unf_wq; - -struct unf_lport *unf_find_lport_by_port_id(u32 port_id); -struct unf_lport *unf_find_lport_by_scsi_hostid(u32 scsi_host_id); -void * -unf_lport_create_and_init(void *private_data, - struct unf_low_level_functioon_op *low_level_op); -u32 unf_fc_port_link_event(void *lport, u32 events, void *input); -u32 unf_release_local_port(void *lport); -void unf_lport_route_work(struct work_struct *work); -void unf_lport_update_topo(struct unf_lport *lport, - enum unf_act_topo active_topo); -void unf_lport_ref_dec(struct unf_lport *lport); -u32 unf_lport_ref_inc(struct unf_lport *lport); -void unf_lport_ref_dec_to_destroy(struct unf_lport *lport); -void unf_port_mgmt_deinit(void); -void unf_port_mgmt_init(void); -void unf_show_dirty_port(bool show_only, u32 *dirty_port_num); -void *unf_lookup_lport_by_nportid(void *lport, u32 nport_id); -u32 unf_is_lport_valid(struct unf_lport *lport); -int unf_lport_reset_port(struct unf_lport *lport, u32 flag); -int unf_cm_ops_handle(u32 type, void **arg_in); -u32 unf_register_scsi_host(struct unf_lport *lport); -void unf_unregister_scsi_host(struct unf_lport *lport); -void unf_destroy_scsi_id_table(struct unf_lport *lport); -u32 unf_lport_login(struct unf_lport *lport, enum unf_act_topo act_topo); -u32 unf_init_scsi_id_table(struct unf_lport *lport); -void unf_set_lport_removing(struct unf_lport *lport); -void unf_lport_release_lw_funop(struct unf_lport *lport); -void unf_show_all_rport(struct unf_lport *lport); -void unf_disc_state_ma(struct unf_lport *lport, enum unf_disc_event evnet); -int unf_get_link_lose_tmo(struct unf_lport *lport); -u32 unf_port_release_rport_index(struct unf_lport *lport, void *input); -int unf_cm_reset_port(u32 port_id); - -#endif diff --git a/drivers/scsi/spfc/common/unf_rport.c b/drivers/scsi/spfc/common/unf_rport.c deleted file mode 100644 index 9b06df884524..000000000000 --- a/drivers/scsi/spfc/common/unf_rport.c +++ /dev/null @@ -1,2286 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_rport.h" -#include "unf_log.h" -#include "unf_exchg.h" -#include "unf_ls.h" -#include "unf_service.h" -#include "unf_portman.h" - -/* rport state:ready --->>> link_down --->>> closing --->>> timeout --->>> delete */ -struct unf_rport_feature_pool *port_feature_pool; - -void unf_sesion_loss_timeout(struct work_struct *work) -{ - struct unf_wwpn_rport_info *wwpn_rport_info = NULL; - - FC_CHECK_RETURN_VOID(work); - - wwpn_rport_info = container_of(work, struct unf_wwpn_rport_info, loss_tmo_work.work); - if (unlikely(!wwpn_rport_info)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]wwpn_rport_info 
is NULL"); - return; - } - - atomic_set(&wwpn_rport_info->scsi_state, UNF_SCSI_ST_DEAD); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[info]Port(0x%x) wwpn(0x%llx) set target(0x%x) scsi state to dead", - ((struct unf_lport *)(wwpn_rport_info->lport))->port_id, - wwpn_rport_info->wwpn, wwpn_rport_info->target_id); -} - -u32 unf_alloc_scsi_id(struct unf_lport *lport, struct unf_rport *rport) -{ - struct unf_rport_scsi_id_image *rport_scsi_table = NULL; - struct unf_wwpn_rport_info *wwn_rport_info = NULL; - ulong flags = 0; - u32 index = 0; - u32 ret = UNF_RETURN_ERROR; - spinlock_t *rport_scsi_tb_lock = NULL; - - rport_scsi_table = &lport->rport_scsi_table; - rport_scsi_tb_lock = &rport_scsi_table->scsi_image_table_lock; - spin_lock_irqsave(rport_scsi_tb_lock, flags); - - /* 1. At first, existence check */ - for (index = 0; index < rport_scsi_table->max_scsi_id; index++) { - wwn_rport_info = &rport_scsi_table->wwn_rport_info_table[index]; - if (rport->port_name == wwn_rport_info->wwpn) { - spin_unlock_irqrestore(rport_scsi_tb_lock, flags); - UNF_DELAYED_WORK_SYNC(ret, (lport->port_id), - (&wwn_rport_info->loss_tmo_work), - "loss tmo Timer work"); - - /* Plug case: reuse again */ - spin_lock_irqsave(rport_scsi_tb_lock, flags); - wwn_rport_info->rport = rport; - wwn_rport_info->las_ten_scsi_state = - atomic_read(&wwn_rport_info->scsi_state); - atomic_set(&wwn_rport_info->scsi_state, UNF_SCSI_ST_ONLINE); - spin_unlock_irqrestore(rport_scsi_tb_lock, flags); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) find the same scsi_id(0x%x) by wwpn(0x%llx) RPort(%p) N_Port_ID(0x%x)", - lport->port_id, index, wwn_rport_info->wwpn, rport, - rport->nport_id); - - atomic_inc(&lport->resume_scsi_id); - goto find; - } - } - - /* 2. Alloc new SCSI ID */ - for (index = 0; index < rport_scsi_table->max_scsi_id; index++) { - wwn_rport_info = &rport_scsi_table->wwn_rport_info_table[index]; - if (wwn_rport_info->wwpn == INVALID_WWPN) { - spin_unlock_irqrestore(rport_scsi_tb_lock, flags); - UNF_DELAYED_WORK_SYNC(ret, (lport->port_id), - (&wwn_rport_info->loss_tmo_work), - "loss tmo Timer work"); - /* Use the free space */ - spin_lock_irqsave(rport_scsi_tb_lock, flags); - wwn_rport_info->rport = rport; - wwn_rport_info->wwpn = rport->port_name; - wwn_rport_info->las_ten_scsi_state = - atomic_read(&wwn_rport_info->scsi_state); - atomic_set(&wwn_rport_info->scsi_state, UNF_SCSI_ST_ONLINE); - spin_unlock_irqrestore(rport_scsi_tb_lock, flags); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) allco new scsi_id(0x%x) by wwpn(0x%llx) RPort(%p) N_Port_ID(0x%x)", - lport->port_id, index, wwn_rport_info->wwpn, rport, - rport->nport_id); - - atomic_inc(&lport->alloc_scsi_id); - goto find; - } - } - - /* 3. 
Reuse space has been used */ - for (index = 0; index < rport_scsi_table->max_scsi_id; index++) { - wwn_rport_info = &rport_scsi_table->wwn_rport_info_table[index]; - if (atomic_read(&wwn_rport_info->scsi_state) == UNF_SCSI_ST_DEAD) { - spin_unlock_irqrestore(rport_scsi_tb_lock, flags); - UNF_DELAYED_WORK_SYNC(ret, (lport->port_id), - (&wwn_rport_info->loss_tmo_work), - "loss tmo Timer work"); - - spin_lock_irqsave(rport_scsi_tb_lock, flags); - if (wwn_rport_info->dfx_counter) { - memset(wwn_rport_info->dfx_counter, 0, - sizeof(struct unf_wwpn_dfx_counter_info)); - } - if (wwn_rport_info->lun_qos_level) { - memset(wwn_rport_info->lun_qos_level, 0, - sizeof(u8) * UNF_MAX_LUN_PER_TARGET); - } - wwn_rport_info->rport = rport; - wwn_rport_info->wwpn = rport->port_name; - wwn_rport_info->las_ten_scsi_state = - atomic_read(&wwn_rport_info->scsi_state); - atomic_set(&wwn_rport_info->scsi_state, UNF_SCSI_ST_ONLINE); - spin_unlock_irqrestore(rport_scsi_tb_lock, flags); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[info]Port(0x%x) reuse a dead scsi_id(0x%x) by wwpn(0x%llx) RPort(%p) N_Port_ID(0x%x)", - lport->port_id, index, wwn_rport_info->wwpn, rport, - rport->nport_id); - - atomic_inc(&lport->reuse_scsi_id); - goto find; - } - } - - spin_unlock_irqrestore(rport_scsi_tb_lock, flags); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) there is not enough scsi_id with max_value(0x%x)", - lport->port_id, index); - - return INVALID_VALUE32; - -find: - if (!wwn_rport_info->dfx_counter) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[info]Port(0x%x) allocate Rport(0x%x) DFX buffer", - lport->port_id, wwn_rport_info->rport->nport_id); - wwn_rport_info->dfx_counter = vmalloc(sizeof(struct unf_wwpn_dfx_counter_info)); - if (!wwn_rport_info->dfx_counter) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) allocate DFX buffer fail", - lport->port_id); - - return INVALID_VALUE32; - } - - memset(wwn_rport_info->dfx_counter, 0, sizeof(struct unf_wwpn_dfx_counter_info)); - } - - return index; -} - -u32 unf_get_scsi_id_by_wwpn(struct unf_lport *lport, u64 wwpn) -{ - struct unf_rport_scsi_id_image *rport_scsi_table = NULL; - struct unf_wwpn_rport_info *wwn_rport_info = NULL; - ulong flags = 0; - u32 index = 0; - spinlock_t *rport_scsi_tb_lock = NULL; - - FC_CHECK_RETURN_VALUE(lport, INVALID_VALUE32); - rport_scsi_table = &lport->rport_scsi_table; - rport_scsi_tb_lock = &rport_scsi_table->scsi_image_table_lock; - - if (wwpn == 0) - return INVALID_VALUE32; - - spin_lock_irqsave(rport_scsi_tb_lock, flags); - - for (index = 0; index < rport_scsi_table->max_scsi_id; index++) { - wwn_rport_info = &rport_scsi_table->wwn_rport_info_table[index]; - if (wwn_rport_info->wwpn == wwpn) { - spin_unlock_irqrestore(rport_scsi_tb_lock, flags); - return index; - } - } - - spin_unlock_irqrestore(rport_scsi_tb_lock, flags); - - return INVALID_VALUE32; -} - -void unf_set_device_state(struct unf_lport *lport, u32 scsi_id, int scsi_state) -{ - struct unf_rport_scsi_id_image *scsi_image_table = NULL; - struct unf_wwpn_rport_info *wwpn_rport_info = NULL; - - if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) RPort scsi_id(0x%x) is max than 0x%x", - lport->port_id, scsi_id, UNF_MAX_SCSI_ID); - return; - } - - scsi_image_table = &lport->rport_scsi_table; - wwpn_rport_info = &scsi_image_table->wwn_rport_info_table[scsi_id]; - atomic_set(&wwpn_rport_info->scsi_state, scsi_state); -} - -void unf_rport_linkdown(struct unf_lport *lport, struct unf_rport *rport) -{ 
- /* - * 1. port_logout - * 2. rcvd_rscn_port_not_in_disc - * 3. each_rport_after_rscn - * 4. rcvd_gpnid_rjt - * 5. rport_after_logout(rport is fabric port) - */ - ulong flag = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - /* 1. Update R_Port state: Link Down Event --->>> closing state */ - spin_lock_irqsave(&rport->rport_state_lock, flag); - unf_rport_state_ma(rport, UNF_EVENT_RPORT_LINK_DOWN); - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - /* 3. Port enter closing (then enter to Delete) process */ - unf_rport_enter_closing(rport); -} - -static struct unf_rport *unf_rport_is_changed(struct unf_lport *lport, - struct unf_rport *rport, u32 sid) -{ - if (rport) { - /* S_ID or D_ID has been changed */ - if (rport->nport_id != sid || rport->local_nport_id != lport->nport_id) { - /* 1. Swap case: (SID or DID changed): Report link down - * & delete immediately - */ - unf_rport_immediate_link_down(lport, rport); - return NULL; - } - } - - return rport; -} - -struct unf_rport *unf_rport_set_qualifier_key_reuse(struct unf_lport *lport, - struct unf_rport *rport_by_nport_id, - struct unf_rport *rport_by_wwpn, - u64 wwpn, u32 sid) -{ - /* Used for SPFC Chip */ - struct unf_rport *rport = NULL; - struct unf_rport *rporta = NULL; - struct unf_rport *rportb = NULL; - bool wwpn_flag = false; - - FC_CHECK_RETURN_VALUE(lport, NULL); - - /* About R_Port by N_Port_ID */ - rporta = unf_rport_is_changed(lport, rport_by_nport_id, sid); - - /* About R_Port by WWpn */ - rportb = unf_rport_is_changed(lport, rport_by_wwpn, sid); - - if (!rporta && !rportb) { - return NULL; - } else if (!rporta && rportb) { - /* 3. Plug case: reuse again */ - rport = rportb; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) RPort(0x%p) WWPN(0x%llx) S_ID(0x%x) D_ID(0x%x) reused by wwpn", - lport->port_id, rport, rport->port_name, - rport->nport_id, rport->local_nport_id); - - return rport; - } else if (rporta && !rportb) { - wwpn_flag = (rporta->port_name != wwpn && rporta->port_name != 0 && - rporta->port_name != INVALID_VALUE64); - if (wwpn_flag) { - /* 4. WWPN changed: Report link down & delete - * immediately - */ - unf_rport_immediate_link_down(lport, rporta); - return NULL; - } - - /* Updtae WWPN */ - rporta->port_name = wwpn; - rport = rporta; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) RPort(0x%p) WWPN(0x%llx) S_ID(0x%x) D_ID(0x%x) reused by N_Port_ID", - lport->port_id, rport, rport->port_name, - rport->nport_id, rport->local_nport_id); - - return rport; - } - - /* 5. Case for A == B && A != NULL && B != NULL */ - if (rportb == rporta) { - rport = rporta; - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) find the same RPort(0x%p) WWPN(0x%llx) S_ID(0x%x) D_ID(0x%x)", - lport->port_id, rport, rport->port_name, rport->nport_id, - rport->local_nport_id); - - return rport; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) find two duplicate login. RPort(A:0x%p, WWPN:0x%llx, S_ID:0x%x, D_ID:0x%x) RPort(B:0x%p, WWPN:0x%llx, S_ID:0x%x, D_ID:0x%x)", - lport->port_id, rporta, rporta->port_name, rporta->nport_id, - rporta->local_nport_id, rportb, rportb->port_name, rportb->nport_id, - rportb->local_nport_id); - - /* 6. 
Case for A != B && A != NULL && B != NULL: Immediate - * Report && Deletion - */ - unf_rport_immediate_link_down(lport, rporta); - unf_rport_immediate_link_down(lport, rportb); - - return NULL; -} - -struct unf_rport *unf_find_valid_rport(struct unf_lport *lport, u64 wwpn, u32 sid) -{ - struct unf_rport *rport = NULL; - struct unf_rport *rport_by_nport_id = NULL; - struct unf_rport *rport_by_wwpn = NULL; - ulong flags = 0; - spinlock_t *rport_state_lock = NULL; - - FC_CHECK_RETURN_VALUE(lport, NULL); - FC_CHECK_RETURN_VALUE(lport->unf_qualify_rport, NULL); - - /* Get R_Port by WWN & N_Port_ID */ - rport_by_nport_id = unf_get_rport_by_nport_id(lport, sid); - rport_by_wwpn = unf_get_rport_by_wwn(lport, wwpn); - rport_state_lock = &rport_by_wwpn->rport_state_lock; - - /* R_Port check: by WWPN */ - if (rport_by_wwpn) { - spin_lock_irqsave(rport_state_lock, flags); - if (rport_by_wwpn->nport_id == UNF_FC_FID_FLOGI) { - spin_unlock_irqrestore(rport_state_lock, flags); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) RPort(0x%p) find by WWPN(0x%llx) is invalid", - lport->port_id, rport_by_wwpn, wwpn); - - rport_by_wwpn = NULL; - } else { - spin_unlock_irqrestore(rport_state_lock, flags); - } - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x_0x%x) RPort(0x%p) find by N_Port_ID(0x%x) and RPort(0x%p) by WWPN(0x%llx)", - lport->port_id, lport->nport_id, rport_by_nport_id, sid, rport_by_wwpn, wwpn); - - /* R_Port validity check: get by WWPN & N_Port_ID */ - rport = lport->unf_qualify_rport(lport, rport_by_nport_id, - rport_by_wwpn, wwpn, sid); - - return rport; -} - -void unf_rport_delay_login(struct unf_rport *rport) -{ - FC_CHECK_RETURN_VOID(rport); - - /* Do R_Port recovery: PLOGI or PRLI or LOGO */ - unf_rport_error_recovery(rport); -} - -void unf_rport_enter_logo(struct unf_lport *lport, struct unf_rport *rport) -{ - /* - * 1. TMF/ABTS timeout recovery :Y - * 2. L_Port error recovery --->>> larger than retry_count :Y - * 3. R_Port error recovery --->>> larger than retry_count :Y - * 4. Check PLOGI parameter --->>> parameter is error :Y - * 5. PRLI handler --->>> R_Port state is error :Y - * 6. PDISC handler --->>> R_Port state is not PRLI_WAIT :Y - * 7. ADISC handler --->>> R_Port state is not PRLI_WAIT :Y - * 8. PLOGI wait timeout with R_PORT is INI mode :Y - * 9. RCVD GFFID_RJT --->>> R_Port state is INIT :Y - * 10. RCVD GPNID_ACC --->>> R_Port state is error :Y - * 11. Private Loop mode with LOGO case :Y - * 12. P2P mode with LOGO case :Y - * 13. Fabric mode with LOGO case :Y - * 14. RCVD PRLI_ACC with R_Port is INI :Y - * 15. TGT RCVD BLS_REQ with session is error :Y - */ - ulong flags = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - spin_lock_irqsave(&rport->rport_state_lock, flags); - - if (rport->rp_state == UNF_RPORT_ST_CLOSING || - rport->rp_state == UNF_RPORT_ST_DELETE) { - /* 1. Already within Closing or Delete: Do nothing */ - spin_unlock_irqrestore(&rport->rport_state_lock, flags); - - return; - } else if (rport->rp_state == UNF_RPORT_ST_LOGO) { - /* 2. Update R_Port state: Normal Enter Event --->>> closing - * state - */ - unf_rport_state_ma(rport, UNF_EVENT_RPORT_NORMAL_ENTER); - spin_unlock_irqrestore(&rport->rport_state_lock, flags); - - /* Send Logo if necessary */ - if (unf_send_logo(lport, rport) != RETURN_OK) - unf_rport_enter_closing(rport); - } else { - /* 3. 
Update R_Port state: Link Down Event --->>> closing state - */ - unf_rport_state_ma(rport, UNF_EVENT_RPORT_LINK_DOWN); - spin_unlock_irqrestore(&rport->rport_state_lock, flags); - - unf_rport_enter_closing(rport); - } -} - -u32 unf_free_scsi_id(struct unf_lport *lport, u32 scsi_id) -{ - ulong flags = 0; - struct unf_rport_scsi_id_image *rport_scsi_table = NULL; - struct unf_wwpn_rport_info *wwn_rport_info = NULL; - spinlock_t *rport_scsi_tb_lock = NULL; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - if (unlikely(lport->port_removing)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) is removing and do nothing", - lport->port_id, lport->nport_id); - - return UNF_RETURN_ERROR; - } - - if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x_0x%x) scsi_id(0x%x) is bigger than %d", - lport->port_id, lport->nport_id, scsi_id, UNF_MAX_SCSI_ID); - - return UNF_RETURN_ERROR; - } - - rport_scsi_table = &lport->rport_scsi_table; - rport_scsi_tb_lock = &rport_scsi_table->scsi_image_table_lock; - if (rport_scsi_table->wwn_rport_info_table) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[warn]Port(0x%x_0x%x) RPort(0x%p) free scsi_id(0x%x) wwpn(0x%llx) target_id(0x%x) succeed", - lport->port_id, lport->nport_id, - rport_scsi_table->wwn_rport_info_table[scsi_id].rport, - scsi_id, rport_scsi_table->wwn_rport_info_table[scsi_id].wwpn, - rport_scsi_table->wwn_rport_info_table[scsi_id].target_id); - - spin_lock_irqsave(rport_scsi_tb_lock, flags); - wwn_rport_info = &rport_scsi_table->wwn_rport_info_table[scsi_id]; - if (wwn_rport_info->rport) { - wwn_rport_info->rport->rport = NULL; - wwn_rport_info->rport = NULL; - } - wwn_rport_info->target_id = INVALID_VALUE32; - atomic_set(&wwn_rport_info->scsi_state, UNF_SCSI_ST_DEAD); - - /* NOTE: remain WWPN/Port_Name unchanged(un-cleared) */ - spin_unlock_irqrestore(rport_scsi_tb_lock, flags); - - return RETURN_OK; - } - - return UNF_RETURN_ERROR; -} - -static void unf_report_ini_linkwown_event(struct unf_lport *lport, struct unf_rport *rport) -{ - u32 scsi_id = 0; - struct fc_rport *unf_rport = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - /* - * 1. set local device(rport/rport_info_table) state - * -------------------------------------------------OFF_LINE - * * - * about rport->scsi_id - * valid during rport link up to link down - */ - - spin_lock_irqsave(&rport->rport_state_lock, flag); - scsi_id = rport->scsi_id; - unf_set_device_state(lport, scsi_id, UNF_SCSI_ST_OFFLINE); - - /* 2. 
delete scsi's rport */ - unf_rport = (struct fc_rport *)rport->rport; - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - if (unf_rport) { - fc_remote_port_delete(unf_rport); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[event]Port(0x%x_0x%x) delete RPort(0x%x) wwpn(0x%llx) scsi_id(0x%x) succeed", - lport->port_id, lport->nport_id, rport->nport_id, - rport->port_name, scsi_id); - - atomic_inc(&lport->scsi_session_del_success); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[warn]Port(0x%x_0x%x) delete RPort(0x%x_0x%p) failed", - lport->port_id, lport->nport_id, rport->nport_id, rport); - } -} - -static void unf_report_ini_linkup_event(struct unf_lport *lport, struct unf_rport *rport) -{ - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_MAJOR, - "[event]Port(0x%x) RPort(0x%x_0x%p) put INI link up work(%p) to work_queue", - lport->port_id, rport->nport_id, rport, &rport->start_work); - - if (unlikely(!queue_work(lport->link_event_wq, &rport->start_work))) { - atomic_inc(&lport->add_start_work_failed); - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ERR, - "[err]Port(0x%x) RPort(0x%x_0x%p) put INI link up to work_queue failed", - lport->port_id, rport->nport_id, rport); - } -} - -void unf_update_lport_state_by_linkup_event(struct unf_lport *lport, - struct unf_rport *rport, - u32 rport_att) -{ - /* Report R_Port Link Up/Down Event */ - ulong flag = 0; - enum unf_port_state lport_state = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - spin_lock_irqsave(&rport->rport_state_lock, flag); - - /* 1. R_Port does not has TGT mode any more */ - if (((rport_att & UNF_FC4_FRAME_PARM_3_TGT) == 0) && - rport->lport_ini_state == UNF_PORT_STATE_LINKUP) { - rport->last_lport_ini_state = rport->lport_ini_state; - rport->lport_ini_state = UNF_PORT_STATE_LINKDOWN; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) RPort(0x%x) does not have TGT attribute(0x%x) any more", - lport->port_id, rport->nport_id, rport_att); - } - - /* 2. R_Port with TGT mode, L_Port with INI mode */ - if ((rport_att & UNF_FC4_FRAME_PARM_3_TGT) && - (lport->options & UNF_FC4_FRAME_PARM_3_INI)) { - rport->last_lport_ini_state = rport->lport_ini_state; - rport->lport_ini_state = UNF_PORT_STATE_LINKUP; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[warn]Port(0x%x) update INI state with last(0x%x) and now(0x%x)", - lport->port_id, rport->last_lport_ini_state, - rport->lport_ini_state); - } - - /* 3. 
Report L_Port INI/TGT Down/Up event to SCSI */ - if (rport->last_lport_ini_state == rport->lport_ini_state) { - if (rport->nport_id < UNF_FC_FID_DOM_MGR) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) RPort(0x%x %p) INI state(0x%x) has not been changed", - lport->port_id, rport->nport_id, rport, - rport->lport_ini_state); - } - - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - return; - } - - lport_state = rport->lport_ini_state; - - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - switch (lport_state) { - case UNF_PORT_STATE_LINKDOWN: - unf_report_ini_linkwown_event(lport, rport); - break; - case UNF_PORT_STATE_LINKUP: - unf_report_ini_linkup_event(lport, rport); - break; - default: - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) with unknown link status(0x%x)", - lport->port_id, rport->lport_ini_state); - break; - } -} - -static void unf_rport_callback(void *rport, void *lport, u32 result) -{ - struct unf_rport *unf_rport = NULL; - struct unf_lport *unf_lport = NULL; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(rport); - FC_CHECK_RETURN_VOID(lport); - unf_rport = (struct unf_rport *)rport; - unf_lport = (struct unf_lport *)lport; - - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport->last_lport_ini_state = unf_rport->lport_ini_state; - unf_rport->lport_ini_state = UNF_PORT_STATE_LINKDOWN; - unf_rport->last_lport_tgt_state = unf_rport->lport_tgt_state; - unf_rport->lport_tgt_state = UNF_PORT_STATE_LINKDOWN; - - /* Report R_Port Link Down Event to scsi */ - if (unf_rport->last_lport_ini_state == unf_rport->lport_ini_state) { - if (unf_rport->nport_id < UNF_FC_FID_DOM_MGR) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) RPort(0x%x %p) INI state(0x%x) has not been changed", - unf_lport->port_id, unf_rport->nport_id, - unf_rport, unf_rport->lport_ini_state); - } - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - return; - } - - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - unf_report_ini_linkwown_event(unf_lport, unf_rport); -} - -static void unf_rport_recovery_timeout(struct work_struct *work) -{ - struct unf_lport *lport = NULL; - struct unf_rport *rport = NULL; - u32 ret = RETURN_OK; - ulong flag = 0; - enum unf_rport_login_state rp_state = UNF_RPORT_ST_INIT; - - FC_CHECK_RETURN_VOID(work); - - rport = container_of(work, struct unf_rport, recovery_work.work); - if (unlikely(!rport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]RPort is NULL"); - - return; - } - - lport = rport->lport; - if (unlikely(!lport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]RPort(0x%x) Port is NULL", rport->nport_id); - - /* for timer */ - unf_rport_ref_dec(rport); - return; - } - - spin_lock_irqsave(&rport->rport_state_lock, flag); - rp_state = rport->rp_state; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x_0x%x) RPort(0x%x) state(0x%x) recovery timer timeout", - lport->port_id, lport->nport_id, rport->nport_id, rp_state); - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - switch (rp_state) { - case UNF_RPORT_ST_PLOGI_WAIT: - if ((lport->act_topo == UNF_ACT_TOP_P2P_DIRECT && - lport->port_name > rport->port_name) || - lport->act_topo != UNF_ACT_TOP_P2P_DIRECT) { - /* P2P: Name is master with P2P_D - * or has INI Mode - */ - ret = unf_send_plogi(rport->lport, rport); - } - break; - case UNF_RPORT_ST_PRLI_WAIT: - ret = unf_send_prli(rport->lport, rport, ELS_PRLI); - if (ret != RETURN_OK) - unf_rport_error_recovery(rport); - fallthrough; - 
default: - break; - } - - if (ret != RETURN_OK) - unf_rport_error_recovery(rport); - - /* company with timer */ - unf_rport_ref_dec(rport); -} - -void unf_schedule_closing_work(struct unf_lport *lport, struct unf_rport *rport) -{ - ulong flags = 0; - struct unf_rport_scsi_id_image *rport_scsi_table = NULL; - struct unf_wwpn_rport_info *wwn_rport_info = NULL; - u32 scsi_id = 0; - u32 ret = 0; - u32 delay = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - delay = (u32)(unf_get_link_lose_tmo(lport) * 1000); - - rport_scsi_table = &lport->rport_scsi_table; - scsi_id = rport->scsi_id; - spin_lock_irqsave(&rport->rport_state_lock, flags); - - /* 1. Cancel recovery_work */ - if (cancel_delayed_work(&rport->recovery_work)) { - atomic_dec(&rport->rport_ref_cnt); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x_0x%x) RPort(0x%x_0x%p) cancel recovery work succeed", - lport->port_id, lport->nport_id, rport->nport_id, rport); - } - - /* 2. Cancel Open_work */ - if (cancel_delayed_work(&rport->open_work)) { - atomic_dec(&rport->rport_ref_cnt); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x_0x%x) RPort(0x%x_0x%p) cancel open work succeed", - lport->port_id, lport->nport_id, rport->nport_id, rport); - } - - spin_unlock_irqrestore(&rport->rport_state_lock, flags); - - /* 3. Work in-queue (switch to thread context) */ - if (!queue_work(lport->link_event_wq, &rport->closing_work)) { - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ERR, - "[warn]Port(0x%x) RPort(0x%x_0x%p) add link down to work queue failed", - lport->port_id, rport->nport_id, rport); - - atomic_inc(&lport->add_closing_work_failed); - } else { - spin_lock_irqsave(&rport->rport_state_lock, flags); - (void)unf_rport_ref_inc(rport); - spin_unlock_irqrestore(&rport->rport_state_lock, flags); - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_MAJOR, - "[info]Port(0x%x) RPort(0x%x_0x%p) add link down to work(%p) queue succeed", - lport->port_id, rport->nport_id, rport, - &rport->closing_work); - } - - if (rport->nport_id > UNF_FC_FID_DOM_MGR) - return; - - if (scsi_id >= UNF_MAX_SCSI_ID) { - scsi_id = unf_get_scsi_id_by_wwpn(lport, rport->port_name); - if (scsi_id >= UNF_MAX_SCSI_ID) { - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_WARN, - "[warn]Port(0x%x) RPort(0x%p) NPortId(0x%x) wwpn(0x%llx) option(0x%x) scsi_id(0x%x) is max than(0x%x)", - lport->port_id, rport, rport->nport_id, - rport->port_name, rport->options, scsi_id, - UNF_MAX_SCSI_ID); - - return; - } - } - - wwn_rport_info = &rport_scsi_table->wwn_rport_info_table[scsi_id]; - ret = queue_delayed_work(unf_wq, &wwn_rport_info->loss_tmo_work, - (ulong)msecs_to_jiffies((u32)delay)); - if (!ret) { - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_MAJOR, - "[info] Port(0x%x) add RPort(0x%p) NPortId(0x%x) scsi_id(0x%x) wwpn(0x%llx) loss timeout work failed", - lport->port_id, rport, rport->nport_id, scsi_id, - rport->port_name); - } -} - -static void unf_rport_closing_timeout(struct work_struct *work) -{ - /* closing --->>>(timeout)--->>> delete */ - struct unf_rport *rport = NULL; - struct unf_lport *lport = NULL; - struct unf_disc *disc = NULL; - ulong rport_flag = 0; - ulong disc_flag = 0; - void (*unf_rport_callback)(void *, void *, u32) = NULL; - enum unf_rport_login_state old_state; - - FC_CHECK_RETURN_VOID(work); - - /* Get R_Port & L_Port & Disc */ - rport = container_of(work, struct unf_rport, closing_work); - if (unlikely(!rport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]RPort is NULL"); - return; - } - - lport = rport->lport; - if (unlikely(!lport)) { - 
FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]RPort(0x%x_0x%p) Port is NULL", - rport->nport_id, rport); - - /* Release directly (for timer) */ - unf_rport_ref_dec(rport); - return; - } - disc = &lport->disc; - - spin_lock_irqsave(&rport->rport_state_lock, rport_flag); - - old_state = rport->rp_state; - /* 1. Update R_Port state: event_timeout --->>> state_delete */ - unf_rport_state_ma(rport, UNF_EVENT_RPORT_CLS_TIMEOUT); - - /* Check R_Port state */ - if (rport->rp_state != UNF_RPORT_ST_DELETE) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x_0x%x) RPort(0x%x_0x%p) closing timeout with error state(0x%x->0x%x)", - lport->port_id, lport->nport_id, rport->nport_id, - rport, old_state, rport->rp_state); - - spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); - - /* Dec ref_cnt for timer */ - unf_rport_ref_dec(rport); - return; - } - - unf_rport_callback = rport->unf_rport_callback; - spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); - - /* 2. Put R_Port to delete list */ - spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_flag); - list_del_init(&rport->entry_rport); - list_add_tail(&rport->entry_rport, &disc->list_delete_rports); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_flag); - - /* 3. Report rport link down event to scsi */ - if (unf_rport_callback) { - unf_rport_callback((void *)rport, (void *)rport->lport, RETURN_OK); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]RPort(0x%x) callback is NULL", - rport->nport_id); - } - - /* 4. Remove/delete R_Port */ - unf_rport_ref_dec(rport); - unf_rport_ref_dec(rport); -} - -static void unf_rport_linkup_to_scsi(struct work_struct *work) -{ - struct fc_rport_identifiers rport_ids; - struct fc_rport *rport = NULL; - ulong flags = RETURN_OK; - struct unf_wwpn_rport_info *wwn_rport_info = NULL; - struct unf_rport_scsi_id_image *rport_scsi_table = NULL; - u32 scsi_id = 0; - - struct unf_lport *lport = NULL; - struct unf_rport *unf_rport = NULL; - - FC_CHECK_RETURN_VOID(work); - - unf_rport = container_of(work, struct unf_rport, start_work); - if (unlikely(!unf_rport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]RPort is NULL for work(%p)", work); - return; - } - - lport = unf_rport->lport; - if (unlikely(!lport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]RPort(0x%x_0x%p) Port is NULL", - unf_rport->nport_id, unf_rport); - return; - } - - /* 1. Alloc R_Port SCSI_ID (image table) */ - unf_rport->scsi_id = unf_alloc_scsi_id(lport, unf_rport); - if (unlikely(unf_rport->scsi_id == INVALID_VALUE32)) { - atomic_inc(&lport->scsi_session_add_failed); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[err]Port(0x%x_0x%x) RPort(0x%x_0x%p) wwpn(0x%llx) scsi_id(0x%x) is invalid", - lport->port_id, lport->nport_id, - unf_rport->nport_id, unf_rport, - unf_rport->port_name, unf_rport->scsi_id); - - /* NOTE: return */ - return; - } - - /* 2. 
Add rport to scsi */ - scsi_id = unf_rport->scsi_id; - rport_ids.node_name = unf_rport->node_name; - rport_ids.port_name = unf_rport->port_name; - rport_ids.port_id = unf_rport->nport_id; - rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; - rport = fc_remote_port_add(lport->host_info.host, 0, &rport_ids); - if (unlikely(!rport)) { - atomic_inc(&lport->scsi_session_add_failed); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x_0x%x) RPort(0x%x_0x%p) wwpn(0x%llx) report link up to scsi failed", - lport->port_id, lport->nport_id, unf_rport->nport_id, unf_rport, - unf_rport->port_name); - - unf_free_scsi_id(lport, scsi_id); - return; - } - - /* 3. Change rport role */ - *((u32 *)rport->dd_data) = scsi_id; /* save local SCSI_ID to scsi rport */ - rport->supported_classes = FC_COS_CLASS3; - rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET; - rport->dev_loss_tmo = (u32)unf_get_link_lose_tmo(lport); /* default 30s */ - fc_remote_port_rolechg(rport, rport_ids.roles); - - /* 4. Save scsi rport info to local R_Port */ - spin_lock_irqsave(&unf_rport->rport_state_lock, flags); - unf_rport->rport = rport; - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flags); - - rport_scsi_table = &lport->rport_scsi_table; - spin_lock_irqsave(&rport_scsi_table->scsi_image_table_lock, flags); - wwn_rport_info = &rport_scsi_table->wwn_rport_info_table[scsi_id]; - wwn_rport_info->target_id = rport->scsi_target_id; - wwn_rport_info->rport = unf_rport; - atomic_set(&wwn_rport_info->scsi_state, UNF_SCSI_ST_ONLINE); - spin_unlock_irqrestore(&rport_scsi_table->scsi_image_table_lock, flags); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[event]Port(0x%x_0x%x) RPort(0x%x) wwpn(0x%llx) scsi_id(0x%x) link up to scsi succeed", - lport->port_id, lport->nport_id, unf_rport->nport_id, - unf_rport->port_name, scsi_id); - - atomic_inc(&lport->scsi_session_add_success); -} - -static void unf_rport_open_timeout(struct work_struct *work) -{ - struct unf_rport *rport = NULL; - struct unf_lport *lport = NULL; - ulong flags = 0; - - FC_CHECK_RETURN_VOID(work); - - rport = container_of(work, struct unf_rport, open_work.work); - if (!rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_WARN, "[warn]RPort is NULL"); - - return; - } - - spin_lock_irqsave(&rport->rport_state_lock, flags); - lport = rport->lport; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) RPort(0x%x) open work timeout with state(0x%x)", - lport->port_id, lport->nport_id, rport->nport_id, - rport->rp_state); - - /* NOTE: R_Port state check */ - if (rport->rp_state != UNF_RPORT_ST_PRLI_WAIT) { - spin_unlock_irqrestore(&rport->rport_state_lock, flags); - - /* Dec ref_cnt for timer case */ - unf_rport_ref_dec(rport); - return; - } - - /* Report R_Port Link Down event */ - unf_rport_state_ma(rport, UNF_EVENT_RPORT_LINK_DOWN); - spin_unlock_irqrestore(&rport->rport_state_lock, flags); - - unf_rport_enter_closing(rport); - /* Dec ref_cnt for timer case */ - unf_rport_ref_dec(rport); -} - -static u32 unf_alloc_index_for_rport(struct unf_lport *lport, struct unf_rport *rport) -{ - ulong rport_flag = 0; - ulong pool_flag = 0; - u32 alloc_indx = 0; - u32 max_rport = 0; - struct unf_rport_pool *rport_pool = NULL; - spinlock_t *rport_scsi_tb_lock = NULL; - - rport_pool = &lport->rport_pool; - rport_scsi_tb_lock = &rport_pool->rport_free_pool_lock; - max_rport = lport->low_level_func.lport_cfg_items.max_login; - - max_rport = max_rport > SPFC_DEFAULT_RPORT_INDEX ? 
SPFC_DEFAULT_RPORT_INDEX : max_rport; - - spin_lock_irqsave(rport_scsi_tb_lock, pool_flag); - while (alloc_indx < max_rport) { - if (!test_bit((int)alloc_indx, rport_pool->rpi_bitmap)) { - /* Case for SPFC */ - if (unlikely(atomic_read(&lport->lport_no_operate_flag) == UNF_LPORT_NOP)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) is within NOP", lport->port_id); - - spin_unlock_irqrestore(rport_scsi_tb_lock, pool_flag); - return UNF_RETURN_ERROR; - } - - spin_lock_irqsave(&rport->rport_state_lock, rport_flag); - rport->rport_index = alloc_indx; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) RPort(0x%x) alloc index(0x%x) succeed", - lport->port_id, alloc_indx, rport->nport_id); - - spin_unlock_irqrestore(&rport->rport_state_lock, rport_flag); - - /* Set (index) bit */ - set_bit((int)alloc_indx, rport_pool->rpi_bitmap); - - /* Break here */ - break; - } - alloc_indx++; - } - spin_unlock_irqrestore(rport_scsi_tb_lock, pool_flag); - - if (max_rport == alloc_indx) - return UNF_RETURN_ERROR; - return RETURN_OK; -} - -static void unf_check_rport_pool_status(struct unf_lport *lport) -{ - struct unf_lport *unf_lport = lport; - struct unf_rport_pool *rport_pool = NULL; - ulong flags = 0; - u32 max_rport = 0; - - FC_CHECK_RETURN_VOID(lport); - rport_pool = &unf_lport->rport_pool; - - spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flags); - max_rport = unf_lport->low_level_func.lport_cfg_items.max_login; - if (rport_pool->rport_pool_completion && - rport_pool->rport_pool_count == max_rport) { - complete(rport_pool->rport_pool_completion); - } - - spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flags); -} - -static void unf_init_rport_sq_num(struct unf_rport *rport, struct unf_lport *lport) -{ - u32 session_order; - u32 ssq_average_session_num; - - ssq_average_session_num = (lport->max_ssq_num - 1) / UNF_SQ_NUM_PER_SESSION; - session_order = (rport->rport_index) % ssq_average_session_num; - rport->sqn_base = (session_order * UNF_SQ_NUM_PER_SESSION); -} - -void unf_init_rport_params(struct unf_rport *rport, struct unf_lport *lport) -{ - struct unf_rport *unf_rport = rport; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(unf_rport); - FC_CHECK_RETURN_VOID(lport); - - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_set_rport_state(unf_rport, UNF_RPORT_ST_INIT); - unf_rport->unf_rport_callback = unf_rport_callback; - unf_rport->lport = lport; - unf_rport->fcp_conf_needed = false; - unf_rport->tape_support_needed = false; - unf_rport->max_retries = UNF_MAX_RETRY_COUNT; - unf_rport->logo_retries = 0; - unf_rport->retries = 0; - unf_rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; - unf_rport->last_lport_ini_state = UNF_PORT_STATE_LINKDOWN; - unf_rport->lport_ini_state = UNF_PORT_STATE_LINKDOWN; - unf_rport->last_lport_tgt_state = UNF_PORT_STATE_LINKDOWN; - unf_rport->lport_tgt_state = UNF_PORT_STATE_LINKDOWN; - unf_rport->node_name = 0; - unf_rport->port_name = INVALID_WWPN; - unf_rport->disc_done = 0; - unf_rport->scsi_id = INVALID_VALUE32; - unf_rport->data_thread = NULL; - sema_init(&unf_rport->task_sema, 0); - atomic_set(&unf_rport->rport_ref_cnt, 0); - atomic_set(&unf_rport->pending_io_cnt, 0); - unf_rport->rport_alloc_jifs = jiffies; - - unf_rport->ed_tov = UNF_DEFAULT_EDTOV + 500; - unf_rport->ra_tov = UNF_DEFAULT_RATOV; - - INIT_WORK(&unf_rport->closing_work, unf_rport_closing_timeout); - INIT_WORK(&unf_rport->start_work, unf_rport_linkup_to_scsi); - INIT_DELAYED_WORK(&unf_rport->recovery_work, unf_rport_recovery_timeout); - 
INIT_DELAYED_WORK(&unf_rport->open_work, unf_rport_open_timeout); - - atomic_inc(&unf_rport->rport_ref_cnt); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); -} - -static u32 unf_alloc_ll_rport_resource(struct unf_lport *lport, - struct unf_rport *rport, u32 nport_id) -{ - u32 ret = RETURN_OK; - struct unf_port_info rport_info = {0}; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - struct unf_qos_info *qos_info = NULL; - struct unf_lport *unf_lport = NULL; - ulong flag = 0; - - unf_lport = lport->root_lport; - - if (unf_lport->low_level_func.service_op.unf_alloc_rport_res) { - spin_lock_irqsave(&lport->qos_mgr_lock, flag); - rport_info.qos_level = lport->qos_level; - list_for_each_safe(node, next_node, &lport->list_qos_head) { - qos_info = (struct unf_qos_info *)list_entry(node, struct unf_qos_info, - entry_qos_info); - - if (qos_info && qos_info->nport_id == nport_id) { - rport_info.qos_level = qos_info->qos_level; - break; - } - } - - spin_unlock_irqrestore(&lport->qos_mgr_lock, flag); - - unf_init_rport_sq_num(rport, unf_lport); - - rport->qos_level = rport_info.qos_level; - rport_info.nport_id = nport_id; - rport_info.rport_index = rport->rport_index; - rport_info.local_nport_id = lport->nport_id; - rport_info.port_name = 0; - rport_info.cs_ctrl = UNF_CSCTRL_INVALID; - rport_info.sqn_base = rport->sqn_base; - - if (unf_lport->priority == UNF_PRIORITY_ENABLE) { - if (rport_info.qos_level == UNF_QOS_LEVEL_DEFAULT) - rport_info.cs_ctrl = UNF_CSCTRL_LOW; - else if (rport_info.qos_level == UNF_QOS_LEVEL_MIDDLE) - rport_info.cs_ctrl = UNF_CSCTRL_MIDDLE; - else if (rport_info.qos_level == UNF_QOS_LEVEL_HIGH) - rport_info.cs_ctrl = UNF_CSCTRL_HIGH; - } - - ret = unf_lport->low_level_func.service_op.unf_alloc_rport_res(unf_lport->fc_port, - &rport_info); - } else { - ret = RETURN_OK; - } - - return ret; -} - -static void *unf_add_rport_to_busy_list(struct unf_lport *lport, - struct unf_rport *new_rport, - u32 nport_id) -{ - struct unf_rport_pool *rport_pool = NULL; - struct unf_lport *unf_lport = NULL; - struct unf_disc *disc = NULL; - struct unf_rport *unf_new_rport = new_rport; - struct unf_rport *old_rport = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flag = 0; - spinlock_t *rport_free_lock = NULL; - spinlock_t *rport_busy_lock = NULL; - - FC_CHECK_RETURN_VALUE(lport, NULL); - FC_CHECK_RETURN_VALUE(new_rport, NULL); - - unf_lport = lport->root_lport; - disc = &lport->disc; - FC_CHECK_RETURN_VALUE(unf_lport, NULL); - rport_pool = &unf_lport->rport_pool; - rport_free_lock = &rport_pool->rport_free_pool_lock; - rport_busy_lock = &disc->rport_busy_pool_lock; - - spin_lock_irqsave(rport_busy_lock, flag); - list_for_each_safe(node, next_node, &disc->list_busy_rports) { - /* According to N_Port_ID */ - old_rport = list_entry(node, struct unf_rport, entry_rport); - if (old_rport->nport_id == nport_id) - break; - old_rport = NULL; - } - - if (old_rport) { - spin_unlock_irqrestore(rport_busy_lock, flag); - - /* Use old R_Port & Add new R_Port back to R_Port Pool */ - spin_lock_irqsave(rport_free_lock, flag); - clear_bit((int)unf_new_rport->rport_index, rport_pool->rpi_bitmap); - list_add_tail(&unf_new_rport->entry_rport, &rport_pool->list_rports_pool); - rport_pool->rport_pool_count++; - spin_unlock_irqrestore(rport_free_lock, flag); - - unf_check_rport_pool_status(unf_lport); - return (void *)old_rport; - } - spin_unlock_irqrestore(rport_busy_lock, flag); - if (nport_id != UNF_FC_FID_FLOGI) { - if 
(unf_alloc_ll_rport_resource(lport, unf_new_rport, nport_id) != RETURN_OK) { - /* Add new R_Port back to R_Port Pool */ - spin_lock_irqsave(rport_free_lock, flag); - clear_bit((int)unf_new_rport->rport_index, rport_pool->rpi_bitmap); - list_add_tail(&unf_new_rport->entry_rport, &rport_pool->list_rports_pool); - rport_pool->rport_pool_count++; - spin_unlock_irqrestore(rport_free_lock, flag); - unf_check_rport_pool_status(unf_lport); - - return NULL; - } - } - - spin_lock_irqsave(rport_busy_lock, flag); - /* Add new R_Port to busy list */ - list_add_tail(&unf_new_rport->entry_rport, &disc->list_busy_rports); - unf_new_rport->nport_id = nport_id; - unf_new_rport->local_nport_id = lport->nport_id; - spin_unlock_irqrestore(rport_busy_lock, flag); - unf_init_rport_params(unf_new_rport, lport); - - return (void *)unf_new_rport; -} - -void *unf_rport_get_free_and_init(void *lport, u32 port_type, u32 nport_id) -{ - struct unf_lport *unf_lport = NULL; - struct unf_rport_pool *rport_pool = NULL; - struct unf_disc *disc = NULL; - struct unf_disc *vport_disc = NULL; - struct unf_rport *rport = NULL; - struct list_head *list_head = NULL; - ulong flag = 0; - struct unf_disc_rport *disc_rport = NULL; - - FC_CHECK_RETURN_VALUE(lport, NULL); - unf_lport = ((struct unf_lport *)lport)->root_lport; - FC_CHECK_RETURN_VALUE(unf_lport, NULL); - - /* Check L_Port state: NOP */ - if (unlikely(atomic_read(&unf_lport->lport_no_operate_flag) == UNF_LPORT_NOP)) - return NULL; - - rport_pool = &unf_lport->rport_pool; - disc = &unf_lport->disc; - - /* 1. UNF_PORT_TYPE_DISC: Get from disc_rport_pool */ - if (port_type == UNF_PORT_TYPE_DISC) { - vport_disc = &((struct unf_lport *)lport)->disc; - /* NOTE: list_disc_rports_pool used with list_disc_rports_busy */ - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - if (!list_empty(&disc->disc_rport_mgr.list_disc_rports_pool)) { - /* Get & delete from Disc R_Port Pool & Add it to Busy list */ - list_head = UNF_OS_LIST_NEXT(&disc->disc_rport_mgr.list_disc_rports_pool); - list_del_init(list_head); - disc_rport = list_entry(list_head, struct unf_disc_rport, entry_rport); - disc_rport->nport_id = nport_id; - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - /* Add to list_disc_rports_busy */ - spin_lock_irqsave(&vport_disc->rport_busy_pool_lock, flag); - list_add_tail(list_head, &vport_disc->disc_rport_mgr.list_disc_rports_busy); - spin_unlock_irqrestore(&vport_disc->rport_busy_pool_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "Port(0x%x_0x%x) add nportid:0x%x to rportbusy list", - unf_lport->port_id, unf_lport->nport_id, - disc_rport->nport_id); - } else { - disc_rport = NULL; - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - } - - /* NOTE: return */ - return disc_rport; - } - - /* 2. UNF_PORT_TYPE_FC (rport_pool): Get from list_rports_pool */ - spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); - if (!list_empty(&rport_pool->list_rports_pool)) { - /* Get & delete from R_Port free Pool */ - list_head = UNF_OS_LIST_NEXT(&rport_pool->list_rports_pool); - list_del_init(list_head); - rport_pool->rport_pool_count--; - rport = list_entry(list_head, struct unf_rport, entry_rport); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) RPort pool is empty", - unf_lport->port_id, unf_lport->nport_id); - - spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag); - - return NULL; - } - spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag); - - /* 3. 
Alloc (& set bit) R_Port index */ - if (unf_alloc_index_for_rport(unf_lport, rport) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) allocate index for new RPort failed", - unf_lport->nport_id); - - /* Alloc failed: Add R_Port back to R_Port Pool */ - spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); - list_add_tail(&rport->entry_rport, &rport_pool->list_rports_pool); - rport_pool->rport_pool_count++; - spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag); - unf_check_rport_pool_status(unf_lport); - return NULL; - } - - /* 4. Add R_Port to busy list */ - rport = unf_add_rport_to_busy_list(lport, rport, nport_id); - - return (void *)rport; -} - -u32 unf_release_rport_res(struct unf_lport *lport, struct unf_rport *rport) -{ - u32 ret = UNF_RETURN_ERROR; - struct unf_port_info rport_info; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - memset(&rport_info, 0, sizeof(struct unf_port_info)); - - rport_info.rport_index = rport->rport_index; - rport_info.nport_id = rport->nport_id; - rport_info.port_name = rport->port_name; - rport_info.sqn_base = rport->sqn_base; - - /* 2. release R_Port(parent context/Session) resource */ - if (!lport->low_level_func.service_op.unf_release_rport_res) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) release rport resource function can't be NULL", - lport->port_id); - - return ret; - } - - ret = lport->low_level_func.service_op.unf_release_rport_res(lport->fc_port, &rport_info); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) rport_index(0x%x, %p) send release session CMND failed", - lport->port_id, rport_info.rport_index, rport); - } - - return ret; -} - -static void unf_reset_rport_attribute(struct unf_rport *rport) -{ - ulong flag = 0; - - FC_CHECK_RETURN_VOID(rport); - - spin_lock_irqsave(&rport->rport_state_lock, flag); - rport->unf_rport_callback = NULL; - rport->lport = NULL; - rport->node_name = INVALID_VALUE64; - rport->port_name = INVALID_WWPN; - rport->nport_id = INVALID_VALUE32; - rport->local_nport_id = INVALID_VALUE32; - rport->max_frame_size = UNF_MAX_FRAME_SIZE; - rport->ed_tov = UNF_DEFAULT_EDTOV; - rport->ra_tov = UNF_DEFAULT_RATOV; - rport->rport_index = INVALID_VALUE32; - rport->scsi_id = INVALID_VALUE32; - rport->rport_alloc_jifs = INVALID_VALUE64; - - /* ini or tgt */ - rport->options = 0; - - /* fcp conf */ - rport->fcp_conf_needed = false; - - /* special req retry times */ - rport->retries = 0; - rport->logo_retries = 0; - - /* special req retry times */ - rport->max_retries = UNF_MAX_RETRY_COUNT; - - /* for target mode */ - rport->session = NULL; - rport->last_lport_ini_state = UNF_PORT_STATE_LINKDOWN; - rport->lport_ini_state = UNF_PORT_STATE_LINKDOWN; - rport->rp_state = UNF_RPORT_ST_INIT; - rport->last_lport_tgt_state = UNF_PORT_STATE_LINKDOWN; - rport->lport_tgt_state = UNF_PORT_STATE_LINKDOWN; - rport->rscn_position = UNF_RPORT_NOT_NEED_PROCESS; - rport->disc_done = 0; - rport->sqn_base = 0; - - /* for scsi */ - rport->data_thread = NULL; - spin_unlock_irqrestore(&rport->rport_state_lock, flag); -} - -u32 unf_rport_remove(void *rport) -{ - struct unf_lport *lport = NULL; - struct unf_rport *unf_rport = NULL; - struct unf_rport_pool *rport_pool = NULL; - ulong flag = 0; - u32 rport_index = 0; - u32 nport_id = 0; - - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - unf_rport = (struct unf_rport *)rport; - lport = unf_rport->lport; - FC_CHECK_RETURN_VALUE(lport, 
UNF_RETURN_ERROR); - rport_pool = &((struct unf_lport *)lport->root_lport)->rport_pool; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Remove RPort(0x%p) with remote_nport_id(0x%x) local_nport_id(0x%x)", - unf_rport, unf_rport->nport_id, unf_rport->local_nport_id); - - /* 1. Terminate open exchange before rport remove: set ABORT tag */ - unf_cm_xchg_mgr_abort_io_by_id(lport, unf_rport, unf_rport->nport_id, lport->nport_id, 0); - - /* 2. Abort sfp exchange before rport remove */ - unf_cm_xchg_mgr_abort_sfs_by_id(lport, unf_rport, unf_rport->nport_id, lport->nport_id); - - /* 3. Release R_Port resource: session reset/delete */ - if (likely(unf_rport->nport_id != UNF_FC_FID_FLOGI)) - (void)unf_release_rport_res(lport, unf_rport); - - nport_id = unf_rport->nport_id; - - /* 4.1 Delete R_Port from disc destroy/delete list */ - spin_lock_irqsave(&lport->disc.rport_busy_pool_lock, flag); - list_del_init(&unf_rport->entry_rport); - spin_unlock_irqrestore(&lport->disc.rport_busy_pool_lock, flag); - - rport_index = unf_rport->rport_index; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[event]Port(0x%x) release RPort(0x%x_%p) with index(0x%x)", - lport->port_id, unf_rport->nport_id, unf_rport, - unf_rport->rport_index); - - unf_reset_rport_attribute(unf_rport); - - /* 4.2 Add rport to --->>> rport_pool (free pool) & clear bitmap */ - spin_lock_irqsave(&rport_pool->rport_free_pool_lock, flag); - if (unlikely(nport_id == UNF_FC_FID_FLOGI)) { - if (test_bit((int)rport_index, rport_pool->rpi_bitmap)) - clear_bit((int)rport_index, rport_pool->rpi_bitmap); - } - - list_add_tail(&unf_rport->entry_rport, &rport_pool->list_rports_pool); - rport_pool->rport_pool_count++; - spin_unlock_irqrestore(&rport_pool->rport_free_pool_lock, flag); - - unf_check_rport_pool_status((struct unf_lport *)lport->root_lport); - up(&unf_rport->task_sema); - - return RETURN_OK; -} - -u32 unf_rport_ref_inc(struct unf_rport *rport) -{ - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - if (atomic_read(&rport->rport_ref_cnt) <= 0) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Rport(0x%x) reference count is wrong %d", - rport->nport_id, - atomic_read(&rport->rport_ref_cnt)); - return UNF_RETURN_ERROR; - } - - atomic_inc(&rport->rport_ref_cnt); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Rport(0x%x) reference count is %d", rport->nport_id, - atomic_read(&rport->rport_ref_cnt)); - - return RETURN_OK; -} - -void unf_rport_ref_dec(struct unf_rport *rport) -{ - ulong flag = 0; - - FC_CHECK_RETURN_VOID(rport); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Rport(0x%x) reference count is %d", rport->nport_id, - atomic_read(&rport->rport_ref_cnt)); - - spin_lock_irqsave(&rport->rport_state_lock, flag); - if (atomic_dec_and_test(&rport->rport_ref_cnt)) { - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - (void)unf_rport_remove(rport); - } else { - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - } -} - -void unf_set_rport_state(struct unf_rport *rport, - enum unf_rport_login_state states) -{ - FC_CHECK_RETURN_VOID(rport); - - if (rport->rp_state != states) { - /* Reset R_Port retry count */ - rport->retries = 0; - } - - rport->rp_state = states; -} - -static enum unf_rport_login_state -unf_rport_stat_init(enum unf_rport_login_state old_state, - enum unf_rport_event event) -{ - enum unf_rport_login_state next_state = UNF_RPORT_ST_INIT; - - switch (event) { - case UNF_EVENT_RPORT_LOGO: - next_state = UNF_RPORT_ST_LOGO; - break; - - case UNF_EVENT_RPORT_ENTER_PLOGI: - next_state = 
UNF_RPORT_ST_PLOGI_WAIT; - break; - - case UNF_EVENT_RPORT_LINK_DOWN: - next_state = UNF_RPORT_ST_CLOSING; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_rport_login_state unf_rport_stat_plogi_wait(enum unf_rport_login_state old_state, - enum unf_rport_event event) -{ - enum unf_rport_login_state next_state = UNF_RPORT_ST_INIT; - - switch (event) { - case UNF_EVENT_RPORT_ENTER_PRLI: - next_state = UNF_RPORT_ST_PRLI_WAIT; - break; - - case UNF_EVENT_RPORT_LINK_DOWN: - next_state = UNF_RPORT_ST_CLOSING; - break; - - case UNF_EVENT_RPORT_LOGO: - next_state = UNF_RPORT_ST_LOGO; - break; - - case UNF_EVENT_RPORT_RECOVERY: - next_state = UNF_RPORT_ST_READY; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_rport_login_state unf_rport_stat_prli_wait(enum unf_rport_login_state old_state, - enum unf_rport_event event) -{ - enum unf_rport_login_state next_state = UNF_RPORT_ST_INIT; - - switch (event) { - case UNF_EVENT_RPORT_READY: - next_state = UNF_RPORT_ST_READY; - break; - - case UNF_EVENT_RPORT_LOGO: - next_state = UNF_RPORT_ST_LOGO; - break; - - case UNF_EVENT_RPORT_LINK_DOWN: - next_state = UNF_RPORT_ST_CLOSING; - break; - - case UNF_EVENT_RPORT_RECOVERY: - next_state = UNF_RPORT_ST_READY; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_rport_login_state unf_rport_stat_ready(enum unf_rport_login_state old_state, - enum unf_rport_event event) -{ - enum unf_rport_login_state next_state = UNF_RPORT_ST_INIT; - - switch (event) { - case UNF_EVENT_RPORT_LOGO: - next_state = UNF_RPORT_ST_LOGO; - break; - - case UNF_EVENT_RPORT_LINK_DOWN: - next_state = UNF_RPORT_ST_CLOSING; - break; - - case UNF_EVENT_RPORT_ENTER_PLOGI: - next_state = UNF_RPORT_ST_PLOGI_WAIT; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_rport_login_state unf_rport_stat_closing(enum unf_rport_login_state old_state, - enum unf_rport_event event) -{ - enum unf_rport_login_state next_state = UNF_RPORT_ST_INIT; - - switch (event) { - case UNF_EVENT_RPORT_CLS_TIMEOUT: - next_state = UNF_RPORT_ST_DELETE; - break; - - case UNF_EVENT_RPORT_RELOGIN: - next_state = UNF_RPORT_ST_INIT; - break; - - case UNF_EVENT_RPORT_RECOVERY: - next_state = UNF_RPORT_ST_READY; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -static enum unf_rport_login_state unf_rport_stat_logo(enum unf_rport_login_state old_state, - enum unf_rport_event event) -{ - enum unf_rport_login_state next_state = UNF_RPORT_ST_INIT; - - switch (event) { - case UNF_EVENT_RPORT_NORMAL_ENTER: - next_state = UNF_RPORT_ST_CLOSING; - break; - - case UNF_EVENT_RPORT_RECOVERY: - next_state = UNF_RPORT_ST_READY; - break; - - default: - next_state = old_state; - break; - } - - return next_state; -} - -void unf_rport_state_ma(struct unf_rport *rport, enum unf_rport_event event) -{ - enum unf_rport_login_state old_state = UNF_RPORT_ST_INIT; - enum unf_rport_login_state next_state = UNF_RPORT_ST_INIT; - - FC_CHECK_RETURN_VOID(rport); - - old_state = rport->rp_state; - - switch (rport->rp_state) { - case UNF_RPORT_ST_INIT: - next_state = unf_rport_stat_init(old_state, event); - break; - case UNF_RPORT_ST_PLOGI_WAIT: - next_state = unf_rport_stat_plogi_wait(old_state, event); - break; - case UNF_RPORT_ST_PRLI_WAIT: - next_state = unf_rport_stat_prli_wait(old_state, event); - break; - case UNF_RPORT_ST_LOGO: - next_state = 
unf_rport_stat_logo(old_state, event); - break; - case UNF_RPORT_ST_CLOSING: - next_state = unf_rport_stat_closing(old_state, event); - break; - case UNF_RPORT_ST_READY: - next_state = unf_rport_stat_ready(old_state, event); - break; - case UNF_RPORT_ST_DELETE: - default: - next_state = UNF_RPORT_ST_INIT; - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_MAJOR, "[info]RPort(0x%x) hold state(0x%x)", - rport->nport_id, rport->rp_state); - break; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MINOR, - "[info]RPort(0x%x) with oldstate(0x%x) event(0x%x) nextstate(0x%x)", - rport->nport_id, old_state, event, next_state); - - unf_set_rport_state(rport, next_state); -} - -void unf_clean_linkdown_rport(struct unf_lport *lport) -{ - /* for L_Port's R_Port(s) */ - struct unf_disc *disc = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - struct unf_rport *rport = NULL; - struct unf_lport *unf_lport = NULL; - ulong disc_lock_flag = 0; - ulong rport_lock_flag = 0; - - FC_CHECK_RETURN_VOID(lport); - disc = &lport->disc; - - /* for each busy R_Port */ - spin_lock_irqsave(&disc->rport_busy_pool_lock, disc_lock_flag); - list_for_each_safe(node, next_node, &disc->list_busy_rports) { - rport = list_entry(node, struct unf_rport, entry_rport); - - /* 1. Prevent process Repeatly: Closing */ - spin_lock_irqsave(&rport->rport_state_lock, rport_lock_flag); - if (rport->rp_state == UNF_RPORT_ST_CLOSING) { - spin_unlock_irqrestore(&rport->rport_state_lock, rport_lock_flag); - continue; - } - - /* 2. Increase ref_cnt to protect R_Port */ - if (unf_rport_ref_inc(rport) != RETURN_OK) { - spin_unlock_irqrestore(&rport->rport_state_lock, rport_lock_flag); - continue; - } - - /* 3. Update R_Port state: Link Down Event --->>> closing state - */ - unf_rport_state_ma(rport, UNF_EVENT_RPORT_LINK_DOWN); - - /* 4. Put R_Port from busy to destroy list */ - list_del_init(&rport->entry_rport); - list_add_tail(&rport->entry_rport, &disc->list_destroy_rports); - - unf_lport = rport->lport; - spin_unlock_irqrestore(&rport->rport_state_lock, rport_lock_flag); - - /* 5. Schedule Closing work (Enqueuing workqueue) */ - unf_schedule_closing_work(unf_lport, rport); - - /* 6. decrease R_Port ref_cnt (company with 2) */ - unf_rport_ref_dec(rport); - } - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, disc_lock_flag); -} - -void unf_rport_enter_closing(struct unf_rport *rport) -{ - /* - * call by - * 1. with RSCN processer - * 2. with LOGOUT processer - * * - * from - * 1. R_Port Link Down - * 2. R_Port enter LOGO - */ - ulong rport_lock_flag = 0; - u32 ret = UNF_RETURN_ERROR; - struct unf_lport *lport = NULL; - struct unf_disc *disc = NULL; - - FC_CHECK_RETURN_VOID(rport); - - /* 1. Increase ref_cnt to protect R_Port */ - spin_lock_irqsave(&rport->rport_state_lock, rport_lock_flag); - ret = unf_rport_ref_inc(rport); - if (ret != RETURN_OK) { - spin_unlock_irqrestore(&rport->rport_state_lock, rport_lock_flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]RPort(0x%x_0x%p) is removing and no need process", - rport->nport_id, rport); - - return; - } - - /* NOTE: R_Port state has been set(with closing) */ - - lport = rport->lport; - spin_unlock_irqrestore(&rport->rport_state_lock, rport_lock_flag); - - /* 2. 
Put R_Port from busy to destroy list */ - disc = &lport->disc; - spin_lock_irqsave(&disc->rport_busy_pool_lock, rport_lock_flag); - list_del_init(&rport->entry_rport); - list_add_tail(&rport->entry_rport, &disc->list_destroy_rports); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, rport_lock_flag); - - /* 3. Schedule Closing work (Enqueuing workqueue) */ - unf_schedule_closing_work(lport, rport); - - /* 4. dec R_Port ref_cnt */ - unf_rport_ref_dec(rport); -} - -void unf_rport_error_recovery(struct unf_rport *rport) -{ - ulong delay = 0; - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(rport); - - spin_lock_irqsave(&rport->rport_state_lock, flag); - - ret = unf_rport_ref_inc(rport); - if (ret != RETURN_OK) { - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]RPort(0x%x_0x%p) is removing and no need process", - rport->nport_id, rport); - return; - } - - /* Check R_Port state */ - if (rport->rp_state == UNF_RPORT_ST_CLOSING || - rport->rp_state == UNF_RPORT_ST_DELETE) { - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]RPort(0x%x_0x%p) offline and no need process", - rport->nport_id, rport); - - unf_rport_ref_dec(rport); - return; - } - - /* Check repeatability with recovery work */ - if (delayed_work_pending(&rport->recovery_work)) { - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]RPort(0x%x_0x%p) recovery work is running and no need process", - rport->nport_id, rport); - - unf_rport_ref_dec(rport); - return; - } - - /* NOTE: Re-login or Logout directly (recovery work) */ - if (rport->retries < rport->max_retries) { - rport->retries++; - delay = UNF_DEFAULT_EDTOV / 4; - - if (queue_delayed_work(unf_wq, &rport->recovery_work, - (ulong)msecs_to_jiffies((u32)delay))) { - /* Inc ref_cnt: corresponding to this work timer */ - (void)unf_rport_ref_inc(rport); - } - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]RPort(0x%x_0x%p) state(0x%x) retry login failed", - rport->nport_id, rport, rport->rp_state); - - /* Update R_Port state: LOGO event --->>> ST_LOGO */ - unf_rport_state_ma(rport, UNF_EVENT_RPORT_LOGO); - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - unf_rport_enter_logo(rport->lport, rport); - } - - unf_rport_ref_dec(rport); -} - -static u32 unf_rport_reuse_only(struct unf_rport *rport) -{ - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - spin_lock_irqsave(&rport->rport_state_lock, flag); - ret = unf_rport_ref_inc(rport); - if (ret != RETURN_OK) { - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - /* R_Port with delete state */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]RPort(0x%x_0x%p) is removing and no need process", - rport->nport_id, rport); - - return UNF_RETURN_ERROR; - } - - /* R_Port State check: delete */ - if (rport->rp_state == UNF_RPORT_ST_DELETE || - rport->rp_state == UNF_RPORT_ST_CLOSING) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]RPort(0x%x_0x%p) state(0x%x) is delete or closing no need process", - rport->nport_id, rport, rport->rp_state); - - ret = UNF_RETURN_ERROR; - } - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - unf_rport_ref_dec(rport); - - return ret; -} - -static u32 unf_rport_reuse_recover(struct unf_rport *rport) -{ - ulong flags = 0; - u32 ret = 
UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - spin_lock_irqsave(&rport->rport_state_lock, flags); - ret = unf_rport_ref_inc(rport); - if (ret != RETURN_OK) { - spin_unlock_irqrestore(&rport->rport_state_lock, flags); - - /* R_Port with delete state */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]RPort(0x%x_0x%p) is removing and no need process", - rport->nport_id, rport); - - return UNF_RETURN_ERROR; - } - - /* R_Port state check: delete */ - if (rport->rp_state == UNF_RPORT_ST_DELETE || - rport->rp_state == UNF_RPORT_ST_CLOSING) { - ret = UNF_RETURN_ERROR; - } - - /* Update R_Port state: recovery --->>> ready */ - unf_rport_state_ma(rport, UNF_EVENT_RPORT_RECOVERY); - spin_unlock_irqrestore(&rport->rport_state_lock, flags); - - unf_rport_ref_dec(rport); - - return ret; -} - -static u32 unf_rport_reuse_init(struct unf_rport *rport) -{ - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - spin_lock_irqsave(&rport->rport_state_lock, flag); - ret = unf_rport_ref_inc(rport); - if (ret != RETURN_OK) { - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - /* R_Port with delete state */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]RPort(0x%x_0x%p) is removing and no need process", - rport->nport_id, rport); - - return UNF_RETURN_ERROR; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]RPort(0x%x)'s state is 0x%x with use_init flag", - rport->nport_id, rport->rp_state); - - /* R_Port State check: delete */ - if (rport->rp_state == UNF_RPORT_ST_DELETE || - rport->rp_state == UNF_RPORT_ST_CLOSING) { - ret = UNF_RETURN_ERROR; - } else { - /* Update R_Port state: re-enter Init state */ - unf_set_rport_state(rport, UNF_RPORT_ST_INIT); - } - spin_unlock_irqrestore(&rport->rport_state_lock, flag); - - unf_rport_ref_dec(rport); - - return ret; -} - -struct unf_rport *unf_get_rport_by_nport_id(struct unf_lport *lport, - u32 nport_id) -{ - struct unf_lport *unf_lport = NULL; - struct unf_disc *disc = NULL; - struct unf_rport *rport = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flag = 0; - struct unf_rport *find_rport = NULL; - - FC_CHECK_RETURN_VALUE(lport, NULL); - unf_lport = (struct unf_lport *)lport; - disc = &unf_lport->disc; - - /* for each r_port from rport_busy_list: compare N_Port_ID */ - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - list_for_each_safe(node, next_node, &disc->list_busy_rports) { - rport = list_entry(node, struct unf_rport, entry_rport); - if (rport && rport->nport_id == nport_id) { - find_rport = rport; - break; - } - } - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - return find_rport; -} - -struct unf_rport *unf_get_rport_by_wwn(struct unf_lport *lport, u64 wwpn) -{ - struct unf_lport *unf_lport = NULL; - struct unf_disc *disc = NULL; - struct unf_rport *rport = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flag = 0; - struct unf_rport *find_rport = NULL; - - FC_CHECK_RETURN_VALUE(lport, NULL); - unf_lport = (struct unf_lport *)lport; - disc = &unf_lport->disc; - - /* for each r_port from busy_list: compare wwpn(port name) */ - spin_lock_irqsave(&disc->rport_busy_pool_lock, flag); - list_for_each_safe(node, next_node, &disc->list_busy_rports) { - rport = list_entry(node, struct unf_rport, entry_rport); - if (rport && rport->port_name == wwpn) { - find_rport = rport; - break; - } - } - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flag); - - return find_rport; 
-} - -struct unf_rport *unf_get_safe_rport(struct unf_lport *lport, - struct unf_rport *rport, - enum unf_rport_reuse_flag reuse_flag, - u32 nport_id) -{ - /* - * New add or plug - * * - * retry_flogi --->>> reuse_only - * name_server_register --->>> reuse_only - * SNS_plogi --->>> reuse_only - * enter_flogi --->>> reuse_only - * logout --->>> reuse_only - * flogi_handler --->>> reuse_only - * plogi_handler --->>> reuse_only - * adisc_handler --->>> reuse_recovery - * logout_handler --->>> reuse_init - * prlo_handler --->>> reuse_init - * login_with_loop --->>> reuse_only - * gffid_callback --->>> reuse_only - * delay_plogi --->>> reuse_only - * gffid_rjt --->>> reuse_only - * gffid_rsp_unknown --->>> reuse_only - * gpnid_acc --->>> reuse_init - * fdisc_callback --->>> reuse_only - * flogi_acc --->>> reuse_only - * plogi_acc --->>> reuse_only - * logo_callback --->>> reuse_init - * rffid_callback --->>> reuse_only - */ -#define UNF_AVOID_LINK_FLASH_TIME 3000 - - struct unf_rport *unf_rport = rport; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(lport, NULL); - - /* 1. Alloc New R_Port or Update R_Port Property */ - if (!unf_rport) { - /* If NULL, get/Alloc new node (R_Port from R_Port pool) - * directly - */ - unf_rport = unf_rport_get_free_and_init(lport, UNF_PORT_TYPE_FC, nport_id); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_INFO, - "[info]Port(0x%x) get exist RPort(0x%x) with state(0x%x) and reuse_flag(0x%x)", - lport->port_id, unf_rport->nport_id, - unf_rport->rp_state, reuse_flag); - - switch (reuse_flag) { - case UNF_RPORT_REUSE_ONLY: - ret = unf_rport_reuse_only(unf_rport); - if (ret != RETURN_OK) { - /* R_Port within delete list: need get new */ - unf_rport = unf_rport_get_free_and_init(lport, UNF_PORT_TYPE_FC, - nport_id); - } - break; - - case UNF_RPORT_REUSE_INIT: - ret = unf_rport_reuse_init(unf_rport); - if (ret != RETURN_OK) { - /* R_Port within delete list: need get new */ - unf_rport = unf_rport_get_free_and_init(lport, UNF_PORT_TYPE_FC, - nport_id); - } - break; - - case UNF_RPORT_REUSE_RECOVER: - ret = unf_rport_reuse_recover(unf_rport); - if (ret != RETURN_OK) { - /* R_Port within delete list, - * NOTE: do nothing - */ - unf_rport = NULL; - } - break; - - default: - break; - } - } // end else: R_Port != NULL - - return unf_rport; -} - -u32 unf_get_port_feature(u64 wwpn) -{ - struct unf_rport_feature_recard *port_fea = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - ulong flags = 0; - struct list_head list_temp_node; - struct list_head *list_busy_head = NULL; - struct list_head *list_free_head = NULL; - spinlock_t *feature_lock = NULL; - - list_busy_head = &port_feature_pool->list_busy_head; - list_free_head = &port_feature_pool->list_free_head; - feature_lock = &port_feature_pool->port_fea_pool_lock; - spin_lock_irqsave(feature_lock, flags); - list_for_each_safe(node, next_node, list_busy_head) { - port_fea = list_entry(node, struct unf_rport_feature_recard, entry_feature); - - if (port_fea->wwpn == wwpn) { - list_del(&port_fea->entry_feature); - list_add(&port_fea->entry_feature, list_busy_head); - spin_unlock_irqrestore(feature_lock, flags); - - return port_fea->port_feature; - } - } - - list_for_each_safe(node, next_node, list_free_head) { - port_fea = list_entry(node, struct unf_rport_feature_recard, entry_feature); - - if (port_fea->wwpn == wwpn) { - list_del(&port_fea->entry_feature); - list_add(&port_fea->entry_feature, list_busy_head); - spin_unlock_irqrestore(feature_lock, flags); - - return port_fea->port_feature; 
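The reuse helpers removed above (unf_rport_reuse_only/recover/init) all share one shape: take the R_Port state spinlock, try to grab a reference, refuse reuse if the port is already closing or deleted, and drop the reference on every exit path. The following is a minimal, hypothetical userspace sketch of that pattern only; a pthread mutex and a C11 atomic stand in for the spinlock and kernel refcount, and all demo_* names are illustrative, not the driver's.

/* Simplified model of the lock / ref_inc / state-check / ref_dec pattern. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum demo_state { DEMO_ST_READY, DEMO_ST_CLOSING, DEMO_ST_DELETE };

struct demo_rport {
	pthread_mutex_t state_lock;
	atomic_int ref_cnt;            /* 0 means the object is being torn down */
	enum demo_state state;
};

static bool demo_ref_inc(struct demo_rport *rp)
{
	/* Only succeed while at least one reference is still held. */
	int old = atomic_load(&rp->ref_cnt);

	while (old > 0) {
		if (atomic_compare_exchange_weak(&rp->ref_cnt, &old, old + 1))
			return true;
	}
	return false;
}

static void demo_ref_dec(struct demo_rport *rp)
{
	atomic_fetch_sub(&rp->ref_cnt, 1);
}

/* Reusable only if the port is neither closing nor deleted. */
static bool demo_reuse_only(struct demo_rport *rp)
{
	bool ok;

	pthread_mutex_lock(&rp->state_lock);
	if (!demo_ref_inc(rp)) {
		pthread_mutex_unlock(&rp->state_lock);
		return false;          /* already being removed */
	}
	ok = (rp->state != DEMO_ST_CLOSING && rp->state != DEMO_ST_DELETE);
	pthread_mutex_unlock(&rp->state_lock);

	demo_ref_dec(rp);
	return ok;
}

int main(void)
{
	struct demo_rport rp = {
		.state_lock = PTHREAD_MUTEX_INITIALIZER,
		.state = DEMO_ST_READY,
	};

	atomic_init(&rp.ref_cnt, 1);
	printf("reusable while ready:   %d\n", demo_reuse_only(&rp));
	rp.state = DEMO_ST_CLOSING;
	printf("reusable while closing: %d\n", demo_reuse_only(&rp));
	return 0;
}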
- } - } - - /* can't find wwpn */ - if (list_empty(list_free_head)) { - /* free is empty, transport busy to free */ - list_temp_node = port_feature_pool->list_free_head; - port_feature_pool->list_free_head = port_feature_pool->list_busy_head; - port_feature_pool->list_busy_head = list_temp_node; - } - - port_fea = list_entry(UNF_OS_LIST_PREV(list_free_head), - struct unf_rport_feature_recard, - entry_feature); - list_del(&port_fea->entry_feature); - list_add(&port_fea->entry_feature, list_busy_head); - - port_fea->wwpn = wwpn; - port_fea->port_feature = UNF_PORT_MODE_UNKNOWN; - - spin_unlock_irqrestore(feature_lock, flags); - return UNF_PORT_MODE_UNKNOWN; -} - -void unf_update_port_feature(u64 wwpn, u32 port_feature) -{ - struct unf_rport_feature_recard *port_fea = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - struct list_head *busy_head = NULL; - struct list_head *free_head = NULL; - ulong flags = 0; - spinlock_t *feature_lock = NULL; - - feature_lock = &port_feature_pool->port_fea_pool_lock; - busy_head = &port_feature_pool->list_busy_head; - free_head = &port_feature_pool->list_free_head; - - spin_lock_irqsave(feature_lock, flags); - list_for_each_safe(node, next_node, busy_head) { - port_fea = list_entry(node, struct unf_rport_feature_recard, entry_feature); - - if (port_fea->wwpn == wwpn) { - port_fea->port_feature = port_feature; - list_del(&port_fea->entry_feature); - list_add(&port_fea->entry_feature, busy_head); - spin_unlock_irqrestore(feature_lock, flags); - - return; - } - } - - list_for_each_safe(node, next_node, free_head) { - port_fea = list_entry(node, struct unf_rport_feature_recard, entry_feature); - - if (port_fea->wwpn == wwpn) { - port_fea->port_feature = port_feature; - list_del(&port_fea->entry_feature); - list_add(&port_fea->entry_feature, busy_head); - - spin_unlock_irqrestore(feature_lock, flags); - - return; - } - } - - spin_unlock_irqrestore(feature_lock, flags); -} diff --git a/drivers/scsi/spfc/common/unf_rport.h b/drivers/scsi/spfc/common/unf_rport.h deleted file mode 100644 index a9d58cb29b8a..000000000000 --- a/drivers/scsi/spfc/common/unf_rport.h +++ /dev/null @@ -1,301 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_RPORT_H -#define UNF_RPORT_H - -#include "unf_type.h" -#include "unf_common.h" -#include "unf_lport.h" - -extern struct unf_rport_feature_pool *port_feature_pool; - -#define UNF_MAX_SCSI_ID 2048 -#define UNF_LOSE_TMO 30 -#define UNF_RPORT_INVALID_INDEX 0xffff - -/* RSCN compare DISC list with local RPort macro */ -#define UNF_RPORT_NEED_PROCESS 0x1 -#define UNF_RPORT_ONLY_IN_DISC_PROCESS 0x2 -#define UNF_RPORT_ONLY_IN_LOCAL_PROCESS 0x3 -#define UNF_RPORT_IN_DISC_AND_LOCAL_PROCESS 0x4 -#define UNF_RPORT_NOT_NEED_PROCESS 0x5 - -#define UNF_ECHO_SEND_MAX_TIMES 1 - -/* csctrl level value */ -#define UNF_CSCTRL_LOW 0x81 -#define UNF_CSCTRL_MIDDLE 0x82 -#define UNF_CSCTRL_HIGH 0x83 -#define UNF_CSCTRL_INVALID 0x0 - -enum unf_rport_login_state { - UNF_RPORT_ST_INIT = 0x1000, /* initialized */ - UNF_RPORT_ST_PLOGI_WAIT, /* waiting for PLOGI completion */ - UNF_RPORT_ST_PRLI_WAIT, /* waiting for PRLI completion */ - UNF_RPORT_ST_READY, /* ready for use */ - UNF_RPORT_ST_LOGO, /* port logout sent */ - UNF_RPORT_ST_CLOSING, /* being closed */ - UNF_RPORT_ST_DELETE, /* port being deleted */ - UNF_RPORT_ST_BUTT -}; - -enum unf_rport_event { - UNF_EVENT_RPORT_NORMAL_ENTER = 0x9000, - UNF_EVENT_RPORT_ENTER_PLOGI = 0x9001, - UNF_EVENT_RPORT_ENTER_PRLI = 
0x9002, - UNF_EVENT_RPORT_READY = 0x9003, - UNF_EVENT_RPORT_LOGO = 0x9004, - UNF_EVENT_RPORT_CLS_TIMEOUT = 0x9005, - UNF_EVENT_RPORT_RECOVERY = 0x9006, - UNF_EVENT_RPORT_RELOGIN = 0x9007, - UNF_EVENT_RPORT_LINK_DOWN = 0x9008, - UNF_EVENT_RPORT_BUTT -}; - -/* RPort local link state */ -enum unf_port_state { - UNF_PORT_STATE_LINKUP = 0x1001, - UNF_PORT_STATE_LINKDOWN = 0x1002 -}; - -enum unf_rport_reuse_flag { - UNF_RPORT_REUSE_ONLY = 0x1001, - UNF_RPORT_REUSE_INIT = 0x1002, - UNF_RPORT_REUSE_RECOVER = 0x1003 -}; - -struct unf_disc_rport { - /* RPort entry */ - struct list_head entry_rport; - - u32 nport_id; /* Remote port NPortID */ - u32 disc_done; /* 1:Disc done */ -}; - -struct unf_rport_feature_pool { - struct list_head list_busy_head; - struct list_head list_free_head; - void *port_feature_pool_addr; - spinlock_t port_fea_pool_lock; -}; - -struct unf_rport_feature_recard { - struct list_head entry_feature; - u64 wwpn; - u32 port_feature; - u32 reserved; -}; - -struct unf_os_thread_private_data { - struct list_head list; - spinlock_t spin_lock; - struct task_struct *thread; - unsigned int in_process; - unsigned int cpu_id; - atomic_t user_count; -}; - -/* Remote Port struct */ -struct unf_rport { - u32 max_frame_size; - u32 supported_classes; - - /* Dynamic Attributes */ - /* Remote Port loss timeout in seconds. */ - u32 dev_loss_tmo; - - u64 node_name; - u64 port_name; - u32 nport_id; /* Remote port NPortID */ - u32 local_nport_id; - - u32 roles; - - /* Remote port local INI state */ - enum unf_port_state lport_ini_state; - enum unf_port_state last_lport_ini_state; - - /* Remote port local TGT state */ - enum unf_port_state lport_tgt_state; - enum unf_port_state last_lport_tgt_state; - - /* Port Type,fc or fcoe */ - u32 port_type; - - /* RPort reference counter */ - atomic_t rport_ref_cnt; - - /* Pending IO count */ - atomic_t pending_io_cnt; - - /* RPort entry */ - struct list_head entry_rport; - - /* Port State,delay reclaim when uiRpState == complete. 
*/ - enum unf_rport_login_state rp_state; - u32 disc_done; /* 1:Disc done */ - - struct unf_lport *lport; - void *rport; - spinlock_t rport_state_lock; - - /* Port attribution */ - u32 ed_tov; - u32 ra_tov; - u32 options; /* ini or tgt */ - u32 last_report_link_up_options; - u32 fcp_conf_needed; /* INI Rport send FCP CONF flag */ - u32 tape_support_needed; /* INI tape support flag */ - u32 retries; /* special req retry times */ - u32 logo_retries; /* logo error recovery retry times */ - u32 max_retries; /* special req retry times */ - u64 rport_alloc_jifs; /* Rport alloc jiffies */ - - void *session; - - /* binding with SCSI */ - u32 scsi_id; - - /* disc list compare flag */ - u32 rscn_position; - - u32 rport_index; - - u32 sqn_base; - enum unf_rport_qos_level qos_level; - - /* RPort timer,closing status */ - struct work_struct closing_work; - - /* RPort timer,rport linkup */ - struct work_struct start_work; - - /* RPort timer,recovery */ - struct delayed_work recovery_work; - - /* RPort timer,TGT mode,PRLI waiting */ - struct delayed_work open_work; - - struct semaphore task_sema; - /* Callback after rport Ready/delete.[with state:ok/fail].Creat/free TGT session here */ - /* input : L_Port,R_Port,state:ready --creat session/delete--free session */ - void (*unf_rport_callback)(void *rport, void *lport, u32 result); - - struct unf_os_thread_private_data *data_thread; -}; - -#define UNF_IO_RESULT_CNT(scsi_table, scsi_id, io_result) \ - do { \ - if (likely(((io_result) < UNF_MAX_IO_RETURN_VALUE) && \ - ((scsi_id) < UNF_MAX_SCSI_ID) && \ - ((scsi_table)->wwn_rport_info_table) && \ - (((scsi_table)->wwn_rport_info_table[scsi_id].dfx_counter)))) {\ - atomic64_inc(&((scsi_table)->wwn_rport_info_table[scsi_id] \ - .dfx_counter->io_done_cnt[(io_result)])); \ - } else { \ - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, \ - UNF_ERR, \ - "[err] io return value(0x%x) or " \ - "scsi id(0x%x) is invalid", \ - io_result, scsi_id); \ - } \ - } while (0) - -#define UNF_SCSI_CMD_CNT(scsi_table, scsi_id, io_type) \ - do { \ - if (likely(((io_type) < UNF_MAX_SCSI_CMD) && \ - ((scsi_id) < UNF_MAX_SCSI_ID) && \ - ((scsi_table)->wwn_rport_info_table) && \ - (((scsi_table)->wwn_rport_info_table[scsi_id].dfx_counter)))) { \ - atomic64_inc(&(((scsi_table)->wwn_rport_info_table[scsi_id]) \ - .dfx_counter->scsi_cmd_cnt[io_type])); \ - } else { \ - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, \ - UNF_ERR, \ - "[err] scsi_cmd(0x%x) or scsi id(0x%x) " \ - "is invalid", \ - io_type, scsi_id); \ - } \ - } while (0) - -#define UNF_SCSI_ERROR_HANDLE_CNT(scsi_table, scsi_id, io_type) \ - do { \ - if (likely(((io_type) < UNF_SCSI_ERROR_HANDLE_BUTT) && \ - ((scsi_id) < UNF_MAX_SCSI_ID) && \ - ((scsi_table)->wwn_rport_info_table) && \ - (((scsi_table)->wwn_rport_info_table[scsi_id] \ - .dfx_counter)))) { \ - atomic_inc(&((scsi_table)->wwn_rport_info_table[scsi_id] \ - .dfx_counter->error_handle[io_type])); \ - } else { \ - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, \ - UNF_ERR, \ - "[err] scsi_cmd(0x%x) or scsi id(0x%x) " \ - "is invalid", \ - (io_type), (scsi_id)); \ - } \ - } while (0) - -#define UNF_SCSI_ERROR_HANDLE_RESULT_CNT(scsi_table, scsi_id, io_type) \ - do { \ - if (likely(((io_type) < UNF_SCSI_ERROR_HANDLE_BUTT) && \ - ((scsi_id) < UNF_MAX_SCSI_ID) && \ - ((scsi_table)->wwn_rport_info_table) &&\ - (((scsi_table)-> \ - wwn_rport_info_table[scsi_id].dfx_counter)))) { \ - atomic_inc(&( \ - (scsi_table) \ - ->wwn_rport_info_table[scsi_id] \ - .dfx_counter->error_handle_result[io_type])); \ - } else { \ - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, \ - UNF_ERR, \ - 
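The UNF_IO_RESULT_CNT/UNF_SCSI_CMD_CNT macros here bump per-scsi_id DFX counters only after range-checking the index, the counter code, and the table pointers, so a bogus value is logged instead of corrupting memory. Below is a stripped-down, hypothetical model of that bounds-checked counter macro; the DEMO_* names and table sizes are made up purely for illustration.

/* Range-check everything, then atomically bump the per-slot counter. */
#include <stdatomic.h>
#include <stdio.h>

#define DEMO_MAX_SCSI_ID   8u
#define DEMO_MAX_IO_RESULT 4u

static atomic_long io_done_cnt[DEMO_MAX_SCSI_ID][DEMO_MAX_IO_RESULT];

#define DEMO_IO_RESULT_CNT(scsi_id, io_result)                           \
	do {                                                             \
		if ((scsi_id) < DEMO_MAX_SCSI_ID &&                      \
		    (io_result) < DEMO_MAX_IO_RESULT)                    \
			atomic_fetch_add(&io_done_cnt[(scsi_id)][(io_result)], 1); \
		else                                                     \
			fprintf(stderr, "invalid scsi_id %u or result %u\n", \
				(unsigned)(scsi_id), (unsigned)(io_result)); \
	} while (0)

int main(void)
{
	DEMO_IO_RESULT_CNT(2u, 1u);     /* counted */
	DEMO_IO_RESULT_CNT(99u, 1u);    /* rejected and logged */
	printf("slot[2][1] = %ld\n", atomic_load(&io_done_cnt[2][1]));
	return 0;
}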
"[err] scsi_cmd(0x%x) or scsi id(0x%x) " \ - "is invalid", \ - io_type, scsi_id); \ - } \ - } while (0) - -void unf_rport_state_ma(struct unf_rport *rport, enum unf_rport_event event); -void unf_update_lport_state_by_linkup_event(struct unf_lport *lport, - struct unf_rport *rport, - u32 rport_att); - -void unf_set_rport_state(struct unf_rport *rport, enum unf_rport_login_state states); -void unf_rport_enter_closing(struct unf_rport *rport); -u32 unf_release_rport_res(struct unf_lport *lport, struct unf_rport *rport); -u32 unf_initrport_mgr_temp(struct unf_lport *lport); -void unf_clean_linkdown_rport(struct unf_lport *lport); -void unf_rport_error_recovery(struct unf_rport *rport); -struct unf_rport *unf_get_rport_by_nport_id(struct unf_lport *lport, u32 nport_id); -struct unf_rport *unf_get_rport_by_wwn(struct unf_lport *lport, u64 wwpn); -void unf_rport_enter_logo(struct unf_lport *lport, struct unf_rport *rport); -u32 unf_rport_ref_inc(struct unf_rport *rport); -void unf_rport_ref_dec(struct unf_rport *rport); - -struct unf_rport *unf_rport_set_qualifier_key_reuse(struct unf_lport *lport, - struct unf_rport *rport_by_nport_id, - struct unf_rport *rport_by_wwpn, - u64 wwpn, u32 sid); -void unf_rport_delay_login(struct unf_rport *rport); -struct unf_rport *unf_find_valid_rport(struct unf_lport *lport, u64 wwpn, - u32 sid); -void unf_rport_linkdown(struct unf_lport *lport, struct unf_rport *rport); -void unf_apply_for_session(struct unf_lport *lport, struct unf_rport *rport); -struct unf_rport *unf_get_safe_rport(struct unf_lport *lport, - struct unf_rport *rport, - enum unf_rport_reuse_flag reuse_flag, - u32 nport_id); -void *unf_rport_get_free_and_init(void *lport, u32 port_type, u32 nport_id); - -void unf_set_device_state(struct unf_lport *lport, u32 scsi_id, int scsi_state); -u32 unf_get_scsi_id_by_wwpn(struct unf_lport *lport, u64 wwpn); -u32 unf_get_device_state(struct unf_lport *lport, u32 scsi_id); -u32 unf_free_scsi_id(struct unf_lport *lport, u32 scsi_id); -void unf_schedule_closing_work(struct unf_lport *lport, struct unf_rport *rport); -void unf_sesion_loss_timeout(struct work_struct *work); -u32 unf_get_port_feature(u64 wwpn); -void unf_update_port_feature(u64 wwpn, u32 port_feature); - -#endif diff --git a/drivers/scsi/spfc/common/unf_scsi.c b/drivers/scsi/spfc/common/unf_scsi.c deleted file mode 100644 index 3615d95c77e9..000000000000 --- a/drivers/scsi/spfc/common/unf_scsi.c +++ /dev/null @@ -1,1462 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_type.h" -#include "unf_log.h" -#include "unf_scsi_common.h" -#include "unf_lport.h" -#include "unf_rport.h" -#include "unf_portman.h" -#include "unf_exchg.h" -#include "unf_exchg_abort.h" -#include "unf_npiv.h" -#include "unf_io.h" - -#define UNF_LUN_ID_MASK 0x00000000ffff0000 -#define UNF_CMD_PER_LUN 3 - -static int unf_scsi_queue_cmd(struct Scsi_Host *phost, struct scsi_cmnd *pcmd); -static int unf_scsi_abort_scsi_cmnd(struct scsi_cmnd *v_cmnd); -static int unf_scsi_device_reset_handler(struct scsi_cmnd *v_cmnd); -static int unf_scsi_bus_reset_handler(struct scsi_cmnd *v_cmnd); -static int unf_scsi_target_reset_handler(struct scsi_cmnd *v_cmnd); -static int unf_scsi_slave_alloc(struct scsi_device *sdev); -static void unf_scsi_destroy_slave(struct scsi_device *sdev); -static int unf_scsi_slave_configure(struct scsi_device *sdev); -static int unf_scsi_scan_finished(struct Scsi_Host *shost, unsigned long time); -static void unf_scsi_scan_start(struct Scsi_Host 
*shost); - -static struct scsi_transport_template *scsi_transport_template; -static struct scsi_transport_template *scsi_transport_template_v; - -struct unf_ini_error_code ini_error_code_table1[] = { - {UNF_IO_SUCCESS, UNF_SCSI_HOST(DID_OK)}, - {UNF_IO_ABORTED, UNF_SCSI_HOST(DID_ABORT)}, - {UNF_IO_FAILED, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_ABORT_ABTS, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_ABORT_LOGIN, UNF_SCSI_HOST(DID_NO_CONNECT)}, - {UNF_IO_ABORT_REET, UNF_SCSI_HOST(DID_RESET)}, - {UNF_IO_ABORT_FAILED, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_OUTOF_ORDER, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_FTO, UNF_SCSI_HOST(DID_TIME_OUT)}, - {UNF_IO_LINK_FAILURE, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_OVER_FLOW, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_RSP_OVER, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_LOST_FRAME, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_UNDER_FLOW, UNF_SCSI_HOST(DID_OK)}, - {UNF_IO_HOST_PROG_ERROR, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_SEST_PROG_ERROR, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_INVALID_ENTRY, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_ABORT_SEQ_NOT, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_REJECT, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_EDC_IN_ERROR, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_EDC_OUT_ERROR, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_UNINIT_KEK_ERR, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_DEK_OUTOF_RANGE, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_KEY_UNWRAP_ERR, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_KEY_TAG_ERR, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_KEY_ECC_ERR, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_BLOCK_SIZE_ERROR, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_ILLEGAL_CIPHER_MODE, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_CLEAN_UP, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_ABORTED_BY_TARGET, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_TRANSPORT_ERROR, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_LINK_FLASH, UNF_SCSI_HOST(DID_NO_CONNECT)}, - {UNF_IO_TIMEOUT, UNF_SCSI_HOST(DID_TIME_OUT)}, - {UNF_IO_DMA_ERROR, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_NO_LPORT, UNF_SCSI_HOST(DID_NO_CONNECT)}, - {UNF_IO_NO_XCHG, UNF_SCSI_HOST(DID_SOFT_ERROR)}, - {UNF_IO_SOFT_ERR, UNF_SCSI_HOST(DID_SOFT_ERROR)}, - {UNF_IO_PORT_LOGOUT, UNF_SCSI_HOST(DID_NO_CONNECT)}, - {UNF_IO_ERREND, UNF_SCSI_HOST(DID_ERROR)}, - {UNF_IO_DIF_ERROR, (UNF_SCSI_HOST(DID_OK) | UNF_SCSI_STATUS(SCSI_CHECK_CONDITION))}, - {UNF_IO_INCOMPLETE, UNF_SCSI_HOST(DID_IMM_RETRY)}, - {UNF_IO_DIF_REF_ERROR, (UNF_SCSI_HOST(DID_OK) | UNF_SCSI_STATUS(SCSI_CHECK_CONDITION))}, - {UNF_IO_DIF_GEN_ERROR, (UNF_SCSI_HOST(DID_OK) | UNF_SCSI_STATUS(SCSI_CHECK_CONDITION))} -}; - -u32 ini_err_code_table_cnt1 = sizeof(ini_error_code_table1) / sizeof(struct unf_ini_error_code); - -static void unf_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout) -{ - if (timeout) - rport->dev_loss_tmo = timeout; - else - rport->dev_loss_tmo = 1; -} - -static void unf_get_host_port_id(struct Scsi_Host *shost) -{ - struct unf_lport *unf_lport = NULL; - - unf_lport = (struct unf_lport *)shost->hostdata[0]; - if (unlikely(!unf_lport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, "[err]Port is null"); - return; - } - - fc_host_port_id(shost) = unf_lport->port_id; -} - -static void unf_get_host_speed(struct Scsi_Host *shost) -{ - struct unf_lport *unf_lport = NULL; - u32 speed = FC_PORTSPEED_UNKNOWN; - - unf_lport = (struct unf_lport *)shost->hostdata[0]; - if (unlikely(!unf_lport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, "[err]Port is null"); - return; - } - - switch (unf_lport->speed) { - case UNF_PORT_SPEED_2_G: - speed = FC_PORTSPEED_2GBIT; - break; - case UNF_PORT_SPEED_4_G: - speed = FC_PORTSPEED_4GBIT; - break; - case 
UNF_PORT_SPEED_8_G: - speed = FC_PORTSPEED_8GBIT; - break; - case UNF_PORT_SPEED_16_G: - speed = FC_PORTSPEED_16GBIT; - break; - case UNF_PORT_SPEED_32_G: - speed = FC_PORTSPEED_32GBIT; - break; - default: - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) with unknown speed(0x%x) for FC mode", - unf_lport->port_id, unf_lport->speed); - break; - } - - fc_host_speed(shost) = speed; -} - -static void unf_get_host_port_type(struct Scsi_Host *shost) -{ - struct unf_lport *unf_lport = NULL; - u32 port_type = FC_PORTTYPE_UNKNOWN; - - unf_lport = (struct unf_lport *)shost->hostdata[0]; - if (unlikely(!unf_lport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, "[err]Port is null"); - return; - } - - switch (unf_lport->act_topo) { - case UNF_ACT_TOP_PRIVATE_LOOP: - port_type = FC_PORTTYPE_LPORT; - break; - case UNF_ACT_TOP_PUBLIC_LOOP: - port_type = FC_PORTTYPE_NLPORT; - break; - case UNF_ACT_TOP_P2P_DIRECT: - port_type = FC_PORTTYPE_PTP; - break; - case UNF_ACT_TOP_P2P_FABRIC: - port_type = FC_PORTTYPE_NPORT; - break; - default: - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) with unknown topo type(0x%x) for FC mode", - unf_lport->port_id, unf_lport->act_topo); - break; - } - - fc_host_port_type(shost) = port_type; -} - -static void unf_get_symbolic_name(struct Scsi_Host *shost) -{ - u8 *name = NULL; - struct unf_lport *unf_lport = NULL; - - unf_lport = (struct unf_lport *)(uintptr_t)shost->hostdata[0]; - if (unlikely(!unf_lport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, "[err]Check l_port failed"); - return; - } - - name = fc_host_symbolic_name(shost); - if (name) - snprintf(name, FC_SYMBOLIC_NAME_SIZE, "SPFC_FW_RELEASE:%s SPFC_DRV_RELEASE:%s", - unf_lport->fw_version, SPFC_DRV_VERSION); -} - -static void unf_get_host_fabric_name(struct Scsi_Host *shost) -{ - struct unf_lport *unf_lport = NULL; - - unf_lport = (struct unf_lport *)shost->hostdata[0]; - - if (unlikely(!unf_lport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, "[err]Port is null"); - return; - } - fc_host_fabric_name(shost) = unf_lport->fabric_node_name; -} - -static void unf_get_host_port_state(struct Scsi_Host *shost) -{ - struct unf_lport *unf_lport = NULL; - enum fc_port_state port_state; - - unf_lport = (struct unf_lport *)shost->hostdata[0]; - if (unlikely(!unf_lport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, "[err]Port is null"); - return; - } - - switch (unf_lport->link_up) { - case UNF_PORT_LINK_DOWN: - port_state = FC_PORTSTATE_OFFLINE; - break; - case UNF_PORT_LINK_UP: - port_state = FC_PORTSTATE_ONLINE; - break; - default: - port_state = FC_PORTSTATE_UNKNOWN; - break; - } - - fc_host_port_state(shost) = port_state; -} - -static void unf_dev_loss_timeout_callbk(struct fc_rport *rport) -{ - /* - * NOTE: about rport->dd_data - * --->>> local SCSI_ID - * 1. Assignment during scsi rport link up - * 2. Released when scsi rport link down & timeout(30s) - * 3. 
Used during scsi do callback with slave_alloc function - */ - struct Scsi_Host *host = NULL; - struct unf_lport *unf_lport = NULL; - u32 scsi_id = 0; - - if (unlikely(!rport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, "[err]SCSI rport is null"); - return; - } - - host = rport_to_shost(rport); - if (unlikely(!host)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, "[err]Host is null"); - return; - } - - scsi_id = *(u32 *)(rport->dd_data); /* according to Local SCSI_ID */ - if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]rport(0x%p) scsi_id(0x%x) is max than(0x%x)", - rport, scsi_id, UNF_MAX_SCSI_ID); - return; - } - - unf_lport = (struct unf_lport *)host->hostdata[0]; - if (unf_is_lport_valid(unf_lport) == RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[event]Port(0x%x_0x%x) rport(0x%p) scsi_id(0x%x) target_id(0x%x) loss timeout", - unf_lport->port_id, unf_lport->nport_id, rport, - scsi_id, rport->scsi_target_id); - - atomic_inc(&unf_lport->session_loss_tmo); - - /* Free SCSI ID & set table state with DEAD */ - (void)unf_free_scsi_id(unf_lport, scsi_id); - unf_xchg_up_abort_io_by_scsi_id(unf_lport, scsi_id); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(%p) is invalid", unf_lport); - } - - *((u32 *)rport->dd_data) = INVALID_VALUE32; -} - -int unf_scsi_create_vport(struct fc_vport *fc_port, bool disabled) -{ - struct unf_lport *vport = NULL; - struct unf_lport *unf_lport = NULL; - struct Scsi_Host *shost = NULL; - struct vport_config vport_config = {0}; - - shost = vport_to_shost(fc_port); - - unf_lport = (struct unf_lport *)shost->hostdata[0]; - if (unf_is_lport_valid(unf_lport) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(%p) is invalid", unf_lport); - - return RETURN_ERROR; - } - - vport_config.port_name = fc_port->port_name; - - vport_config.port_mode = fc_port->roles; - - vport = unf_creat_vport(unf_lport, &vport_config); - if (!vport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) Create Vport failed on lldrive", - unf_lport->port_id); - - return RETURN_ERROR; - } - - fc_port->dd_data = vport; - vport->vport = fc_port; - - return RETURN_OK; -} - -int unf_scsi_delete_vport(struct fc_vport *fc_port) -{ - int ret = RETURN_ERROR; - struct unf_lport *vport = NULL; - - vport = (struct unf_lport *)fc_port->dd_data; - if (unf_is_lport_valid(vport) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]VPort(%p) is invalid or is removing", vport); - - fc_port->dd_data = NULL; - - return ret; - } - - ret = (int)unf_destroy_one_vport(vport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]VPort(0x%x) destroy failed on drive", vport->port_id); - - return ret; - } - - fc_port->dd_data = NULL; - return ret; -} - -struct fc_function_template function_template = { - .show_host_node_name = 1, - .show_host_port_name = 1, - .show_host_supported_classes = 1, - .show_host_supported_speeds = 1, - - .get_host_port_id = unf_get_host_port_id, - .show_host_port_id = 1, - .get_host_speed = unf_get_host_speed, - .show_host_speed = 1, - .get_host_port_type = unf_get_host_port_type, - .show_host_port_type = 1, - .get_host_symbolic_name = unf_get_symbolic_name, - .show_host_symbolic_name = 1, - .set_host_system_hostname = NULL, - .show_host_system_hostname = 1, - .get_host_fabric_name = unf_get_host_fabric_name, - .show_host_fabric_name = 1, - .get_host_port_state = unf_get_host_port_state, - .show_host_port_state = 1, - - .dd_fcrport_size = 
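The dev_loss_tmo callback above follows the dd_data lifecycle spelled out in its leading comment: a local scsi_id is stored per remote port at link-up, bounds-checked before use, and poisoned once the loss timer frees it. A minimal, hypothetical model of that lifecycle, with demo_* fields standing in for the fc_rport dd_data slot:

/* Store an index per remote port, validate it, poison it on loss timeout. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_SCSI_ID 16u
#define DEMO_INVALID_ID  0xffffffffu

struct demo_fc_rport {
	uint32_t dd_scsi_id;           /* stands in for *(u32 *)rport->dd_data */
};

static void demo_link_up(struct demo_fc_rport *rp, uint32_t scsi_id)
{
	rp->dd_scsi_id = scsi_id;
}

static void demo_dev_loss_timeout(struct demo_fc_rport *rp)
{
	uint32_t scsi_id = rp->dd_scsi_id;

	if (scsi_id >= DEMO_MAX_SCSI_ID) {   /* already poisoned or bogus */
		fprintf(stderr, "scsi_id 0x%x out of range\n", scsi_id);
		return;
	}
	printf("free scsi_id %u and abort its outstanding I/O\n", scsi_id);
	rp->dd_scsi_id = DEMO_INVALID_ID;    /* poison the slot */
}

int main(void)
{
	struct demo_fc_rport rp = { .dd_scsi_id = DEMO_INVALID_ID };

	demo_link_up(&rp, 5);
	demo_dev_loss_timeout(&rp);    /* frees id 5 */
	demo_dev_loss_timeout(&rp);    /* second call sees the poisoned slot */
	return 0;
}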
sizeof(void *), - .show_rport_supported_classes = 1, - - .get_starget_node_name = NULL, - .show_starget_node_name = 1, - .get_starget_port_name = NULL, - .show_starget_port_name = 1, - .get_starget_port_id = NULL, - .show_starget_port_id = 1, - - .set_rport_dev_loss_tmo = unf_set_rport_loss_tmo, - .show_rport_dev_loss_tmo = 0, - - .issue_fc_host_lip = NULL, - .dev_loss_tmo_callbk = unf_dev_loss_timeout_callbk, - .terminate_rport_io = NULL, - .get_fc_host_stats = NULL, - - .vport_create = unf_scsi_create_vport, - .vport_disable = NULL, - .vport_delete = unf_scsi_delete_vport, - .bsg_request = NULL, - .bsg_timeout = NULL, -}; - -struct fc_function_template function_template_v = { - .show_host_node_name = 1, - .show_host_port_name = 1, - .show_host_supported_classes = 1, - .show_host_supported_speeds = 1, - - .get_host_port_id = unf_get_host_port_id, - .show_host_port_id = 1, - .get_host_speed = unf_get_host_speed, - .show_host_speed = 1, - .get_host_port_type = unf_get_host_port_type, - .show_host_port_type = 1, - .get_host_symbolic_name = unf_get_symbolic_name, - .show_host_symbolic_name = 1, - .set_host_system_hostname = NULL, - .show_host_system_hostname = 1, - .get_host_fabric_name = unf_get_host_fabric_name, - .show_host_fabric_name = 1, - .get_host_port_state = unf_get_host_port_state, - .show_host_port_state = 1, - - .dd_fcrport_size = sizeof(void *), - .show_rport_supported_classes = 1, - - .get_starget_node_name = NULL, - .show_starget_node_name = 1, - .get_starget_port_name = NULL, - .show_starget_port_name = 1, - .get_starget_port_id = NULL, - .show_starget_port_id = 1, - - .set_rport_dev_loss_tmo = unf_set_rport_loss_tmo, - .show_rport_dev_loss_tmo = 0, - - .issue_fc_host_lip = NULL, - .dev_loss_tmo_callbk = unf_dev_loss_timeout_callbk, - .terminate_rport_io = NULL, - .get_fc_host_stats = NULL, - - .vport_create = NULL, - .vport_disable = NULL, - .vport_delete = NULL, - .bsg_request = NULL, - .bsg_timeout = NULL, -}; - -struct scsi_host_template scsi_host_template = { - .module = THIS_MODULE, - .name = "SPFC", - - .queuecommand = unf_scsi_queue_cmd, - .eh_timed_out = fc_eh_timed_out, - .eh_abort_handler = unf_scsi_abort_scsi_cmnd, - .eh_device_reset_handler = unf_scsi_device_reset_handler, - - .eh_target_reset_handler = unf_scsi_target_reset_handler, - .eh_bus_reset_handler = unf_scsi_bus_reset_handler, - .eh_host_reset_handler = NULL, - - .slave_configure = unf_scsi_slave_configure, - .slave_alloc = unf_scsi_slave_alloc, - .slave_destroy = unf_scsi_destroy_slave, - - .scan_finished = unf_scsi_scan_finished, - .scan_start = unf_scsi_scan_start, - - .this_id = -1, /* this_id: -1 */ - .cmd_per_lun = UNF_CMD_PER_LUN, - .shost_attrs = NULL, - .sg_tablesize = SG_ALL, - .max_sectors = UNF_MAX_SECTORS, - .supported_mode = MODE_INITIATOR, -}; - -void unf_unmap_prot_sgl(struct scsi_cmnd *cmnd) -{ - struct device *dev = NULL; - - if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) && spfc_dif_enable && - (scsi_prot_sg_count(cmnd))) { - dev = cmnd->device->host->dma_dev; - dma_unmap_sg(dev, scsi_prot_sglist(cmnd), - (int)scsi_prot_sg_count(cmnd), - cmnd->sc_data_direction); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "scsi done cmd:%p op:%u, difsglcount:%u", cmnd, - scsi_get_prot_op(cmnd), scsi_prot_sg_count(cmnd)); - } -} - -void unf_scsi_done(struct unf_scsi_cmnd *scsi_cmd) -{ - struct scsi_cmnd *cmd = NULL; - - cmd = (struct scsi_cmnd *)scsi_cmd->upper_cmnd; - FC_CHECK_RETURN_VOID(scsi_cmd); - FC_CHECK_RETURN_VOID(cmd); - FC_CHECK_RETURN_VOID(cmd->scsi_done); - scsi_set_resid(cmd, 
(int)scsi_cmd->resid); - - cmd->result = scsi_cmd->result; - scsi_dma_unmap(cmd); - unf_unmap_prot_sgl(cmd); - return cmd->scsi_done(cmd); -} - -static void unf_get_protect_op(struct scsi_cmnd *cmd, - struct unf_dif_control_info *dif_control_info) -{ - switch (scsi_get_prot_op(cmd)) { - /* OS-HBA: Unprotected, HBA-Target: Protected */ - case SCSI_PROT_READ_STRIP: - dif_control_info->protect_opcode |= UNF_DIF_ACTION_VERIFY_AND_DELETE; - break; - case SCSI_PROT_WRITE_INSERT: - dif_control_info->protect_opcode |= UNF_DIF_ACTION_INSERT; - break; - - /* OS-HBA: Protected, HBA-Target: Unprotected */ - case SCSI_PROT_READ_INSERT: - dif_control_info->protect_opcode |= UNF_DIF_ACTION_INSERT; - break; - case SCSI_PROT_WRITE_STRIP: - dif_control_info->protect_opcode |= UNF_DIF_ACTION_VERIFY_AND_DELETE; - break; - - /* OS-HBA: Protected, HBA-Target: Protected */ - case SCSI_PROT_READ_PASS: - case SCSI_PROT_WRITE_PASS: - dif_control_info->protect_opcode |= UNF_DIF_ACTION_VERIFY_AND_FORWARD; - break; - - default: - dif_control_info->protect_opcode |= UNF_DIF_ACTION_VERIFY_AND_FORWARD; - break; - } -} - -int unf_get_protect_mode(struct unf_lport *lport, struct scsi_cmnd *scsi_cmd, - struct unf_scsi_cmnd *unf_scsi_cmd) -{ - struct scsi_cmnd *cmd = NULL; - int dif_seg_cnt = 0; - struct unf_dif_control_info *dif_control_info = NULL; - - cmd = scsi_cmd; - dif_control_info = &unf_scsi_cmd->dif_control; - - unf_get_protect_op(cmd, dif_control_info); - - if (dif_sgl_mode) - dif_control_info->flags |= UNF_DIF_DOUBLE_SGL; - dif_control_info->flags |= ((cmd->device->sector_size) == SECTOR_SIZE_4096) - ? UNF_DIF_SECTSIZE_4KB : UNF_DIF_SECTSIZE_512; - dif_control_info->protect_opcode |= UNF_VERIFY_CRC_MASK | UNF_VERIFY_LBA_MASK; - dif_control_info->dif_sge_count = scsi_prot_sg_count(cmd); - dif_control_info->dif_sgl = scsi_prot_sglist(cmd); - dif_control_info->start_lba = cpu_to_le32(((uint32_t)(0xffffffff & scsi_get_lba(cmd)))); - - if (cmd->device->sector_size == SECTOR_SIZE_4096) - dif_control_info->start_lba = dif_control_info->start_lba >> UNF_SHIFT_3; - - if (scsi_prot_sg_count(cmd)) { - dif_seg_cnt = dma_map_sg(&lport->low_level_func.dev->dev, scsi_prot_sglist(cmd), - (int)scsi_prot_sg_count(cmd), cmd->sc_data_direction); - if (unlikely(!dif_seg_cnt)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) cmd:%p map dif sgl err", - lport->port_id, cmd); - return UNF_RETURN_ERROR; - } - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "build scsi cmd:%p op:%u,difsglcount:%u,difsegcnt:%u", cmd, - scsi_get_prot_op(cmd), scsi_prot_sg_count(cmd), - dif_seg_cnt); - return RETURN_OK; -} - -static u32 unf_get_rport_qos_level(struct scsi_cmnd *cmd, u32 scsi_id, - struct unf_rport_scsi_id_image *scsi_image_table) -{ - enum unf_rport_qos_level level = 0; - - if (!scsi_image_table->wwn_rport_info_table[scsi_id].lun_qos_level || - cmd->device->lun >= UNF_MAX_LUN_PER_TARGET) { - level = 0; - } else { - level = (scsi_image_table->wwn_rport_info_table[scsi_id] - .lun_qos_level[cmd->device->lun]); - } - return level; -} - -u32 unf_get_frame_entry_buf(void *up_cmnd, void *driver_sgl, void **upper_sgl, - u32 *port_id, u32 *index, char **buf, u32 *buf_len) -{ -#define SPFC_MAX_DMA_LENGTH (0x20000 - 1) - struct scatterlist *scsi_sgl = *upper_sgl; - - if (unlikely(!scsi_sgl)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Command(0x%p) can not get SGL.", up_cmnd); - return RETURN_ERROR; - } - *buf = (char *)sg_dma_address(scsi_sgl); - *buf_len = sg_dma_len(scsi_sgl); - *upper_sgl = (void *)sg_next(scsi_sgl); - if 
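unf_get_protect_op() above reduces the SCSI protection operation to one of three HBA actions, depending on which side of the transfer carries DIF metadata. The small, hypothetical table below reproduces that mapping with illustrative names; it is not the driver's enum or flag set.

/* Map protection operations to the HBA-side DIF action. */
#include <stdio.h>

enum demo_prot_op {
	DEMO_PROT_READ_STRIP,      /* OS side unprotected, target protected */
	DEMO_PROT_WRITE_INSERT,
	DEMO_PROT_READ_INSERT,     /* OS side protected, target unprotected */
	DEMO_PROT_WRITE_STRIP,
	DEMO_PROT_READ_PASS,       /* protected end to end */
	DEMO_PROT_WRITE_PASS,
};

static const char *demo_dif_action(enum demo_prot_op op)
{
	switch (op) {
	case DEMO_PROT_WRITE_INSERT:
	case DEMO_PROT_READ_INSERT:
		return "insert";
	case DEMO_PROT_READ_STRIP:
	case DEMO_PROT_WRITE_STRIP:
		return "verify and delete";
	case DEMO_PROT_READ_PASS:
	case DEMO_PROT_WRITE_PASS:
	default:
		return "verify and forward";
	}
}

int main(void)
{
	printf("WRITE_INSERT -> %s\n", demo_dif_action(DEMO_PROT_WRITE_INSERT));
	printf("READ_STRIP   -> %s\n", demo_dif_action(DEMO_PROT_READ_STRIP));
	printf("READ_PASS    -> %s\n", demo_dif_action(DEMO_PROT_READ_PASS));
	return 0;
}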
(unlikely((*buf_len > SPFC_MAX_DMA_LENGTH) || (*buf_len == 0))) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Command(0x%p) dmalen:0x%x is not support.", - up_cmnd, *buf_len); - return RETURN_ERROR; - } - - return RETURN_OK; -} - -static void unf_init_scsi_cmnd(struct Scsi_Host *host, struct scsi_cmnd *cmd, - struct unf_scsi_cmnd *scsi_cmnd, - struct unf_rport_scsi_id_image *scsi_image_table, - int datasegcnt) -{ - static atomic64_t count; - enum unf_rport_qos_level level = 0; - u32 scsi_id = 0; - - scsi_id = (u32)((u64)cmd->device->hostdata); - level = unf_get_rport_qos_level(cmd, scsi_id, scsi_image_table); - scsi_cmnd->scsi_host_id = host->host_no; /* save host_no to scsi_cmnd->scsi_host_id */ - scsi_cmnd->scsi_id = scsi_id; - scsi_cmnd->raw_lun_id = ((u64)cmd->device->lun << 16) & UNF_LUN_ID_MASK; - scsi_cmnd->data_direction = cmd->sc_data_direction; - scsi_cmnd->under_flow = cmd->underflow; - scsi_cmnd->cmnd_len = cmd->cmd_len; - scsi_cmnd->pcmnd = cmd->cmnd; - scsi_cmnd->transfer_len = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); - scsi_cmnd->sense_buflen = UNF_SCSI_SENSE_BUFFERSIZE; - scsi_cmnd->sense_buf = cmd->sense_buffer; - scsi_cmnd->time_out = 0; - scsi_cmnd->upper_cmnd = cmd; - scsi_cmnd->drv_private = (void *)(*(u64 *)shost_priv(host)); - scsi_cmnd->entry_count = datasegcnt; - scsi_cmnd->sgl = scsi_sglist(cmd); - scsi_cmnd->unf_ini_get_sgl_entry = unf_get_frame_entry_buf; - scsi_cmnd->done = unf_scsi_done; - scsi_cmnd->lun_id = (u8 *)&scsi_cmnd->raw_lun_id; - scsi_cmnd->err_code_table_cout = ini_err_code_table_cnt1; - scsi_cmnd->err_code_table = ini_error_code_table1; - scsi_cmnd->world_id = INVALID_WORLD_ID; - scsi_cmnd->cmnd_sn = atomic64_inc_return(&count); - scsi_cmnd->qos_level = level; - if (unlikely(scsi_cmnd->cmnd_sn == 0)) - scsi_cmnd->cmnd_sn = atomic64_inc_return(&count); -} - -static void unf_io_error_done(struct scsi_cmnd *cmd, - struct unf_rport_scsi_id_image *scsi_image_table, - u32 scsi_id, u32 result) -{ - cmd->result = (int)(result << UNF_SHIFT_16); - cmd->scsi_done(cmd); - if (scsi_image_table) - UNF_IO_RESULT_CNT(scsi_image_table, scsi_id, result); -} - -static bool unf_scan_device_cmd(struct scsi_cmnd *cmd) -{ - return ((cmd->cmnd[0] == INQUIRY) || (cmd->cmnd[0] == REPORT_LUNS)); -} - -static int unf_scsi_queue_cmd(struct Scsi_Host *phost, struct scsi_cmnd *pcmd) -{ - struct Scsi_Host *host = NULL; - struct scsi_cmnd *cmd = NULL; - struct unf_scsi_cmnd scsi_cmd = {0}; - u32 scsi_id = 0; - u32 scsi_state = 0; - int ret = SCSI_MLQUEUE_HOST_BUSY; - struct unf_lport *unf_lport = NULL; - struct fc_rport *rport = NULL; - struct unf_rport_scsi_id_image *scsi_image_table = NULL; - struct unf_rport *unf_rport = NULL; - u32 cmnd_result = 0; - u32 rport_state_err = 0; - bool scan_device_cmd = false; - int datasegcnt = 0; - - host = phost; - cmd = pcmd; - FC_CHECK_RETURN_VALUE(host, RETURN_ERROR); - FC_CHECK_RETURN_VALUE(cmd, RETURN_ERROR); - - /* Get L_Port from scsi_cmd */ - unf_lport = (struct unf_lport *)host->hostdata[0]; - if (unlikely(!unf_lport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Check l_port failed, cmd(%p)", cmd); - unf_io_error_done(cmd, scsi_image_table, scsi_id, DID_NO_CONNECT); - return 0; - } - - /* Check device/session local state by device_id */ - scsi_id = (u32)((u64)cmd->device->hostdata); - if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) scsi_id(0x%x) is max than %d", - unf_lport->port_id, scsi_id, UNF_MAX_SCSI_ID); - unf_io_error_done(cmd, scsi_image_table, scsi_id, 
DID_NO_CONNECT); - return 0; - } - - scsi_image_table = &unf_lport->rport_scsi_table; - UNF_SCSI_CMD_CNT(scsi_image_table, scsi_id, cmd->cmnd[0]); - - /* Get scsi r_port */ - rport = starget_to_rport(scsi_target(cmd->device)); - if (unlikely(!rport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) cmd(%p) to get scsi rport failed", - unf_lport->port_id, cmd); - unf_io_error_done(cmd, scsi_image_table, scsi_id, DID_NO_CONNECT); - return 0; - } - - if (unlikely(!scsi_image_table->wwn_rport_info_table)) { - FC_DRV_PRINT(UNF_LOG_ABNORMAL, UNF_WARN, - "[warn]LPort porid(0x%x) WwnRportInfoTable NULL", - unf_lport->port_id); - unf_io_error_done(cmd, scsi_image_table, scsi_id, DID_NO_CONNECT); - return 0; - } - - if (unlikely(unf_lport->port_removing)) { - FC_DRV_PRINT(UNF_LOG_ABNORMAL, UNF_WARN, - "[warn]Port(0x%x) scsi_id(0x%x) rport(0x%p) target_id(0x%x) cmd(0x%p) unf_lport removing", - unf_lport->port_id, scsi_id, rport, rport->scsi_target_id, cmd); - unf_io_error_done(cmd, scsi_image_table, scsi_id, DID_NO_CONNECT); - return 0; - } - - scsi_state = atomic_read(&scsi_image_table->wwn_rport_info_table[scsi_id].scsi_state); - if (unlikely(scsi_state != UNF_SCSI_ST_ONLINE)) { - if (scsi_state == UNF_SCSI_ST_OFFLINE) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) scsi_state(0x%x) scsi_id(0x%x) rport(0x%p) target_id(0x%x) cmd(0x%p), target is busy", - unf_lport->port_id, scsi_state, scsi_id, rport, - rport->scsi_target_id, cmd); - - scan_device_cmd = unf_scan_device_cmd(cmd); - /* report lun or inquiry cmd, if send failed, do not - * retry, prevent - * the scan_mutex in scsi host locked up by eachother - */ - if (scan_device_cmd) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) host(0x%x) scsi_id(0x%x) lun(0x%llx) cmd(0x%x) DID_NO_CONNECT", - unf_lport->port_id, host->host_no, scsi_id, - (u64)cmd->device->lun, cmd->cmnd[0]); - unf_io_error_done(cmd, scsi_image_table, scsi_id, DID_NO_CONNECT); - return 0; - } - - if (likely(scsi_image_table->wwn_rport_info_table)) { - if (likely(scsi_image_table->wwn_rport_info_table[scsi_id] - .dfx_counter)) { - atomic64_inc(&(scsi_image_table - ->wwn_rport_info_table[scsi_id] - .dfx_counter->target_busy)); - } - } - - /* Target busy: need scsi retry */ - return SCSI_MLQUEUE_TARGET_BUSY; - } - /* timeout(DEAD): scsi_done & return 0 & I/O error */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) scsi_id(0x%x) rport(0x%p) target_id(0x%x) cmd(0x%p), target is loss timeout", - unf_lport->port_id, scsi_id, rport, - rport->scsi_target_id, cmd); - unf_io_error_done(cmd, scsi_image_table, scsi_id, DID_NO_CONNECT); - return 0; - } - - if (scsi_sg_count(cmd)) { - datasegcnt = dma_map_sg(&unf_lport->low_level_func.dev->dev, scsi_sglist(cmd), - (int)scsi_sg_count(cmd), cmd->sc_data_direction); - if (unlikely(!datasegcnt)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) scsi_id(0x%x) rport(0x%p) target_id(0x%x) cmd(0x%p), dma map sg err", - unf_lport->port_id, scsi_id, rport, - rport->scsi_target_id, cmd); - unf_io_error_done(cmd, scsi_image_table, scsi_id, DID_BUS_BUSY); - return SCSI_MLQUEUE_HOST_BUSY; - } - } - - /* Construct local SCSI CMND info */ - unf_init_scsi_cmnd(host, cmd, &scsi_cmd, scsi_image_table, datasegcnt); - - if ((scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) && spfc_dif_enable) { - ret = unf_get_protect_mode(unf_lport, cmd, &scsi_cmd); - if (ret != RETURN_OK) { - unf_io_error_done(cmd, scsi_image_table, scsi_id, DID_BUS_BUSY); - scsi_dma_unmap(cmd); - return 
SCSI_MLQUEUE_HOST_BUSY; - } - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) host(0x%x) scsi_id(0x%x) lun(0x%llx) transfer length(0x%x) cmd_len(0x%x) direction(0x%x) cmd(0x%x) under_flow(0x%x) protect_opcode is (0x%x) dif_sgl_mode is %d, sector size(%d)", - unf_lport->port_id, host->host_no, scsi_id, (u64)cmd->device->lun, - scsi_cmd.transfer_len, scsi_cmd.cmnd_len, cmd->sc_data_direction, - scsi_cmd.pcmnd[0], scsi_cmd.under_flow, - scsi_cmd.dif_control.protect_opcode, dif_sgl_mode, - (cmd->device->sector_size)); - - /* Bind the Exchange address corresponding to scsi_cmd to - * scsi_cmd->host_scribble - */ - cmd->host_scribble = (unsigned char *)scsi_cmd.cmnd_sn; - ret = unf_cm_queue_command(&scsi_cmd); - if (ret != RETURN_OK) { - unf_rport = unf_find_rport_by_scsi_id(unf_lport, ini_error_code_table1, - ini_err_code_table_cnt1, - scsi_id, &cmnd_result); - rport_state_err = (!unf_rport) || - (unf_rport->lport_ini_state != UNF_PORT_STATE_LINKUP) || - (unf_rport->rp_state == UNF_RPORT_ST_CLOSING); - scan_device_cmd = unf_scan_device_cmd(cmd); - - /* report lun or inquiry cmd if send failed, do not - * retry,prevent the scan_mutex in scsi host locked up by - * eachother - */ - if (rport_state_err && scan_device_cmd) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) host(0x%x) scsi_id(0x%x) lun(0x%llx) cmd(0x%x) cmResult(0x%x) DID_NO_CONNECT", - unf_lport->port_id, host->host_no, scsi_id, - (u64)cmd->device->lun, cmd->cmnd[0], - cmnd_result); - unf_io_error_done(cmd, scsi_image_table, scsi_id, DID_NO_CONNECT); - scsi_dma_unmap(cmd); - unf_unmap_prot_sgl(cmd); - return 0; - } - - /* Host busy: scsi need to retry */ - ret = SCSI_MLQUEUE_HOST_BUSY; - if (likely(scsi_image_table->wwn_rport_info_table)) { - if (likely(scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter)) { - atomic64_inc(&(scsi_image_table->wwn_rport_info_table[scsi_id] - .dfx_counter->host_busy)); - } - } - scsi_dma_unmap(cmd); - unf_unmap_prot_sgl(cmd); - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) return(0x%x) to process INI IO falid", - unf_lport->port_id, ret); - } - return ret; -} - -static void unf_init_abts_tmf_scsi_cmd(struct scsi_cmnd *cmnd, - struct unf_scsi_cmnd *scsi_cmd, - bool abort_cmd) -{ - struct Scsi_Host *scsi_host = NULL; - - scsi_host = cmnd->device->host; - scsi_cmd->scsi_host_id = scsi_host->host_no; - scsi_cmd->scsi_id = (u32)((u64)cmnd->device->hostdata); - scsi_cmd->raw_lun_id = (u64)cmnd->device->lun; - scsi_cmd->upper_cmnd = cmnd; - scsi_cmd->drv_private = (void *)(*(u64 *)shost_priv(scsi_host)); - scsi_cmd->cmnd_sn = (u64)(cmnd->host_scribble); - scsi_cmd->lun_id = (u8 *)&scsi_cmd->raw_lun_id; - if (abort_cmd) { - scsi_cmd->done = unf_scsi_done; - scsi_cmd->world_id = INVALID_WORLD_ID; - } -} - -int unf_scsi_abort_scsi_cmnd(struct scsi_cmnd *cmnd) -{ - /* SCSI ABORT Command --->>> FC ABTS */ - struct unf_scsi_cmnd scsi_cmd = {0}; - int ret = FAILED; - struct unf_rport_scsi_id_image *scsi_image_table = NULL; - struct unf_lport *unf_lport = NULL; - u32 scsi_id = 0; - u32 err_handle = 0; - - FC_CHECK_RETURN_VALUE(cmnd, FAILED); - - unf_lport = (struct unf_lport *)cmnd->device->host->hostdata[0]; - scsi_id = (u32)((u64)cmnd->device->hostdata); - - if (unf_is_lport_valid(unf_lport) == RETURN_OK) { - scsi_image_table = &unf_lport->rport_scsi_table; - err_handle = UNF_SCSI_ABORT_IO_TYPE; - UNF_SCSI_ERROR_HANDLE_CNT(scsi_image_table, scsi_id, err_handle); - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[abort]Port(0x%x) scsi_id(0x%x) lun_id(0x%x) 
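The error paths above complete a command by packing the transport-level DID_* code into the host byte of cmd->result (bits 16-23), which is what the UNF_SCSI_HOST()/UNF_SCSI_STATUS() macros in unf_scsi_common.h encode; a DIF failure keeps DID_OK in the host byte but sets CHECK CONDITION in the status byte. A tiny, hypothetical illustration of that result layout, using DEMO_* constants rather than the kernel's:

/* Compose a SCSI midlayer result: host byte << 16, status byte in bits 0-7. */
#include <stdio.h>

#define DEMO_SCSI_STATUS(b) (b)
#define DEMO_SCSI_HOST(b)   ((b) << 16)

enum { DEMO_DID_OK = 0x00, DEMO_DID_NO_CONNECT = 0x01 };
enum { DEMO_SAM_STAT_CHECK_CONDITION = 0x02 };

int main(void)
{
	unsigned int io_err  = DEMO_SCSI_HOST(DEMO_DID_NO_CONNECT);
	unsigned int dif_err = DEMO_SCSI_HOST(DEMO_DID_OK) |
			       DEMO_SCSI_STATUS(DEMO_SAM_STAT_CHECK_CONDITION);

	printf("no-connect result : 0x%08x\n", io_err);   /* 0x00010000 */
	printf("DIF error result  : 0x%08x\n", dif_err);  /* 0x00000002 */
	return 0;
}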
cmnd_type(0x%x)", - unf_lport->port_id, scsi_id, - (u32)cmnd->device->lun, cmnd->cmnd[0]); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Lport(%p) is moving or null", unf_lport); - return UNF_SCSI_ABORT_FAIL; - } - - /* Check local SCSI_ID validity */ - if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]scsi_id(0x%x) is max than(0x%x)", scsi_id, - UNF_MAX_SCSI_ID); - return UNF_SCSI_ABORT_FAIL; - } - - /* Block scsi (check rport state -> whether offline or not) */ - ret = fc_block_scsi_eh(cmnd); - if (unlikely(ret != 0)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Block scsi eh failed(0x%x)", ret); - return ret; - } - - unf_init_abts_tmf_scsi_cmd(cmnd, &scsi_cmd, true); - /* Process scsi Abort cmnd */ - ret = unf_cm_eh_abort_handler(&scsi_cmd); - if (ret == UNF_SCSI_ABORT_SUCCESS) { - if (unf_is_lport_valid(unf_lport) == RETURN_OK) { - scsi_image_table = &unf_lport->rport_scsi_table; - err_handle = UNF_SCSI_ABORT_IO_TYPE; - UNF_SCSI_ERROR_HANDLE_RESULT_CNT(scsi_image_table, - scsi_id, err_handle); - } - } - - return ret; -} - -int unf_scsi_device_reset_handler(struct scsi_cmnd *cmnd) -{ - /* LUN reset */ - struct unf_scsi_cmnd scsi_cmd = {0}; - struct unf_rport_scsi_id_image *scsi_image_table = NULL; - int ret = FAILED; - struct unf_lport *unf_lport = NULL; - u32 scsi_id = 0; - u32 err_handle = 0; - - FC_CHECK_RETURN_VALUE(cmnd, FAILED); - - unf_lport = (struct unf_lport *)cmnd->device->host->hostdata[0]; - if (unf_is_lport_valid(unf_lport) == RETURN_OK) { - scsi_image_table = &unf_lport->rport_scsi_table; - err_handle = UNF_SCSI_DEVICE_RESET_TYPE; - UNF_SCSI_ERROR_HANDLE_CNT(scsi_image_table, scsi_id, err_handle); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[device_reset]Port(0x%x) scsi_id(0x%x) lun_id(0x%x) cmnd_type(0x%x)", - unf_lport->port_id, scsi_id, (u32)cmnd->device->lun, cmnd->cmnd[0]); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, "[err]Port is invalid"); - - return FAILED; - } - - /* Check local SCSI_ID validity */ - scsi_id = (u32)((u64)cmnd->device->hostdata); - if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]scsi_id(0x%x) is max than(0x%x)", scsi_id, - UNF_MAX_SCSI_ID); - - return FAILED; - } - - /* Block scsi (check rport state -> whether offline or not) */ - ret = fc_block_scsi_eh(cmnd); - if (unlikely(ret != 0)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Block scsi eh failed(0x%x)", ret); - - return ret; - } - - unf_init_abts_tmf_scsi_cmd(cmnd, &scsi_cmd, false); - /* Process scsi device/LUN reset cmnd */ - ret = unf_cm_eh_device_reset_handler(&scsi_cmd); - if (ret == UNF_SCSI_ABORT_SUCCESS) { - if (unf_is_lport_valid(unf_lport) == RETURN_OK) { - scsi_image_table = &unf_lport->rport_scsi_table; - err_handle = UNF_SCSI_DEVICE_RESET_TYPE; - UNF_SCSI_ERROR_HANDLE_RESULT_CNT(scsi_image_table, - scsi_id, err_handle); - } - } - - return ret; -} - -int unf_scsi_bus_reset_handler(struct scsi_cmnd *cmnd) -{ - /* BUS Reset */ - struct unf_scsi_cmnd scsi_cmd = {0}; - struct unf_lport *unf_lport = NULL; - struct unf_rport_scsi_id_image *scsi_image_table = NULL; - int ret = FAILED; - u32 scsi_id = 0; - u32 err_handle = 0; - - FC_CHECK_RETURN_VALUE(cmnd, FAILED); - - unf_lport = (struct unf_lport *)cmnd->device->host->hostdata[0]; - if (unlikely(!unf_lport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port is null"); - - return FAILED; - } - - /* Check local SCSI_ID validity */ - scsi_id = (u32)((u64)cmnd->device->hostdata); - if 
(unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]scsi_id(0x%x) is max than(0x%x)", scsi_id, - UNF_MAX_SCSI_ID); - - return FAILED; - } - - if (unf_is_lport_valid(unf_lport) == RETURN_OK) { - scsi_image_table = &unf_lport->rport_scsi_table; - err_handle = UNF_SCSI_BUS_RESET_TYPE; - UNF_SCSI_ERROR_HANDLE_CNT(scsi_image_table, scsi_id, err_handle); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info][bus_reset]Port(0x%x) scsi_id(0x%x) lun_id(0x%x) cmnd_type(0x%x)", - unf_lport->port_id, scsi_id, (u32)cmnd->device->lun, - cmnd->cmnd[0]); - } - - /* Block scsi (check rport state -> whether offline or not) */ - ret = fc_block_scsi_eh(cmnd); - if (unlikely(ret != 0)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Block scsi eh failed(0x%x)", ret); - - return ret; - } - - unf_init_abts_tmf_scsi_cmd(cmnd, &scsi_cmd, false); - /* Process scsi BUS Reset cmnd */ - ret = unf_cm_bus_reset_handler(&scsi_cmd); - if (ret == UNF_SCSI_ABORT_SUCCESS) { - if (unf_is_lport_valid(unf_lport) == RETURN_OK) { - scsi_image_table = &unf_lport->rport_scsi_table; - err_handle = UNF_SCSI_BUS_RESET_TYPE; - UNF_SCSI_ERROR_HANDLE_RESULT_CNT(scsi_image_table, scsi_id, err_handle); - } - } - - return ret; -} - -int unf_scsi_target_reset_handler(struct scsi_cmnd *cmnd) -{ - /* Session reset/delete */ - struct unf_scsi_cmnd scsi_cmd = {0}; - struct unf_rport_scsi_id_image *scsi_image_table = NULL; - int ret = FAILED; - struct unf_lport *unf_lport = NULL; - u32 scsi_id = 0; - u32 err_handle = 0; - - FC_CHECK_RETURN_VALUE(cmnd, RETURN_ERROR); - - unf_lport = (struct unf_lport *)cmnd->device->host->hostdata[0]; - if (unlikely(!unf_lport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port is null"); - - return FAILED; - } - - /* Check local SCSI_ID validity */ - scsi_id = (u32)((u64)cmnd->device->hostdata); - if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]scsi_id(0x%x) is max than(0x%x)", scsi_id, UNF_MAX_SCSI_ID); - - return FAILED; - } - - if (unf_is_lport_valid(unf_lport) == RETURN_OK) { - scsi_image_table = &unf_lport->rport_scsi_table; - err_handle = UNF_SCSI_TARGET_RESET_TYPE; - UNF_SCSI_ERROR_HANDLE_CNT(scsi_image_table, scsi_id, err_handle); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[target_reset]Port(0x%x) scsi_id(0x%x) lun_id(0x%x) cmnd_type(0x%x)", - unf_lport->port_id, scsi_id, (u32)cmnd->device->lun, cmnd->cmnd[0]); - } - - /* Block scsi (check rport state -> whether offline or not) */ - ret = fc_block_scsi_eh(cmnd); - if (unlikely(ret != 0)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Block scsi eh failed(0x%x)", ret); - - return ret; - } - - unf_init_abts_tmf_scsi_cmd(cmnd, &scsi_cmd, false); - /* Process scsi Target/Session reset/delete cmnd */ - ret = unf_cm_target_reset_handler(&scsi_cmd); - if (ret == UNF_SCSI_ABORT_SUCCESS) { - if (unf_is_lport_valid(unf_lport) == RETURN_OK) { - scsi_image_table = &unf_lport->rport_scsi_table; - err_handle = UNF_SCSI_TARGET_RESET_TYPE; - UNF_SCSI_ERROR_HANDLE_RESULT_CNT(scsi_image_table, scsi_id, err_handle); - } - } - - return ret; -} - -static int unf_scsi_slave_alloc(struct scsi_device *sdev) -{ - struct fc_rport *rport = NULL; - u32 scsi_id = 0; - struct unf_lport *unf_lport = NULL; - struct Scsi_Host *host = NULL; - struct unf_rport_scsi_id_image *scsi_image_table = NULL; - - /* About device */ - if (unlikely(!sdev)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]SDev is null"); - return -ENXIO; - } - - /* About scsi rport */ - rport = 
starget_to_rport(scsi_target(sdev)); - if (unlikely(!rport || fc_remote_port_chkready(rport))) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, "[err]SCSI rport is null"); - - if (rport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]SCSI rport is not ready(0x%x)", - fc_remote_port_chkready(rport)); - } - - return -ENXIO; - } - - /* About host */ - host = rport_to_shost(rport); - if (unlikely(!host)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, "[err]Host is null"); - - return -ENXIO; - } - - /* About Local Port */ - unf_lport = (struct unf_lport *)host->hostdata[0]; - if (unf_is_lport_valid(unf_lport) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, "[err]Port is invalid"); - - return -ENXIO; - } - - /* About Local SCSI_ID */ - scsi_id = - *(u32 *)rport->dd_data; - if (unlikely(scsi_id >= UNF_MAX_SCSI_ID)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]scsi_id(0x%x) is max than(0x%x)", scsi_id, UNF_MAX_SCSI_ID); - - return -ENXIO; - } - - scsi_image_table = &unf_lport->rport_scsi_table; - if (scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter) { - atomic_inc(&scsi_image_table->wwn_rport_info_table[scsi_id] - .dfx_counter->device_alloc); - } - atomic_inc(&unf_lport->device_alloc); - sdev->hostdata = (void *)(u64)scsi_id; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[event]Port(0x%x) use scsi_id(%u) to alloc device[%u:%u:%u:%u]", - unf_lport->port_id, scsi_id, host->host_no, sdev->channel, sdev->id, - (u32)sdev->lun); - - return 0; -} - -static void unf_scsi_destroy_slave(struct scsi_device *sdev) -{ - /* - * NOTE: about sdev->hostdata - * --->>> pointing to local SCSI_ID - * 1. Assignment during slave allocation - * 2. Released when callback for slave destroy - * 3. Used during: Queue_CMND, Abort CMND, Device Reset, Target Reset & - * Bus Reset - */ - struct fc_rport *rport = NULL; - u32 scsi_id = 0; - struct unf_lport *unf_lport = NULL; - struct Scsi_Host *host = NULL; - struct unf_rport_scsi_id_image *scsi_image_table = NULL; - - /* About scsi device */ - if (unlikely(!sdev)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]SDev is null"); - - return; - } - - /* About scsi rport */ - rport = starget_to_rport(scsi_target(sdev)); - if (unlikely(!rport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]SCSI rport is null or remote port is not ready"); - return; - } - - /* About host */ - host = rport_to_shost(rport); - if (unlikely(!host)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, "[err]Host is null"); - - return; - } - - /* About L_Port */ - unf_lport = (struct unf_lport *)host->hostdata[0]; - if (unf_is_lport_valid(unf_lport) == RETURN_OK) { - scsi_image_table = &unf_lport->rport_scsi_table; - atomic_inc(&unf_lport->device_destroy); - - scsi_id = (u32)((u64)sdev->hostdata); - if (scsi_id < UNF_MAX_SCSI_ID && scsi_image_table->wwn_rport_info_table) { - if (scsi_image_table->wwn_rport_info_table[scsi_id].dfx_counter) { - atomic_inc(&scsi_image_table->wwn_rport_info_table[scsi_id] - .dfx_counter->device_destroy); - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[event]Port(0x%x) with scsi_id(%u) to destroy slave device[%u:%u:%u:%u]", - unf_lport->port_id, scsi_id, host->host_no, - sdev->channel, sdev->id, (u32)sdev->lun); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[err]Port(0x%x) scsi_id(%u) is invalid and destroy device[%u:%u:%u:%u]", - unf_lport->port_id, scsi_id, host->host_no, - sdev->channel, sdev->id, (u32)sdev->lun); - } - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(%p) is invalid", unf_lport); - } - 
- sdev->hostdata = NULL; -} - -static int unf_scsi_slave_configure(struct scsi_device *sdev) -{ -#define UNF_SCSI_DEV_DEPTH 32 - blk_queue_update_dma_alignment(sdev->request_queue, 0x7); - - scsi_change_queue_depth(sdev, UNF_SCSI_DEV_DEPTH); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[event]Enter slave configure, set depth is %d, sdev->tagged_supported is (%d)", - UNF_SCSI_DEV_DEPTH, sdev->tagged_supported); - - return 0; -} - -static int unf_scsi_scan_finished(struct Scsi_Host *shost, unsigned long time) -{ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[event]Scan finished"); - - return 1; -} - -static void unf_scsi_scan_start(struct Scsi_Host *shost) -{ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[event]Start scsi scan..."); -} - -void unf_host_init_attr_setting(struct Scsi_Host *scsi_host) -{ - struct unf_lport *unf_lport = NULL; - u32 speed = FC_PORTSPEED_UNKNOWN; - - unf_lport = (struct unf_lport *)scsi_host->hostdata[0]; - fc_host_supported_classes(scsi_host) = FC_COS_CLASS3; - fc_host_dev_loss_tmo(scsi_host) = (u32)unf_get_link_lose_tmo(unf_lport); - fc_host_node_name(scsi_host) = unf_lport->node_name; - fc_host_port_name(scsi_host) = unf_lport->port_name; - - fc_host_max_npiv_vports(scsi_host) = (u16)((unf_lport == unf_lport->root_lport) ? - unf_lport->low_level_func.support_max_npiv_num - : 0); - fc_host_npiv_vports_inuse(scsi_host) = 0; - fc_host_next_vport_number(scsi_host) = 0; - - /* About speed mode */ - if (unf_lport->low_level_func.fc_ser_max_speed == UNF_PORT_SPEED_32_G && - unf_lport->card_type == UNF_FC_SERVER_BOARD_32_G) { - speed = FC_PORTSPEED_32GBIT | FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT; - } else if (unf_lport->low_level_func.fc_ser_max_speed == UNF_PORT_SPEED_16_G && - unf_lport->card_type == UNF_FC_SERVER_BOARD_16_G) { - speed = FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT; - } else if (unf_lport->low_level_func.fc_ser_max_speed == UNF_PORT_SPEED_8_G && - unf_lport->card_type == UNF_FC_SERVER_BOARD_8_G) { - speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT; - } - - fc_host_supported_speeds(scsi_host) = speed; -} - -int unf_alloc_scsi_host(struct Scsi_Host **unf_scsi_host, - struct unf_host_param *host_param) -{ - int ret = RETURN_ERROR; - struct Scsi_Host *scsi_host = NULL; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VALUE(unf_scsi_host, RETURN_ERROR); - FC_CHECK_RETURN_VALUE(host_param, RETURN_ERROR); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, "[event]Alloc scsi host..."); - - /* Check L_Port validity */ - unf_lport = (struct unf_lport *)(host_param->lport); - if (unlikely(!unf_lport)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port is NULL and return directly"); - - return RETURN_ERROR; - } - - scsi_host_template.can_queue = host_param->can_queue; - scsi_host_template.cmd_per_lun = host_param->cmnd_per_lun; - scsi_host_template.sg_tablesize = host_param->sg_table_size; - scsi_host_template.max_sectors = host_param->max_sectors; - - /* Alloc scsi host */ - scsi_host = scsi_host_alloc(&scsi_host_template, sizeof(u64)); - if (unlikely(!scsi_host)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, "[err]Register scsi host failed"); - - return RETURN_ERROR; - } - - scsi_host->max_channel = host_param->max_channel; - scsi_host->max_lun = host_param->max_lun; - scsi_host->max_cmd_len = host_param->max_cmnd_len; - scsi_host->unchecked_isa_dma = 0; - scsi_host->hostdata[0] = (unsigned long)(uintptr_t)unf_lport; /* save lport to scsi */ - scsi_host->unique_id = scsi_host->host_no; - 
scsi_host->max_id = host_param->max_id; - scsi_host->transportt = (unf_lport == unf_lport->root_lport) - ? scsi_transport_template - : scsi_transport_template_v; - - /* register DIF/DIX protection */ - if (spfc_dif_enable) { - /* Enable DIF and DIX function */ - scsi_host_set_prot(scsi_host, spfc_dif_type); - - spfc_guard = SHOST_DIX_GUARD_CRC; - /* Enable IP checksum algorithm in DIX */ - if (dix_flag) - spfc_guard |= SHOST_DIX_GUARD_IP; - scsi_host_set_guard(scsi_host, spfc_guard); - } - - /* Add scsi host */ - ret = scsi_add_host(scsi_host, host_param->pdev); - if (unlikely(ret)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Add scsi host failed with return value %d", ret); - - scsi_host_put(scsi_host); - return RETURN_ERROR; - } - - /* Set scsi host attribute */ - unf_host_init_attr_setting(scsi_host); - *unf_scsi_host = scsi_host; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[event]Alloc and add scsi host(0x%llx) succeed", - (u64)scsi_host); - - return RETURN_OK; -} - -void unf_free_scsi_host(struct Scsi_Host *unf_scsi_host) -{ - struct Scsi_Host *scsi_host = NULL; - - scsi_host = unf_scsi_host; - fc_remove_host(scsi_host); - scsi_remove_host(scsi_host); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[event]Remove scsi host(%u) succeed", scsi_host->host_no); - - scsi_host_put(scsi_host); -} - -u32 unf_register_ini_transport(void) -{ - /* Register INI Transport */ - scsi_transport_template = fc_attach_transport(&function_template); - - if (!scsi_transport_template) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Register FC transport to scsi failed"); - - return RETURN_ERROR; - } - - scsi_transport_template_v = fc_attach_transport(&function_template_v); - if (!scsi_transport_template_v) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Register FC vport transport to scsi failed"); - - fc_release_transport(scsi_transport_template); - - return RETURN_ERROR; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[event]Register FC transport to scsi succeed"); - - return RETURN_OK; -} - -void unf_unregister_ini_transport(void) -{ - fc_release_transport(scsi_transport_template); - fc_release_transport(scsi_transport_template_v); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[event]Unregister FC transport succeed"); -} - -void unf_save_sense_data(void *scsi_cmd, const char *sense, int sens_len) -{ - struct scsi_cmnd *cmd = NULL; - - FC_CHECK_RETURN_VOID(scsi_cmd); - FC_CHECK_RETURN_VOID(sense); - - cmd = (struct scsi_cmnd *)scsi_cmd; - memcpy(cmd->sense_buffer, sense, sens_len); -} diff --git a/drivers/scsi/spfc/common/unf_scsi_common.h b/drivers/scsi/spfc/common/unf_scsi_common.h deleted file mode 100644 index f20cdd7f0479..000000000000 --- a/drivers/scsi/spfc/common/unf_scsi_common.h +++ /dev/null @@ -1,570 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_SCSI_COMMON_H -#define UNF_SCSI_COMMON_H - -#include "unf_type.h" - -#define SCSI_SENSE_DATA_LEN 96 -#define DRV_SCSI_CDB_LEN 16 -#define DRV_SCSI_LUN_LEN 8 - -#define DRV_ENTRY_PER_SGL 64 /* Size of an entry array in a hash table */ - -#define UNF_DIF_AREA_SIZE (8) - -struct unf_dif_control_info { - u16 app_tag; - u16 flags; - u32 protect_opcode; - u32 fcp_dl; - u32 start_lba; - u8 actual_dif[UNF_DIF_AREA_SIZE]; - u8 expected_dif[UNF_DIF_AREA_SIZE]; - u32 dif_sge_count; - void *dif_sgl; -}; - -struct dif_result_info { - unsigned char actual_idf[UNF_DIF_AREA_SIZE]; - unsigned char expect_dif[UNF_DIF_AREA_SIZE]; -}; - -struct drv_sge { - char 
*buf; - void *page_ctrl; - u32 Length; - u32 offset; -}; - -struct drv_scsi_cmd_result { - u32 Status; - u16 sense_data_length; /* sense data length */ - u8 sense_data[SCSI_SENSE_DATA_LEN]; /* fail sense info */ -}; - -enum drv_io_direction { - DRV_IO_BIDIRECTIONAL = 0, - DRV_IO_DIRECTION_WRITE = 1, - DRV_IO_DIRECTION_READ = 2, - DRV_IO_DIRECTION_NONE = 3, -}; - -struct drv_sgl { - struct drv_sgl *next_sgl; /* poin to SGL,SGL list */ - unsigned short num_sges_in_chain; - unsigned short num_sges_in_sgl; - u32 flag; - u64 serial_num; - struct drv_sge sge[DRV_ENTRY_PER_SGL]; - struct list_head node; - u32 cpu_id; -}; - -struct dif_info { -/* Indicates the result returned when the data protection - *information is inconsistent,add by pangea - */ - struct dif_result_info dif_result; -/* Data protection information operation code - * bit[31-24] other operation code - * bit[23-16] Data Protection Information Operation - * bit[15-8] Data protection information - * verification bit[7-0] Data protection information - * replace - */ - u32 protect_opcode; - unsigned short apptag; - u64 start_lba; /* IO start LBA */ - struct drv_sgl *protection_sgl; -}; - -struct drv_device_address { - u16 initiator_id; /* ini id */ - u16 bus_id; /* device bus id */ - u16 target_id; /* device target id,for PCIe SSD,device id */ - u16 function_id; /* function id */ -}; - -struct drv_ini_cmd { - struct drv_scsi_cmd_result result; - void *upper; /* product private pointer */ - void *lower; /* driver private pointer */ - u8 cdb[DRV_SCSI_CDB_LEN]; /* CDB edit by product */ - u8 lun[DRV_SCSI_LUN_LEN]; - u16 cmd_len; - u16 tag; /* SCSI cmd add by driver */ - enum drv_io_direction io_direciton; - u32 data_length; - u32 underflow; - u32 overflow; - u32 resid; - u64 port_id; - u64 cmd_sn; - struct drv_device_address addr; - struct drv_sgl *sgl; - void *device; - void (*done)(struct drv_ini_cmd *cmd); /* callback pointer */ - struct dif_info dif_info; -}; - -typedef void (*uplevel_cmd_done)(struct drv_ini_cmd *scsi_cmnd); - -#ifndef SUCCESS -#define SUCCESS 0x2002 -#endif -#ifndef FAILED -#define FAILED 0x2003 -#endif - -#ifndef DRIVER_OK -#define DRIVER_OK 0x00 /* Driver status */ -#endif - -#ifndef PCI_FUNC -#define PCI_FUNC(devfn) ((devfn) & 0x07) -#endif - -#define UNF_SCSI_ABORT_SUCCESS SUCCESS -#define UNF_SCSI_ABORT_FAIL FAILED - -#define UNF_SCSI_STATUS(byte) (byte) -#define UNF_SCSI_MSG(byte) ((byte) << 8) -#define UNF_SCSI_HOST(byte) ((byte) << 16) -#define UNF_SCSI_DRIVER(byte) ((byte) << 24) -#define UNF_GET_SCSI_HOST_ID(scsi_host) ((scsi_host)->host_no) - -struct unf_ini_error_code { - u32 drv_errcode; /* driver error code */ - u32 ap_errcode; /* up level error code */ -}; - -typedef u32 (*ini_get_sgl_entry_buf)(void *upper_cmnd, void *driver_sgl, - void **upper_sgl, u32 *req_index, - u32 *index, char **buf, - u32 *buf_len); - -#define UNF_SCSI_SENSE_BUFFERSIZE 96 -struct unf_scsi_cmnd { - u32 scsi_host_id; - u32 scsi_id; /* cmd->dev->id */ - u64 raw_lun_id; - u64 port_id; - u32 under_flow; /* Underflow */ - u32 transfer_len; /* Transfer Length */ - u32 resid; /* Resid */ - u32 sense_buflen; - int result; - u32 entry_count; /* IO Buffer counter */ - u32 abort; - u32 err_code_table_cout; /* error code size */ - u64 cmnd_sn; - ulong time_out; /* EPL driver add timer */ - u16 cmnd_len; /* Cdb length */ - u8 data_direction; /* data direction */ - u8 *pcmnd; /* SCSI CDB */ - u8 *sense_buf; - void *drv_private; /* driver host pionter */ - void *driver_scribble; /* Xchg pionter */ - void *upper_cmnd; /* UpperCmnd pointer by 
driver */ - u8 *lun_id; /* new lunid */ - u32 world_id; - struct unf_dif_control_info dif_control; /* DIF control */ - struct unf_ini_error_code *err_code_table; /* error code table */ - void *sgl; /* Sgl pointer */ - ini_get_sgl_entry_buf unf_ini_get_sgl_entry; - void (*done)(struct unf_scsi_cmnd *cmd); - uplevel_cmd_done uplevel_done; - struct dif_info dif_info; - u32 qos_level; - void *pinitiator; -}; - -#ifndef FC_PORTSPEED_32GBIT -#define FC_PORTSPEED_32GBIT 0x40 -#endif - -#define UNF_GID_PORT_CNT 2048 -#define UNF_RSCN_PAGE_SUM 255 - -#define UNF_CPU_ENDIAN - -#define UNF_NPORTID_MASK 0x00FFFFFF -#define UNF_DOMAIN_MASK 0x00FF0000 -#define UNF_AREA_MASK 0x0000FF00 -#define UNF_ALPA_MASK 0x000000FF - -struct unf_fc_head { - u32 rctl_did; /* Routing control and Destination address of the seq */ - u32 csctl_sid; /* Class control and Source address of the sequence */ - u32 type_fctl; /* Data type and Initial frame control value of the seq - */ - u32 seqid_dfctl_seqcnt; /* Seq ID, Data Field and Initial seq count */ - u32 oxid_rxid; /* Originator & Responder exchange IDs for the sequence - */ - u32 parameter; /* Relative offset of the first frame of the sequence */ -}; - -#define UNF_FCPRSP_CTL_LEN (24) -#define UNF_MAX_RSP_INFO_LEN (8) -#define UNF_RSP_LEN_VLD (1 << 0) -#define UNF_SENSE_LEN_VLD (1 << 1) -#define UNF_RESID_OVERRUN (1 << 2) -#define UNF_RESID_UNDERRUN (1 << 3) -#define UNF_FCP_CONF_REQ (1 << 4) - -/* T10: FCP2r.07 9.4.1 Overview and format of FCP_RSP IU */ -struct unf_fcprsp_iu { - u32 reserved[2]; - u8 reserved2[2]; - u8 control; - u8 fcp_status; - u32 fcp_residual; - u32 fcp_sense_len; /* Length of sense info field */ - u32 fcp_response_len; /* Length of response info field in bytes 0,4 or 8 - */ - u8 fcp_resp_info[UNF_MAX_RSP_INFO_LEN]; /* Buffer for response info */ - u8 fcp_sense_info[SCSI_SENSE_DATA_LEN]; /* Buffer for sense info */ -} __attribute__((packed)); - -#define UNF_CMD_REF_MASK 0xFF000000 -#define UNF_TASK_ATTR_MASK 0x00070000 -#define UNF_TASK_MGMT_MASK 0x0000FF00 -#define UNF_FCP_WR_DATA 0x00000001 -#define UNF_FCP_RD_DATA 0x00000002 -#define UNF_CDB_LEN_MASK 0x0000007C -#define UNF_FCP_CDB_LEN_16 (16) -#define UNF_FCP_CDB_LEN_32 (32) -#define UNF_FCP_LUNID_LEN_8 (8) - -/* FCP-4 :Table 27 - RSP_CODE field */ -#define UNF_FCP_TM_RSP_COMPLETE (0) -#define UNF_FCP_TM_INVALID_CMND (0x2) -#define UNF_FCP_TM_RSP_REJECT (0x4) -#define UNF_FCP_TM_RSP_FAIL (0x5) -#define UNF_FCP_TM_RSP_SUCCEED (0x8) -#define UNF_FCP_TM_RSP_INCRECT_LUN (0x9) - -#define UNF_SET_TASK_MGMT_FLAGS(fcp_tm_code) ((fcp_tm_code) << 8) -#define UNF_GET_TASK_MGMT_FLAGS(control) (((control) & UNF_TASK_MGMT_MASK) >> 8) - -enum unf_task_mgmt_cmd { - UNF_FCP_TM_QUERY_TASK_SET = (1 << 0), - UNF_FCP_TM_ABORT_TASK_SET = (1 << 1), - UNF_FCP_TM_CLEAR_TASK_SET = (1 << 2), - UNF_FCP_TM_QUERY_UNIT_ATTENTION = (1 << 3), - UNF_FCP_TM_LOGICAL_UNIT_RESET = (1 << 4), - UNF_FCP_TM_TARGET_RESET = (1 << 5), - UNF_FCP_TM_CLEAR_ACA = (1 << 6), - UNF_FCP_TM_TERMINATE_TASK = (1 << 7) /* obsolete */ -}; - -struct unf_fcp_cmnd { - u8 lun[UNF_FCP_LUNID_LEN_8]; /* Logical unit number */ - u32 control; - u8 cdb[UNF_FCP_CDB_LEN_16]; /* Payload data containing cdb info */ - u32 data_length; /* Number of bytes expected to be transferred */ -} __attribute__((packed)); - -struct unf_fcp_cmd_hdr { - struct unf_fc_head frame_hdr; /* FCHS structure */ - struct unf_fcp_cmnd fcp_cmnd; /* Fcp Cmnd struct */ -}; - -/* FC-LS-2 Common Service Parameter applicability */ -struct unf_fabric_coparm { -#if defined(UNF_CPU_ENDIAN) - u32 
bb_credit : 16; /* 0 [0-15] */ - u32 lowest_version : 8; /* 0 [16-23] */ - u32 highest_version : 8; /* 0 [24-31] */ -#else - u32 highest_version : 8; /* 0 [24-31] */ - u32 lowest_version : 8; /* 0 [16-23] */ - u32 bb_credit : 16; /* 0 [0-15] */ -#endif - -#if defined(UNF_CPU_ENDIAN) - u32 bb_receive_data_field_size : 12; /* 1 [0-11] */ - u32 bbscn : 4; /* 1 [12-15] */ - u32 payload_length : 1; /* 1 [16] */ - u32 seq_cnt : 1; /* 1 [17] */ - u32 dynamic_half_duplex : 1; /* 1 [18] */ - u32 r_t_tov : 1; /* 1 [19] */ - u32 reserved_co2 : 6; /* 1 [20-25] */ - u32 e_d_tov_resolution : 1; /* 1 [26] */ - u32 alternate_bb_credit_mgmt : 1; /* 1 [27] */ - u32 nport : 1; /* 1 [28] */ - u32 mnid_assignment : 1; /* 1 [29] */ - u32 random_relative_offset : 1; /* 1 [30] */ - u32 clean_address : 1; /* 1 [31] */ -#else - u32 reserved_co2 : 2; /* 1 [24-25] */ - u32 e_d_tov_resolution : 1; /* 1 [26] */ - u32 alternate_bb_credit_mgmt : 1; /* 1 [27] */ - u32 nport : 1; /* 1 [28] */ - u32 mnid_assignment : 1; /* 1 [29] */ - u32 random_relative_offset : 1; /* 1 [30] */ - u32 clean_address : 1; /* 1 [31] */ - - u32 payload_length : 1; /* 1 [16] */ - u32 seq_cnt : 1; /* 1 [17] */ - u32 dynamic_half_duplex : 1; /* 1 [18] */ - u32 r_t_tov : 1; /* 1 [19] */ - u32 reserved_co5 : 4; /* 1 [20-23] */ - - u32 bb_receive_data_field_size : 12; /* 1 [0-11] */ - u32 bbscn : 4; /* 1 [12-15] */ -#endif - u32 r_a_tov; /* 2 [0-31] */ - u32 e_d_tov; /* 3 [0-31] */ -}; - -/* FC-LS-2 Common Service Parameter applicability */ -/*Common Service Parameters - PLOGI and PLOGI LS_ACC */ -struct lgn_port_coparm { -#if defined(UNF_CPU_ENDIAN) - u32 bb_credit : 16; /* 0 [0-15] */ - u32 lowest_version : 8; /* 0 [16-23] */ - u32 highest_version : 8; /* 0 [24-31] */ -#else - u32 highest_version : 8; /* 0 [24-31] */ - u32 lowest_version : 8; /* 0 [16-23] */ - u32 bb_credit : 16; /* 0 [0-15] */ -#endif - -#if defined(UNF_CPU_ENDIAN) - u32 bb_receive_data_field_size : 12; /* 1 [0-11] */ - u32 bbscn : 4; /* 1 [12-15] */ - u32 payload_length : 1; /* 1 [16] */ - u32 seq_cnt : 1; /* 1 [17] */ - u32 dynamic_half_duplex : 1; /* 1 [18] */ - u32 reserved_co2 : 7; /* 1 [19-25] */ - u32 e_d_tov_resolution : 1; /* 1 [26] */ - u32 alternate_bb_credit_mgmt : 1; /* 1 [27] */ - u32 nport : 1; /* 1 [28] */ - u32 vendor_version_level : 1; /* 1 [29] */ - u32 random_relative_offset : 1; /* 1 [30] */ - u32 continuously_increasing : 1; /* 1 [31] */ -#else - u32 reserved_co2 : 2; /* 1 [24-25] */ - u32 e_d_tov_resolution : 1; /* 1 [26] */ - u32 alternate_bb_credit_mgmt : 1; /* 1 [27] */ - u32 nport : 1; /* 1 [28] */ - u32 vendor_version_level : 1; /* 1 [29] */ - u32 random_relative_offset : 1; /* 1 [30] */ - u32 continuously_increasing : 1; /* 1 [31] */ - - u32 payload_length : 1; /* 1 [16] */ - u32 seq_cnt : 1; /* 1 [17] */ - u32 dynamic_half_duplex : 1; /* 1 [18] */ - u32 reserved_co5 : 5; /* 1 [19-23] */ - - u32 bb_receive_data_field_size : 12; /* 1 [0-11] */ - u32 reserved_co1 : 4; /* 1 [12-15] */ -#endif - -#if defined(UNF_CPU_ENDIAN) - u32 relative_offset : 16; /* 2 [0-15] */ - u32 nport_total_concurrent_sequences : 16; /* 2 [16-31] */ -#else - u32 nport_total_concurrent_sequences : 16; /* 2 [16-31] */ - u32 relative_offset : 16; /* 2 [0-15] */ -#endif - - u32 e_d_tov; -}; - -/* FC-LS-2 Class Service Parameters Applicability */ -struct unf_lgn_port_clparm { -#if defined(UNF_CPU_ENDIAN) - u32 reserved_cl1 : 6; /* 0 [0-5] */ - u32 ic_data_compression_history_buffer_size : 2; /* 0 [6-7] */ - u32 ic_data_compression_capable : 1; /* 0 [8] */ - - u32 
ic_ack_generation_assistance : 1; /* 0 [9] */ - u32 ic_ack_n_capable : 1; /* 0 [10] */ - u32 ic_ack_o_capable : 1; /* 0 [11] */ - u32 ic_initial_responder_processes_accociator : 2; /* 0 [12-13] */ - u32 ic_x_id_reassignment : 2; /* 0 [14-15] */ - - u32 reserved_cl2 : 7; /* 0 [16-22] */ - u32 priority : 1; /* 0 [23] */ - u32 buffered_class : 1; /* 0 [24] */ - u32 camp_on : 1; /* 0 [25] */ - u32 dedicated_simplex : 1; /* 0 [26] */ - u32 sequential_delivery : 1; /* 0 [27] */ - u32 stacked_connect_request : 2; /* 0 [28-29] */ - u32 intermix_mode : 1; /* 0 [30] */ - u32 valid : 1; /* 0 [31] */ -#else - u32 buffered_class : 1; /* 0 [24] */ - u32 camp_on : 1; /* 0 [25] */ - u32 dedicated_simplex : 1; /* 0 [26] */ - u32 sequential_delivery : 1; /* 0 [27] */ - u32 stacked_connect_request : 2; /* 0 [28-29] */ - u32 intermix_mode : 1; /* 0 [30] */ - u32 valid : 1; /* 0 [31] */ - u32 reserved_cl2 : 7; /* 0 [16-22] */ - u32 priority : 1; /* 0 [23] */ - u32 ic_data_compression_capable : 1; /* 0 [8] */ - u32 ic_ack_generation_assistance : 1; /* 0 [9] */ - u32 ic_ack_n_capable : 1; /* 0 [10] */ - u32 ic_ack_o_capable : 1; /* 0 [11] */ - u32 ic_initial_responder_processes_accociator : 2; /* 0 [12-13] */ - u32 ic_x_id_reassignment : 2; /* 0 [14-15] */ - - u32 reserved_cl1 : 6; /* 0 [0-5] */ - u32 ic_data_compression_history_buffer_size : 2; /* 0 [6-7] */ -#endif - -#if defined(UNF_CPU_ENDIAN) - u32 received_data_field_size : 16; /* 1 [0-15] */ - - u32 reserved_cl3 : 5; /* 1 [16-20] */ - u32 rc_data_compression_history_buffer_size : 2; /* 1 [21-22] */ - u32 rc_data_compression_capable : 1; /* 1 [23] */ - - u32 rc_data_categories_per_sequence : 2; /* 1 [24-25] */ - u32 reserved_cl4 : 1; /* 1 [26] */ - u32 rc_error_policy_supported : 2; /* 1 [27-28] */ - u32 rc_x_id_interlock : 1; /* 1 [29] */ - u32 rc_ack_n_capable : 1; /* 1 [30] */ - u32 rc_ack_o_capable : 1; /* 1 [31] */ -#else - u32 rc_data_categories_per_sequence : 2; /* 1 [24-25] */ - u32 reserved_cl4 : 1; /* 1 [26] */ - u32 rc_error_policy_supported : 2; /* 1 [27-28] */ - u32 rc_x_id_interlock : 1; /* 1 [29] */ - u32 rc_ack_n_capable : 1; /* 1 [30] */ - u32 rc_ack_o_capable : 1; /* 1 [31] */ - - u32 reserved_cl3 : 5; /* 1 [16-20] */ - u32 rc_data_compression_history_buffer_size : 2; /* 1 [21-22] */ - u32 rc_data_compression_capable : 1; /* 1 [23] */ - - u32 received_data_field_size : 16; /* 1 [0-15] */ -#endif - -#if defined(UNF_CPU_ENDIAN) - u32 nport_end_to_end_credit : 15; /* 2 [0-14] */ - u32 reserved_cl5 : 1; /* 2 [15] */ - - u32 concurrent_sequences : 16; /* 2 [16-31] */ -#else - u32 concurrent_sequences : 16; /* 2 [16-31] */ - - u32 nport_end_to_end_credit : 15; /* 2 [0-14] */ - u32 reserved_cl5 : 1; /* 2 [15] */ -#endif - -#if defined(UNF_CPU_ENDIAN) - u32 reserved_cl6 : 16; /* 3 [0-15] */ - u32 open_sequence_per_exchange : 16; /* 3 [16-31] */ -#else - u32 open_sequence_per_exchange : 16; /* 3 [16-31] */ - u32 reserved_cl6 : 16; /* 3 [0-15] */ -#endif -}; - -struct unf_fabric_parm { - struct unf_fabric_coparm co_parms; - u32 high_port_name; - u32 low_port_name; - u32 high_node_name; - u32 low_node_name; - struct unf_lgn_port_clparm cl_parms[3]; - u32 reserved_1[4]; - u32 vendor_version_level[4]; -}; - -struct unf_lgn_parm { - struct lgn_port_coparm co_parms; - u32 high_port_name; - u32 low_port_name; - u32 high_node_name; - u32 low_node_name; - struct unf_lgn_port_clparm cl_parms[3]; - u32 reserved_1[4]; - u32 vendor_version_level[4]; -}; - -#define ELS_RJT 0x1 -#define ELS_ACC 0x2 -#define ELS_PLOGI 0x3 -#define ELS_FLOGI 0x4 -#define ELS_LOGO 
0x5 -#define ELS_ECHO 0x10 -#define ELS_RRQ 0x12 -#define ELS_REC 0x13 -#define ELS_PRLI 0x20 -#define ELS_PRLO 0x21 -#define ELS_TPRLO 0x24 -#define ELS_PDISC 0x50 -#define ELS_FDISC 0x51 -#define ELS_ADISC 0x52 -#define ELS_RSCN 0x61 /* registered state change notification */ -#define ELS_SCR 0x62 /* state change registration */ - -#define NS_GIEL 0X0101 -#define NS_GA_NXT 0X0100 -#define NS_GPN_ID 0x0112 /* get port name by ID */ -#define NS_GNN_ID 0x0113 /* get node name by ID */ -#define NS_GFF_ID 0x011f /* get FC-4 features by ID */ -#define NS_GID_PN 0x0121 /* get ID for port name */ -#define NS_GID_NN 0x0131 /* get IDs for node name */ -#define NS_GID_FT 0x0171 /* get IDs by FC4 type */ -#define NS_GPN_FT 0x0172 /* get port names by FC4 type */ -#define NS_GID_PT 0x01a1 /* get IDs by port type */ -#define NS_RFT_ID 0x0217 /* reg FC4 type for ID */ -#define NS_RPN_ID 0x0212 /* reg port name for ID */ -#define NS_RNN_ID 0x0213 /* reg node name for ID */ -#define NS_RSNPN 0x0218 /* reg symbolic port name */ -#define NS_RFF_ID 0x021f /* reg FC4 Features for ID */ -#define NS_RSNN 0x0239 /* reg symbolic node name */ -#define ST_NULL 0xffff /* reg symbolic node name */ - -#define BLS_ABTS 0xA001 /* ABTS */ - -#define FCP_SRR 0x14 /* Sequence Retransmission Request */ -#define UNF_FC_FID_DOM_MGR 0xfffc00 /* domain manager base */ -enum unf_fc_well_known_fabric_id { - UNF_FC_FID_NONE = 0x000000, /* No destination */ - UNF_FC_FID_DOM_CTRL = 0xfffc01, /* domain controller */ - UNF_FC_FID_BCAST = 0xffffff, /* broadcast */ - UNF_FC_FID_FLOGI = 0xfffffe, /* fabric login */ - UNF_FC_FID_FCTRL = 0xfffffd, /* fabric controller */ - UNF_FC_FID_DIR_SERV = 0xfffffc, /* directory server */ - UNF_FC_FID_TIME_SERV = 0xfffffb, /* time server */ - UNF_FC_FID_MGMT_SERV = 0xfffffa, /* management server */ - UNF_FC_FID_QOS = 0xfffff9, /* QoS Facilitator */ - UNF_FC_FID_ALIASES = 0xfffff8, /* alias server (FC-PH2) */ - UNF_FC_FID_SEC_KEY = 0xfffff7, /* Security key dist. 
server */ - UNF_FC_FID_CLOCK = 0xfffff6, /* clock synch server */ - UNF_FC_FID_MCAST_SERV = 0xfffff5 /* multicast server */ -}; - -#define INVALID_WORLD_ID 0xfffffffc - -struct unf_host_param { - int can_queue; - u16 sg_table_size; - short cmnd_per_lun; - u32 max_id; - u32 max_lun; - u32 max_channel; - u16 max_cmnd_len; - u16 max_sectors; - u64 dma_boundary; - u32 port_id; - void *lport; - struct device *pdev; -}; - -int unf_alloc_scsi_host(struct Scsi_Host **unf_scsi_host, struct unf_host_param *host_param); -void unf_free_scsi_host(struct Scsi_Host *unf_scsi_host); -u32 unf_register_ini_transport(void); -void unf_unregister_ini_transport(void); -void unf_save_sense_data(void *scsi_cmd, const char *sense, int sens_len); - -#endif diff --git a/drivers/scsi/spfc/common/unf_service.c b/drivers/scsi/spfc/common/unf_service.c deleted file mode 100644 index 9c86c99374c8..000000000000 --- a/drivers/scsi/spfc/common/unf_service.c +++ /dev/null @@ -1,1430 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "unf_service.h" -#include "unf_log.h" -#include "unf_rport.h" -#include "unf_ls.h" -#include "unf_gs.h" - -struct unf_els_handle_table els_handle_table[] = { - {ELS_PLOGI, unf_plogi_handler}, {ELS_FLOGI, unf_flogi_handler}, - {ELS_LOGO, unf_logo_handler}, {ELS_ECHO, unf_echo_handler}, - {ELS_RRQ, unf_rrq_handler}, {ELS_REC, unf_rec_handler}, - {ELS_PRLI, unf_prli_handler}, {ELS_PRLO, unf_prlo_handler}, - {ELS_PDISC, unf_pdisc_handler}, {ELS_ADISC, unf_adisc_handler}, - {ELS_RSCN, unf_rscn_handler} }; - -u32 max_frame_size = UNF_DEFAULT_FRAME_SIZE; - -#define UNF_NEED_BIG_RESPONSE_BUFF(cmnd_code) \ - (((cmnd_code) == ELS_ECHO) || ((cmnd_code) == NS_GID_PT) || \ - ((cmnd_code) == NS_GID_FT)) - -#define NEED_REFRESH_NPORTID(pkg) \ - ((((pkg)->cmnd == ELS_PLOGI) || ((pkg)->cmnd == ELS_PDISC) || \ - ((pkg)->cmnd == ELS_ADISC))) - -void unf_select_sq(struct unf_xchg *xchg, struct unf_frame_pkg *pkg) -{ - u32 ssq_index = 0; - struct unf_rport *unf_rport = NULL; - - if (likely(xchg)) { - unf_rport = xchg->rport; - - if (unf_rport) { - ssq_index = (xchg->hotpooltag % UNF_SQ_NUM_PER_SESSION) + - unf_rport->sqn_base; - } - } - - pkg->private_data[PKG_PRIVATE_XCHG_SSQ_INDEX] = ssq_index; -} - -u32 unf_ls_gs_cmnd_send(struct unf_lport *lport, struct unf_frame_pkg *pkg, - struct unf_xchg *xchg) -{ - u32 ret = UNF_RETURN_ERROR; - ulong time_out = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - if (unlikely(!lport->low_level_func.service_op.unf_ls_gs_send)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) LS/GS send function is NULL", - lport->port_id); - - return ret; - } - - if (pkg->type == UNF_PKG_GS_REQ) - time_out = UNF_GET_GS_SFS_XCHG_TIMER(lport); - else - time_out = UNF_GET_ELS_SFS_XCHG_TIMER(lport); - - if (xchg->cmnd_code == ELS_RRQ) { - time_out = ((ulong)UNF_GET_ELS_SFS_XCHG_TIMER(lport) > UNF_RRQ_MIN_TIMEOUT_INTERVAL) - ? 
(ulong)UNF_GET_ELS_SFS_XCHG_TIMER(lport) - : UNF_RRQ_MIN_TIMEOUT_INTERVAL; - } else if (xchg->cmnd_code == ELS_LOGO) { - time_out = UNF_LOGO_TIMEOUT_INTERVAL; - } - - pkg->private_data[PKG_PRIVATE_XCHG_TIMEER] = (u32)time_out; - lport->xchg_mgr_temp.unf_xchg_add_timer((void *)xchg, time_out, UNF_TIMER_TYPE_SFS); - - unf_select_sq(xchg, pkg); - - ret = lport->low_level_func.service_op.unf_ls_gs_send(lport->fc_port, pkg); - if (unlikely(ret != RETURN_OK)) - lport->xchg_mgr_temp.unf_xchg_cancel_timer((void *)xchg); - - return ret; -} - -static u32 unf_bls_cmnd_send(struct unf_lport *lport, struct unf_frame_pkg *pkg, - struct unf_xchg *xchg) -{ - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - pkg->private_data[PKG_PRIVATE_XCHG_TIMEER] = (u32)UNF_GET_BLS_SFS_XCHG_TIMER(lport); - pkg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = - xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME]; - - unf_select_sq(xchg, pkg); - - return lport->low_level_func.service_op.unf_bls_send(lport->fc_port, pkg); -} - -void unf_fill_package(struct unf_frame_pkg *pkg, struct unf_xchg *xchg, - struct unf_rport *rport) -{ - /* v_pstRport maybe NULL */ - FC_CHECK_RETURN_VOID(pkg); - FC_CHECK_RETURN_VOID(xchg); - - pkg->cmnd = xchg->cmnd_code; - pkg->fcp_cmnd = &xchg->fcp_cmnd; - pkg->frame_head.csctl_sid = xchg->sid; - pkg->frame_head.rctl_did = xchg->did; - pkg->frame_head.oxid_rxid = ((u32)xchg->oxid << UNF_SHIFT_16 | xchg->rxid); - pkg->xchg_contex = xchg; - - FC_CHECK_RETURN_VOID(xchg->lport); - pkg->private_data[PKG_PRIVATE_XCHG_VP_INDEX] = xchg->lport->vp_index; - - if (!rport) { - pkg->private_data[PKG_PRIVATE_XCHG_RPORT_INDEX] = UNF_RPORT_INVALID_INDEX; - pkg->private_data[PKG_PRIVATE_RPORT_RX_SIZE] = INVALID_VALUE32; - } else { - if (likely(rport->nport_id != UNF_FC_FID_FLOGI)) - pkg->private_data[PKG_PRIVATE_XCHG_RPORT_INDEX] = rport->rport_index; - else - pkg->private_data[PKG_PRIVATE_XCHG_RPORT_INDEX] = SPFC_DEFAULT_RPORT_INDEX; - - pkg->private_data[PKG_PRIVATE_RPORT_RX_SIZE] = rport->max_frame_size; - } - - pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = xchg->hotpooltag; - pkg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = - xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME]; - pkg->private_data[PKG_PRIVATE_LOWLEVEL_XCHG_ADD] = - xchg->private_data[PKG_PRIVATE_LOWLEVEL_XCHG_ADD]; - pkg->unf_cmnd_pload_bl.buffer_ptr = - (u8 *)xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - pkg->unf_cmnd_pload_bl.buf_dma_addr = - xchg->fcp_sfs_union.sfs_entry.sfs_buff_phy_addr; - - /* Low level need to know payload length if send ECHO response */ - pkg->unf_cmnd_pload_bl.length = xchg->fcp_sfs_union.sfs_entry.cur_offset; -} - -struct unf_xchg *unf_get_sfs_free_xchg_and_init(struct unf_lport *lport, u32 did, - struct unf_rport *rport, - union unf_sfs_u **fc_entry) -{ - struct unf_xchg *xchg = NULL; - union unf_sfs_u *sfs_fc_entry = NULL; - - xchg = unf_cm_get_free_xchg(lport, UNF_XCHG_TYPE_SFS); - if (!xchg) - return NULL; - - xchg->did = did; - xchg->sid = lport->nport_id; - xchg->oid = xchg->sid; - xchg->lport = lport; - xchg->rport = rport; - xchg->disc_rport = NULL; - xchg->callback = NULL; - xchg->ob_callback = NULL; - - sfs_fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!sfs_fc_entry) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", - lport->port_id, xchg->hotpooltag); - - unf_cm_free_xchg(lport, xchg); - return NULL; - } - - *fc_entry = sfs_fc_entry; - 
- return xchg; -} - -void *unf_get_one_big_sfs_buf(struct unf_xchg *xchg) -{ - struct unf_big_sfs *big_sfs = NULL; - struct list_head *list_head = NULL; - struct unf_xchg_mgr *xchg_mgr = NULL; - ulong flag = 0; - spinlock_t *big_sfs_pool_lock = NULL; - - FC_CHECK_RETURN_VALUE(xchg, NULL); - xchg_mgr = xchg->xchg_mgr; - FC_CHECK_RETURN_VALUE(xchg_mgr, NULL); - big_sfs_pool_lock = &xchg_mgr->big_sfs_pool.big_sfs_pool_lock; - - spin_lock_irqsave(big_sfs_pool_lock, flag); - if (!list_empty(&xchg_mgr->big_sfs_pool.list_freepool)) { - /* from free to busy */ - list_head = UNF_OS_LIST_NEXT(&xchg_mgr->big_sfs_pool.list_freepool); - list_del(list_head); - xchg_mgr->big_sfs_pool.free_count--; - list_add_tail(list_head, &xchg_mgr->big_sfs_pool.list_busypool); - big_sfs = list_entry(list_head, struct unf_big_sfs, entry_bigsfs); - } else { - spin_unlock_irqrestore(big_sfs_pool_lock, flag); - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Allocate big sfs buf failed, count(0x%x) exchange(0x%p) command(0x%x)", - xchg_mgr->big_sfs_pool.free_count, xchg, xchg->cmnd_code); - - return NULL; - } - spin_unlock_irqrestore(big_sfs_pool_lock, flag); - - xchg->big_sfs_buf = big_sfs; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Allocate one big sfs buffer(0x%p), remaining count(0x%x) exchange(0x%p) command(0x%x)", - big_sfs->addr, xchg_mgr->big_sfs_pool.free_count, xchg, - xchg->cmnd_code); - - return big_sfs->addr; -} - -static void unf_fill_rjt_pld(struct unf_els_rjt *els_rjt, u32 reason_code, - u32 reason_explanation) -{ - FC_CHECK_RETURN_VOID(els_rjt); - - els_rjt->cmnd = UNF_ELS_CMND_RJT; - els_rjt->reason_code = (reason_code | reason_explanation); -} - -u32 unf_send_abts(struct unf_lport *lport, struct unf_xchg *xchg) -{ - struct unf_rport *unf_rport = NULL; - u32 ret = UNF_RETURN_ERROR; - struct unf_frame_pkg pkg; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - unf_rport = xchg->rport; - FC_CHECK_RETURN_VALUE(unf_rport, UNF_RETURN_ERROR); - - /* set pkg info */ - memset(&pkg, 0, sizeof(struct unf_frame_pkg)); - pkg.type = UNF_PKG_BLS_REQ; - pkg.frame_head.csctl_sid = xchg->sid; - pkg.frame_head.rctl_did = xchg->did; - pkg.frame_head.oxid_rxid = ((u32)xchg->oxid << UNF_SHIFT_16 | xchg->rxid); - pkg.xchg_contex = xchg; - pkg.unf_cmnd_pload_bl.buffer_ptr = (u8 *)xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - - pkg.unf_cmnd_pload_bl.buf_dma_addr = xchg->fcp_sfs_union.sfs_entry.sfs_buff_phy_addr; - pkg.private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = xchg->hotpooltag; - - UNF_SET_XCHG_ALLOC_TIME(&pkg, xchg); - UNF_SET_ABORT_INFO_IOTYPE(&pkg, xchg); - - pkg.private_data[PKG_PRIVATE_XCHG_RPORT_INDEX] = - xchg->private_data[PKG_PRIVATE_XCHG_RPORT_INDEX]; - - /* Send ABTS frame to target */ - ret = unf_bls_cmnd_send(lport, &pkg, xchg); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]Port(0x%x_0x%x) send ABTS %s. Abort exch(0x%p) Cmdsn:0x%lx, tag(0x%x) iotype(0x%x)", - lport->port_id, lport->nport_id, - (ret == UNF_RETURN_ERROR) ? 
"failed" : "succeed", xchg, - (ulong)xchg->cmnd_sn, xchg->hotpooltag, xchg->data_direction); - - return ret; -} - -u32 unf_send_els_rjt_by_rport(struct unf_lport *lport, struct unf_xchg *xchg, - struct unf_rport *rport, struct unf_rjt_info *rjt_info) -{ - struct unf_els_rjt *els_rjt = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_frame_pkg pkg = {0}; - u32 ret = UNF_RETURN_ERROR; - u16 ox_id = 0; - u16 rx_id = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport, UNF_RETURN_ERROR); - - xchg->cmnd_code = UNF_SET_ELS_RJT_TYPE(rjt_info->els_cmnd_code); - xchg->did = rport->nport_id; - xchg->sid = lport->nport_id; - xchg->oid = xchg->sid; - xchg->lport = lport; - xchg->rport = rport; - xchg->disc_rport = NULL; - - xchg->callback = NULL; - xchg->ob_callback = NULL; - - unf_fill_package(&pkg, xchg, rport); - pkg.class_mode = UNF_FC_PROTOCOL_CLASS_3; - pkg.type = UNF_PKG_ELS_REPLY; - - fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!fc_entry) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", - lport->port_id, xchg->hotpooltag); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - - els_rjt = &fc_entry->els_rjt; - memset(els_rjt, 0, sizeof(struct unf_els_rjt)); - unf_fill_rjt_pld(els_rjt, rjt_info->reason_code, rjt_info->reason_explanation); - ox_id = xchg->oxid; - rx_id = xchg->rxid; - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Send LS_RJT for 0x%x %s. Port(0x%x)--->RPort(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", - rjt_info->els_cmnd_code, - (ret != RETURN_OK) ? "failed" : "succeed", lport->port_id, - rport->nport_id, ox_id, rx_id); - - return ret; -} - -u32 unf_send_els_rjt_by_did(struct unf_lport *lport, struct unf_xchg *xchg, - u32 did, struct unf_rjt_info *rjt_info) -{ - struct unf_els_rjt *els_rjt = NULL; - union unf_sfs_u *fc_entry = NULL; - struct unf_frame_pkg pkg = {0}; - u32 ret = UNF_RETURN_ERROR; - u16 ox_id = 0; - u16 rx_id = 0; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - xchg->cmnd_code = UNF_SET_ELS_RJT_TYPE(rjt_info->els_cmnd_code); - xchg->did = did; - xchg->sid = lport->nport_id; - xchg->oid = xchg->sid; - xchg->lport = lport; - xchg->rport = NULL; - xchg->disc_rport = NULL; - - xchg->callback = NULL; - xchg->ob_callback = NULL; - - unf_fill_package(&pkg, xchg, NULL); - pkg.class_mode = UNF_FC_PROTOCOL_CLASS_3; - pkg.type = UNF_PKG_ELS_REPLY; - - if (rjt_info->reason_code == UNF_LS_RJT_CLASS_ERROR && - rjt_info->class_mode != UNF_FC_PROTOCOL_CLASS_3) { - pkg.class_mode = rjt_info->class_mode; - } - - fc_entry = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr; - if (!fc_entry) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) entry can't be NULL with tag(0x%x)", - lport->port_id, xchg->hotpooltag); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - - els_rjt = &fc_entry->els_rjt; - memset(els_rjt, 0, sizeof(struct unf_els_rjt)); - unf_fill_rjt_pld(els_rjt, rjt_info->reason_code, rjt_info->reason_explanation); - ox_id = xchg->oxid; - rx_id = xchg->rxid; - - ret = unf_ls_gs_cmnd_send(lport, &pkg, xchg); - if (ret != RETURN_OK) - unf_cm_free_xchg((void *)lport, (void *)xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]LOGIN: Send LS_RJT %s. 
Port(0x%x)--->RPort(0x%x) with OX_ID(0x%x) RX_ID(0x%x)", - (ret != RETURN_OK) ? "failed" : "succeed", lport->port_id, did, ox_id, rx_id); - - return ret; -} - -static u32 unf_els_cmnd_default_handler(struct unf_lport *lport, struct unf_xchg *xchg, u32 sid, - u32 els_cmnd_code) -{ - struct unf_rport *unf_rport = NULL; - struct unf_rjt_info rjt_info = {0}; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(xchg, UNF_RETURN_ERROR); - - FC_DRV_PRINT(UNF_LOG_ABNORMAL, UNF_KEVENT, - "[info]Receive Unknown ELS command(0x%x). Port(0x%x)<---RPort(0x%x) with OX_ID(0x%x)", - els_cmnd_code, lport->port_id, sid, xchg->oxid); - - memset(&rjt_info, 0, sizeof(struct unf_rjt_info)); - rjt_info.els_cmnd_code = els_cmnd_code; - rjt_info.reason_code = UNF_LS_RJT_NOT_SUPPORTED; - - unf_rport = unf_get_rport_by_nport_id(lport, sid); - if (unf_rport) - ret = unf_send_els_rjt_by_rport(lport, xchg, unf_rport, &rjt_info); - else - ret = unf_send_els_rjt_by_did(lport, xchg, sid, &rjt_info); - - return ret; -} - -static struct unf_xchg *unf_alloc_xchg_for_rcv_cmnd(struct unf_lport *lport, - struct unf_frame_pkg *pkg) -{ - struct unf_xchg *xchg = NULL; - ulong flags = 0; - u32 i = 0; - u32 offset = 0; - u8 *cmnd_pld = NULL; - u32 first_dword = 0; - u32 alloc_time = 0; - - FC_CHECK_RETURN_VALUE(lport, NULL); - FC_CHECK_RETURN_VALUE(pkg, NULL); - - if (!pkg->xchg_contex) { - xchg = unf_cm_get_free_xchg(lport, UNF_XCHG_TYPE_SFS); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[warn]Port(0x%x) get new exchange failed", - lport->port_id); - - return NULL; - } - - offset = (xchg->fcp_sfs_union.sfs_entry.cur_offset); - cmnd_pld = (u8 *)xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rscn.rscn_pld; - first_dword = xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr - ->sfs_common.frame_head.rctl_did; - - if (cmnd_pld || first_dword != 0 || offset != 0) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) exchange(0x%p) abnormal, maybe data overrun, start(%llu) command(0x%x)", - lport->port_id, xchg, xchg->alloc_jif, pkg->cmnd); - - UNF_PRINT_SFS(UNF_INFO, lport->port_id, - xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr, - sizeof(union unf_sfs_u)); - } - - memset(xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr, 0, sizeof(union unf_sfs_u)); - - pkg->xchg_contex = (void *)xchg; - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - xchg->fcp_sfs_union.sfs_entry.cur_offset = 0; - alloc_time = xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME]; - for (i = 0; i < PKG_MAX_PRIVATE_DATA_SIZE; i++) - xchg->private_data[i] = pkg->private_data[i]; - - xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = alloc_time; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - } else { - xchg = (struct unf_xchg *)pkg->xchg_contex; - } - - if (!xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) { - unf_cm_free_xchg((void *)lport, (void *)xchg); - - return NULL; - } - - return xchg; -} - -static u8 *unf_calc_big_cmnd_pld_buffer(struct unf_xchg *xchg, u32 cmnd_code) -{ - u8 *cmnd_pld = NULL; - void *buf = NULL; - u8 *dest = NULL; - - FC_CHECK_RETURN_VALUE(xchg, NULL); - - if (cmnd_code == ELS_RSCN) - cmnd_pld = (u8 *)xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rscn.rscn_pld; - else - cmnd_pld = (u8 *)xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo.echo_pld; - - if (!cmnd_pld) { - buf = unf_get_one_big_sfs_buf(xchg); - if (!buf) - return NULL; - - if (cmnd_code == ELS_RSCN) { - memset(buf, 0, sizeof(struct unf_rscn_pld)); - 
xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->rscn.rscn_pld = buf; - } else { - memset(buf, 0, sizeof(struct unf_echo_payload)); - xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo.echo_pld = buf; - } - - dest = (u8 *)buf; - } else { - dest = (u8 *)(cmnd_pld + xchg->fcp_sfs_union.sfs_entry.cur_offset); - } - - return dest; -} - -static u8 *unf_calc_other_pld_buffer(struct unf_xchg *xchg) -{ - u8 *dest = NULL; - u32 offset = 0; - - FC_CHECK_RETURN_VALUE(xchg, NULL); - - offset = (sizeof(struct unf_fc_head)) + (xchg->fcp_sfs_union.sfs_entry.cur_offset); - dest = (u8 *)((u8 *)(xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) + offset); - - return dest; -} - -struct unf_xchg *unf_mv_data_2_xchg(struct unf_lport *lport, struct unf_frame_pkg *pkg) -{ - struct unf_xchg *xchg = NULL; - u8 *dest = NULL; - u32 length = 0; - ulong flags = 0; - - FC_CHECK_RETURN_VALUE(lport, NULL); - FC_CHECK_RETURN_VALUE(pkg, NULL); - - xchg = unf_alloc_xchg_for_rcv_cmnd(lport, pkg); - if (!xchg) - return NULL; - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - - memcpy(&xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->sfs_common.frame_head, - &pkg->frame_head, sizeof(pkg->frame_head)); - - if (pkg->cmnd == ELS_RSCN || pkg->cmnd == ELS_ECHO) - dest = unf_calc_big_cmnd_pld_buffer(xchg, pkg->cmnd); - else - dest = unf_calc_other_pld_buffer(xchg); - - if (!dest) { - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - unf_cm_free_xchg((void *)lport, (void *)xchg); - - return NULL; - } - - if (((xchg->fcp_sfs_union.sfs_entry.cur_offset + - pkg->unf_cmnd_pload_bl.length) > (u32)sizeof(union unf_sfs_u)) && - pkg->cmnd != ELS_RSCN && pkg->cmnd != ELS_ECHO) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) excange(0x%p) command(0x%x,0x%x) copy payload overrun(0x%x:0x%x:0x%x)", - lport->port_id, xchg, pkg->cmnd, xchg->hotpooltag, - xchg->fcp_sfs_union.sfs_entry.cur_offset, - pkg->unf_cmnd_pload_bl.length, (u32)sizeof(union unf_sfs_u)); - - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - unf_cm_free_xchg((void *)lport, (void *)xchg); - - return NULL; - } - - length = pkg->unf_cmnd_pload_bl.length; - if (length > 0) - memcpy(dest, pkg->unf_cmnd_pload_bl.buffer_ptr, length); - - xchg->fcp_sfs_union.sfs_entry.cur_offset += length; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - return xchg; -} - -static u32 unf_check_els_cmnd_valid(struct unf_lport *lport, struct unf_frame_pkg *pkg, - struct unf_xchg *xchg) -{ - struct unf_rjt_info rjt_info = {0}; - struct unf_lport *vport = NULL; - u32 sid = 0; - u32 did = 0; - - sid = (pkg->frame_head.csctl_sid) & UNF_NPORTID_MASK; - did = (pkg->frame_head.rctl_did) & UNF_NPORTID_MASK; - - memset(&rjt_info, 0, sizeof(struct unf_rjt_info)); - - if (pkg->class_mode != UNF_FC_PROTOCOL_CLASS_3) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) unsupport class 0x%x cmd 0x%x and send RJT", - lport->port_id, pkg->class_mode, pkg->cmnd); - - rjt_info.reason_code = UNF_LS_RJT_CLASS_ERROR; - rjt_info.els_cmnd_code = pkg->cmnd; - rjt_info.class_mode = pkg->class_mode; - (void)unf_send_els_rjt_by_did(lport, xchg, sid, &rjt_info); - - return UNF_RETURN_ERROR; - } - - rjt_info.reason_code = UNF_LS_RJT_NOT_SUPPORTED; - - if (pkg->cmnd == ELS_FLOGI && lport->act_topo == UNF_ACT_TOP_PRIVATE_LOOP) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) receive FLOGI in top (0x%x) and send LS_RJT", - lport->port_id, lport->act_topo); - - rjt_info.els_cmnd_code = ELS_FLOGI; - (void)unf_send_els_rjt_by_did(lport, xchg, sid, 
&rjt_info); - - return UNF_RETURN_ERROR; - } - - if (pkg->cmnd == ELS_PLOGI && did >= UNF_FC_FID_DOM_MGR) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x)receive PLOGI with wellknown address(0x%x) and Send LS_RJT", - lport->port_id, did); - - rjt_info.els_cmnd_code = ELS_PLOGI; - (void)unf_send_els_rjt_by_did(lport, xchg, sid, &rjt_info); - - return UNF_RETURN_ERROR; - } - - if ((lport->nport_id == 0 || lport->nport_id == INVALID_VALUE32) && - (NEED_REFRESH_NPORTID(pkg))) { - lport->nport_id = did; - } else if ((lport->nport_id != did) && (pkg->cmnd != ELS_FLOGI)) { - vport = unf_cm_lookup_vport_by_did(lport, did); - if (!vport) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) receive ELS cmd(0x%x) with abnormal D_ID(0x%x)", - lport->nport_id, pkg->cmnd, did); - - unf_cm_free_xchg(lport, xchg); - return UNF_RETURN_ERROR; - } - } - - return RETURN_OK; -} - -static u32 unf_rcv_els_cmnd_req(struct unf_lport *lport, struct unf_frame_pkg *pkg) -{ - struct unf_xchg *xchg = NULL; - u32 ret = UNF_RETURN_ERROR; - u32 i = 0; - u32 sid = 0; - u32 did = 0; - struct unf_lport *vport = NULL; - u32 (*els_cmnd_handler)(struct unf_lport *, u32, struct unf_xchg *) = NULL; - - sid = (pkg->frame_head.csctl_sid) & UNF_NPORTID_MASK; - did = (pkg->frame_head.rctl_did) & UNF_NPORTID_MASK; - - xchg = unf_mv_data_2_xchg(lport, pkg); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) receive ElsCmnd(0x%x), exchange is NULL", - lport->port_id, pkg->cmnd); - return UNF_RETURN_ERROR; - } - - if (!pkg->last_pkg_flag) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Exchange(%u) waiting for last WQE", - xchg->hotpooltag); - return RETURN_OK; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Exchange(%u) get last WQE", xchg->hotpooltag); - - xchg->oxid = UNF_GET_OXID(pkg); - xchg->abort_oxid = xchg->oxid; - xchg->rxid = UNF_GET_RXID(pkg); - xchg->cmnd_code = pkg->cmnd; - - ret = unf_check_els_cmnd_valid(lport, pkg, xchg); - if (ret != RETURN_OK) - return UNF_RETURN_ERROR; - - if (lport->nport_id != did && pkg->cmnd != ELS_FLOGI) { - vport = unf_cm_lookup_vport_by_did(lport, did); - if (!vport) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) received unknown ELS command with S_ID(0x%x) D_ID(0x%x))", - lport->port_id, sid, did); - return UNF_RETURN_ERROR; - } - lport = vport; - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[info]VPort(0x%x) received ELS command with S_ID(0x%x) D_ID(0x%x)", - lport->port_id, sid, did); - } - - do { - if (pkg->cmnd == els_handle_table[i].cmnd) { - els_cmnd_handler = els_handle_table[i].els_cmnd_handler; - break; - } - i++; - } while (i < (sizeof(els_handle_table) / sizeof(struct unf_els_handle_table))); - - if (els_cmnd_handler) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) receive ELS(0x%x) from RPort(0x%x) and process it", - lport->port_id, pkg->cmnd, sid); - ret = els_cmnd_handler(lport, sid, xchg); - } else { - ret = unf_els_cmnd_default_handler(lport, xchg, sid, pkg->cmnd); - } - return ret; -} - -u32 unf_send_els_rsp_succ(struct unf_lport *lport, struct unf_frame_pkg *pkg) -{ - struct unf_xchg *xchg = NULL; - u32 ret = RETURN_OK; - u16 hot_pool_tag = 0; - ulong flags = 0; - void (*ob_callback)(struct unf_xchg *) = NULL; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - - if (!lport->xchg_mgr_temp.unf_look_up_xchg_by_tag) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) lookup exchange by tag function is NULL", - 
lport->port_id); - - return UNF_RETURN_ERROR; - } - - hot_pool_tag = (u16)(pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]); - xchg = (struct unf_xchg *)(lport->xchg_mgr_temp.unf_look_up_xchg_by_tag((void *)lport, - hot_pool_tag)); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) find exhange by tag(0x%x) failed", - lport->port_id, hot_pool_tag); - - return UNF_RETURN_ERROR; - } - - lport->xchg_mgr_temp.unf_xchg_cancel_timer((void *)xchg); - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - if (xchg->ob_callback && - (!(xchg->io_state & TGT_IO_STATE_ABORT))) { - ob_callback = xchg->ob_callback; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) with exchange(0x%p) tag(0x%x) do callback", - lport->port_id, xchg, hot_pool_tag); - - ob_callback(xchg); - } else { - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - } - - unf_cm_free_xchg((void *)lport, (void *)xchg); - return ret; -} - -static u8 *unf_calc_big_resp_pld_buffer(struct unf_xchg *xchg, u32 cmnd_code) -{ - u8 *resp_pld = NULL; - u8 *dest = NULL; - - FC_CHECK_RETURN_VALUE(xchg, NULL); - - if (cmnd_code == ELS_ECHO) { - resp_pld = (u8 *)xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr->echo.echo_pld; - } else { - resp_pld = (u8 *)xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr - ->get_id.gid_rsp.gid_acc_pld; - } - - if (resp_pld) - dest = (u8 *)(resp_pld + xchg->fcp_sfs_union.sfs_entry.cur_offset); - - return dest; -} - -static u8 *unf_calc_other_resp_pld_buffer(struct unf_xchg *xchg) -{ - u8 *dest = NULL; - u32 offset = 0; - - FC_CHECK_RETURN_VALUE(xchg, NULL); - - offset = xchg->fcp_sfs_union.sfs_entry.cur_offset; - dest = (u8 *)((u8 *)(xchg->fcp_sfs_union.sfs_entry.fc_sfs_entry_ptr) + offset); - - return dest; -} - -u32 unf_mv_resp_2_xchg(struct unf_xchg *xchg, struct unf_frame_pkg *pkg) -{ - u8 *dest = NULL; - u32 length = 0; - u32 offset = 0; - u32 max_frame_len = 0; - ulong flags = 0; - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - - if (UNF_NEED_BIG_RESPONSE_BUFF(xchg->cmnd_code)) { - dest = unf_calc_big_resp_pld_buffer(xchg, xchg->cmnd_code); - offset = 0; - max_frame_len = sizeof(struct unf_gid_acc_pld); - } else if (NS_GA_NXT == xchg->cmnd_code || NS_GIEL == xchg->cmnd_code) { - dest = unf_calc_big_resp_pld_buffer(xchg, xchg->cmnd_code); - offset = 0; - max_frame_len = xchg->fcp_sfs_union.sfs_entry.sfs_buff_len; - } else { - dest = unf_calc_other_resp_pld_buffer(xchg); - offset = sizeof(struct unf_fc_head); - max_frame_len = sizeof(union unf_sfs_u); - } - - if (!dest) { - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - return UNF_RETURN_ERROR; - } - - if (xchg->fcp_sfs_union.sfs_entry.cur_offset == 0) { - xchg->fcp_sfs_union.sfs_entry.cur_offset += offset; - dest = dest + offset; - } - - length = pkg->unf_cmnd_pload_bl.length; - - if ((xchg->fcp_sfs_union.sfs_entry.cur_offset + length) > - max_frame_len) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Exchange(0x%p) command(0x%x) hotpooltag(0x%x) OX_RX_ID(0x%x) S_ID(0x%x) D_ID(0x%x) copy payload overrun(0x%x:0x%x:0x%x)", - xchg, xchg->cmnd_code, xchg->hotpooltag, pkg->frame_head.oxid_rxid, - pkg->frame_head.csctl_sid & UNF_NPORTID_MASK, - pkg->frame_head.rctl_did & UNF_NPORTID_MASK, - xchg->fcp_sfs_union.sfs_entry.cur_offset, - pkg->unf_cmnd_pload_bl.length, max_frame_len); - - length = max_frame_len - xchg->fcp_sfs_union.sfs_entry.cur_offset; - } - - if (length > 0) - memcpy(dest, pkg->unf_cmnd_pload_bl.buffer_ptr, length); - - 
xchg->fcp_sfs_union.sfs_entry.cur_offset += length; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - return RETURN_OK; -} - -static void unf_ls_gs_do_callback(struct unf_xchg *xchg, - struct unf_frame_pkg *pkg) -{ - ulong flags = 0; - void (*callback)(void *, void *, void *) = NULL; - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - if (xchg->callback && - (xchg->cmnd_code == ELS_RRQ || - xchg->cmnd_code == ELS_LOGO || - !(xchg->io_state & TGT_IO_STATE_ABORT))) { - callback = xchg->callback; - - if (xchg->cmnd_code == ELS_FLOGI || xchg->cmnd_code == ELS_FDISC) - xchg->sid = pkg->frame_head.rctl_did & UNF_NPORTID_MASK; - - if (xchg->cmnd_code == ELS_ECHO) { - xchg->private_data[PKG_PRIVATE_ECHO_CMD_RCV_TIME] = - pkg->private_data[PKG_PRIVATE_ECHO_CMD_RCV_TIME]; - xchg->private_data[PKG_PRIVATE_ECHO_RSP_SND_TIME] = - pkg->private_data[PKG_PRIVATE_ECHO_RSP_SND_TIME]; - xchg->private_data[PKG_PRIVATE_ECHO_CMD_SND_TIME] = - pkg->private_data[PKG_PRIVATE_ECHO_CMD_SND_TIME]; - xchg->private_data[PKG_PRIVATE_ECHO_ACC_RCV_TIME] = - pkg->private_data[PKG_PRIVATE_ECHO_ACC_RCV_TIME]; - } - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - callback(xchg->lport, xchg->rport, xchg); - } else { - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - } -} - -u32 unf_send_ls_gs_cmnd_succ(struct unf_lport *lport, struct unf_frame_pkg *pkg) -{ - struct unf_xchg *xchg = NULL; - u32 ret = RETURN_OK; - u16 hot_pool_tag = 0; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - unf_lport = lport; - - if (!unf_lport->xchg_mgr_temp.unf_look_up_xchg_by_tag) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) lookup exchange by tag function can't be NULL", - unf_lport->port_id); - - return UNF_RETURN_ERROR; - } - - hot_pool_tag = (u16)(pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]); - xchg = (struct unf_xchg *)(unf_lport->xchg_mgr_temp - .unf_look_up_xchg_by_tag((void *)unf_lport, hot_pool_tag)); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) find exchange by tag(0x%x) failed", - unf_lport->port_id, unf_lport->nport_id, hot_pool_tag); - - return UNF_RETURN_ERROR; - } - - UNF_CHECK_ALLOCTIME_VALID(unf_lport, hot_pool_tag, xchg, - pkg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME], - xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME]); - - if ((pkg->frame_head.csctl_sid & UNF_NPORTID_MASK) != xchg->did) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) find exhange invalid, package S_ID(0x%x) exchange S_ID(0x%x) D_ID(0x%x)", - unf_lport->port_id, pkg->frame_head.csctl_sid, xchg->sid, xchg->did); - - return UNF_RETURN_ERROR; - } - - if (pkg->last_pkg_flag == UNF_PKG_NOT_LAST_RESPONSE) { - ret = unf_mv_resp_2_xchg(xchg, pkg); - return ret; - } - - xchg->byte_orders = pkg->byte_orders; - unf_lport->xchg_mgr_temp.unf_xchg_cancel_timer((void *)xchg); - unf_ls_gs_do_callback(xchg, pkg); - unf_cm_free_xchg((void *)unf_lport, (void *)xchg); - return ret; -} - -u32 unf_send_ls_gs_cmnd_failed(struct unf_lport *lport, - struct unf_frame_pkg *pkg) -{ - struct unf_xchg *xchg = NULL; - u32 ret = RETURN_OK; - u16 hot_pool_tag = 0; - ulong flags = 0; - void (*ob_callback)(struct unf_xchg *) = NULL; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - - if (!lport->xchg_mgr_temp.unf_look_up_xchg_by_tag) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) lookup exchange by tag function can't be NULL", - 
lport->port_id); - - return UNF_RETURN_ERROR; - } - - hot_pool_tag = (u16)(pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]); - xchg = (struct unf_xchg *)(lport->xchg_mgr_temp.unf_look_up_xchg_by_tag((void *)lport, - hot_pool_tag)); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) find exhange by tag(0x%x) failed", - lport->port_id, lport->nport_id, hot_pool_tag); - - return UNF_RETURN_ERROR; - } - - UNF_CHECK_ALLOCTIME_VALID(lport, hot_pool_tag, xchg, - pkg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME], - xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME]); - - lport->xchg_mgr_temp.unf_xchg_cancel_timer((void *)xchg); - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - if (xchg->ob_callback && - (xchg->cmnd_code == ELS_RRQ || xchg->cmnd_code == ELS_LOGO || - (!(xchg->io_state & TGT_IO_STATE_ABORT)))) { - ob_callback = xchg->ob_callback; - xchg->ob_callback_sts = pkg->status; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - ob_callback(xchg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) exchange(0x%p) tag(0x%x) do callback", - lport->port_id, xchg, hot_pool_tag); - } else { - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - } - - unf_cm_free_xchg((void *)lport, (void *)xchg); - return ret; -} - -static u32 unf_rcv_ls_gs_cmnd_reply(struct unf_lport *lport, - struct unf_frame_pkg *pkg) -{ - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - - if (pkg->status == UNF_IO_SUCCESS || pkg->status == UNF_IO_UNDER_FLOW) - ret = unf_send_ls_gs_cmnd_succ(lport, pkg); - else - ret = unf_send_ls_gs_cmnd_failed(lport, pkg); - - return ret; -} - -u32 unf_receive_ls_gs_pkg(void *lport, struct unf_frame_pkg *pkg) -{ - struct unf_lport *unf_lport = NULL; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - unf_lport = (struct unf_lport *)lport; - - switch (pkg->type) { - case UNF_PKG_ELS_REQ_DONE: - case UNF_PKG_GS_REQ_DONE: - ret = unf_rcv_ls_gs_cmnd_reply(unf_lport, pkg); - break; - - case UNF_PKG_ELS_REQ: - ret = unf_rcv_els_cmnd_req(unf_lport, pkg); - break; - - default: - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) with exchange type(0x%x) abnormal", - unf_lport->port_id, unf_lport->nport_id, pkg->type); - break; - } - - return ret; -} - -u32 unf_send_els_done(void *lport, struct unf_frame_pkg *pkg) -{ - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - - if (pkg->type == UNF_PKG_ELS_REPLY_DONE) { - if (pkg->status == UNF_IO_SUCCESS || pkg->status == UNF_IO_UNDER_FLOW) - ret = unf_send_els_rsp_succ(lport, pkg); - else - ret = unf_send_ls_gs_cmnd_failed(lport, pkg); - } - - return ret; -} - -void unf_rport_immediate_link_down(struct unf_lport *lport, struct unf_rport *rport) -{ - /* Swap case: Report Link Down immediately & release R_Port */ - ulong flags = 0; - struct unf_disc *disc = NULL; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - spin_lock_irqsave(&rport->rport_state_lock, flags); - /* 1. Inc R_Port ref_cnt */ - if (unf_rport_ref_inc(rport) != RETURN_OK) { - spin_unlock_irqrestore(&rport->rport_state_lock, flags); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) Rport(0x%p,0x%x) is removing and no need process", - lport->port_id, rport, rport->nport_id); - - return; - } - - /* 2. 
R_PORT state update: Link Down Event --->>> closing state */ - unf_rport_state_ma(rport, UNF_EVENT_RPORT_LINK_DOWN); - spin_unlock_irqrestore(&rport->rport_state_lock, flags); - - /* 3. Put R_Port from busy to destroy list */ - disc = &lport->disc; - spin_lock_irqsave(&disc->rport_busy_pool_lock, flags); - list_del_init(&rport->entry_rport); - list_add_tail(&rport->entry_rport, &disc->list_destroy_rports); - spin_unlock_irqrestore(&disc->rport_busy_pool_lock, flags); - - /* 4. Schedule Closing work (Enqueuing workqueue) */ - unf_schedule_closing_work(lport, rport); - - unf_rport_ref_dec(rport); -} - -struct unf_rport *unf_find_rport(struct unf_lport *lport, u32 rport_nport_id, - u64 lport_name) -{ - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_rport = NULL; - - FC_CHECK_RETURN_VALUE(lport, NULL); - - if (rport_nport_id >= UNF_FC_FID_DOM_MGR) { - /* R_Port is Fabric: by N_Port_ID */ - unf_rport = unf_get_rport_by_nport_id(unf_lport, rport_nport_id); - } else { - /* Others: by WWPN & N_Port_ID */ - unf_rport = unf_find_valid_rport(unf_lport, lport_name, rport_nport_id); - } - - return unf_rport; -} - -void unf_process_logo_in_pri_loop(struct unf_lport *lport, struct unf_rport *rport) -{ - /* Send PLOGI or LOGO */ - struct unf_rport *unf_rport = rport; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_ENTER_PLOGI); /* PLOGI WAIT */ - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - /* Private Loop with INI mode, Avoid COM Mode problem */ - unf_rport_delay_login(unf_rport); -} - -void unf_process_logo_in_n2n(struct unf_lport *lport, struct unf_rport *rport) -{ - /* Send PLOGI or LOGO */ - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_rport = rport; - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - spin_lock_irqsave(&unf_rport->rport_state_lock, flag); - - unf_rport_state_ma(unf_rport, UNF_EVENT_RPORT_ENTER_PLOGI); - spin_unlock_irqrestore(&unf_rport->rport_state_lock, flag); - - if (unf_lport->port_name > unf_rport->port_name) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x)'s WWN(0x%llx) is larger than(0x%llx), should be master", - unf_lport->port_id, unf_lport->port_name, unf_rport->port_name); - - ret = unf_send_plogi(unf_lport, unf_rport); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]LOGIN: Port(0x%x) send PLOGI failed, enter recovery", - lport->port_id); - - unf_rport_error_recovery(unf_rport); - } - } else { - unf_rport_enter_logo(unf_lport, unf_rport); - } -} - -void unf_process_logo_in_fabric(struct unf_lport *lport, - struct unf_rport *rport) -{ - /* Send GFF_ID or LOGO */ - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_rport = rport; - struct unf_rport *sns_port = NULL; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - /* L_Port with INI Mode: Send GFF_ID */ - sns_port = unf_get_rport_by_nport_id(unf_lport, UNF_FC_FID_DIR_SERV); - if (!sns_port) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) can't find fabric port", - unf_lport->port_id); - return; - } - - ret = unf_get_and_post_disc_event(lport, sns_port, unf_rport->nport_id, - UNF_DISC_GET_FEATURE); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) add discovery event(0x%x) failed Rport(0x%x)", - unf_lport->port_id, 
UNF_DISC_GET_FEATURE, - unf_rport->nport_id); - - unf_rcv_gff_id_rsp_unknown(unf_lport, unf_rport->nport_id); - } -} - -void unf_process_rport_after_logo(struct unf_lport *lport, struct unf_rport *rport) -{ - /* - * 1. LOGO handler - * 2. RPLO handler - * 3. LOGO_CALL_BACK (send LOGO ACC) handler - */ - struct unf_lport *unf_lport = lport; - struct unf_rport *unf_rport = rport; - - FC_CHECK_RETURN_VOID(lport); - FC_CHECK_RETURN_VOID(rport); - - if (unf_rport->nport_id < UNF_FC_FID_DOM_MGR) { - /* R_Port is not fabric port (retry LOGIN or LOGO) */ - if (unf_lport->act_topo == UNF_ACT_TOP_PRIVATE_LOOP) { - /* Private Loop: PLOGI or LOGO */ - unf_process_logo_in_pri_loop(unf_lport, unf_rport); - } else if (unf_lport->act_topo == UNF_ACT_TOP_P2P_DIRECT) { - /* Point to Point: LOGIN or LOGO */ - unf_process_logo_in_n2n(unf_lport, unf_rport); - } else { - /* Fabric or Public Loop: GFF_ID or LOGO */ - unf_process_logo_in_fabric(unf_lport, unf_rport); - } - } else { - /* Rport is fabric port: link down now */ - unf_rport_linkdown(unf_lport, unf_rport); - } -} - -static u32 unf_rcv_bls_req_done(struct unf_lport *lport, struct unf_frame_pkg *pkg) -{ - /* - * About I/O resource: - * 1. normal: Release I/O resource during RRQ processer - * 2. exception: Release I/O resource immediately - */ - struct unf_xchg *xchg = NULL; - u16 hot_pool_tag = 0; - ulong flags = 0; - ulong time_ms = 0; - u32 ret = RETURN_OK; - struct unf_lport *unf_lport = NULL; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - unf_lport = lport; - - hot_pool_tag = (u16)pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX]; - xchg = (struct unf_xchg *)unf_cm_lookup_xchg_by_tag((void *)unf_lport, hot_pool_tag); - if (!xchg) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) can't find exchange by tag(0x%x) when receiving ABTS response", - unf_lport->port_id, hot_pool_tag); - return UNF_RETURN_ERROR; - } - - UNF_CHECK_ALLOCTIME_VALID(lport, hot_pool_tag, xchg, - pkg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME], - xchg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME]); - - ret = unf_xchg_ref_inc(xchg, TGT_ABTS_DONE); - FC_CHECK_RETURN_VALUE((ret == RETURN_OK), UNF_RETURN_ERROR); - - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - xchg->oxid = UNF_GET_OXID(pkg); - xchg->rxid = UNF_GET_RXID(pkg); - xchg->io_state |= INI_IO_STATE_DONE; - xchg->abts_state |= ABTS_RESPONSE_RECEIVED; - if (!(INI_IO_STATE_UPABORT & xchg->io_state)) { - /* NOTE: I/O exchange has been released and used again */ - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x_0x%x) SID(0x%x) exch(0x%p) (0x%x:0x%x:0x%x:0x%x) state(0x%x) is abnormal with cnt(0x%x)", - unf_lport->port_id, unf_lport->nport_id, xchg->sid, - xchg, xchg->hotpooltag, xchg->oxid, xchg->rxid, - xchg->oid, xchg->io_state, - atomic_read(&xchg->ref_cnt)); - - unf_xchg_ref_dec(xchg, TGT_ABTS_DONE); - return UNF_RETURN_ERROR; - } - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - unf_lport->xchg_mgr_temp.unf_xchg_cancel_timer((void *)xchg); - /* - * Exchage I/O Status check: Succ-> Add RRQ Timer - * ***** pkg->status --- to --->>> scsi_cmnd->result ***** - * * - * FAILED: ERR_Code or X_ID is err, or BA_RSP type is err - */ - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - if (pkg->status == UNF_IO_SUCCESS) { - /* Succeed: PKG status -->> EXCH status -->> scsi status */ - UNF_SET_SCSI_CMND_RESULT(xchg, UNF_IO_SUCCESS); - xchg->io_state |= INI_IO_STATE_WAIT_RRQ; - xchg->rxid = 
UNF_GET_RXID(pkg); - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - /* Add RRQ timer */ - time_ms = (ulong)(unf_lport->ra_tov); - unf_lport->xchg_mgr_temp.unf_xchg_add_timer((void *)xchg, time_ms, - UNF_TIMER_TYPE_INI_RRQ); - } else { - /* Failed: PKG status -->> EXCH status -->> scsi status */ - UNF_SET_SCSI_CMND_RESULT(xchg, UNF_IO_FAILED); - if (MARKER_STS_RECEIVED & xchg->abts_state) { - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - - /* NOTE: release I/O resource immediately */ - unf_cm_free_xchg(unf_lport, xchg); - } else { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) exch(0x%p) OX_RX(0x%x:0x%x) IOstate(0x%x) ABTSstate(0x%x) receive response abnormal ref(0x%x)", - unf_lport->port_id, xchg, xchg->oxid, xchg->rxid, - xchg->io_state, xchg->abts_state, atomic_read(&xchg->ref_cnt)); - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - } - } - - /* - * If abts response arrived before - * marker sts received just wake up abts marker sema - */ - spin_lock_irqsave(&xchg->xchg_state_lock, flags); - if (!(MARKER_STS_RECEIVED & xchg->abts_state)) { - xchg->ucode_abts_state = pkg->status; - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - up(&xchg->task_sema); - } else { - spin_unlock_irqrestore(&xchg->xchg_state_lock, flags); - } - - unf_xchg_ref_dec(xchg, TGT_ABTS_DONE); - return ret; -} - -u32 unf_receive_bls_pkg(void *lport, struct unf_frame_pkg *pkg) -{ - struct unf_lport *unf_lport = NULL; - u32 ret = UNF_RETURN_ERROR; - - unf_lport = (struct unf_lport *)lport; - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - - if (pkg->type == UNF_PKG_BLS_REQ_DONE) { - /* INI: RCVD BLS Req Done */ - ret = unf_rcv_bls_req_done(lport, pkg); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) received BLS packet type(%xh) is error", - unf_lport->port_id, pkg->type); - - return UNF_RETURN_ERROR; - } - - return ret; -} - -static void unf_fill_free_xid_pkg(struct unf_xchg *xchg, struct unf_frame_pkg *pkg) -{ - pkg->frame_head.csctl_sid = xchg->sid; - pkg->frame_head.rctl_did = xchg->did; - pkg->frame_head.oxid_rxid = (u32)(((u32)xchg->oxid << UNF_SHIFT_16) | xchg->rxid); - pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = xchg->hotpooltag; - UNF_SET_XCHG_ALLOC_TIME(pkg, xchg); - - if (xchg->xchg_type == UNF_XCHG_TYPE_SFS) { - if (UNF_XCHG_IS_ELS_REPLY(xchg)) { - pkg->type = UNF_PKG_ELS_REPLY; - pkg->rx_or_ox_id = UNF_PKG_FREE_RXID; - pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = INVALID_VALUE32; - pkg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = INVALID_VALUE32; - } else { - pkg->type = UNF_PKG_ELS_REQ; - pkg->rx_or_ox_id = UNF_PKG_FREE_OXID; - } - } else if (xchg->xchg_type == UNF_XCHG_TYPE_INI) { - pkg->type = UNF_PKG_INI_IO; - pkg->rx_or_ox_id = UNF_PKG_FREE_OXID; - } -} - -void unf_notify_chip_free_xid(struct unf_xchg *xchg) -{ - struct unf_lport *unf_lport = NULL; - u32 ret = RETURN_ERROR; - struct unf_frame_pkg pkg = {0}; - - FC_CHECK_RETURN_VOID(xchg); - unf_lport = xchg->lport; - FC_CHECK_RETURN_VOID(unf_lport); - - unf_fill_free_xid_pkg(xchg, &pkg); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Sid_Did(0x%x)(0x%x) Xchg(0x%p) RXorOX(0x%x) tag(0x%x) xid(0x%x) magic(0x%x) Stat(0x%x)type(0x%x) wait timeout.", - xchg->sid, xchg->did, xchg, pkg.rx_or_ox_id, - pkg.private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX], pkg.frame_head.oxid_rxid, - pkg.private_data[PKG_PRIVATE_XCHG_ALLOC_TIME], xchg->io_state, pkg.type); - - ret = 
unf_lport->low_level_func.service_op.ll_release_xid(unf_lport->fc_port, &pkg); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Free xid abnormal:Sid_Did(0x%x 0x%x) Xchg(0x%p) RXorOX(0x%x) xid(0x%x) Stat(0x%x) tag(0x%x) magic(0x%x) type(0x%x).", - xchg->sid, xchg->did, xchg, pkg.rx_or_ox_id, - pkg.frame_head.oxid_rxid, xchg->io_state, - pkg.private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX], - pkg.private_data[PKG_PRIVATE_XCHG_ALLOC_TIME], - pkg.type); - } -} diff --git a/drivers/scsi/spfc/common/unf_service.h b/drivers/scsi/spfc/common/unf_service.h deleted file mode 100644 index 0dd2975c6a7b..000000000000 --- a/drivers/scsi/spfc/common/unf_service.h +++ /dev/null @@ -1,66 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_SERVICE_H -#define UNF_SERVICE_H - -#include "unf_type.h" -#include "unf_exchg.h" -#include "unf_rport.h" - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -extern u32 max_frame_size; -#define UNF_INIT_DISC 0x1 /* first time DISC */ -#define UNF_RSCN_DISC 0x2 /* RSCN Port Addr DISC */ -#define UNF_SET_ELS_ACC_TYPE(els_cmd) ((u32)(els_cmd) << 16 | ELS_ACC) -#define UNF_SET_ELS_RJT_TYPE(els_cmd) ((u32)(els_cmd) << 16 | ELS_RJT) -#define UNF_XCHG_IS_ELS_REPLY(xchg) \ - ((ELS_ACC == ((xchg)->cmnd_code & 0x0ffff)) || \ - (ELS_RJT == ((xchg)->cmnd_code & 0x0ffff))) - -struct unf_els_handle_table { - u32 cmnd; - u32 (*els_cmnd_handler)(struct unf_lport *lport, u32 sid, struct unf_xchg *xchg); -}; - -void unf_select_sq(struct unf_xchg *xchg, struct unf_frame_pkg *pkg); -void unf_fill_package(struct unf_frame_pkg *pkg, struct unf_xchg *xchg, - struct unf_rport *rport); -struct unf_xchg *unf_get_sfs_free_xchg_and_init(struct unf_lport *lport, - u32 did, - struct unf_rport *rport, - union unf_sfs_u **fc_entry); -void *unf_get_one_big_sfs_buf(struct unf_xchg *xchg); -u32 unf_mv_resp_2_xchg(struct unf_xchg *xchg, struct unf_frame_pkg *pkg); -void unf_rport_immediate_link_down(struct unf_lport *lport, - struct unf_rport *rport); -struct unf_rport *unf_find_rport(struct unf_lport *lport, u32 rport_nport_id, - u64 port_name); -void unf_process_logo_in_fabric(struct unf_lport *lport, - struct unf_rport *rport); -void unf_notify_chip_free_xid(struct unf_xchg *xchg); - -u32 unf_ls_gs_cmnd_send(struct unf_lport *lport, struct unf_frame_pkg *pkg, - struct unf_xchg *xchg); -u32 unf_receive_ls_gs_pkg(void *lport, struct unf_frame_pkg *pkg); -struct unf_xchg *unf_mv_data_2_xchg(struct unf_lport *lport, - struct unf_frame_pkg *pkg); -u32 unf_receive_bls_pkg(void *lport, struct unf_frame_pkg *pkg); -u32 unf_send_els_done(void *lport, struct unf_frame_pkg *pkg); -u32 unf_send_els_rjt_by_did(struct unf_lport *lport, struct unf_xchg *xchg, - u32 did, struct unf_rjt_info *rjt_info); -u32 unf_send_els_rjt_by_rport(struct unf_lport *lport, struct unf_xchg *xchg, - struct unf_rport *rport, - struct unf_rjt_info *rjt_info); -u32 unf_send_abts(struct unf_lport *lport, struct unf_xchg *xchg); -void unf_process_rport_after_logo(struct unf_lport *lport, - struct unf_rport *rport); - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -#endif /* __UNF_SERVICE_H__ */ diff --git a/drivers/scsi/spfc/common/unf_type.h b/drivers/scsi/spfc/common/unf_type.h deleted file mode 100644 index 28e163d0543c..000000000000 --- a/drivers/scsi/spfc/common/unf_type.h +++ /dev/null @@ -1,216 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef UNF_TYPE_H -#define 
UNF_TYPE_H - -#include <linux/sched.h> -#include <linux/kthread.h> -#include <linux/fs.h> -#include <linux/vmalloc.h> -#include <linux/version.h> -#include <linux/list.h> -#include <linux/spinlock.h> -#include <linux/delay.h> -#include <linux/workqueue.h> -#include <linux/kref.h> -#include <linux/scatterlist.h> -#include <linux/crc-t10dif.h> -#include <linux/ctype.h> -#include <linux/types.h> -#include <linux/compiler.h> -#include <linux/init.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/interrupt.h> -#include <linux/random.h> -#include <linux/jiffies.h> -#include <linux/cpufreq.h> -#include <linux/semaphore.h> -#include <linux/jiffies.h> - -#include <scsi/scsi.h> -#include <scsi/scsi_host.h> -#include <scsi/scsi_device.h> -#include <scsi/scsi_cmnd.h> -#include <scsi/scsi_transport_fc.h> -#include <linux/sched/signal.h> - -#ifndef SPFC_FT -#define SPFC_FT -#endif - -#define BUF_LIST_PAGE_SIZE (PAGE_SIZE << 8) - -#define UNF_S_TO_NS (1000000000) -#define UNF_S_TO_MS (1000) - -enum UNF_OS_THRD_PRI_E { - UNF_OS_THRD_PRI_HIGHEST = 0, - UNF_OS_THRD_PRI_HIGH, - UNF_OS_THRD_PRI_SUBHIGH, - UNF_OS_THRD_PRI_MIDDLE, - UNF_OS_THRD_PRI_LOW, - UNF_OS_THRD_PRI_BUTT -}; - -#define UNF_OS_LIST_NEXT(a) ((a)->next) -#define UNF_OS_LIST_PREV(a) ((a)->prev) - -#define UNF_OS_PER_NS (1000000000) -#define UNF_OS_MS_TO_NS (1000000) - -#ifndef MIN -#define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) -#endif - -#ifndef MAX -#define MAX(X, Y) ((X) > (Y) ? (X) : (Y)) -#endif - -#ifndef INVALID_VALUE64 -#define INVALID_VALUE64 0xFFFFFFFFFFFFFFFFULL -#endif /* INVALID_VALUE64 */ - -#ifndef INVALID_VALUE32 -#define INVALID_VALUE32 0xFFFFFFFF -#endif /* INVALID_VALUE32 */ - -#ifndef INVALID_VALUE16 -#define INVALID_VALUE16 0xFFFF -#endif /* INVALID_VALUE16 */ - -#ifndef INVALID_VALUE8 -#define INVALID_VALUE8 0xFF -#endif /* INVALID_VALUE8 */ - -#ifndef RETURN_OK -#define RETURN_OK 0 -#endif - -#ifndef RETURN_ERROR -#define RETURN_ERROR (~0) -#endif -#define UNF_RETURN_ERROR (~0) - -/* define shift bits */ -#define UNF_SHIFT_1 1 -#define UNF_SHIFT_2 2 -#define UNF_SHIFT_3 3 -#define UNF_SHIFT_4 4 -#define UNF_SHIFT_6 6 -#define UNF_SHIFT_7 7 -#define UNF_SHIFT_8 8 -#define UNF_SHIFT_11 11 -#define UNF_SHIFT_12 12 -#define UNF_SHIFT_15 15 -#define UNF_SHIFT_16 16 -#define UNF_SHIFT_17 17 -#define UNF_SHIFT_19 19 -#define UNF_SHIFT_20 20 -#define UNF_SHIFT_23 23 -#define UNF_SHIFT_24 24 -#define UNF_SHIFT_25 25 -#define UNF_SHIFT_26 26 -#define UNF_SHIFT_28 28 -#define UNF_SHIFT_29 29 -#define UNF_SHIFT_32 32 -#define UNF_SHIFT_35 35 -#define UNF_SHIFT_37 37 -#define UNF_SHIFT_39 39 -#define UNF_SHIFT_40 40 -#define UNF_SHIFT_43 43 -#define UNF_SHIFT_48 48 -#define UNF_SHIFT_51 51 -#define UNF_SHIFT_56 56 -#define UNF_SHIFT_57 57 -#define UNF_SHIFT_59 59 -#define UNF_SHIFT_60 60 -#define UNF_SHIFT_61 61 - -/* array index */ -#define ARRAY_INDEX_0 0 -#define ARRAY_INDEX_1 1 -#define ARRAY_INDEX_2 2 -#define ARRAY_INDEX_3 3 -#define ARRAY_INDEX_4 4 -#define ARRAY_INDEX_5 5 -#define ARRAY_INDEX_6 6 -#define ARRAY_INDEX_7 7 -#define ARRAY_INDEX_8 8 -#define ARRAY_INDEX_10 10 -#define ARRAY_INDEX_11 11 -#define ARRAY_INDEX_12 12 -#define ARRAY_INDEX_13 13 - -/* define mask bits */ -#define UNF_MASK_BIT_7_0 0xff -#define UNF_MASK_BIT_15_0 0x0000ffff -#define UNF_MASK_BIT_31_16 0xffff0000 - -#define UNF_IO_SUCCESS 0x00000000 -#define UNF_IO_ABORTED 0x00000001 /* the host system aborted the command */ -#define UNF_IO_FAILED 0x00000002 -#define UNF_IO_ABORT_ABTS 0x00000003 -#define 
UNF_IO_ABORT_LOGIN 0x00000004 /* abort login */ -#define UNF_IO_ABORT_REET 0x00000005 /* reset event aborted the transport */ -#define UNF_IO_ABORT_FAILED 0x00000006 /* abort failed */ -/* data out of order ,data reassembly error */ -#define UNF_IO_OUTOF_ORDER 0x00000007 -#define UNF_IO_FTO 0x00000008 /* frame time out */ -#define UNF_IO_LINK_FAILURE 0x00000009 -#define UNF_IO_OVER_FLOW 0x0000000a /* data over run */ -#define UNF_IO_RSP_OVER 0x0000000b -#define UNF_IO_LOST_FRAME 0x0000000c -#define UNF_IO_UNDER_FLOW 0x0000000d /* data under run */ -#define UNF_IO_HOST_PROG_ERROR 0x0000000e -#define UNF_IO_SEST_PROG_ERROR 0x0000000f -#define UNF_IO_INVALID_ENTRY 0x00000010 -#define UNF_IO_ABORT_SEQ_NOT 0x00000011 -#define UNF_IO_REJECT 0x00000012 -#define UNF_IO_RS_INFO 0x00000013 -#define UNF_IO_EDC_IN_ERROR 0x00000014 -#define UNF_IO_EDC_OUT_ERROR 0x00000015 -#define UNF_IO_UNINIT_KEK_ERR 0x00000016 -#define UNF_IO_DEK_OUTOF_RANGE 0x00000017 -#define UNF_IO_KEY_UNWRAP_ERR 0x00000018 -#define UNF_IO_KEY_TAG_ERR 0x00000019 -#define UNF_IO_KEY_ECC_ERR 0x0000001a -#define UNF_IO_BLOCK_SIZE_ERROR 0x0000001b -#define UNF_IO_ILLEGAL_CIPHER_MODE 0x0000001c -#define UNF_IO_CLEAN_UP 0x0000001d -#define UNF_SRR_RECEIVE 0x0000001e /* receive srr */ -/* The target device sent an ABTS to abort the I/O.*/ -#define UNF_IO_ABORTED_BY_TARGET 0x0000001f -#define UNF_IO_TRANSPORT_ERROR 0x00000020 -#define UNF_IO_LINK_FLASH 0x00000021 -#define UNF_IO_TIMEOUT 0x00000022 -#define UNF_IO_PORT_UNAVAILABLE 0x00000023 -#define UNF_IO_PORT_LOGOUT 0x00000024 -#define UNF_IO_PORT_CFG_CHG 0x00000025 -#define UNF_IO_FIRMWARE_RES_UNAVAILABLE 0x00000026 -#define UNF_IO_TASK_MGT_OVERRUN 0x00000027 -#define UNF_IO_DMA_ERROR 0x00000028 -#define UNF_IO_DIF_ERROR 0x00000029 -#define UNF_IO_NO_LPORT 0x0000002a -#define UNF_IO_NO_XCHG 0x0000002b -#define UNF_IO_SOFT_ERR 0x0000002c -#define UNF_IO_XCHG_ADD_ERROR 0x0000002d -#define UNF_IO_NO_LOGIN 0x0000002e -#define UNF_IO_NO_BUFFER 0x0000002f -#define UNF_IO_DID_ERROR 0x00000030 -#define UNF_IO_UNSUPPORT 0x00000031 -#define UNF_IO_NOREADY 0x00000032 -#define UNF_IO_NPORTID_REUSED 0x00000033 -#define UNF_IO_NPORT_HANDLE_REUSED 0x00000034 -#define UNF_IO_NO_NPORT_HANDLE 0x00000035 -#define UNF_IO_ABORT_BY_FW 0x00000036 -#define UNF_IO_ABORT_PORT_REMOVING 0x00000037 -#define UNF_IO_INCOMPLETE 0x00000038 -#define UNF_IO_DIF_REF_ERROR 0x00000039 -#define UNF_IO_DIF_GEN_ERROR 0x0000003a - -#define UNF_IO_ERREND 0xFFFFFFFF - -#endif diff --git a/drivers/scsi/spfc/hw/spfc_chipitf.c b/drivers/scsi/spfc/hw/spfc_chipitf.c deleted file mode 100644 index be6073ff4dc0..000000000000 --- a/drivers/scsi/spfc/hw/spfc_chipitf.c +++ /dev/null @@ -1,1105 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "spfc_chipitf.h" -#include "sphw_hw.h" -#include "sphw_crm.h" - -#define SPFC_MBOX_TIME_SEC_MAX (60) - -#define SPFC_LINK_UP_COUNT 1 -#define SPFC_LINK_DOWN_COUNT 2 -#define SPFC_FC_DELETE_CMND_COUNT 3 - -#define SPFC_MBX_MAX_TIMEOUT 10000 - -u32 spfc_get_chip_msg(void *hba, void *mac) -{ - struct spfc_hba_info *spfc_hba = NULL; - struct unf_get_chip_info_argout *wwn = NULL; - struct spfc_inmbox_get_chip_info get_chip_info; - union spfc_outmbox_generic *get_chip_info_sts = NULL; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(mac, UNF_RETURN_ERROR); - - spfc_hba = (struct spfc_hba_info *)hba; - wwn = (struct unf_get_chip_info_argout *)mac; - - memset(&get_chip_info, 0, sizeof(struct 
spfc_inmbox_get_chip_info)); - - get_chip_info_sts = kmalloc(sizeof(union spfc_outmbox_generic), GFP_ATOMIC); - if (!get_chip_info_sts) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]malloc outmbox memory failed"); - return UNF_RETURN_ERROR; - } - memset(get_chip_info_sts, 0, sizeof(union spfc_outmbox_generic)); - - get_chip_info.header.cmnd_type = SPFC_MBOX_GET_CHIP_INFO; - get_chip_info.header.length = - SPFC_BYTES_TO_DW_NUM(sizeof(struct spfc_inmbox_get_chip_info)); - - if (spfc_mb_send_and_wait_mbox(spfc_hba, &get_chip_info, - sizeof(struct spfc_inmbox_get_chip_info), - get_chip_info_sts) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]spfc can't send and wait mailbox, command type: 0x%x.", - get_chip_info.header.cmnd_type); - - goto exit; - } - - if (get_chip_info_sts->get_chip_info_sts.status != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) mailbox status incorrect status(0x%x) .", - spfc_hba->port_cfg.port_id, - get_chip_info_sts->get_chip_info_sts.status); - - goto exit; - } - - if (get_chip_info_sts->get_chip_info_sts.header.cmnd_type != SPFC_MBOX_GET_CHIP_INFO_STS) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Port(0x%x) receive mailbox type incorrect type: 0x%x.", - spfc_hba->port_cfg.port_id, - get_chip_info_sts->get_chip_info_sts.header.cmnd_type); - - goto exit; - } - - wwn->board_type = get_chip_info_sts->get_chip_info_sts.board_type; - spfc_hba->card_info.card_type = get_chip_info_sts->get_chip_info_sts.board_type; - wwn->wwnn = get_chip_info_sts->get_chip_info_sts.wwnn; - wwn->wwpn = get_chip_info_sts->get_chip_info_sts.wwpn; - - ret = RETURN_OK; -exit: - kfree(get_chip_info_sts); - - return ret; -} - -u32 spfc_get_chip_capability(void *hwdev_handle, - struct spfc_chip_info *chip_info) -{ - struct spfc_inmbox_get_chip_info get_chip_info; - union spfc_outmbox_generic *get_chip_info_sts = NULL; - u16 out_size = 0; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(hwdev_handle, UNF_RETURN_ERROR); - - memset(&get_chip_info, 0, sizeof(struct spfc_inmbox_get_chip_info)); - - get_chip_info_sts = kmalloc(sizeof(union spfc_outmbox_generic), GFP_ATOMIC); - if (!get_chip_info_sts) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "malloc outmbox memory failed"); - return UNF_RETURN_ERROR; - } - memset(get_chip_info_sts, 0, sizeof(union spfc_outmbox_generic)); - - get_chip_info.header.cmnd_type = SPFC_MBOX_GET_CHIP_INFO; - get_chip_info.header.length = - SPFC_BYTES_TO_DW_NUM(sizeof(struct spfc_inmbox_get_chip_info)); - get_chip_info.header.port_id = (u8)sphw_global_func_id(hwdev_handle); - out_size = sizeof(union spfc_outmbox_generic); - - if (sphw_msg_to_mgmt_sync(hwdev_handle, COMM_MOD_FC, SPFC_MBOX_GET_CHIP_INFO, - (void *)&get_chip_info.header, - sizeof(struct spfc_inmbox_get_chip_info), - (union spfc_outmbox_generic *)(get_chip_info_sts), &out_size, - (SPFC_MBX_MAX_TIMEOUT), SPHW_CHANNEL_FC) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "spfc can't send and wait mailbox, command type: 0x%x.", - SPFC_MBOX_GET_CHIP_INFO); - - goto exit; - } - - if (get_chip_info_sts->get_chip_info_sts.status != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port mailbox status incorrect status(0x%x) .", - get_chip_info_sts->get_chip_info_sts.status); - - goto exit; - } - - if (get_chip_info_sts->get_chip_info_sts.header.cmnd_type != SPFC_MBOX_GET_CHIP_INFO_STS) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port receive mailbox type incorrect type: 0x%x.", - get_chip_info_sts->get_chip_info_sts.header.cmnd_type); - - 
goto exit; - } - - chip_info->wwnn = get_chip_info_sts->get_chip_info_sts.wwnn; - chip_info->wwpn = get_chip_info_sts->get_chip_info_sts.wwpn; - - ret = RETURN_OK; -exit: - kfree(get_chip_info_sts); - - return ret; -} - -u32 spfc_config_port_table(struct spfc_hba_info *hba) -{ - struct spfc_inmbox_config_api config_api; - union spfc_outmbox_generic *out_mbox = NULL; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - - memset(&config_api, 0, sizeof(config_api)); - out_mbox = kmalloc(sizeof(union spfc_outmbox_generic), GFP_ATOMIC); - if (!out_mbox) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]malloc outmbox memory failed"); - return UNF_RETURN_ERROR; - } - memset(out_mbox, 0, sizeof(union spfc_outmbox_generic)); - - config_api.header.cmnd_type = SPFC_MBOX_CONFIG_API; - config_api.header.length = SPFC_BYTES_TO_DW_NUM(sizeof(struct spfc_inmbox_config_api)); - - config_api.op_code = UNDEFINEOPCODE; - - /* change switching top cmd of CM to the cmd that up recognize */ - /* if the cmd equals UNF_TOP_P2P_MASK sending in CM means that it - * should be changed into P2P top, LL using SPFC_TOP_NON_LOOP_MASK - */ - if (((u8)(hba->port_topo_cfg)) == UNF_TOP_P2P_MASK) { - config_api.topy_mode = 0x2; - /* if the cmd equals UNF_TOP_LOOP_MASK sending in CM means that it - *should be changed into loop top, LL using SPFC_TOP_LOOP_MASK - */ - } else if (((u8)(hba->port_topo_cfg)) == UNF_TOP_LOOP_MASK) { - config_api.topy_mode = 0x1; - /* if the cmd equals UNF_TOP_AUTO_MASK sending in CM means that it - *should be changed into loop top, LL using SPFC_TOP_AUTO_MASK - */ - } else if (((u8)(hba->port_topo_cfg)) == UNF_TOP_AUTO_MASK) { - config_api.topy_mode = 0x0; - } else { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) topo cmd is error, command type: 0x%x", - hba->port_cfg.port_id, (u8)(hba->port_topo_cfg)); - - goto exit; - } - - /* About speed */ - config_api.sfp_speed = (u8)(hba->port_speed_cfg); - config_api.max_speed = (u8)(hba->max_support_speed); - - config_api.rx_6432g_bb_credit = SPFC_LOWLEVEL_DEFAULT_32G_BB_CREDIT; - config_api.rx_16g_bb_credit = SPFC_LOWLEVEL_DEFAULT_16G_BB_CREDIT; - config_api.rx_84g_bb_credit = SPFC_LOWLEVEL_DEFAULT_8G_BB_CREDIT; - config_api.rdy_cnt_bf_fst_frm = SPFC_LOWLEVEL_DEFAULT_LOOP_BB_CREDIT; - config_api.esch_32g_value = SPFC_LOWLEVEL_DEFAULT_32G_ESCH_VALUE; - config_api.esch_16g_value = SPFC_LOWLEVEL_DEFAULT_16G_ESCH_VALUE; - config_api.esch_8g_value = SPFC_LOWLEVEL_DEFAULT_8G_ESCH_VALUE; - config_api.esch_4g_value = SPFC_LOWLEVEL_DEFAULT_8G_ESCH_VALUE; - config_api.esch_64g_value = SPFC_LOWLEVEL_DEFAULT_8G_ESCH_VALUE; - config_api.esch_bust_size = SPFC_LOWLEVEL_DEFAULT_ESCH_BUST_SIZE; - - /* default value:0xFF */ - config_api.hard_alpa = 0xFF; - memcpy(config_api.port_name, hba->sys_port_name, UNF_WWN_LEN); - - /* if only for slave, the value is 1; if participate master choosing, - * the value is 0 - */ - config_api.slave = hba->port_loop_role; - - /* 1:auto negotiate, 0:fixed mode negotiate */ - if (config_api.sfp_speed == 0) - config_api.auto_sneg = 0x1; - else - config_api.auto_sneg = 0x0; - - if (spfc_mb_send_and_wait_mbox(hba, &config_api, sizeof(config_api), - out_mbox) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[warn]Port(0x%x) SPFC can't send and wait mailbox, command type: 0x%x", - hba->port_cfg.port_id, - config_api.header.cmnd_type); - - goto exit; - } - - if (out_mbox->config_api_sts.status != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_ERR, - "[err]Port(0x%x) receive mailbox type(0x%x) 
with status(0x%x) error", - hba->port_cfg.port_id, - out_mbox->config_api_sts.header.cmnd_type, - out_mbox->config_api_sts.status); - - goto exit; - } - - if (out_mbox->config_api_sts.header.cmnd_type != SPFC_MBOX_CONFIG_API_STS) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_ERR, - "[err]Port(0x%x) receive mailbox type(0x%x) error", - hba->port_cfg.port_id, - out_mbox->config_api_sts.header.cmnd_type); - - goto exit; - } - - ret = RETURN_OK; -exit: - kfree(out_mbox); - - return ret; -} - -u32 spfc_port_switch(struct spfc_hba_info *hba, bool turn_on) -{ - struct spfc_inmbox_port_switch port_switch; - union spfc_outmbox_generic *port_switch_sts = NULL; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - - memset(&port_switch, 0, sizeof(port_switch)); - - port_switch_sts = kmalloc(sizeof(union spfc_outmbox_generic), GFP_ATOMIC); - if (!port_switch_sts) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]malloc outmbox memory failed"); - return UNF_RETURN_ERROR; - } - memset(port_switch_sts, 0, sizeof(union spfc_outmbox_generic)); - - port_switch.header.cmnd_type = SPFC_MBOX_PORT_SWITCH; - port_switch.header.length = SPFC_BYTES_TO_DW_NUM(sizeof(struct spfc_inmbox_port_switch)); - port_switch.op_code = (u8)turn_on; - - if (spfc_mb_send_and_wait_mbox(hba, &port_switch, sizeof(port_switch), - (union spfc_outmbox_generic *)((void *)port_switch_sts)) != - RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[warn]Port(0x%x) SPFC can't send and wait mailbox, command type(0x%x) opcode(0x%x)", - hba->port_cfg.port_id, - port_switch.header.cmnd_type, port_switch.op_code); - - goto exit; - } - - if (port_switch_sts->port_switch_sts.status != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_ERR, - "[err]Port(0x%x) receive mailbox type(0x%x) status(0x%x) error", - hba->port_cfg.port_id, - port_switch_sts->port_switch_sts.header.cmnd_type, - port_switch_sts->port_switch_sts.status); - - goto exit; - } - - if (port_switch_sts->port_switch_sts.header.cmnd_type != SPFC_MBOX_PORT_SWITCH_STS) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_ERR, - "[err]Port(0x%x) receive mailbox type(0x%x) error", - hba->port_cfg.port_id, - port_switch_sts->port_switch_sts.header.cmnd_type); - - goto exit; - } - - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_MAJOR, - "[event]Port(0x%x) switch succeed, turns to %s", - hba->port_cfg.port_id, (turn_on) ? 
"on" : "off"); - - ret = RETURN_OK; -exit: - kfree(port_switch_sts); - - return ret; -} - -u32 spfc_config_login_api(struct spfc_hba_info *hba, - struct unf_port_login_parms *login_parms) -{ -#define SPFC_LOOP_RDYNUM 8 - int iret = RETURN_OK; - u32 ret = UNF_RETURN_ERROR; - struct spfc_inmbox_config_login config_login; - union spfc_outmbox_generic *cfg_login_sts = NULL; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - - memset(&config_login, 0, sizeof(config_login)); - cfg_login_sts = kmalloc(sizeof(union spfc_outmbox_generic), GFP_ATOMIC); - if (!cfg_login_sts) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]malloc outmbox memory failed"); - return UNF_RETURN_ERROR; - } - memset(cfg_login_sts, 0, sizeof(union spfc_outmbox_generic)); - - config_login.header.cmnd_type = SPFC_MBOX_CONFIG_LOGIN_API; - config_login.header.length = SPFC_BYTES_TO_DW_NUM(sizeof(struct spfc_inmbox_config_login)); - config_login.header.port_id = hba->port_index; - - config_login.op_code = UNDEFINEOPCODE; - - config_login.tx_bb_credit = hba->remote_bb_credit; - - config_login.etov = hba->compared_edtov_val; - config_login.rtov = hba->compared_ratov_val; - - config_login.rt_tov_tag = hba->remote_rttov_tag; - config_login.ed_tov_tag = hba->remote_edtov_tag; - config_login.bb_credit = hba->remote_bb_credit; - config_login.bb_scn = SPFC_LSB(hba->compared_bb_scn); - - if (config_login.bb_scn) { - config_login.lr_flag = (login_parms->els_cmnd_code == ELS_PLOGI) ? 0 : 1; - ret = spfc_mb_send_and_wait_mbox(hba, &config_login, sizeof(config_login), - (union spfc_outmbox_generic *)cfg_login_sts); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) SPFC can't send and wait mailbox, command type: 0x%x.", - hba->port_cfg.port_id, config_login.header.cmnd_type); - - goto exit; - } - - if (cfg_login_sts->config_login_sts.header.cmnd_type != - SPFC_MBOX_CONFIG_LOGIN_API_STS) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "Port(0x%x) Receive mailbox type incorrect. Type: 0x%x.", - hba->port_cfg.port_id, - cfg_login_sts->config_login_sts.header.cmnd_type); - - goto exit; - } - - if (cfg_login_sts->config_login_sts.status != STATUS_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "Port(0x%x) Receive mailbox type(0x%x) status incorrect. 
Status: 0x%x.", - hba->port_cfg.port_id, - cfg_login_sts->config_login_sts.header.cmnd_type, - cfg_login_sts->config_login_sts.status); - - goto exit; - } - } else { - iret = sphw_msg_to_mgmt_async(hba->dev_handle, COMM_MOD_FC, - SPFC_MBOX_CONFIG_LOGIN_API, &config_login, - sizeof(config_login), SPHW_CHANNEL_FC); - - if (iret != 0) { - SPFC_MAILBOX_STAT(hba, SPFC_SEND_CONFIG_LOGINAPI_FAIL); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) spfc can't send config login cmd to up,ret:%d.", - hba->port_cfg.port_id, iret); - - goto exit; - } - - SPFC_MAILBOX_STAT(hba, SPFC_SEND_CONFIG_LOGINAPI); - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "Port(0x%x) Topo(0x%x) Config login param to up: txbbcredit(0x%x), BB_SC_N(0x%x).", - hba->port_cfg.port_id, hba->active_topo, - config_login.tx_bb_credit, config_login.bb_scn); - - ret = RETURN_OK; -exit: - kfree(cfg_login_sts); - - return ret; -} - -u32 spfc_mb_send_and_wait_mbox(struct spfc_hba_info *hba, const void *in_mbox, - u16 in_size, - union spfc_outmbox_generic *out_mbox) -{ - void *handle = NULL; - u16 out_size = 0; - ulong time_out = 0; - int ret = 0; - struct spfc_mbox_header *header = NULL; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(in_mbox, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(out_mbox, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(hba->dev_handle, UNF_RETURN_ERROR); - header = (struct spfc_mbox_header *)in_mbox; - out_size = sizeof(union spfc_outmbox_generic); - handle = hba->dev_handle; - header->port_id = (u8)sphw_global_func_id(handle); - - /* Wait for las mailbox completion: */ - time_out = wait_for_completion_timeout(&hba->mbox_complete, - (ulong)msecs_to_jiffies(SPFC_MBOX_TIME_SEC_MAX * - UNF_S_TO_MS)); - if (time_out == SPFC_ZERO) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_ERR, - "[err]Port(0x%x) wait mailbox(0x%x) completion timeout: %d sec", - hba->port_cfg.port_id, header->cmnd_type, - SPFC_MBOX_TIME_SEC_MAX); - - return UNF_RETURN_ERROR; - } - - /* Send Msg to uP Sync: timer 10s */ - ret = sphw_msg_to_mgmt_sync(handle, COMM_MOD_FC, header->cmnd_type, - (void *)in_mbox, in_size, - (union spfc_outmbox_generic *)out_mbox, - &out_size, (SPFC_MBX_MAX_TIMEOUT), - SPHW_CHANNEL_FC); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[warn]Port(0x%x) can not send mailbox(0x%x) with ret:%d", - hba->port_cfg.port_id, header->cmnd_type, ret); - - complete(&hba->mbox_complete); - return UNF_RETURN_ERROR; - } - - complete(&hba->mbox_complete); - - return RETURN_OK; -} - -void spfc_initial_dynamic_info(struct spfc_hba_info *fc_port) -{ - struct spfc_hba_info *hba = fc_port; - ulong flag = 0; - - FC_CHECK_RETURN_VOID(hba); - - spin_lock_irqsave(&hba->hba_lock, flag); - hba->active_port_speed = UNF_PORT_SPEED_UNKNOWN; - hba->active_topo = UNF_ACT_TOP_UNKNOWN; - hba->phy_link = UNF_PORT_LINK_DOWN; - hba->queue_set_stage = SPFC_QUEUE_SET_STAGE_INIT; - hba->loop_map_valid = LOOP_MAP_INVALID; - hba->srq_delay_info.srq_delay_flag = 0; - hba->srq_delay_info.root_rq_rcvd_flag = 0; - spin_unlock_irqrestore(&hba->hba_lock, flag); -} - -static u32 spfc_recv_fc_linkup(struct spfc_hba_info *hba, void *buf_in) -{ -#define SPFC_LOOP_MASK 0x1 -#define SPFC_LOOPMAP_COUNT 128 - - u32 ret = UNF_RETURN_ERROR; - struct spfc_link_event *link_event = NULL; - - link_event = (struct spfc_link_event *)buf_in; - hba->phy_link = UNF_PORT_LINK_UP; - hba->active_port_speed = link_event->speed; - hba->led_states.green_speed_led = (u8)(link_event->green_speed_led); - hba->led_states.yellow_speed_led = 
(u8)(link_event->yellow_speed_led); - hba->led_states.ac_led = (u8)(link_event->ac_led); - - if (link_event->top_type == SPFC_LOOP_MASK && - (link_event->loop_map_info[ARRAY_INDEX_1] == UNF_FL_PORT_LOOP_ADDR || - link_event->loop_map_info[ARRAY_INDEX_2] == UNF_FL_PORT_LOOP_ADDR)) { - hba->active_topo = UNF_ACT_TOP_PUBLIC_LOOP; /* Public Loop */ - hba->active_alpa = link_event->alpa_value; /* AL_PA */ - memcpy(hba->loop_map, link_event->loop_map_info, SPFC_LOOPMAP_COUNT); - hba->loop_map_valid = LOOP_MAP_VALID; - } else if (link_event->top_type == SPFC_LOOP_MASK) { - hba->active_topo = UNF_ACT_TOP_PRIVATE_LOOP; /* Private Loop */ - hba->active_alpa = link_event->alpa_value; /* AL_PA */ - memcpy(hba->loop_map, link_event->loop_map_info, SPFC_LOOPMAP_COUNT); - hba->loop_map_valid = LOOP_MAP_VALID; - } else { - hba->active_topo = UNF_TOP_P2P_MASK; /* P2P_D or P2P_F */ - } - - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_KEVENT, - "[event]Port(0x%x) receive link up event(0x%x) with speed(0x%x) uP_topo(0x%x) driver_topo(0x%x)", - hba->port_cfg.port_id, link_event->link_event, - link_event->speed, link_event->top_type, hba->active_topo); - - /* Set clear & flush state */ - spfc_set_hba_clear_state(hba, false); - spfc_set_hba_flush_state(hba, false); - spfc_set_rport_flush_state(hba, false); - - /* Report link up event to COM */ - UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, UNF_PORT_LINK_UP, - &hba->active_port_speed); - - SPFC_LINK_EVENT_STAT(hba, SPFC_LINK_UP_COUNT); - - return ret; -} - -static u32 spfc_recv_fc_linkdown(struct spfc_hba_info *hba, void *buf_in) -{ - u32 ret = UNF_RETURN_ERROR; - struct spfc_link_event *link_event = NULL; - - link_event = (struct spfc_link_event *)buf_in; - - /* 1. Led state setting */ - hba->led_states.green_speed_led = (u8)(link_event->green_speed_led); - hba->led_states.yellow_speed_led = (u8)(link_event->yellow_speed_led); - hba->led_states.ac_led = (u8)(link_event->ac_led); - - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_KEVENT, - "[event]Port(0x%x) receive link down event(0x%x) reason(0x%x)", - hba->port_cfg.port_id, link_event->link_event, link_event->reason); - - spfc_initial_dynamic_info(hba); - - /* 2. set HBA flush state */ - spfc_set_hba_flush_state(hba, true); - - /* 3. set R_Port (parent SQ) flush state */ - spfc_set_rport_flush_state(hba, true); - - /* 4. 
Report link down event to COM */ - UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, UNF_PORT_LINK_DOWN, 0); - - /* DFX setting */ - SPFC_LINK_REASON_STAT(hba, link_event->reason); - SPFC_LINK_EVENT_STAT(hba, SPFC_LINK_DOWN_COUNT); - - return ret; -} - -static u32 spfc_recv_fc_delcmd(struct spfc_hba_info *hba, void *buf_in) -{ - u32 ret = UNF_RETURN_ERROR; - struct spfc_link_event *link_event = NULL; - - link_event = (struct spfc_link_event *)buf_in; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_KEVENT, - "[event]Port(0x%x) receive delete cmd event(0x%x)", - hba->port_cfg.port_id, link_event->link_event); - - /* Send buffer clear cmnd */ - ret = spfc_clear_fetched_sq_wqe(hba); - - hba->queue_set_stage = SPFC_QUEUE_SET_STAGE_SCANNING; - SPFC_LINK_EVENT_STAT(hba, SPFC_FC_DELETE_CMND_COUNT); - - return ret; -} - -static u32 spfc_recv_fc_error(struct spfc_hba_info *hba, void *buf_in) -{ -#define FC_ERR_LEVEL_DEAD 0 -#define FC_ERR_LEVEL_HIGH 1 -#define FC_ERR_LEVEL_LOW 2 - - u32 ret = UNF_RETURN_ERROR; - struct spfc_up_error_event *up_error_event = NULL; - - up_error_event = (struct spfc_up_error_event *)buf_in; - if (up_error_event->error_type >= SPFC_UP_ERR_BUTT || - up_error_event->error_value >= SPFC_ERR_VALUE_BUTT) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Port(0x%x) receive a unsupported UP Error Event Type(0x%x) Value(0x%x).", - hba->port_cfg.port_id, up_error_event->error_type, - up_error_event->error_value); - return ret; - } - - switch (up_error_event->error_level) { - case FC_ERR_LEVEL_DEAD: - ret = RETURN_OK; - break; - - case FC_ERR_LEVEL_HIGH: - /* port reset */ - UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, - UNF_PORT_ABNORMAL_RESET, NULL); - break; - - case FC_ERR_LEVEL_LOW: - ret = RETURN_OK; - break; - - default: - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Port(0x%x) receive a unsupported UP Error Event Level(0x%x), Can not Process.", - hba->port_cfg.port_id, - up_error_event->error_level); - return ret; - } - if (up_error_event->error_value < SPFC_ERR_VALUE_BUTT) - SPFC_UP_ERR_EVENT_STAT(hba, up_error_event->error_value); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "[event]Port(0x%x) process UP Error Event Level(0x%x) Type(0x%x) Value(0x%x) %s.", - hba->port_cfg.port_id, up_error_event->error_level, - up_error_event->error_type, up_error_event->error_value, - (ret == UNF_RETURN_ERROR) ? 
"ERROR" : "OK"); - - return ret; -} - -static struct spfc_up2drv_msg_handle up_msg_handle[] = { - {SPFC_MBOX_RECV_FC_LINKUP, spfc_recv_fc_linkup}, - {SPFC_MBOX_RECV_FC_LINKDOWN, spfc_recv_fc_linkdown}, - {SPFC_MBOX_RECV_FC_DELCMD, spfc_recv_fc_delcmd}, - {SPFC_MBOX_RECV_FC_ERROR, spfc_recv_fc_error} -}; - -void spfc_up_msg2driver_proc(void *hwdev_handle, void *pri_handle, u16 cmd, - void *buf_in, u16 in_size, void *buf_out, - u16 *out_size) -{ - u32 ret = UNF_RETURN_ERROR; - u32 index = 0; - struct spfc_hba_info *hba = NULL; - struct spfc_mbox_header *mbx_header = NULL; - - FC_CHECK_RETURN_VOID(hwdev_handle); - FC_CHECK_RETURN_VOID(pri_handle); - FC_CHECK_RETURN_VOID(buf_in); - FC_CHECK_RETURN_VOID(buf_out); - FC_CHECK_RETURN_VOID(out_size); - - hba = (struct spfc_hba_info *)pri_handle; - if (!hba) { - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_ERR, "[err]Hba is null"); - return; - } - - mbx_header = (struct spfc_mbox_header *)buf_in; - if (mbx_header->cmnd_type != cmd) { - *out_size = sizeof(struct spfc_link_event); - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_ERR, - "[err]Port(0x%x) cmd(0x%x) is not matched with header cmd type(0x%x)", - hba->port_cfg.port_id, cmd, mbx_header->cmnd_type); - return; - } - - while (index < (sizeof(up_msg_handle) / sizeof(struct spfc_up2drv_msg_handle))) { - if (up_msg_handle[index].cmd == cmd && - up_msg_handle[index].spfc_msg_up2driver_handler) { - ret = up_msg_handle[index].spfc_msg_up2driver_handler(hba, buf_in); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_ERR, - "[warn]Port(0x%x) process up cmd(0x%x) failed", - hba->port_cfg.port_id, cmd); - } - *out_size = sizeof(struct spfc_link_event); - return; - } - index++; - } - - *out_size = sizeof(struct spfc_link_event); - - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_ERR, - "[err]Port(0x%x) process up cmd(0x%x) failed", - hba->port_cfg.port_id, cmd); -} - -u32 spfc_get_topo_act(void *hba, void *topo_act) -{ - struct spfc_hba_info *spfc_hba = hba; - enum unf_act_topo *pen_topo_act = topo_act; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(topo_act, UNF_RETURN_ERROR); - - /* Get topo from low_level */ - *pen_topo_act = spfc_hba->active_topo; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Get active topology: 0x%x", *pen_topo_act); - - return RETURN_OK; -} - -u32 spfc_get_loop_alpa(void *hba, void *alpa) -{ - ulong flags = 0; - struct spfc_hba_info *spfc_hba = hba; - u8 *alpa_temp = alpa; - - FC_CHECK_RETURN_VALUE(spfc_hba, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(alpa, UNF_RETURN_ERROR); - - spin_lock_irqsave(&spfc_hba->hba_lock, flags); - *alpa_temp = spfc_hba->active_alpa; - spin_unlock_irqrestore(&spfc_hba->hba_lock, flags); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[info]Get active AL_PA(0x%x)", *alpa_temp); - - return RETURN_OK; -} - -static void spfc_get_fabric_login_params(struct spfc_hba_info *hba, - struct unf_port_login_parms *params_addr) -{ - ulong flag = 0; - - spin_lock_irqsave(&hba->hba_lock, flag); - hba->active_topo = params_addr->act_topo; - hba->compared_ratov_val = params_addr->compared_ratov_val; - hba->compared_edtov_val = params_addr->compared_edtov_val; - hba->compared_bb_scn = params_addr->compared_bbscn; - hba->remote_edtov_tag = params_addr->remote_edtov_tag; - hba->remote_rttov_tag = params_addr->remote_rttov_tag; - hba->remote_bb_credit = params_addr->remote_bb_credit; - spin_unlock_irqrestore(&hba->hba_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) topo(0x%x) get fabric params: R_A_TOV(0x%x) E_D_TOV(%u) BB_CREDIT(0x%x) 
BB_SC_N(0x%x)", - hba->port_cfg.port_id, hba->active_topo, - hba->compared_ratov_val, hba->compared_edtov_val, - hba->remote_bb_credit, hba->compared_bb_scn); -} - -static void spfc_get_port_login_params(struct spfc_hba_info *hba, - struct unf_port_login_parms *params_addr) -{ - ulong flag = 0; - - spin_lock_irqsave(&hba->hba_lock, flag); - hba->compared_ratov_val = params_addr->compared_ratov_val; - hba->compared_edtov_val = params_addr->compared_edtov_val; - hba->compared_bb_scn = params_addr->compared_bbscn; - hba->remote_edtov_tag = params_addr->remote_edtov_tag; - hba->remote_rttov_tag = params_addr->remote_rttov_tag; - hba->remote_bb_credit = params_addr->remote_bb_credit; - spin_unlock_irqrestore(&hba->hba_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "Port(0x%x) Topo(0x%x) Get Port Params: R_A_TOV(0x%x), E_D_TOV(0x%x), BB_CREDIT(0x%x), BB_SC_N(0x%x).", - hba->port_cfg.port_id, hba->active_topo, - hba->compared_ratov_val, hba->compared_edtov_val, - hba->remote_bb_credit, hba->compared_bb_scn); -} - -u32 spfc_update_fabric_param(void *hba, void *para_in) -{ - u32 ret = RETURN_OK; - struct spfc_hba_info *spfc_hba = hba; - struct unf_port_login_parms *login_coparms = para_in; - - FC_CHECK_RETURN_VALUE(spfc_hba, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(para_in, UNF_RETURN_ERROR); - - spfc_get_fabric_login_params(spfc_hba, login_coparms); - - if (spfc_hba->active_topo == UNF_ACT_TOP_P2P_FABRIC || - spfc_hba->active_topo == UNF_ACT_TOP_PUBLIC_LOOP) { - if (spfc_hba->work_mode == SPFC_SMARTIO_WORK_MODE_FC) - ret = spfc_config_login_api(spfc_hba, login_coparms); - } - - return ret; -} - -u32 spfc_update_port_param(void *hba, void *para_in) -{ - u32 ret = RETURN_OK; - struct spfc_hba_info *spfc_hba = hba; - struct unf_port_login_parms *login_coparms = - (struct unf_port_login_parms *)para_in; - - FC_CHECK_RETURN_VALUE(spfc_hba, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(para_in, UNF_RETURN_ERROR); - - if (spfc_hba->active_topo == UNF_ACT_TOP_PRIVATE_LOOP || - spfc_hba->active_topo == UNF_ACT_TOP_P2P_DIRECT) { - spfc_get_port_login_params(spfc_hba, login_coparms); - ret = spfc_config_login_api(spfc_hba, login_coparms); - } - - spfc_save_login_parms_in_sq_info(spfc_hba, login_coparms); - - return ret; -} - -u32 spfc_get_workable_bb_credit(void *hba, void *bb_credit) -{ - u32 *bb_credit_temp = (u32 *)bb_credit; - struct spfc_hba_info *spfc_hba = hba; - - FC_CHECK_RETURN_VALUE(spfc_hba, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(bb_credit, UNF_RETURN_ERROR); - if (spfc_hba->active_port_speed == UNF_PORT_SPEED_32_G) - *bb_credit_temp = SPFC_LOWLEVEL_DEFAULT_32G_BB_CREDIT; - else if (spfc_hba->active_port_speed == UNF_PORT_SPEED_16_G) - *bb_credit_temp = SPFC_LOWLEVEL_DEFAULT_16G_BB_CREDIT; - else - *bb_credit_temp = SPFC_LOWLEVEL_DEFAULT_8G_BB_CREDIT; - - return RETURN_OK; -} - -u32 spfc_get_workable_bb_scn(void *hba, void *bb_scn) -{ - u32 *bb_scn_temp = (u32 *)bb_scn; - struct spfc_hba_info *spfc_hba = (struct spfc_hba_info *)hba; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(bb_scn, UNF_RETURN_ERROR); - - *bb_scn_temp = spfc_hba->port_bb_scn_cfg; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "Return BBSCN(0x%x) to CM", *bb_scn_temp); - - return RETURN_OK; -} - -u32 spfc_get_loop_map(void *hba, void *buf) -{ - ulong flags = 0; - struct unf_buf *buf_temp = (struct unf_buf *)buf; - struct spfc_hba_info *spfc_hba = hba; - - FC_CHECK_RETURN_VALUE(spfc_hba, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(buf_temp, UNF_RETURN_ERROR); - 
FC_CHECK_RETURN_VALUE(buf_temp->buf, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(buf_temp->buf_len, UNF_RETURN_ERROR); - - if (buf_temp->buf_len > UNF_LOOPMAP_COUNT) - return UNF_RETURN_ERROR; - - spin_lock_irqsave(&spfc_hba->hba_lock, flags); - if (spfc_hba->loop_map_valid != LOOP_MAP_VALID) { - spin_unlock_irqrestore(&spfc_hba->hba_lock, flags); - return UNF_RETURN_ERROR; - } - memcpy(buf_temp->buf, spfc_hba->loop_map, buf_temp->buf_len); - spin_unlock_irqrestore(&spfc_hba->hba_lock, flags); - - return RETURN_OK; -} - -u32 spfc_mb_reset_chip(struct spfc_hba_info *hba, u8 sub_type) -{ - struct spfc_inmbox_port_reset port_reset; - union spfc_outmbox_generic *port_reset_sts = NULL; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - - memset(&port_reset, 0, sizeof(port_reset)); - - port_reset_sts = kmalloc(sizeof(union spfc_outmbox_generic), GFP_ATOMIC); - if (!port_reset_sts) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "malloc outmbox memory failed"); - return UNF_RETURN_ERROR; - } - memset(port_reset_sts, 0, sizeof(union spfc_outmbox_generic)); - port_reset.header.cmnd_type = SPFC_MBOX_PORT_RESET; - port_reset.header.length = SPFC_BYTES_TO_DW_NUM(sizeof(struct spfc_inmbox_port_reset)); - port_reset.op_code = sub_type; - - if (spfc_mb_send_and_wait_mbox(hba, &port_reset, sizeof(port_reset), - port_reset_sts) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[warn]Port(0x%x) can't send and wait mailbox with command type(0x%x)", - hba->port_cfg.port_id, port_reset.header.cmnd_type); - - goto exit; - } - - if (port_reset_sts->port_reset_sts.status != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_ERR, - "[warn]Port(0x%x) receive mailbox type(0x%x) status(0x%x) incorrect", - hba->port_cfg.port_id, - port_reset_sts->port_reset_sts.header.cmnd_type, - port_reset_sts->port_reset_sts.status); - - goto exit; - } - - if (port_reset_sts->port_reset_sts.header.cmnd_type != SPFC_MBOX_PORT_RESET_STS) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_ERR, - "[warn]Port(0x%x) recv mailbox type(0x%x) incorrect", - hba->port_cfg.port_id, - port_reset_sts->port_reset_sts.header.cmnd_type); - - goto exit; - } - - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_MAJOR, - "[info]Port(0x%x) reset chip mailbox success", - hba->port_cfg.port_id); - - ret = RETURN_OK; -exit: - kfree(port_reset_sts); - - return ret; -} - -u32 spfc_clear_sq_wqe_done(struct spfc_hba_info *hba) -{ - int ret1 = RETURN_OK; - u32 ret2 = RETURN_OK; - struct spfc_inmbox_clear_done clear_done; - - clear_done.header.cmnd_type = SPFC_MBOX_BUFFER_CLEAR_DONE; - clear_done.header.length = SPFC_BYTES_TO_DW_NUM(sizeof(struct spfc_inmbox_clear_done)); - clear_done.header.port_id = hba->port_index; - - ret1 = sphw_msg_to_mgmt_async(hba->dev_handle, COMM_MOD_FC, - SPFC_MBOX_BUFFER_CLEAR_DONE, &clear_done, - sizeof(clear_done), SPHW_CHANNEL_FC); - - if (ret1 != 0) { - SPFC_MAILBOX_STAT(hba, SPFC_SEND_CLEAR_DONE_FAIL); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]SPFC Port(0x%x) can't send clear done cmd to up, ret:%d", - hba->port_cfg.port_id, ret1); - - return UNF_RETURN_ERROR; - } - - SPFC_MAILBOX_STAT(hba, SPFC_SEND_CLEAR_DONE); - hba->queue_set_stage = SPFC_QUEUE_SET_STAGE_FLUSHDONE; - hba->next_clear_sq = 0; - - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_KEVENT, - "[info]Port(0x%x) clear done msg(0x%x) sent to up succeed with stage(0x%x)", - hba->port_cfg.port_id, clear_done.header.cmnd_type, - hba->queue_set_stage); - - return ret2; -} - -u32 spfc_mbx_get_fw_clear_stat(struct spfc_hba_info *hba, u32 *clear_state) -{ - struct 
spfc_inmbox_get_clear_state get_clr_state; - union spfc_outmbox_generic *port_clear_state_sts = NULL; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(clear_state, UNF_RETURN_ERROR); - - memset(&get_clr_state, 0, sizeof(get_clr_state)); - - port_clear_state_sts = kmalloc(sizeof(union spfc_outmbox_generic), GFP_ATOMIC); - if (!port_clear_state_sts) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "malloc outmbox memory failed"); - return UNF_RETURN_ERROR; - } - memset(port_clear_state_sts, 0, sizeof(union spfc_outmbox_generic)); - - get_clr_state.header.cmnd_type = SPFC_MBOX_GET_CLEAR_STATE; - get_clr_state.header.length = - SPFC_BYTES_TO_DW_NUM(sizeof(struct spfc_inmbox_get_clear_state)); - - if (spfc_mb_send_and_wait_mbox(hba, &get_clr_state, sizeof(get_clr_state), - port_clear_state_sts) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "spfc can't send and wait mailbox, command type: 0x%x", - get_clr_state.header.cmnd_type); - - goto exit; - } - - if (port_clear_state_sts->get_clr_state_sts.status != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_ERR, - "Port(0x%x) Receive mailbox type(0x%x) status incorrect. Status: 0x%x, state 0x%x.", - hba->port_cfg.port_id, - port_clear_state_sts->get_clr_state_sts.header.cmnd_type, - port_clear_state_sts->get_clr_state_sts.status, - port_clear_state_sts->get_clr_state_sts.state); - - goto exit; - } - - if (port_clear_state_sts->get_clr_state_sts.header.cmnd_type != - SPFC_MBOX_GET_CLEAR_STATE_STS) { - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_ERR, - "Port(0x%x) recv mailbox type(0x%x) incorrect.", - hba->port_cfg.port_id, - port_clear_state_sts->get_clr_state_sts.header.cmnd_type); - - goto exit; - } - - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_MAJOR, - "Port(0x%x) get port clear state 0x%x.", - hba->port_cfg.port_id, - port_clear_state_sts->get_clr_state_sts.state); - - *clear_state = port_clear_state_sts->get_clr_state_sts.state; - - ret = RETURN_OK; -exit: - kfree(port_clear_state_sts); - - return ret; -} - -u32 spfc_mbx_config_default_session(void *hba, u32 flag) -{ - struct spfc_hba_info *spfc_hba = NULL; - struct spfc_inmbox_default_sq_info default_sq_info; - union spfc_outmbox_generic default_sq_info_sts; - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - - spfc_hba = (struct spfc_hba_info *)hba; - - memset(&default_sq_info, 0, sizeof(struct spfc_inmbox_default_sq_info)); - memset(&default_sq_info_sts, 0, sizeof(union spfc_outmbox_generic)); - - default_sq_info.header.cmnd_type = SPFC_MBOX_SEND_DEFAULT_SQ_INFO; - default_sq_info.header.length = - SPFC_BYTES_TO_DW_NUM(sizeof(struct spfc_inmbox_default_sq_info)); - default_sq_info.func_id = sphw_global_func_id(spfc_hba->dev_handle); - - /* When flag is 1, set default SQ info when probe, when 0, clear when - * remove - */ - if (flag) { - default_sq_info.sq_cid = spfc_hba->default_sq_info.sq_cid; - default_sq_info.sq_xid = spfc_hba->default_sq_info.sq_xid; - default_sq_info.valid = 1; - } - - ret = - spfc_mb_send_and_wait_mbox(spfc_hba, &default_sq_info, sizeof(default_sq_info), - (union spfc_outmbox_generic *)(void *)&default_sq_info_sts); - - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "spfc can't send and wait mailbox, command type: 0x%x.", - default_sq_info.header.cmnd_type); - - return UNF_RETURN_ERROR; - } - - if (default_sq_info_sts.default_sq_sts.status != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Port(0x%x) mailbox status incorrect status(0x%x) .", - 
spfc_hba->port_cfg.port_id, - default_sq_info_sts.default_sq_sts.status); - - return UNF_RETURN_ERROR; - } - - if (SPFC_MBOX_SEND_DEFAULT_SQ_INFO_STS != - default_sq_info_sts.default_sq_sts.header.cmnd_type) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Port(0x%x) receive mailbox type incorrect type: 0x%x.", - spfc_hba->port_cfg.port_id, - default_sq_info_sts.default_sq_sts.header.cmnd_type); - - return UNF_RETURN_ERROR; - } - - return RETURN_OK; -} diff --git a/drivers/scsi/spfc/hw/spfc_chipitf.h b/drivers/scsi/spfc/hw/spfc_chipitf.h deleted file mode 100644 index acd770514edf..000000000000 --- a/drivers/scsi/spfc/hw/spfc_chipitf.h +++ /dev/null @@ -1,797 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPFC_CHIPITF_H -#define SPFC_CHIPITF_H - -#include "unf_type.h" -#include "unf_log.h" -#include "spfc_utils.h" -#include "spfc_module.h" - -#include "spfc_service.h" - -/* CONF_API_CMND */ -#define SPFC_MBOX_CONFIG_API (0x00) -#define SPFC_MBOX_CONFIG_API_STS (0xA0) - -/* GET_CHIP_INFO_API_CMD */ -#define SPFC_MBOX_GET_CHIP_INFO (0x01) -#define SPFC_MBOX_GET_CHIP_INFO_STS (0xA1) - -/* PORT_RESET */ -#define SPFC_MBOX_PORT_RESET (0x02) -#define SPFC_MBOX_PORT_RESET_STS (0xA2) - -/* SFP_SWITCH_API_CMND */ -#define SPFC_MBOX_PORT_SWITCH (0x03) -#define SPFC_MBOX_PORT_SWITCH_STS (0xA3) - -/* CONF_AF_LOGIN_API_CMND */ -#define SPFC_MBOX_CONFIG_LOGIN_API (0x06) -#define SPFC_MBOX_CONFIG_LOGIN_API_STS (0xA6) - -/* BUFFER_CLEAR_DONE_CMND */ -#define SPFC_MBOX_BUFFER_CLEAR_DONE (0x07) -#define SPFC_MBOX_BUFFER_CLEAR_DONE_STS (0xA7) - -#define SPFC_MBOX_GET_UP_STATE (0x09) -#define SPFC_MBOX_GET_UP_STATE_STS (0xA9) - -/* GET CLEAR DONE STATE */ -#define SPFC_MBOX_GET_CLEAR_STATE (0x0E) -#define SPFC_MBOX_GET_CLEAR_STATE_STS (0xAE) - -/* CONFIG TIMER */ -#define SPFC_MBOX_CONFIG_TIMER (0x10) -#define SPFC_MBOX_CONFIG_TIMER_STS (0xB0) - -/* Led Test */ -#define SPFC_MBOX_LED_TEST (0x12) -#define SPFC_MBOX_LED_TEST_STS (0xB2) - -/* set esch */ -#define SPFC_MBOX_SET_ESCH (0x13) -#define SPFC_MBOX_SET_ESCH_STS (0xB3) - -/* set get tx serdes */ -#define SPFC_MBOX_SET_GET_SERDES_TX (0x14) -#define SPFC_MBOX_SET_GET_SERDES_TX_STS (0xB4) - -/* get rx serdes */ -#define SPFC_MBOX_GET_SERDES_RX (0x15) -#define SPFC_MBOX_GET_SERDES_RX_STS (0xB5) - -/* i2c read write */ -#define SPFC_MBOX_I2C_WR_RD (0x16) -#define SPFC_MBOX_I2C_WR_RD_STS (0xB6) - -/* GET UCODE STATS CMD */ -#define SPFC_MBOX_GET_UCODE_STAT (0x18) -#define SPFC_MBOX_GET_UCODE_STAT_STS (0xB8) - -/* gpio read write */ -#define SPFC_MBOX_GPIO_WR_RD (0x19) -#define SPFC_MBOX_GPIO_WR_RD_STS (0xB9) - -#define SPFC_MBOX_SEND_DEFAULT_SQ_INFO (0x26) -#define SPFC_MBOX_SEND_DEFAULT_SQ_INFO_STS (0xc6) - -/* FC: DRV->UP */ -#define SPFC_MBOX_SEND_ELS_CMD (0x2A) -#define SPFC_MBOX_SEND_VPORT_INFO (0x2B) - -/* FC: UP->DRV */ -#define SPFC_MBOX_RECV_FC_LINKUP (0x40) -#define SPFC_MBOX_RECV_FC_LINKDOWN (0x41) -#define SPFC_MBOX_RECV_FC_DELCMD (0x42) -#define SPFC_MBOX_RECV_FC_ERROR (0x43) - -#define LOOP_MAP_VALID (1) -#define LOOP_MAP_INVALID (0) - -#define SPFC_MBOX_SIZE (1024) -#define SPFC_MBOX_HEADER_SIZE (4) - -#define UNDEFINEOPCODE (0) - -#define VALUEMASK_L 0x00000000FFFFFFFF -#define VALUEMASK_H 0xFFFFFFFF00000000 - -#define STATUS_OK (0) -#define STATUS_FAIL (1) - -enum spfc_drv2up_unblock_msg_cmd_code { - SPFC_SEND_ELS_CMD, - SPFC_SEND_ELS_CMD_FAIL, - SPFC_RCV_ELS_CMD_RSP, - SPFC_SEND_CONFIG_LOGINAPI, - SPFC_SEND_CONFIG_LOGINAPI_FAIL, - SPFC_RCV_CONFIG_LOGIN_API_RSP, - 
SPFC_SEND_CLEAR_DONE, - SPFC_SEND_CLEAR_DONE_FAIL, - SPFC_RCV_CLEAR_DONE_RSP, - SPFC_SEND_VPORT_INFO_DONE, - SPFC_SEND_VPORT_INFO_FAIL, - SPFC_SEND_VPORT_INFO_RSP, - SPFC_MBOX_CMD_BUTT -}; - -/* up to dirver cmd code */ -enum spfc_up2drv_msg_cmd_code { - SPFC_UP2DRV_MSG_CMD_LINKUP = 0x1, - SPFC_UP2DRV_MSG_CMD_LINKDOWN = 0x2, - SPFC_UP2DRV_MSG_CMD_BUTT -}; - -/* up to driver handle templete */ -struct spfc_up2drv_msg_handle { - u8 cmd; - u32 (*spfc_msg_up2driver_handler)(struct spfc_hba_info *hba, void *buf_in); -}; - -/* tile to driver cmd code */ -enum spfc_tile2drv_msg_cmd_code { - SPFC_TILE2DRV_MSG_CMD_SCAN_DONE, - SPFC_TILE2DRV_MSG_CMD_FLUSH_DONE, - SPFC_TILE2DRV_MSG_CMD_BUTT -}; - -/* tile to driver handle templete */ -struct spfc_tile2drv_msg_handle { - u8 cmd; - u32 (*spfc_msg_tile2driver_handler)(struct spfc_hba_info *hba, u8 cmd, u64 val); -}; - -/* Mbox Common Header */ -struct spfc_mbox_header { - u8 cmnd_type; - u8 length; - u8 port_id; - u8 reserved; -}; - -/* open or close the sfp */ -struct spfc_inmbox_port_switch { - struct spfc_mbox_header header; - u32 op_code : 8; - u32 rsvd0 : 24; - u32 rsvd1[6]; -}; - -struct spfc_inmbox_send_vport_info { - struct spfc_mbox_header header; - - u64 sys_port_wwn; - u64 sys_node_name; - - u32 nport_id : 24; - u32 vpi : 8; -}; - -struct spfc_outmbox_port_switch_sts { - struct spfc_mbox_header header; - - u16 reserved1; - u8 reserved2; - u8 status; -}; - -/* config API */ -struct spfc_inmbox_config_api { - struct spfc_mbox_header header; - - u32 op_code : 8; - u32 reserved1 : 24; - - u8 topy_mode; - u8 sfp_speed; - u8 max_speed; - u8 hard_alpa; - - u8 port_name[UNF_WWN_LEN]; - - u32 slave : 1; - u32 auto_sneg : 1; - u32 reserved2 : 30; - - u32 rx_6432g_bb_credit : 16; /* 160 */ - u32 rx_16g_bb_credit : 16; /* 80 */ - u32 rx_84g_bb_credit : 16; /* 50 */ - u32 rdy_cnt_bf_fst_frm : 16; /* 8 */ - - u32 esch_32g_value; - u32 esch_16g_value; - u32 esch_8g_value; - u32 esch_4g_value; - u32 esch_64g_value; - u32 esch_bust_size; -}; - -struct spfc_outmbox_config_api_sts { - struct spfc_mbox_header header; - u16 reserved1; - u8 reserved2; - u8 status; -}; - -/* Get chip info */ -struct spfc_inmbox_get_chip_info { - struct spfc_mbox_header header; -}; - -struct spfc_outmbox_get_chip_info_sts { - struct spfc_mbox_header header; - u8 status; - u8 board_type; - u8 rvsd0[2]; - u64 wwpn; - u64 wwnn; - u64 rsvd1; -}; - -/* Get reg info */ -struct spfc_inmbox_get_reg_info { - struct spfc_mbox_header header; - u32 op_code : 1; - u32 reg_len : 8; - u32 rsvd1 : 23; - u32 reg_addr; - u32 reg_value_l32; - u32 reg_value_h32; - u32 rsvd2[27]; -}; - -/* Get reg info sts */ -struct spfc_outmbox_get_reg_info_sts { - struct spfc_mbox_header header; - - u16 rsvd0; - u8 rsvd1; - u8 status; - u32 reg_value_l32; - u32 reg_value_h32; - u32 rsvd2[28]; -}; - -/* Config login API */ -struct spfc_inmbox_config_login { - struct spfc_mbox_header header; - - u32 op_code : 8; - u32 reserved1 : 24; - - u16 tx_bb_credit; - u16 reserved2; - - u32 rtov; - u32 etov; - - u32 rt_tov_tag : 1; - u32 ed_tov_tag : 1; - u32 bb_credit : 6; - u32 bb_scn : 8; - u32 lr_flag : 16; -}; - -struct spfc_outmbox_config_login_sts { - struct spfc_mbox_header header; - - u16 reserved1; - u8 reserved2; - u8 status; -}; - -/* port reset */ -#define SPFC_MBOX_SUBTYPE_LIGHT_RESET (0x0) -#define SPFC_MBOX_SUBTYPE_HEAVY_RESET (0x1) - -struct spfc_inmbox_port_reset { - struct spfc_mbox_header header; - - u32 op_code : 8; - u32 reserved : 24; -}; - -struct spfc_outmbox_port_reset_sts { - struct spfc_mbox_header 
header; - - u16 reserved1; - u8 reserved2; - u8 status; -}; - -/* led test */ -struct spfc_inmbox_led_test { - struct spfc_mbox_header header; - - /* 0->act type;1->low speed;1->high speed */ - u8 led_type; - /* 0:twinkle;1:light on;2:light off;0xff:defalut */ - u8 led_mode; - u8 resvd[ARRAY_INDEX_2]; -}; - -struct spfc_outmbox_led_test_sts { - struct spfc_mbox_header header; - - u16 rsvd1; - u8 rsvd2; - u8 status; -}; - -/* set esch */ -struct spfc_inmbox_set_esch { - struct spfc_mbox_header header; - - u32 esch_value; - u32 esch_bust_size; -}; - -struct spfc_outmbox_set_esch_sts { - struct spfc_mbox_header header; - - u16 rsvd1; - u8 rsvd2; - u8 status; -}; - -struct spfc_inmbox_set_serdes_tx { - struct spfc_mbox_header header; - - u8 swing; /* amplitude setting */ - char serdes_pre1; /* pre1 setting */ - char serdes_pre2; /* pre2 setting */ - char serdes_post; /* post setting */ - u8 serdes_main; /* main setting */ - u8 op_code; /* opcode,0:setting;1:read */ - u8 rsvd[ARRAY_INDEX_2]; -}; - -struct spfc_outmbox_set_serdes_tx_sts { - struct spfc_mbox_header header; - u16 rvsd0; - u8 rvsd1; - u8 status; - u8 swing; - char serdes_pre1; - char serdes_pre2; - char serdes_post; - u8 serdes_main; - u8 rsvd2[ARRAY_INDEX_3]; -}; - -struct spfc_inmbox_i2c_wr_rd { - struct spfc_mbox_header header; - u8 op_code; /* 0 write, 1 read */ - u8 rsvd[ARRAY_INDEX_3]; - - u32 dev_addr; - u32 offset; - u32 wr_data; -}; - -struct spfc_outmbox_i2c_wr_rd_sts { - struct spfc_mbox_header header; - u8 status; - u8 resvd[ARRAY_INDEX_3]; - - u32 rd_data; -}; - -struct spfc_inmbox_gpio_wr_rd { - struct spfc_mbox_header header; - u8 op_code; /* 0 write,1 read */ - u8 rsvd[ARRAY_INDEX_3]; - - u32 pin; - u32 wr_data; -}; - -struct spfc_outmbox_gpio_wr_rd_sts { - struct spfc_mbox_header header; - u8 status; - u8 resvd[ARRAY_INDEX_3]; - - u32 rd_data; -}; - -struct spfc_inmbox_get_serdes_rx { - struct spfc_mbox_header header; - - u8 op_code; - u8 h16_macro; - u8 h16_lane; - u8 rsvd; -}; - -struct spfc_inmbox_get_serdes_rx_sts { - struct spfc_mbox_header header; - u16 rvsd0; - u8 rvsd1; - u8 status; - int left_eye; - int right_eye; - int low_eye; - int high_eye; -}; - -struct spfc_ser_op_m_l { - u8 op_code; - u8 h16_macro; - u8 h16_lane; - u8 rsvd; -}; - -/* get sfp info */ -#define SPFC_MBOX_GET_SFP_INFO_MB_LENGTH 1 -#define OFFSET_TWO_DWORD 2 -#define OFFSET_ONE_DWORD 1 - -struct spfc_inmbox_get_sfp_info { - struct spfc_mbox_header header; -}; - -struct spfc_outmbox_get_sfp_info_sts { - struct spfc_mbox_header header; - - u32 rcvd : 8; - u32 length : 16; - u32 status : 8; -}; - -/* get ucode stats */ -#define SPFC_UCODE_STAT_NUM 64 - -struct spfc_outmbox_get_ucode_stat { - struct spfc_mbox_header header; -}; - -struct spfc_outmbox_get_ucode_stat_sts { - struct spfc_mbox_header header; - - u16 rsvd; - u8 rsvd2; - u8 status; - - u32 ucode_stat[SPFC_UCODE_STAT_NUM]; -}; - -/* uP-->Driver asyn event API */ -struct spfc_link_event { - struct spfc_mbox_header header; - - u8 link_event; - u8 reason; - u8 speed; - u8 top_type; - - u8 alpa_value; - u8 reserved1; - u16 paticpate : 1; - u16 ac_led : 1; - u16 yellow_speed_led : 1; - u16 green_speed_led : 1; - u16 reserved2 : 12; - - u8 loop_map_info[128]; -}; - -enum spfc_up_err_type { - SPFC_UP_ERR_DRV_PARA = 0, - SPFC_UP_ERR_SFP = 1, - SPFC_UP_ERR_32G_PUB = 2, - SPFC_UP_ERR_32G_UA = 3, - SPFC_UP_ERR_32G_MAC = 4, - SPFC_UP_ERR_NON32G_DFX = 5, - SPFC_UP_ERR_NON32G_MAC = 6, - SPFC_UP_ERR_BUTT - -}; - -enum spfc_up_err_value { - /* ERR type 0 */ - SPFC_DRV_2_UP_PARA_ERR = 0, - - /* ERR 
type 1 */ - SPFC_SFP_SPEED_ERR, - - /* ERR type 2 */ - SPFC_32GPUB_UA_RXESCH_FIFO_OF, - SPFC_32GPUB_UA_RXESCH_FIFO_UCERR, - - /* ERR type 3 */ - SPFC_32G_UA_UATX_LEN_ABN, - SPFC_32G_UA_RXAFIFO_OF, - SPFC_32G_UA_TXAFIFO_OF, - SPFC_32G_UA_RXAFIFO_UCERR, - SPFC_32G_UA_TXAFIFO_UCERR, - - /* ERR type 4 */ - SPFC_32G_MAC_RX_BBC_FATAL, - SPFC_32G_MAC_TX_BBC_FATAL, - SPFC_32G_MAC_TXFIFO_UF, - SPFC_32G_MAC_PCS_TXFIFO_UF, - SPFC_32G_MAC_RXBBC_CRDT_TO, - SPFC_32G_MAC_PCS_RXAFIFO_OF, - SPFC_32G_MAC_PCS_TXFIFO_OF, - SPFC_32G_MAC_FC2P_RXFIFO_OF, - SPFC_32G_MAC_FC2P_TXFIFO_OF, - SPFC_32G_MAC_FC2P_CAFIFO_OF, - SPFC_32G_MAC_PCS_RXRSFECM_UCEER, - SPFC_32G_MAC_PCS_RXAFIFO_UCEER, - SPFC_32G_MAC_PCS_TXFIFO_UCEER, - SPFC_32G_MAC_FC2P_RXFIFO_UCEER, - SPFC_32G_MAC_FC2P_TXFIFO_UCEER, - - /* ERR type 5 */ - SPFC_NON32G_DFX_FC1_DFX_BF_FIFO, - SPFC_NON32G_DFX_FC1_DFX_BP_FIFO, - SPFC_NON32G_DFX_FC1_DFX_RX_AFIFO_ERR, - SPFC_NON32G_DFX_FC1_DFX_TX_AFIFO_ERR, - SPFC_NON32G_DFX_FC1_DFX_DIRQ_RXBUF_FIFO1, - SPFC_NON32G_DFX_FC1_DFX_DIRQ_RXBBC_TO, - SPFC_NON32G_DFX_FC1_DFX_DIRQ_TXDAT_FIFO, - SPFC_NON32G_DFX_FC1_DFX_DIRQ_TXCMD_FIFO, - SPFC_NON32G_DFX_FC1_ERR_R_RDY, - - /* ERR type 6 */ - SPFC_NON32G_MAC_FC1_FAIRNESS_ERROR, - - SPFC_ERR_VALUE_BUTT - -}; - -struct spfc_up_error_event { - struct spfc_mbox_header header; - - u8 link_event; - u8 error_level; - u8 error_type; - u8 error_value; -}; - -struct spfc_inmbox_clear_done { - struct spfc_mbox_header header; -}; - -/* receive els cmd */ -struct spfc_inmbox_rcv_els { - struct spfc_mbox_header header; - u16 pkt_type; - u16 pkt_len; - u8 frame[ARRAY_INDEX_0]; -}; - -/* FCF event type */ -enum spfc_fcf_event_type { - SPFC_FCF_SELECTED = 0, - SPFC_FCF_DEAD, - SPFC_FCF_CLEAR_VLINK, - SPFC_FCF_CLEAR_VLINK_APPOINTED -}; - -struct spfc_nport_id_info { - u32 nport_id : 24; - u32 vp_index : 8; -}; - -struct spfc_inmbox_fcf_event { - struct spfc_mbox_header header; - - u8 fcf_map[ARRAY_INDEX_3]; - u8 event_type; - - u8 fcf_mac_h4[ARRAY_INDEX_4]; - - u16 vlan_info; - u8 fcf_mac_l2[ARRAY_INDEX_2]; - - struct spfc_nport_id_info nport_id_info[UNF_SPFC_MAXNPIV_NUM + 1]; -}; - -/* send els cmd */ -struct spfc_inmbox_send_els { - struct spfc_mbox_header header; - - u8 oper_code; - u8 rsvd[ARRAY_INDEX_3]; - - u8 resvd; - u8 els_cmd_type; - u16 pkt_len; - - u8 fcf_mac_h4[ARRAY_INDEX_4]; - - u16 vlan_info; - u8 fcf_mac_l2[ARRAY_INDEX_2]; - - u8 fc_frame[SPFC_FC_HEAD_LEN + UNF_FLOGI_PAYLOAD_LEN]; -}; - -struct spfc_inmbox_send_els_sts { - struct spfc_mbox_header header; - - u16 rx_id; - u16 err_code; - - u16 ox_id; - u16 rsvd; -}; - -struct spfc_inmbox_get_clear_state { - struct spfc_mbox_header header; - u32 resvd[31]; -}; - -struct spfc_outmbox_get_clear_state_sts { - struct spfc_mbox_header header; - u16 rsvd1; - u8 state; /* 1--clear doing. 0---clear done. 
*/ - u8 status; /* 0--ok,!0---fail */ - u32 rsvd2[30]; -}; - -#define SPFC_FIP_MODE_VN2VF (0) -#define SPFC_FIP_MODE_VN2VN (1) - -/* get up state */ -struct spfc_inmbox_get_up_state { - struct spfc_mbox_header header; - - u64 cur_jiff_time; -}; - -/* get port state */ -struct spfc_inmbox_get_port_info { - struct spfc_mbox_header header; -}; - -struct spfc_outmbox_get_up_state_sts { - struct spfc_mbox_header header; - - u8 status; - u8 rsv0; - u16 rsv1; - struct unf_port_dynamic_info dymic_info; -}; - -struct spfc_outmbox_get_port_info_sts { - struct spfc_mbox_header header; - - u32 status : 8; - u32 fe_16g_cvis_tts : 8; - u32 bb_scn : 8; - u32 loop_credit : 8; - - u32 non_loop_rx_credit : 8; - u32 non_loop_tx_credit : 8; - u32 sfp_speed : 8; - u32 present : 8; -}; - -struct spfc_inmbox_config_timer { - struct spfc_mbox_header header; - - u16 op_code; - u16 fun_id; - u32 user_data; -}; - -struct spfc_inmbox_config_srqc { - struct spfc_mbox_header header; - - u16 valid; - u16 fun_id; - u32 srqc_gpa_hi; - u32 srqc_gpa_lo; -}; - -struct spfc_outmbox_config_timer_sts { - struct spfc_mbox_header header; - - u8 status; - u8 rsv[ARRAY_INDEX_3]; -}; - -struct spfc_outmbox_config_srqc_sts { - struct spfc_mbox_header header; - - u8 status; - u8 rsv[ARRAY_INDEX_3]; -}; - -struct spfc_inmbox_default_sq_info { - struct spfc_mbox_header header; - u32 sq_cid; - u32 sq_xid; - u16 func_id; - u16 valid; -}; - -struct spfc_outmbox_default_sq_info_sts { - struct spfc_mbox_header header; - u8 status; - u8 rsv[ARRAY_INDEX_3]; -}; - -/* Generic Inmailbox and Outmailbox */ -union spfc_inmbox_generic { - struct { - struct spfc_mbox_header header; - u32 rsvd[(SPFC_MBOX_SIZE - SPFC_MBOX_HEADER_SIZE) / sizeof(u32)]; - } generic; - - struct spfc_inmbox_port_switch port_switch; - struct spfc_inmbox_config_api config_api; - struct spfc_inmbox_get_chip_info get_chip_info; - struct spfc_inmbox_config_login config_login; - struct spfc_inmbox_port_reset port_reset; - struct spfc_inmbox_set_esch esch_set; - struct spfc_inmbox_led_test led_test; - struct spfc_inmbox_get_sfp_info get_sfp_info; - struct spfc_inmbox_clear_done clear_done; - struct spfc_outmbox_get_ucode_stat get_ucode_stat; - struct spfc_inmbox_get_clear_state get_clr_state; - struct spfc_inmbox_send_vport_info send_vport_info; - struct spfc_inmbox_get_up_state get_up_state; - struct spfc_inmbox_config_timer timer_config; - struct spfc_inmbox_config_srqc config_srqc; - struct spfc_inmbox_get_port_info get_port_info; -}; - -union spfc_outmbox_generic { - struct { - struct spfc_mbox_header header; - u32 rsvd[(SPFC_MBOX_SIZE - SPFC_MBOX_HEADER_SIZE) / sizeof(u32)]; - } generic; - - struct spfc_outmbox_port_switch_sts port_switch_sts; - struct spfc_outmbox_config_api_sts config_api_sts; - struct spfc_outmbox_get_chip_info_sts get_chip_info_sts; - struct spfc_outmbox_get_reg_info_sts get_reg_info_sts; - struct spfc_outmbox_config_login_sts config_login_sts; - struct spfc_outmbox_port_reset_sts port_reset_sts; - struct spfc_outmbox_led_test_sts led_test_sts; - struct spfc_outmbox_set_esch_sts esch_set_sts; - struct spfc_inmbox_get_serdes_rx_sts serdes_rx_get_sts; - struct spfc_outmbox_set_serdes_tx_sts serdes_tx_set_sts; - struct spfc_outmbox_i2c_wr_rd_sts i2c_wr_rd_sts; - struct spfc_outmbox_gpio_wr_rd_sts gpio_wr_rd_sts; - struct spfc_outmbox_get_sfp_info_sts get_sfp_info_sts; - struct spfc_outmbox_get_ucode_stat_sts get_ucode_stat_sts; - struct spfc_outmbox_get_clear_state_sts get_clr_state_sts; - struct spfc_outmbox_get_up_state_sts get_up_state_sts; - struct 
spfc_outmbox_config_timer_sts timer_config_sts; - struct spfc_outmbox_config_srqc_sts config_srqc_sts; - struct spfc_outmbox_get_port_info_sts get_port_info_sts; - struct spfc_outmbox_default_sq_info_sts default_sq_sts; -}; - -u32 spfc_get_chip_msg(void *hba, void *mac); -u32 spfc_config_port_table(struct spfc_hba_info *hba); -u32 spfc_port_switch(struct spfc_hba_info *hba, bool turn_on); -u32 spfc_get_loop_map(void *hba, void *buf); -u32 spfc_get_workable_bb_credit(void *hba, void *bb_credit); -u32 spfc_get_workable_bb_scn(void *hba, void *bb_scn); -u32 spfc_get_port_current_info(void *hba, void *port_info); -u32 spfc_get_port_fec(void *hba, void *para_out); - -u32 spfc_get_loop_alpa(void *hba, void *alpa); -u32 spfc_get_topo_act(void *hba, void *topo_act); -u32 spfc_config_login_api(struct spfc_hba_info *hba, struct unf_port_login_parms *login_parms); -u32 spfc_mb_send_and_wait_mbox(struct spfc_hba_info *hba, const void *in_mbox, u16 in_size, - union spfc_outmbox_generic *out_mbox); -void spfc_up_msg2driver_proc(void *hwdev_handle, void *pri_handle, u16 cmd, - void *buf_in, u16 in_size, void *buf_out, u16 *out_size); - -u32 spfc_mb_reset_chip(struct spfc_hba_info *hba, u8 sub_type); -u32 spfc_clear_sq_wqe_done(struct spfc_hba_info *hba); -u32 spfc_update_fabric_param(void *hba, void *para_in); -u32 spfc_update_port_param(void *hba, void *para_in); -u32 spfc_update_fdisc_param(void *hba, void *vport_info); -u32 spfc_mbx_get_fw_clear_stat(struct spfc_hba_info *hba, u32 *clear_state); -u32 spfc_get_chip_capability(void *hwdev_handle, struct spfc_chip_info *chip_info); -u32 spfc_mbx_config_default_session(void *hba, u32 flag); - -#endif diff --git a/drivers/scsi/spfc/hw/spfc_cqm_bat_cla.c b/drivers/scsi/spfc/hw/spfc_cqm_bat_cla.c deleted file mode 100644 index 0c1d97d9e3e6..000000000000 --- a/drivers/scsi/spfc/hw/spfc_cqm_bat_cla.c +++ /dev/null @@ -1,1611 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include <linux/types.h> -#include <linux/sched.h> -#include <linux/pci.h> -#include <linux/module.h> -#include <linux/vmalloc.h> -#include <linux/mm.h> -#include <linux/device.h> -#include <linux/gfp.h> -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "sphw_hwdev.h" -#include "sphw_hwif.h" - -#include "spfc_cqm_object.h" -#include "spfc_cqm_bitmap_table.h" -#include "spfc_cqm_bat_cla.h" -#include "spfc_cqm_main.h" - -static unsigned char cqm_ver = 8; -module_param(cqm_ver, byte, 0644); -MODULE_PARM_DESC(cqm_ver, "for cqm version control (default=8)"); - -static void -cqm_bat_fill_cla_common_gpa(struct cqm_handle *cqm_handle, - struct cqm_cla_table *cla_table, - struct cqm_bat_entry_standerd *bat_entry_standerd) -{ - u8 gpa_check_enable = cqm_handle->func_capability.gpa_check_enable; - struct sphw_func_attr *func_attr = NULL; - struct cqm_bat_entry_vf2pf gpa = {0}; - u32 cla_gpa_h = 0; - dma_addr_t pa; - - if (cla_table->cla_lvl == CQM_CLA_LVL_0) - pa = cla_table->cla_z_buf.buf_list[0].pa; - else if (cla_table->cla_lvl == CQM_CLA_LVL_1) - pa = cla_table->cla_y_buf.buf_list[0].pa; - else - pa = cla_table->cla_x_buf.buf_list[0].pa; - - gpa.cla_gpa_h = CQM_ADDR_HI(pa) & CQM_CHIP_GPA_HIMASK; - - /* On the SPU, the value of spu_en in the GPA address - * in the BAT is determined by the host ID and fun IDx. 
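 *
 * For example (illustration only): on the SPU host, a function with
 * func_global_idx = 5 gets acs_spu_en = 5 & 0x1 = 1 in the code below, while
 * an even function index, or any non-SPU host, leaves acs_spu_en at 0.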
- */ - if (sphw_host_id(cqm_handle->ex_handle) == CQM_SPU_HOST_ID) { - func_attr = &cqm_handle->func_attribute; - gpa.acs_spu_en = func_attr->func_global_idx & 0x1; - } else { - gpa.acs_spu_en = 0; - } - - memcpy(&cla_gpa_h, &gpa, sizeof(u32)); - bat_entry_standerd->cla_gpa_h = cla_gpa_h; - - /* GPA is valid when gpa[0] = 1. - * CQM_BAT_ENTRY_T_REORDER does not support GPA validity check. - */ - if (cla_table->type == CQM_BAT_ENTRY_T_REORDER) - bat_entry_standerd->cla_gpa_l = CQM_ADDR_LW(pa); - else - bat_entry_standerd->cla_gpa_l = CQM_ADDR_LW(pa) | gpa_check_enable; -} - -static void cqm_bat_fill_cla_common(struct cqm_handle *cqm_handle, - struct cqm_cla_table *cla_table, - u8 *entry_base_addr) -{ - struct cqm_bat_entry_standerd *bat_entry_standerd = NULL; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - u32 cache_line = 0; - - if (cla_table->type == CQM_BAT_ENTRY_T_TIMER && cqm_ver == 8) - cache_line = CQM_CHIP_TIMER_CACHELINE; - else - cache_line = CQM_CHIP_CACHELINE; - - if (cla_table->obj_num == 0) { - cqm_info(handle->dev_hdl, - "Cla alloc: cla_type %u, obj_num=0, don't init bat entry\n", - cla_table->type); - return; - } - - bat_entry_standerd = (struct cqm_bat_entry_standerd *)entry_base_addr; - - /* The QPC value is 256/512/1024 and the timer value is 512. - * The other cacheline value is 256B. - * The conversion operation is performed inside the chip. - */ - if (cla_table->obj_size > cache_line) { - if (cla_table->obj_size == CQM_OBJECT_512) - bat_entry_standerd->entry_size = CQM_BAT_ENTRY_SIZE_512; - else - bat_entry_standerd->entry_size = CQM_BAT_ENTRY_SIZE_1024; - bat_entry_standerd->max_number = cla_table->max_buffer_size / cla_table->obj_size; - } else { - if (cache_line == CQM_CHIP_CACHELINE) { - bat_entry_standerd->entry_size = CQM_BAT_ENTRY_SIZE_256; - bat_entry_standerd->max_number = cla_table->max_buffer_size / cache_line; - } else { - bat_entry_standerd->entry_size = CQM_BAT_ENTRY_SIZE_512; - bat_entry_standerd->max_number = cla_table->max_buffer_size / cache_line; - } - } - - bat_entry_standerd->max_number = bat_entry_standerd->max_number - 1; - - bat_entry_standerd->bypass = CQM_BAT_NO_BYPASS_CACHE; - bat_entry_standerd->z = cla_table->cacheline_z; - bat_entry_standerd->y = cla_table->cacheline_y; - bat_entry_standerd->x = cla_table->cacheline_x; - bat_entry_standerd->cla_level = cla_table->cla_lvl; - - cqm_bat_fill_cla_common_gpa(cqm_handle, cla_table, bat_entry_standerd); -} - -static void cqm_bat_fill_cla_cfg(struct cqm_handle *cqm_handle, - struct cqm_cla_table *cla_table, - u8 **entry_base_addr) -{ - struct cqm_func_capability *func_cap = &cqm_handle->func_capability; - struct cqm_bat_entry_cfg *bat_entry_cfg = NULL; - - bat_entry_cfg = (struct cqm_bat_entry_cfg *)(*entry_base_addr); - bat_entry_cfg->cur_conn_cache = 0; - bat_entry_cfg->max_conn_cache = - func_cap->flow_table_based_conn_cache_number; - bat_entry_cfg->cur_conn_num_h_4 = 0; - bat_entry_cfg->cur_conn_num_l_16 = 0; - bat_entry_cfg->max_conn_num = func_cap->flow_table_based_conn_number; - - /* Aligns with 64 buckets and shifts rightward by 6 bits. - * The maximum value of this field is 16 bits. A maximum of 4M buckets - * can be supported. The value is subtracted by 1. It is used for &hash - * value. 
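 *
 * Worked example (assuming CQM_HASH_NUMBER_UNIT is the 6-bit shift described
 * above): hash_number = 0x100000 (1M hash entries) gives
 * bucket_num = (0x100000 >> 6) - 1 = 0x3fff, which is then applied as a mask,
 * i.e. bucket = hash_value & bucket_num.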
- */ - if ((func_cap->hash_number >> CQM_HASH_NUMBER_UNIT) != 0) { - bat_entry_cfg->bucket_num = ((func_cap->hash_number >> - CQM_HASH_NUMBER_UNIT) - 1); - } - if (func_cap->bloomfilter_length != 0) { - bat_entry_cfg->bloom_filter_len = func_cap->bloomfilter_length - - 1; - bat_entry_cfg->bloom_filter_addr = func_cap->bloomfilter_addr; - } - - (*entry_base_addr) += sizeof(struct cqm_bat_entry_cfg); -} - -static void cqm_bat_fill_cla_other(struct cqm_handle *cqm_handle, - struct cqm_cla_table *cla_table, - u8 **entry_base_addr) -{ - cqm_bat_fill_cla_common(cqm_handle, cla_table, *entry_base_addr); - - (*entry_base_addr) += sizeof(struct cqm_bat_entry_standerd); -} - -static void cqm_bat_fill_cla_taskmap(struct cqm_handle *cqm_handle, - struct cqm_cla_table *cla_table, - u8 **entry_base_addr) -{ - struct cqm_bat_entry_taskmap *bat_entry_taskmap = NULL; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - int i; - - if (cqm_handle->func_capability.taskmap_number != 0) { - bat_entry_taskmap = - (struct cqm_bat_entry_taskmap *)(*entry_base_addr); - for (i = 0; i < CQM_BAT_ENTRY_TASKMAP_NUM; i++) { - bat_entry_taskmap->addr[i].gpa_h = - (u32)(cla_table->cla_z_buf.buf_list[i].pa >> - CQM_CHIP_GPA_HSHIFT); - bat_entry_taskmap->addr[i].gpa_l = - (u32)(cla_table->cla_z_buf.buf_list[i].pa & - CQM_CHIP_GPA_LOMASK); - cqm_info(handle->dev_hdl, - "Cla alloc: taskmap bat entry: 0x%x 0x%x\n", - bat_entry_taskmap->addr[i].gpa_h, - bat_entry_taskmap->addr[i].gpa_l); - } - } - - (*entry_base_addr) += sizeof(struct cqm_bat_entry_taskmap); -} - -static void cqm_bat_fill_cla_timer(struct cqm_handle *cqm_handle, - struct cqm_cla_table *cla_table, - u8 **entry_base_addr) -{ - /* Only the PPF allocates timer resources. */ - if (cqm_handle->func_attribute.func_type != CQM_PPF) { - (*entry_base_addr) += CQM_BAT_ENTRY_SIZE; - } else { - cqm_bat_fill_cla_common(cqm_handle, cla_table, *entry_base_addr); - - (*entry_base_addr) += sizeof(struct cqm_bat_entry_standerd); - } -} - -static void cqm_bat_fill_cla_invalid(struct cqm_handle *cqm_handle, - struct cqm_cla_table *cla_table, - u8 **entry_base_addr) -{ - (*entry_base_addr) += CQM_BAT_ENTRY_SIZE; -} - -static void cqm_bat_fill_cla(struct cqm_handle *cqm_handle) -{ - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct cqm_cla_table *cla_table = NULL; - u32 entry_type = CQM_BAT_ENTRY_T_INVALID; - u8 *entry_base_addr = NULL; - u32 i = 0; - - /* Fills each item in the BAT table according to the BAT format. */ - entry_base_addr = bat_table->bat; - for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { - entry_type = bat_table->bat_entry_type[i]; - cla_table = &bat_table->entry[i]; - - if (entry_type == CQM_BAT_ENTRY_T_CFG) - cqm_bat_fill_cla_cfg(cqm_handle, cla_table, &entry_base_addr); - else if (entry_type == CQM_BAT_ENTRY_T_TASKMAP) - cqm_bat_fill_cla_taskmap(cqm_handle, cla_table, &entry_base_addr); - else if (entry_type == CQM_BAT_ENTRY_T_INVALID) - cqm_bat_fill_cla_invalid(cqm_handle, cla_table, &entry_base_addr); - else if (entry_type == CQM_BAT_ENTRY_T_TIMER) - cqm_bat_fill_cla_timer(cqm_handle, cla_table, &entry_base_addr); - else - cqm_bat_fill_cla_other(cqm_handle, cla_table, &entry_base_addr); - - /* Check whether entry_base_addr is out-of-bounds array. 
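 * (With CQM_BAT_ENTRY_MAX = 16 and CQM_BAT_ENTRY_SIZE = 16, as defined in
 * spfc_cqm_bat_cla.h below, this walk is bounded at bat + 256 bytes.)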
*/ - if (entry_base_addr >= (bat_table->bat + CQM_BAT_ENTRY_MAX * CQM_BAT_ENTRY_SIZE)) - break; - } -} - -u32 cqm_funcid2smfid(struct cqm_handle *cqm_handle) -{ - u32 funcid = 0; - u32 smf_sel = 0; - u32 smf_id = 0; - u32 smf_pg_partial = 0; - /* SMF_Selection is selected based on - * the lower two bits of the function id - */ - u32 lbf_smfsel[4] = {0, 2, 1, 3}; - /* SMFID is selected based on SMF_PG[1:0] and SMF_Selection(0-1) */ - u32 smfsel_smfid01[4][2] = { {0, 0}, {0, 0}, {1, 1}, {0, 1} }; - /* SMFID is selected based on SMF_PG[3:2] and SMF_Selection(2-4) */ - u32 smfsel_smfid23[4][2] = { {2, 2}, {2, 2}, {3, 3}, {2, 3} }; - - /* When the LB mode is disabled, SMF0 is always returned. */ - if (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_NORMAL) { - smf_id = 0; - } else { - funcid = cqm_handle->func_attribute.func_global_idx & 0x3; - smf_sel = lbf_smfsel[funcid]; - - if (smf_sel < 2) { - smf_pg_partial = cqm_handle->func_capability.smf_pg & 0x3; - smf_id = smfsel_smfid01[smf_pg_partial][smf_sel]; - } else { - smf_pg_partial = (cqm_handle->func_capability.smf_pg >> 2) & 0x3; - smf_id = smfsel_smfid23[smf_pg_partial][smf_sel - 2]; - } - } - - return smf_id; -} - -/* This function is used in LB mode 1/2. The timer spoker info - * of independent space needs to be configured for 4 SMFs. - */ -static void cqm_update_timer_gpa(struct cqm_handle *cqm_handle, u32 smf_id) -{ - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct cqm_cla_table *cla_table = NULL; - u32 entry_type = CQM_BAT_ENTRY_T_INVALID; - u8 *entry_base_addr = NULL; - u32 i = 0; - - if (cqm_handle->func_attribute.func_type != CQM_PPF) - return; - - if (cqm_handle->func_capability.lb_mode != CQM_LB_MODE_1 && - cqm_handle->func_capability.lb_mode != CQM_LB_MODE_2) - return; - - cla_table = &bat_table->timer_entry[smf_id]; - entry_base_addr = bat_table->bat; - for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { - entry_type = bat_table->bat_entry_type[i]; - - if (entry_type == CQM_BAT_ENTRY_T_TIMER) { - cqm_bat_fill_cla_timer(cqm_handle, cla_table, &entry_base_addr); - break; - } - - if (entry_type == CQM_BAT_ENTRY_T_TASKMAP) - entry_base_addr += sizeof(struct cqm_bat_entry_taskmap); - else - entry_base_addr += CQM_BAT_ENTRY_SIZE; - - /* Check whether entry_base_addr is out-of-bounds array. 
*/ - if (entry_base_addr >= - (bat_table->bat + CQM_BAT_ENTRY_MAX * CQM_BAT_ENTRY_SIZE)) - break; - } -} - -static s32 cqm_bat_update_cmd(struct cqm_handle *cqm_handle, struct cqm_cmd_buf *buf_in, - u32 smf_id, u32 func_id) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_cmdq_bat_update *bat_update_cmd = NULL; - s32 ret = CQM_FAIL; - - bat_update_cmd = (struct cqm_cmdq_bat_update *)(buf_in->buf); - bat_update_cmd->offset = 0; - - if (cqm_handle->bat_table.bat_size > CQM_BAT_MAX_SIZE) { - cqm_err(handle->dev_hdl, - "bat_size = %u, which is more than %d.\n", - cqm_handle->bat_table.bat_size, CQM_BAT_MAX_SIZE); - return CQM_FAIL; - } - bat_update_cmd->byte_len = cqm_handle->bat_table.bat_size; - - memcpy(bat_update_cmd->data, cqm_handle->bat_table.bat, bat_update_cmd->byte_len); - - bat_update_cmd->smf_id = smf_id; - bat_update_cmd->func_id = func_id; - - cqm_info(handle->dev_hdl, "Bat update: smf_id=%u\n", bat_update_cmd->smf_id); - cqm_info(handle->dev_hdl, "Bat update: func_id=%u\n", bat_update_cmd->func_id); - - cqm_swab32((u8 *)bat_update_cmd, sizeof(struct cqm_cmdq_bat_update) >> CQM_DW_SHIFT); - - ret = cqm3_send_cmd_box((void *)(cqm_handle->ex_handle), CQM_MOD_CQM, - CQM_CMD_T_BAT_UPDATE, buf_in, NULL, NULL, - CQM_CMD_TIMEOUT, SPHW_CHANNEL_DEFAULT); - if (ret != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm3_send_cmd_box)); - cqm_err(handle->dev_hdl, "%s: send_cmd_box ret=%d\n", __func__, - ret); - return CQM_FAIL; - } - - return CQM_SUCCESS; -} - -s32 cqm_bat_update(struct cqm_handle *cqm_handle) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_cmd_buf *buf_in = NULL; - s32 ret = CQM_FAIL; - u32 smf_id = 0; - u32 func_id = 0; - u32 i = 0; - - buf_in = cqm3_cmd_alloc((void *)(cqm_handle->ex_handle)); - CQM_PTR_CHECK_RET(buf_in, CQM_FAIL, CQM_ALLOC_FAIL(buf_in)); - buf_in->size = sizeof(struct cqm_cmdq_bat_update); - - /* In non-fake mode, func_id is set to 0xffff */ - func_id = 0xffff; - - /* The LB scenario is supported. - * The normal mode is the traditional mode and is configured on SMF0. - * In mode 0, load is balanced to four SMFs based on the func ID (except - * the PPF func ID). The PPF in mode 0 needs to be configured on four - * SMF, so the timer resources can be shared by the four timer engine. - * Mode 1/2 is load balanced to four SMF by flow. Therefore, one - * function needs to be configured to four SMF. - */ - if (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_NORMAL || - (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_0 && - cqm_handle->func_attribute.func_type != CQM_PPF)) { - smf_id = cqm_funcid2smfid(cqm_handle); - ret = cqm_bat_update_cmd(cqm_handle, buf_in, smf_id, func_id); - } else if ((cqm_handle->func_capability.lb_mode == CQM_LB_MODE_1) || - (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_2) || - ((cqm_handle->func_capability.lb_mode == CQM_LB_MODE_0) && - (cqm_handle->func_attribute.func_type == CQM_PPF))) { - for (i = 0; i < CQM_LB_SMF_MAX; i++) { - cqm_update_timer_gpa(cqm_handle, i); - - /* The smf_pg variable stores the currently enabled SMF. 
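 * For example, smf_pg = 0x5 (binary 0101) means only SMF0 and SMF2 are
 * enabled, so the BAT update command below is issued just for smf_id 0 and 2.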
*/ - if (cqm_handle->func_capability.smf_pg & (1U << i)) { - smf_id = i; - ret = cqm_bat_update_cmd(cqm_handle, buf_in, smf_id, func_id); - if (ret != CQM_SUCCESS) - goto out; - } - } - } else { - cqm_err(handle->dev_hdl, "Bat update: unsupport lb mode=%u\n", - cqm_handle->func_capability.lb_mode); - ret = CQM_FAIL; - } - -out: - cqm3_cmd_free((void *)(cqm_handle->ex_handle), buf_in); - return ret; -} - -s32 cqm_bat_init_ft(struct cqm_handle *cqm_handle, struct cqm_bat_table *bat_table, - enum func_type function_type) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - u32 i = 0; - - bat_table->bat_entry_type[CQM_BAT_INDEX0] = CQM_BAT_ENTRY_T_CFG; - bat_table->bat_entry_type[CQM_BAT_INDEX1] = CQM_BAT_ENTRY_T_HASH; - bat_table->bat_entry_type[CQM_BAT_INDEX2] = CQM_BAT_ENTRY_T_QPC; - bat_table->bat_entry_type[CQM_BAT_INDEX3] = CQM_BAT_ENTRY_T_SCQC; - bat_table->bat_entry_type[CQM_BAT_INDEX4] = CQM_BAT_ENTRY_T_LUN; - bat_table->bat_entry_type[CQM_BAT_INDEX5] = CQM_BAT_ENTRY_T_TASKMAP; - - if (function_type == CQM_PF || function_type == CQM_PPF) { - bat_table->bat_entry_type[CQM_BAT_INDEX6] = CQM_BAT_ENTRY_T_L3I; - bat_table->bat_entry_type[CQM_BAT_INDEX7] = CQM_BAT_ENTRY_T_CHILDC; - bat_table->bat_entry_type[CQM_BAT_INDEX8] = CQM_BAT_ENTRY_T_TIMER; - bat_table->bat_entry_type[CQM_BAT_INDEX9] = CQM_BAT_ENTRY_T_XID2CID; - bat_table->bat_entry_type[CQM_BAT_INDEX10] = CQM_BAT_ENTRY_T_REORDER; - bat_table->bat_size = CQM_BAT_SIZE_FT_PF; - } else if (function_type == CQM_VF) { - bat_table->bat_size = CQM_BAT_SIZE_FT_VF; - } else { - for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) - bat_table->bat_entry_type[i] = CQM_BAT_ENTRY_T_INVALID; - - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(function_type)); - return CQM_FAIL; - } - - return CQM_SUCCESS; -} - -s32 cqm_bat_init(struct cqm_handle *cqm_handle) -{ - struct cqm_func_capability *capability = &cqm_handle->func_capability; - enum func_type function_type = cqm_handle->func_attribute.func_type; - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - u32 i; - - memset(bat_table, 0, sizeof(struct cqm_bat_table)); - - /* Initialize the type of each bat entry. */ - for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) - bat_table->bat_entry_type[i] = CQM_BAT_ENTRY_T_INVALID; - - /* Select BATs based on service types. Currently, - * feature-related resources of the VF are stored in the BATs of the VF. - */ - if (capability->ft_enable) - return cqm_bat_init_ft(cqm_handle, bat_table, function_type); - - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(capability->ft_enable)); - - return CQM_FAIL; -} - -void cqm_bat_uninit(struct cqm_handle *cqm_handle) -{ - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - u32 i; - - for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) - bat_table->bat_entry_type[i] = CQM_BAT_ENTRY_T_INVALID; - - memset(bat_table->bat, 0, CQM_BAT_ENTRY_MAX * CQM_BAT_ENTRY_SIZE); - - /* Instruct the chip to update the BAT table. 
*/ - if (cqm_bat_update(cqm_handle) != CQM_SUCCESS) - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bat_update)); -} - -s32 cqm_cla_fill_buf(struct cqm_handle *cqm_handle, struct cqm_buf *cla_base_buf, - struct cqm_buf *cla_sub_buf, u8 gpa_check_enable) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct sphw_func_attr *func_attr = NULL; - dma_addr_t *base = NULL; - u64 fake_en = 0; - u64 spu_en = 0; - u64 pf_id = 0; - u32 i = 0; - u32 addr_num; - u32 buf_index = 0; - - /* Apply for space for base_buf */ - if (!cla_base_buf->buf_list) { - if (cqm_buf_alloc(cqm_handle, cla_base_buf, false) == - CQM_FAIL) { - cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(cla_base_buf)); - return CQM_FAIL; - } - } - - /* Apply for space for sub_buf */ - if (!cla_sub_buf->buf_list) { - if (cqm_buf_alloc(cqm_handle, cla_sub_buf, false) == CQM_FAIL) { - cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(cla_sub_buf)); - cqm_buf_free(cla_base_buf, cqm_handle->dev); - return CQM_FAIL; - } - } - - /* Fill base_buff with the gpa of sub_buf */ - addr_num = cla_base_buf->buf_size / sizeof(dma_addr_t); - base = (dma_addr_t *)(cla_base_buf->buf_list[0].va); - for (i = 0; i < cla_sub_buf->buf_number; i++) { - /* The SPU SMF supports load balancing from the SMF to the CPI, - * depending on the host ID and func ID. - */ - if (sphw_host_id(cqm_handle->ex_handle) == CQM_SPU_HOST_ID) { - func_attr = &cqm_handle->func_attribute; - spu_en = (u64)(func_attr->func_global_idx & 0x1) << 63; - } else { - spu_en = 0; - } - - *base = (((((cla_sub_buf->buf_list[i].pa & CQM_CHIP_GPA_MASK) | - spu_en) | - fake_en) | - pf_id) | - gpa_check_enable); - - cqm_swab64((u8 *)base, 1); - if ((i + 1) % addr_num == 0) { - buf_index++; - if (buf_index < cla_base_buf->buf_number) - base = cla_base_buf->buf_list[buf_index].va; - } else { - base++; - } - } - - return CQM_SUCCESS; -} - -s32 cqm_cla_xyz_lvl1(struct cqm_handle *cqm_handle, struct cqm_cla_table *cla_table, - u32 trunk_size) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_buf *cla_y_buf = NULL; - struct cqm_buf *cla_z_buf = NULL; - s32 shift = 0; - s32 ret = CQM_FAIL; - u8 gpa_check_enable = cqm_handle->func_capability.gpa_check_enable; - u32 cache_line = 0; - - if (cla_table->type == CQM_BAT_ENTRY_T_TIMER && cqm_ver == 8) - cache_line = CQM_CHIP_TIMER_CACHELINE; - else - cache_line = CQM_CHIP_CACHELINE; - - if (cla_table->type == CQM_BAT_ENTRY_T_REORDER) - gpa_check_enable = 0; - - cla_table->cla_lvl = CQM_CLA_LVL_1; - - shift = cqm_shift(trunk_size / cla_table->obj_size); - cla_table->z = shift ? (shift - 1) : (shift); - cla_table->y = CQM_MAX_INDEX_BIT; - cla_table->x = 0; - - if (cla_table->obj_size >= cache_line) { - cla_table->cacheline_z = cla_table->z; - cla_table->cacheline_y = cla_table->y; - cla_table->cacheline_x = cla_table->x; - } else { - shift = cqm_shift(trunk_size / cache_line); - cla_table->cacheline_z = shift ? 
(shift - 1) : (shift); - cla_table->cacheline_y = CQM_MAX_INDEX_BIT; - cla_table->cacheline_x = 0; - } - - /* Applying for CLA_Y_BUF Space */ - cla_y_buf = &cla_table->cla_y_buf; - cla_y_buf->buf_size = trunk_size; - cla_y_buf->buf_number = 1; - cla_y_buf->page_number = cla_y_buf->buf_number << - cla_table->trunk_order; - ret = cqm_buf_alloc(cqm_handle, cla_y_buf, false); - CQM_CHECK_EQUAL_RET(handle->dev_hdl, ret, CQM_SUCCESS, CQM_FAIL, - CQM_ALLOC_FAIL(lvl_1_y_buf)); - - /* Applying for CLA_Z_BUF Space */ - cla_z_buf = &cla_table->cla_z_buf; - cla_z_buf->buf_size = trunk_size; - cla_z_buf->buf_number = - (ALIGN(cla_table->max_buffer_size, trunk_size)) / trunk_size; - cla_z_buf->page_number = cla_z_buf->buf_number << - cla_table->trunk_order; - /* All buffer space must be statically allocated. */ - if (cla_table->alloc_static) { - ret = cqm_cla_fill_buf(cqm_handle, cla_y_buf, cla_z_buf, - gpa_check_enable); - CQM_CHECK_EQUAL_RET(handle->dev_hdl, ret, CQM_SUCCESS, CQM_FAIL, - CQM_FUNCTION_FAIL(cqm_cla_fill_buf)); - } else { /* Only the buffer list space is initialized. The buffer space - * is dynamically allocated in services. - */ - cla_z_buf->buf_list = vmalloc(cla_z_buf->buf_number * - sizeof(struct cqm_buf_list)); - if (!cla_z_buf->buf_list) { - cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(lvl_1_z_buf)); - cqm_buf_free(cla_y_buf, cqm_handle->dev); - return CQM_FAIL; - } - memset(cla_z_buf->buf_list, 0, - cla_z_buf->buf_number * sizeof(struct cqm_buf_list)); - } - - return CQM_SUCCESS; -} - -s32 cqm_cla_xyz_lvl2(struct cqm_handle *cqm_handle, struct cqm_cla_table *cla_table, - u32 trunk_size) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_buf *cla_x_buf = NULL; - struct cqm_buf *cla_y_buf = NULL; - struct cqm_buf *cla_z_buf = NULL; - s32 shift = 0; - s32 ret = CQM_FAIL; - u8 gpa_check_enable = cqm_handle->func_capability.gpa_check_enable; - u32 cache_line = 0; - - if (cla_table->type == CQM_BAT_ENTRY_T_TIMER && cqm_ver == 8) - cache_line = CQM_CHIP_TIMER_CACHELINE; - else - cache_line = CQM_CHIP_CACHELINE; - - if (cla_table->type == CQM_BAT_ENTRY_T_REORDER) - gpa_check_enable = 0; - - cla_table->cla_lvl = CQM_CLA_LVL_2; - - shift = cqm_shift(trunk_size / cla_table->obj_size); - cla_table->z = shift ? (shift - 1) : (shift); - shift = cqm_shift(trunk_size / sizeof(dma_addr_t)); - cla_table->y = cla_table->z + shift; - cla_table->x = CQM_MAX_INDEX_BIT; - - if (cla_table->obj_size >= cache_line) { - cla_table->cacheline_z = cla_table->z; - cla_table->cacheline_y = cla_table->y; - cla_table->cacheline_x = cla_table->x; - } else { - shift = cqm_shift(trunk_size / cache_line); - cla_table->cacheline_z = shift ? 
(shift - 1) : (shift); - shift = cqm_shift(trunk_size / sizeof(dma_addr_t)); - cla_table->cacheline_y = cla_table->cacheline_z + shift; - cla_table->cacheline_x = CQM_MAX_INDEX_BIT; - } - - /* Apply for CLA_X_BUF Space */ - cla_x_buf = &cla_table->cla_x_buf; - cla_x_buf->buf_size = trunk_size; - cla_x_buf->buf_number = 1; - cla_x_buf->page_number = cla_x_buf->buf_number << - cla_table->trunk_order; - ret = cqm_buf_alloc(cqm_handle, cla_x_buf, false); - CQM_CHECK_EQUAL_RET(handle->dev_hdl, ret, CQM_SUCCESS, CQM_FAIL, - CQM_ALLOC_FAIL(lvl_2_x_buf)); - - /* Apply for CLA_Z_BUF and CLA_Y_BUF Space */ - cla_z_buf = &cla_table->cla_z_buf; - cla_z_buf->buf_size = trunk_size; - cla_z_buf->buf_number = - (ALIGN(cla_table->max_buffer_size, trunk_size)) / trunk_size; - cla_z_buf->page_number = cla_z_buf->buf_number << - cla_table->trunk_order; - - cla_y_buf = &cla_table->cla_y_buf; - cla_y_buf->buf_size = trunk_size; - cla_y_buf->buf_number = - (ALIGN(cla_z_buf->buf_number * sizeof(dma_addr_t), trunk_size)) / - trunk_size; - cla_y_buf->page_number = cla_y_buf->buf_number << - cla_table->trunk_order; - /* All buffer space must be statically allocated. */ - if (cla_table->alloc_static) { - /* Apply for y buf and z buf, and fill the gpa of - * z buf list in y buf - */ - if (cqm_cla_fill_buf(cqm_handle, cla_y_buf, cla_z_buf, - gpa_check_enable) == CQM_FAIL) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_cla_fill_buf)); - cqm_buf_free(cla_x_buf, cqm_handle->dev); - return CQM_FAIL; - } - - /* Fill the gpa of the y buf list into the x buf. - * After the x and y bufs are applied for, - * this function will not fail. - * Use void to forcibly convert the return of the function. - */ - (void)cqm_cla_fill_buf(cqm_handle, cla_x_buf, cla_y_buf, - gpa_check_enable); - } else { /* Only the buffer list space is initialized. The buffer space - * is dynamically allocated in services. - */ - cla_z_buf->buf_list = vmalloc(cla_z_buf->buf_number * - sizeof(struct cqm_buf_list)); - if (!cla_z_buf->buf_list) { - cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(lvl_2_z_buf)); - cqm_buf_free(cla_x_buf, cqm_handle->dev); - return CQM_FAIL; - } - memset(cla_z_buf->buf_list, 0, - cla_z_buf->buf_number * sizeof(struct cqm_buf_list)); - - cla_y_buf->buf_list = vmalloc(cla_y_buf->buf_number * - sizeof(struct cqm_buf_list)); - if (!cla_y_buf->buf_list) { - cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(lvl_2_y_buf)); - cqm_buf_free(cla_z_buf, cqm_handle->dev); - cqm_buf_free(cla_x_buf, cqm_handle->dev); - return CQM_FAIL; - } - memset(cla_y_buf->buf_list, 0, - cla_y_buf->buf_number * sizeof(struct cqm_buf_list)); - } - - return CQM_SUCCESS; -} - -s32 cqm_cla_xyz_check(struct cqm_handle *cqm_handle, struct cqm_cla_table *cla_table, - u32 *size) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - u32 trunk_size = 0; - - /* If the capability(obj_num) is set to 0, the CLA does not need to be - * initialized and exits directly. - */ - if (cla_table->obj_num == 0) { - cqm_info(handle->dev_hdl, - "Cla alloc: cla_type %u, obj_num=0, don't alloc buffer\n", - cla_table->type); - return CQM_SUCCESS; - } - - cqm_info(handle->dev_hdl, - "Cla alloc: cla_type %u, obj_num=0x%x, gpa_check_enable=%d\n", - cla_table->type, cla_table->obj_num, - cqm_handle->func_capability.gpa_check_enable); - - /* Check whether obj_size is 2^n-aligned. An error is reported when - * obj_size is 0 or 1. 
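 * For example, a 256-byte or 512-byte obj_size passes this check, while 0, 1,
 * or a non-power-of-two size such as 384 is rejected.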
- */ - if (!cqm_check_align(cla_table->obj_size)) { - cqm_err(handle->dev_hdl, - "Cla alloc: cla_type %u, obj_size 0x%x is not align on 2^n\n", - cla_table->type, cla_table->obj_size); - return CQM_FAIL; - } - - trunk_size = (u32)(PAGE_SIZE << cla_table->trunk_order); - - if (trunk_size < cla_table->obj_size) { - cqm_err(handle->dev_hdl, - "Cla alloc: cla type %u, obj_size 0x%x is out of trunk size\n", - cla_table->type, cla_table->obj_size); - return CQM_FAIL; - } - - *size = trunk_size; - - return CQM_CONTINUE; -} - -s32 cqm_cla_xyz(struct cqm_handle *cqm_handle, struct cqm_cla_table *cla_table) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_buf *cla_z_buf = NULL; - u32 trunk_size = 0; - s32 ret = CQM_FAIL; - - ret = cqm_cla_xyz_check(cqm_handle, cla_table, &trunk_size); - if (ret != CQM_CONTINUE) - return ret; - - /* Level-0 CLA occupies a small space. - * Only CLA_Z_BUF can be allocated during initialization. - */ - if (cla_table->max_buffer_size <= trunk_size) { - cla_table->cla_lvl = CQM_CLA_LVL_0; - - cla_table->z = CQM_MAX_INDEX_BIT; - cla_table->y = 0; - cla_table->x = 0; - - cla_table->cacheline_z = cla_table->z; - cla_table->cacheline_y = cla_table->y; - cla_table->cacheline_x = cla_table->x; - - /* Applying for CLA_Z_BUF Space */ - cla_z_buf = &cla_table->cla_z_buf; - cla_z_buf->buf_size = trunk_size; /* (u32)(PAGE_SIZE << - * cla_table->trunk_order); - */ - cla_z_buf->buf_number = 1; - cla_z_buf->page_number = cla_z_buf->buf_number << cla_table->trunk_order; - ret = cqm_buf_alloc(cqm_handle, cla_z_buf, false); - CQM_CHECK_EQUAL_RET(handle->dev_hdl, ret, CQM_SUCCESS, CQM_FAIL, - CQM_ALLOC_FAIL(lvl_0_z_buf)); - } - /* Level-1 CLA - * Allocates CLA_Y_BUF and CLA_Z_BUF during initialization. - */ - else if (cla_table->max_buffer_size <= (trunk_size * (trunk_size / sizeof(dma_addr_t)))) { - if (cqm_cla_xyz_lvl1(cqm_handle, cla_table, trunk_size) == CQM_FAIL) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_xyz_lvl1)); - return CQM_FAIL; - } - } - /* Level-2 CLA - * Allocates CLA_X_BUF, CLA_Y_BUF, and CLA_Z_BUF during initialization. - */ - else if (cla_table->max_buffer_size <= - (trunk_size * (trunk_size / sizeof(dma_addr_t)) * - (trunk_size / sizeof(dma_addr_t)))) { - if (cqm_cla_xyz_lvl2(cqm_handle, cla_table, trunk_size) == - CQM_FAIL) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_xyz_lvl2)); - return CQM_FAIL; - } - } else { /* The current memory management mode does not support such - * a large buffer addressing. The order value needs to - * be increased. 
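 *
 * As a rough illustration, with 4 KB pages, trunk_order = 0 and 8-byte
 * dma_addr_t entries: level 0 covers a max_buffer_size of up to 4 KB,
 * level 1 up to 4 KB * 512 = 2 MB, and level 2 up to 4 KB * 512 * 512 = 1 GB;
 * anything larger needs a bigger trunk_order.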
- */ - cqm_err(handle->dev_hdl, - "Cla alloc: cla max_buffer_size 0x%x exceeds support range\n", - cla_table->max_buffer_size); - return CQM_FAIL; - } - - return CQM_SUCCESS; -} - -void cqm_cla_init_entry_normal(struct cqm_handle *cqm_handle, - struct cqm_cla_table *cla_table, - struct cqm_func_capability *capability) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - - switch (cla_table->type) { - case CQM_BAT_ENTRY_T_HASH: - cla_table->trunk_order = capability->pagesize_reorder; - cla_table->max_buffer_size = capability->hash_number * capability->hash_basic_size; - cla_table->obj_size = capability->hash_basic_size; - cla_table->obj_num = capability->hash_number; - cla_table->alloc_static = true; - break; - case CQM_BAT_ENTRY_T_QPC: - cla_table->trunk_order = capability->pagesize_reorder; - cla_table->max_buffer_size = capability->qpc_number * capability->qpc_basic_size; - cla_table->obj_size = capability->qpc_basic_size; - cla_table->obj_num = capability->qpc_number; - cla_table->alloc_static = capability->qpc_alloc_static; - cqm_info(handle->dev_hdl, "Cla alloc: qpc alloc_static=%d\n", - cla_table->alloc_static); - break; - case CQM_BAT_ENTRY_T_MPT: - cla_table->trunk_order = capability->pagesize_reorder; - cla_table->max_buffer_size = capability->mpt_number * capability->mpt_basic_size; - cla_table->obj_size = capability->mpt_basic_size; - cla_table->obj_num = capability->mpt_number; - /* CCB decided. MPT uses only static application scenarios. */ - cla_table->alloc_static = true; - break; - case CQM_BAT_ENTRY_T_SCQC: - cla_table->trunk_order = capability->pagesize_reorder; - cla_table->max_buffer_size = capability->scqc_number * capability->scqc_basic_size; - cla_table->obj_size = capability->scqc_basic_size; - cla_table->obj_num = capability->scqc_number; - cla_table->alloc_static = capability->scqc_alloc_static; - cqm_info(handle->dev_hdl, "Cla alloc: scqc alloc_static=%d\n", - cla_table->alloc_static); - break; - case CQM_BAT_ENTRY_T_SRQC: - cla_table->trunk_order = capability->pagesize_reorder; - cla_table->max_buffer_size = capability->srqc_number * capability->srqc_basic_size; - cla_table->obj_size = capability->srqc_basic_size; - cla_table->obj_num = capability->srqc_number; - cla_table->alloc_static = false; - break; - default: - break; - } -} - -void cqm_cla_init_entry_extern(struct cqm_handle *cqm_handle, - struct cqm_cla_table *cla_table, - struct cqm_func_capability *capability) -{ - switch (cla_table->type) { - case CQM_BAT_ENTRY_T_GID: - /* Level-0 CLA table required */ - cla_table->max_buffer_size = capability->gid_number * capability->gid_basic_size; - cla_table->trunk_order = - (u32)cqm_shift(ALIGN(cla_table->max_buffer_size, PAGE_SIZE) / PAGE_SIZE); - cla_table->obj_size = capability->gid_basic_size; - cla_table->obj_num = capability->gid_number; - cla_table->alloc_static = true; - break; - case CQM_BAT_ENTRY_T_LUN: - cla_table->trunk_order = CLA_TABLE_PAGE_ORDER; - cla_table->max_buffer_size = capability->lun_number * capability->lun_basic_size; - cla_table->obj_size = capability->lun_basic_size; - cla_table->obj_num = capability->lun_number; - cla_table->alloc_static = true; - break; - case CQM_BAT_ENTRY_T_TASKMAP: - cla_table->trunk_order = CQM_4K_PAGE_ORDER; - cla_table->max_buffer_size = capability->taskmap_number * - capability->taskmap_basic_size; - cla_table->obj_size = capability->taskmap_basic_size; - cla_table->obj_num = capability->taskmap_number; - cla_table->alloc_static = true; - break; - case CQM_BAT_ENTRY_T_L3I: - cla_table->trunk_order = 
CLA_TABLE_PAGE_ORDER; - cla_table->max_buffer_size = capability->l3i_number * capability->l3i_basic_size; - cla_table->obj_size = capability->l3i_basic_size; - cla_table->obj_num = capability->l3i_number; - cla_table->alloc_static = true; - break; - case CQM_BAT_ENTRY_T_CHILDC: - cla_table->trunk_order = capability->pagesize_reorder; - cla_table->max_buffer_size = capability->childc_number * - capability->childc_basic_size; - cla_table->obj_size = capability->childc_basic_size; - cla_table->obj_num = capability->childc_number; - cla_table->alloc_static = true; - break; - case CQM_BAT_ENTRY_T_TIMER: - /* Ensure that the basic size of the timer buffer page does not - * exceed 128 x 4 KB. Otherwise, clearing the timer buffer of - * the function is complex. - */ - cla_table->trunk_order = CQM_4K_PAGE_ORDER; - cla_table->max_buffer_size = capability->timer_number * - capability->timer_basic_size; - cla_table->obj_size = capability->timer_basic_size; - cla_table->obj_num = capability->timer_number; - cla_table->alloc_static = true; - break; - case CQM_BAT_ENTRY_T_XID2CID: - cla_table->trunk_order = capability->pagesize_reorder; - cla_table->max_buffer_size = capability->xid2cid_number * - capability->xid2cid_basic_size; - cla_table->obj_size = capability->xid2cid_basic_size; - cla_table->obj_num = capability->xid2cid_number; - cla_table->alloc_static = true; - break; - case CQM_BAT_ENTRY_T_REORDER: - /* This entry supports only IWARP and does not support GPA validity check. */ - cla_table->trunk_order = capability->pagesize_reorder; - cla_table->max_buffer_size = capability->reorder_number * - capability->reorder_basic_size; - cla_table->obj_size = capability->reorder_basic_size; - cla_table->obj_num = capability->reorder_number; - cla_table->alloc_static = true; - break; - default: - break; - } -} - -s32 cqm_cla_init_entry_condition(struct cqm_handle *cqm_handle, u32 entry_type) -{ - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct cqm_cla_table *cla_table = &bat_table->entry[entry_type]; - struct cqm_cla_table *cla_table_timer = NULL; - u32 i; - - /* When the timer is in LB mode 1 or 2, the timer needs to be - * configured for four SMFs and the address space is independent. - */ - if (cla_table->type == CQM_BAT_ENTRY_T_TIMER && - (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_1 || - cqm_handle->func_capability.lb_mode == CQM_LB_MODE_2)) { - for (i = 0; i < CQM_LB_SMF_MAX; i++) { - cla_table_timer = &bat_table->timer_entry[i]; - memcpy(cla_table_timer, cla_table, sizeof(struct cqm_cla_table)); - - if (cqm_cla_xyz(cqm_handle, cla_table_timer) == CQM_FAIL) { - cqm_cla_uninit(cqm_handle, entry_type); - return CQM_FAIL; - } - } - } - - if (cqm_cla_xyz(cqm_handle, cla_table) == CQM_FAIL) { - cqm_cla_uninit(cqm_handle, entry_type); - return CQM_FAIL; - } - - return CQM_SUCCESS; -} - -s32 cqm_cla_init_entry(struct cqm_handle *cqm_handle, - struct cqm_func_capability *capability) -{ - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct cqm_cla_table *cla_table = NULL; - s32 ret; - u32 i = 0; - - for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { - cla_table = &bat_table->entry[i]; - cla_table->type = bat_table->bat_entry_type[i]; - - cqm_cla_init_entry_normal(cqm_handle, cla_table, capability); - cqm_cla_init_entry_extern(cqm_handle, cla_table, capability); - - /* Allocate CLA entry space at each level. 
*/ - if (cla_table->type < CQM_BAT_ENTRY_T_HASH || - cla_table->type > CQM_BAT_ENTRY_T_REORDER) { - mutex_init(&cla_table->lock); - continue; - } - - /* For the PPF, resources (8 wheels x 2k scales x 32B x - * func_num) need to be applied for to the timer. The - * structure of the timer entry in the BAT table needs - * to be filled. For the PF, no resource needs to be - * applied for the timer and no structure needs to be - * filled in the timer entry in the BAT table. - */ - if (!(cla_table->type == CQM_BAT_ENTRY_T_TIMER && - cqm_handle->func_attribute.func_type != CQM_PPF)) { - ret = cqm_cla_init_entry_condition(cqm_handle, i); - if (ret != CQM_SUCCESS) - return CQM_FAIL; - } - mutex_init(&cla_table->lock); - } - - return CQM_SUCCESS; -} - -s32 cqm_cla_init(struct cqm_handle *cqm_handle) -{ - struct cqm_func_capability *capability = &cqm_handle->func_capability; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - s32 ret; - - /* Applying for CLA Entries */ - ret = cqm_cla_init_entry(cqm_handle, capability); - if (ret != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_init_entry)); - return ret; - } - - /* After the CLA entry is applied, the address is filled in the BAT table. */ - cqm_bat_fill_cla(cqm_handle); - - /* Instruct the chip to update the BAT table. */ - ret = cqm_bat_update(cqm_handle); - if (ret != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bat_update)); - goto err; - } - - cqm_info(handle->dev_hdl, "Timer start: func_type=%d, timer_enable=%u\n", - cqm_handle->func_attribute.func_type, - cqm_handle->func_capability.timer_enable); - - if (cqm_handle->func_attribute.func_type == CQM_PPF && - cqm_handle->func_capability.timer_enable == CQM_TIMER_ENABLE) { - /* Enable the timer after the timer resources are applied for */ - cqm_info(handle->dev_hdl, "Timer start: spfc ppf timer start\n"); - ret = sphw_ppf_tmr_start((void *)(cqm_handle->ex_handle)); - if (ret != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, "Timer start: spfc ppf timer start, ret=%d\n", - ret); - goto err; - } - } - - return CQM_SUCCESS; - -err: - cqm_cla_uninit(cqm_handle, CQM_BAT_ENTRY_MAX); - return CQM_FAIL; -} - -void cqm_cla_uninit(struct cqm_handle *cqm_handle, u32 entry_numb) -{ - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct cqm_cla_table *cla_table = NULL; - s32 inv_flag = 0; - u32 i; - - for (i = 0; i < entry_numb; i++) { - cla_table = &bat_table->entry[i]; - if (cla_table->type != CQM_BAT_ENTRY_T_INVALID) { - cqm_buf_free_cache_inv(cqm_handle, &cla_table->cla_x_buf, &inv_flag); - cqm_buf_free_cache_inv(cqm_handle, &cla_table->cla_y_buf, &inv_flag); - cqm_buf_free_cache_inv(cqm_handle, &cla_table->cla_z_buf, &inv_flag); - } - } - - /* When the lb mode is 1/2, the timer space allocated to the 4 SMFs - * needs to be released. 
- */ - if (cqm_handle->func_attribute.func_type == CQM_PPF && - (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_1 || - cqm_handle->func_capability.lb_mode == CQM_LB_MODE_2)) { - for (i = 0; i < CQM_LB_SMF_MAX; i++) { - cla_table = &bat_table->timer_entry[i]; - cqm_buf_free_cache_inv(cqm_handle, &cla_table->cla_x_buf, &inv_flag); - cqm_buf_free_cache_inv(cqm_handle, &cla_table->cla_y_buf, &inv_flag); - cqm_buf_free_cache_inv(cqm_handle, &cla_table->cla_z_buf, &inv_flag); - } - } -} - -s32 cqm_cla_update_cmd(struct cqm_handle *cqm_handle, struct cqm_cmd_buf *buf_in, - struct cqm_cla_update_cmd *cmd) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_cla_update_cmd *cla_update_cmd = NULL; - s32 ret = CQM_FAIL; - - cla_update_cmd = (struct cqm_cla_update_cmd *)(buf_in->buf); - - cla_update_cmd->gpa_h = cmd->gpa_h; - cla_update_cmd->gpa_l = cmd->gpa_l; - cla_update_cmd->value_h = cmd->value_h; - cla_update_cmd->value_l = cmd->value_l; - cla_update_cmd->smf_id = cmd->smf_id; - cla_update_cmd->func_id = cmd->func_id; - - cqm_swab32((u8 *)cla_update_cmd, - (sizeof(struct cqm_cla_update_cmd) >> CQM_DW_SHIFT)); - - ret = cqm3_send_cmd_box((void *)(cqm_handle->ex_handle), CQM_MOD_CQM, - CQM_CMD_T_CLA_UPDATE, buf_in, NULL, NULL, - CQM_CMD_TIMEOUT, SPHW_CHANNEL_DEFAULT); - if (ret != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm3_send_cmd_box)); - cqm_err(handle->dev_hdl, "Cla alloc: cqm_cla_update, cqm3_send_cmd_box_ret=%d\n", - ret); - cqm_err(handle->dev_hdl, "Cla alloc: cqm_cla_update, cla_update_cmd: 0x%x 0x%x 0x%x 0x%x\n", - cmd->gpa_h, cmd->gpa_l, cmd->value_h, cmd->value_l); - return CQM_FAIL; - } - - return CQM_SUCCESS; -} - -s32 cqm_cla_update(struct cqm_handle *cqm_handle, struct cqm_buf_list *buf_node_parent, - struct cqm_buf_list *buf_node_child, u32 child_index, u8 cla_update_mode) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_cmd_buf *buf_in = NULL; - struct cqm_cla_update_cmd cmd; - dma_addr_t pa = 0; - s32 ret = CQM_FAIL; - u8 gpa_check_enable = cqm_handle->func_capability.gpa_check_enable; - u32 i = 0; - u64 spu_en; - - buf_in = cqm3_cmd_alloc(cqm_handle->ex_handle); - CQM_PTR_CHECK_RET(buf_in, CQM_FAIL, CQM_ALLOC_FAIL(buf_in)); - buf_in->size = sizeof(struct cqm_cla_update_cmd); - - /* Fill command format, convert to big endian. */ - /* SPU function sets bit63: acs_spu_en based on function id. */ - if (sphw_host_id(cqm_handle->ex_handle) == CQM_SPU_HOST_ID) - spu_en = ((u64)(cqm_handle->func_attribute.func_global_idx & - 0x1)) << 63; - else - spu_en = 0; - - pa = ((buf_node_parent->pa + (child_index * sizeof(dma_addr_t))) | - spu_en); - cmd.gpa_h = CQM_ADDR_HI(pa); - cmd.gpa_l = CQM_ADDR_LW(pa); - - pa = (buf_node_child->pa | spu_en); - cmd.value_h = CQM_ADDR_HI(pa); - cmd.value_l = CQM_ADDR_LW(pa); - - /* current CLA GPA CHECK */ - if (gpa_check_enable) { - switch (cla_update_mode) { - /* gpa[0]=1 means this GPA is valid */ - case CQM_CLA_RECORD_NEW_GPA: - cmd.value_l |= 1; - break; - /* gpa[0]=0 means this GPA is valid */ - case CQM_CLA_DEL_GPA_WITHOUT_CACHE_INVALID: - case CQM_CLA_DEL_GPA_WITH_CACHE_INVALID: - cmd.value_l &= (~1); - break; - default: - cqm_err(handle->dev_hdl, - "Cla alloc: %s, wrong cla_update_mode=%u\n", - __func__, cla_update_mode); - break; - } - } - - /* In non-fake mode, set func_id to 0xffff. */ - cmd.func_id = 0xffff; - - /* Mode 0 is hashed to 4 SMF engines (excluding PPF) by func ID. 
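 *
 * For example, with all four SMFs enabled (smf_pg = 0xF), cqm_funcid2smfid()
 * above maps func_global_idx 0/1/2/3 to SMF 0/2/1/3 respectively, using the
 * lower two bits of the function id to index lbf_smfsel[].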
*/ - if (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_NORMAL || - (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_0 && - cqm_handle->func_attribute.func_type != CQM_PPF)) { - cmd.smf_id = cqm_funcid2smfid(cqm_handle); - ret = cqm_cla_update_cmd(cqm_handle, buf_in, &cmd); - } - /* Modes 1/2 are allocated to four SMF engines by flow. - * Therefore, one function needs to be allocated to four SMF engines. - */ - /* Mode 0 PPF needs to be configured on 4 engines, - * and the timer resources need to be shared by the 4 engines. - */ - else if (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_1 || - cqm_handle->func_capability.lb_mode == CQM_LB_MODE_2 || - (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_0 && - cqm_handle->func_attribute.func_type == CQM_PPF)) { - for (i = 0; i < CQM_LB_SMF_MAX; i++) { - /* The smf_pg variable stores currently enabled SMF. */ - if (cqm_handle->func_capability.smf_pg & (1U << i)) { - cmd.smf_id = i; - ret = cqm_cla_update_cmd(cqm_handle, buf_in, - &cmd); - if (ret != CQM_SUCCESS) - goto out; - } - } - } else { - cqm_err(handle->dev_hdl, "Cla update: unsupport lb mode=%u\n", - cqm_handle->func_capability.lb_mode); - ret = CQM_FAIL; - } - -out: - cqm3_cmd_free((void *)(cqm_handle->ex_handle), buf_in); - return ret; -} - -s32 cqm_cla_alloc(struct cqm_handle *cqm_handle, struct cqm_cla_table *cla_table, - struct cqm_buf_list *buf_node_parent, - struct cqm_buf_list *buf_node_child, u32 child_index) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - s32 ret = CQM_FAIL; - - /* Apply for trunk page */ - buf_node_child->va = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - cla_table->trunk_order); - CQM_PTR_CHECK_RET(buf_node_child->va, CQM_FAIL, CQM_ALLOC_FAIL(va)); - - /* PCI mapping */ - buf_node_child->pa = pci_map_single(cqm_handle->dev, buf_node_child->va, - PAGE_SIZE << cla_table->trunk_order, - PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(cqm_handle->dev, buf_node_child->pa)) { - cqm_err(handle->dev_hdl, CQM_MAP_FAIL(buf_node_child->pa)); - goto err1; - } - - /* Notify the chip of trunk_pa so that the chip fills in cla entry */ - ret = cqm_cla_update(cqm_handle, buf_node_parent, buf_node_child, - child_index, CQM_CLA_RECORD_NEW_GPA); - if (ret != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_update)); - goto err2; - } - - return CQM_SUCCESS; - -err2: - pci_unmap_single(cqm_handle->dev, buf_node_child->pa, - PAGE_SIZE << cla_table->trunk_order, - PCI_DMA_BIDIRECTIONAL); -err1: - free_pages((ulong)(buf_node_child->va), cla_table->trunk_order); - buf_node_child->va = NULL; - return CQM_FAIL; -} - -void cqm_cla_free(struct cqm_handle *cqm_handle, struct cqm_cla_table *cla_table, - struct cqm_buf_list *buf_node_parent, - struct cqm_buf_list *buf_node_child, u32 child_index, u8 cla_update_mode) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - u32 trunk_size; - - if (cqm_cla_update(cqm_handle, buf_node_parent, buf_node_child, - child_index, cla_update_mode) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_update)); - return; - } - - if (cla_update_mode == CQM_CLA_DEL_GPA_WITH_CACHE_INVALID) { - trunk_size = (u32)(PAGE_SIZE << cla_table->trunk_order); - if (cqm_cla_cache_invalid(cqm_handle, buf_node_child->pa, - trunk_size) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_cla_cache_invalid)); - return; - } - } - - /* Remove PCI mapping from the trunk page */ - pci_unmap_single(cqm_handle->dev, buf_node_child->pa, - PAGE_SIZE << cla_table->trunk_order, - 
PCI_DMA_BIDIRECTIONAL); - - /* Rlease trunk page */ - free_pages((ulong)(buf_node_child->va), cla_table->trunk_order); - buf_node_child->va = NULL; -} - -u8 *cqm_cla_get_unlock_lvl0(struct cqm_handle *cqm_handle, - struct cqm_cla_table *cla_table, - u32 index, u32 count, dma_addr_t *pa) -{ - struct cqm_buf *cla_z_buf = &cla_table->cla_z_buf; - u8 *ret_addr = NULL; - u32 offset = 0; - - /* Level 0 CLA pages are statically allocated. */ - offset = index * cla_table->obj_size; - ret_addr = (u8 *)(cla_z_buf->buf_list->va) + offset; - *pa = cla_z_buf->buf_list->pa + offset; - - return ret_addr; -} - -u8 *cqm_cla_get_unlock_lvl1(struct cqm_handle *cqm_handle, - struct cqm_cla_table *cla_table, - u32 index, u32 count, dma_addr_t *pa) -{ - struct cqm_buf *cla_y_buf = &cla_table->cla_y_buf; - struct cqm_buf *cla_z_buf = &cla_table->cla_z_buf; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_buf_list *buf_node_y = NULL; - struct cqm_buf_list *buf_node_z = NULL; - u32 y_index = 0; - u32 z_index = 0; - u8 *ret_addr = NULL; - u32 offset = 0; - - z_index = index & ((1U << (cla_table->z + 1)) - 1); - y_index = index >> (cla_table->z + 1); - - if (y_index >= cla_z_buf->buf_number) { - cqm_err(handle->dev_hdl, - "Cla get: index exceeds buf_number, y_index %u, z_buf_number %u\n", - y_index, cla_z_buf->buf_number); - return NULL; - } - buf_node_z = &cla_z_buf->buf_list[y_index]; - buf_node_y = cla_y_buf->buf_list; - - /* The z buf node does not exist, applying for a page first. */ - if (!buf_node_z->va) { - if (cqm_cla_alloc(cqm_handle, cla_table, buf_node_y, buf_node_z, - y_index) == CQM_FAIL) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_cla_alloc)); - cqm_err(handle->dev_hdl, - "Cla get: cla_table->type=%u\n", - cla_table->type); - return NULL; - } - } - - buf_node_z->refcount += count; - offset = z_index * cla_table->obj_size; - ret_addr = (u8 *)(buf_node_z->va) + offset; - *pa = buf_node_z->pa + offset; - - return ret_addr; -} - -u8 *cqm_cla_get_unlock_lvl2(struct cqm_handle *cqm_handle, - struct cqm_cla_table *cla_table, - u32 index, u32 count, dma_addr_t *pa) -{ - struct cqm_buf *cla_x_buf = &cla_table->cla_x_buf; - struct cqm_buf *cla_y_buf = &cla_table->cla_y_buf; - struct cqm_buf *cla_z_buf = &cla_table->cla_z_buf; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_buf_list *buf_node_x = NULL; - struct cqm_buf_list *buf_node_y = NULL; - struct cqm_buf_list *buf_node_z = NULL; - u32 x_index = 0; - u32 y_index = 0; - u32 z_index = 0; - u32 trunk_size = (u32)(PAGE_SIZE << cla_table->trunk_order); - u8 *ret_addr = NULL; - u32 offset = 0; - u64 tmp; - - z_index = index & ((1U << (cla_table->z + 1)) - 1); - y_index = (index >> (cla_table->z + 1)) & - ((1U << (cla_table->y - cla_table->z)) - 1); - x_index = index >> (cla_table->y + 1); - tmp = x_index * (trunk_size / sizeof(dma_addr_t)) + y_index; - - if (x_index >= cla_y_buf->buf_number || tmp >= cla_z_buf->buf_number) { - cqm_err(handle->dev_hdl, - "Cla get: index exceeds buf_number, x_index %u, y_index %u, y_buf_number %u, z_buf_number %u\n", - x_index, y_index, cla_y_buf->buf_number, - cla_z_buf->buf_number); - return NULL; - } - - buf_node_x = cla_x_buf->buf_list; - buf_node_y = &cla_y_buf->buf_list[x_index]; - buf_node_z = &cla_z_buf->buf_list[tmp]; - - /* The y buf node does not exist, applying for pages for y node. 
*/ - if (!buf_node_y->va) { - if (cqm_cla_alloc(cqm_handle, cla_table, buf_node_x, buf_node_y, - x_index) == CQM_FAIL) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_cla_alloc)); - return NULL; - } - } - - /* The z buf node does not exist, applying for pages for z node. */ - if (!buf_node_z->va) { - if (cqm_cla_alloc(cqm_handle, cla_table, buf_node_y, buf_node_z, - y_index) == CQM_FAIL) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_cla_alloc)); - if (buf_node_y->refcount == 0) - /* To release node Y, cache_invalid is - * required. - */ - cqm_cla_free(cqm_handle, cla_table, buf_node_x, buf_node_y, x_index, - CQM_CLA_DEL_GPA_WITH_CACHE_INVALID); - return NULL; - } - - /* reference counting of the y buffer node needs to increase - * by 1. - */ - buf_node_y->refcount++; - } - - buf_node_z->refcount += count; - offset = z_index * cla_table->obj_size; - ret_addr = (u8 *)(buf_node_z->va) + offset; - *pa = buf_node_z->pa + offset; - - return ret_addr; -} - -u8 *cqm_cla_get_unlock(struct cqm_handle *cqm_handle, struct cqm_cla_table *cla_table, - u32 index, u32 count, dma_addr_t *pa) -{ - u8 *ret_addr = NULL; - - if (cla_table->cla_lvl == CQM_CLA_LVL_0) - ret_addr = cqm_cla_get_unlock_lvl0(cqm_handle, cla_table, index, - count, pa); - else if (cla_table->cla_lvl == CQM_CLA_LVL_1) - ret_addr = cqm_cla_get_unlock_lvl1(cqm_handle, cla_table, index, - count, pa); - else - ret_addr = cqm_cla_get_unlock_lvl2(cqm_handle, cla_table, index, - count, pa); - - return ret_addr; -} - -u8 *cqm_cla_get_lock(struct cqm_handle *cqm_handle, struct cqm_cla_table *cla_table, - u32 index, u32 count, dma_addr_t *pa) -{ - u8 *ret_addr = NULL; - - mutex_lock(&cla_table->lock); - - ret_addr = cqm_cla_get_unlock(cqm_handle, cla_table, index, count, pa); - - mutex_unlock(&cla_table->lock); - - return ret_addr; -} - -void cqm_cla_put(struct cqm_handle *cqm_handle, struct cqm_cla_table *cla_table, - u32 index, u32 count) -{ - struct cqm_buf *cla_z_buf = &cla_table->cla_z_buf; - struct cqm_buf *cla_y_buf = &cla_table->cla_y_buf; - struct cqm_buf *cla_x_buf = &cla_table->cla_x_buf; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_buf_list *buf_node_z = NULL; - struct cqm_buf_list *buf_node_y = NULL; - struct cqm_buf_list *buf_node_x = NULL; - u32 x_index = 0; - u32 y_index = 0; - u32 trunk_size = (u32)(PAGE_SIZE << cla_table->trunk_order); - u64 tmp; - - /* The buffer is applied statically, and the reference counting - * does not need to be controlled. - */ - if (cla_table->alloc_static) - return; - - mutex_lock(&cla_table->lock); - - if (cla_table->cla_lvl == CQM_CLA_LVL_1) { - y_index = index >> (cla_table->z + 1); - - if (y_index >= cla_z_buf->buf_number) { - cqm_err(handle->dev_hdl, - "Cla put: index exceeds buf_number, y_index %u, z_buf_number %u\n", - y_index, cla_z_buf->buf_number); - cqm_err(handle->dev_hdl, - "Cla put: cla_table->type=%u\n", - cla_table->type); - mutex_unlock(&cla_table->lock); - return; - } - - buf_node_z = &cla_z_buf->buf_list[y_index]; - buf_node_y = cla_y_buf->buf_list; - - /* When the value of reference counting on the z node page is 0, - * the z node page is released. - */ - buf_node_z->refcount -= count; - if (buf_node_z->refcount == 0) - /* The cache invalid is not required for the Z node. 
*/ - cqm_cla_free(cqm_handle, cla_table, buf_node_y, - buf_node_z, y_index, - CQM_CLA_DEL_GPA_WITHOUT_CACHE_INVALID); - } else if (cla_table->cla_lvl == CQM_CLA_LVL_2) { - y_index = (index >> (cla_table->z + 1)) & - ((1U << (cla_table->y - cla_table->z)) - 1); - x_index = index >> (cla_table->y + 1); - tmp = x_index * (trunk_size / sizeof(dma_addr_t)) + y_index; - - if (x_index >= cla_y_buf->buf_number || tmp >= cla_z_buf->buf_number) { - cqm_err(handle->dev_hdl, - "Cla put: index exceeds buf_number, x_index %u, y_index %u, y_buf_number %u, z_buf_number %u\n", - x_index, y_index, cla_y_buf->buf_number, - cla_z_buf->buf_number); - mutex_unlock(&cla_table->lock); - return; - } - - buf_node_x = cla_x_buf->buf_list; - buf_node_y = &cla_y_buf->buf_list[x_index]; - buf_node_z = &cla_z_buf->buf_list[tmp]; - - /* When the value of reference counting on the z node page is 0, - * the z node page is released. - */ - buf_node_z->refcount -= count; - if (buf_node_z->refcount == 0) { - cqm_cla_free(cqm_handle, cla_table, buf_node_y, - buf_node_z, y_index, - CQM_CLA_DEL_GPA_WITHOUT_CACHE_INVALID); - - /* When the value of reference counting on the y node - * page is 0, the y node page is released. - */ - buf_node_y->refcount--; - if (buf_node_y->refcount == 0) - /* Node y requires cache to be invalid. */ - cqm_cla_free(cqm_handle, cla_table, buf_node_x, buf_node_y, x_index, - CQM_CLA_DEL_GPA_WITH_CACHE_INVALID); - } - } - - mutex_unlock(&cla_table->lock); -} - -struct cqm_cla_table *cqm_cla_table_get(struct cqm_bat_table *bat_table, u32 entry_type) -{ - struct cqm_cla_table *cla_table = NULL; - u32 i = 0; - - for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { - cla_table = &bat_table->entry[i]; - if (entry_type == cla_table->type) - return cla_table; - } - - return NULL; -} diff --git a/drivers/scsi/spfc/hw/spfc_cqm_bat_cla.h b/drivers/scsi/spfc/hw/spfc_cqm_bat_cla.h deleted file mode 100644 index 85b060e7935c..000000000000 --- a/drivers/scsi/spfc/hw/spfc_cqm_bat_cla.h +++ /dev/null @@ -1,215 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPFC_CQM_BAT_CLA_H -#define SPFC_CQM_BAT_CLA_H - -/* When the connection check is enabled, the maximum number of connections - * supported by the chip is 1M - 63, which cannot reach 1M - */ -#define CQM_BAT_MAX_CONN_NUM (0x100000 - 63) -#define CQM_BAT_MAX_CACHE_CONN_NUM (0x100000 - 63) - -#define CLA_TABLE_PAGE_ORDER 0 -#define CQM_4K_PAGE_ORDER 0 -#define CQM_4K_PAGE_SIZE 4096 - -#define CQM_BAT_ENTRY_MAX 16 -#define CQM_BAT_ENTRY_SIZE 16 - -#define CQM_BAT_SIZE_FT_PF 192 -#define CQM_BAT_SIZE_FT_VF 112 - -#define CQM_BAT_INDEX0 0 -#define CQM_BAT_INDEX1 1 -#define CQM_BAT_INDEX2 2 -#define CQM_BAT_INDEX3 3 -#define CQM_BAT_INDEX4 4 -#define CQM_BAT_INDEX5 5 -#define CQM_BAT_INDEX6 6 -#define CQM_BAT_INDEX7 7 -#define CQM_BAT_INDEX8 8 -#define CQM_BAT_INDEX9 9 -#define CQM_BAT_INDEX10 10 -#define CQM_BAT_INDEX11 11 -#define CQM_BAT_INDEX12 12 -#define CQM_BAT_INDEX13 13 -#define CQM_BAT_INDEX14 14 -#define CQM_BAT_INDEX15 15 - -enum cqm_bat_entry_type { - CQM_BAT_ENTRY_T_CFG = 0, - CQM_BAT_ENTRY_T_HASH = 1, - CQM_BAT_ENTRY_T_QPC = 2, - CQM_BAT_ENTRY_T_SCQC = 3, - CQM_BAT_ENTRY_T_SRQC = 4, - CQM_BAT_ENTRY_T_MPT = 5, - CQM_BAT_ENTRY_T_GID = 6, - CQM_BAT_ENTRY_T_LUN = 7, - CQM_BAT_ENTRY_T_TASKMAP = 8, - CQM_BAT_ENTRY_T_L3I = 9, - CQM_BAT_ENTRY_T_CHILDC = 10, - CQM_BAT_ENTRY_T_TIMER = 11, - CQM_BAT_ENTRY_T_XID2CID = 12, - CQM_BAT_ENTRY_T_REORDER = 13, - CQM_BAT_ENTRY_T_INVALID = 14, - CQM_BAT_ENTRY_T_MAX = 15, -}; 
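For readers following the index math, the level-1 and level-2 lookups above (cqm_cla_get_unlock_lvl1/lvl2 and the matching branches in cqm_cla_put) recover per-level positions from a flat object index purely with shifts and masks derived from the table's y/z bit boundaries. Below is a minimal standalone sketch of that decomposition; it is plain userspace C for illustration only, and the geometry values, struct and helper names are invented, not driver API.

#include <stdint.h>
#include <stdio.h>

/* Illustrative geometry: z covers bits [0, z], y covers bits [z+1, y]. */
struct cla_geometry {
	uint32_t y;	/* highest bit index covered by the Y level */
	uint32_t z;	/* highest bit index covered by the Z level */
};

/* Level-1 split: index -> (y_index, z_index), as in cqm_cla_get_unlock_lvl1. */
static void cla_split_lvl1(const struct cla_geometry *g, uint32_t index,
			   uint32_t *y_index, uint32_t *z_index)
{
	*z_index = index & ((1U << (g->z + 1)) - 1);
	*y_index = index >> (g->z + 1);
}

/* Level-2 split: index -> (x_index, y_index, z_index), as in lvl2. */
static void cla_split_lvl2(const struct cla_geometry *g, uint32_t index,
			   uint32_t *x_index, uint32_t *y_index,
			   uint32_t *z_index)
{
	*z_index = index & ((1U << (g->z + 1)) - 1);
	*y_index = (index >> (g->z + 1)) & ((1U << (g->y - g->z)) - 1);
	*x_index = index >> (g->y + 1);
}

int main(void)
{
	struct cla_geometry g = { .y = 17, .z = 8 };	/* example widths only */
	uint32_t x, y, z;

	cla_split_lvl1(&g, 0x12345u, &y, &z);
	printf("lvl1: y=%u z=%u\n", y, z);
	cla_split_lvl2(&g, 0x12345u, &x, &y, &z);
	printf("lvl2: x=%u y=%u z=%u\n", x, y, z);
	return 0;
}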
- -/* CLA update mode */ -#define CQM_CLA_RECORD_NEW_GPA 0 -#define CQM_CLA_DEL_GPA_WITHOUT_CACHE_INVALID 1 -#define CQM_CLA_DEL_GPA_WITH_CACHE_INVALID 2 - -#define CQM_CLA_LVL_0 0 -#define CQM_CLA_LVL_1 1 -#define CQM_CLA_LVL_2 2 - -#define CQM_MAX_INDEX_BIT 19 - -#define CQM_CHIP_CACHELINE 256 -#define CQM_CHIP_TIMER_CACHELINE 512 -#define CQM_OBJECT_256 256 -#define CQM_OBJECT_512 512 -#define CQM_OBJECT_1024 1024 -#define CQM_CHIP_GPA_MASK 0x1ffffffffffffff -#define CQM_CHIP_GPA_HIMASK 0x1ffffff -#define CQM_CHIP_GPA_LOMASK 0xffffffff -#define CQM_CHIP_GPA_HSHIFT 32 - -/* Aligns with 64 buckets and shifts rightward by 6 bits */ -#define CQM_HASH_NUMBER_UNIT 6 - -struct cqm_cla_table { - u32 type; - u32 max_buffer_size; - u32 obj_num; - bool alloc_static; /* Whether the buffer is statically allocated */ - u32 cla_lvl; - u32 cacheline_x; /* x value calculated based on cacheline, used by the chip */ - u32 cacheline_y; /* y value calculated based on cacheline, used by the chip */ - u32 cacheline_z; /* z value calculated based on cacheline, used by the chip */ - u32 x; /* x value calculated based on obj_size, used by software */ - u32 y; /* y value calculated based on obj_size, used by software */ - u32 z; /* z value calculated based on obj_size, used by software */ - struct cqm_buf cla_x_buf; - struct cqm_buf cla_y_buf; - struct cqm_buf cla_z_buf; - u32 trunk_order; /* A continuous physical page contains 2^order pages */ - u32 obj_size; - struct mutex lock; /* Lock for cla buffer allocation and free */ - - struct cqm_bitmap bitmap; - - struct cqm_object_table obj_table; /* Mapping table between indexes and objects */ -}; - -struct cqm_bat_entry_cfg { - u32 cur_conn_num_h_4 : 4; - u32 rsv1 : 4; - u32 max_conn_num : 20; - u32 rsv2 : 4; - - u32 max_conn_cache : 10; - u32 rsv3 : 6; - u32 cur_conn_num_l_16 : 16; - - u32 bloom_filter_addr : 16; - u32 cur_conn_cache : 10; - u32 rsv4 : 6; - - u32 bucket_num : 16; - u32 bloom_filter_len : 16; -}; - -#define CQM_BAT_NO_BYPASS_CACHE 0 -#define CQM_BAT_BYPASS_CACHE 1 - -#define CQM_BAT_ENTRY_SIZE_256 0 -#define CQM_BAT_ENTRY_SIZE_512 1 -#define CQM_BAT_ENTRY_SIZE_1024 2 - -struct cqm_bat_entry_standerd { - u32 entry_size : 2; - u32 rsv1 : 6; - u32 max_number : 20; - u32 rsv2 : 4; - - u32 cla_gpa_h : 32; - - u32 cla_gpa_l : 32; - - u32 rsv3 : 8; - u32 z : 5; - u32 y : 5; - u32 x : 5; - u32 rsv24 : 1; - u32 bypass : 1; - u32 cla_level : 2; - u32 rsv5 : 5; -}; - -struct cqm_bat_entry_vf2pf { - u32 cla_gpa_h : 25; - u32 pf_id : 5; - u32 fake_vf_en : 1; - u32 acs_spu_en : 1; -}; - -#define CQM_BAT_ENTRY_TASKMAP_NUM 4 -struct cqm_bat_entry_taskmap_addr { - u32 gpa_h; - u32 gpa_l; -}; - -struct cqm_bat_entry_taskmap { - struct cqm_bat_entry_taskmap_addr addr[CQM_BAT_ENTRY_TASKMAP_NUM]; -}; - -struct cqm_bat_table { - u32 bat_entry_type[CQM_BAT_ENTRY_MAX]; - u8 bat[CQM_BAT_ENTRY_MAX * CQM_BAT_ENTRY_SIZE]; - struct cqm_cla_table entry[CQM_BAT_ENTRY_MAX]; - /* In LB mode 1, the timer needs to be configured in 4 SMFs, - * and the GPAs must be different and independent. 
- */ - struct cqm_cla_table timer_entry[4]; - u32 bat_size; -}; - -#define CQM_BAT_MAX_SIZE 256 -struct cqm_cmdq_bat_update { - u32 offset; - u32 byte_len; - u8 data[CQM_BAT_MAX_SIZE]; - u32 smf_id; - u32 func_id; -}; - -struct cqm_cla_update_cmd { - /* Gpa address to be updated */ - u32 gpa_h; - u32 gpa_l; - - /* Updated Value */ - u32 value_h; - u32 value_l; - - u32 smf_id; - u32 func_id; -}; - -s32 cqm_bat_init(struct cqm_handle *cqm_handle); -void cqm_bat_uninit(struct cqm_handle *cqm_handle); -s32 cqm_cla_init(struct cqm_handle *cqm_handle); -void cqm_cla_uninit(struct cqm_handle *cqm_handle, u32 entry_numb); -u8 *cqm_cla_get_unlock(struct cqm_handle *cqm_handle, struct cqm_cla_table *cla_table, - u32 index, u32 count, dma_addr_t *pa); -u8 *cqm_cla_get_lock(struct cqm_handle *cqm_handle, struct cqm_cla_table *cla_table, - u32 index, u32 count, dma_addr_t *pa); -void cqm_cla_put(struct cqm_handle *cqm_handle, struct cqm_cla_table *cla_table, - u32 index, u32 count); -struct cqm_cla_table *cqm_cla_table_get(struct cqm_bat_table *bat_table, u32 entry_type); -u32 cqm_funcid2smfid(struct cqm_handle *cqm_handle); - -#endif /* SPFC_CQM_BAT_CLA_H */ diff --git a/drivers/scsi/spfc/hw/spfc_cqm_bitmap_table.c b/drivers/scsi/spfc/hw/spfc_cqm_bitmap_table.c deleted file mode 100644 index 21100e8db8f4..000000000000 --- a/drivers/scsi/spfc/hw/spfc_cqm_bitmap_table.c +++ /dev/null @@ -1,885 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include <linux/types.h> -#include <linux/sched.h> -#include <linux/pci.h> -#include <linux/module.h> -#include <linux/vmalloc.h> -#include <linux/device.h> -#include <linux/mm.h> -#include <linux/gfp.h> - -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "sphw_hwdev.h" -#include "sphw_hwif.h" - -#include "spfc_cqm_object.h" -#include "spfc_cqm_bitmap_table.h" -#include "spfc_cqm_bat_cla.h" -#include "spfc_cqm_main.h" - -#define common_section - -void cqm_swab64(u8 *addr, u32 cnt) -{ - u64 *temp = (u64 *)addr; - u64 value = 0; - u32 i; - - for (i = 0; i < cnt; i++) { - value = __swab64(*temp); - *temp = value; - temp++; - } -} - -void cqm_swab32(u8 *addr, u32 cnt) -{ - u32 *temp = (u32 *)addr; - u32 value = 0; - u32 i; - - for (i = 0; i < cnt; i++) { - value = __swab32(*temp); - *temp = value; - temp++; - } -} - -s32 cqm_shift(u32 data) -{ - s32 shift = -1; - - do { - data >>= 1; - shift++; - } while (data); - - return shift; -} - -bool cqm_check_align(u32 data) -{ - if (data == 0) - return false; - - do { - /* When the value can be exactly divided by 2, - * the value of data is shifted right by one bit, that is, - * divided by 2. - */ - if ((data & 0x1) == 0) - data >>= 1; - /* If the value cannot be divisible by 2, the value is - * not 2^n-aligned and false is returned. - */ - else - return false; - } while (data != 1); - - return true; -} - -void *cqm_kmalloc_align(size_t size, gfp_t flags, u16 align_order) -{ - void *orig_addr = NULL; - void *align_addr = NULL; - void *index_addr = NULL; - - orig_addr = kmalloc(size + ((u64)1 << align_order) + sizeof(void *), - flags); - if (!orig_addr) - return NULL; - - index_addr = (void *)((char *)orig_addr + sizeof(void *)); - align_addr = - (void *)((((u64)index_addr + ((u64)1 << align_order) - 1) >> - align_order) << align_order); - - /* Record the original memory address for memory release. 
*/ - index_addr = (void *)((char *)align_addr - sizeof(void *)); - *(void **)index_addr = orig_addr; - - return align_addr; -} - -void cqm_kfree_align(void *addr) -{ - void *index_addr = NULL; - - /* Release the original memory address. */ - index_addr = (void *)((char *)addr - sizeof(void *)); - - kfree(*(void **)index_addr); -} - -void cqm_write_lock(rwlock_t *lock, bool bh) -{ - if (bh) - write_lock_bh(lock); - else - write_lock(lock); -} - -void cqm_write_unlock(rwlock_t *lock, bool bh) -{ - if (bh) - write_unlock_bh(lock); - else - write_unlock(lock); -} - -void cqm_read_lock(rwlock_t *lock, bool bh) -{ - if (bh) - read_lock_bh(lock); - else - read_lock(lock); -} - -void cqm_read_unlock(rwlock_t *lock, bool bh) -{ - if (bh) - read_unlock_bh(lock); - else - read_unlock(lock); -} - -s32 cqm_buf_alloc_direct(struct cqm_handle *cqm_handle, struct cqm_buf *buf, bool direct) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct page **pages = NULL; - u32 i, j, order; - - order = get_order(buf->buf_size); - - if (!direct) { - buf->direct.va = NULL; - return CQM_SUCCESS; - } - - pages = vmalloc(sizeof(struct page *) * buf->page_number); - if (!pages) { - cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(pages)); - return CQM_FAIL; - } - - for (i = 0; i < buf->buf_number; i++) { - for (j = 0; j < ((u32)1 << order); j++) - pages[(ulong)(unsigned int)((i << order) + j)] = - (void *)virt_to_page((u8 *)(buf->buf_list[i].va) + - (PAGE_SIZE * j)); - } - - buf->direct.va = vmap(pages, buf->page_number, VM_MAP, PAGE_KERNEL); - vfree(pages); - if (!buf->direct.va) { - cqm_err(handle->dev_hdl, CQM_MAP_FAIL(buf->direct.va)); - return CQM_FAIL; - } - - return CQM_SUCCESS; -} - -s32 cqm_buf_alloc_page(struct cqm_handle *cqm_handle, struct cqm_buf *buf) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct page *newpage = NULL; - u32 order; - void *va = NULL; - s32 i, node; - - order = get_order(buf->buf_size); - /* Page for applying for each buffer for non-ovs */ - if (handle->board_info.service_mode != 0) { - for (i = 0; i < (s32)buf->buf_number; i++) { - va = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - order); - if (!va) { - cqm_err(handle->dev_hdl, - CQM_ALLOC_FAIL(buf_page)); - break; - } - /* Initialize the page after the page is applied for. - * If hash entries are involved, the initialization - * value must be 0. - */ - memset(va, 0, buf->buf_size); - buf->buf_list[i].va = va; - } - } else { - node = dev_to_node(handle->dev_hdl); - for (i = 0; i < (s32)buf->buf_number; i++) { - newpage = alloc_pages_node(node, - GFP_KERNEL | __GFP_ZERO, - order); - if (!newpage) { - cqm_err(handle->dev_hdl, - CQM_ALLOC_FAIL(buf_page)); - break; - } - va = (void *)page_address(newpage); - /* Initialize the page after the page is applied for. - * If hash entries are involved, the initialization - * value must be 0. 
- */ - memset(va, 0, buf->buf_size); - buf->buf_list[i].va = va; - } - } - - if (i != buf->buf_number) { - i--; - for (; i >= 0; i--) { - free_pages((ulong)(buf->buf_list[i].va), order); - buf->buf_list[i].va = NULL; - } - return CQM_FAIL; - } - - return CQM_SUCCESS; -} - -s32 cqm_buf_alloc_map(struct cqm_handle *cqm_handle, struct cqm_buf *buf) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct pci_dev *dev = cqm_handle->dev; - void *va = NULL; - s32 i; - - for (i = 0; i < (s32)buf->buf_number; i++) { - va = buf->buf_list[i].va; - buf->buf_list[i].pa = pci_map_single(dev, va, buf->buf_size, - PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(dev, buf->buf_list[i].pa)) { - cqm_err(handle->dev_hdl, CQM_MAP_FAIL(buf_list)); - break; - } - } - - if (i != buf->buf_number) { - i--; - for (; i >= 0; i--) - pci_unmap_single(dev, buf->buf_list[i].pa, - buf->buf_size, PCI_DMA_BIDIRECTIONAL); - return CQM_FAIL; - } - - return CQM_SUCCESS; -} - -s32 cqm_buf_alloc(struct cqm_handle *cqm_handle, struct cqm_buf *buf, bool direct) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct pci_dev *dev = cqm_handle->dev; - u32 order; - s32 i; - - order = get_order(buf->buf_size); - - /* Applying for the buffer list descriptor space */ - buf->buf_list = vmalloc(buf->buf_number * sizeof(struct cqm_buf_list)); - CQM_PTR_CHECK_RET(buf->buf_list, CQM_FAIL, - CQM_ALLOC_FAIL(linux_buf_list)); - memset(buf->buf_list, 0, buf->buf_number * sizeof(struct cqm_buf_list)); - - /* Page for applying for each buffer */ - if (cqm_buf_alloc_page(cqm_handle, buf) == CQM_FAIL) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(linux_cqm_buf_alloc_page)); - goto err1; - } - - /* PCI mapping of the buffer */ - if (cqm_buf_alloc_map(cqm_handle, buf) == CQM_FAIL) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(linux_cqm_buf_alloc_map)); - goto err2; - } - - /* direct remapping */ - if (cqm_buf_alloc_direct(cqm_handle, buf, direct) == CQM_FAIL) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_buf_alloc_direct)); - goto err3; - } - - return CQM_SUCCESS; - -err3: - for (i = 0; i < (s32)buf->buf_number; i++) - pci_unmap_single(dev, buf->buf_list[i].pa, buf->buf_size, - PCI_DMA_BIDIRECTIONAL); -err2: - for (i = 0; i < (s32)buf->buf_number; i++) { - free_pages((ulong)(buf->buf_list[i].va), order); - buf->buf_list[i].va = NULL; - } -err1: - vfree(buf->buf_list); - buf->buf_list = NULL; - return CQM_FAIL; -} - -void cqm_buf_free(struct cqm_buf *buf, struct pci_dev *dev) -{ - u32 order; - s32 i; - - order = get_order(buf->buf_size); - - if (buf->direct.va) { - vunmap(buf->direct.va); - buf->direct.va = NULL; - } - - if (buf->buf_list) { - for (i = 0; i < (s32)(buf->buf_number); i++) { - if (buf->buf_list[i].va) { - pci_unmap_single(dev, buf->buf_list[i].pa, - buf->buf_size, - PCI_DMA_BIDIRECTIONAL); - - free_pages((ulong)(buf->buf_list[i].va), order); - buf->buf_list[i].va = NULL; - } - } - - vfree(buf->buf_list); - buf->buf_list = NULL; - } -} - -s32 cqm_cla_cache_invalid_cmd(struct cqm_handle *cqm_handle, struct cqm_cmd_buf *buf_in, - struct cqm_cla_cache_invalid_cmd *cmd) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_cla_cache_invalid_cmd *cla_cache_invalid_cmd = NULL; - s32 ret; - - cla_cache_invalid_cmd = (struct cqm_cla_cache_invalid_cmd *)(buf_in->buf); - cla_cache_invalid_cmd->gpa_h = cmd->gpa_h; - cla_cache_invalid_cmd->gpa_l = cmd->gpa_l; - cla_cache_invalid_cmd->cache_size = cmd->cache_size; - cla_cache_invalid_cmd->smf_id = cmd->smf_id; - cla_cache_invalid_cmd->func_id = 
cmd->func_id; - - cqm_swab32((u8 *)cla_cache_invalid_cmd, - /* shift 2 bits by right to get length of dw(4B) */ - (sizeof(struct cqm_cla_cache_invalid_cmd) >> 2)); - - /* Send the cmdq command. */ - ret = cqm3_send_cmd_box((void *)(cqm_handle->ex_handle), CQM_MOD_CQM, - CQM_CMD_T_CLA_CACHE_INVALID, buf_in, NULL, NULL, - CQM_CMD_TIMEOUT, SPHW_CHANNEL_DEFAULT); - if (ret != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm3_send_cmd_box)); - cqm_err(handle->dev_hdl, - "Cla cache invalid: cqm3_send_cmd_box_ret=%d\n", - ret); - cqm_err(handle->dev_hdl, - "Cla cache invalid: cla_cache_invalid_cmd: 0x%x 0x%x 0x%x\n", - cmd->gpa_h, cmd->gpa_l, cmd->cache_size); - return CQM_FAIL; - } - - return CQM_SUCCESS; -} - -s32 cqm_cla_cache_invalid(struct cqm_handle *cqm_handle, dma_addr_t gpa, u32 cache_size) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_cmd_buf *buf_in = NULL; - struct cqm_cla_cache_invalid_cmd cmd; - s32 ret = CQM_FAIL; - u32 i; - - buf_in = cqm3_cmd_alloc((void *)(cqm_handle->ex_handle)); - CQM_PTR_CHECK_RET(buf_in, CQM_FAIL, CQM_ALLOC_FAIL(buf_in)); - buf_in->size = sizeof(struct cqm_cla_cache_invalid_cmd); - - /* Fill command and convert it to big endian */ - cmd.cache_size = cache_size; - cmd.gpa_h = CQM_ADDR_HI(gpa); - cmd.gpa_l = CQM_ADDR_LW(gpa); - - /* In non-fake mode, set func_id to 0xffff. */ - cmd.func_id = 0xffff; - - /* Mode 0 is hashed to 4 SMF engines (excluding PPF) by func ID. */ - if (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_NORMAL || - (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_0 && - cqm_handle->func_attribute.func_type != CQM_PPF)) { - cmd.smf_id = cqm_funcid2smfid(cqm_handle); - ret = cqm_cla_cache_invalid_cmd(cqm_handle, buf_in, &cmd); - } - /* Mode 1/2 are allocated to 4 SMF engines by flow. Therefore, - * one function needs to be allocated to 4 SMF engines. - */ - /* The PPF in mode 0 needs to be configured on 4 engines, - * and the timer resources need to be shared by the 4 engines. - */ - else if (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_1 || - cqm_handle->func_capability.lb_mode == CQM_LB_MODE_2 || - (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_0 && - cqm_handle->func_attribute.func_type == CQM_PPF)) { - for (i = 0; i < CQM_LB_SMF_MAX; i++) { - /* The smf_pg stored currently enabled SMF engine. */ - if (cqm_handle->func_capability.smf_pg & (1U << i)) { - cmd.smf_id = i; - ret = cqm_cla_cache_invalid_cmd(cqm_handle, - buf_in, &cmd); - if (ret != CQM_SUCCESS) - goto out; - } - } - } else { - cqm_err(handle->dev_hdl, "Cla cache invalid: unsupport lb mode=%u\n", - cqm_handle->func_capability.lb_mode); - ret = CQM_FAIL; - } - -out: - cqm3_cmd_free((void *)(cqm_handle->ex_handle), buf_in); - return ret; -} - -static void free_cache_inv(struct cqm_handle *cqm_handle, struct cqm_buf *buf, - s32 *inv_flag) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - u32 order; - s32 i; - - order = get_order(buf->buf_size); - - if (!handle->chip_present_flag) - return; - - if (!buf->buf_list) - return; - - for (i = 0; i < (s32)(buf->buf_number); i++) { - if (!buf->buf_list[i].va) - continue; - - if (*inv_flag != CQM_SUCCESS) - continue; - - /* In the Pangea environment, if the cmdq times out, - * no subsequent message is sent. 
- */ - *inv_flag = cqm_cla_cache_invalid(cqm_handle, buf->buf_list[i].pa, - (u32)(PAGE_SIZE << order)); - if (*inv_flag != CQM_SUCCESS) - cqm_err(handle->dev_hdl, - "Buffer free: fail to invalid buf_list pa cache, inv_flag=%d\n", - *inv_flag); - } -} - -void cqm_buf_free_cache_inv(struct cqm_handle *cqm_handle, struct cqm_buf *buf, - s32 *inv_flag) -{ - /* Send a command to the chip to kick out the cache. */ - free_cache_inv(cqm_handle, buf, inv_flag); - - /* Clear host resources */ - cqm_buf_free(buf, cqm_handle->dev); -} - -#define bitmap_section - -s32 cqm_single_bitmap_init(struct cqm_bitmap *bitmap) -{ - u32 bit_number; - - spin_lock_init(&bitmap->lock); - - /* Max_num of the bitmap is 8-aligned and then - * shifted rightward by 3 bits to obtain the number of bytes required. - */ - bit_number = (ALIGN(bitmap->max_num, CQM_NUM_BIT_BYTE) >> CQM_BYTE_BIT_SHIFT); - bitmap->table = vmalloc(bit_number); - CQM_PTR_CHECK_RET(bitmap->table, CQM_FAIL, CQM_ALLOC_FAIL(bitmap->table)); - memset(bitmap->table, 0, bit_number); - - return CQM_SUCCESS; -} - -s32 cqm_bitmap_init(struct cqm_handle *cqm_handle) -{ - struct cqm_func_capability *capability = &cqm_handle->func_capability; - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_cla_table *cla_table = NULL; - struct cqm_bitmap *bitmap = NULL; - s32 ret = CQM_SUCCESS; - u32 i; - - for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { - cla_table = &bat_table->entry[i]; - if (cla_table->obj_num == 0) { - cqm_info(handle->dev_hdl, - "Cla alloc: cla_type %u, obj_num=0, don't init bitmap\n", - cla_table->type); - continue; - } - - bitmap = &cla_table->bitmap; - - switch (cla_table->type) { - case CQM_BAT_ENTRY_T_QPC: - bitmap->max_num = capability->qpc_number; - bitmap->reserved_top = capability->qpc_reserved; - bitmap->last = capability->qpc_reserved; - cqm_info(handle->dev_hdl, - "Bitmap init: cla_table_type=%u, max_num=0x%x\n", - cla_table->type, bitmap->max_num); - ret = cqm_single_bitmap_init(bitmap); - break; - case CQM_BAT_ENTRY_T_MPT: - bitmap->max_num = capability->mpt_number; - bitmap->reserved_top = capability->mpt_reserved; - bitmap->last = capability->mpt_reserved; - cqm_info(handle->dev_hdl, - "Bitmap init: cla_table_type=%u, max_num=0x%x\n", - cla_table->type, bitmap->max_num); - ret = cqm_single_bitmap_init(bitmap); - break; - case CQM_BAT_ENTRY_T_SCQC: - bitmap->max_num = capability->scqc_number; - bitmap->reserved_top = capability->scq_reserved; - bitmap->last = capability->scq_reserved; - cqm_info(handle->dev_hdl, - "Bitmap init: cla_table_type=%u, max_num=0x%x\n", - cla_table->type, bitmap->max_num); - ret = cqm_single_bitmap_init(bitmap); - break; - case CQM_BAT_ENTRY_T_SRQC: - bitmap->max_num = capability->srqc_number; - bitmap->reserved_top = capability->srq_reserved; - bitmap->last = capability->srq_reserved; - cqm_info(handle->dev_hdl, - "Bitmap init: cla_table_type=%u, max_num=0x%x\n", - cla_table->type, bitmap->max_num); - ret = cqm_single_bitmap_init(bitmap); - break; - default: - break; - } - - if (ret != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, - "Bitmap init: failed to init cla_table_type=%u, obj_num=0x%x\n", - cla_table->type, cla_table->obj_num); - goto err; - } - } - - return CQM_SUCCESS; - -err: - cqm_bitmap_uninit(cqm_handle); - return CQM_FAIL; -} - -void cqm_bitmap_uninit(struct cqm_handle *cqm_handle) -{ - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct cqm_cla_table *cla_table = NULL; - struct cqm_bitmap *bitmap = NULL; - u32 i; - 
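cqm_single_bitmap_init() above sizes the bitmap backing store by rounding max_num up to a whole number of bytes (ALIGN to 8 bits, then shift right by 3) and zeroing it. A minimal standalone equivalent of that sizing, assuming only libc and using illustrative names:

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#define BITS_PER_BYTE	8U

/* Allocate a zeroed table large enough to hold max_num bits. */
static unsigned char *bitmap_table_alloc(uint32_t max_num, size_t *bytes_out)
{
	/* ALIGN(max_num, 8) >> 3 == number of bytes covering max_num bits */
	size_t bytes = ((size_t)max_num + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
	unsigned char *table = calloc(bytes, 1);	/* zeroed backing store */

	if (table && bytes_out)
		*bytes_out = bytes;
	return table;
}

int main(void)
{
	size_t bytes = 0;
	unsigned char *table = bitmap_table_alloc(1000, &bytes);

	if (!table)
		return 1;
	printf("1000 bits need %zu bytes\n", bytes);	/* prints 125 */
	free(table);
	return 0;
}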
- for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { - cla_table = &bat_table->entry[i]; - bitmap = &cla_table->bitmap; - if (cla_table->type != CQM_BAT_ENTRY_T_INVALID) { - if (bitmap->table) { - vfree(bitmap->table); - bitmap->table = NULL; - } - } - } -} - -u32 cqm_bitmap_check_range(const ulong *table, u32 step, u32 max_num, u32 begin, - u32 count) -{ - u32 end = (begin + (count - 1)); - u32 i; - - /* Single-bit check is not performed. */ - if (count == 1) - return begin; - - /* The end value exceeds the threshold. */ - if (end >= max_num) - return max_num; - - /* Bit check, the next bit is returned when a non-zero bit is found. */ - for (i = (begin + 1); i <= end; i++) { - if (test_bit((s32)i, table)) - return i + 1; - } - - /* Check whether it's in different steps. */ - if ((begin & (~(step - 1))) != (end & (~(step - 1)))) - return (end & (~(step - 1))); - - /* If the check succeeds, begin is returned. */ - return begin; -} - -void cqm_bitmap_find(struct cqm_bitmap *bitmap, u32 *index, u32 last, u32 step, u32 count) -{ - u32 max_num = bitmap->max_num; - ulong *table = bitmap->table; - - do { - *index = (u32)find_next_zero_bit(table, max_num, last); - if (*index < max_num) - last = cqm_bitmap_check_range(table, step, max_num, - *index, count); - else - break; - } while (last != *index); -} - -u32 cqm_bitmap_alloc(struct cqm_bitmap *bitmap, u32 step, u32 count, bool update_last) -{ - u32 index = 0; - u32 max_num = bitmap->max_num; - u32 last = bitmap->last; - ulong *table = bitmap->table; - u32 i; - - spin_lock(&bitmap->lock); - - /* Search for an idle bit from the last position. */ - cqm_bitmap_find(bitmap, &index, last, step, count); - - /* The preceding search fails. Search for an idle bit - * from the beginning. - */ - if (index >= max_num) { - last = bitmap->reserved_top; - cqm_bitmap_find(bitmap, &index, last, step, count); - } - - /* Set the found bit to 1 and reset last. 
*/ - if (index < max_num) { - for (i = index; i < (index + count); i++) - set_bit(i, table); - - if (update_last) { - bitmap->last = (index + count); - if (bitmap->last >= bitmap->max_num) - bitmap->last = bitmap->reserved_top; - } - } - - spin_unlock(&bitmap->lock); - return index; -} - -u32 cqm_bitmap_alloc_reserved(struct cqm_bitmap *bitmap, u32 count, u32 index) -{ - ulong *table = bitmap->table; - u32 ret_index; - - if (index >= bitmap->reserved_top || index >= bitmap->max_num || count != 1) - return CQM_INDEX_INVALID; - - spin_lock(&bitmap->lock); - - if (test_bit((s32)index, table)) { - ret_index = CQM_INDEX_INVALID; - } else { - set_bit(index, table); - ret_index = index; - } - - spin_unlock(&bitmap->lock); - return ret_index; -} - -void cqm_bitmap_free(struct cqm_bitmap *bitmap, u32 index, u32 count) -{ - u32 i; - - spin_lock(&bitmap->lock); - - for (i = index; i < (index + count); i++) - clear_bit((s32)i, bitmap->table); - - spin_unlock(&bitmap->lock); -} - -#define obj_table_section -s32 cqm_single_object_table_init(struct cqm_object_table *obj_table) -{ - rwlock_init(&obj_table->lock); - - obj_table->table = vmalloc(obj_table->max_num * sizeof(void *)); - CQM_PTR_CHECK_RET(obj_table->table, CQM_FAIL, CQM_ALLOC_FAIL(table)); - memset(obj_table->table, 0, obj_table->max_num * sizeof(void *)); - return CQM_SUCCESS; -} - -s32 cqm_object_table_init(struct cqm_handle *cqm_handle) -{ - struct cqm_func_capability *capability = &cqm_handle->func_capability; - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_object_table *obj_table = NULL; - struct cqm_cla_table *cla_table = NULL; - s32 ret = CQM_SUCCESS; - u32 i; - - for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { - cla_table = &bat_table->entry[i]; - if (cla_table->obj_num == 0) { - cqm_info(handle->dev_hdl, - "Obj table init: cla_table_type %u, obj_num=0, don't init obj table\n", - cla_table->type); - continue; - } - - obj_table = &cla_table->obj_table; - - switch (cla_table->type) { - case CQM_BAT_ENTRY_T_QPC: - obj_table->max_num = capability->qpc_number; - ret = cqm_single_object_table_init(obj_table); - break; - case CQM_BAT_ENTRY_T_MPT: - obj_table->max_num = capability->mpt_number; - ret = cqm_single_object_table_init(obj_table); - break; - case CQM_BAT_ENTRY_T_SCQC: - obj_table->max_num = capability->scqc_number; - ret = cqm_single_object_table_init(obj_table); - break; - case CQM_BAT_ENTRY_T_SRQC: - obj_table->max_num = capability->srqc_number; - ret = cqm_single_object_table_init(obj_table); - break; - default: - break; - } - - if (ret != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, - "Obj table init: failed to init cla_table_type=%u, obj_num=0x%x\n", - cla_table->type, cla_table->obj_num); - goto err; - } - } - - return CQM_SUCCESS; - -err: - cqm_object_table_uninit(cqm_handle); - return CQM_FAIL; -} - -void cqm_object_table_uninit(struct cqm_handle *cqm_handle) -{ - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct cqm_object_table *obj_table = NULL; - struct cqm_cla_table *cla_table = NULL; - u32 i; - - for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { - cla_table = &bat_table->entry[i]; - obj_table = &cla_table->obj_table; - if (cla_table->type != CQM_BAT_ENTRY_T_INVALID) { - if (obj_table->table) { - vfree(obj_table->table); - obj_table->table = NULL; - } - } - } -} - -s32 cqm_object_table_insert(struct cqm_handle *cqm_handle, - struct cqm_object_table *object_table, - u32 index, struct cqm_object *obj, bool bh) -{ - struct sphw_hwdev *handle = 
cqm_handle->ex_handle; - - if (index >= object_table->max_num) { - cqm_err(handle->dev_hdl, - "Obj table insert: index 0x%x exceeds max_num 0x%x\n", - index, object_table->max_num); - return CQM_FAIL; - } - - cqm_write_lock(&object_table->lock, bh); - - if (!object_table->table[index]) { - object_table->table[index] = obj; - cqm_write_unlock(&object_table->lock, bh); - return CQM_SUCCESS; - } - - cqm_write_unlock(&object_table->lock, bh); - cqm_err(handle->dev_hdl, - "Obj table insert: object_table->table[0x%x] has been inserted\n", - index); - - return CQM_FAIL; -} - -void cqm_object_table_remove(struct cqm_handle *cqm_handle, - struct cqm_object_table *object_table, - u32 index, const struct cqm_object *obj, bool bh) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - - if (index >= object_table->max_num) { - cqm_err(handle->dev_hdl, - "Obj table remove: index 0x%x exceeds max_num 0x%x\n", - index, object_table->max_num); - return; - } - - cqm_write_lock(&object_table->lock, bh); - - if (object_table->table[index] && object_table->table[index] == obj) - object_table->table[index] = NULL; - else - cqm_err(handle->dev_hdl, - "Obj table remove: object_table->table[0x%x] has been removed\n", - index); - - cqm_write_unlock(&object_table->lock, bh); -} - -struct cqm_object *cqm_object_table_get(struct cqm_handle *cqm_handle, - struct cqm_object_table *object_table, - u32 index, bool bh) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_object *obj = NULL; - - if (index >= object_table->max_num) { - cqm_err(handle->dev_hdl, - "Obj table get: index 0x%x exceeds max_num 0x%x\n", - index, object_table->max_num); - return NULL; - } - - cqm_read_lock(&object_table->lock, bh); - - obj = object_table->table[index]; - if (obj) - atomic_inc(&obj->refcount); - - cqm_read_unlock(&object_table->lock, bh); - - return obj; -} diff --git a/drivers/scsi/spfc/hw/spfc_cqm_bitmap_table.h b/drivers/scsi/spfc/hw/spfc_cqm_bitmap_table.h deleted file mode 100644 index 5ae554eac54a..000000000000 --- a/drivers/scsi/spfc/hw/spfc_cqm_bitmap_table.h +++ /dev/null @@ -1,65 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPFC_CQM_BITMAP_TABLE_H -#define SPFC_CQM_BITMAP_TABLE_H - -struct cqm_bitmap { - ulong *table; - u32 max_num; - u32 last; - u32 reserved_top; /* reserved index */ - spinlock_t lock; -}; - -struct cqm_object_table { - /* Now is big array. Later will be optimized as a red-black tree. 
*/ - struct cqm_object **table; - u32 max_num; - rwlock_t lock; -}; - -struct cqm_cla_cache_invalid_cmd { - u32 gpa_h; - u32 gpa_l; - - u32 cache_size; /* CLA cache size=4096B */ - - u32 smf_id; - u32 func_id; -}; - -struct cqm_handle; - -s32 cqm_bitmap_init(struct cqm_handle *cqm_handle); -void cqm_bitmap_uninit(struct cqm_handle *cqm_handle); -u32 cqm_bitmap_alloc(struct cqm_bitmap *bitmap, u32 step, u32 count, bool update_last); -u32 cqm_bitmap_alloc_reserved(struct cqm_bitmap *bitmap, u32 count, u32 index); -void cqm_bitmap_free(struct cqm_bitmap *bitmap, u32 index, u32 count); -s32 cqm_object_table_init(struct cqm_handle *cqm_handle); -void cqm_object_table_uninit(struct cqm_handle *cqm_handle); -s32 cqm_object_table_insert(struct cqm_handle *cqm_handle, - struct cqm_object_table *object_table, - u32 index, struct cqm_object *obj, bool bh); -void cqm_object_table_remove(struct cqm_handle *cqm_handle, - struct cqm_object_table *object_table, - u32 index, const struct cqm_object *obj, bool bh); -struct cqm_object *cqm_object_table_get(struct cqm_handle *cqm_handle, - struct cqm_object_table *object_table, - u32 index, bool bh); - -void cqm_swab64(u8 *addr, u32 cnt); -void cqm_swab32(u8 *addr, u32 cnt); -bool cqm_check_align(u32 data); -s32 cqm_shift(u32 data); -s32 cqm_buf_alloc(struct cqm_handle *cqm_handle, struct cqm_buf *buf, bool direct); -s32 cqm_buf_alloc_direct(struct cqm_handle *cqm_handle, struct cqm_buf *buf, bool direct); -void cqm_buf_free(struct cqm_buf *buf, struct pci_dev *dev); -void cqm_buf_free_cache_inv(struct cqm_handle *cqm_handle, struct cqm_buf *buf, - s32 *inv_flag); -s32 cqm_cla_cache_invalid(struct cqm_handle *cqm_handle, dma_addr_t gpa, - u32 cache_size); -void *cqm_kmalloc_align(size_t size, gfp_t flags, u16 align_order); -void cqm_kfree_align(void *addr); - -#endif diff --git a/drivers/scsi/spfc/hw/spfc_cqm_main.c b/drivers/scsi/spfc/hw/spfc_cqm_main.c deleted file mode 100644 index 52cc2c7838e9..000000000000 --- a/drivers/scsi/spfc/hw/spfc_cqm_main.c +++ /dev/null @@ -1,987 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include <linux/types.h> -#include <linux/sched.h> -#include <linux/pci.h> -#include <linux/module.h> -#include <linux/delay.h> -#include <linux/vmalloc.h> - -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "sphw_hw_cfg.h" -#include "spfc_cqm_main.h" - -s32 cqm3_init(void *ex_handle) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_handle *cqm_handle = NULL; - s32 ret; - - CQM_PTR_CHECK_RET(ex_handle, CQM_FAIL, CQM_PTR_NULL(ex_handle)); - - cqm_handle = kmalloc(sizeof(*cqm_handle), GFP_KERNEL | __GFP_ZERO); - CQM_PTR_CHECK_RET(cqm_handle, CQM_FAIL, CQM_ALLOC_FAIL(cqm_handle)); - - /* Clear the memory to prevent other systems from - * not clearing the memory. - */ - memset(cqm_handle, 0, sizeof(struct cqm_handle)); - - cqm_handle->ex_handle = handle; - cqm_handle->dev = (struct pci_dev *)(handle->pcidev_hdl); - handle->cqm_hdl = (void *)cqm_handle; - - /* Clearing Statistics */ - memset(&handle->hw_stats.cqm_stats, 0, sizeof(struct cqm_stats)); - - /* Reads VF/PF information. 
*/ - cqm_handle->func_attribute = handle->hwif->attr; - cqm_info(handle->dev_hdl, "Func init: function[%u] type %d(0:PF,1:VF,2:PPF)\n", - cqm_handle->func_attribute.func_global_idx, - cqm_handle->func_attribute.func_type); - - /* Read capability from configuration management module */ - ret = cqm_capability_init(ex_handle); - if (ret == CQM_FAIL) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_capability_init)); - goto err1; - } - - /* Initialize memory entries such as BAT, CLA, and bitmap. */ - if (cqm_mem_init(ex_handle) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_mem_init)); - goto err1; - } - - /* Event callback initialization */ - if (cqm_event_init(ex_handle) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_event_init)); - goto err2; - } - - /* Doorbell initiation */ - if (cqm_db_init(ex_handle) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_db_init)); - goto err3; - } - - /* Initialize the bloom filter. */ - if (cqm_bloomfilter_init(ex_handle) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_bloomfilter_init)); - goto err4; - } - - /* The timer bitmap is set directly at the beginning of the CQM. - * The ifconfig up/down command is not used to set or clear the bitmap. - */ - if (sphw_func_tmr_bitmap_set(ex_handle, true) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, "Timer start: enable timer bitmap failed\n"); - goto err5; - } - - return CQM_SUCCESS; - -err5: - cqm_bloomfilter_uninit(ex_handle); -err4: - cqm_db_uninit(ex_handle); -err3: - cqm_event_uninit(ex_handle); -err2: - cqm_mem_uninit(ex_handle); -err1: - handle->cqm_hdl = NULL; - kfree(cqm_handle); - return CQM_FAIL; -} - -void cqm3_uninit(void *ex_handle) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_handle *cqm_handle = NULL; - s32 ret; - - CQM_PTR_CHECK_NO_RET(ex_handle, CQM_PTR_NULL(ex_handle)); - - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - CQM_PTR_CHECK_NO_RET(cqm_handle, CQM_PTR_NULL(cqm_handle)); - - /* The timer bitmap is set directly at the beginning of the CQM. - * The ifconfig up/down command is not used to set or clear the bitmap. - */ - cqm_info(handle->dev_hdl, "Timer stop: disable timer\n"); - if (sphw_func_tmr_bitmap_set(ex_handle, false) != CQM_SUCCESS) - cqm_err(handle->dev_hdl, "Timer stop: disable timer bitmap failed\n"); - - /* After the TMR timer stops, the system releases resources - * after a delay of one or two milliseconds. - */ - if (cqm_handle->func_attribute.func_type == CQM_PPF && - cqm_handle->func_capability.timer_enable == CQM_TIMER_ENABLE) { - cqm_info(handle->dev_hdl, "Timer stop: spfc ppf timer stop\n"); - ret = sphw_ppf_tmr_stop(handle); - if (ret != CQM_SUCCESS) - /* The timer fails to be stopped, - * and the resource release is not affected. - */ - cqm_info(handle->dev_hdl, "Timer stop: spfc ppf timer stop, ret=%d\n", - ret); - /* Somebody requires a delay of 1 ms, which is inaccurate. */ - usleep_range(900, 1000); - } - - /* Release Bloom Filter Table */ - cqm_bloomfilter_uninit(ex_handle); - - /* Release hardware doorbell */ - cqm_db_uninit(ex_handle); - - /* Cancel the callback of the event */ - cqm_event_uninit(ex_handle); - - /* Release various memory tables and require the service - * to release all objects. 
- */ - cqm_mem_uninit(ex_handle); - - /* Release cqm_handle */ - handle->cqm_hdl = NULL; - kfree(cqm_handle); -} - -void cqm_test_mode_init(struct cqm_handle *cqm_handle, - struct service_cap *service_capability) -{ - struct cqm_func_capability *func_cap = &cqm_handle->func_capability; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - - if (service_capability->test_mode == 0) - return; - - cqm_info(handle->dev_hdl, "Enter CQM test mode\n"); - - func_cap->qpc_number = service_capability->test_qpc_num; - func_cap->qpc_reserved = - GET_MAX(func_cap->qpc_reserved, - service_capability->test_qpc_resvd_num); - func_cap->xid_alloc_mode = service_capability->test_xid_alloc_mode; - func_cap->gpa_check_enable = service_capability->test_gpa_check_enable; - func_cap->pagesize_reorder = service_capability->test_page_size_reorder; - func_cap->qpc_alloc_static = - (bool)(service_capability->test_qpc_alloc_mode); - func_cap->scqc_alloc_static = - (bool)(service_capability->test_scqc_alloc_mode); - func_cap->flow_table_based_conn_number = - service_capability->test_max_conn_num; - func_cap->flow_table_based_conn_cache_number = - service_capability->test_max_cache_conn_num; - func_cap->scqc_number = service_capability->test_scqc_num; - func_cap->mpt_number = service_capability->test_mpt_num; - func_cap->mpt_reserved = service_capability->test_mpt_recvd_num; - func_cap->reorder_number = service_capability->test_reorder_num; - /* 256K buckets, 256K*64B = 16MB */ - func_cap->hash_number = service_capability->test_hash_num; -} - -void cqm_service_capability_update(struct cqm_handle *cqm_handle) -{ - struct cqm_func_capability *func_cap = &cqm_handle->func_capability; - - func_cap->qpc_number = GET_MIN(CQM_MAX_QPC_NUM, func_cap->qpc_number); - func_cap->scqc_number = GET_MIN(CQM_MAX_SCQC_NUM, func_cap->scqc_number); - func_cap->srqc_number = GET_MIN(CQM_MAX_SRQC_NUM, func_cap->srqc_number); - func_cap->childc_number = GET_MIN(CQM_MAX_CHILDC_NUM, func_cap->childc_number); -} - -void cqm_service_valid_init(struct cqm_handle *cqm_handle, - struct service_cap *service_capability) -{ - enum cfg_svc_type_en type = service_capability->chip_svc_type; - struct cqm_service *svc = cqm_handle->service; - - svc[CQM_SERVICE_T_FC].valid = ((u32)type & CFG_SVC_FC_BIT5) ? 
true : false; -} - -void cqm_service_capability_init_fc(struct cqm_handle *cqm_handle, void *pra) -{ - struct cqm_func_capability *func_cap = &cqm_handle->func_capability; - struct service_cap *service_capability = (struct service_cap *)pra; - struct fc_service_cap *fc_cap = &service_capability->fc_cap; - struct dev_fc_svc_cap *dev_fc_cap = &fc_cap->dev_fc_cap; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - - cqm_info(handle->dev_hdl, "Cap init: fc is valid\n"); - cqm_info(handle->dev_hdl, "Cap init: fc qpc 0x%x, scqc 0x%x, srqc 0x%x\n", - dev_fc_cap->max_parent_qpc_num, dev_fc_cap->scq_num, - dev_fc_cap->srq_num); - func_cap->hash_number += dev_fc_cap->max_parent_qpc_num; - func_cap->hash_basic_size = CQM_HASH_BUCKET_SIZE_64; - func_cap->qpc_number += dev_fc_cap->max_parent_qpc_num; - func_cap->qpc_basic_size = GET_MAX(fc_cap->parent_qpc_size, - func_cap->qpc_basic_size); - func_cap->qpc_alloc_static = true; - func_cap->scqc_number += dev_fc_cap->scq_num; - func_cap->scqc_basic_size = GET_MAX(fc_cap->scqc_size, - func_cap->scqc_basic_size); - func_cap->srqc_number += dev_fc_cap->srq_num; - func_cap->srqc_basic_size = GET_MAX(fc_cap->srqc_size, - func_cap->srqc_basic_size); - func_cap->lun_number = CQM_LUN_FC_NUM; - func_cap->lun_basic_size = CQM_LUN_SIZE_8; - func_cap->taskmap_number = CQM_TASKMAP_FC_NUM; - func_cap->taskmap_basic_size = PAGE_SIZE; - func_cap->childc_number += dev_fc_cap->max_child_qpc_num; - func_cap->childc_basic_size = GET_MAX(fc_cap->child_qpc_size, - func_cap->childc_basic_size); - func_cap->pagesize_reorder = CQM_FC_PAGESIZE_ORDER; -} - -void cqm_service_capability_init(struct cqm_handle *cqm_handle, - struct service_cap *service_capability) -{ - struct sphw_hwdev *handle = cqm_handle->ex_handle; - u32 i; - - for (i = 0; i < CQM_SERVICE_T_MAX; i++) { - cqm_handle->service[i].valid = false; - cqm_handle->service[i].has_register = false; - cqm_handle->service[i].buf_order = 0; - } - - cqm_service_valid_init(cqm_handle, service_capability); - - cqm_info(handle->dev_hdl, "Cap init: service type %d\n", - service_capability->chip_svc_type); - - if (cqm_handle->service[CQM_SERVICE_T_FC].valid) - cqm_service_capability_init_fc(cqm_handle, (void *)service_capability); -} - -/* Set func_type in fake_cqm_handle to ppf, pf, or vf. 
*/ -void cqm_set_func_type(struct cqm_handle *cqm_handle) -{ - u32 idx = cqm_handle->func_attribute.func_global_idx; - - if (idx == 0) - cqm_handle->func_attribute.func_type = CQM_PPF; - else if (idx < CQM_MAX_PF_NUM) - cqm_handle->func_attribute.func_type = CQM_PF; - else - cqm_handle->func_attribute.func_type = CQM_VF; -} - -void cqm_lb_fake_mode_init(struct cqm_handle *cqm_handle, struct service_cap *svc_cap) -{ - struct cqm_func_capability *func_cap = &cqm_handle->func_capability; - - func_cap->lb_mode = svc_cap->lb_mode; - - /* Initializing the LB Mode */ - if (func_cap->lb_mode == CQM_LB_MODE_NORMAL) - func_cap->smf_pg = 0; - else - func_cap->smf_pg = svc_cap->smf_pg; - - func_cap->fake_cfg_number = 0; - func_cap->fake_func_type = CQM_FAKE_FUNC_NORMAL; -} - -s32 cqm_capability_init(void *ex_handle) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_handle *cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - struct service_cap *service_capability = &handle->cfg_mgmt->svc_cap; - struct sphw_func_attr *func_attr = &cqm_handle->func_attribute; - struct cqm_func_capability *func_cap = &cqm_handle->func_capability; - u32 total_function_num = 0; - int err = 0; - - /* Initializes the PPF capabilities: include timer, pf, vf. */ - if (func_attr->func_type == CQM_PPF) { - total_function_num = service_capability->host_total_function; - func_cap->timer_enable = service_capability->timer_en; - func_cap->pf_num = service_capability->pf_num; - func_cap->pf_id_start = service_capability->pf_id_start; - func_cap->vf_num = service_capability->vf_num; - func_cap->vf_id_start = service_capability->vf_id_start; - - cqm_info(handle->dev_hdl, "Cap init: total function num 0x%x\n", - total_function_num); - cqm_info(handle->dev_hdl, "Cap init: pf_num 0x%x, pf_id_start 0x%x, vf_num 0x%x, vf_id_start 0x%x\n", - func_cap->pf_num, func_cap->pf_id_start, - func_cap->vf_num, func_cap->vf_id_start); - cqm_info(handle->dev_hdl, "Cap init: timer_enable %u (1: enable; 0: disable)\n", - func_cap->timer_enable); - } - - func_cap->flow_table_based_conn_number = service_capability->max_connect_num; - func_cap->flow_table_based_conn_cache_number = service_capability->max_stick2cache_num; - cqm_info(handle->dev_hdl, "Cap init: cfg max_conn_num 0x%x, max_cache_conn_num 0x%x\n", - func_cap->flow_table_based_conn_number, - func_cap->flow_table_based_conn_cache_number); - - func_cap->bloomfilter_enable = service_capability->bloomfilter_en; - cqm_info(handle->dev_hdl, "Cap init: bloomfilter_enable %u (1: enable; 0: disable)\n", - func_cap->bloomfilter_enable); - - if (func_cap->bloomfilter_enable) { - func_cap->bloomfilter_length = service_capability->bfilter_len; - func_cap->bloomfilter_addr = - service_capability->bfilter_start_addr; - if (func_cap->bloomfilter_length != 0 && - !cqm_check_align(func_cap->bloomfilter_length)) { - cqm_err(handle->dev_hdl, "Cap init: bloomfilter_length %u is not the power of 2\n", - func_cap->bloomfilter_length); - - err = CQM_FAIL; - goto out; - } - } - - cqm_info(handle->dev_hdl, "Cap init: bloomfilter_length 0x%x, bloomfilter_addr 0x%x\n", - func_cap->bloomfilter_length, func_cap->bloomfilter_addr); - - func_cap->qpc_reserved = 0; - func_cap->mpt_reserved = 0; - func_cap->scq_reserved = 0; - func_cap->srq_reserved = 0; - func_cap->qpc_alloc_static = false; - func_cap->scqc_alloc_static = false; - - func_cap->l3i_number = CQM_L3I_COMM_NUM; - func_cap->l3i_basic_size = CQM_L3I_SIZE_8; - - func_cap->timer_number = CQM_TIMER_ALIGN_SCALE_NUM * total_function_num; - 
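cqm_capability_init() above rejects any bloomfilter_length that cqm_check_align() does not accept, i.e. anything that is not a power of two greater than 1 (the loop returns false for 0 and 1). A compact standalone predicate with the same accept/reject behaviour, under an invented name and shown purely for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <assert.h>

/*
 * True for 2, 4, 8, ...; false for 0, 1 and any non-power-of-two.
 * Clearing the lowest set bit of a power of two leaves zero.
 */
static bool is_aligned_pow2(uint32_t v)
{
	return v >= 2 && (v & (v - 1)) == 0;
}

int main(void)
{
	assert(is_aligned_pow2(2) && is_aligned_pow2(4096));
	assert(!is_aligned_pow2(0) && !is_aligned_pow2(1) && !is_aligned_pow2(96));
	return 0;
}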
func_cap->timer_basic_size = CQM_TIMER_SIZE_32; - - func_cap->gpa_check_enable = true; - - cqm_lb_fake_mode_init(cqm_handle, service_capability); - cqm_info(handle->dev_hdl, "Cap init: lb_mode=%u\n", func_cap->lb_mode); - cqm_info(handle->dev_hdl, "Cap init: smf_pg=%u\n", func_cap->smf_pg); - cqm_info(handle->dev_hdl, "Cap init: fake_func_type=%u\n", func_cap->fake_func_type); - cqm_info(handle->dev_hdl, "Cap init: fake_cfg_number=%u\n", func_cap->fake_cfg_number); - - cqm_service_capability_init(cqm_handle, service_capability); - - cqm_test_mode_init(cqm_handle, service_capability); - - cqm_service_capability_update(cqm_handle); - - func_cap->ft_enable = service_capability->sf_svc_attr.ft_en; - func_cap->rdma_enable = service_capability->sf_svc_attr.rdma_en; - - cqm_info(handle->dev_hdl, "Cap init: pagesize_reorder %u\n", func_cap->pagesize_reorder); - cqm_info(handle->dev_hdl, "Cap init: xid_alloc_mode %d, gpa_check_enable %d\n", - func_cap->xid_alloc_mode, func_cap->gpa_check_enable); - cqm_info(handle->dev_hdl, "Cap init: qpc_alloc_mode %d, scqc_alloc_mode %d\n", - func_cap->qpc_alloc_static, func_cap->scqc_alloc_static); - cqm_info(handle->dev_hdl, "Cap init: hash_number 0x%x\n", func_cap->hash_number); - cqm_info(handle->dev_hdl, "Cap init: qpc_number 0x%x, qpc_reserved 0x%x, qpc_basic_size 0x%x\n", - func_cap->qpc_number, func_cap->qpc_reserved, func_cap->qpc_basic_size); - cqm_info(handle->dev_hdl, "Cap init: scqc_number 0x%x scqc_reserved 0x%x, scqc_basic_size 0x%x\n", - func_cap->scqc_number, func_cap->scq_reserved, func_cap->scqc_basic_size); - cqm_info(handle->dev_hdl, "Cap init: srqc_number 0x%x, srqc_basic_size 0x%x\n", - func_cap->srqc_number, func_cap->srqc_basic_size); - cqm_info(handle->dev_hdl, "Cap init: mpt_number 0x%x, mpt_reserved 0x%x\n", - func_cap->mpt_number, func_cap->mpt_reserved); - cqm_info(handle->dev_hdl, "Cap init: gid_number 0x%x, lun_number 0x%x\n", - func_cap->gid_number, func_cap->lun_number); - cqm_info(handle->dev_hdl, "Cap init: taskmap_number 0x%x, l3i_number 0x%x\n", - func_cap->taskmap_number, func_cap->l3i_number); - cqm_info(handle->dev_hdl, "Cap init: timer_number 0x%x, childc_number 0x%x\n", - func_cap->timer_number, func_cap->childc_number); - cqm_info(handle->dev_hdl, "Cap init: childc_basic_size 0x%x\n", - func_cap->childc_basic_size); - cqm_info(handle->dev_hdl, "Cap init: xid2cid_number 0x%x, reorder_number 0x%x\n", - func_cap->xid2cid_number, func_cap->reorder_number); - cqm_info(handle->dev_hdl, "Cap init: ft_enable %d, rdma_enable %d\n", - func_cap->ft_enable, func_cap->rdma_enable); - - return CQM_SUCCESS; - -out: - if (func_attr->func_type == CQM_PPF) - func_cap->timer_enable = 0; - - return err; -} - -s32 cqm_mem_init(void *ex_handle) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_handle *cqm_handle = NULL; - - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - - if (cqm_bat_init(cqm_handle) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bat_init)); - return CQM_FAIL; - } - - if (cqm_cla_init(cqm_handle) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_init)); - goto err1; - } - - if (cqm_bitmap_init(cqm_handle) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bitmap_init)); - goto err2; - } - - if (cqm_object_table_init(cqm_handle) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_object_table_init)); - goto err3; - } - - return CQM_SUCCESS; - -err3: - cqm_bitmap_uninit(cqm_handle); -err2: - cqm_cla_uninit(cqm_handle, 
CQM_BAT_ENTRY_MAX); -err1: - cqm_bat_uninit(cqm_handle); - return CQM_FAIL; -} - -void cqm_mem_uninit(void *ex_handle) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_handle *cqm_handle = NULL; - - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - - cqm_object_table_uninit(cqm_handle); - cqm_bitmap_uninit(cqm_handle); - cqm_cla_uninit(cqm_handle, CQM_BAT_ENTRY_MAX); - cqm_bat_uninit(cqm_handle); -} - -s32 cqm_event_init(void *ex_handle) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - - if (sphw_aeq_register_swe_cb(ex_handle, SPHW_STATEFULL_EVENT, - cqm_aeq_callback) != CHIPIF_SUCCESS) { - cqm_err(handle->dev_hdl, "Event: fail to register aeq callback\n"); - return CQM_FAIL; - } - - return CQM_SUCCESS; -} - -void cqm_event_uninit(void *ex_handle) -{ - sphw_aeq_unregister_swe_cb(ex_handle, SPHW_STATEFULL_EVENT); -} - -u32 cqm_aeq_event2type(u8 event) -{ - u32 service_type; - - /* Distributes events to different service modules - * based on the event type. - */ - if (event >= CQM_AEQ_BASE_T_FC && event < CQM_AEQ_MAX_T_FC) - service_type = CQM_SERVICE_T_FC; - else - service_type = CQM_SERVICE_T_MAX; - - return service_type; -} - -u8 cqm_aeq_callback(void *ex_handle, u8 event, u8 *data) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct service_register_template *service_template = NULL; - struct cqm_handle *cqm_handle = NULL; - struct cqm_service *service = NULL; - u8 event_level = FAULT_LEVEL_MAX; - u32 service_type; - - CQM_PTR_CHECK_RET(ex_handle, event_level, - CQM_PTR_NULL(aeq_callback_ex_handle)); - - atomic_inc(&handle->hw_stats.cqm_stats.cqm_aeq_callback_cnt[event]); - - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - CQM_PTR_CHECK_RET(cqm_handle, event_level, - CQM_PTR_NULL(aeq_callback_cqm_handle)); - - /* Distributes events to different service modules - * based on the event type. 
- */ - service_type = cqm_aeq_event2type(event); - if (service_type == CQM_SERVICE_T_MAX) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(event)); - return event_level; - } - - service = &cqm_handle->service[service_type]; - service_template = &service->service_template; - - if (!service_template->aeq_level_callback) - cqm_err(handle->dev_hdl, "Event: service_type %u aeq_level_callback unregistered\n", - service_type); - else - event_level = service_template->aeq_level_callback(service_template->service_handle, - event, data); - - if (!service_template->aeq_callback) - cqm_err(handle->dev_hdl, "Event: service_type %u aeq_callback unregistered\n", - service_type); - else - service_template->aeq_callback(service_template->service_handle, - event, data); - - return event_level; -} - -s32 cqm3_service_register(void *ex_handle, struct service_register_template *service_template) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_handle *cqm_handle = NULL; - struct cqm_service *service = NULL; - - CQM_PTR_CHECK_RET(ex_handle, CQM_FAIL, CQM_PTR_NULL(ex_handle)); - - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - CQM_PTR_CHECK_RET(cqm_handle, CQM_FAIL, CQM_PTR_NULL(cqm_handle)); - CQM_PTR_CHECK_RET(service_template, CQM_FAIL, - CQM_PTR_NULL(service_template)); - - if (service_template->service_type >= CQM_SERVICE_T_MAX) { - cqm_err(handle->dev_hdl, - CQM_WRONG_VALUE(service_template->service_type)); - return CQM_FAIL; - } - service = &cqm_handle->service[service_template->service_type]; - if (!service->valid) { - cqm_err(handle->dev_hdl, "Service register: service_type %u is invalid\n", - service_template->service_type); - return CQM_FAIL; - } - - if (service->has_register) { - cqm_err(handle->dev_hdl, "Service register: service_type %u has registered\n", - service_template->service_type); - return CQM_FAIL; - } - - service->has_register = true; - (void)memcpy((void *)(&service->service_template), - (void *)service_template, - sizeof(struct service_register_template)); - - return CQM_SUCCESS; -} - -void cqm3_service_unregister(void *ex_handle, u32 service_type) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_handle *cqm_handle = NULL; - struct cqm_service *service = NULL; - - CQM_PTR_CHECK_NO_RET(ex_handle, CQM_PTR_NULL(ex_handle)); - - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - CQM_PTR_CHECK_NO_RET(cqm_handle, CQM_PTR_NULL(cqm_handle)); - - if (service_type >= CQM_SERVICE_T_MAX) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(service_type)); - return; - } - - service = &cqm_handle->service[service_type]; - if (!service->valid) - cqm_err(handle->dev_hdl, "Service unregister: service_type %u is disable\n", - service_type); - - service->has_register = false; - memset(&service->service_template, 0, sizeof(struct service_register_template)); -} - -struct cqm_cmd_buf *cqm3_cmd_alloc(void *ex_handle) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - - CQM_PTR_CHECK_RET(ex_handle, NULL, CQM_PTR_NULL(ex_handle)); - - atomic_inc(&handle->hw_stats.cqm_stats.cqm_cmd_alloc_cnt); - - return (struct cqm_cmd_buf *)sphw_alloc_cmd_buf(ex_handle); -} - -void cqm3_cmd_free(void *ex_handle, struct cqm_cmd_buf *cmd_buf) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - - CQM_PTR_CHECK_NO_RET(ex_handle, CQM_PTR_NULL(ex_handle)); - CQM_PTR_CHECK_NO_RET(cmd_buf, CQM_PTR_NULL(cmd_buf)); - CQM_PTR_CHECK_NO_RET(cmd_buf->buf, CQM_PTR_NULL(buf)); - - atomic_inc(&handle->hw_stats.cqm_stats.cqm_cmd_free_cnt); - - 
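cqm3_service_register() above follows a simple template-registration pattern: the caller supplies a structure of callbacks, and registration is refused when the service slot is not valid or already has a registrant; the AEQ handler then dispatches events through the stored callbacks. A small standalone sketch of that pattern, with invented type and function names and no driver dependencies:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned char (*aeq_cb_t)(void *service_handle, unsigned char event,
				  unsigned char *data);

struct svc_template {
	void *service_handle;
	aeq_cb_t aeq_callback;
};

struct svc_slot {
	bool valid;
	bool has_register;
	struct svc_template tmpl;
};

/* Copy the caller's template into the slot unless it is disabled or taken. */
static int svc_register(struct svc_slot *slot, const struct svc_template *t)
{
	if (!slot->valid || slot->has_register)
		return -1;	/* disabled slot or duplicate registration */

	slot->tmpl = *t;
	slot->has_register = true;
	return 0;
}

static unsigned char demo_cb(void *h, unsigned char event, unsigned char *data)
{
	(void)h; (void)data;
	printf("event %d dispatched\n", (int)event);
	return 0;
}

int main(void)
{
	struct svc_slot slot = { .valid = true };
	struct svc_template t = { .aeq_callback = demo_cb };

	if (svc_register(&slot, &t) == 0 && slot.tmpl.aeq_callback)
		slot.tmpl.aeq_callback(slot.tmpl.service_handle, 3, NULL);
	return 0;
}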
sphw_free_cmd_buf(ex_handle, (struct sphw_cmd_buf *)cmd_buf); -} - -s32 cqm3_send_cmd_box(void *ex_handle, u8 mod, u8 cmd, struct cqm_cmd_buf *buf_in, - struct cqm_cmd_buf *buf_out, u64 *out_param, u32 timeout, - u16 channel) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - - CQM_PTR_CHECK_RET(ex_handle, CQM_FAIL, CQM_PTR_NULL(ex_handle)); - CQM_PTR_CHECK_RET(buf_in, CQM_FAIL, CQM_PTR_NULL(buf_in)); - CQM_PTR_CHECK_RET(buf_in->buf, CQM_FAIL, CQM_PTR_NULL(buf)); - - atomic_inc(&handle->hw_stats.cqm_stats.cqm_send_cmd_box_cnt); - - return sphw_cmdq_detail_resp(ex_handle, mod, cmd, - (struct sphw_cmd_buf *)buf_in, - (struct sphw_cmd_buf *)buf_out, - out_param, timeout, channel); -} - -int cqm_alloc_fc_db_addr(void *hwdev, void __iomem **db_base, - void __iomem **dwqe_base) -{ - struct sphw_hwif *hwif = NULL; - u32 idx = 0; -#define SPFC_DB_ADDR_RSVD 12 -#define SPFC_DB_MASK 128 - u64 db_base_phy_fc; - - if (!hwdev || !db_base) - return -EINVAL; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - db_base_phy_fc = hwif->db_base_phy >> SPFC_DB_ADDR_RSVD; - - if (db_base_phy_fc & (SPFC_DB_MASK - 1)) - idx = SPFC_DB_MASK - (db_base_phy_fc && (SPFC_DB_MASK - 1)); - - *db_base = hwif->db_base + idx * SPHW_DB_PAGE_SIZE; - - if (!dwqe_base) - return 0; - - *dwqe_base = (u8 *)*db_base + SPHW_DWQE_OFFSET; - - return 0; -} - -s32 cqm3_db_addr_alloc(void *ex_handle, void __iomem **db_addr, - void __iomem **dwqe_addr) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - - CQM_PTR_CHECK_RET(ex_handle, CQM_FAIL, CQM_PTR_NULL(ex_handle)); - CQM_PTR_CHECK_RET(db_addr, CQM_FAIL, CQM_PTR_NULL(db_addr)); - CQM_PTR_CHECK_RET(dwqe_addr, CQM_FAIL, CQM_PTR_NULL(dwqe_addr)); - - atomic_inc(&handle->hw_stats.cqm_stats.cqm_db_addr_alloc_cnt); - - return cqm_alloc_fc_db_addr(ex_handle, db_addr, dwqe_addr); -} - -s32 cqm_db_phy_addr_alloc(void *ex_handle, u64 *db_paddr, u64 *dwqe_addr) -{ - return sphw_alloc_db_phy_addr(ex_handle, db_paddr, dwqe_addr); -} - -void cqm3_db_addr_free(void *ex_handle, const void __iomem *db_addr, - void __iomem *dwqe_addr) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - - CQM_PTR_CHECK_NO_RET(ex_handle, CQM_PTR_NULL(ex_handle)); - - atomic_inc(&handle->hw_stats.cqm_stats.cqm_db_addr_free_cnt); - - sphw_free_db_addr(ex_handle, db_addr, dwqe_addr); -} - -void cqm_db_phy_addr_free(void *ex_handle, u64 *db_paddr, u64 *dwqe_addr) -{ - sphw_free_db_phy_addr(ex_handle, *db_paddr, *dwqe_addr); -} - -s32 cqm_db_init(void *ex_handle) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_handle *cqm_handle = NULL; - struct cqm_service *service = NULL; - s32 i; - - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - - /* Allocate hardware doorbells to services. 
*/ - for (i = 0; i < CQM_SERVICE_T_MAX; i++) { - service = &cqm_handle->service[i]; - if (!service->valid) - continue; - - if (cqm3_db_addr_alloc(ex_handle, &service->hardware_db_vaddr, - &service->dwqe_vaddr) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm3_db_addr_alloc)); - break; - } - - if (cqm_db_phy_addr_alloc(handle, &service->hardware_db_paddr, - &service->dwqe_paddr) != CQM_SUCCESS) { - cqm3_db_addr_free(ex_handle, service->hardware_db_vaddr, - service->dwqe_vaddr); - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_db_phy_addr_alloc)); - break; - } - } - - if (i != CQM_SERVICE_T_MAX) { - i--; - for (; i >= 0; i--) { - service = &cqm_handle->service[i]; - if (!service->valid) - continue; - - cqm3_db_addr_free(ex_handle, service->hardware_db_vaddr, - service->dwqe_vaddr); - cqm_db_phy_addr_free(ex_handle, - &service->hardware_db_paddr, - &service->dwqe_paddr); - } - return CQM_FAIL; - } - - return CQM_SUCCESS; -} - -void cqm_db_uninit(void *ex_handle) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_handle *cqm_handle = NULL; - struct cqm_service *service = NULL; - s32 i; - - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - - /* Release hardware doorbell. */ - for (i = 0; i < CQM_SERVICE_T_MAX; i++) { - service = &cqm_handle->service[i]; - if (service->valid) - cqm3_db_addr_free(ex_handle, service->hardware_db_vaddr, - service->dwqe_vaddr); - } -} - -s32 cqm3_ring_hardware_db_fc(void *ex_handle, u32 service_type, u8 db_count, - u8 pagenum, u64 db) -{ -#define SPFC_DB_FAKE_VF_OFFSET 32 - struct cqm_handle *cqm_handle = NULL; - struct cqm_service *service = NULL; - struct sphw_hwdev *handle = NULL; - void *dbaddr = NULL; - - handle = (struct sphw_hwdev *)ex_handle; - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - service = &cqm_handle->service[service_type]; - /* Considering the performance of ringing hardware db, - * the parameter is not checked. - */ - wmb(); - dbaddr = (u8 *)service->hardware_db_vaddr + - ((pagenum + SPFC_DB_FAKE_VF_OFFSET) * SPHW_DB_PAGE_SIZE); - *((u64 *)dbaddr + db_count) = db; - return CQM_SUCCESS; -} - -s32 cqm_ring_direct_wqe_db_fc(void *ex_handle, u32 service_type, - void *direct_wqe) -{ - struct cqm_handle *cqm_handle = NULL; - struct cqm_service *service = NULL; - struct sphw_hwdev *handle = NULL; - u64 *tmp = (u64 *)direct_wqe; - int i; - - handle = (struct sphw_hwdev *)ex_handle; - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - service = &cqm_handle->service[service_type]; - - /* Considering the performance of ringing hardware db, - * the parameter is not checked. - */ - wmb(); - *((u64 *)service->dwqe_vaddr + 0) = tmp[2]; - *((u64 *)service->dwqe_vaddr + 1) = tmp[3]; - *((u64 *)service->dwqe_vaddr + 2) = tmp[0]; - *((u64 *)service->dwqe_vaddr + 3) = tmp[1]; - tmp += 4; - - /* The FC use 256B WQE. 
The directwqe is written at block0, - * and the length is 256B - */ - for (i = 4; i < 32; i++) - *((u64 *)service->dwqe_vaddr + i) = *tmp++; - - return CQM_SUCCESS; -} - -static s32 bloomfilter_init_cmd(void *ex_handle) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_handle *cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - struct cqm_func_capability *capability = &cqm_handle->func_capability; - struct cqm_bloomfilter_init_cmd *cmd = NULL; - struct cqm_cmd_buf *buf_in = NULL; - s32 ret; - - buf_in = cqm3_cmd_alloc((void *)(cqm_handle->ex_handle)); - CQM_PTR_CHECK_RET(buf_in, CQM_FAIL, CQM_ALLOC_FAIL(buf_in)); - - /* Fill the command format and convert it to big-endian. */ - buf_in->size = sizeof(struct cqm_bloomfilter_init_cmd); - cmd = (struct cqm_bloomfilter_init_cmd *)(buf_in->buf); - cmd->bloom_filter_addr = capability->bloomfilter_addr; - cmd->bloom_filter_len = capability->bloomfilter_length; - - cqm_swab32((u8 *)cmd, (sizeof(struct cqm_bloomfilter_init_cmd) >> CQM_DW_SHIFT)); - - ret = cqm3_send_cmd_box((void *)(cqm_handle->ex_handle), - CQM_MOD_CQM, CQM_CMD_T_BLOOMFILTER_INIT, buf_in, - NULL, NULL, CQM_CMD_TIMEOUT, - SPHW_CHANNEL_DEFAULT); - if (ret != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm3_send_cmd_box)); - cqm_err(handle->dev_hdl, "Bloomfilter: %s ret=%d\n", __func__, - ret); - cqm_err(handle->dev_hdl, "Bloomfilter: %s: 0x%x 0x%x\n", - __func__, cmd->bloom_filter_addr, - cmd->bloom_filter_len); - cqm3_cmd_free((void *)(cqm_handle->ex_handle), buf_in); - return CQM_FAIL; - } - cqm3_cmd_free((void *)(cqm_handle->ex_handle), buf_in); - return CQM_SUCCESS; -} - -s32 cqm_bloomfilter_init(void *ex_handle) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_bloomfilter_table *bloomfilter_table = NULL; - struct cqm_func_capability *capability = NULL; - struct cqm_handle *cqm_handle = NULL; - u32 array_size; - s32 ret; - - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - bloomfilter_table = &cqm_handle->bloomfilter_table; - capability = &cqm_handle->func_capability; - - if (capability->bloomfilter_length == 0) { - cqm_info(handle->dev_hdl, - "Bloomfilter: bf_length=0, don't need to init bloomfilter\n"); - return CQM_SUCCESS; - } - - /* The unit of bloomfilter_length is 64B(512bits). Each bit is a table - * node. Therefore the value must be shift 9 bits to the left. - */ - bloomfilter_table->table_size = capability->bloomfilter_length << - CQM_BF_LENGTH_UNIT; - /* The unit of bloomfilter_length is 64B. The unit of array entryis 32B. - */ - array_size = capability->bloomfilter_length << 1; - if (array_size == 0 || array_size > CQM_BF_BITARRAY_MAX) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(array_size)); - return CQM_FAIL; - } - - bloomfilter_table->array_mask = array_size - 1; - /* This table is not a bitmap, it is the counter of corresponding bit. 
- */ - bloomfilter_table->table = vmalloc(bloomfilter_table->table_size * (sizeof(u32))); - CQM_PTR_CHECK_RET(bloomfilter_table->table, CQM_FAIL, CQM_ALLOC_FAIL(table)); - - memset(bloomfilter_table->table, 0, - (bloomfilter_table->table_size * sizeof(u32))); - - /* The the bloomfilter must be initialized to 0 by ucode, - * because the bloomfilter is mem mode - */ - if (cqm_handle->func_capability.bloomfilter_enable) { - ret = bloomfilter_init_cmd(ex_handle); - if (ret != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, - "Bloomfilter: bloomfilter_init_cmd ret=%d\n", - ret); - vfree(bloomfilter_table->table); - bloomfilter_table->table = NULL; - return CQM_FAIL; - } - } - - mutex_init(&bloomfilter_table->lock); - return CQM_SUCCESS; -} - -void cqm_bloomfilter_uninit(void *ex_handle) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_bloomfilter_table *bloomfilter_table = NULL; - struct cqm_handle *cqm_handle = NULL; - - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - bloomfilter_table = &cqm_handle->bloomfilter_table; - - if (bloomfilter_table->table) { - vfree(bloomfilter_table->table); - bloomfilter_table->table = NULL; - } -} - -s32 cqm_bloomfilter_cmd(void *ex_handle, u32 op, u32 k_flag, u64 id) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_cmd_buf *buf_in = NULL; - struct cqm_bloomfilter_cmd *cmd = NULL; - s32 ret; - - buf_in = cqm3_cmd_alloc(ex_handle); - CQM_PTR_CHECK_RET(buf_in, CQM_FAIL, CQM_ALLOC_FAIL(buf_in)); - - /* Fill the command format and convert it to big-endian. */ - buf_in->size = sizeof(struct cqm_bloomfilter_cmd); - cmd = (struct cqm_bloomfilter_cmd *)(buf_in->buf); - memset((void *)cmd, 0, sizeof(struct cqm_bloomfilter_cmd)); - cmd->k_en = k_flag; - cmd->index_h = (u32)(id >> CQM_DW_OFFSET); - cmd->index_l = (u32)(id & CQM_DW_MASK); - - cqm_swab32((u8 *)cmd, (sizeof(struct cqm_bloomfilter_cmd) >> CQM_DW_SHIFT)); - - ret = cqm3_send_cmd_box(ex_handle, CQM_MOD_CQM, (u8)op, buf_in, NULL, - NULL, CQM_CMD_TIMEOUT, SPHW_CHANNEL_DEFAULT); - if (ret != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm3_send_cmd_box)); - cqm_err(handle->dev_hdl, "Bloomfilter: bloomfilter_cmd ret=%d\n", ret); - cqm_err(handle->dev_hdl, "Bloomfilter: op=0x%x, cmd: 0x%x 0x%x 0x%x 0x%x\n", - op, *((u32 *)cmd), *(((u32 *)cmd) + CQM_DW_INDEX1), - *(((u32 *)cmd) + CQM_DW_INDEX2), - *(((u32 *)cmd) + CQM_DW_INDEX3)); - cqm3_cmd_free(ex_handle, buf_in); - return CQM_FAIL; - } - - cqm3_cmd_free(ex_handle, buf_in); - - return CQM_SUCCESS; -} diff --git a/drivers/scsi/spfc/hw/spfc_cqm_main.h b/drivers/scsi/spfc/hw/spfc_cqm_main.h deleted file mode 100644 index cf10d7f5c339..000000000000 --- a/drivers/scsi/spfc/hw/spfc_cqm_main.h +++ /dev/null @@ -1,411 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPFC_CQM_MAIN_H -#define SPFC_CQM_MAIN_H - -#include "sphw_hwdev.h" -#include "sphw_hwif.h" -#include "spfc_cqm_object.h" -#include "spfc_cqm_bitmap_table.h" -#include "spfc_cqm_bat_cla.h" - -#define GET_MAX(a, b) ((a) > (b) ? (a) : (b)) -#define GET_MIN(a, b) ((a) < (b) ? (a) : (b)) -#define CQM_DW_SHIFT 2 -#define CQM_QW_SHIFT 3 -#define CQM_BYTE_BIT_SHIFT 3 -#define CQM_NUM_BIT_BYTE 8 - -#define CHIPIF_SUCCESS 0 -#define CHIPIF_FAIL (-1) - -#define CQM_TIMER_ENABLE 1 -#define CQM_TIMER_DISABLE 0 - -/* The value must be the same as that of sphw_service_type in sphw_crm.h. 
*/ -#define CQM_SERVICE_T_FC SERVICE_T_FC -#define CQM_SERVICE_T_MAX SERVICE_T_MAX - -struct cqm_service { - bool valid; /* Whether to enable this service on the function. */ - bool has_register; /* Registered or Not */ - u64 hardware_db_paddr; - void __iomem *hardware_db_vaddr; - u64 dwqe_paddr; - void __iomem *dwqe_vaddr; - u32 buf_order; /* The size of each buf node is 2^buf_order pages. */ - struct service_register_template service_template; -}; - -struct cqm_fake_cfg { - u32 parent_func; /* The parent func_id of the fake vfs. */ - u32 child_func_start; /* The start func_id of the child fake vfs. */ - u32 child_func_number; /* The number of the child fake vfs. */ -}; - -#define CQM_MAX_FACKVF_GROUP 4 - -struct cqm_func_capability { - /* BAT_PTR table(SMLC) */ - bool ft_enable; /* BAT for flow table enable: support fc service - */ - bool rdma_enable; /* BAT for rdma enable: support RoCE */ - /* VAT table(SMIR) */ - bool ft_pf_enable; /* Same as ft_enable. BAT entry for fc on pf - */ - bool rdma_pf_enable; /* Same as rdma_enable. BAT entry for rdma on pf */ - - /* Dynamic or static memory allocation during the application of - * specified QPC/SCQC for each service. - */ - bool qpc_alloc_static; - bool scqc_alloc_static; - - u8 timer_enable; /* Whether the timer function is enabled */ - u8 bloomfilter_enable; /* Whether the bloomgfilter function is enabled */ - /* Maximum number of connections for fc, whitch cannot excedd qpc_number */ - u32 flow_table_based_conn_number; - u32 flow_table_based_conn_cache_number; /* Maximum number of sticky caches */ - u32 bloomfilter_length; /* Size of the bloomfilter table, 64-byte aligned */ - u32 bloomfilter_addr; /* Start position of the bloomfilter table in the SMF main cache. */ - u32 qpc_reserved; /* Reserved bit in bitmap */ - u32 mpt_reserved; /* The ROCE/IWARP MPT also has a reserved bit. */ - - /* All basic_size must be 2^n-aligned. */ - /* The number of hash bucket. The size of BAT table is aliaed with 64 bucket. - *At least 64 buckets is required. - */ - u32 hash_number; - /* THe basic size of hash bucket is 64B, including 5 valid entry and one next entry. */ - u32 hash_basic_size; - u32 qpc_number; - u32 qpc_basic_size; - - /* NUmber of PFs/VFs on the current host */ - u32 pf_num; - u32 pf_id_start; - u32 vf_num; - u32 vf_id_start; - - u32 lb_mode; - /* Only lower 4bit is valid, indicating which SMFs are enabled. - * For example, 0101B indicates that SMF0 and SMF2 are enabled. - */ - u32 smf_pg; - - u32 fake_mode; - /* Whether the current function belongs to the fake group (parent or child) */ - u32 fake_func_type; - u32 fake_cfg_number; /* Number of current configuration groups */ - struct cqm_fake_cfg fake_cfg[CQM_MAX_FACKVF_GROUP]; - - /* Note: for cqm specail test */ - u32 pagesize_reorder; - bool xid_alloc_mode; - bool gpa_check_enable; - u32 scq_reserved; - u32 srq_reserved; - - u32 mpt_number; - u32 mpt_basic_size; - u32 scqc_number; - u32 scqc_basic_size; - u32 srqc_number; - u32 srqc_basic_size; - - u32 gid_number; - u32 gid_basic_size; - u32 lun_number; - u32 lun_basic_size; - u32 taskmap_number; - u32 taskmap_basic_size; - u32 l3i_number; - u32 l3i_basic_size; - u32 childc_number; - u32 childc_basic_size; - u32 child_qpc_id_start; /* FC service Child CTX is global addressing. */ - u32 childc_number_all_function; /* The chip supports a maximum of 8096 child CTXs. 
*/ - u32 timer_number; - u32 timer_basic_size; - u32 xid2cid_number; - u32 xid2cid_basic_size; - u32 reorder_number; - u32 reorder_basic_size; -}; - -#define CQM_PF TYPE_PF -#define CQM_VF TYPE_VF -#define CQM_PPF TYPE_PPF -#define CQM_UNKNOWN TYPE_UNKNOWN -#define CQM_MAX_PF_NUM 32 - -#define CQM_LB_MODE_NORMAL 0xff -#define CQM_LB_MODE_0 0 -#define CQM_LB_MODE_1 1 -#define CQM_LB_MODE_2 2 - -#define CQM_LB_SMF_MAX 4 - -#define CQM_FPGA_MODE 0 -#define CQM_EMU_MODE 1 -#define CQM_FAKE_MODE_DISABLE 0 -#define CQM_FAKE_CFUNC_START 32 - -#define CQM_FAKE_FUNC_NORMAL 0 -#define CQM_FAKE_FUNC_PARENT 1 -#define CQM_FAKE_FUNC_CHILD 2 -#define CQM_FAKE_FUNC_CHILD_CONFLICT 3 -#define CQM_FAKE_FUNC_MAX 32 - -#define CQM_SPU_HOST_ID 4 - -#define CQM_QPC_ROCE_PER_DRCT 12 -#define CQM_QPC_NORMAL_RESERVE_DRC 0 -#define CQM_QPC_ROCEAA_ENABLE 1 -#define CQM_QPC_ROCE_VBS_MODE 2 -#define CQM_QPC_NORMAL_WITHOUT_RSERVER_DRC 3 - -struct cqm_db_common { - u32 rsvd1 : 23; - u32 c : 1; - u32 cos : 3; - u32 service_type : 5; - - u32 rsvd2; -}; - -struct cqm_bloomfilter_table { - u32 *table; - u32 table_size; /* The unit is bit */ - u32 array_mask; /* The unit of array entry is 32B, used to address entry - */ - struct mutex lock; -}; - -struct cqm_bloomfilter_init_cmd { - u32 bloom_filter_len; - u32 bloom_filter_addr; -}; - -struct cqm_bloomfilter_cmd { - u32 rsv1; - - u32 k_en : 4; - u32 rsv2 : 28; - - u32 index_h; - u32 index_l; -}; - -struct cqm_handle { - struct sphw_hwdev *ex_handle; - struct pci_dev *dev; - struct sphw_func_attr func_attribute; /* vf/pf attributes */ - struct cqm_func_capability func_capability; /* function capability set */ - struct cqm_service service[CQM_SERVICE_T_MAX]; /* Service-related structure */ - struct cqm_bat_table bat_table; - struct cqm_bloomfilter_table bloomfilter_table; - /* fake-vf-related structure */ - struct cqm_handle *fake_cqm_handle[CQM_FAKE_FUNC_MAX]; - struct cqm_handle *parent_cqm_handle; -}; - -enum cqm_cmd_type { - CQM_CMD_T_INVALID = 0, - CQM_CMD_T_BAT_UPDATE, - CQM_CMD_T_CLA_UPDATE, - CQM_CMD_T_CLA_CACHE_INVALID = 6, - CQM_CMD_T_BLOOMFILTER_INIT, - CQM_CMD_T_MAX -}; - -#define CQM_CQN_FROM_CEQE(data) ((data) & 0xfffff) -#define CQM_XID_FROM_CEQE(data) ((data) & 0xfffff) -#define CQM_QID_FROM_CEQE(data) (((data) >> 20) & 0x7) -#define CQM_TYPE_FROM_CEQE(data) (((data) >> 23) & 0x7) - -#define CQM_HASH_BUCKET_SIZE_64 64 - -#define CQM_MAX_QPC_NUM 0x100000 -#define CQM_MAX_SCQC_NUM 0x100000 -#define CQM_MAX_SRQC_NUM 0x100000 -#define CQM_MAX_CHILDC_NUM 0x100000 - -#define CQM_QPC_SIZE_256 256 -#define CQM_QPC_SIZE_512 512 -#define CQM_QPC_SIZE_1024 1024 - -#define CQM_SCQC_SIZE_32 32 -#define CQM_SCQC_SIZE_64 64 -#define CQM_SCQC_SIZE_128 128 - -#define CQM_SRQC_SIZE_32 32 -#define CQM_SRQC_SIZE_64 64 -#define CQM_SRQC_SIZE_128 128 - -#define CQM_MPT_SIZE_64 64 - -#define CQM_GID_SIZE_32 32 - -#define CQM_LUN_SIZE_8 8 - -#define CQM_L3I_SIZE_8 8 - -#define CQM_TIMER_SIZE_32 32 - -#define CQM_XID2CID_SIZE_8 8 - -#define CQM_XID2CID_SIZE_8K 8192 - -#define CQM_REORDER_SIZE_256 256 - -#define CQM_CHILDC_SIZE_256 256 - -#define CQM_XID2CID_VBS_NUM (18 * 1024) /* 16K virtio VQ + 2K nvme Q */ - -#define CQM_VBS_QPC_NUM 2048 /* 2K VOLQ */ - -#define CQM_VBS_QPC_SIZE 512 - -#define CQM_XID2CID_VIRTIO_NUM (16 * 1024) - -#define CQM_GID_RDMA_NUM 128 - -#define CQM_LUN_FC_NUM 64 - -#define CQM_TASKMAP_FC_NUM 4 - -#define CQM_L3I_COMM_NUM 64 - -#define CQM_CHILDC_ROCE_NUM (8 * 1024) -#define CQM_CHILDC_OVS_VBS_NUM (8 * 1024) -#define CQM_CHILDC_TOE_NUM 256 -#define 
CQM_CHILDC_IPSEC_NUM (4 * 1024) - -#define CQM_TIMER_SCALE_NUM (2 * 1024) -#define CQM_TIMER_ALIGN_WHEEL_NUM 8 -#define CQM_TIMER_ALIGN_SCALE_NUM \ - (CQM_TIMER_SCALE_NUM * CQM_TIMER_ALIGN_WHEEL_NUM) - -#define CQM_QPC_OVS_RSVD (1024 * 1024) -#define CQM_QPC_ROCE_RSVD 2 -#define CQM_QPC_ROCEAA_SWITCH_QP_NUM 4 -#define CQM_QPC_ROCEAA_RSVD \ - (4 * 1024 + CQM_QPC_ROCEAA_SWITCH_QP_NUM) /* 4096 Normal QP + 4 Switch QP */ -#define CQM_CQ_ROCEAA_RSVD 64 -#define CQM_SRQ_ROCEAA_RSVD 64 -#define CQM_QPC_ROCE_VBS_RSVD \ - (1024 + CQM_QPC_ROCE_RSVD) /* (204800 + CQM_QPC_ROCE_RSVD) */ - -#define CQM_OVS_PAGESIZE_ORDER 8 -#define CQM_OVS_MAX_TIMER_FUNC 48 - -#define CQM_FC_PAGESIZE_ORDER 0 - -#define CQM_QHEAD_ALIGN_ORDER 6 - -#define CQM_CMD_TIMEOUT 300000 /* ms */ - -#define CQM_DW_MASK 0xffffffff -#define CQM_DW_OFFSET 32 -#define CQM_DW_INDEX0 0 -#define CQM_DW_INDEX1 1 -#define CQM_DW_INDEX2 2 -#define CQM_DW_INDEX3 3 - -/* The unit of bloomfilter_length is 64B(512bits). */ -#define CQM_BF_LENGTH_UNIT 9 -#define CQM_BF_BITARRAY_MAX BIT(17) - -typedef void (*serv_cap_init_cb)(struct cqm_handle *, void *); - -/* Only for llt test */ -s32 cqm_capability_init(void *ex_handle); -/* Can be defined as static */ -s32 cqm_mem_init(void *ex_handle); -void cqm_mem_uninit(void *ex_handle); -s32 cqm_event_init(void *ex_handle); -void cqm_event_uninit(void *ex_handle); -u8 cqm_aeq_callback(void *ex_handle, u8 event, u8 *data); - -s32 cqm3_init(void *ex_handle); -void cqm3_uninit(void *ex_handle); -s32 cqm3_service_register(void *ex_handle, struct service_register_template *service_template); -void cqm3_service_unregister(void *ex_handle, u32 service_type); - -struct cqm_cmd_buf *cqm3_cmd_alloc(void *ex_handle); -void cqm3_cmd_free(void *ex_handle, struct cqm_cmd_buf *cmd_buf); -s32 cqm3_send_cmd_box(void *ex_handle, u8 mod, u8 cmd, struct cqm_cmd_buf *buf_in, - struct cqm_cmd_buf *buf_out, u64 *out_param, u32 timeout, - u16 channel); - -s32 cqm3_db_addr_alloc(void *ex_handle, void __iomem **db_addr, void __iomem **dwqe_addr); -s32 cqm_db_phy_addr_alloc(void *ex_handle, u64 *db_paddr, u64 *dwqe_addr); -s32 cqm_db_init(void *ex_handle); -void cqm_db_uninit(void *ex_handle); - -s32 cqm_bloomfilter_cmd(void *ex_handle, u32 op, u32 k_flag, u64 id); -s32 cqm_bloomfilter_init(void *ex_handle); -void cqm_bloomfilter_uninit(void *ex_handle); - -#define CQM_LOG_ID 0 - -#define CQM_PTR_NULL(x) "%s: " #x " is null\n", __func__ -#define CQM_ALLOC_FAIL(x) "%s: " #x " alloc fail\n", __func__ -#define CQM_MAP_FAIL(x) "%s: " #x " map fail\n", __func__ -#define CQM_FUNCTION_FAIL(x) "%s: " #x " return failure\n", __func__ -#define CQM_WRONG_VALUE(x) "%s: " #x " %u is wrong\n", __func__, (u32)(x) - -#define cqm_err(dev, format, ...) dev_err(dev, "[CQM]" format, ##__VA_ARGS__) -#define cqm_warn(dev, format, ...) dev_warn(dev, "[CQM]" format, ##__VA_ARGS__) -#define cqm_notice(dev, format, ...) \ - dev_notice(dev, "[CQM]" format, ##__VA_ARGS__) -#define cqm_info(dev, format, ...) 
dev_info(dev, "[CQM]" format, ##__VA_ARGS__) - -#define CQM_32_ALIGN_CHECK_RET(dev_hdl, x, ret, desc) \ - do { \ - if (unlikely(((x) & 0x1f) != 0)) { \ - cqm_err(dev_hdl, desc); \ - return ret; \ - } \ - } while (0) -#define CQM_64_ALIGN_CHECK_RET(dev_hdl, x, ret, desc) \ - do { \ - if (unlikely(((x) & 0x3f) != 0)) { \ - cqm_err(dev_hdl, desc); \ - return ret; \ - } \ - } while (0) - -#define CQM_PTR_CHECK_RET(ptr, ret, desc) \ - do { \ - if (unlikely((ptr) == NULL)) { \ - pr_err("[CQM]" desc); \ - return ret; \ - } \ - } while (0) - -#define CQM_PTR_CHECK_NO_RET(ptr, desc) \ - do { \ - if (unlikely((ptr) == NULL)) { \ - pr_err("[CQM]" desc); \ - return; \ - } \ - } while (0) -#define CQM_CHECK_EQUAL_RET(dev_hdl, actual, expect, ret, desc) \ - do { \ - if (unlikely((expect) != (actual))) { \ - cqm_err(dev_hdl, desc); \ - return ret; \ - } \ - } while (0) -#define CQM_CHECK_EQUAL_NO_RET(dev_hdl, actual, expect, desc) \ - do { \ - if (unlikely((expect) != (actual))) { \ - cqm_err(dev_hdl, desc); \ - return; \ - } \ - } while (0) - -#endif /* SPFC_CQM_MAIN_H */ diff --git a/drivers/scsi/spfc/hw/spfc_cqm_object.c b/drivers/scsi/spfc/hw/spfc_cqm_object.c deleted file mode 100644 index 165794e9c7e5..000000000000 --- a/drivers/scsi/spfc/hw/spfc_cqm_object.c +++ /dev/null @@ -1,937 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include <linux/types.h> -#include <linux/sched.h> -#include <linux/pci.h> -#include <linux/module.h> -#include <linux/vmalloc.h> -#include <linux/device.h> -#include <linux/gfp.h> -#include <linux/mm.h> - -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "sphw_hwdev.h" -#include "sphw_hwif.h" - -#include "spfc_cqm_object.h" -#include "spfc_cqm_bitmap_table.h" -#include "spfc_cqm_bat_cla.h" -#include "spfc_cqm_main.h" - -s32 cqm_qpc_mpt_bitmap_alloc(struct cqm_object *object, struct cqm_cla_table *cla_table) -{ - struct cqm_qpc_mpt *common = container_of(object, struct cqm_qpc_mpt, object); - struct cqm_qpc_mpt_info *qpc_mpt_info = container_of(common, - struct cqm_qpc_mpt_info, - common); - struct cqm_handle *cqm_handle = (struct cqm_handle *)object->cqm_handle; - struct cqm_func_capability *func_cap = &cqm_handle->func_capability; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_bitmap *bitmap = &cla_table->bitmap; - u32 index, count; - - count = (ALIGN(object->object_size, cla_table->obj_size)) / cla_table->obj_size; - qpc_mpt_info->index_count = count; - - if (qpc_mpt_info->common.xid == CQM_INDEX_INVALID) { - /* apply for an index normally */ - index = cqm_bitmap_alloc(bitmap, 1U << (cla_table->z + 1), - count, func_cap->xid_alloc_mode); - if (index < bitmap->max_num) { - qpc_mpt_info->common.xid = index; - } else { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_bitmap_alloc)); - return CQM_FAIL; - } - } else { - /* apply for index to be reserved */ - index = cqm_bitmap_alloc_reserved(bitmap, count, - qpc_mpt_info->common.xid); - if (index != qpc_mpt_info->common.xid) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_bitmap_alloc_reserved)); - return CQM_FAIL; - } - } - - return CQM_SUCCESS; -} - -s32 cqm_qpc_mpt_create(struct cqm_object *object) -{ - struct cqm_qpc_mpt *common = container_of(object, struct cqm_qpc_mpt, object); - struct cqm_qpc_mpt_info *qpc_mpt_info = container_of(common, - struct cqm_qpc_mpt_info, - common); - struct cqm_handle *cqm_handle = (struct cqm_handle *)object->cqm_handle; - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct sphw_hwdev 
*handle = cqm_handle->ex_handle; - struct cqm_object_table *object_table = NULL; - struct cqm_cla_table *cla_table = NULL; - struct cqm_bitmap *bitmap = NULL; - u32 index, count; - - /* find the corresponding cla table */ - if (object->object_type == CQM_OBJECT_SERVICE_CTX) { - cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_QPC); - } else { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object->object_type)); - return CQM_FAIL; - } - - CQM_PTR_CHECK_RET(cla_table, CQM_FAIL, - CQM_FUNCTION_FAIL(cqm_cla_table_get)); - - /* Bitmap applies for index. */ - if (cqm_qpc_mpt_bitmap_alloc(object, cla_table) == CQM_FAIL) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_qpc_mpt_bitmap_alloc)); - return CQM_FAIL; - } - - bitmap = &cla_table->bitmap; - index = qpc_mpt_info->common.xid; - count = qpc_mpt_info->index_count; - - /* Find the trunk page from the BAT/CLA and allocate the buffer. - * Ensure that the released buffer has been cleared. - */ - if (cla_table->alloc_static) - qpc_mpt_info->common.vaddr = cqm_cla_get_unlock(cqm_handle, - cla_table, - index, count, - &common->paddr); - else - qpc_mpt_info->common.vaddr = cqm_cla_get_lock(cqm_handle, - cla_table, index, - count, - &common->paddr); - - if (!qpc_mpt_info->common.vaddr) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_get_lock)); - cqm_err(handle->dev_hdl, "Qpc mpt init: qpc mpt vaddr is null, cla_table->alloc_static=%d\n", - cla_table->alloc_static); - goto err1; - } - - /* Indexes are associated with objects, and FC is executed - * in the interrupt context. - */ - object_table = &cla_table->obj_table; - if (object->service_type == CQM_SERVICE_T_FC) { - if (cqm_object_table_insert(cqm_handle, object_table, index, - object, false) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_object_table_insert)); - goto err2; - } - } else { - if (cqm_object_table_insert(cqm_handle, object_table, index, - object, true) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_object_table_insert)); - goto err2; - } - } - - return CQM_SUCCESS; - -err2: - cqm_cla_put(cqm_handle, cla_table, index, count); -err1: - cqm_bitmap_free(bitmap, index, count); - return CQM_FAIL; -} - -struct cqm_qpc_mpt *cqm3_object_qpc_mpt_create(void *ex_handle, u32 service_type, - enum cqm_object_type object_type, - u32 object_size, void *object_priv, - u32 index) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_qpc_mpt_info *qpc_mpt_info = NULL; - struct cqm_handle *cqm_handle = NULL; - s32 ret = CQM_FAIL; - - CQM_PTR_CHECK_RET(ex_handle, NULL, CQM_PTR_NULL(ex_handle)); - - atomic_inc(&handle->hw_stats.cqm_stats.cqm_qpc_mpt_create_cnt); - - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - CQM_PTR_CHECK_RET(cqm_handle, NULL, CQM_PTR_NULL(cqm_handle)); - - if (service_type >= CQM_SERVICE_T_MAX) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(service_type)); - return NULL; - } - /* exception of service registrion check */ - if (!cqm_handle->service[service_type].has_register) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(service_type)); - return NULL; - } - - if (object_type != CQM_OBJECT_SERVICE_CTX) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object_type)); - return NULL; - } - - qpc_mpt_info = kmalloc(sizeof(*qpc_mpt_info), GFP_ATOMIC | __GFP_ZERO); - CQM_PTR_CHECK_RET(qpc_mpt_info, NULL, CQM_ALLOC_FAIL(qpc_mpt_info)); - - qpc_mpt_info->common.object.service_type = service_type; - qpc_mpt_info->common.object.object_type = object_type; - qpc_mpt_info->common.object.object_size = object_size; - 
atomic_set(&qpc_mpt_info->common.object.refcount, 1); - init_completion(&qpc_mpt_info->common.object.free); - qpc_mpt_info->common.object.cqm_handle = cqm_handle; - qpc_mpt_info->common.xid = index; - - qpc_mpt_info->common.priv = object_priv; - - ret = cqm_qpc_mpt_create(&qpc_mpt_info->common.object); - if (ret == CQM_SUCCESS) - return &qpc_mpt_info->common; - - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_qpc_mpt_create)); - kfree(qpc_mpt_info); - return NULL; -} - -void cqm_linkwqe_fill(struct cqm_buf *buf, u32 wqe_per_buf, u32 wqe_size, - u32 wqe_number, bool tail, u8 link_mode) -{ - struct cqm_linkwqe_128B *linkwqe = NULL; - struct cqm_linkwqe *wqe = NULL; - dma_addr_t addr; - u8 *tmp = NULL; - u8 *va = NULL; - u32 i; - - /* The linkwqe of other buffer except the last buffer - * is directly filled to the tail. - */ - for (i = 0; i < buf->buf_number; i++) { - va = (u8 *)(buf->buf_list[i].va); - - if (i != (buf->buf_number - 1)) { - wqe = (struct cqm_linkwqe *)(va + (u32)(wqe_size * wqe_per_buf)); - wqe->wf = CQM_WQE_WF_LINK; - wqe->ctrlsl = CQM_LINK_WQE_CTRLSL_VALUE; - wqe->lp = CQM_LINK_WQE_LP_INVALID; - /* The valid value of link wqe needs to be set to 1. - * Each service ensures that o-bit=1 indicates that - * link wqe is valid and o-bit=0 indicates that - * link wqe is invalid. - */ - wqe->o = CQM_LINK_WQE_OWNER_VALID; - addr = buf->buf_list[(u32)(i + 1)].pa; - wqe->next_page_gpa_h = CQM_ADDR_HI(addr); - wqe->next_page_gpa_l = CQM_ADDR_LW(addr); - } else { /* linkwqe special padding of the last buffer */ - if (tail) { - /* must be filled at the end of the page */ - tmp = va + (u32)(wqe_size * wqe_per_buf); - wqe = (struct cqm_linkwqe *)tmp; - } else { - /* The last linkwqe is filled - * following the last wqe. - */ - tmp = va + (u32)(wqe_size * (wqe_number - - wqe_per_buf * - (buf->buf_number - - 1))); - wqe = (struct cqm_linkwqe *)tmp; - } - wqe->wf = CQM_WQE_WF_LINK; - wqe->ctrlsl = CQM_LINK_WQE_CTRLSL_VALUE; - - /* In link mode, the last link WQE is invalid; - * In ring mode, the last link wqe is valid, pointing to - * the home page, and the lp is set. - */ - if (link_mode == CQM_QUEUE_LINK_MODE) { - wqe->o = CQM_LINK_WQE_OWNER_INVALID; - } else { - /* The lp field of the last link_wqe is set to - * 1, indicating that the meaning of the o-bit - * is reversed. - */ - wqe->lp = CQM_LINK_WQE_LP_VALID; - wqe->o = CQM_LINK_WQE_OWNER_VALID; - addr = buf->buf_list[0].pa; - wqe->next_page_gpa_h = CQM_ADDR_HI(addr); - wqe->next_page_gpa_l = CQM_ADDR_LW(addr); - } - } - - if (wqe_size == CQM_LINKWQE_128B) { - /* After the B800 version, the WQE obit scheme is - * changed. The 64B bits before and after the 128B WQE - * need to be assigned a value: - * ifoe the 63rd bit from the end of the last 64B is - * obit; - * toe the 157th bit from the end of the last 64B is - * obit. 
- */ - linkwqe = (struct cqm_linkwqe_128B *)wqe; - linkwqe->second64B.forth_16B.bs.ifoe_o = CQM_LINK_WQE_OWNER_VALID; - - /* shift 2 bits by right to get length of dw(4B) */ - cqm_swab32((u8 *)wqe, sizeof(struct cqm_linkwqe_128B) >> 2); - } else { - /* shift 2 bits by right to get length of dw(4B) */ - cqm_swab32((u8 *)wqe, sizeof(struct cqm_linkwqe) >> 2); - } - } -} - -s32 cqm_nonrdma_queue_ctx_create(struct cqm_object *object) -{ - struct cqm_queue *common = container_of(object, struct cqm_queue, object); - struct cqm_nonrdma_qinfo *qinfo = container_of(common, struct cqm_nonrdma_qinfo, - common); - struct cqm_handle *cqm_handle = (struct cqm_handle *)object->cqm_handle; - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_object_table *object_table = NULL; - struct cqm_cla_table *cla_table = NULL; - struct cqm_bitmap *bitmap = NULL; - s32 shift; - - if (object->object_type == CQM_OBJECT_NONRDMA_SRQ) { - shift = cqm_shift(qinfo->q_ctx_size); - common->q_ctx_vaddr = cqm_kmalloc_align(qinfo->q_ctx_size, - GFP_KERNEL | __GFP_ZERO, - (u16)shift); - if (!common->q_ctx_vaddr) { - cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(q_ctx_vaddr)); - return CQM_FAIL; - } - - common->q_ctx_paddr = pci_map_single(cqm_handle->dev, - common->q_ctx_vaddr, - qinfo->q_ctx_size, - PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(cqm_handle->dev, - common->q_ctx_paddr)) { - cqm_err(handle->dev_hdl, CQM_MAP_FAIL(q_ctx_vaddr)); - cqm_kfree_align(common->q_ctx_vaddr); - common->q_ctx_vaddr = NULL; - return CQM_FAIL; - } - } else if (object->object_type == CQM_OBJECT_NONRDMA_SCQ) { - /* find the corresponding cla table */ - cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_SCQC); - if (!cla_table) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(nonrdma_cqm_cla_table_get)); - return CQM_FAIL; - } - - /* bitmap applies for index */ - bitmap = &cla_table->bitmap; - qinfo->index_count = - (ALIGN(qinfo->q_ctx_size, cla_table->obj_size)) / - cla_table->obj_size; - qinfo->common.index = cqm_bitmap_alloc(bitmap, 1U << (cla_table->z + 1), - qinfo->index_count, - cqm_handle->func_capability.xid_alloc_mode); - if (qinfo->common.index >= bitmap->max_num) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(nonrdma_cqm_bitmap_alloc)); - return CQM_FAIL; - } - - /* find the trunk page from BAT/CLA and allocate the buffer */ - common->q_ctx_vaddr = cqm_cla_get_lock(cqm_handle, cla_table, - qinfo->common.index, - qinfo->index_count, - &common->q_ctx_paddr); - if (!common->q_ctx_vaddr) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(nonrdma_cqm_cla_get_lock)); - cqm_bitmap_free(bitmap, qinfo->common.index, - qinfo->index_count); - return CQM_FAIL; - } - - /* index and object association */ - object_table = &cla_table->obj_table; - if (object->service_type == CQM_SERVICE_T_FC) { - if (cqm_object_table_insert(cqm_handle, object_table, - qinfo->common.index, object, - false) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(nonrdma_cqm_object_table_insert)); - cqm_cla_put(cqm_handle, cla_table, - qinfo->common.index, - qinfo->index_count); - cqm_bitmap_free(bitmap, qinfo->common.index, - qinfo->index_count); - return CQM_FAIL; - } - } else { - if (cqm_object_table_insert(cqm_handle, object_table, - qinfo->common.index, object, - true) != CQM_SUCCESS) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(nonrdma_cqm_object_table_insert)); - cqm_cla_put(cqm_handle, cla_table, - qinfo->common.index, - qinfo->index_count); - cqm_bitmap_free(bitmap, 
qinfo->common.index, - qinfo->index_count); - return CQM_FAIL; - } - } - } - - return CQM_SUCCESS; -} - -s32 cqm_nonrdma_queue_create(struct cqm_object *object) -{ - struct cqm_queue *common = container_of(object, struct cqm_queue, object); - struct cqm_nonrdma_qinfo *qinfo = container_of(common, struct cqm_nonrdma_qinfo, - common); - struct cqm_handle *cqm_handle = (struct cqm_handle *)object->cqm_handle; - struct cqm_service *service = cqm_handle->service + object->service_type; - struct cqm_buf *q_room_buf = &common->q_room_buf_1; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - u32 wqe_number = qinfo->common.object.object_size; - u32 wqe_size = qinfo->wqe_size; - u32 order = service->buf_order; - u32 buf_number, buf_size; - bool tail = false; /* determine whether the linkwqe is at the end of the page */ - - /* When creating a CQ/SCQ queue, the page size is 4 KB, - * the linkwqe must be at the end of the page. - */ - if (object->object_type == CQM_OBJECT_NONRDMA_EMBEDDED_CQ || - object->object_type == CQM_OBJECT_NONRDMA_SCQ) { - /* depth: 2^n-aligned; depth range: 256-32 K */ - if (wqe_number < CQM_CQ_DEPTH_MIN || - wqe_number > CQM_CQ_DEPTH_MAX) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(wqe_number)); - return CQM_FAIL; - } - if (!cqm_check_align(wqe_number)) { - cqm_err(handle->dev_hdl, "Nonrdma queue alloc: wqe_number is not align on 2^n\n"); - return CQM_FAIL; - } - - order = CQM_4K_PAGE_ORDER; /* wqe page 4k */ - tail = true; /* The linkwqe must be at the end of the page. */ - buf_size = CQM_4K_PAGE_SIZE; - } else { - buf_size = (u32)(PAGE_SIZE << order); - } - - /* Calculate the total number of buffers required, - * -1 indicates that the link wqe in a buffer is deducted. - */ - qinfo->wqe_per_buf = (buf_size / wqe_size) - 1; - /* number of linkwqes that are included in the depth transferred - * by the service - */ - buf_number = ALIGN((wqe_size * wqe_number), buf_size) / buf_size; - - /* apply for buffer */ - q_room_buf->buf_number = buf_number; - q_room_buf->buf_size = buf_size; - q_room_buf->page_number = buf_number << order; - if (cqm_buf_alloc(cqm_handle, q_room_buf, false) == CQM_FAIL) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_buf_alloc)); - return CQM_FAIL; - } - /* fill link wqe, wqe_number - buf_number is the number of wqe without - * link wqe - */ - cqm_linkwqe_fill(q_room_buf, qinfo->wqe_per_buf, wqe_size, - wqe_number - buf_number, tail, - common->queue_link_mode); - - /* create queue header */ - qinfo->common.q_header_vaddr = cqm_kmalloc_align(sizeof(struct cqm_queue_header), - GFP_KERNEL | __GFP_ZERO, - CQM_QHEAD_ALIGN_ORDER); - if (!qinfo->common.q_header_vaddr) { - cqm_err(handle->dev_hdl, CQM_ALLOC_FAIL(q_header_vaddr)); - goto err1; - } - - common->q_header_paddr = pci_map_single(cqm_handle->dev, - qinfo->common.q_header_vaddr, - sizeof(struct cqm_queue_header), - PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(cqm_handle->dev, common->q_header_paddr)) { - cqm_err(handle->dev_hdl, CQM_MAP_FAIL(q_header_vaddr)); - goto err2; - } - - /* create queue ctx */ - if (cqm_nonrdma_queue_ctx_create(object) == CQM_FAIL) { - cqm_err(handle->dev_hdl, - CQM_FUNCTION_FAIL(cqm_nonrdma_queue_ctx_create)); - goto err3; - } - - return CQM_SUCCESS; - -err3: - pci_unmap_single(cqm_handle->dev, common->q_header_paddr, - sizeof(struct cqm_queue_header), PCI_DMA_BIDIRECTIONAL); -err2: - cqm_kfree_align(qinfo->common.q_header_vaddr); - qinfo->common.q_header_vaddr = NULL; -err1: - cqm_buf_free(q_room_buf, cqm_handle->dev); - return CQM_FAIL; -} - -struct cqm_queue 
*cqm3_object_fc_srq_create(void *ex_handle, u32 service_type, - enum cqm_object_type object_type, - u32 wqe_number, u32 wqe_size, - void *object_priv) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_nonrdma_qinfo *nonrdma_qinfo = NULL; - struct cqm_handle *cqm_handle = NULL; - struct cqm_service *service = NULL; - u32 valid_wqe_per_buffer; - u32 wqe_sum; /* include linkwqe, normal wqe */ - u32 buf_size; - u32 buf_num; - s32 ret; - - CQM_PTR_CHECK_RET(ex_handle, NULL, CQM_PTR_NULL(ex_handle)); - - atomic_inc(&handle->hw_stats.cqm_stats.cqm_fc_srq_create_cnt); - - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - CQM_PTR_CHECK_RET(cqm_handle, NULL, CQM_PTR_NULL(cqm_handle)); - - /* service_type must be fc */ - if (service_type != CQM_SERVICE_T_FC) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(service_type)); - return NULL; - } - - /* exception of service unregistered check */ - if (!cqm_handle->service[service_type].has_register) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(service_type)); - return NULL; - } - - /* wqe_size cannot exceed PAGE_SIZE and must be 2^n aligned. */ - if (wqe_size >= PAGE_SIZE || (!cqm_check_align(wqe_size))) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(wqe_size)); - return NULL; - } - - /* FC RQ is SRQ. (Different from the SRQ concept of TOE, FC indicates - * that packets received by all flows are placed on the same RQ. - * The SRQ of TOE is similar to the RQ resource pool.) - */ - if (object_type != CQM_OBJECT_NONRDMA_SRQ) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object_type)); - return NULL; - } - - service = &cqm_handle->service[service_type]; - buf_size = (u32)(PAGE_SIZE << (service->buf_order)); - /* subtract 1 link wqe */ - valid_wqe_per_buffer = buf_size / wqe_size - 1; - buf_num = wqe_number / valid_wqe_per_buffer; - if (wqe_number % valid_wqe_per_buffer != 0) - buf_num++; - - /* calculate the total number of WQEs */ - wqe_sum = buf_num * (valid_wqe_per_buffer + 1); - nonrdma_qinfo = kmalloc(sizeof(*nonrdma_qinfo), GFP_KERNEL | __GFP_ZERO); - CQM_PTR_CHECK_RET(nonrdma_qinfo, NULL, CQM_ALLOC_FAIL(nonrdma_qinfo)); - - /* initialize object member */ - nonrdma_qinfo->common.object.service_type = service_type; - nonrdma_qinfo->common.object.object_type = object_type; - /* total number of WQEs */ - nonrdma_qinfo->common.object.object_size = wqe_sum; - atomic_set(&nonrdma_qinfo->common.object.refcount, 1); - init_completion(&nonrdma_qinfo->common.object.free); - nonrdma_qinfo->common.object.cqm_handle = cqm_handle; - - /* Initialize the doorbell used by the current queue. - * The default doorbell is the hardware doorbell. - */ - nonrdma_qinfo->common.current_q_doorbell = CQM_HARDWARE_DOORBELL; - /* Currently, the connection mode is fixed. In the future, - * the service needs to transfer the connection mode. - */ - nonrdma_qinfo->common.queue_link_mode = CQM_QUEUE_RING_MODE; - - /* initialize public members */ - nonrdma_qinfo->common.priv = object_priv; - nonrdma_qinfo->common.valid_wqe_num = wqe_sum - buf_num; - - /* initialize internal private members */ - nonrdma_qinfo->wqe_size = wqe_size; - /* RQ (also called SRQ of FC) created by FC services, - * CTX needs to be created. 
- */ - nonrdma_qinfo->q_ctx_size = service->service_template.srq_ctx_size; - - ret = cqm_nonrdma_queue_create(&nonrdma_qinfo->common.object); - if (ret == CQM_SUCCESS) - return &nonrdma_qinfo->common; - - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_fc_queue_create)); - kfree(nonrdma_qinfo); - return NULL; -} - -struct cqm_queue *cqm3_object_nonrdma_queue_create(void *ex_handle, u32 service_type, - enum cqm_object_type object_type, - u32 wqe_number, u32 wqe_size, - void *object_priv) -{ - struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_nonrdma_qinfo *nonrdma_qinfo = NULL; - struct cqm_handle *cqm_handle = NULL; - struct cqm_service *service = NULL; - s32 ret; - - CQM_PTR_CHECK_RET(ex_handle, NULL, CQM_PTR_NULL(ex_handle)); - - atomic_inc(&handle->hw_stats.cqm_stats.cqm_nonrdma_queue_create_cnt); - - cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - CQM_PTR_CHECK_RET(cqm_handle, NULL, CQM_PTR_NULL(cqm_handle)); - - /* exception of service registrion check */ - if (!cqm_handle->service[service_type].has_register) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(service_type)); - return NULL; - } - /* wqe_size can't be more than PAGE_SIZE, can't be zero, must be power - * of 2 the function of cqm_check_align is to check above - */ - if (wqe_size >= PAGE_SIZE || (!cqm_check_align(wqe_size))) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(wqe_size)); - return NULL; - } - - /* nonrdma supports: RQ, SQ, SRQ, CQ, SCQ */ - if (object_type < CQM_OBJECT_NONRDMA_EMBEDDED_RQ || - object_type > CQM_OBJECT_NONRDMA_SCQ) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object_type)); - return NULL; - } - - nonrdma_qinfo = kmalloc(sizeof(*nonrdma_qinfo), GFP_KERNEL | __GFP_ZERO); - CQM_PTR_CHECK_RET(nonrdma_qinfo, NULL, CQM_ALLOC_FAIL(nonrdma_qinfo)); - - nonrdma_qinfo->common.object.service_type = service_type; - nonrdma_qinfo->common.object.object_type = object_type; - nonrdma_qinfo->common.object.object_size = wqe_number; - atomic_set(&nonrdma_qinfo->common.object.refcount, 1); - init_completion(&nonrdma_qinfo->common.object.free); - nonrdma_qinfo->common.object.cqm_handle = cqm_handle; - - /* Initialize the doorbell used by the current queue. - * The default value is hardware doorbell - */ - nonrdma_qinfo->common.current_q_doorbell = CQM_HARDWARE_DOORBELL; - /* Currently, the link mode is hardcoded and needs to be transferred by - * the service side. - */ - nonrdma_qinfo->common.queue_link_mode = CQM_QUEUE_RING_MODE; - - nonrdma_qinfo->common.priv = object_priv; - - /* Initialize internal private members */ - nonrdma_qinfo->wqe_size = wqe_size; - service = &cqm_handle->service[service_type]; - switch (object_type) { - case CQM_OBJECT_NONRDMA_SCQ: - nonrdma_qinfo->q_ctx_size = - service->service_template.scq_ctx_size; - break; - case CQM_OBJECT_NONRDMA_SRQ: - /* Currently, the SRQ of the service is created through a - * dedicated interface. 
- */ - nonrdma_qinfo->q_ctx_size = - service->service_template.srq_ctx_size; - break; - default: - break; - } - - ret = cqm_nonrdma_queue_create(&nonrdma_qinfo->common.object); - if (ret == CQM_SUCCESS) - return &nonrdma_qinfo->common; - - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_nonrdma_queue_create)); - kfree(nonrdma_qinfo); - return NULL; -} - -void cqm_qpc_mpt_delete(struct cqm_object *object) -{ - struct cqm_qpc_mpt *common = container_of(object, struct cqm_qpc_mpt, object); - struct cqm_qpc_mpt_info *qpc_mpt_info = container_of(common, - struct cqm_qpc_mpt_info, - common); - struct cqm_handle *cqm_handle = (struct cqm_handle *)object->cqm_handle; - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_object_table *object_table = NULL; - struct cqm_cla_table *cla_table = NULL; - u32 count = qpc_mpt_info->index_count; - u32 index = qpc_mpt_info->common.xid; - struct cqm_bitmap *bitmap = NULL; - - atomic_inc(&handle->hw_stats.cqm_stats.cqm_qpc_mpt_delete_cnt); - - /* find the corresponding cla table */ - if (object->object_type == CQM_OBJECT_SERVICE_CTX) { - cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_QPC); - } else { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object->object_type)); - return; - } - - CQM_PTR_CHECK_NO_RET(cla_table, - CQM_FUNCTION_FAIL(cqm_cla_table_get_qpc)); - - /* disassociate index and object */ - object_table = &cla_table->obj_table; - if (object->service_type == CQM_SERVICE_T_FC) - cqm_object_table_remove(cqm_handle, object_table, index, object, - false); - else - cqm_object_table_remove(cqm_handle, object_table, index, object, - true); - - /* wait for completion to ensure that all references to - * the QPC are complete - */ - if (atomic_dec_and_test(&object->refcount)) - complete(&object->free); - else - cqm_err(handle->dev_hdl, "Qpc mpt del: object is referred by others, has to wait for completion\n"); - - /* Static QPC allocation must be non-blocking. - * Services ensure that the QPC is referenced - * when the QPC is deleted. - */ - if (!cla_table->alloc_static) - wait_for_completion(&object->free); - - /* release qpc buffer */ - cqm_cla_put(cqm_handle, cla_table, index, count); - - /* release the index to the bitmap */ - bitmap = &cla_table->bitmap; - cqm_bitmap_free(bitmap, index, count); -} - -s32 cqm_qpc_mpt_delete_ret(struct cqm_object *object) -{ - u32 object_type; - - object_type = object->object_type; - switch (object_type) { - case CQM_OBJECT_SERVICE_CTX: - cqm_qpc_mpt_delete(object); - return CQM_SUCCESS; - default: - return CQM_FAIL; - } -} - -void cqm_nonrdma_queue_delete(struct cqm_object *object) -{ - struct cqm_queue *common = container_of(object, struct cqm_queue, object); - struct cqm_nonrdma_qinfo *qinfo = container_of(common, struct cqm_nonrdma_qinfo, - common); - struct cqm_handle *cqm_handle = (struct cqm_handle *)object->cqm_handle; - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct cqm_buf *q_room_buf = &common->q_room_buf_1; - struct sphw_hwdev *handle = cqm_handle->ex_handle; - struct cqm_object_table *object_table = NULL; - struct cqm_cla_table *cla_table = NULL; - struct cqm_bitmap *bitmap = NULL; - u32 index = qinfo->common.index; - u32 count = qinfo->index_count; - - atomic_inc(&handle->hw_stats.cqm_stats.cqm_nonrdma_queue_delete_cnt); - - /* The SCQ has an independent SCQN association. 
*/ - if (object->object_type == CQM_OBJECT_NONRDMA_SCQ) { - cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_SCQC); - CQM_PTR_CHECK_NO_RET(cla_table, CQM_FUNCTION_FAIL(cqm_cla_table_get_queue)); - - /* disassociate index and object */ - object_table = &cla_table->obj_table; - if (object->service_type == CQM_SERVICE_T_FC) - cqm_object_table_remove(cqm_handle, object_table, index, - object, false); - else - cqm_object_table_remove(cqm_handle, object_table, index, - object, true); - } - - /* wait for completion to ensure that all references to - * the QPC are complete - */ - if (atomic_dec_and_test(&object->refcount)) - complete(&object->free); - else - cqm_err(handle->dev_hdl, "Nonrdma queue del: object is referred by others, has to wait for completion\n"); - - wait_for_completion(&object->free); - - /* If the q header exists, release. */ - if (qinfo->common.q_header_vaddr) { - pci_unmap_single(cqm_handle->dev, common->q_header_paddr, - sizeof(struct cqm_queue_header), - PCI_DMA_BIDIRECTIONAL); - - cqm_kfree_align(qinfo->common.q_header_vaddr); - qinfo->common.q_header_vaddr = NULL; - } - - cqm_buf_free(q_room_buf, cqm_handle->dev); - /* SRQ and SCQ have independent CTXs and release. */ - if (object->object_type == CQM_OBJECT_NONRDMA_SRQ) { - /* The CTX of the SRQ of the nordma is - * applied for independently. - */ - if (common->q_ctx_vaddr) { - pci_unmap_single(cqm_handle->dev, common->q_ctx_paddr, - qinfo->q_ctx_size, - PCI_DMA_BIDIRECTIONAL); - - cqm_kfree_align(common->q_ctx_vaddr); - common->q_ctx_vaddr = NULL; - } - } else if (object->object_type == CQM_OBJECT_NONRDMA_SCQ) { - /* The CTX of the SCQ of the nordma is managed by BAT/CLA. */ - cqm_cla_put(cqm_handle, cla_table, index, count); - - /* release the index to the bitmap */ - bitmap = &cla_table->bitmap; - cqm_bitmap_free(bitmap, index, count); - } -} - -s32 cqm_nonrdma_queue_delete_ret(struct cqm_object *object) -{ - u32 object_type; - - object_type = object->object_type; - switch (object_type) { - case CQM_OBJECT_NONRDMA_EMBEDDED_RQ: - case CQM_OBJECT_NONRDMA_EMBEDDED_SQ: - case CQM_OBJECT_NONRDMA_EMBEDDED_CQ: - case CQM_OBJECT_NONRDMA_SCQ: - cqm_nonrdma_queue_delete(object); - return CQM_SUCCESS; - case CQM_OBJECT_NONRDMA_SRQ: - cqm_nonrdma_queue_delete(object); - return CQM_SUCCESS; - default: - return CQM_FAIL; - } -} - -void cqm3_object_delete(struct cqm_object *object) -{ - struct cqm_handle *cqm_handle = NULL; - struct sphw_hwdev *handle = NULL; - - CQM_PTR_CHECK_NO_RET(object, CQM_PTR_NULL(object)); - if (!object->cqm_handle) { - pr_err("[CQM]object del: cqm_handle is null, service type %u, refcount %d\n", - object->service_type, (int)object->refcount.counter); - kfree(object); - return; - } - - cqm_handle = (struct cqm_handle *)object->cqm_handle; - - if (!cqm_handle->ex_handle) { - pr_err("[CQM]object del: ex_handle is null, service type %u, refcount %d\n", - object->service_type, (int)object->refcount.counter); - kfree(object); - return; - } - - handle = cqm_handle->ex_handle; - - if (object->service_type >= CQM_SERVICE_T_MAX) { - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object->service_type)); - kfree(object); - return; - } - - if (cqm_qpc_mpt_delete_ret(object) == CQM_SUCCESS) { - kfree(object); - return; - } - - if (cqm_nonrdma_queue_delete_ret(object) == CQM_SUCCESS) { - kfree(object); - return; - } - - cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object->object_type)); - kfree(object); -} - -struct cqm_object *cqm3_object_get(void *ex_handle, enum cqm_object_type object_type, - u32 index, bool bh) -{ - 
struct sphw_hwdev *handle = (struct sphw_hwdev *)ex_handle; - struct cqm_handle *cqm_handle = (struct cqm_handle *)(handle->cqm_hdl); - struct cqm_bat_table *bat_table = &cqm_handle->bat_table; - struct cqm_object_table *object_table = NULL; - struct cqm_cla_table *cla_table = NULL; - struct cqm_object *object = NULL; - - /* The data flow path takes performance into consideration and - * does not check input parameters. - */ - switch (object_type) { - case CQM_OBJECT_SERVICE_CTX: - cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_QPC); - break; - case CQM_OBJECT_NONRDMA_SCQ: - cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_SCQC); - break; - default: - return NULL; - } - - if (!cla_table) { - cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_table_get)); - return NULL; - } - - object_table = &cla_table->obj_table; - object = cqm_object_table_get(cqm_handle, object_table, index, bh); - return object; -} - -void cqm3_object_put(struct cqm_object *object) -{ - /* The data flow path takes performance into consideration and - * does not check input parameters. - */ - if (atomic_dec_and_test(&object->refcount)) - complete(&object->free); -} diff --git a/drivers/scsi/spfc/hw/spfc_cqm_object.h b/drivers/scsi/spfc/hw/spfc_cqm_object.h deleted file mode 100644 index 02a3e9070162..000000000000 --- a/drivers/scsi/spfc/hw/spfc_cqm_object.h +++ /dev/null @@ -1,279 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPFC_CQM_OBJECT_H -#define SPFC_CQM_OBJECT_H - -#ifdef __cplusplus -#if __cplusplus -extern "C" { -#endif -#endif /* __cplusplus */ - -#define CQM_SUCCESS 0 -#define CQM_FAIL (-1) -/* Ignore the return value and continue */ -#define CQM_CONTINUE 1 - -/* type of WQE is LINK WQE */ -#define CQM_WQE_WF_LINK 1 - -/* chain queue mode */ -#define CQM_QUEUE_LINK_MODE 0 -/* RING queue mode */ -#define CQM_QUEUE_RING_MODE 1 - -#define CQM_CQ_DEPTH_MAX 32768 -#define CQM_CQ_DEPTH_MIN 256 - -/* linkwqe */ -#define CQM_LINK_WQE_CTRLSL_VALUE 2 -#define CQM_LINK_WQE_LP_VALID 1 -#define CQM_LINK_WQE_LP_INVALID 0 -#define CQM_LINK_WQE_OWNER_VALID 1 -#define CQM_LINK_WQE_OWNER_INVALID 0 - -#define CQM_ADDR_HI(addr) ((u32)((u64)(addr) >> 32)) -#define CQM_ADDR_LW(addr) ((u32)((u64)(addr) & 0xffffffff)) - -#define CQM_QPC_LAYOUT_TABLE_SIZE 16 - -#define CQM_MOD_CQM 8 - -/* generic linkwqe structure */ -struct cqm_linkwqe { - u32 rsv1 : 14; /* <reserved field */ - u32 wf : 1; /* <wf */ - u32 rsv2 : 14; /* <reserved field */ - u32 ctrlsl : 2; /* <ctrlsl */ - u32 o : 1; /* <o bit */ - - u32 rsv3 : 31; /* <reserved field */ - u32 lp : 1; /* The lp field determines whether the o-bit meaning is reversed. */ - u32 next_page_gpa_h; - u32 next_page_gpa_l; - u32 next_buffer_addr_h; - u32 next_buffer_addr_l; -}; - -/* SRQ linkwqe structure. The wqe size must not exceed the common RQE size. 
*/ -struct cqm_srq_linkwqe { - struct cqm_linkwqe linkwqe; /* <generic linkwqe structure */ - u32 current_buffer_gpa_h; - u32 current_buffer_gpa_l; - u32 current_buffer_addr_h; - u32 current_buffer_addr_l; - - u32 fast_link_page_addr_h; - u32 fast_link_page_addr_l; - - u32 fixed_next_buffer_addr_h; - u32 fixed_next_buffer_addr_l; -}; - -#define CQM_LINKWQE_128B 128 - -/* first 64B of standard 128B WQE */ -union cqm_linkwqe_first64B { - struct cqm_linkwqe basic_linkwqe; /* <generic linkwqe structure */ - u32 value[16]; /* <reserved field */ -}; - -/* second 64B of standard 128B WQE */ -struct cqm_linkwqe_second64B { - u32 rsvd0[4]; /* <first 16B reserved field */ - u32 rsvd1[4]; /* <second 16B reserved field */ - u32 rsvd2[4]; - - union { - struct { - u32 rsvd0[2]; - u32 rsvd1 : 31; - u32 ifoe_o : 1; /* <o bit of ifoe */ - u32 rsvd2; - } bs; - u32 value[4]; - } forth_16B; /* <fourth 16B */ -}; - -/* standard 128B WQE structure */ -struct cqm_linkwqe_128B { - union cqm_linkwqe_first64B first64B; /* <first 64B of standard 128B WQE */ - struct cqm_linkwqe_second64B second64B; /* <back 64B of standard 128B WQE */ -}; - -/* AEQ type definition */ -enum cqm_aeq_event_type { - CQM_AEQ_BASE_T_FC = 48, /* <FC consists of 8 events:48~55 */ - CQM_AEQ_MAX_T_FC = 56 -}; - -/* service registration template */ -struct service_register_template { - u32 service_type; /* <service type */ - u32 srq_ctx_size; /* <SRQ context size */ - u32 scq_ctx_size; /* <SCQ context size */ - void *service_handle; - u8 (*aeq_level_callback)(void *service_handle, u8 event_type, u8 *val); - void (*aeq_callback)(void *service_handle, u8 event_type, u8 *val); -}; - -/* object operation type definition */ -enum cqm_object_type { - CQM_OBJECT_ROOT_CTX = 0, /* <0:root context, which is compatible with root CTX management */ - CQM_OBJECT_SERVICE_CTX, /* <1:QPC, connection management object */ - CQM_OBJECT_NONRDMA_EMBEDDED_RQ = 10, /* <10:RQ of non-RDMA services, managed by LINKWQE */ - CQM_OBJECT_NONRDMA_EMBEDDED_SQ, /* <11:SQ of non-RDMA services, managed by LINKWQE */ - /* <12:SRQ of non-RDMA services, managed by MTT, but the CQM needs to apply for MTT. */ - CQM_OBJECT_NONRDMA_SRQ, - /* <13:Embedded CQ for non-RDMA services, managed by LINKWQE */ - CQM_OBJECT_NONRDMA_EMBEDDED_CQ, - CQM_OBJECT_NONRDMA_SCQ, /* <14:SCQ of non-RDMA services, managed by LINKWQE */ -}; - -/* return value of the failure to apply for the BITMAP table */ -#define CQM_INDEX_INVALID (~(0U)) - -/* doorbell mode selected by the current Q, hardware doorbell */ -#define CQM_HARDWARE_DOORBELL 1 - -/* single-node structure of the CQM buffer */ -struct cqm_buf_list { - void *va; /* <virtual address */ - dma_addr_t pa; /* <physical address */ - u32 refcount; /* <reference count of the buf, which is used for internal buf management. */ -}; - -/* common management structure of the CQM buffer */ -struct cqm_buf { - struct cqm_buf_list *buf_list; /* <buffer list */ - /* <map the discrete buffer list to a group of consecutive addresses */ - struct cqm_buf_list direct; - u32 page_number; /* <buf_number in quantity of page_number=2^n */ - u32 buf_number; /* <number of buf_list nodes */ - u32 buf_size; /* <PAGE_SIZE in quantity of buf_size=2^n */ -}; - -/* CQM object structure, which can be considered - * as the base class abstracted from all queues/CTX. 
- */ -struct cqm_object { - u32 service_type; /* <service type */ - u32 object_type; /* <object type, such as context, queue, mpt, and mtt, etc */ - u32 object_size; /* <object Size, for queue/CTX/MPT, the unit is Byte*/ - atomic_t refcount; /* <reference counting */ - struct completion free; /* <release completed quantity */ - void *cqm_handle; /* <cqm_handle */ -}; - -/* structure of the QPC and MPT objects of the CQM */ -struct cqm_qpc_mpt { - struct cqm_object object; - u32 xid; - dma_addr_t paddr; /* <physical address of the QPC/MTT memory */ - void *priv; /* <private information about the object of the service driver. */ - u8 *vaddr; /* <virtual address of the QPC/MTT memory */ -}; - -/* queue header structure */ -struct cqm_queue_header { - u64 doorbell_record; /* <SQ/RQ DB content */ - u64 ci_record; /* <CQ DB content */ - u64 rsv1; - u64 rsv2; -}; - -/* queue management structure: for queues of non-RDMA services, embedded queues - * are managed by LinkWQE, SRQ and SCQ are managed by MTT, but MTT needs to be - * applied by CQM; the queue of the RDMA service is managed by the MTT. - */ -struct cqm_queue { - struct cqm_object object; /* <object base class */ - /* <The embedded queue and QP do not have indexes, but the SRQ and SCQ do. */ - u32 index; - /* <private information about the object of the service driver */ - void *priv; - /* <doorbell type selected by the current queue. HW/SW are used for the roce QP. */ - u32 current_q_doorbell; - u32 current_q_room; - struct cqm_buf q_room_buf_1; /* <nonrdma:only q_room_buf_1 can be set to q_room_buf */ - struct cqm_buf q_room_buf_2; /* <The CQ of RDMA reallocates the size of the queue room. */ - struct cqm_queue_header *q_header_vaddr; /* <queue header virtual address */ - dma_addr_t q_header_paddr; /* <physical address of the queue header */ - u8 *q_ctx_vaddr; /* <CTX virtual addresses of SRQ and SCQ */ - dma_addr_t q_ctx_paddr; /* <CTX physical addresses of SRQ and SCQ */ - u32 valid_wqe_num; /* <number of valid WQEs that are successfully created */ - u8 *tail_container; /* <tail pointer of the SRQ container */ - u8 *head_container; /* <head pointer of SRQ container */ - /* <Determine the connection mode during queue creation, such as link and ring. */ - u8 queue_link_mode; -}; - -struct cqm_qpc_layout_table_node { - u32 type; - u32 size; - u32 offset; - struct cqm_object *object; -}; - -struct cqm_qpc_mpt_info { - struct cqm_qpc_mpt common; - /* Different service has different QPC. - * The large QPC/mpt will occupy some continuous indexes in bitmap. - */ - u32 index_count; - struct cqm_qpc_layout_table_node qpc_layout_table[CQM_QPC_LAYOUT_TABLE_SIZE]; -}; - -struct cqm_nonrdma_qinfo { - struct cqm_queue common; - u32 wqe_size; - /* Number of WQEs in each buffer (excluding link WQEs) - * For SRQ, the value is the number of WQEs contained in a container. - */ - u32 wqe_per_buf; - u32 q_ctx_size; - /* When different services use CTXs of different sizes, - * a large CTX occupies multiple consecutive indexes in the bitmap. 
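The *_h/*_l address pairs used throughout these structures (link-WQE GPAs, queue-header physical addresses, and so on) are produced by splitting a dma_addr_t with the CQM_ADDR_HI()/CQM_ADDR_LW() macros defined earlier in this header. A minimal sketch, with the helper name being an assumption:

	/* Illustrative sketch only: publish the queue-header DMA address as
	 * the two 32-bit words expected by hardware-visible structures.
	 */
	static void example_split_queue_header_gpa(const struct cqm_queue *q,
						   u32 *hi, u32 *lo)
	{
		*hi = CQM_ADDR_HI(q->q_header_paddr);
		*lo = CQM_ADDR_LW(q->q_header_paddr);
	}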
- */ - u32 index_count; - /* add for srq */ - u32 container_size; -}; - -/* sending command structure */ -struct cqm_cmd_buf { - void *buf; - dma_addr_t dma; - u16 size; -}; - -struct cqm_queue *cqm3_object_fc_srq_create(void *ex_handle, u32 service_type, - enum cqm_object_type object_type, - u32 wqe_number, u32 wqe_size, - void *object_priv); -struct cqm_qpc_mpt *cqm3_object_qpc_mpt_create(void *ex_handle, u32 service_type, - enum cqm_object_type object_type, - u32 object_size, void *object_priv, - u32 index); -struct cqm_queue *cqm3_object_nonrdma_queue_create(void *ex_handle, u32 service_type, - enum cqm_object_type object_type, - u32 wqe_number, u32 wqe_size, - void *object_priv); -void cqm3_object_delete(struct cqm_object *object); -struct cqm_object *cqm3_object_get(void *ex_handle, enum cqm_object_type object_type, - u32 index, bool bh); -void cqm3_object_put(struct cqm_object *object); - -s32 cqm3_ring_hardware_db_fc(void *ex_handle, u32 service_type, u8 db_count, - u8 pagenum, u64 db); -s32 cqm_ring_direct_wqe_db(void *ex_handle, u32 service_type, u8 db_count, void *direct_wqe); -s32 cqm_ring_direct_wqe_db_fc(void *ex_handle, u32 service_type, void *direct_wqe); - -#ifdef __cplusplus -#if __cplusplus -} -#endif -#endif /* __cplusplus */ - -#endif /* SPFC_CQM_OBJECT_H */ diff --git a/drivers/scsi/spfc/hw/spfc_hba.c b/drivers/scsi/spfc/hw/spfc_hba.c deleted file mode 100644 index b033dcb78bb3..000000000000 --- a/drivers/scsi/spfc/hw/spfc_hba.c +++ /dev/null @@ -1,1751 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "spfc_hba.h" -#include "spfc_module.h" -#include "spfc_utils.h" -#include "spfc_chipitf.h" -#include "spfc_io.h" -#include "spfc_lld.h" -#include "sphw_hw.h" -#include "spfc_cqm_main.h" - -struct spfc_hba_info *spfc_hba[SPFC_HBA_PORT_MAX_NUM]; -ulong probe_bit_map[SPFC_MAX_PROBE_PORT_NUM / SPFC_PORT_NUM_PER_TABLE]; -static ulong card_num_bit_map[SPFC_MAX_PROBE_PORT_NUM / SPFC_PORT_NUM_PER_TABLE]; -static struct spfc_card_num_manage card_num_manage[SPFC_MAX_CARD_NUM]; -spinlock_t probe_spin_lock; -u32 max_parent_qpc_num; - -static int spfc_probe(struct spfc_lld_dev *lld_dev, void **uld_dev, char *uld_dev_name); -static void spfc_remove(struct spfc_lld_dev *lld_dev, void *uld_dev); -static u32 spfc_initial_chip_access(struct spfc_hba_info *hba); -static void spfc_release_chip_access(struct spfc_hba_info *hba); -static u32 spfc_port_config_set(void *hba, enum unf_port_config_set_op opcode, void *var_in); -static u32 spfc_port_config_get(void *hba, enum unf_port_cfg_get_op opcode, void *para_out); -static u32 spfc_port_update_wwn(void *hba, void *para_in); -static u32 spfc_get_chip_info(struct spfc_hba_info *hba); -static u32 spfc_delete_scqc_via_cmdq_sync(struct spfc_hba_info *hba, u32 scqn); -static u32 spfc_delete_srqc_via_cmdq_sync(struct spfc_hba_info *hba, u64 sqrc_gpa); -static u32 spfc_get_hba_pcie_link_state(void *hba, void *link_state); -static u32 spfc_port_check_fw_ready(struct spfc_hba_info *hba); -static u32 spfc_set_port_state(void *hba, void *para_in); - -struct spfc_uld_info fc_uld_info = { - .probe = spfc_probe, - .remove = spfc_remove, - .resume = NULL, - .event = NULL, - .suspend = NULL, - .ioctl = NULL -}; - -struct service_register_template service_cqm_temp = { - .service_type = SERVICE_T_FC, - .scq_ctx_size = SPFC_SCQ_CNTX_SIZE, - .srq_ctx_size = SPFC_SRQ_CNTX_SIZE, /* srq, scq context_size configuration */ - .aeq_callback = spfc_process_aeqe, /* the API of asynchronous event from TILE to 
driver */ -}; - -/* default configuration: auto speed, auto topology, INI+TGT */ -static struct unf_cfg_item spfc_port_cfg_parm[] = { - {"port_id", 0, 0x110000, 0xffffff}, - /* port mode:INI(0x20), TGT(0x10), BOTH(0x30) */ - {"port_mode", 0, 0x20, 0xff}, - /* port topology, 0x3: loop , 0xc:p2p, 0xf:auto, 0x10:vn2vn */ - {"port_topology", 0, 0xf, 0x20}, - {"port_alpa", 0, 0xdead, 0xffff}, /* alpa address of port */ - /* queue depth of originator registered to SCSI midlayer */ - {"max_queue_depth", 0, 512, 512}, - {"sest_num", 0, 2048, 2048}, - {"max_login", 0, 2048, 2048}, - /* nodename from 32 bit to 64 bit */ - {"node_name_high", 0, 0x1000286e, 0xffffffff}, - /* nodename from 0 bit to 31 bit */ - {"node_name_low", 0, 0xd4bbf12f, 0xffffffff}, - /* portname from 32 bit to 64 bit */ - {"port_name_high", 0, 0x2000286e, 0xffffffff}, - /* portname from 0 bit to 31 bit */ - {"port_name_low", 0, 0xd4bbf12f, 0xffffffff}, - /* port speed 0:auto 1:1Gbps 2:2Gbps 3:4Gbps 4:8Gbps 5:16Gbps */ - {"port_speed", 0, 0, 32}, - {"interrupt_delay", 0, 0, 100}, /* unit: us */ - {"tape_support", 0, 0, 1}, /* tape support */ - {"End", 0, 0, 0} -}; - -struct unf_low_level_functioon_op spfc_func_op = { - .low_level_type = UNF_SPFC_FC, - .name = "SPFC", - .xchg_mgr_type = UNF_LOW_LEVEL_MGR_TYPE_PASSTIVE, - .abts_xchg = UNF_NO_EXTRA_ABTS_XCHG, - .passthrough_flag = UNF_LOW_LEVEL_PASS_THROUGH_PORT_LOGIN, - .support_max_npiv_num = UNF_SPFC_MAXNPIV_NUM, - .support_max_ssq_num = SPFC_MAX_SSQ_NUM - 1, - .chip_id = 0, - .support_max_speed = UNF_PORT_SPEED_32_G, - .support_max_rport = UNF_SPFC_MAXRPORT_NUM, - .sfp_type = UNF_PORT_TYPE_FC_SFP, - .rport_release_type = UNF_LOW_LEVEL_RELEASE_RPORT_ASYNC, - .sirt_page_mode = UNF_LOW_LEVEL_SIRT_PAGE_MODE_XCHG, - - /* Link service */ - .service_op = { - .unf_ls_gs_send = spfc_send_ls_gs_cmnd, - .unf_bls_send = spfc_send_bls_cmnd, - .unf_cmnd_send = spfc_send_scsi_cmnd, - .unf_release_rport_res = spfc_free_parent_resource, - .unf_flush_ini_resp_que = spfc_flush_ini_resp_queue, - .unf_alloc_rport_res = spfc_alloc_parent_resource, - .ll_release_xid = spfc_free_xid, - }, - - /* Port Mgr */ - .port_mgr_op = { - .ll_port_config_set = spfc_port_config_set, - .ll_port_config_get = spfc_port_config_get, - } -}; - -struct spfc_port_cfg_op { - enum unf_port_config_set_op opcode; - u32 (*spfc_operation)(void *hba, void *para); -}; - -struct spfc_port_cfg_op spfc_config_set_op[] = { - {UNF_PORT_CFG_SET_PORT_SWITCH, spfc_sfp_switch}, - {UNF_PORT_CFG_UPDATE_WWN, spfc_port_update_wwn}, - {UNF_PORT_CFG_SET_PORT_STATE, spfc_set_port_state}, - {UNF_PORT_CFG_UPDATE_FABRIC_PARAM, spfc_update_fabric_param}, - {UNF_PORT_CFG_UPDATE_PLOGI_PARAM, spfc_update_port_param}, - {UNF_PORT_CFG_SET_BUTT, NULL} -}; - -struct spfc_port_cfg_get_op { - enum unf_port_cfg_get_op opcode; - u32 (*spfc_operation)(void *hba, void *para); -}; - -struct spfc_port_cfg_get_op spfc_config_get_op[] = { - {UNF_PORT_CFG_GET_TOPO_ACT, spfc_get_topo_act}, - {UNF_PORT_CFG_GET_LOOP_MAP, spfc_get_loop_map}, - {UNF_PORT_CFG_GET_WORKBALE_BBCREDIT, spfc_get_workable_bb_credit}, - {UNF_PORT_CFG_GET_WORKBALE_BBSCN, spfc_get_workable_bb_scn}, - {UNF_PORT_CFG_GET_LOOP_ALPA, spfc_get_loop_alpa}, - {UNF_PORT_CFG_GET_MAC_ADDR, spfc_get_chip_msg}, - {UNF_PORT_CFG_GET_PCIE_LINK_STATE, spfc_get_hba_pcie_link_state}, - {UNF_PORT_CFG_GET_BUTT, NULL}, -}; - -static u32 spfc_set_port_state(void *hba, void *para_in) -{ - u32 ret = UNF_RETURN_ERROR; - enum unf_port_config_state port_state = UNF_PORT_CONFIG_STATE_START; - - - FC_CHECK_RETURN_VALUE(hba, 
UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(para_in, UNF_RETURN_ERROR); - - port_state = *((enum unf_port_config_state *)para_in); - switch (port_state) { - case UNF_PORT_CONFIG_STATE_RESET: - ret = (u32)spfc_port_reset(hba); - break; - - default: - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Cannot set port_state(0x%x)", port_state); - break; - } - - return ret; - -} - -static u32 spfc_port_update_wwn(void *hba, void *para_in) -{ - struct unf_port_wwn *port_wwn = NULL; - struct spfc_hba_info *spfc_hba = hba; - - FC_CHECK_RETURN_VALUE(spfc_hba, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(para_in, UNF_RETURN_ERROR); - - port_wwn = (struct unf_port_wwn *)para_in; - - /* Update it to the hba in the later */ - *(u64 *)spfc_hba->sys_node_name = port_wwn->sys_node_name; - *(u64 *)spfc_hba->sys_port_name = port_wwn->sys_port_wwn; - - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_INFO, - "[info]Port(0x%x) updates WWNN(0x%llx) WWPN(0x%llx)", - spfc_hba->port_cfg.port_id, - *(u64 *)spfc_hba->sys_node_name, - *(u64 *)spfc_hba->sys_port_name); - - return RETURN_OK; -} - -static u32 spfc_port_config_set(void *hba, enum unf_port_config_set_op opcode, - void *var_in) -{ - u32 op_idx = 0; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - - for (op_idx = 0; op_idx < sizeof(spfc_config_set_op) / - sizeof(struct spfc_port_cfg_op); op_idx++) { - if (opcode == spfc_config_set_op[op_idx].opcode) { - if (!spfc_config_set_op[op_idx].spfc_operation) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Null operation for configuration, opcode(0x%x), operation ID(0x%x)", - opcode, op_idx); - - return UNF_RETURN_ERROR; - } - return spfc_config_set_op[op_idx].spfc_operation(hba, var_in); - } - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]No operation code for configuration, opcode(0x%x)", - opcode); - - return UNF_RETURN_ERROR; -} - -static u32 spfc_port_config_get(void *hba, enum unf_port_cfg_get_op opcode, - void *para_out) -{ - u32 op_idx = 0; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - - for (op_idx = 0; op_idx < sizeof(spfc_config_get_op) / - sizeof(struct spfc_port_cfg_get_op); op_idx++) { - if (opcode == spfc_config_get_op[op_idx].opcode) { - if (!spfc_config_get_op[op_idx].spfc_operation) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Null operation to get configuration, opcode(0x%x), operation ID(0x%x)", - opcode, op_idx); - return UNF_RETURN_ERROR; - } - return spfc_config_get_op[op_idx].spfc_operation(hba, para_out); - } - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]No operation to get configuration, opcode(0x%x)", - opcode); - - return UNF_RETURN_ERROR; -} - -static u32 spfc_fc_mode_check(void *hw_dev_handle) -{ - FC_CHECK_RETURN_VALUE(hw_dev_handle, UNF_RETURN_ERROR); - - if (!sphw_support_fc(hw_dev_handle, NULL)) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Work mode is error"); - return UNF_RETURN_ERROR; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Selected work mode is FC"); - - return RETURN_OK; -} - -static u32 spfc_check_port_cfg(const struct spfc_port_cfg *port_cfg) -{ - bool topo_condition = false; - bool speed_condition = false; - /* About Work Topology */ - topo_condition = ((port_cfg->port_topology != UNF_TOP_LOOP_MASK) && - (port_cfg->port_topology != UNF_TOP_P2P_MASK) && - (port_cfg->port_topology != UNF_TOP_AUTO_MASK)); - if (topo_condition) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Configured port topology(0x%x) is incorrect", - port_cfg->port_topology); - - return UNF_RETURN_ERROR; - } - - /* About Work Mode */ - if 
(port_cfg->port_mode != UNF_PORT_MODE_INI) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Configured port mode(0x%x) is incorrect", - port_cfg->port_mode); - - return UNF_RETURN_ERROR; - } - - /* About Work Speed */ - speed_condition = ((port_cfg->port_speed != UNF_PORT_SPEED_AUTO) && - (port_cfg->port_speed != UNF_PORT_SPEED_2_G) && - (port_cfg->port_speed != UNF_PORT_SPEED_4_G) && - (port_cfg->port_speed != UNF_PORT_SPEED_8_G) && - (port_cfg->port_speed != UNF_PORT_SPEED_16_G) && - (port_cfg->port_speed != UNF_PORT_SPEED_32_G)); - if (speed_condition) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Configured port speed(0x%x) is incorrect", - port_cfg->port_speed); - - return UNF_RETURN_ERROR; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[info]Check port configuration OK"); - - return RETURN_OK; -} - -static u32 spfc_get_port_cfg(struct spfc_hba_info *hba, - struct spfc_chip_info *chip_info, u8 card_num) -{ -#define UNF_CONFIG_ITEM_LEN 15 - /* Maximum length of a configuration item name, including the end - * character - */ -#define UNF_MAX_ITEM_NAME_LEN (32 + 1) - - /* Get and check parameters */ - char cfg_item[UNF_MAX_ITEM_NAME_LEN]; - u32 ret = UNF_RETURN_ERROR; - struct spfc_hba_info *spfc_hba = hba; - - FC_CHECK_RETURN_VALUE(spfc_hba, UNF_RETURN_ERROR); - memset((void *)cfg_item, 0, sizeof(cfg_item)); - - spfc_hba->card_info.func_num = (sphw_global_func_id(hba->dev_handle)) & UNF_FUN_ID_MASK; - spfc_hba->card_info.card_num = card_num; - - /* The range of PF of FC server is from PF1 to PF2 */ - snprintf(cfg_item, UNF_MAX_ITEM_NAME_LEN, "spfc_cfg_%1u", (spfc_hba->card_info.func_num)); - - cfg_item[UNF_MAX_ITEM_NAME_LEN - 1] = 0; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[info]Get port configuration: %s", cfg_item); - - /* Get configuration parameters from file */ - UNF_LOWLEVEL_GET_CFG_PARMS(ret, cfg_item, &spfc_port_cfg_parm[ARRAY_INDEX_0], - (u32 *)(void *)(&spfc_hba->port_cfg), - sizeof(spfc_port_cfg_parm) / sizeof(struct unf_cfg_item)); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) can't get configuration", - spfc_hba->port_cfg.port_id); - - return ret; - } - - if (max_parent_qpc_num <= SPFC_MAX_PARENT_QPC_NUM) { - spfc_hba->port_cfg.sest_num = UNF_SPFC_MAXRPORT_NUM; - spfc_hba->port_cfg.max_login = UNF_SPFC_MAXRPORT_NUM; - } - - spfc_hba->port_cfg.port_id &= SPFC_PORT_ID_MASK; - spfc_hba->port_cfg.port_id |= spfc_hba->card_info.card_num << UNF_SHIFT_8; - spfc_hba->port_cfg.port_id |= spfc_hba->card_info.func_num; - spfc_hba->port_cfg.tape_support = (u32)chip_info->tape_support; - - /* Parameters check */ - ret = spfc_check_port_cfg(&spfc_hba->port_cfg); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) check configuration incorrect", - spfc_hba->port_cfg.port_id); - - return ret; - } - - /* Set configuration which is got from file */ - spfc_hba->port_speed_cfg = spfc_hba->port_cfg.port_speed; - spfc_hba->port_topo_cfg = spfc_hba->port_cfg.port_topology; - spfc_hba->port_mode = (enum unf_port_mode)(spfc_hba->port_cfg.port_mode); - - return ret; -} - -void spfc_generate_sys_wwn(struct spfc_hba_info *hba) -{ - FC_CHECK_RETURN_VOID(hba); - - *(u64 *)hba->sys_node_name = (((u64)hba->port_cfg.node_name_hi << UNF_SHIFT_32) | - (hba->port_cfg.node_name_lo)); - *(u64 *)hba->sys_port_name = (((u64)hba->port_cfg.port_name_hi << UNF_SHIFT_32) | - (hba->port_cfg.port_name_lo)); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[info]NodeName = 0x%llx, PortName = 0x%llx", - *(u64 *)hba->sys_node_name, 
*(u64 *)hba->sys_port_name); -} - -static u32 spfc_create_queues(struct spfc_hba_info *hba) -{ - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - - SPFC_FUNCTION_ENTER; - - /* Initialize shared resources of SCQ and SRQ in parent queue */ - ret = spfc_create_common_share_queues(hba); - if (ret != RETURN_OK) - goto out_create_common_queue_fail; - - /* Initialize parent queue manager resources */ - ret = spfc_alloc_parent_queue_mgr(hba); - if (ret != RETURN_OK) - goto out_free_share_queue_resource; - - /* Initialize shared WQE page pool in parent SQ */ - ret = spfc_alloc_parent_sq_wqe_page_pool(hba); - if (ret != RETURN_OK) - goto out_free_parent_queue_resource; - - ret = spfc_create_ssq(hba); - if (ret != RETURN_OK) - goto out_free_parent_wqe_page_pool; - - /* - * Notice: the configuration of SQ and QID(default_sqid) - * must be the same in FC - */ - hba->next_clear_sq = 0; - hba->default_sqid = SPFC_QID_SQ; - - SPFC_FUNCTION_RETURN; - return RETURN_OK; -out_free_parent_wqe_page_pool: - spfc_free_parent_sq_wqe_page_pool(hba); - -out_free_parent_queue_resource: - spfc_free_parent_queue_mgr(hba); - -out_free_share_queue_resource: - spfc_flush_scq_ctx(hba); - spfc_flush_srq_ctx(hba); - spfc_destroy_common_share_queues(hba); - -out_create_common_queue_fail: - SPFC_FUNCTION_RETURN; - - return ret; -} - -static u32 spfc_alloc_dma_buffers(struct spfc_hba_info *hba) -{ - struct pci_dev *pci_dev = NULL; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - pci_dev = hba->pci_dev; - FC_CHECK_RETURN_VALUE(pci_dev, UNF_RETURN_ERROR); - - hba->sfp_buf = dma_alloc_coherent(&hba->pci_dev->dev, - sizeof(struct unf_sfp_err_rome_info), - &hba->sfp_dma_addr, GFP_KERNEL); - if (!hba->sfp_buf) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) can't allocate SFP DMA buffer", - hba->port_cfg.port_id); - - return UNF_RETURN_ERROR; - } - memset(hba->sfp_buf, 0, sizeof(struct unf_sfp_err_rome_info)); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) allocate sfp buffer(0x%p 0x%llx)", - hba->port_cfg.port_id, hba->sfp_buf, - (u64)hba->sfp_dma_addr); - - return RETURN_OK; -} - -static void spfc_free_dma_buffers(struct spfc_hba_info *hba) -{ - struct pci_dev *pci_dev = NULL; - - FC_CHECK_RETURN_VOID(hba); - pci_dev = hba->pci_dev; - FC_CHECK_RETURN_VOID(pci_dev); - - if (hba->sfp_buf) { - dma_free_coherent(&pci_dev->dev, sizeof(struct unf_sfp_err_rome_info), - hba->sfp_buf, hba->sfp_dma_addr); - - hba->sfp_buf = NULL; - hba->sfp_dma_addr = 0; - } -} - -static void spfc_destroy_queues(struct spfc_hba_info *hba) -{ - /* Free ssq */ - spfc_free_ssq(hba, SPFC_MAX_SSQ_NUM); - - /* Free parent queue resource */ - spfc_free_parent_queues(hba); - - /* Free queue manager resource */ - spfc_free_parent_queue_mgr(hba); - - /* Free linked List SQ and WQE page pool resource */ - spfc_free_parent_sq_wqe_page_pool(hba); - - /* Free shared SRQ and SCQ queue resource */ - spfc_destroy_common_share_queues(hba); -} - -static u32 spfc_alloc_default_session(struct spfc_hba_info *hba) -{ - struct unf_port_info rport_info = {0}; - u32 wait_sq_cnt = 0; - - rport_info.nport_id = 0xffffff; - rport_info.rport_index = SPFC_DEFAULT_RPORT_INDEX; - rport_info.local_nport_id = 0xffffff; - rport_info.port_name = 0; - rport_info.cs_ctrl = 0x81; - - if (spfc_alloc_parent_resource((void *)hba, &rport_info) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Alloc default session resource failed"); - goto failed; - } - - for (;;) { - if (hba->default_sq_info.default_sq_flag == 1) - break; 
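	/* This wait loop polls default_sq_flag every
	 * SPFC_WAIT_SESS_ENABLE_ONE_TIME_MS (1 ms) and gives up after
	 * SPFC_MAX_WAIT_LOOP_TIMES iterations, roughly 10 seconds in total;
	 * on timeout the flag is parked at 0xF and the setup is rolled back.
	 */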
- - msleep(SPFC_WAIT_SESS_ENABLE_ONE_TIME_MS); - wait_sq_cnt++; - if (wait_sq_cnt >= SPFC_MAX_WAIT_LOOP_TIMES) { - hba->default_sq_info.default_sq_flag = 0xF; - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Wait Default Session enable timeout"); - goto failed; - } - } - - if (spfc_mbx_config_default_session(hba, 1) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Notify up config default session table fail"); - goto failed; - } - - return RETURN_OK; - -failed: - spfc_sess_resource_free_sync((void *)hba, &rport_info); - return UNF_RETURN_ERROR; -} - -static u32 spfc_init_host_res(struct spfc_hba_info *hba) -{ - u32 ret = RETURN_OK; - struct spfc_hba_info *spfc_hba = hba; - - FC_CHECK_RETURN_VALUE(spfc_hba, UNF_RETURN_ERROR); - - SPFC_FUNCTION_ENTER; - - /* Initialize spin lock */ - spin_lock_init(&spfc_hba->hba_lock); - spin_lock_init(&spfc_hba->flush_state_lock); - spin_lock_init(&spfc_hba->clear_state_lock); - spin_lock_init(&spfc_hba->spin_lock); - spin_lock_init(&spfc_hba->srq_delay_info.srq_lock); - /* Initialize init_completion */ - init_completion(&spfc_hba->hba_init_complete); - init_completion(&spfc_hba->mbox_complete); - init_completion(&spfc_hba->vpf_complete); - init_completion(&spfc_hba->fcfi_complete); - init_completion(&spfc_hba->get_sfp_complete); - /* Step-1: initialize the communication channel between driver and uP */ - ret = spfc_initial_chip_access(spfc_hba); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]SPFC port(0x%x) can't initialize chip access", - spfc_hba->port_cfg.port_id); - - goto out_unmap_memory; - } - /* Step-2: get chip configuration information before creating - * queue resources - */ - ret = spfc_get_chip_info(spfc_hba); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]SPFC port(0x%x) can't get chip information", - spfc_hba->port_cfg.port_id); - - goto out_unmap_memory; - } - - /* Step-3: create queue resources */ - ret = spfc_create_queues(spfc_hba); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]SPFC port(0x%x) can't create queues", - spfc_hba->port_cfg.port_id); - - goto out_release_chip_access; - } - /* Allocate DMA buffer (SFP information) */ - ret = spfc_alloc_dma_buffers(spfc_hba); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]SPFC port(0x%x) can't allocate DMA buffers", - spfc_hba->port_cfg.port_id); - - goto out_destroy_queues; - } - /* Initialize status parameters */ - spfc_hba->active_port_speed = UNF_PORT_SPEED_UNKNOWN; - spfc_hba->active_topo = UNF_ACT_TOP_UNKNOWN; - spfc_hba->sfp_on = false; - spfc_hba->port_loop_role = UNF_LOOP_ROLE_MASTER_OR_SLAVE; - spfc_hba->phy_link = UNF_PORT_LINK_DOWN; - spfc_hba->queue_set_stage = SPFC_QUEUE_SET_STAGE_INIT; - - /* Initialize parameters referring to the lowlevel */ - spfc_hba->remote_rttov_tag = 0; - spfc_hba->port_bb_scn_cfg = SPFC_LOWLEVEL_DEFAULT_BB_SCN; - - /* Initialize timer, and the unit of E_D_TOV is ms */ - spfc_hba->remote_edtov_tag = 0; - spfc_hba->remote_bb_credit = 0; - spfc_hba->compared_bb_scn = 0; - spfc_hba->compared_edtov_val = UNF_DEFAULT_EDTOV; - spfc_hba->compared_ratov_val = UNF_DEFAULT_RATOV; - spfc_hba->removing = false; - spfc_hba->dev_present = true; - - /* Initialize parameters about cos */ - spfc_hba->cos_bitmap = cos_bit_map; - memset(spfc_hba->cos_rport_cnt, 0, SPFC_MAX_COS_NUM * sizeof(atomic_t)); - - /* Mailbox access completion */ - complete(&spfc_hba->mbox_complete); - - ret = spfc_alloc_default_session(spfc_hba); - if (ret != RETURN_OK) { - 
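	/* Unwind in reverse order of setup: free the DMA buffers, flush and
	 * destroy the queues, then release chip access.
	 */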
FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]SPFC port(0x%x) can't allocate Default Session", - spfc_hba->port_cfg.port_id); - - goto out_destroy_dma_buff; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]SPFC port(0x%x) initialize host resources succeeded", - spfc_hba->port_cfg.port_id); - - return ret; - -out_destroy_dma_buff: - spfc_free_dma_buffers(spfc_hba); -out_destroy_queues: - spfc_flush_scq_ctx(spfc_hba); - spfc_flush_srq_ctx(spfc_hba); - spfc_destroy_queues(spfc_hba); - -out_release_chip_access: - spfc_release_chip_access(spfc_hba); - -out_unmap_memory: - return ret; -} - -static u32 spfc_get_chip_info(struct spfc_hba_info *hba) -{ - u32 ret = RETURN_OK; - u32 exi_count = 0; - u32 exi_base = 0; - u32 exi_stride = 0; - u32 fun_idx = 0; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - - hba->vpid_start = hba->service_cap.dev_fc_cap.vp_id_start; - hba->vpid_end = hba->service_cap.dev_fc_cap.vp_id_end; - fun_idx = sphw_global_func_id(hba->dev_handle); - - exi_count = (max_parent_qpc_num <= SPFC_MAX_PARENT_QPC_NUM) ? - exit_count >> UNF_SHIFT_1 : exit_count; - exi_stride = (max_parent_qpc_num <= SPFC_MAX_PARENT_QPC_NUM) ? - exit_stride >> UNF_SHIFT_1 : exit_stride; - exi_base = exit_base; - - exi_base += (fun_idx * exi_stride); - hba->exi_base = SPFC_LSW(exi_base); - hba->exi_count = SPFC_LSW(exi_count); - hba->max_support_speed = max_speed; - hba->port_index = SPFC_LSB(fun_idx); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) base information: PortIndex=0x%x, ExiBase=0x%x, ExiCount=0x%x, VpIdStart=0x%x, VpIdEnd=0x%x, MaxSpeed=0x%x, Speed=0x%x, Topo=0x%x", - hba->port_cfg.port_id, hba->port_index, hba->exi_base, - hba->exi_count, hba->vpid_start, hba->vpid_end, - hba->max_support_speed, hba->port_speed_cfg, hba->port_topo_cfg); - - return ret; -} - -static u32 spfc_initial_chip_access(struct spfc_hba_info *hba) -{ - int ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - - /* 1. Initialize cqm access related with scq, emb cq, aeq(ucode-->driver) */ - service_cqm_temp.service_handle = hba; - - ret = cqm3_service_register(hba->dev_handle, &service_cqm_temp); - if (ret != CQM_SUCCESS) - return UNF_RETURN_ERROR; - - /* 2. 
Initialize mailbox(driver-->up), aeq(up--->driver) access */ - ret = sphw_register_mgmt_msg_cb(hba->dev_handle, COMM_MOD_FC, hba, - spfc_up_msg2driver_proc); - if (ret != CQM_SUCCESS) - goto out_unreg_cqm; - - return RETURN_OK; - -out_unreg_cqm: - cqm3_service_unregister(hba->dev_handle, SERVICE_T_FC); - - return UNF_RETURN_ERROR; -} - -static void spfc_release_chip_access(struct spfc_hba_info *hba) -{ - FC_CHECK_RETURN_VOID(hba); - FC_CHECK_RETURN_VOID(hba->dev_handle); - - sphw_unregister_mgmt_msg_cb(hba->dev_handle, COMM_MOD_FC); - - cqm3_service_unregister(hba->dev_handle, SERVICE_T_FC); -} - -static void spfc_update_lport_config(struct spfc_hba_info *hba, - struct unf_low_level_functioon_op *lowlevel_func) -{ -#define SPFC_MULTI_CONF_NONSUPPORT 0 - - struct unf_lport_cfg_item *lport_cfg = NULL; - - lport_cfg = &lowlevel_func->lport_cfg_items; - - if (hba->port_cfg.max_login < lowlevel_func->support_max_rport) - lport_cfg->max_login = hba->port_cfg.max_login; - else - lport_cfg->max_login = lowlevel_func->support_max_rport; - - if (hba->port_cfg.sest_num >> UNF_SHIFT_1 < UNF_RESERVE_SFS_XCHG) - lport_cfg->max_io = hba->port_cfg.sest_num; - else - lport_cfg->max_io = hba->port_cfg.sest_num - UNF_RESERVE_SFS_XCHG; - - lport_cfg->max_sfs_xchg = UNF_MAX_SFS_XCHG; - lport_cfg->port_id = hba->port_cfg.port_id; - lport_cfg->port_mode = hba->port_cfg.port_mode; - lport_cfg->port_topology = hba->port_cfg.port_topology; - lport_cfg->max_queue_depth = hba->port_cfg.max_queue_depth; - - lport_cfg->port_speed = hba->port_cfg.port_speed; - lport_cfg->tape_support = hba->port_cfg.tape_support; - - lowlevel_func->sys_port_name = *(u64 *)hba->sys_port_name; - lowlevel_func->sys_node_name = *(u64 *)hba->sys_node_name; - - /* Update chip information */ - lowlevel_func->dev = hba->pci_dev; - lowlevel_func->chip_info.chip_work_mode = hba->work_mode; - lowlevel_func->chip_info.chip_type = hba->chip_type; - lowlevel_func->chip_info.disable_err_flag = 0; - lowlevel_func->support_max_speed = hba->max_support_speed; - lowlevel_func->support_min_speed = hba->min_support_speed; - - lowlevel_func->chip_id = 0; - - lowlevel_func->sfp_type = UNF_PORT_TYPE_FC_SFP; - - lowlevel_func->multi_conf_support = SPFC_MULTI_CONF_NONSUPPORT; - lowlevel_func->support_max_hot_tag_range = hba->port_cfg.sest_num; - lowlevel_func->update_fw_reset_active = UNF_PORT_UNGRADE_FW_RESET_INACTIVE; - lowlevel_func->port_type = 0; /* DRV_PORT_ENTITY_TYPE_PHYSICAL */ - - if ((lport_cfg->port_id & UNF_FIRST_LPORT_ID_MASK) == lport_cfg->port_id) - lowlevel_func->support_upgrade_report = UNF_PORT_SUPPORT_UPGRADE_REPORT; - else - lowlevel_func->support_upgrade_report = UNF_PORT_UNSUPPORT_UPGRADE_REPORT; -} - -static u32 spfc_create_lport(struct spfc_hba_info *hba) -{ - void *lport = NULL; - struct unf_low_level_functioon_op lowlevel_func; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - spfc_func_op.dev = hba->pci_dev; - memcpy(&lowlevel_func, &spfc_func_op, sizeof(struct unf_low_level_functioon_op)); - - /* Update port configuration table */ - spfc_update_lport_config(hba, &lowlevel_func); - - /* Apply for lport resources */ - UNF_LOWLEVEL_ALLOC_LPORT(lport, hba, &lowlevel_func); - if (!lport) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) can't allocate Lport", - hba->port_cfg.port_id); - - return UNF_RETURN_ERROR; - } - hba->lport = lport; - - return RETURN_OK; -} - -void spfc_release_probe_index(u32 probe_index) -{ - if (probe_index >= SPFC_MAX_PROBE_PORT_NUM) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Probe 
index(0x%x) is invalid", probe_index); - - return; - } - - spin_lock(&probe_spin_lock); - if (!test_bit((int)probe_index, (const ulong *)probe_bit_map)) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Probe index(0x%x) is not probed", - probe_index); - - spin_unlock(&probe_spin_lock); - - return; - } - - clear_bit((int)probe_index, probe_bit_map); - spin_unlock(&probe_spin_lock); -} - -static void spfc_delete_default_session(struct spfc_hba_info *hba) -{ - struct unf_port_info rport_info = {0}; - - rport_info.nport_id = 0xffffff; - rport_info.rport_index = SPFC_DEFAULT_RPORT_INDEX; - rport_info.local_nport_id = 0xffffff; - rport_info.port_name = 0; - rport_info.cs_ctrl = 0x81; - - /* Need config table to up first, then delete default session */ - (void)spfc_mbx_config_default_session(hba, 0); - spfc_sess_resource_free_sync((void *)hba, &rport_info); -} - -static void spfc_release_host_res(struct spfc_hba_info *hba) -{ - spfc_free_dma_buffers(hba); - - spfc_destroy_queues(hba); - - spfc_release_chip_access(hba); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) release low level resource done", - hba->port_cfg.port_id); -} - -static struct spfc_hba_info *spfc_init_hba(struct pci_dev *pci_dev, - void *hw_dev_handle, - struct spfc_chip_info *chip_info, - u8 card_num) -{ - u32 ret = RETURN_OK; - struct spfc_hba_info *hba = NULL; - - FC_CHECK_RETURN_VALUE(pci_dev, NULL); - FC_CHECK_RETURN_VALUE(hw_dev_handle, NULL); - - /* Allocate HBA */ - hba = kmalloc(sizeof(struct spfc_hba_info), GFP_ATOMIC); - FC_CHECK_RETURN_VALUE(hba, NULL); - memset(hba, 0, sizeof(struct spfc_hba_info)); - - /* Heartbeat default */ - hba->heart_status = 1; - /* Private data in pciDev */ - hba->pci_dev = pci_dev; - hba->dev_handle = hw_dev_handle; - - /* Work mode */ - hba->work_mode = chip_info->work_mode; - /* Create work queue */ - hba->work_queue = create_singlethread_workqueue("spfc"); - if (!hba->work_queue) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Spfc creat workqueue failed"); - - goto out_free_hba; - } - /* Init delay work */ - INIT_DELAYED_WORK(&hba->srq_delay_info.del_work, spfc_rcvd_els_from_srq_timeout); - INIT_WORK(&hba->els_srq_clear_work, spfc_wq_destroy_els_srq); - - /* Notice: Only use FC features */ - (void)sphw_support_fc(hw_dev_handle, &hba->service_cap); - /* Check parent context available */ - if (hba->service_cap.dev_fc_cap.max_parent_qpc_num == 0) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]FC parent context is not allocated in this function"); - - goto out_destroy_workqueue; - } - max_parent_qpc_num = hba->service_cap.dev_fc_cap.max_parent_qpc_num; - - /* Get port configuration */ - ret = spfc_get_port_cfg(hba, chip_info, card_num); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Can't get port configuration"); - - goto out_destroy_workqueue; - } - /* Get WWN */ - spfc_generate_sys_wwn(hba); - - /* Initialize host resources */ - ret = spfc_init_host_res(hba); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]SPFC port(0x%x) can't initialize host resource", - hba->port_cfg.port_id); - - goto out_destroy_workqueue; - } - /* Local Port create */ - ret = spfc_create_lport(hba); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]SPFC port(0x%x) can't create lport", - hba->port_cfg.port_id); - goto out_release_host_res; - } - complete(&hba->hba_init_complete); - - /* Print reference count */ - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "[info]Port(0x%x) probe succeeded. 
Memory reference is 0x%x", - hba->port_cfg.port_id, atomic_read(&fc_mem_ref)); - - return hba; - -out_release_host_res: - spfc_delete_default_session(hba); - spfc_flush_scq_ctx(hba); - spfc_flush_srq_ctx(hba); - spfc_release_host_res(hba); - -out_destroy_workqueue: - flush_workqueue(hba->work_queue); - destroy_workqueue(hba->work_queue); - hba->work_queue = NULL; - -out_free_hba: - kfree(hba); - - return NULL; -} - -void spfc_get_total_probed_num(u32 *probe_cnt) -{ - u32 i = 0; - u32 cnt = 0; - - spin_lock(&probe_spin_lock); - for (i = 0; i < SPFC_MAX_PROBE_PORT_NUM; i++) { - if (test_bit((int)i, (const ulong *)probe_bit_map)) - cnt++; - } - - *probe_cnt = cnt; - spin_unlock(&probe_spin_lock); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[info]Probed port total number is 0x%x", cnt); -} - -u32 spfc_assign_card_num(struct spfc_lld_dev *lld_dev, - struct spfc_chip_info *chip_info, u8 *card_num) -{ - u8 i = 0; - u64 card_index = 0; - - card_index = (!pci_is_root_bus(lld_dev->pdev->bus)) ? - lld_dev->pdev->bus->parent->number : lld_dev->pdev->bus->number; - - spin_lock(&probe_spin_lock); - - for (i = 0; i < SPFC_MAX_CARD_NUM; i++) { - if (test_bit((int)i, (const ulong *)card_num_bit_map)) { - if (card_num_manage[i].card_number == - card_index && !card_num_manage[i].is_removing - ) { - card_num_manage[i].port_count++; - *card_num = i; - spin_unlock(&probe_spin_lock); - return RETURN_OK; - } - } - } - - for (i = 0; i < SPFC_MAX_CARD_NUM; i++) { - if (!test_bit((int)i, (const ulong *)card_num_bit_map)) { - card_num_manage[i].card_number = card_index; - card_num_manage[i].port_count = 1; - card_num_manage[i].is_removing = false; - - *card_num = i; - set_bit(i, card_num_bit_map); - - spin_unlock(&probe_spin_lock); - - return RETURN_OK; - } - } - - spin_unlock(&probe_spin_lock); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Have probe more than 0x%x port, probe failed", i); - - return UNF_RETURN_ERROR; -} - -static void spfc_dec_and_free_card_num(u8 card_num) -{ - /* 2 ports per card */ - if (card_num >= SPFC_MAX_CARD_NUM) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Card number(0x%x) is invalid", card_num); - - return; - } - - spin_lock(&probe_spin_lock); - - if (test_bit((int)card_num, (const ulong *)card_num_bit_map)) { - card_num_manage[card_num].port_count--; - card_num_manage[card_num].is_removing = true; - - if (card_num_manage[card_num].port_count == 0) { - card_num_manage[card_num].card_number = 0; - card_num_manage[card_num].is_removing = false; - clear_bit((int)card_num, card_num_bit_map); - } - } else { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Can not find card number(0x%x)", card_num); - } - - spin_unlock(&probe_spin_lock); -} - -u32 spfc_assign_probe_index(u32 *probe_index) -{ - u32 i = 0; - - spin_lock(&probe_spin_lock); - for (i = 0; i < SPFC_MAX_PROBE_PORT_NUM; i++) { - if (!test_bit((int)i, (const ulong *)probe_bit_map)) { - *probe_index = i; - set_bit(i, probe_bit_map); - - spin_unlock(&probe_spin_lock); - - return RETURN_OK; - } - } - spin_unlock(&probe_spin_lock); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Have probe more than 0x%x port, probe failed", i); - - return UNF_RETURN_ERROR; -} - -u32 spfc_get_probe_index_by_port_id(u32 port_id, u32 *probe_index) -{ - u32 total_probe_num = 0; - u32 i = 0; - u32 probe_cnt = 0; - - spfc_get_total_probed_num(&total_probe_num); - - for (i = 0; i < SPFC_MAX_PROBE_PORT_NUM; i++) { - if (!spfc_hba[i]) - continue; - - if (total_probe_num == probe_cnt) - break; - - if (port_id == spfc_hba[i]->port_cfg.port_id) { - 
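	/* Port ID matches this probed HBA: hand back its probe index. */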
*probe_index = spfc_hba[i]->probe_index; - - return RETURN_OK; - } - - probe_cnt++; - } - - return UNF_RETURN_ERROR; -} - -static int spfc_probe(struct spfc_lld_dev *lld_dev, void **uld_dev, - char *uld_dev_name) -{ - struct pci_dev *pci_dev = NULL; - struct spfc_hba_info *hba = NULL; - u32 ret = UNF_RETURN_ERROR; - const u8 work_mode = SPFC_SMARTIO_WORK_MODE_FC; - u32 probe_index = 0; - u32 probe_total_num = 0; - u8 card_num = INVALID_VALUE8; - struct spfc_chip_info chip_info; - - FC_CHECK_RETURN_VALUE(lld_dev, UNF_RETURN_ERROR_S32); - FC_CHECK_RETURN_VALUE(lld_dev->hwdev, UNF_RETURN_ERROR_S32); - FC_CHECK_RETURN_VALUE(lld_dev->pdev, UNF_RETURN_ERROR_S32); - FC_CHECK_RETURN_VALUE(uld_dev, UNF_RETURN_ERROR_S32); - FC_CHECK_RETURN_VALUE(uld_dev_name, UNF_RETURN_ERROR_S32); - - pci_dev = lld_dev->pdev; - memset(&chip_info, 0, sizeof(struct spfc_chip_info)); - /* 1. Get & check Total_Probed_number */ - spfc_get_total_probed_num(&probe_total_num); - if (probe_total_num >= allowed_probe_num) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Total probe num (0x%x) is larger than allowed number(0x%x)", - probe_total_num, allowed_probe_num); - - return UNF_RETURN_ERROR_S32; - } - /* 2. Check device work mode */ - ret = spfc_fc_mode_check(lld_dev->hwdev); - if (ret != RETURN_OK) - return UNF_RETURN_ERROR_S32; - - /* 3. Assign & Get new Probe index */ - ret = spfc_assign_probe_index(&probe_index); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]AssignProbeIndex fail"); - - return UNF_RETURN_ERROR_S32; - } - - ret = spfc_get_chip_capability((void *)lld_dev->hwdev, &chip_info); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]GetChipCapability fail"); - return UNF_RETURN_ERROR_S32; - } - chip_info.work_mode = work_mode; - - /* Assign & Get new Card number */ - ret = spfc_assign_card_num(lld_dev, &chip_info, &card_num); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]spfc_assign_card_num fail"); - spfc_release_probe_index(probe_index); - - return UNF_RETURN_ERROR_S32; - } - - /* Init HBA resource */ - hba = spfc_init_hba(pci_dev, lld_dev->hwdev, &chip_info, card_num); - if (!hba) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Probe HBA(0x%x) failed. Memory reference = 0x%x", - probe_index, atomic_read(&fc_mem_ref)); - - spfc_release_probe_index(probe_index); - spfc_dec_and_free_card_num(card_num); - - return UNF_RETURN_ERROR_S32; - } - - /* Name by the order of probe */ - *uld_dev = hba; - snprintf(uld_dev_name, SPFC_PORT_NAME_STR_LEN, "%s%02x%02x", - SPFC_PORT_NAME_LABEL, hba->card_info.card_num, - hba->card_info.func_num); - memcpy(hba->port_name, uld_dev_name, SPFC_PORT_NAME_STR_LEN); - hba->probe_index = probe_index; - spfc_hba[probe_index] = hba; - - return RETURN_OK; -} - -u32 spfc_sfp_switch(void *hba, void *para_in) -{ - struct spfc_hba_info *spfc_hba = (struct spfc_hba_info *)hba; - bool turn_on = false; - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(spfc_hba, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(para_in, UNF_RETURN_ERROR); - - /* Redundancy check */ - turn_on = *((bool *)para_in); - if ((u32)turn_on == (u32)spfc_hba->sfp_on) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[info]Port(0x%x) FC physical port is already %s", - spfc_hba->port_cfg.port_id, (turn_on) ? 
"on" : "off"); - - return ret; - } - - if (turn_on) { - ret = spfc_port_check_fw_ready(spfc_hba); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Get port(0x%x) clear state failed, turn on fail", - spfc_hba->port_cfg.port_id); - return ret; - } - /* At first, configure port table info if necessary */ - ret = spfc_config_port_table(spfc_hba); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) can't configurate port table", - spfc_hba->port_cfg.port_id); - - return ret; - } - } - - /* Switch physical port */ - ret = spfc_port_switch(spfc_hba, turn_on); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Port(0x%x) switch failed", - spfc_hba->port_cfg.port_id); - - return ret; - } - - /* Update HBA's sfp state */ - spfc_hba->sfp_on = turn_on; - - return ret; -} - -static u32 spfc_destroy_lport(struct spfc_hba_info *hba) -{ - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - - UNF_LOWLEVEL_RELEASE_LOCAL_PORT(ret, hba->lport); - hba->lport = NULL; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) destroy L_Port done", - hba->port_cfg.port_id); - - return ret; -} - -static u32 spfc_port_check_fw_ready(struct spfc_hba_info *hba) -{ -#define SPFC_PORT_CLEAR_DONE 0 -#define SPFC_PORT_CLEAR_DOING 1 -#define SPFC_WAIT_ONE_TIME_MS 1000 -#define SPFC_LOOP_TIMES 30 - - u32 clear_state = SPFC_PORT_CLEAR_DOING; - u32 ret = RETURN_OK; - u32 wait_timeout = 0; - - do { - msleep(SPFC_WAIT_ONE_TIME_MS); - wait_timeout += SPFC_WAIT_ONE_TIME_MS; - ret = spfc_mbx_get_fw_clear_stat(hba, &clear_state); - if (ret != RETURN_OK) - return UNF_RETURN_ERROR; - - /* Total time more than 30s retry more than 3 times failed */ - if (wait_timeout > SPFC_LOOP_TIMES * SPFC_WAIT_ONE_TIME_MS && - clear_state != SPFC_PORT_CLEAR_DONE) - return UNF_RETURN_ERROR; - } while (clear_state != SPFC_PORT_CLEAR_DONE); - - return RETURN_OK; -} - -u32 spfc_port_reset(struct spfc_hba_info *hba) -{ - u32 ret = RETURN_OK; - ulong timeout = 0; - bool sfp_before_reset = false; - bool off_para_in = false; - struct pci_dev *pci_dev = NULL; - struct spfc_hba_info *spfc_hba = hba; - - FC_CHECK_RETURN_VALUE(spfc_hba, UNF_RETURN_ERROR); - pci_dev = spfc_hba->pci_dev; - FC_CHECK_RETURN_VALUE(pci_dev, UNF_RETURN_ERROR); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "[event]Port(0x%x) reset HBA begin", - spfc_hba->port_cfg.port_id); - - /* Wait for last init/reset completion */ - timeout = wait_for_completion_timeout(&spfc_hba->hba_init_complete, - (ulong)SPFC_PORT_INIT_TIME_SEC_MAX * HZ); - - if (timeout == SPFC_ZERO) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Last HBA initialize/reset timeout: %d second", - SPFC_PORT_INIT_TIME_SEC_MAX); - - return UNF_RETURN_ERROR; - } - - /* Save current port state */ - sfp_before_reset = spfc_hba->sfp_on; - - /* Inform the reset event to CM level before beginning */ - UNF_LOWLEVEL_PORT_EVENT(ret, spfc_hba->lport, UNF_PORT_RESET_START, NULL); - spfc_hba->reset_time = jiffies; - - /* Close SFP */ - ret = spfc_sfp_switch(spfc_hba, &off_para_in); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) can't close SFP", - spfc_hba->port_cfg.port_id); - spfc_hba->sfp_on = sfp_before_reset; - - complete(&spfc_hba->hba_init_complete); - - return ret; - } - - ret = spfc_port_check_fw_ready(spfc_hba); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Get port(0x%x) clear state failed, hang port and report chip error", - 
spfc_hba->port_cfg.port_id); - - complete(&spfc_hba->hba_init_complete); - - return ret; - } - - spfc_queue_pre_process(spfc_hba, false); - - ret = spfc_mb_reset_chip(spfc_hba, SPFC_MBOX_SUBTYPE_LIGHT_RESET); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]SPFC port(0x%x) can't reset chip mailbox", - spfc_hba->port_cfg.port_id); - - UNF_LOWLEVEL_PORT_EVENT(ret, spfc_hba->lport, UNF_PORT_GET_FWLOG, NULL); - UNF_LOWLEVEL_PORT_EVENT(ret, spfc_hba->lport, UNF_PORT_DEBUG_DUMP, NULL); - } - - /* Inform the success to CM level */ - UNF_LOWLEVEL_PORT_EVENT(ret, spfc_hba->lport, UNF_PORT_RESET_END, NULL); - - /* Queue open */ - spfc_queue_post_process(spfc_hba); - - /* Open SFP */ - (void)spfc_sfp_switch(spfc_hba, &sfp_before_reset); - - complete(&spfc_hba->hba_init_complete); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[event]Port(0x%x) reset HBA done", - spfc_hba->port_cfg.port_id); - - return ret; -#undef SPFC_WAIT_LINKDOWN_EVENT_MS -} - -static u32 spfc_delete_scqc_via_cmdq_sync(struct spfc_hba_info *hba, u32 scqn) -{ - /* Via CMND Queue */ -#define SPFC_DEL_SCQC_TIMEOUT 3000 - - int ret; - struct spfc_cmdqe_delete_scqc del_scqc_cmd; - struct sphw_cmd_buf *cmd_buf; - - /* Alloc cmd buffer */ - cmd_buf = sphw_alloc_cmd_buf(hba->dev_handle); - if (!cmd_buf) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]cmdq in_cmd_buf alloc failed"); - - SPFC_ERR_IO_STAT(hba, SPFC_TASK_T_DEL_SCQC); - return UNF_RETURN_ERROR; - } - - /* Build & Send Cmnd */ - memset(&del_scqc_cmd, 0, sizeof(del_scqc_cmd)); - del_scqc_cmd.wd0.task_type = SPFC_TASK_T_DEL_SCQC; - del_scqc_cmd.wd1.scqn = SPFC_LSW(scqn); - spfc_cpu_to_big32(&del_scqc_cmd, sizeof(del_scqc_cmd)); - memcpy(cmd_buf->buf, &del_scqc_cmd, sizeof(del_scqc_cmd)); - cmd_buf->size = sizeof(del_scqc_cmd); - - ret = sphw_cmdq_detail_resp(hba->dev_handle, COMM_MOD_FC, 0, cmd_buf, - NULL, NULL, SPFC_DEL_SCQC_TIMEOUT, - SPHW_CHANNEL_FC); - - /* Free cmnd buffer */ - sphw_free_cmd_buf(hba->dev_handle, cmd_buf); - - if (ret) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Send del scqc via cmdq failed, ret=0x%x", - ret); - - SPFC_ERR_IO_STAT(hba, SPFC_TASK_T_DEL_SCQC); - return UNF_RETURN_ERROR; - } - - SPFC_IO_STAT(hba, SPFC_TASK_T_DEL_SCQC); - - return RETURN_OK; -} - -static u32 spfc_delete_srqc_via_cmdq_sync(struct spfc_hba_info *hba, u64 sqrc_gpa) -{ - /* Via CMND Queue */ -#define SPFC_DEL_SRQC_TIMEOUT 3000 - - int ret; - struct spfc_cmdqe_delete_srqc del_srqc_cmd; - struct sphw_cmd_buf *cmd_buf; - - /* Alloc Cmnd buffer */ - cmd_buf = sphw_alloc_cmd_buf(hba->dev_handle); - if (!cmd_buf) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]cmdq in_cmd_buf allocate failed"); - - SPFC_ERR_IO_STAT(hba, SPFC_TASK_T_DEL_SRQC); - return UNF_RETURN_ERROR; - } - - /* Build & Send Cmnd */ - memset(&del_srqc_cmd, 0, sizeof(del_srqc_cmd)); - del_srqc_cmd.wd0.task_type = SPFC_TASK_T_DEL_SRQC; - del_srqc_cmd.srqc_gpa_h = SPFC_HIGH_32_BITS(sqrc_gpa); - del_srqc_cmd.srqc_gpa_l = SPFC_LOW_32_BITS(sqrc_gpa); - spfc_cpu_to_big32(&del_srqc_cmd, sizeof(del_srqc_cmd)); - memcpy(cmd_buf->buf, &del_srqc_cmd, sizeof(del_srqc_cmd)); - cmd_buf->size = sizeof(del_srqc_cmd); - - ret = sphw_cmdq_detail_resp(hba->dev_handle, COMM_MOD_FC, 0, cmd_buf, - NULL, NULL, SPFC_DEL_SRQC_TIMEOUT, - SPHW_CHANNEL_FC); - - /* Free Cmnd Buffer */ - sphw_free_cmd_buf(hba->dev_handle, cmd_buf); - - if (ret) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Send del srqc via cmdq failed, ret=0x%x", - ret); - - SPFC_ERR_IO_STAT(hba, SPFC_TASK_T_DEL_SRQC); - return 
UNF_RETURN_ERROR; - } - - SPFC_IO_STAT(hba, SPFC_TASK_T_DEL_SRQC); - - return RETURN_OK; -} - -void spfc_flush_scq_ctx(struct spfc_hba_info *hba) -{ - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Start destroy total 0x%x SCQC", SPFC_TOTAL_SCQ_NUM); - - FC_CHECK_RETURN_VOID(hba); - - (void)spfc_delete_scqc_via_cmdq_sync(hba, 0); -} - -void spfc_flush_srq_ctx(struct spfc_hba_info *hba) -{ - struct spfc_srq_info *srq_info = NULL; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Start destroy ELS&IMMI SRQC"); - - FC_CHECK_RETURN_VOID(hba); - - /* Check state to avoid to flush SRQC again */ - srq_info = &hba->els_srq_info; - if (srq_info->srq_type == SPFC_SRQ_ELS && srq_info->enable) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[event]HBA(0x%x) flush ELS SRQC", - hba->port_index); - - (void)spfc_delete_srqc_via_cmdq_sync(hba, srq_info->cqm_srq_info->q_ctx_paddr); - } -} - -void spfc_set_hba_flush_state(struct spfc_hba_info *hba, bool in_flush) -{ - ulong flag = 0; - - spin_lock_irqsave(&hba->flush_state_lock, flag); - hba->in_flushing = in_flush; - spin_unlock_irqrestore(&hba->flush_state_lock, flag); -} - -void spfc_set_hba_clear_state(struct spfc_hba_info *hba, bool clear_flag) -{ - ulong flag = 0; - - spin_lock_irqsave(&hba->clear_state_lock, flag); - hba->port_is_cleared = clear_flag; - spin_unlock_irqrestore(&hba->clear_state_lock, flag); -} - -bool spfc_hba_is_present(struct spfc_hba_info *hba) -{ - int ret_val = RETURN_OK; - bool present_flag = false; - u32 vendor_id = 0; - - ret_val = pci_read_config_dword(hba->pci_dev, 0, &vendor_id); - vendor_id &= SPFC_PCI_VENDOR_ID_MASK; - if (ret_val == RETURN_OK && vendor_id == SPFC_PCI_VENDOR_ID_RAMAXEL) { - present_flag = true; - } else { - present_flag = false; - hba->dev_present = false; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "[info]Port %s remove: vender_id=0x%x, ret=0x%x", - present_flag ? "normal" : "surprise", vendor_id, ret_val); - - return present_flag; -} - -static void spfc_exit(struct pci_dev *pci_dev, struct spfc_hba_info *hba) -{ -#define SPFC_WAIT_CLR_RESOURCE_MS 1000 - u32 ret = UNF_RETURN_ERROR; - bool sfp_switch = false; - bool present_flag = true; - - FC_CHECK_RETURN_VOID(pci_dev); - FC_CHECK_RETURN_VOID(hba); - - hba->removing = true; - - /* 1. Check HBA present or not */ - present_flag = spfc_hba_is_present(hba); - if (present_flag) { - if (hba->phy_link == UNF_PORT_LINK_DOWN) - hba->queue_set_stage = SPFC_QUEUE_SET_STAGE_FLUSHDONE; - - /* At first, close sfp */ - sfp_switch = false; - (void)spfc_sfp_switch((void *)hba, (void *)&sfp_switch); - } - - /* 2. Report COM with HBA removing: delete route timer delay work */ - UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, UNF_PORT_BEGIN_REMOVE, NULL); - - /* 3. Report COM with HBA Nop, COM release I/O(s) & R_Port(s) forcely */ - UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, UNF_PORT_NOP, NULL); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]PCI device(%p) remove port(0x%x) failed", - pci_dev, hba->port_index); - } - - spfc_delete_default_session(hba); - - if (present_flag) - /* 4.1 Wait for all SQ empty, free SRQ buffer & SRQC */ - spfc_queue_pre_process(hba, true); - - /* 5. Destroy L_Port */ - (void)spfc_destroy_lport(hba); - - /* 6. 
With HBA is present */ - if (present_flag) { - /* Enable Queues dispatch */ - spfc_queue_post_process(hba); - - /* Need reset port if necessary */ - (void)spfc_mb_reset_chip(hba, SPFC_MBOX_SUBTYPE_HEAVY_RESET); - - /* Flush SCQ context */ - spfc_flush_scq_ctx(hba); - - /* Flush SRQ context */ - spfc_flush_srq_ctx(hba); - - sphw_func_rx_tx_flush(hba->dev_handle, SPHW_CHANNEL_FC); - - /* NOTE: while flushing txrx, hash bucket will be cached out in - * UP. Wait to clear resources completely - */ - msleep(SPFC_WAIT_CLR_RESOURCE_MS); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) flush scq & srq & root context done", - hba->port_cfg.port_id); - } - - /* 7. Release host resources */ - spfc_release_host_res(hba); - - /* 8. Destroy FC work queue */ - if (hba->work_queue) { - flush_workqueue(hba->work_queue); - destroy_workqueue(hba->work_queue); - hba->work_queue = NULL; - } - - /* 9. Release Probe index & Decrease card number */ - spfc_release_probe_index(hba->probe_index); - spfc_dec_and_free_card_num((u8)hba->card_info.card_num); - - /* 10. Free HBA memory */ - kfree(hba); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[event]PCI device(%p) remove succeed, memory reference is 0x%x", - pci_dev, atomic_read(&fc_mem_ref)); -} - -static void spfc_remove(struct spfc_lld_dev *lld_dev, void *uld_dev) -{ - struct pci_dev *pci_dev = NULL; - struct spfc_hba_info *hba = (struct spfc_hba_info *)uld_dev; - u32 probe_total_num = 0; - u32 probe_index = 0; - - FC_CHECK_RETURN_VOID(lld_dev); - FC_CHECK_RETURN_VOID(uld_dev); - FC_CHECK_RETURN_VOID(lld_dev->hwdev); - FC_CHECK_RETURN_VOID(lld_dev->pdev); - - pci_dev = hba->pci_dev; - - /* Get total probed port number */ - spfc_get_total_probed_num(&probe_total_num); - if (probe_total_num < 1) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Port manager is empty and no need to remove"); - return; - } - - /* check pci vendor id */ - if (pci_dev->vendor != SPFC_PCI_VENDOR_ID_RAMAXEL) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Wrong vendor id(0x%x) and exit", - pci_dev->vendor); - return; - } - - /* Check function ability */ - if (!sphw_support_fc(lld_dev->hwdev, NULL)) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]FC is not enable in this function"); - return; - } - - /* Get probe index */ - probe_index = hba->probe_index; - - /* Parent context alloc check */ - if (hba->service_cap.dev_fc_cap.max_parent_qpc_num == 0) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]FC parent context not allocate in this function"); - return; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]HBA(0x%x) start removing...", hba->port_index); - - /* HBA removinig... 
*/ - spfc_exit(pci_dev, hba); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "[event]Port(0x%x) pci device removed, vendorid(0x%04x) devid(0x%04x)", - probe_index, pci_dev->vendor, pci_dev->device); - - /* Probe index check */ - if (probe_index < SPFC_HBA_PORT_MAX_NUM) { - spfc_hba[probe_index] = NULL; - } else { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Probe index(0x%x) is invalid and remove failed", - probe_index); - } - - spfc_get_total_probed_num(&probe_total_num); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[event]Removed index=%u, RemainNum=%u, AllowNum=%u", - probe_index, probe_total_num, allowed_probe_num); -} - -static u32 spfc_get_hba_pcie_link_state(void *hba, void *link_state) -{ - bool *link_state_info = link_state; - bool present_flag = true; - struct spfc_hba_info *spfc_hba = hba; - int ret; - bool last_dev_state = true; - bool cur_dev_state = true; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(link_state, UNF_RETURN_ERROR); - last_dev_state = spfc_hba->dev_present; - ret = sphw_get_card_present_state(spfc_hba->dev_handle, (bool *)&present_flag); - if (ret || !present_flag) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "[event]port(0x%x) is not present,ret:%d, present_flag:%d", - spfc_hba->port_cfg.port_id, ret, present_flag); - cur_dev_state = false; - } else { - cur_dev_state = true; - } - - spfc_hba->dev_present = cur_dev_state; - - /* To prevent false alarms, the heartbeat is considered lost only - * when the PCIe link is down for two consecutive times. - */ - if (!last_dev_state && !cur_dev_state) - spfc_hba->heart_status = false; - - *link_state_info = spfc_hba->dev_present; - - return RETURN_OK; -} diff --git a/drivers/scsi/spfc/hw/spfc_hba.h b/drivers/scsi/spfc/hw/spfc_hba.h deleted file mode 100644 index 937f00ea8fc7..000000000000 --- a/drivers/scsi/spfc/hw/spfc_hba.h +++ /dev/null @@ -1,341 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPFC_HBA_H -#define SPFC_HBA_H - -#include "unf_type.h" -#include "unf_common.h" -#include "spfc_queue.h" -#include "sphw_crm.h" -#define SPFC_PCI_VENDOR_ID_MASK (0xffff) - -#define FW_VER_LEN (32) -#define HW_VER_LEN (32) -#define FW_SUB_VER_LEN (24) - -#define SPFC_LOWLEVEL_RTTOV_TAG 0 -#define SPFC_LOWLEVEL_EDTOV_TAG 0 -#define SPFC_LOWLEVEL_DEFAULT_LOOP_BB_CREDIT (8) -#define SPFC_LOWLEVEL_DEFAULT_32G_BB_CREDIT (255) -#define SPFC_LOWLEVEL_DEFAULT_16G_BB_CREDIT (255) -#define SPFC_LOWLEVEL_DEFAULT_8G_BB_CREDIT (255) -#define SPFC_LOWLEVEL_DEFAULT_BB_SCN 0 -#define SPFC_LOWLEVEL_DEFAULT_RA_TOV UNF_DEFAULT_RATOV -#define SPFC_LOWLEVEL_DEFAULT_ED_TOV UNF_DEFAULT_EDTOV - -#define SPFC_LOWLEVEL_DEFAULT_32G_ESCH_VALUE 28081 -#define SPFC_LOWLEVEL_DEFAULT_16G_ESCH_VALUE 14100 -#define SPFC_LOWLEVEL_DEFAULT_8G_ESCH_VALUE 7000 -#define SPFC_LOWLEVEL_DEFAULT_ESCH_BUST_SIZE 0x2000 - -#define SPFC_PCI_STATUS 0x06 - -#define SPFC_SMARTIO_WORK_MODE_FC 0x1 -#define SPFC_SMARTIO_WORK_MODE_OTHER 0xF -#define UNF_FUN_ID_MASK 0x07 - -#define UNF_SPFC_FC (0x01) -#define UNF_SPFC_MAXNPIV_NUM 64 /* If not support NPIV, Initialized to 0 */ - -#define SPFC_MAX_COS_NUM (8) - -#define SPFC_INTR_ENABLE 0x5 -#define SPFC_INTR_DISABLE 0x0 -#define SPFC_CLEAR_FW_INTR 0x1 -#define SPFC_REG_ENABLE_INTR 0x00000200 - -#define SPFC_PCI_VENDOR_ID_RAMAXEL 0x1E81 - -#define SPFC_SCQ_CNTX_SIZE 32 -#define SPFC_SRQ_CNTX_SIZE 64 - -#define SPFC_PORT_INIT_TIME_SEC_MAX 1 - -#define SPFC_PORT_NAME_LABEL "spfc" -#define SPFC_PORT_NAME_STR_LEN (16) - -#define 
SPFC_MAX_PROBE_PORT_NUM (64) -#define SPFC_PORT_NUM_PER_TABLE (64) -#define SPFC_MAX_CARD_NUM (32) - -#define SPFC_HBA_PORT_MAX_NUM SPFC_MAX_PROBE_PORT_NUM -#define SPFC_SIRT_MIN_RXID 0 -#define SPFC_SIRT_MAX_RXID 255 - -#define SPFC_GET_HBA_PORT_ID(hba) ((hba)->port_index) - -#define SPFC_MAX_WAIT_LOOP_TIMES 10000 -#define SPFC_WAIT_SESS_ENABLE_ONE_TIME_MS 1 -#define SPFC_WAIT_SESS_FREE_ONE_TIME_MS 1 - -#define SPFC_PORT_ID_MASK 0xff0000 - -#define SPFC_MAX_PARENT_QPC_NUM 2048 -struct spfc_port_cfg { - u32 port_id; /* Port ID */ - u32 port_mode; /* Port mode:INI(0x20), TGT(0x10), BOTH(0x30) */ - u32 port_topology; /* Port topo:0x3:loop,0xc:p2p,0xf:auto */ - u32 port_alpa; /* Port ALPA */ - u32 max_queue_depth; /* Max Queue depth Registration to SCSI */ - u32 sest_num; /* IO burst num:512-4096 */ - u32 max_login; /* Max Login Session. */ - u32 node_name_hi; /* nodename high 32 bits */ - u32 node_name_lo; /* nodename low 32 bits */ - u32 port_name_hi; /* portname high 32 bits */ - u32 port_name_lo; /* portname low 32 bits */ - u32 port_speed; /* Port speed 0:auto 4:4Gbps 8:8Gbps 16:16Gbps */ - u32 interrupt_delay; /* Delay times(ms) in interrupt */ - u32 tape_support; /* tape support */ -}; - -#define SPFC_VER_INFO_SIZE 128 -struct spfc_drv_version { - char ver[SPFC_VER_INFO_SIZE]; -}; - -struct spfc_card_info { - u32 card_num : 8; - u32 func_num : 8; - u32 base_func : 8; - /* Card type:UNF_FC_SERVER_BOARD_32_G(6) 32G mode, - * UNF_FC_SERVER_BOARD_16_G(7)16G mode - */ - u32 card_type : 8; -}; - -struct spfc_card_num_manage { - bool is_removing; - u32 port_count; - u64 card_number; -}; - -struct spfc_sim_ini_err { - u32 err_code; - u32 times; -}; - -struct spfc_sim_pcie_err { - u32 err_code; - u32 times; -}; - -struct spfc_led_state { - u8 green_speed_led; - u8 yellow_speed_led; - u8 ac_led; - u8 rsvd; -}; - -enum spfc_led_activity { - SPFC_LED_CFG_ACTVE_FRAME = 0, - SPFC_LED_CFG_ACTVE_FC = 3 -}; - -enum spfc_queue_set_stage { - SPFC_QUEUE_SET_STAGE_INIT = 0, - SPFC_QUEUE_SET_STAGE_SCANNING, - SPFC_QUEUE_SET_STAGE_FLUSHING, - SPFC_QUEUE_SET_STAGE_FLUSHDONE, - SPFC_QUEUE_SET_STAGE_BUTT -}; - -struct spfc_vport_info { - u64 node_name; - u64 port_name; - u32 port_mode; /* INI, TGT or both */ - u32 nport_id; /* maybe acquired by lowlevel and update to common */ - void *vport; - u16 vp_index; -}; - -struct spfc_srq_delay_info { - u8 srq_delay_flag; /* Check whether need to delay */ - u8 root_rq_rcvd_flag; - u16 rsd; - - spinlock_t srq_lock; - struct unf_frame_pkg frame_pkg; - - struct delayed_work del_work; -}; - -struct spfc_fw_ver_detail { - u8 ucode_ver[SPFC_VER_LEN]; - u8 ucode_compile_time[SPFC_COMPILE_TIME_LEN]; - - u8 up_ver[SPFC_VER_LEN]; - u8 up_compile_time[SPFC_COMPILE_TIME_LEN]; - - u8 boot_ver[SPFC_VER_LEN]; - u8 boot_compile_time[SPFC_COMPILE_TIME_LEN]; -}; - -/* get wwpn and wwnn */ -struct spfc_chip_info { - u8 work_mode; - u8 tape_support; - u64 wwpn; - u64 wwnn; -}; - -/* Default SQ info */ -struct spfc_default_sq_info { - u32 sq_cid; - u32 sq_xid; - u32 fun_cid; - u32 default_sq_flag; -}; - -struct spfc_hba_info { - struct pci_dev *pci_dev; - void *dev_handle; - - struct fc_service_cap service_cap; /* struct fc_service_cap pstFcoeServiceCap; */ - - struct spfc_scq_info scq_info[SPFC_TOTAL_SCQ_NUM]; - struct spfc_srq_info els_srq_info; - - struct spfc_vport_info vport_info[UNF_SPFC_MAXNPIV_NUM + 1]; - - /* PCI IO Memory */ - void __iomem *bar0; - u32 bar0_len; - - struct spfc_parent_queue_mgr *parent_queue_mgr; - - /* Link list Sq WqePage Pool */ - struct spfc_sq_wqepage_pool 
sq_wpg_pool; - - enum spfc_queue_set_stage queue_set_stage; - u32 next_clear_sq; - u32 default_sqid; - - /* Port parameters, Obtained through firmware */ - u16 queue_set_max_count; - u8 port_type; /* FC or FCoE Port */ - u8 port_index; /* Phy Port */ - u32 default_scqn; - char fw_ver[FW_VER_LEN]; /* FW version */ - char hw_ver[HW_VER_LEN]; /* HW version */ - char mst_fw_ver[FW_SUB_VER_LEN]; - char fc_fw_ver[FW_SUB_VER_LEN]; - u8 chip_type; /* chiptype:Smart or fc */ - u8 work_mode; - struct spfc_card_info card_info; - char port_name[SPFC_PORT_NAME_STR_LEN]; - u32 probe_index; - - u16 exi_base; - u16 exi_count; - u16 vpf_count; - u8 vpid_start; - u8 vpid_end; - - spinlock_t flush_state_lock; - bool in_flushing; - - spinlock_t clear_state_lock; - bool port_is_cleared; - - struct spfc_port_cfg port_cfg; /* Obtained through Config */ - - void *lport; /* Used in UNF level */ - - u8 sys_node_name[UNF_WWN_LEN]; - u8 sys_port_name[UNF_WWN_LEN]; - - struct completion hba_init_complete; - struct completion mbox_complete; - struct completion vpf_complete; - struct completion fcfi_complete; - struct completion get_sfp_complete; - - u16 init_stage; - u16 removing; - bool sfp_on; - bool dev_present; - bool heart_status; - spinlock_t hba_lock; - u32 port_topo_cfg; - u32 port_bb_scn_cfg; - u32 port_loop_role; - u32 port_speed_cfg; - u32 max_support_speed; - u32 min_support_speed; - u32 server_max_speed; - - u8 remote_rttov_tag; - u8 remote_edtov_tag; - u16 compared_bb_scn; - u16 remote_bb_credit; - u32 compared_edtov_val; - u32 compared_ratov_val; - enum unf_act_topo active_topo; - u32 active_port_speed; - u32 active_rxbb_credit; - u32 active_bb_scn; - - u32 phy_link; - - enum unf_port_mode port_mode; - - u32 fcp_cfg; - - /* loop */ - u8 active_alpa; - u8 loop_map_valid; - u8 loop_map[UNF_LOOPMAP_COUNT]; - - /* sfp info dma */ - void *sfp_buf; - dma_addr_t sfp_dma_addr; - u32 sfp_status; - int chip_temp; - u32 sfp_posion; - - u32 cos_bitmap; - atomic_t cos_rport_cnt[SPFC_MAX_COS_NUM]; - - /* fw debug dma buffer */ - void *debug_buf; - dma_addr_t debug_buf_dma_addr; - void *log_buf; - dma_addr_t fw_log_dma_addr; - - void *dma_addr; - dma_addr_t update_dma_addr; - - struct spfc_sim_ini_err sim_ini_err; - struct spfc_sim_pcie_err sim_pcie_err; - - struct spfc_led_state led_states; - - u32 fec_status; - - struct workqueue_struct *work_queue; - struct work_struct els_srq_clear_work; - u64 reset_time; - - spinlock_t spin_lock; - - struct spfc_srq_delay_info srq_delay_info; - struct spfc_fw_ver_detail hardinfo; - struct spfc_default_sq_info default_sq_info; -}; - -extern struct spfc_hba_info *spfc_hba[SPFC_HBA_PORT_MAX_NUM]; -extern spinlock_t probe_spin_lock; -extern ulong probe_bit_map[SPFC_MAX_PROBE_PORT_NUM / SPFC_PORT_NUM_PER_TABLE]; - -u32 spfc_port_reset(struct spfc_hba_info *hba); -void spfc_flush_scq_ctx(struct spfc_hba_info *hba); -void spfc_flush_srq_ctx(struct spfc_hba_info *hba); -void spfc_set_hba_flush_state(struct spfc_hba_info *hba, bool in_flush); -void spfc_set_hba_clear_state(struct spfc_hba_info *hba, bool clear_flag); -u32 spfc_get_probe_index_by_port_id(u32 port_id, u32 *probe_index); -void spfc_get_total_probed_num(u32 *probe_cnt); -u32 spfc_sfp_switch(void *hba, void *para_in); -bool spfc_hba_is_present(struct spfc_hba_info *hba); - -#endif diff --git a/drivers/scsi/spfc/hw/spfc_hw_wqe.h b/drivers/scsi/spfc/hw/spfc_hw_wqe.h deleted file mode 100644 index e03d24a98579..000000000000 --- a/drivers/scsi/spfc/hw/spfc_hw_wqe.h +++ /dev/null @@ -1,1645 +0,0 @@ -/* SPDX-License-Identifier: 
GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPFC_HW_WQE_H -#define SPFC_HW_WQE_H - -#define FC_ICQ_EN -#define FC_SCSI_CMDIU_LEN 48 -#define FC_NVME_CMDIU_LEN 96 -#define FC_LS_GS_USERID_CNT_MAX 10 -#define FC_SENSEDATA_USERID_CNT_MAX 2 -#define FC_INVALID_MAGIC_NUM 0xFFFFFFFF -#define FC_INVALID_HOTPOOLTAG 0xFFFF - -/* TASK TYPE: in order to compatible wiht EDA, please add new type before BUTT. */ -enum spfc_task_type { - SPFC_TASK_T_EMPTY = 0, /* SCQE TYPE: means task type not initialize */ - - SPFC_TASK_T_IWRITE = 1, /* SQE TYPE: ini send FCP Write Command */ - SPFC_TASK_T_IREAD = 2, /* SQE TYPE: ini send FCP Read Command */ - SPFC_TASK_T_IRESP = 3, /* SCQE TYPE: ini recv fcp rsp for IREAD/IWRITE/ITMF */ - SPFC_TASK_T_TCMND = 4, /* NA */ - SPFC_TASK_T_TREAD = 5, /* SQE TYPE: tgt send FCP Read Command */ - SPFC_TASK_T_TWRITE = 6, /* SQE TYPE: tgt send FCP Write Command (XFER_RDY) */ - SPFC_TASK_T_TRESP = 7, /* SQE TYPE: tgt send fcp rsp of Read/Write */ - SPFC_TASK_T_TSTS = 8, /* SCQE TYPE: tgt sts for TREAD/TWRITE/TRESP */ - SPFC_TASK_T_ABTS = 9, /* SQE TYPE: ini send abts request Command */ - SPFC_TASK_T_IELS = 10, /* NA */ - SPFC_TASK_T_ITMF = 11, /* SQE TYPE: ini send tmf request Command */ - SPFC_TASK_T_CLEAN_UP = 12, /* NA */ - SPFC_TASK_T_CLEAN_UP_ALL = 13, /* NA */ - SPFC_TASK_T_UNSOLICITED = 14, /* NA */ - SPFC_TASK_T_ERR_WARN = 15, /* NA */ - SPFC_TASK_T_SESS_EN = 16, /* CMDQ TYPE: enable session */ - SPFC_TASK_T_SESS_DIS = 17, /* NA */ - SPFC_TASK_T_SESS_DEL = 18, /* NA */ - SPFC_TASK_T_RQE_REPLENISH = 19, /* NA */ - - SPFC_TASK_T_RCV_TCMND = 20, /* SCQE TYPE: tgt recv fcp cmd */ - SPFC_TASK_T_RCV_ELS_CMD = 21, /* SCQE TYPE: tgt recv els cmd */ - SPFC_TASK_T_RCV_ABTS_CMD = 22, /* SCQE TYPE: tgt recv abts cmd */ - SPFC_TASK_T_RCV_IMMEDIATE = 23, /* SCQE TYPE: tgt recv immediate data */ - /* SQE TYPE: send ESL rsp. PLOGI_ACC, PRLI_ACC will carry the parent - *context parameter indication. 
- */ - SPFC_TASK_T_ELS_RSP = 24, - SPFC_TASK_T_ELS_RSP_STS = 25, /* SCQE TYPE: ELS rsp sts */ - SPFC_TASK_T_ABTS_RSP = 26, /* CMDQ TYPE: tgt send abts rsp */ - SPFC_TASK_T_ABTS_RSP_STS = 27, /* SCQE TYPE: tgt abts rsp sts */ - - SPFC_TASK_T_ABORT = 28, /* CMDQ TYPE: tgt send Abort Command */ - SPFC_TASK_T_ABORT_STS = 29, /* SCQE TYPE: Abort sts */ - - SPFC_TASK_T_ELS = 30, /* SQE TYPE: send ELS request Command */ - SPFC_TASK_T_RCV_ELS_RSP = 31, /* SCQE TYPE: recv ELS response */ - - SPFC_TASK_T_GS = 32, /* SQE TYPE: send GS request Command */ - SPFC_TASK_T_RCV_GS_RSP = 33, /* SCQE TYPE: recv GS response */ - - SPFC_TASK_T_SESS_EN_STS = 34, /* SCQE TYPE: enable session sts */ - SPFC_TASK_T_SESS_DIS_STS = 35, /* NA */ - SPFC_TASK_T_SESS_DEL_STS = 36, /* NA */ - - SPFC_TASK_T_RCV_ABTS_RSP = 37, /* SCQE TYPE: ini recv abts rsp */ - - SPFC_TASK_T_BUFFER_CLEAR = 38, /* CMDQ TYPE: Buffer Clear */ - SPFC_TASK_T_BUFFER_CLEAR_STS = 39, /* SCQE TYPE: Buffer Clear sts */ - SPFC_TASK_T_FLUSH_SQ = 40, /* CMDQ TYPE: flush sq */ - SPFC_TASK_T_FLUSH_SQ_STS = 41, /* SCQE TYPE: flush sq sts */ - - SPFC_TASK_T_SESS_RESET = 42, /* SQE TYPE: Reset session */ - SPFC_TASK_T_SESS_RESET_STS = 43, /* SCQE TYPE: Reset session sts */ - SPFC_TASK_T_RQE_REPLENISH_STS = 44, /* NA */ - SPFC_TASK_T_DUMP_EXCH = 45, /* CMDQ TYPE: dump exch */ - SPFC_TASK_T_INIT_SRQC = 46, /* CMDQ TYPE: init SRQC */ - SPFC_TASK_T_CLEAR_SRQ = 47, /* CMDQ TYPE: clear SRQ */ - SPFC_TASK_T_CLEAR_SRQ_STS = 48, /* SCQE TYPE: clear SRQ sts */ - SPFC_TASK_T_INIT_SCQC = 49, /* CMDQ TYPE: init SCQC */ - SPFC_TASK_T_DEL_SCQC = 50, /* CMDQ TYPE: delete SCQC */ - SPFC_TASK_T_TMF_RESP = 51, /* SQE TYPE: tgt send tmf rsp */ - SPFC_TASK_T_DEL_SRQC = 52, /* CMDQ TYPE: delete SRQC */ - SPFC_TASK_T_RCV_IMMI_CONTINUE = 53, /* SCQE TYPE: tgt recv continue immediate data */ - - SPFC_TASK_T_ITMF_RESP = 54, /* SCQE TYPE: ini recv tmf rsp */ - SPFC_TASK_T_ITMF_MARKER_STS = 55, /* SCQE TYPE: tmf marker sts */ - SPFC_TASK_T_TACK = 56, - SPFC_TASK_T_SEND_AEQERR = 57, - SPFC_TASK_T_ABTS_MARKER_STS = 58, /* SCQE TYPE: abts marker sts */ - SPFC_TASK_T_FLR_CLEAR_IO = 59, /* FLR clear io type */ - SPFC_TASK_T_CREATE_SSQ_CONTEXT = 60, - SPFC_TASK_T_CLEAR_SSQ_CONTEXT = 61, - SPFC_TASK_T_EXCH_ID_FREE = 62, - SPFC_TASK_T_DIFX_RESULT_STS = 63, - SPFC_TASK_T_EXCH_ID_FREE_ABORT = 64, - SPFC_TASK_T_EXCH_ID_FREE_ABORT_STS = 65, - SPFC_TASK_T_PARAM_CHECK_FAIL = 66, - SPFC_TASK_T_TGT_UNKNOWN = 67, - SPFC_TASK_T_NVME_LS = 70, /* SQE TYPE: Snd Ls Req */ - SPFC_TASK_T_RCV_NVME_LS_RSP = 71, /* SCQE TYPE: Rcv Ls Rsp */ - - SPFC_TASK_T_NVME_LS_RSP = 72, /* SQE TYPE: Snd Ls Rsp */ - SPFC_TASK_T_RCV_NVME_LS_RSP_STS = 73, /* SCQE TYPE: Rcv Ls Rsp sts */ - - SPFC_TASK_T_RCV_NVME_LS_CMD = 74, /* SCQE TYPE: Rcv ls cmd */ - - SPFC_TASK_T_NVME_IREAD = 75, /* SQE TYPE: Ini Snd Nvme Read Cmd */ - SPFC_TASK_T_NVME_IWRITE = 76, /* SQE TYPE: Ini Snd Nvme write Cmd */ - - SPFC_TASK_T_NVME_TREAD = 77, /* SQE TYPE: Tgt Snd Nvme Read Cmd */ - SPFC_TASK_T_NVME_TWRITE = 78, /* SQE TYPE: Tgt Snd Nvme write Cmd */ - - SPFC_TASK_T_NVME_IRESP = 79, /* SCQE TYPE: Ini recv nvme rsp for NVMEIREAD/NVMEIWRITE */ - - SPFC_TASK_T_INI_IO_ABORT = 80, /* SQE type: INI Abort Cmd */ - SPFC_TASK_T_INI_IO_ABORT_STS = 81, /* SCQE type: INI Abort sts */ - - SPFC_TASK_T_INI_LS_ABORT = 82, /* SQE type: INI ls abort Cmd */ - SPFC_TASK_T_INI_LS_ABORT_STS = 83, /* SCQE type: INI ls abort sts */ - SPFC_TASK_T_EXCHID_TIMEOUT_STS = 84, /* SCQE TYPE: EXCH_ID TIME OUT */ - SPFC_TASK_T_PARENT_ERR_STS = 85, /* SCQE TYPE: PARENT ERR */ - - 
SPFC_TASK_T_NOP = 86, - SPFC_TASK_T_NOP_STS = 87, - - SPFC_TASK_T_DFX_INFO = 126, - SPFC_TASK_T_BUTT -}; - -/* error code for error report */ - -enum spfc_err_code { - FC_CQE_COMPLETED = 0, /* Successful */ - FC_SESS_HT_INSERT_FAIL = 1, /* Offload fail: hash insert fail */ - FC_SESS_HT_INSERT_DUPLICATE = 2, /* Offload fail: duplicate offload */ - FC_SESS_HT_BIT_SET_FAIL = 3, /* Offload fail: bloom filter set fail */ - FC_SESS_HT_DELETE_FAIL = 4, /* Offload fail: hash delete fail(duplicate delete) */ - FC_CQE_BUFFER_CLEAR_IO_COMPLETED = 5, /* IO done in buffer clear */ - FC_CQE_SESSION_ONLY_CLEAR_IO_COMPLETED = 6, /* IO done in session rst mode=1 */ - FC_CQE_SESSION_RST_CLEAR_IO_COMPLETED = 7, /* IO done in session rst mode=3 */ - FC_CQE_TMF_RSP_IO_COMPLETED = 8, /* IO done in tgt tmf rsp */ - FC_CQE_TMF_IO_COMPLETED = 9, /* IO done in ini tmf */ - FC_CQE_DRV_ABORT_IO_COMPLETED = 10, /* IO done in tgt abort */ - /* - *IO done in fcp rsp process. Used for the sceanrio: 1.abort before cmd 2. - *send fcp rsp directly after recv cmd. - */ - FC_CQE_DRV_ABORT_IO_IN_RSP_COMPLETED = 11, - /* - *IO done in fcp cmd process. Used for the sceanrio: 1.abort before cmd 2.child setup fail. - */ - FC_CQE_DRV_ABORT_IO_IN_CMD_COMPLETED = 12, - FC_CQE_WQE_FLUSH_IO_COMPLETED = 13, /* IO done in FLUSH SQ */ - FC_ERROR_CODE_DATA_DIFX_FAILED = 14, /* fcp data format check: DIFX check error */ - /* fcp data format check: task_type is not read */ - FC_ERROR_CODE_DATA_TASK_TYPE_INCORRECT = 15, - FC_ERROR_CODE_DATA_OOO_RO = 16, /* fcp data format check: data offset is not continuous */ - FC_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS = 17, /* fcp data format check: data is over run */ - /* fcp rsp format check: payload is too short */ - FC_ERROR_CODE_FCP_RSP_INVALID_LENGTH_FIELD = 18, - /* fcp rsp format check: fcp_conf need, but exch don't hold seq initiative */ - FC_ERROR_CODE_FCP_RSP_CONF_REQ_NOT_SUPPORTED_YET = 19, - /* fcp rsp format check: fcp_conf is required, but it's the last seq */ - FC_ERROR_CODE_FCP_RSP_OPENED_SEQ = 20, - /* xfer rdy format check: payload is too short */ - FC_ERROR_CODE_XFER_INVALID_PAYLOAD_SIZE = 21, - /* xfer rdy format check: last data out havn't finished */ - FC_ERROR_CODE_XFER_PEND_XFER_SET = 22, - /* xfer rdy format check: data offset is not continuous */ - FC_ERROR_CODE_XFER_OOO_RO = 23, - FC_ERROR_CODE_XFER_NULL_BURST_LEN = 24, /* xfer rdy format check: burst len is 0 */ - FC_ERROR_CODE_REC_TIMER_EXPIRE = 25, /* Timer expire: REC_TIMER */ - FC_ERROR_CODE_E_D_TIMER_EXPIRE = 26, /* Timer expire: E_D_TIMER */ - FC_ERROR_CODE_ABORT_TIMER_EXPIRE = 27, /* Timer expire: Abort timer */ - FC_ERROR_CODE_ABORT_MAGIC_NUM_NOT_MATCH = 28, /* Abort IO magic number mismatch */ - FC_IMMI_CMDPKT_SETUP_FAIL = 29, /* RX immediate data cmd pkt child setup fail */ - FC_ERROR_CODE_DATA_SEQ_ID_NOT_EQUAL = 30, /* RX fcp data sequence id not equal */ - FC_ELS_GS_RSP_EXCH_CHECK_FAIL = 31, /* ELS/GS exch info check fail */ - FC_CQE_ELS_GS_SRQE_GET_FAIL = 32, /* ELS/GS process get SRQE fail */ - FC_CQE_DATA_DMA_REQ_FAIL = 33, /* SMF soli-childdma rsp error */ - FC_CQE_SESSION_CLOSED = 34, /* Session is closed */ - FC_SCQ_IS_FULL = 35, /* SCQ is full */ - FC_SRQ_IS_FULL = 36, /* SRQ is full */ - FC_ERROR_DUCHILDCTX_SETUP_FAIL = 37, /* dpchild ctx setup fail */ - FC_ERROR_INVALID_TXMFS = 38, /* invalid txmfs */ - FC_ERROR_OFFLOAD_LACKOF_SCQE_FAIL = 39, /* offload fail,lack of SCQE,through AEQ */ - FC_ERROR_INVALID_TASK_ID = 40, /* tx invlaid task id */ - FC_ERROR_INVALID_PKT_LEN = 41, /* tx els gs pakcet len check */ - 
FC_CQE_ELS_GS_REQ_CLR_IO_COMPLETED = 42, /* IO done in els gs tx */ - FC_CQE_ELS_RSP_CLR_IO_COMPLETED = 43, /* IO done in els rsp tx */ - FC_ERROR_CODE_RESID_UNDER_ERR = 44, /* FCP RSP RESID ERROR */ - FC_ERROR_EXCH_ID_FREE_ERR = 45, /* Abnormal free xid failed */ - FC_ALLOC_EXCH_ID_FAILED = 46, /* ucode alloc EXCH ID failed */ - FC_ERROR_DUPLICATE_IO_RECEIVED = 47, /* Duplicate tcmnd or tmf rsp received */ - FC_ERROR_RXID_MISCOMPARE = 48, - FC_ERROR_FAILOVER_CLEAR_VALID_HOST = 49, /* Failover cleared valid host io */ - FC_ERROR_EXCH_ID_NOT_MATCH = 50, /* SCQ TYPE: xid not match */ - FC_ERROR_ABORT_FAIL = 51, /* SCQ TYPE: abort fail */ - FC_ERROR_SHARD_TABLE_OP_FAIL = 52, /* SCQ TYPE: shard table OP fail */ - FC_ERROR_E0E1_FAIL = 53, - FC_INSERT_EXCH_ID_HASH_FAILED = 54, /* ucode INSERT EXCH ID HASH failed */ - FC_ERROR_CODE_FCP_RSP_UPDMA_FAILED = 55, /* up dma req failed,while fcp rsp is rcving */ - FC_ERROR_CODE_SID_DID_NOT_MATCH = 56, /* sid or did not match */ - FC_ERROR_DATA_NOT_REL_OFF = 57, /* data not rel off */ - FC_ERROR_CODE_EXCH_ID_TIMEOUT = 58, /* exch id timeout */ - FC_ERROR_PARENT_CHECK_FAIL = 59, - FC_ERROR_RECV_REC_REJECT = 60, /* RECV REC RSP REJECT */ - FC_ERROR_RECV_SRR_REJECT = 61, /* RECV REC SRR REJECT */ - FC_ERROR_REC_NOT_FIND_EXID_INVALID = 62, - FC_ERROR_RECV_REC_NO_ERR = 63, - FC_ERROR_PARENT_CTX_ERR = 64 -}; - -/* AEQ EVENT TYPE */ -enum spfc_aeq_evt_type { - /* SCQ and SRQ not enough, HOST will initiate a operation to associated SCQ/SRQ */ - FC_AEQ_EVENT_QUEUE_ERROR = 48, - FC_AEQ_EVENT_WQE_FATAL_ERROR = 49, /* WQE MSN check error,HOST will reset port */ - FC_AEQ_EVENT_CTX_FATAL_ERROR = 50, /* serious chip error, HOST will reset chip */ - FC_AEQ_EVENT_OFFLOAD_ERROR = 51, - FC_FC_AEQ_EVENT_TYPE_LAST -}; - -enum spfc_protocol_class { - FC_PROTOCOL_CLASS_3 = 0x0, - FC_PROTOCOL_CLASS_2 = 0x1, - FC_PROTOCOL_CLASS_1 = 0x2, - FC_PROTOCOL_CLASS_F = 0x3, - FC_PROTOCOL_CLASS_OTHER = 0x4 -}; - -enum spfc_aeq_evt_err_code { - /* detail type of resource lack */ - FC_SCQ_IS_FULL_ERR = 0, - FC_SRQ_IS_FULL_ERR, - - /* detail type of FC_AEQ_EVENT_WQE_FATAL_ERROR */ - FC_SQE_CHILD_SETUP_WQE_MSN_ERR = 2, - FC_SQE_CHILD_SETUP_WQE_GPA_ERR, - FC_CMDPKT_CHILD_SETUP_INVALID_WQE_ERR_1, - FC_CMDPKT_CHILD_SETUP_INVALID_WQE_ERR_2, - FC_CLEAEQ_WQE_ERR, - FC_WQEFETCH_WQE_MSN_ERR, - FC_WQEFETCH_QUINFO_ERR, - - /* detail type of FC_AEQ_EVENT_CTX_FATAL_ERROR */ - FC_SCQE_ERR_BIT_ERR = 9, - FC_UPDMA_ADDR_REQ_SRQ_ERR, - FC_SOLICHILDDMA_ADDR_REQ_ERR, - FC_UNSOLICHILDDMA_ADDR_REQ_ERR, - FC_SQE_CHILD_SETUP_QINFO_ERR_1, - FC_SQE_CHILD_SETUP_QINFO_ERR_2, - FC_CMDPKT_CHILD_SETUP_QINFO_ERR_1, - FC_CMDPKT_CHILD_SETUP_QINFO_ERR_2, - FC_CMDPKT_CHILD_SETUP_PMSN_ERR, - FC_CLEAEQ_CTX_ERR, - FC_WQEFETCH_CTX_ERR, - FC_FLUSH_QPC_ERR_LQP, - FC_FLUSH_QPC_ERR_SMF, - FC_PREFETCH_QPC_ERR_PCM_MHIT_LQP, - FC_PREFETCH_QPC_ERR_PCM_MHIT_FQG, - FC_PREFETCH_QPC_ERR_PCM_ABM_FQG, - FC_PREFETCH_QPC_ERR_MAP_FQG, - FC_PREFETCH_QPC_ERR_MAP_LQP, - FC_PREFETCH_QPC_ERR_SMF_RTN, - FC_PREFETCH_QPC_ERR_CFG, - FC_PREFETCH_QPC_ERR_FLSH_HIT, - FC_PREFETCH_QPC_ERR_FLSH_ACT, - FC_PREFETCH_QPC_ERR_ABM_W_RSC, - FC_PREFETCH_QPC_ERR_RW_ABM, - FC_PREFETCH_QPC_ERR_DEFAULT, - FC_CHILDHASH_INSERT_SW_ERR, - FC_CHILDHASH_LOOKUP_SW_ERR, - FC_CHILDHASH_DEL_SW_ERR, - FC_EXCH_ID_FREE_SW_ERR, - FC_FLOWHASH_INSERT_SW_ERR, - FC_FLOWHASH_LOOKUP_SW_ERR, - FC_FLOWHASH_DEL_SW_ERR, - FC_FLUSH_QPC_ERR_USED, - FC_FLUSH_QPC_ERR_OUTER_LOCK, - FC_SETUP_SESSION_ERR, - - FC_AEQ_EVT_ERR_CODE_BUTT - -}; - -/* AEQ data structure */ -struct spfc_aqe_data { - union { - 
struct { - u32 conn_id : 16; - u32 rsvd : 8; - u32 evt_code : 8; - } wd0; - - u32 data0; - }; - - union { - struct { - u32 xid : 20; - u32 rsvd : 12; - } wd1; - - u32 data1; - }; -}; - -/* Control Section: Common Header */ -struct spfc_wqe_ctrl_ch { - union { - struct { - u32 bdsl : 8; - u32 drv_sl : 2; - u32 rsvd0 : 4; - u32 wf : 1; - u32 cf : 1; - u32 tsl : 5; - u32 va : 1; - u32 df : 1; - u32 cr : 1; - u32 dif_sl : 3; - u32 csl : 2; - u32 ctrl_sl : 2; - u32 owner : 1; - } wd0; - - u32 ctrl_ch_val; - }; -}; - -/* Control Section: Queue Specific Field */ -struct spfc_wqe_ctrl_qsf { - u32 wqe_sn : 16; - u32 dump_wqe_sn : 16; -}; - -/* DIF info definition in WQE */ -struct spfc_fc_dif_info { - struct { - u32 app_tag_ctrl : 3; /* DIF/DIX APP TAG Control */ - /* Bit 0: scenario of the reference tag verify mode. - *Bit 1: scenario of the reference tag insert/replace mode. - */ - u32 ref_tag_mode : 2; - /* 0: fixed; 1: increasement; */ - u32 ref_tag_ctrl : 3; /* The DIF/DIX Reference tag control */ - u32 grd_agm_ini_ctrl : 3; - u32 grd_agm_ctrl : 2; /* Bit 0: DIF/DIX guard verify algorithm control */ - /* Bit 1: DIF/DIX guard replace or insert algorithm control */ - u32 grd_ctrl : 3; /* The DIF/DIX Guard control */ - u32 dif_verify_type : 2; /* verify type */ - u32 difx_ref_esc : 1; /* Check blocks whose reference tag contains 0xFFFF flag */ - u32 difx_app_esc : 1;/* Check blocks whose application tag contains 0xFFFF flag */ - u32 rsvd : 8; - u32 sct_size : 1; /* Sector size, 1: 4K; 0: 512 */ - u32 smd_tp : 2; - u32 difx_en : 1; - } wd0; - - struct { - u32 cmp_app_tag_msk : 16; - u32 rsvd : 7; - u32 lun_qos_en : 2; - u32 vpid : 7; - } wd1; - - u16 cmp_app_tag; - u16 rep_app_tag; - - u32 cmp_ref_tag; - u32 rep_ref_tag; -}; - -/* Task Section: TMF SQE for INI */ -struct spfc_tmf_info { - union { - struct { - u32 reset_exch_end : 16; - u32 reset_exch_start : 16; - } bs; - u32 value; - } w0; - - union { - struct { - u32 reset_did : 24; - u32 reset_type : 2; - u32 marker_sts : 1; - u32 rsvd0 : 5; - } bs; - u32 value; - } w1; - - union { - struct { - u32 reset_sid : 24; - u32 rsvd0 : 8; - } bs; - u32 value; - } w2; - - u8 reset_lun[8]; -}; - -/* Task Section: CMND SQE for INI */ -struct spfc_sqe_icmnd { - u8 fcp_cmnd_iu[FC_SCSI_CMDIU_LEN]; - union { - struct spfc_fc_dif_info dif_info; - struct spfc_tmf_info tmf; - } info; -}; - -/* Task Section: ABTS SQE */ -struct spfc_sqe_abts { - u32 fh_parm_abts; - u32 hotpooltag; - u32 release_timer; -}; - -struct spfc_keys { - struct { - u32 smac1 : 8; - u32 smac0 : 8; - u32 rsv : 16; - } wd0; - - u8 smac[4]; - - u8 dmac[6]; - u8 sid[3]; - u8 did[3]; - - struct { - u32 port_id : 3; - u32 host_id : 2; - u32 rsvd : 27; - } wd5; - u32 rsvd; -}; - -/* BDSL: Session Enable WQE.keys field only use 26 bytes room */ -struct spfc_cmdqe_sess_en { - struct { - u32 rx_id : 16; - u32 port_id : 8; - u32 task_type : 8; - } wd0; - - struct { - u32 cid : 20; - u32 rsvd1 : 12; - } wd1; - - struct { - u32 conn_id : 16; - u32 scqn : 16; - } wd2; - - struct { - u32 xid_p : 20; - u32 rsvd3 : 12; - } wd3; - - u32 context_gpa_hi; - u32 context_gpa_lo; - struct spfc_keys keys; - u32 context[64]; -}; - -/* Control Section */ -struct spfc_wqe_ctrl { - struct spfc_wqe_ctrl_ch ch; - struct spfc_wqe_ctrl_qsf qsf; -}; - -struct spfc_sqe_els_rsp { - struct { - u32 echo_flag : 16; - u32 data_len : 16; - } wd0; - - struct { - u32 rsvd1 : 27; - u32 offload_flag : 1; - u32 lp_bflag : 1; - u32 clr_io : 1; - u32 para_update : 2; - } wd1; - - struct { - u32 seq_cnt : 1; - u32 e_d_tov : 1; - u32 
rsvd2 : 6; - u32 class_mode : 8; /* 0:class3, 1:class2*/ - u32 tx_mfs : 16; - } wd2; - - u32 e_d_tov_timer_val; - - struct { - u32 conf : 1; - u32 rec : 1; - u32 xfer_dis : 1; - u32 immi_taskid_cnt : 13; - u32 immi_taskid_start : 16; - } wd4; - - u32 first_burst_len; - - struct { - u32 reset_exch_end : 16; - u32 reset_exch_start : 16; - } wd6; - - struct { - u32 scqn : 16; - u32 hotpooltag : 16; - } wd7; - - u32 magic_local; - u32 magic_remote; - u32 ts_rcv_echo_req; - u32 sid; - u32 did; - u32 context_gpa_hi; - u32 context_gpa_lo; -}; - -struct spfc_sqe_reset_session { - struct { - u32 reset_exch_end : 16; - u32 reset_exch_start : 16; - } wd0; - - struct { - u32 reset_did : 24; - u32 mode : 2; - u32 rsvd : 6; - } wd1; - - struct { - u32 reset_sid : 24; - u32 rsvd : 8; - } wd2; - - struct { - u32 scqn : 16; - u32 rsvd : 16; - } wd3; -}; - -struct spfc_sqe_nop_sq { - struct { - u32 scqn : 16; - u32 rsvd : 16; - } wd0; - u32 magic_num; -}; - -struct spfc_sqe_t_els_gs { - u16 echo_flag; - u16 data_len; - - struct { - u32 rsvd1 : 9; - u32 offload_flag : 1; - u32 origin_hottag : 16; - u32 rec_flag : 1; - u32 rec_support : 1; - u32 lp_bflag : 1; - u32 clr_io : 1; - u32 para_update : 2; - } wd4; - - struct { - u32 seq_cnt : 1; - u32 e_d_tov : 1; - u32 rsvd2 : 14; - u32 tx_mfs : 16; - } wd5; - - u32 e_d_tov_timer_val; - - struct { - u32 reset_exch_end : 16; - u32 reset_exch_start : 16; - } wd6; - - struct { - u32 scqn : 16; - u32 hotpooltag : 16; /* used for send ELS rsp */ - } wd7; - - u32 sid; - u32 did; - u32 context_gpa_hi; - u32 context_gpa_lo; - u32 origin_magicnum; -}; - -struct spfc_sqe_els_gs_elsrsp_comm { - u16 rsvd; - u16 data_len; -}; - -struct spfc_sqe_lpb_msg { - struct { - u32 reset_exch_end : 16; - u32 reset_exch_start : 16; - } w0; - - struct { - u32 reset_did : 24; - u32 reset_type : 2; - u32 rsvd0 : 6; - } w1; - - struct { - u32 reset_sid : 24; - u32 rsvd0 : 8; - } w2; - - u16 tmf_exch_id; - u16 rsvd1; - - u8 reset_lun[8]; -}; - -/* SQE Task Section's Contents except Common Header */ -union spfc_sqe_ts_cont { - struct spfc_sqe_icmnd icmnd; - struct spfc_sqe_abts abts; - struct spfc_sqe_els_rsp els_rsp; - struct spfc_sqe_t_els_gs t_els_gs; - struct spfc_sqe_els_gs_elsrsp_comm els_gs_elsrsp_comm; - struct spfc_sqe_reset_session reset_session; - struct spfc_sqe_lpb_msg lpb_msg; - struct spfc_sqe_nop_sq nop_sq; - u32 value[17]; -}; - -struct spfc_sqe_nvme_icmnd_part2 { - u8 nvme_cmnd_iu_part2_data[FC_NVME_CMDIU_LEN - FC_SCSI_CMDIU_LEN]; -}; - -union spfc_sqe_ts_ex { - struct spfc_sqe_nvme_icmnd_part2 nvme_icmnd_part2; - u32 value[12]; -}; - -struct spfc_sqe_ts { - /* SQE Task Section's Common Header */ - u32 local_xid : 16; /* local exch_id, icmnd/els send used for hotpooltag */ - u32 crc_inj : 1; - u32 immi_std : 1; - u32 cdb_type : 1; /* cdb_type = 0:CDB_LEN = 16B, cdb_type = 1:CDB_LEN = 32B */ - u32 rsvd : 5; /* used for loopback saving bdsl's num */ - u32 task_type : 8; - - struct { - u16 conn_id; - u16 remote_xid; - } wd0; - - u32 xid : 20; - u32 sqn : 12; - u32 cid; - u32 magic_num; - union spfc_sqe_ts_cont cont; -}; - -struct spfc_constant_sge { - u32 buf_addr_hi; - u32 buf_addr_lo; -}; - -struct spfc_variable_sge { - u32 buf_addr_hi; - u32 buf_addr_lo; - - struct { - u32 buf_len : 31; - u32 r_flag : 1; - } wd0; - - struct { - u32 buf_addr_gpa : 16; - u32 xid : 14; - u32 extension_flag : 1; - u32 last_flag : 1; - } wd1; -}; - -#define FC_WQE_SIZE 256 -/* SQE, should not be over 256B */ -struct spfc_sqe { - struct spfc_wqe_ctrl ctrl_sl; - u32 sid; - u32 did; - u64 wqe_gpa; /* 
gpa shift 6 bit to right*/ - u64 db_val; - union spfc_sqe_ts_ex ts_ex; - struct spfc_variable_sge esge[3]; - struct spfc_wqe_ctrl ectrl_sl; - struct spfc_sqe_ts ts_sl; - struct spfc_variable_sge sge[2]; -}; - -struct spfc_rqe_ctrl { - struct spfc_wqe_ctrl_ch ch; - - struct { - u16 wqe_msn; - u16 dump_wqe_msn; - } wd0; -}; - -struct spfc_rqe_drv { - struct { - u32 rsvd0 : 16; - u32 user_id : 16; - } wd0; - - u32 rsvd1; -}; - -/* RQE,should not be over 32B */ -struct spfc_rqe { - struct spfc_rqe_ctrl ctrl_sl; - u32 cqe_gpa_h; - u32 cqe_gpa_l; - struct spfc_constant_sge bds_sl; - struct spfc_rqe_drv drv_sl; -}; - -struct spfc_cmdqe_abort { - struct { - u32 rx_id : 16; - u32 rsvd0 : 8; - u32 task_type : 8; - } wd0; - - struct { - u32 ox_id : 16; - u32 rsvd1 : 12; - u32 trsp_send : 1; - u32 tcmd_send : 1; - u32 immi : 1; - u32 reply_sts : 1; - } wd1; - - struct { - u32 conn_id : 16; - u32 scqn : 16; - } wd2; - - struct { - u32 xid : 20; - u32 rsvd : 12; - } wd3; - - struct { - u32 cid : 20; - u32 rsvd : 12; - } wd4; - struct { - u32 hotpooltag : 16; - u32 rsvd : 16; - } wd5; /* v6 new define */ - /* abort time out. Used for abort and io cmd reach ucode in different path - * and io cmd will not arrive. - */ - u32 time_out; - u32 magic_num; -}; - -struct spfc_cmdqe_abts_rsp { - struct { - u32 rx_id : 16; - u32 rsvd0 : 8; - u32 task_type : 8; - } wd0; - - struct { - u32 ox_id : 16; - u32 rsvd1 : 4; - u32 port_id : 4; - u32 payload_len : 7; - u32 rsp_type : 1; - } wd1; - - struct { - u32 conn_id : 16; - u32 scqn : 16; - } wd2; - - struct { - u32 xid : 20; - u32 rsvd : 12; - } wd3; - - struct { - u32 cid : 20; - u32 rsvd : 12; - } wd4; - - struct { - u32 req_rx_id : 16; - u32 hotpooltag : 16; - } wd5; - - /* payload length is according to rsp_type:1DWORD or 3DWORD */ - u32 payload[3]; -}; - -struct spfc_cmdqe_buffer_clear { - struct { - u32 rsvd1 : 16; - u32 rsvd0 : 8; - u32 wqe_type : 8; - } wd0; - - struct { - u32 rx_id_end : 16; - u32 rx_id_start : 16; - } wd1; - - u32 scqn; - u32 wd3; -}; - -struct spfc_cmdqe_flush_sq { - struct { - u32 entry_count : 16; - u32 rsvd : 8; - u32 wqe_type : 8; - } wd0; - - struct { - u32 scqn : 16; - u32 port_id : 4; - u32 pos : 11; - u32 last_wqe : 1; - } wd1; - - struct { - u32 rsvd : 4; - u32 clr_pos : 12; - u32 pkt_ptr : 16; - } wd2; - - struct { - u32 first_sq_xid : 24; - u32 sqqid_start_per_session : 4; - u32 sqcnt_per_session : 4; - } wd3; -}; - -struct spfc_cmdqe_dump_exch { - struct { - u32 rsvd1 : 16; - u32 rsvd0 : 8; - u32 task_type : 8; - } wd0; - - u16 oqid_wr; - u16 oqid_rd; - - u32 host_id; - u32 func_id; - u32 cache_id; - u32 exch_id; -}; - -struct spfc_cmdqe_creat_srqc { - struct { - u32 rsvd1 : 16; - u32 rsvd0 : 8; - u32 task_type : 8; - } wd0; - - u32 srqc_gpa_h; - u32 srqc_gpa_l; - - u32 srqc[16]; /* srqc_size=64B */ -}; - -struct spfc_cmdqe_delete_srqc { - struct { - u32 rsvd1 : 16; - u32 rsvd0 : 8; - u32 task_type : 8; - } wd0; - - u32 srqc_gpa_h; - u32 srqc_gpa_l; -}; - -struct spfc_cmdqe_clr_srq { - struct { - u32 rsvd1 : 16; - u32 rsvd0 : 8; - u32 task_type : 8; - } wd0; - - struct { - u32 scqn : 16; - u32 srq_type : 16; - } wd1; - - u32 srqc_gpa_h; - u32 srqc_gpa_l; -}; - -struct spfc_cmdqe_creat_scqc { - struct { - u32 rsvd1 : 16; - u32 rsvd0 : 8; - u32 task_type : 8; - } wd0; - - struct { - u32 scqn : 16; - u32 rsvd2 : 16; - } wd1; - - u32 scqc[16]; /* scqc_size=64B */ -}; - -struct spfc_cmdqe_delete_scqc { - struct { - u32 rsvd1 : 16; - u32 rsvd0 : 8; - u32 task_type : 8; - } wd0; - - struct { - u32 scqn : 16; - u32 rsvd2 : 16; - } wd1; 
-}; - -struct spfc_cmdqe_creat_ssqc { - struct { - u32 rsvd1 : 4; - u32 xid : 20; - u32 task_type : 8; - } wd0; - - struct { - u32 scqn : 16; - u32 rsvd2 : 16; - } wd1; - u32 context_gpa_hi; - u32 context_gpa_lo; - - u32 ssqc[64]; /* ssqc_size=256B */ -}; - -struct spfc_cmdqe_delete_ssqc { - struct { - u32 entry_count : 4; - u32 xid : 20; - u32 task_type : 8; - } wd0; - - struct { - u32 scqn : 16; - u32 rsvd2 : 16; - } wd1; - u32 context_gpa_hi; - u32 context_gpa_lo; -}; - -/* add xid free via cmdq */ -struct spfc_cmdqe_exch_id_free { - struct { - u32 task_id : 16; - u32 port_id : 8; - u32 rsvd0 : 8; - } wd0; - - u32 magic_num; - - struct { - u32 scqn : 16; - u32 hotpool_tag : 16; - } wd2; - struct { - u32 rsvd1 : 31; - u32 clear_abort_flag : 1; - } wd3; - u32 sid; - u32 did; - u32 type; /* ELS/ELS RSP/IO */ -}; - -struct spfc_cmdqe_cmdqe_dfx { - struct { - u32 rsvd1 : 4; - u32 xid : 20; - u32 task_type : 8; - } wd0; - - struct { - u32 qid_crclen : 12; - u32 cid : 20; - } wd1; - u32 context_gpa_hi; - u32 context_gpa_lo; - u32 dfx_type; - - u32 rsv[16]; -}; - -struct spfc_sqe_t_rsp { - struct { - u32 rsvd1 : 16; - u32 fcp_rsp_len : 8; - u32 busy_rsp : 3; - u32 immi : 1; - u32 mode : 1; - u32 conf : 1; - u32 fill : 2; - } wd0; - - u32 hotpooltag; - - union { - struct { - u32 addr_h; - u32 addr_l; - } gpa; - - struct { - u32 data[23]; /* FCP_RESP payload buf, 92B rsvd */ - } buf; - } payload; -}; - -struct spfc_sqe_tmf_t_rsp { - struct { - u32 scqn : 16; - u32 fcp_rsp_len : 8; - u32 pkt_nosnd_flag : 3; /* tmf rsp snd flag, 0:snd, 1: not snd, Driver ignore */ - u32 reset_type : 2; - u32 conf : 1; - u32 fill : 2; - } wd0; - - struct { - u32 reset_exch_end : 16; - u32 reset_exch_start : 16; - } wd1; - - struct { - u16 hotpooltag; /*tmf rsp hotpooltag, Driver ignore */ - u16 rsvd; - } wd2; - - u8 lun[8]; /* Lun ID */ - u32 data[20]; /* FCP_RESP payload buf, 80B rsvd */ -}; - -struct spfc_sqe_tresp_ts { - /* SQE Task Section's Common Header */ - u16 local_xid; - u8 rsvd0; - u8 task_type; - - struct { - u16 conn_id; - u16 remote_xid; - } wd0; - - u32 xid : 20; - u32 sqn : 12; - u32 cid; - u32 magic_num; - struct spfc_sqe_t_rsp t_rsp; -}; - -struct spfc_sqe_tmf_resp_ts { - /* SQE Task Section's Common Header */ - u16 local_xid; - u8 rsvd0; - u8 task_type; - - struct { - u16 conn_id; - u16 remote_xid; - } wd0; - - u32 xid : 20; - u32 sqn : 12; - u32 cid; - u32 magic_num; /* magic num */ - struct spfc_sqe_tmf_t_rsp tmf_rsp; -}; - -/* SQE for fcp response, max TSL is 120B */ -struct spfc_sqe_tresp { - struct spfc_wqe_ctrl ctrl_sl; - u64 taskrsvd; - u64 wqe_gpa; - u64 db_val; - union spfc_sqe_ts_ex ts_ex; - struct spfc_variable_sge esge[3]; - struct spfc_wqe_ctrl ectrl_sl; - struct spfc_sqe_tresp_ts ts_sl; -}; - -/* SQE for tmf response, max TSL is 120B */ -struct spfc_sqe_tmf_rsp { - struct spfc_wqe_ctrl ctrl_sl; - u64 taskrsvd; - u64 wqe_gpa; - u64 db_val; - union spfc_sqe_ts_ex ts_ex; - struct spfc_variable_sge esge[3]; - struct spfc_wqe_ctrl ectrl_sl; - struct spfc_sqe_tmf_resp_ts ts_sl; -}; - -/* SCQE Common Header */ -struct spfc_scqe_ch { - struct { - u32 task_type : 8; - u32 sqn : 13; - u32 cqe_remain_cnt : 3; - u32 err_code : 7; - u32 owner : 1; - } wd0; -}; - -struct spfc_scqe_type { - struct spfc_scqe_ch ch; - - u32 rsvd0; - - u16 conn_id; - u16 rsvd4; - - u32 rsvd1[12]; - - struct { - u32 done : 1; - u32 rsvd : 23; - u32 dif_vry_rst : 8; - } wd0; -}; - -struct spfc_scqe_sess_sts { - struct spfc_scqe_ch ch; - - struct { - u32 xid_qpn : 20; - u32 rsvd1 : 12; - } wd0; - - struct { - u32 conn_id 
: 16; - u32 rsvd3 : 16; - } wd1; - - struct { - u32 cid : 20; - u32 rsvd2 : 12; - } wd2; - - u64 rsvd3; -}; - -struct spfc_scqe_comm_rsp_sts { - struct spfc_scqe_ch ch; - - struct { - u32 rx_id : 16; - u32 ox_id : 16; - } wd0; - - struct { - u32 conn_id : 16; - u32 hotpooltag : 16; /* ucode return hotpooltag to drv */ - } wd1; - - u32 magic_num; -}; - -struct spfc_scqe_iresp { - struct spfc_scqe_ch ch; - - struct { - u32 rx_id : 16; - u32 ox_id : 16; - } wd0; - - struct { - u32 conn_id : 16; - u32 rsvd0 : 3; - u32 user_id_num : 8; - u32 dif_info : 5; - } wd1; - - struct { - u32 scsi_status : 8; - u32 fcp_flag : 8; - u32 hotpooltag : 16; /* ucode return hotpooltag to drv */ - } wd2; - - u32 fcp_resid; - u32 fcp_sns_len; - u32 fcp_rsp_len; - u32 magic_num; - u16 user_id[FC_SENSEDATA_USERID_CNT_MAX]; - u32 rsv1; -}; - -struct spfc_scqe_nvme_iresp { - struct spfc_scqe_ch ch; - - struct { - u32 rx_id : 16; - u32 ox_id : 16; - } wd0; - - struct { - u32 conn_id : 16; - u32 eresp_flag : 8; - u32 user_id_num : 8; - } wd1; - - struct { - u32 scsi_status : 8; - u32 fcp_flag : 8; - u32 hotpooltag : 16; /* ucode return hotpooltag to drv */ - } wd2; - u32 magic_num; - u32 eresp[8]; -}; - -#pragma pack(1) -struct spfc_dif_result { - u8 vrd_rpt; - u16 pad; - u8 rcv_pi_vb; - u32 rcv_pi_h; - u32 rcv_pi_l; - u16 vrf_agm_imm; - u16 ri_agm_imm; -}; - -#pragma pack() - -struct spfc_scqe_dif_result { - struct spfc_scqe_ch ch; - - struct { - u32 rx_id : 16; - u32 ox_id : 16; - } wd0; - - struct { - u32 conn_id : 16; - u32 rsvd0 : 11; - u32 dif_info : 5; - } wd1; - - struct { - u32 scsi_status : 8; - u32 fcp_flag : 8; - u32 hotpooltag : 16; /* ucode return hotpooltag to drv */ - } wd2; - - u32 fcp_resid; - u32 fcp_sns_len; - u32 fcp_rsp_len; - u32 magic_num; - - u32 rsv1[3]; - struct spfc_dif_result difinfo; -}; - -struct spfc_scqe_rcv_abts_rsp { - struct spfc_scqe_ch ch; - - struct { - u32 rx_id : 16; - u32 ox_id : 16; - } wd0; - - struct { - u32 conn_id : 16; - u32 hotpooltag : 16; - } wd1; - - struct { - u32 fh_rctrl : 8; - u32 rsvd0 : 24; - } wd2; - - struct { - u32 did : 24; - u32 rsvd1 : 8; - } wd3; - - struct { - u32 sid : 24; - u32 rsvd2 : 8; - } wd4; - - /* payload length is according to fh_rctrl:1DWORD or 3DWORD */ - u32 payload[3]; - u32 magic_num; -}; - -struct spfc_scqe_fcp_rsp_sts { - struct spfc_scqe_ch ch; - - struct { - u32 rx_id : 16; - u32 ox_id : 16; - } wd0; - - struct { - u32 conn_id : 16; - u32 rsvd0 : 10; - u32 immi : 1; - u32 dif_info : 5; - } wd1; - - u32 magic_num; - u32 hotpooltag; - u32 xfer_rsp; - u32 rsvd[5]; - - u32 dif_tmp[4]; /* HW will overwrite it */ -}; - -struct spfc_scqe_rcv_els_cmd { - struct spfc_scqe_ch ch; - - struct { - u32 did : 24; - u32 class_mode : 8; /* 0:class3, 1:class2 */ - } wd0; - - struct { - u32 sid : 24; - u32 rsvd1 : 8; - } wd1; - - struct { - u32 rx_id : 16; - u32 ox_id : 16; - } wd2; - - struct { - u32 user_id_num : 16; - u32 data_len : 16; - } wd3; - /* User ID of SRQ SGE, used for drvier buffer release */ - u16 user_id[FC_LS_GS_USERID_CNT_MAX]; - u32 ts; -}; - -struct spfc_scqe_param_check_scq { - struct spfc_scqe_ch ch; - - u8 rsvd0[3]; - u8 port_id; - - u16 scqn; - u16 check_item; - - u16 exch_id_load; - u16 exch_id; - - u16 historty_type; - u16 entry_count; - - u32 xid; - - u32 gpa_h; - u32 gpa_l; - - u32 magic_num; - u32 hotpool_tag; - - u32 payload_len; - u32 sub_err; - - u32 rsvd2[3]; -}; - -struct spfc_scqe_rcv_abts_cmd { - struct spfc_scqe_ch ch; - - struct { - u32 did : 24; - u32 rsvd0 : 8; - } wd0; - - struct { - u32 sid : 24; - u32 rsvd1 : 8; 
- } wd1; - - struct { - u32 rx_id : 16; - u32 ox_id : 16; - } wd2; -}; - -struct spfc_scqe_rcv_els_gs_rsp { - struct spfc_scqe_ch ch; - - struct { - u32 rx_id : 16; - u32 ox_id : 16; - } wd1; - - struct { - u32 conn_id : 16; - u32 data_len : 16; /* ELS/GS RSP Payload length */ - } wd2; - - struct { - u32 did : 24; - u32 rsvd : 6; - u32 echo_rsp : 1; - u32 end_rsp : 1; - } wd3; - - struct { - u32 sid : 24; - u32 user_id_num : 8; - } wd4; - - struct { - u32 rsvd : 16; - u32 hotpooltag : 16; - } wd5; - - u32 magic_num; - u16 user_id[FC_LS_GS_USERID_CNT_MAX]; -}; - -struct spfc_scqe_rcv_flush_sts { - struct spfc_scqe_ch ch; - - struct { - u32 rsvd0 : 4; - u32 clr_pos : 12; - u32 port_id : 8; - u32 last_flush : 8; - } wd0; -}; - -struct spfc_scqe_rcv_clear_buf_sts { - struct spfc_scqe_ch ch; - - struct { - u32 rsvd0 : 24; - u32 port_id : 8; - } wd0; -}; - -struct spfc_scqe_clr_srq_rsp { - struct spfc_scqe_ch ch; - - struct { - u32 srq_type : 16; - u32 cur_wqe_msn : 16; - } wd0; -}; - -struct spfc_scqe_itmf_marker_sts { - struct spfc_scqe_ch ch; - - struct { - u32 rx_id : 16; - u32 ox_id : 16; - } wd1; - - struct { - u32 did : 24; - u32 end_rsp : 8; - } wd2; - - struct { - u32 sid : 24; - u32 rsvd1 : 8; - } wd3; - - struct { - u32 hotpooltag : 16; - u32 rsvd : 16; - } wd4; - - u32 magic_num; -}; - -struct spfc_scqe_abts_marker_sts { - struct spfc_scqe_ch ch; - - struct { - u32 rx_id : 16; - u32 ox_id : 16; - } wd1; - - struct { - u32 did : 24; - u32 end_rsp : 8; - } wd2; - - struct { - u32 sid : 24; - u32 io_state : 8; - } wd3; - - struct { - u32 hotpooltag : 16; - u32 rsvd : 16; - } wd4; - - u32 magic_num; -}; - -struct spfc_scqe_ini_abort_sts { - struct spfc_scqe_ch ch; - - struct { - u32 rx_id : 16; - u32 ox_id : 16; - } wd1; - - struct { - u32 did : 24; - u32 rsvd : 8; - } wd2; - - struct { - u32 sid : 24; - u32 io_state : 8; - } wd3; - - struct { - u32 hotpooltag : 16; - u32 rsvd : 16; - } wd4; - - u32 magic_num; -}; - -struct spfc_scqe_sq_nop_sts { - struct spfc_scqe_ch ch; - struct { - u32 rsvd : 16; - u32 sqn : 16; - } wd0; - struct { - u32 rsvd : 16; - u32 conn_id : 16; - } wd1; - u32 magic_num; -}; - -/* SCQE, should not be over 64B */ -#define FC_SCQE_SIZE 64 -union spfc_scqe { - struct spfc_scqe_type common; - struct spfc_scqe_sess_sts sess_sts; /* session enable/disable/delete sts */ - struct spfc_scqe_comm_rsp_sts comm_sts; /* aborts/abts_rsp/els rsp sts */ - struct spfc_scqe_rcv_clear_buf_sts clear_sts; /* clear buffer sts */ - struct spfc_scqe_rcv_flush_sts flush_sts; /* flush sq sts */ - struct spfc_scqe_iresp iresp; - struct spfc_scqe_rcv_abts_rsp rcv_abts_rsp; /* recv abts rsp */ - struct spfc_scqe_fcp_rsp_sts fcp_rsp_sts; /* Read/Write/Rsp sts */ - struct spfc_scqe_rcv_els_cmd rcv_els_cmd; /* recv els cmd */ - struct spfc_scqe_rcv_abts_cmd rcv_abts_cmd; /* recv abts cmd */ - struct spfc_scqe_rcv_els_gs_rsp rcv_els_gs_rsp; /* recv els/gs rsp */ - struct spfc_scqe_clr_srq_rsp clr_srq_sts; - struct spfc_scqe_itmf_marker_sts itmf_marker_sts; /* tmf marker */ - struct spfc_scqe_abts_marker_sts abts_marker_sts; /* abts marker */ - struct spfc_scqe_dif_result dif_result; - struct spfc_scqe_param_check_scq param_check_sts; - struct spfc_scqe_nvme_iresp nvme_iresp; - struct spfc_scqe_ini_abort_sts ini_abort_sts; - struct spfc_scqe_sq_nop_sts sq_nop_sts; -}; - -struct spfc_cmdqe_type { - struct { - u32 rx_id : 16; - u32 rsvd0 : 8; - u32 task_type : 8; - } wd0; -}; - -struct spfc_cmdqe_send_ack { - struct { - u32 rx_id : 16; - u32 immi_stand : 1; - u32 rsvd0 : 7; - u32 task_type : 8; - 
} wd0; - - u32 xid; - u32 cid; -}; - -struct spfc_cmdqe_send_aeq_err { - struct { - u32 errorevent : 8; - u32 errortype : 8; - u32 portid : 8; - u32 task_type : 8; - } wd0; -}; - -/* CMDQE, variable length */ -union spfc_cmdqe { - struct spfc_cmdqe_type common; - struct spfc_cmdqe_sess_en session_enable; - struct spfc_cmdqe_abts_rsp snd_abts_rsp; - struct spfc_cmdqe_abort snd_abort; - struct spfc_cmdqe_buffer_clear buffer_clear; - struct spfc_cmdqe_flush_sq flush_sq; - struct spfc_cmdqe_dump_exch dump_exch; - struct spfc_cmdqe_creat_srqc create_srqc; - struct spfc_cmdqe_delete_srqc delete_srqc; - struct spfc_cmdqe_clr_srq clear_srq; - struct spfc_cmdqe_creat_scqc create_scqc; - struct spfc_cmdqe_delete_scqc delete_scqc; - struct spfc_cmdqe_send_ack send_ack; - struct spfc_cmdqe_send_aeq_err send_aeqerr; - struct spfc_cmdqe_creat_ssqc createssqc; - struct spfc_cmdqe_delete_ssqc deletessqc; - struct spfc_cmdqe_cmdqe_dfx dfx_info; - struct spfc_cmdqe_exch_id_free xid_free; -}; - -#endif diff --git a/drivers/scsi/spfc/hw/spfc_io.c b/drivers/scsi/spfc/hw/spfc_io.c deleted file mode 100644 index 7184eb6a10af..000000000000 --- a/drivers/scsi/spfc/hw/spfc_io.c +++ /dev/null @@ -1,1193 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "spfc_io.h" -#include "spfc_module.h" -#include "spfc_service.h" - -#define SPFC_SGE_WD1_XID_MASK 0x3fff - -u32 dif_protect_opcode = INVALID_VALUE32; -u32 dif_app_esc_check = SPFC_DIF_APP_REF_ESC_CHECK; -u32 dif_ref_esc_check = SPFC_DIF_APP_REF_ESC_CHECK; -u32 grd_agm_ini_ctrl = SPFC_DIF_CRC_CS_INITIAL_CONFIG_BY_BIT0_1; -u32 ref_tag_no_increase; -u32 dix_flag; -u32 grd_ctrl; -u32 grd_agm_ctrl = SPFC_DIF_GUARD_VERIFY_ALGORITHM_CTL_T10_CRC16; -u32 cmp_app_tag_mask = 0xffff; -u32 app_tag_ctrl; -u32 ref_tag_ctrl; -u32 ref_tag_mod = INVALID_VALUE32; -u32 rep_ref_tag; -u32 rx_rep_ref_tag; -u16 cmp_app_tag; -u16 rep_app_tag; - -static void spfc_dif_err_count(struct spfc_hba_info *hba, u8 info) -{ - u8 dif_info = info; - - if (dif_info & SPFC_TX_DIF_ERROR_FLAG) { - SPFC_DIF_ERR_STAT(hba, SPFC_DIF_SEND_DIFERR_ALL); - if (dif_info & SPFC_DIF_ERROR_CODE_CRC) - SPFC_DIF_ERR_STAT(hba, SPFC_DIF_SEND_DIFERR_CRC); - - if (dif_info & SPFC_DIF_ERROR_CODE_APP) - SPFC_DIF_ERR_STAT(hba, SPFC_DIF_SEND_DIFERR_APP); - - if (dif_info & SPFC_DIF_ERROR_CODE_REF) - SPFC_DIF_ERR_STAT(hba, SPFC_DIF_SEND_DIFERR_REF); - } else { - SPFC_DIF_ERR_STAT(hba, SPFC_DIF_RECV_DIFERR_ALL); - if (dif_info & SPFC_DIF_ERROR_CODE_CRC) - SPFC_DIF_ERR_STAT(hba, SPFC_DIF_RECV_DIFERR_CRC); - - if (dif_info & SPFC_DIF_ERROR_CODE_APP) - SPFC_DIF_ERR_STAT(hba, SPFC_DIF_RECV_DIFERR_APP); - - if (dif_info & SPFC_DIF_ERROR_CODE_REF) - SPFC_DIF_ERR_STAT(hba, SPFC_DIF_RECV_DIFERR_REF); - } -} - -void spfc_build_no_dif_control(struct unf_frame_pkg *pkg, - struct spfc_fc_dif_info *info) -{ - struct spfc_fc_dif_info *dif_info = info; - - /* dif enable or disable */ - dif_info->wd0.difx_en = SPFC_DIF_DISABLE; - - dif_info->wd1.vpid = pkg->qos_level; - dif_info->wd1.lun_qos_en = 1; -} - -void spfc_dif_action_forward(struct spfc_fc_dif_info *dif_info_l1, - struct unf_dif_control_info *dif_ctrl_u1) -{ - dif_info_l1->wd0.grd_ctrl |= - (dif_ctrl_u1->protect_opcode & UNF_VERIFY_CRC_MASK) - ? SPFC_DIF_GARD_REF_APP_CTRL_VERIFY - : SPFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; - dif_info_l1->wd0.grd_ctrl |= - (dif_ctrl_u1->protect_opcode & UNF_REPLACE_CRC_MASK) - ? 
SPFC_DIF_GARD_REF_APP_CTRL_REPLACE - : SPFC_DIF_GARD_REF_APP_CTRL_FORWARD; - - dif_info_l1->wd0.ref_tag_ctrl |= - (dif_ctrl_u1->protect_opcode & UNF_VERIFY_LBA_MASK) - ? SPFC_DIF_GARD_REF_APP_CTRL_VERIFY - : SPFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; - dif_info_l1->wd0.ref_tag_ctrl |= - (dif_ctrl_u1->protect_opcode & UNF_REPLACE_LBA_MASK) - ? SPFC_DIF_GARD_REF_APP_CTRL_REPLACE - : SPFC_DIF_GARD_REF_APP_CTRL_FORWARD; - - dif_info_l1->wd0.app_tag_ctrl |= - (dif_ctrl_u1->protect_opcode & UNF_VERIFY_APP_MASK) - ? SPFC_DIF_GARD_REF_APP_CTRL_VERIFY - : SPFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; - dif_info_l1->wd0.app_tag_ctrl |= - (dif_ctrl_u1->protect_opcode & UNF_REPLACE_APP_MASK) - ? SPFC_DIF_GARD_REF_APP_CTRL_REPLACE - : SPFC_DIF_GARD_REF_APP_CTRL_FORWARD; -} - -void spfc_dif_action_delete(struct spfc_fc_dif_info *dif_info_l1, - struct unf_dif_control_info *dif_ctrl_u1) -{ - dif_info_l1->wd0.grd_ctrl |= - (dif_ctrl_u1->protect_opcode & UNF_VERIFY_CRC_MASK) - ? SPFC_DIF_GARD_REF_APP_CTRL_VERIFY - : SPFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; - dif_info_l1->wd0.grd_ctrl |= SPFC_DIF_GARD_REF_APP_CTRL_DELETE; - - dif_info_l1->wd0.ref_tag_ctrl |= - (dif_ctrl_u1->protect_opcode & UNF_VERIFY_LBA_MASK) - ? SPFC_DIF_GARD_REF_APP_CTRL_VERIFY - : SPFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; - dif_info_l1->wd0.ref_tag_ctrl |= SPFC_DIF_GARD_REF_APP_CTRL_DELETE; - - dif_info_l1->wd0.app_tag_ctrl |= - (dif_ctrl_u1->protect_opcode & UNF_VERIFY_APP_MASK) - ? SPFC_DIF_GARD_REF_APP_CTRL_VERIFY - : SPFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; - dif_info_l1->wd0.app_tag_ctrl |= SPFC_DIF_GARD_REF_APP_CTRL_DELETE; -} - -static void spfc_convert_dif_action(struct unf_dif_control_info *dif_ctrl, - struct spfc_fc_dif_info *dif_info) -{ - struct spfc_fc_dif_info *dif_info_l1 = NULL; - struct unf_dif_control_info *dif_ctrl_u1 = NULL; - - dif_info_l1 = dif_info; - dif_ctrl_u1 = dif_ctrl; - - switch (UNF_DIF_ACTION_MASK & dif_ctrl_u1->protect_opcode) { - case UNF_DIF_ACTION_VERIFY_AND_REPLACE: - case UNF_DIF_ACTION_VERIFY_AND_FORWARD: - spfc_dif_action_forward(dif_info_l1, dif_ctrl_u1); - break; - - case UNF_DIF_ACTION_INSERT: - dif_info_l1->wd0.grd_ctrl |= - SPFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; - dif_info_l1->wd0.grd_ctrl |= SPFC_DIF_GARD_REF_APP_CTRL_INSERT; - dif_info_l1->wd0.ref_tag_ctrl |= - SPFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; - dif_info_l1->wd0.ref_tag_ctrl |= - SPFC_DIF_GARD_REF_APP_CTRL_INSERT; - dif_info_l1->wd0.app_tag_ctrl |= - SPFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY; - dif_info_l1->wd0.app_tag_ctrl |= - SPFC_DIF_GARD_REF_APP_CTRL_INSERT; - break; - - case UNF_DIF_ACTION_VERIFY_AND_DELETE: - spfc_dif_action_delete(dif_info_l1, dif_ctrl_u1); - break; - - default: - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "Unknown dif protect opcode 0x%x", - dif_ctrl_u1->protect_opcode); - break; - } -} - -void spfc_get_dif_info_l1(struct spfc_fc_dif_info *dif_info_l1, - struct unf_dif_control_info *dif_ctrl_u1) -{ - dif_info_l1->wd1.cmp_app_tag_msk = cmp_app_tag_mask; - - dif_info_l1->rep_app_tag = dif_ctrl_u1->app_tag; - dif_info_l1->rep_ref_tag = dif_ctrl_u1->start_lba; - - dif_info_l1->cmp_app_tag = dif_ctrl_u1->app_tag; - dif_info_l1->cmp_ref_tag = dif_ctrl_u1->start_lba; - - if (cmp_app_tag != 0) - dif_info_l1->cmp_app_tag = cmp_app_tag; - - if (rep_app_tag != 0) - dif_info_l1->rep_app_tag = rep_app_tag; - - if (rep_ref_tag != 0) - dif_info_l1->rep_ref_tag = rep_ref_tag; -} - -void spfc_build_dif_control(struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, - struct spfc_fc_dif_info *dif_info) -{ - struct spfc_fc_dif_info *dif_info_l1 = 
NULL; - struct unf_dif_control_info *dif_ctrl_u1 = NULL; - - dif_info_l1 = dif_info; - dif_ctrl_u1 = &pkg->dif_control; - - /* dif enable or disable */ - dif_info_l1->wd0.difx_en = SPFC_DIF_ENABLE; - - dif_info_l1->wd1.vpid = pkg->qos_level; - dif_info_l1->wd1.lun_qos_en = 1; - - /* 512B + 8 size mode */ - dif_info_l1->wd0.sct_size = (dif_ctrl_u1->flags & UNF_DIF_SECTSIZE_4KB) - ? SPFC_DIF_SECTOR_4KB_MODE - : SPFC_DIF_SECTOR_512B_MODE; - - /* dif type 1 */ - dif_info_l1->wd0.dif_verify_type = dif_type; - - /* Check whether the 0xffff app or ref domain is isolated */ - /* If all ff messages are displayed in type1 app, checkcheck sector - * dif_info_l1->wd0.difx_app_esc = SPFC_DIF_APP_REF_ESC_CHECK - */ - - dif_info_l1->wd0.difx_app_esc = dif_app_esc_check; - - /* type1 ref tag If all ff is displayed, check sector is required */ - dif_info_l1->wd0.difx_ref_esc = dif_ref_esc_check; - - /* Currently, only t10 crc is supported */ - dif_info_l1->wd0.grd_agm_ctrl = 0; - - /* Set this parameter based on the values of bit zero and bit one. - * The initial value is 0, and the value is UNF_DEFAULT_CRC_GUARD_SEED - */ - dif_info_l1->wd0.grd_agm_ini_ctrl = grd_agm_ini_ctrl; - dif_info_l1->wd0.app_tag_ctrl = 0; - dif_info_l1->wd0.grd_ctrl = 0; - dif_info_l1->wd0.ref_tag_ctrl = 0; - - /* Convert the verify operation, replace, forward, insert, - * and delete operations based on the actual operation code of the upper - * layer - */ - if (dif_protect_opcode != INVALID_VALUE32) { - dif_ctrl_u1->protect_opcode = - dif_protect_opcode | - (dif_ctrl_u1->protect_opcode & UNF_DIF_ACTION_MASK); - } - - spfc_convert_dif_action(dif_ctrl_u1, dif_info_l1); - dif_info_l1->wd0.app_tag_ctrl |= app_tag_ctrl; - - /* Address self-increase mode */ - dif_info_l1->wd0.ref_tag_mode = - (dif_ctrl_u1->protect_opcode & UNF_DIF_ACTION_NO_INCREASE_REFTAG) - ? (BOTH_NONE) - : (BOTH_INCREASE); - - if (ref_tag_mod != INVALID_VALUE32) - dif_info_l1->wd0.ref_tag_mode = ref_tag_mod; - - /* This parameter is used only when type 3 is set to 0xffff. 
*/ - spfc_get_dif_info_l1(dif_info_l1, dif_ctrl_u1); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "Port(0x%x) sid_did(0x%x_0x%x) package type(0x%x) apptag(0x%x) flag(0x%x) opcode(0x%x) fcpdl(0x%x) statlba(0x%x)", - hba->port_cfg.port_id, pkg->frame_head.csctl_sid, - pkg->frame_head.rctl_did, pkg->type, pkg->dif_control.app_tag, - pkg->dif_control.flags, pkg->dif_control.protect_opcode, - pkg->dif_control.fcp_dl, pkg->dif_control.start_lba); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "Port(0x%x) cover dif control info, app:cmp_tag(0x%x) cmp_tag_mask(0x%x) rep_tag(0x%x), ref:tag_mode(0x%x) cmp_tag(0x%x) rep_tag(0x%x).", - hba->port_cfg.port_id, dif_info_l1->cmp_app_tag, - dif_info_l1->wd1.cmp_app_tag_msk, dif_info_l1->rep_app_tag, - dif_info_l1->wd0.ref_tag_mode, dif_info_l1->cmp_ref_tag, - dif_info_l1->rep_ref_tag); - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "Port(0x%x) cover dif control info, ctrl:grd(0x%x) ref(0x%x) app(0x%x).", - hba->port_cfg.port_id, dif_info_l1->wd0.grd_ctrl, - dif_info_l1->wd0.ref_tag_ctrl, - dif_info_l1->wd0.app_tag_ctrl); -} - -static u32 spfc_fill_external_sgl_page(struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, - struct unf_esgl_page *esgl_page, - u32 sge_num, int direction, - u32 context_id, u32 dif_flag) -{ - u32 ret = UNF_RETURN_ERROR; - u32 index = 0; - u32 sge_num_per_page = 0; - u32 buffer_addr = 0; - u32 buf_len = 0; - char *buf = NULL; - ulong phys = 0; - struct unf_esgl_page *unf_esgl_page = NULL; - struct spfc_variable_sge *sge = NULL; - - unf_esgl_page = esgl_page; - while (sge_num > 0) { - /* Obtains the initial address of the sge page */ - sge = (struct spfc_variable_sge *)unf_esgl_page->page_address; - - /* Calculate the number of sge on each page */ - sge_num_per_page = (unf_esgl_page->page_size) / sizeof(struct spfc_variable_sge); - - /* Fill in sgl page. The last sge of each page is link sge by - * default - */ - for (index = 0; index < (sge_num_per_page - 1); index++) { - UNF_GET_SGL_ENTRY(ret, (void *)pkg, &buf, &buf_len, dif_flag); - if (ret != RETURN_OK) - return UNF_RETURN_ERROR; - phys = (ulong)buf; - sge[index].buf_addr_hi = UNF_DMA_HI32(phys); - sge[index].buf_addr_lo = UNF_DMA_LO32(phys); - sge[index].wd0.buf_len = buf_len; - sge[index].wd0.r_flag = 0; - sge[index].wd1.extension_flag = SPFC_WQE_SGE_NOT_EXTEND_FLAG; - sge[index].wd1.last_flag = SPFC_WQE_SGE_NOT_LAST_FLAG; - - /* Parity bit */ - sge[index].wd1.buf_addr_gpa = (sge[index].buf_addr_lo >> UNF_SHIFT_16); - sge[index].wd1.xid = (context_id & SPFC_SGE_WD1_XID_MASK); - - spfc_cpu_to_big32(&sge[index], sizeof(struct spfc_variable_sge)); - - sge_num--; - if (sge_num == 0) - break; - } - - /* sge Set the end flag on the last sge of the page if all the - * pages have been filled. - */ - if (sge_num == 0) { - sge[index].wd1.extension_flag = SPFC_WQE_SGE_NOT_EXTEND_FLAG; - sge[index].wd1.last_flag = SPFC_WQE_SGE_LAST_FLAG; - - /* Parity bit */ - buffer_addr = be32_to_cpu(sge[index].buf_addr_lo); - sge[index].wd1.buf_addr_gpa = (buffer_addr >> UNF_SHIFT_16); - sge[index].wd1.xid = (context_id & SPFC_SGE_WD1_XID_MASK); - - spfc_cpu_to_big32(&sge[index].wd1, SPFC_DWORD_BYTE); - } - /* If only one sge is left empty, the sge reserved on the page - * is used for filling. 
- */ - else if (sge_num == 1) { - UNF_GET_SGL_ENTRY(ret, (void *)pkg, &buf, &buf_len, - dif_flag); - if (ret != RETURN_OK) - return UNF_RETURN_ERROR; - phys = (ulong)buf; - sge[index].buf_addr_hi = UNF_DMA_HI32(phys); - sge[index].buf_addr_lo = UNF_DMA_LO32(phys); - sge[index].wd0.buf_len = buf_len; - sge[index].wd0.r_flag = 0; - sge[index].wd1.extension_flag = SPFC_WQE_SGE_NOT_EXTEND_FLAG; - sge[index].wd1.last_flag = SPFC_WQE_SGE_LAST_FLAG; - - /* Parity bit */ - sge[index].wd1.buf_addr_gpa = (sge[index].buf_addr_lo >> UNF_SHIFT_16); - sge[index].wd1.xid = (context_id & SPFC_SGE_WD1_XID_MASK); - - spfc_cpu_to_big32(&sge[index], sizeof(struct spfc_variable_sge)); - - sge_num--; - } else { - /* Apply for a new sgl page and fill in link sge */ - UNF_GET_FREE_ESGL_PAGE(unf_esgl_page, hba->lport, pkg); - if (!unf_esgl_page) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Get free esgl page failed."); - return UNF_RETURN_ERROR; - } - phys = unf_esgl_page->esgl_phy_addr; - sge[index].buf_addr_hi = UNF_DMA_HI32(phys); - sge[index].buf_addr_lo = UNF_DMA_LO32(phys); - - /* For the cascaded wqe, you only need to enter the - * cascading buffer address and extension flag, and do - * not need to fill in other fields - */ - sge[index].wd0.buf_len = 0; - sge[index].wd0.r_flag = 0; - sge[index].wd1.extension_flag = SPFC_WQE_SGE_EXTEND_FLAG; - sge[index].wd1.last_flag = SPFC_WQE_SGE_NOT_LAST_FLAG; - - /* parity bit */ - sge[index].wd1.buf_addr_gpa = (sge[index].buf_addr_lo >> UNF_SHIFT_16); - sge[index].wd1.xid = (context_id & SPFC_SGE_WD1_XID_MASK); - - spfc_cpu_to_big32(&sge[index], sizeof(struct spfc_variable_sge)); - } - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[info]Port(0x%x) SID(0x%x) DID(0x%x) RXID(0x%x) build esgl left sge num: %u.", - hba->port_cfg.port_id, pkg->frame_head.csctl_sid, - pkg->frame_head.rctl_did, - pkg->frame_head.oxid_rxid, sge_num); - } - - return RETURN_OK; -} - -static u32 spfc_build_local_dif_sgl(struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, struct spfc_sqe *sqe, - int direction, u32 bd_sge_num) -{ - u32 ret = UNF_RETURN_ERROR; - char *buf = NULL; - u32 buf_len = 0; - ulong phys = 0; - u32 dif_sge_place = 0; - - /* DIF SGE must be followed by BD SGE */ - dif_sge_place = ((bd_sge_num <= pkg->entry_count) ? bd_sge_num : pkg->entry_count); - - /* The entry_count= 0 needs to be specially processed and does not need - * to be mounted. As long as len is set to zero, Last-bit is set to one, - * and E-bit is set to 0. - */ - if (pkg->dif_control.dif_sge_count == 0) { - sqe->sge[dif_sge_place].buf_addr_hi = 0; - sqe->sge[dif_sge_place].buf_addr_lo = 0; - sqe->sge[dif_sge_place].wd0.buf_len = 0; - } else { - UNF_CM_GET_DIF_SGL_ENTRY(ret, (void *)pkg, &buf, &buf_len); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "DOUBLE DIF Get Dif Buf Fail."); - return UNF_RETURN_ERROR; - } - phys = (ulong)buf; - sqe->sge[dif_sge_place].buf_addr_hi = UNF_DMA_HI32(phys); - sqe->sge[dif_sge_place].buf_addr_lo = UNF_DMA_LO32(phys); - sqe->sge[dif_sge_place].wd0.buf_len = buf_len; - } - - /* rdma flag. If the fc is not used, enter 0. */ - sqe->sge[dif_sge_place].wd0.r_flag = 0; - - /* parity bit */ - sqe->sge[dif_sge_place].wd1.buf_addr_gpa = 0; - sqe->sge[dif_sge_place].wd1.xid = 0; - - /* The local sgl does not use the cascading SGE. Therefore, the value of - * this field is always 0. 
- */ - sqe->sge[dif_sge_place].wd1.extension_flag = SPFC_WQE_SGE_NOT_EXTEND_FLAG; - sqe->sge[dif_sge_place].wd1.last_flag = SPFC_WQE_SGE_LAST_FLAG; - - spfc_cpu_to_big32(&sqe->sge[dif_sge_place], sizeof(struct spfc_variable_sge)); - - return RETURN_OK; -} - -static u32 spfc_build_external_dif_sgl(struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, - struct spfc_sqe *sqe, int direction, - u32 bd_sge_num) -{ - u32 ret = UNF_RETURN_ERROR; - struct unf_esgl_page *esgl_page = NULL; - ulong phys = 0; - u32 left_sge_num = 0; - u32 dif_sge_place = 0; - struct spfc_parent_ssq_info *ssq = NULL; - u32 ssqn = 0; - - ssqn = (u16)pkg->private_data[PKG_PRIVATE_XCHG_SSQ_INDEX]; - ssq = &hba->parent_queue_mgr->shared_queue[ssqn].parent_ssq_info; - - /* DIF SGE must be followed by BD SGE */ - dif_sge_place = ((bd_sge_num <= pkg->entry_count) ? bd_sge_num : pkg->entry_count); - - /* Allocate the first page first */ - UNF_GET_FREE_ESGL_PAGE(esgl_page, hba->lport, pkg); - if (!esgl_page) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "DOUBLE DIF Get External Page Fail."); - return UNF_RETURN_ERROR; - } - - phys = esgl_page->esgl_phy_addr; - - /* Configuring the Address of the Cascading Page */ - sqe->sge[dif_sge_place].buf_addr_hi = UNF_DMA_HI32(phys); - sqe->sge[dif_sge_place].buf_addr_lo = UNF_DMA_LO32(phys); - - /* Configuring Control Information About the Cascading Page */ - sqe->sge[dif_sge_place].wd0.buf_len = 0; - sqe->sge[dif_sge_place].wd0.r_flag = 0; - sqe->sge[dif_sge_place].wd1.extension_flag = SPFC_WQE_SGE_EXTEND_FLAG; - sqe->sge[dif_sge_place].wd1.last_flag = SPFC_WQE_SGE_NOT_LAST_FLAG; - - /* parity bit */ - sqe->sge[dif_sge_place].wd1.buf_addr_gpa = 0; - sqe->sge[dif_sge_place].wd1.xid = 0; - - spfc_cpu_to_big32(&sqe->sge[dif_sge_place], sizeof(struct spfc_variable_sge)); - - /* Fill in the sge information on the cascading page */ - left_sge_num = pkg->dif_control.dif_sge_count; - ret = spfc_fill_external_sgl_page(hba, pkg, esgl_page, left_sge_num, - direction, ssq->context_id, true); - if (ret != RETURN_OK) - return UNF_RETURN_ERROR; - - return RETURN_OK; -} - -static u32 spfc_build_local_sgl(struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, struct spfc_sqe *sqe, - int direction) -{ - u32 ret = UNF_RETURN_ERROR; - char *buf = NULL; - u32 buf_len = 0; - u32 index = 0; - ulong phys = 0; - - for (index = 0; index < pkg->entry_count; index++) { - UNF_CM_GET_SGL_ENTRY(ret, (void *)pkg, &buf, &buf_len); - if (ret != RETURN_OK) - return UNF_RETURN_ERROR; - - phys = (ulong)buf; - sqe->sge[index].buf_addr_hi = UNF_DMA_HI32(phys); - sqe->sge[index].buf_addr_lo = UNF_DMA_LO32(phys); - sqe->sge[index].wd0.buf_len = buf_len; - - /* rdma flag. If the fc is not used, enter 0. */ - sqe->sge[index].wd0.r_flag = 0; - - /* parity bit */ - sqe->sge[index].wd1.buf_addr_gpa = SPFC_ZEROCOPY_PCIE_TEMPLATE_VALUE; - sqe->sge[index].wd1.xid = 0; - - /* The local sgl does not use the cascading SGE. Therefore, the - * value of this field is always 0. - */ - sqe->sge[index].wd1.extension_flag = SPFC_WQE_SGE_NOT_EXTEND_FLAG; - sqe->sge[index].wd1.last_flag = SPFC_WQE_SGE_NOT_LAST_FLAG; - - if (index == (pkg->entry_count - 1)) { - /* Sets the last WQE end flag 1 */ - sqe->sge[index].wd1.last_flag = SPFC_WQE_SGE_LAST_FLAG; - } - - spfc_cpu_to_big32(&sqe->sge[index], sizeof(struct spfc_variable_sge)); - } - - /* Adjust the length of the BDSL field in the CTRL domain. 
*/ - SPFC_ADJUST_DATA(sqe->ctrl_sl.ch.wd0.bdsl, - SPFC_BYTES_TO_QW_NUM((pkg->entry_count * - sizeof(struct spfc_variable_sge)))); - - /* The entry_count= 0 needs to be specially processed and does not need - * to be mounted. As long as len is set to zero, Last-bit is set to one, - * and E-bit is set to 0. - */ - if (pkg->entry_count == 0) { - sqe->sge[ARRAY_INDEX_0].buf_addr_hi = 0; - sqe->sge[ARRAY_INDEX_0].buf_addr_lo = 0; - sqe->sge[ARRAY_INDEX_0].wd0.buf_len = 0; - - /* rdma flag. This field is not used in fc. Set it to 0. */ - sqe->sge[ARRAY_INDEX_0].wd0.r_flag = 0; - - /* parity bit */ - sqe->sge[ARRAY_INDEX_0].wd1.buf_addr_gpa = SPFC_ZEROCOPY_PCIE_TEMPLATE_VALUE; - sqe->sge[ARRAY_INDEX_0].wd1.xid = 0; - - /* The local sgl does not use the cascading SGE. Therefore, the - * value of this field is always 0. - */ - sqe->sge[ARRAY_INDEX_0].wd1.extension_flag = SPFC_WQE_SGE_NOT_EXTEND_FLAG; - sqe->sge[ARRAY_INDEX_0].wd1.last_flag = SPFC_WQE_SGE_LAST_FLAG; - - spfc_cpu_to_big32(&sqe->sge[ARRAY_INDEX_0], sizeof(struct spfc_variable_sge)); - - /* Adjust the length of the BDSL field in the CTRL domain. */ - SPFC_ADJUST_DATA(sqe->ctrl_sl.ch.wd0.bdsl, - SPFC_BYTES_TO_QW_NUM(sizeof(struct spfc_variable_sge))); - } - - return RETURN_OK; -} - -static u32 spfc_build_external_sgl(struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, struct spfc_sqe *sqe, - int direction, u32 bd_sge_num) -{ - u32 ret = UNF_RETURN_ERROR; - char *buf = NULL; - struct unf_esgl_page *esgl_page = NULL; - ulong phys = 0; - u32 buf_len = 0; - u32 index = 0; - u32 left_sge_num = 0; - u32 local_sge_num = 0; - struct spfc_parent_ssq_info *ssq = NULL; - u16 ssqn = 0; - - ssqn = (u16)pkg->private_data[PKG_PRIVATE_XCHG_SSQ_INDEX]; - ssq = &hba->parent_queue_mgr->shared_queue[ssqn].parent_ssq_info; - - /* Ensure that the value of bd_sge_num is greater than or equal to one - */ - local_sge_num = bd_sge_num - 1; - - for (index = 0; index < local_sge_num; index++) { - UNF_CM_GET_SGL_ENTRY(ret, (void *)pkg, &buf, &buf_len); - if (unlikely(ret != RETURN_OK)) - return UNF_RETURN_ERROR; - - phys = (ulong)buf; - - sqe->sge[index].buf_addr_hi = UNF_DMA_HI32(phys); - sqe->sge[index].buf_addr_lo = UNF_DMA_LO32(phys); - sqe->sge[index].wd0.buf_len = buf_len; - - /* RDMA flag, which is not used by FC. */ - sqe->sge[index].wd0.r_flag = 0; - sqe->sge[index].wd1.extension_flag = SPFC_WQE_SGE_NOT_EXTEND_FLAG; - sqe->sge[index].wd1.last_flag = SPFC_WQE_SGE_NOT_LAST_FLAG; - - /* parity bit */ - sqe->sge[index].wd1.buf_addr_gpa = SPFC_ZEROCOPY_PCIE_TEMPLATE_VALUE; - sqe->sge[index].wd1.xid = 0; - - spfc_cpu_to_big32(&sqe->sge[index], sizeof(struct spfc_variable_sge)); - } - - /* Calculate the number of remaining sge. */ - left_sge_num = pkg->entry_count - local_sge_num; - /* Adjust the length of the BDSL field in the CTRL domain. 
*/ - SPFC_ADJUST_DATA(sqe->ctrl_sl.ch.wd0.bdsl, - SPFC_BYTES_TO_QW_NUM((bd_sge_num * sizeof(struct spfc_variable_sge)))); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "alloc extended sgl page,leftsge:%d", left_sge_num); - /* Allocating the first cascading page */ - UNF_GET_FREE_ESGL_PAGE(esgl_page, hba->lport, pkg); - if (unlikely(!esgl_page)) - return UNF_RETURN_ERROR; - - phys = esgl_page->esgl_phy_addr; - - /* Configuring the Address of the Cascading Page */ - sqe->sge[index].buf_addr_hi = (u32)UNF_DMA_HI32(phys); - sqe->sge[index].buf_addr_lo = (u32)UNF_DMA_LO32(phys); - - /* Configuring Control Information About the Cascading Page */ - sqe->sge[index].wd0.buf_len = 0; - sqe->sge[index].wd0.r_flag = 0; - sqe->sge[index].wd1.extension_flag = SPFC_WQE_SGE_EXTEND_FLAG; - sqe->sge[index].wd1.last_flag = SPFC_WQE_SGE_NOT_LAST_FLAG; - - /* parity bit */ - sqe->sge[index].wd1.buf_addr_gpa = SPFC_ZEROCOPY_PCIE_TEMPLATE_VALUE; - sqe->sge[index].wd1.xid = 0; - - spfc_cpu_to_big32(&sqe->sge[index], sizeof(struct spfc_variable_sge)); - - /* Fill in the sge information on the cascading page. */ - ret = spfc_fill_external_sgl_page(hba, pkg, esgl_page, left_sge_num, - direction, ssq->context_id, false); - if (ret != RETURN_OK) - return UNF_RETURN_ERROR; - /* Copy the extended data sge to the extended sge of the extended wqe.*/ - if (left_sge_num > 0) { - memcpy(sqe->esge, (void *)esgl_page->page_address, - SPFC_WQE_MAX_ESGE_NUM * sizeof(struct spfc_variable_sge)); - } - - return RETURN_OK; -} - -u32 spfc_build_sgl_by_local_sge_num(struct unf_frame_pkg *pkg, - struct spfc_hba_info *hba, struct spfc_sqe *sqe, - int direction, u32 bd_sge_num) -{ - u32 ret = RETURN_OK; - - if (pkg->entry_count <= bd_sge_num) - ret = spfc_build_local_sgl(hba, pkg, sqe, direction); - else - ret = spfc_build_external_sgl(hba, pkg, sqe, direction, bd_sge_num); - - return ret; -} - -u32 spfc_conf_dual_sgl_info(struct unf_frame_pkg *pkg, - struct spfc_hba_info *hba, struct spfc_sqe *sqe, - int direction, u32 bd_sge_num, bool double_sgl) -{ - u32 ret = RETURN_OK; - - if (double_sgl) { - /* Adjust the length of the DIF_SL field in the CTRL domain */ - SPFC_ADJUST_DATA(sqe->ctrl_sl.ch.wd0.dif_sl, - SPFC_BYTES_TO_QW_NUM(sizeof(struct spfc_variable_sge))); - - if (pkg->dif_control.dif_sge_count <= SPFC_WQE_SGE_DIF_ENTRY_NUM) - ret = spfc_build_local_dif_sgl(hba, pkg, sqe, direction, bd_sge_num); - else - ret = spfc_build_external_dif_sgl(hba, pkg, sqe, direction, bd_sge_num); - } - - return ret; -} - -u32 spfc_build_sgl(struct spfc_hba_info *hba, struct unf_frame_pkg *pkg, - struct spfc_sqe *sqe, int direction, u32 dif_flag) -{ -#define SPFC_ESGE_CNT 3 - u32 ret = RETURN_OK; - u32 bd_sge_num = SPFC_WQE_SGE_ENTRY_NUM; - bool double_sgl = false; - - if (dif_flag != 0 && (pkg->dif_control.flags & UNF_DIF_DOUBLE_SGL)) { - bd_sge_num = SPFC_WQE_SGE_ENTRY_NUM - SPFC_WQE_SGE_DIF_ENTRY_NUM; - double_sgl = true; - } - - /* Only one wqe local sge can be loaded. 
If more than one wqe local sge - * is used, use the esgl - */ - ret = spfc_build_sgl_by_local_sge_num(pkg, hba, sqe, direction, bd_sge_num); - - if (unlikely(ret != RETURN_OK)) - return ret; - - /* Configuring Dual SGL Information for DIF */ - ret = spfc_conf_dual_sgl_info(pkg, hba, sqe, direction, bd_sge_num, double_sgl); - - return ret; -} - -void spfc_adjust_dix(struct unf_frame_pkg *pkg, struct spfc_fc_dif_info *dif_info, - u8 task_type) -{ - u8 tasktype = task_type; - struct spfc_fc_dif_info *dif_info_l1 = NULL; - - dif_info_l1 = dif_info; - - if (dix_flag == 1) { - if (tasktype == SPFC_SQE_FCP_IWRITE || - tasktype == SPFC_SQE_FCP_TRD) { - if ((UNF_DIF_ACTION_MASK & pkg->dif_control.protect_opcode) == - UNF_DIF_ACTION_VERIFY_AND_FORWARD) { - dif_info_l1->wd0.grd_ctrl |= - SPFC_DIF_GARD_REF_APP_CTRL_REPLACE; - dif_info_l1->wd0.grd_agm_ctrl = - SPFC_DIF_GUARD_VERIFY_IP_CHECKSUM_REPLACE_CRC16; - } - - if ((UNF_DIF_ACTION_MASK & pkg->dif_control.protect_opcode) == - UNF_DIF_ACTION_VERIFY_AND_DELETE) { - dif_info_l1->wd0.grd_agm_ctrl = - SPFC_DIF_GUARD_VERIFY_IP_CHECKSUM_REPLACE_CRC16; - } - } - - if (tasktype == SPFC_SQE_FCP_IREAD || - tasktype == SPFC_SQE_FCP_TWR) { - if ((UNF_DIF_ACTION_MASK & - pkg->dif_control.protect_opcode) == - UNF_DIF_ACTION_VERIFY_AND_FORWARD) { - dif_info_l1->wd0.grd_ctrl |= - SPFC_DIF_GARD_REF_APP_CTRL_REPLACE; - dif_info_l1->wd0.grd_agm_ctrl = - SPFC_DIF_GUARD_VERIFY_CRC16_REPLACE_IP_CHECKSUM; - } - - if ((UNF_DIF_ACTION_MASK & - pkg->dif_control.protect_opcode) == - UNF_DIF_ACTION_INSERT) { - dif_info_l1->wd0.grd_agm_ctrl = - SPFC_DIF_GUARD_VERIFY_CRC16_REPLACE_IP_CHECKSUM; - } - } - } - - if (grd_agm_ctrl != 0) - dif_info_l1->wd0.grd_agm_ctrl = grd_agm_ctrl; - - if (grd_ctrl != 0) - dif_info_l1->wd0.grd_ctrl = grd_ctrl; -} - -void spfc_get_dma_direction_by_fcp_cmnd(const struct unf_fcp_cmnd *fcp_cmnd, - int *dma_direction, u8 *task_type) -{ - if (UNF_FCP_WR_DATA & fcp_cmnd->control) { - *task_type = SPFC_SQE_FCP_IWRITE; - *dma_direction = DMA_TO_DEVICE; - } else if (UNF_GET_TASK_MGMT_FLAGS(fcp_cmnd->control) != 0) { - *task_type = SPFC_SQE_FCP_ITMF; - *dma_direction = DMA_FROM_DEVICE; - } else { - *task_type = SPFC_SQE_FCP_IREAD; - *dma_direction = DMA_FROM_DEVICE; - } -} - -static inline u32 spfc_build_icmnd_wqe(struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, - struct spfc_sqe *sge) -{ - u32 ret = RETURN_OK; - int direction = 0; - u8 tasktype = 0; - struct unf_fcp_cmnd *fcp_cmnd = NULL; - struct spfc_sqe *sqe = sge; - u32 dif_flag = 0; - - fcp_cmnd = pkg->fcp_cmnd; - if (unlikely(!fcp_cmnd)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Package's FCP commond pointer is NULL."); - - return UNF_RETURN_ERROR; - } - - spfc_get_dma_direction_by_fcp_cmnd(fcp_cmnd, &direction, &tasktype); - - spfc_build_icmnd_wqe_ts_header(pkg, sqe, tasktype, hba->exi_base, hba->port_index); - - spfc_build_icmnd_wqe_ctrls(pkg, sqe); - - spfc_build_icmnd_wqe_ts(hba, pkg, &sqe->ts_sl, &sqe->ts_ex); - - if (sqe->ts_sl.task_type != SPFC_SQE_FCP_ITMF) { - if (pkg->dif_control.protect_opcode == UNF_DIF_ACTION_NONE) { - dif_flag = 0; - spfc_build_no_dif_control(pkg, &sqe->ts_sl.cont.icmnd.info.dif_info); - } else { - dif_flag = 1; - spfc_build_dif_control(hba, pkg, &sqe->ts_sl.cont.icmnd.info.dif_info); - spfc_adjust_dix(pkg, - &sqe->ts_sl.cont.icmnd.info.dif_info, - tasktype); - } - } - - ret = spfc_build_sgl(hba, pkg, sqe, direction, dif_flag); - - sqe->sid = UNF_GET_SID(pkg); - sqe->did = UNF_GET_DID(pkg); - - return ret; -} - -u32 spfc_send_scsi_cmnd(void *hba, struct 
unf_frame_pkg *pkg) -{ - struct spfc_hba_info *spfc_hba = NULL; - struct spfc_parent_sq_info *parent_sq = NULL; - u32 ret = UNF_RETURN_ERROR; - struct spfc_sqe sqe; - u16 ssqn; - struct spfc_parent_queue_info *parent_queue = NULL; - - /* input param check */ - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - - SPFC_CHECK_PKG_ALLOCTIME(pkg); - memset(&sqe, 0, sizeof(struct spfc_sqe)); - spfc_hba = hba; - - /* 1. find parent sq for scsi_cmnd(pkg) */ - parent_sq = spfc_find_parent_sq_by_pkg(spfc_hba, pkg); - if (unlikely(!parent_sq)) { - /* Do not need to print info */ - return UNF_RETURN_ERROR; - } - - pkg->qos_level += spfc_hba->vpid_start; - - /* 2. build cmnd wqe (to sqe) for scsi_cmnd(pkg) */ - ret = spfc_build_icmnd_wqe(spfc_hba, pkg, &sqe); - if (unlikely(ret != RETURN_OK)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[fail]Port(0x%x) Build WQE failed, SID(0x%x) DID(0x%x) pkg type(0x%x) hottag(0x%x).", - spfc_hba->port_cfg.port_id, pkg->frame_head.csctl_sid, - pkg->frame_head.rctl_did, pkg->type, UNF_GET_XCHG_TAG(pkg)); - - return ret; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "Port(0x%x) RPort(0x%x) send FCP_CMND TYPE(0x%x) Local_Xid(0x%x) hottag(0x%x) LBA(0x%llx)", - spfc_hba->port_cfg.port_id, parent_sq->rport_index, - sqe.ts_sl.task_type, sqe.ts_sl.local_xid, - pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX], - *((u64 *)pkg->fcp_cmnd->cdb)); - - ssqn = (u16)pkg->private_data[PKG_PRIVATE_XCHG_SSQ_INDEX]; - if (sqe.ts_sl.task_type == SPFC_SQE_FCP_ITMF) { - parent_queue = container_of(parent_sq, struct spfc_parent_queue_info, - parent_sq_info); - ret = spfc_suspend_sqe_and_send_nop(spfc_hba, parent_queue, &sqe, pkg); - return ret; - } - /* 3. En-Queue Parent SQ for scsi_cmnd(pkg) sqe */ - ret = spfc_parent_sq_enqueue(parent_sq, &sqe, ssqn); - - return ret; -} - -static void spfc_ini_status_default_handler(struct spfc_scqe_iresp *iresp, - struct unf_frame_pkg *pkg) -{ - u8 control = 0; - u16 com_err_code = 0; - - control = iresp->wd2.fcp_flag & SPFC_CTRL_MASK; - - if (iresp->fcp_resid != 0) { - com_err_code = UNF_IO_FAILED; - pkg->residus_len = iresp->fcp_resid; - } else { - com_err_code = UNF_IO_SUCCESS; - pkg->residus_len = 0; - } - - pkg->status = spfc_fill_pkg_status(com_err_code, control, iresp->wd2.scsi_status); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[info]Fill package with status: 0x%x, residus len: 0x%x", - pkg->status, pkg->residus_len); -} - -static void spfc_check_fcp_rsp_iu(struct spfc_scqe_iresp *iresp, - struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg) -{ - u8 scsi_status = 0; - u8 control = 0; - - control = (u8)iresp->wd2.fcp_flag; - scsi_status = (u8)iresp->wd2.scsi_status; - - /* FcpRspIU with Little End from IOB WQE to COM's pkg also */ - if (control & FCP_RESID_UNDER_MASK) { - /* under flow: usually occurs in inquiry */ - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[info]I_STS IOB posts under flow with residus len: %u, FCP residue: %u.", - pkg->residus_len, iresp->fcp_resid); - - if (pkg->residus_len != iresp->fcp_resid) - pkg->status = spfc_fill_pkg_status(UNF_IO_FAILED, control, scsi_status); - else - pkg->status = spfc_fill_pkg_status(UNF_IO_UNDER_FLOW, control, scsi_status); - } - - if (control & FCP_RESID_OVER_MASK) { - /* over flow: error happened */ - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]I_STS IOB posts over flow with residus len: %u, FCP residue: %u.", - pkg->residus_len, iresp->fcp_resid); - - if (pkg->residus_len != iresp->fcp_resid) - pkg->status = 
spfc_fill_pkg_status(UNF_IO_FAILED, control, scsi_status); - else - pkg->status = spfc_fill_pkg_status(UNF_IO_OVER_FLOW, control, scsi_status); - } - - pkg->unf_rsp_pload_bl.length = 0; - pkg->unf_sense_pload_bl.length = 0; - - if (control & FCP_RSP_LEN_VALID_MASK) { - /* dma by chip */ - pkg->unf_rsp_pload_bl.buffer_ptr = NULL; - - pkg->unf_rsp_pload_bl.length = iresp->fcp_rsp_len; - pkg->byte_orders |= UNF_BIT_3; - } - - if (control & FCP_SNS_LEN_VALID_MASK) { - /* dma by chip */ - pkg->unf_sense_pload_bl.buffer_ptr = NULL; - - pkg->unf_sense_pload_bl.length = iresp->fcp_sns_len; - pkg->byte_orders |= UNF_BIT_4; - } - - if (iresp->wd1.user_id_num == 1 && - (pkg->unf_sense_pload_bl.length + pkg->unf_rsp_pload_bl.length > 0)) { - pkg->unf_rsp_pload_bl.buffer_ptr = - (u8 *)spfc_get_els_buf_by_user_id(hba, (u16)iresp->user_id[ARRAY_INDEX_0]); - } else if (iresp->wd1.user_id_num > 1) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]receive buff num 0x%x > 1 0x%x", - iresp->wd1.user_id_num, control); - } -} - -u16 spfc_get_com_err_code(struct unf_frame_pkg *pkg) -{ - u16 com_err_code = UNF_IO_FAILED; - u32 status_subcode = 0; - - status_subcode = pkg->status_sub_code; - - if (likely(status_subcode == 0)) - com_err_code = 0; - else if (status_subcode == UNF_DIF_CRC_ERR) - com_err_code = UNF_IO_DIF_ERROR; - else if (status_subcode == UNF_DIF_LBA_ERR) - com_err_code = UNF_IO_DIF_REF_ERROR; - else if (status_subcode == UNF_DIF_APP_ERR) - com_err_code = UNF_IO_DIF_GEN_ERROR; - - return com_err_code; -} - -void spfc_process_ini_fail_io(struct spfc_hba_info *hba, union spfc_scqe *iresp, - struct unf_frame_pkg *pkg) -{ - u16 com_err_code = UNF_IO_FAILED; - - /* 1. error stats process */ - if (SPFC_GET_SCQE_STATUS((union spfc_scqe *)(void *)iresp) != 0) { - switch (SPFC_GET_SCQE_STATUS((union spfc_scqe *)(void *)iresp)) { - /* I/O not complete: 1.session reset; 2.clear buffer */ - case FC_CQE_BUFFER_CLEAR_IO_COMPLETED: - case FC_CQE_SESSION_RST_CLEAR_IO_COMPLETED: - case FC_CQE_SESSION_ONLY_CLEAR_IO_COMPLETED: - case FC_CQE_WQE_FLUSH_IO_COMPLETED: - com_err_code = UNF_IO_CLEAN_UP; - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[warn]Port(0x%x) INI IO not complete, OX_ID(0x%x) RX_ID(0x%x) status(0x%x)", - hba->port_cfg.port_id, - ((struct spfc_scqe_iresp *)iresp)->wd0.ox_id, - ((struct spfc_scqe_iresp *)iresp)->wd0.rx_id, - com_err_code); - - break; - /* Allocate task id(oxid) fail */ - case FC_ERROR_INVALID_TASK_ID: - com_err_code = UNF_IO_NO_XCHG; - break; - case FC_ALLOC_EXCH_ID_FAILED: - com_err_code = UNF_IO_NO_XCHG; - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[warn]Port(0x%x) INI IO, tag 0x%x alloc oxid fail.", - hba->port_cfg.port_id, - ((struct spfc_scqe_iresp *)iresp)->wd2.hotpooltag); - break; - case FC_ERROR_CODE_DATA_DIFX_FAILED: - com_err_code = pkg->status >> UNF_SHIFT_16; - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[warn]Port(0x%x) INI IO, tag 0x%x tx dif error.", - hba->port_cfg.port_id, - ((struct spfc_scqe_iresp *)iresp)->wd2.hotpooltag); - break; - /* any other: I/O failed --->>> DID error */ - default: - com_err_code = UNF_IO_FAILED; - break; - } - - /* fill pkg status & return directly */ - pkg->status = - spfc_fill_pkg_status(com_err_code, - ((struct spfc_scqe_iresp *)iresp)->wd2.fcp_flag, - ((struct spfc_scqe_iresp *)iresp)->wd2.scsi_status); - - return; - } - - /* 2. default stats process */ - spfc_ini_status_default_handler((struct spfc_scqe_iresp *)iresp, pkg); - - /* 3. 
FCP RSP IU check */ - spfc_check_fcp_rsp_iu((struct spfc_scqe_iresp *)iresp, hba, pkg); -} - -void spfc_process_dif_result(struct spfc_hba_info *hba, union spfc_scqe *wqe, - struct unf_frame_pkg *pkg) -{ - u16 com_err_code = UNF_IO_FAILED; - u8 dif_info = 0; - - dif_info = wqe->common.wd0.dif_vry_rst; - if (dif_info == SPFC_TX_DIF_ERROR_FLAG) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[error]Port(0x%x) TGT recv tx dif result abnormal.", - hba->port_cfg.port_id); - } - - pkg->status_sub_code = - (dif_info & SPFC_DIF_ERROR_CODE_CRC) - ? UNF_DIF_CRC_ERR - : ((dif_info & SPFC_DIF_ERROR_CODE_REF) - ? UNF_DIF_LBA_ERR - : ((dif_info & SPFC_DIF_ERROR_CODE_APP) ? UNF_DIF_APP_ERR : 0)); - com_err_code = spfc_get_com_err_code(pkg); - pkg->status = (u32)(com_err_code) << UNF_SHIFT_16; - - if (unlikely(com_err_code != 0)) { - spfc_dif_err_count(hba, dif_info); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[error]Port(0x%x) INI io status with dif result(0x%x),subcode(0x%x) pkg->status(0x%x)", - hba->port_cfg.port_id, dif_info, - pkg->status_sub_code, pkg->status); - } -} - -u32 spfc_scq_recv_iresp(struct spfc_hba_info *hba, union spfc_scqe *wqe) -{ -#define SPFC_IRSP_USERID_LEN ((FC_SENSEDATA_USERID_CNT_MAX + 1) / 2) - struct spfc_scqe_iresp *iresp = NULL; - struct unf_frame_pkg pkg; - u32 ret = RETURN_OK; - u16 hot_tag; - - FC_CHECK_RETURN_VALUE((hba), UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE((wqe), UNF_RETURN_ERROR); - - iresp = (struct spfc_scqe_iresp *)(void *)wqe; - - /* 1. Constraints: I_STS remain cnt must be zero */ - if (unlikely(SPFC_GET_SCQE_REMAIN_CNT(wqe) != 0)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) ini_wqe(OX_ID:0x%x RX_ID:0x%x) HotTag(0x%x) remain_cnt(0x%x) abnormal, status(0x%x)", - hba->port_cfg.port_id, iresp->wd0.ox_id, - iresp->wd0.rx_id, iresp->wd2.hotpooltag, - SPFC_GET_SCQE_REMAIN_CNT(wqe), - SPFC_GET_SCQE_STATUS(wqe)); - - UNF_PRINT_SFS_LIMIT(UNF_MAJOR, hba->port_cfg.port_id, wqe, sizeof(union spfc_scqe)); - - /* return directly */ - return UNF_RETURN_ERROR; - } - - spfc_swap_16_in_32((u32 *)iresp->user_id, SPFC_IRSP_USERID_LEN); - - memset(&pkg, 0, sizeof(struct unf_frame_pkg)); - pkg.private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = iresp->magic_num; - pkg.frame_head.oxid_rxid = (((iresp->wd0.ox_id) << UNF_SHIFT_16) | (iresp->wd0.rx_id)); - - hot_tag = (u16)iresp->wd2.hotpooltag; - /* 2. HotTag validity check */ - if (likely(hot_tag >= hba->exi_base && (hot_tag < hba->exi_base + hba->exi_count))) { - pkg.status = UNF_IO_SUCCESS; - pkg.private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = - hot_tag - hba->exi_base; - } else { - /* OX_ID error: return by COM */ - pkg.status = UNF_IO_FAILED; - pkg.private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = INVALID_VALUE16; - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) ini_cmnd_wqe(OX_ID:0x%x RX_ID:0x%x) ox_id invalid, status(0x%x)", - hba->port_cfg.port_id, iresp->wd0.ox_id, iresp->wd0.rx_id, - SPFC_GET_SCQE_STATUS(wqe)); - - UNF_PRINT_SFS_LIMIT(UNF_MAJOR, hba->port_cfg.port_id, wqe, - sizeof(union spfc_scqe)); - } - - /* process dif result */ - spfc_process_dif_result(hba, wqe, &pkg); - - /* 3. 
status check */ - if (unlikely(SPFC_GET_SCQE_STATUS(wqe) || - iresp->wd2.scsi_status != 0 || iresp->fcp_resid != 0 || - ((iresp->wd2.fcp_flag & SPFC_CTRL_MASK) != 0))) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[warn]Port(0x%x) scq_status(0x%x) scsi_status(0x%x) fcp_resid(0x%x) fcp_flag(0x%x)", - hba->port_cfg.port_id, SPFC_GET_SCQE_STATUS(wqe), - iresp->wd2.scsi_status, iresp->fcp_resid, - iresp->wd2.fcp_flag); - - /* set pkg status & check fcp_rsp IU */ - spfc_process_ini_fail_io(hba, (union spfc_scqe *)iresp, &pkg); - } - - /* 4. LL_Driver ---to--->>> COM_Driver */ - UNF_LOWLEVEL_SCSI_COMPLETED(ret, hba->lport, &pkg); - if (iresp->wd1.user_id_num == 1 && - (pkg.unf_sense_pload_bl.length + pkg.unf_rsp_pload_bl.length > 0)) { - spfc_post_els_srq_wqe(&hba->els_srq_info, (u16)iresp->user_id[ARRAY_INDEX_0]); - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[info]Port(0x%x) rport(0x%x) recv(%s) hottag(0x%x) OX_ID(0x%x) RX_ID(0x%x) return(%s)", - hba->port_cfg.port_id, iresp->wd1.conn_id, - (SPFC_SCQE_FCP_IRSP == (SPFC_GET_SCQE_TYPE(wqe)) ? "IRESP" : "ITMF_RSP"), - pkg.private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX], iresp->wd0.ox_id, - iresp->wd0.rx_id, (ret == RETURN_OK) ? "OK" : "ERROR"); - - return ret; -} diff --git a/drivers/scsi/spfc/hw/spfc_io.h b/drivers/scsi/spfc/hw/spfc_io.h deleted file mode 100644 index 26d10a51bbe4..000000000000 --- a/drivers/scsi/spfc/hw/spfc_io.h +++ /dev/null @@ -1,138 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPFC_IO_H -#define SPFC_IO_H - -#include "unf_type.h" -#include "unf_common.h" -#include "spfc_hba.h" - -#ifdef __cplusplus -#if __cplusplus -extern "C" { -#endif -#endif /* __cplusplus */ - -#define BYTE_PER_DWORD 4 -#define SPFC_TRESP_DIRECT_CARRY_LEN (23 * 4) -#define FCP_RESP_IU_LEN_BYTE_GOOD_STATUS 24 -#define SPFC_TRSP_IU_CONTROL_OFFSET 2 -#define SPFC_TRSP_IU_FCP_CONF_REP (1 << 12) - -struct spfc_dif_io_param { - u32 all_len; - u32 buf_len; - char **buf; - char *in_buf; - int drect; -}; - -enum dif_mode_type { - DIF_MODE_NONE = 0x0, - DIF_MODE_INSERT = 0x1, - DIF_MODE_REMOVE = 0x2, - DIF_MODE_FORWARD_OR_REPLACE = 0x3 -}; - -enum ref_tag_mode_type { - BOTH_NONE = 0x0, - RECEIVE_INCREASE = 0x1, - REPLACE_INCREASE = 0x2, - BOTH_INCREASE = 0x3 -}; - -#define SPFC_DIF_DISABLE 0 -#define SPFC_DIF_ENABLE 1 -#define SPFC_DIF_SINGLE_SGL 0 -#define SPFC_DIF_DOUBLE_SGL 1 -#define SPFC_DIF_SECTOR_512B_MODE 0 -#define SPFC_DIF_SECTOR_4KB_MODE 1 -#define SPFC_DIF_TYPE1 0x01 -#define SPFC_DIF_TYPE3 0x03 -#define SPFC_DIF_GUARD_VERIFY_ALGORITHM_CTL_T10_CRC16 0x0 -#define SPFC_DIF_GUARD_VERIFY_CRC16_REPLACE_IP_CHECKSUM 0x1 -#define SPFC_DIF_GUARD_VERIFY_IP_CHECKSUM_REPLACE_CRC16 0x2 -#define SPFC_DIF_GUARD_VERIFY_ALGORITHM_CTL_IP_CHECKSUM 0x3 -#define SPFC_DIF_CRC16_INITIAL_SELECTOR_DEFAUL 0 -#define SPFC_DIF_CRC_CS_INITIAL_CONFIG_BY_REGISTER 0 -#define SPFC_DIF_CRC_CS_INITIAL_CONFIG_BY_BIT0_1 0x4 - -#define SPFC_DIF_GARD_REF_APP_CTRL_VERIFY 0x4 -#define SPFC_DIF_GARD_REF_APP_CTRL_NOT_VERIFY 0x0 -#define SPFC_DIF_GARD_REF_APP_CTRL_INSERT 0x0 -#define SPFC_DIF_GARD_REF_APP_CTRL_DELETE 0x1 -#define SPFC_DIF_GARD_REF_APP_CTRL_FORWARD 0x2 -#define SPFC_DIF_GARD_REF_APP_CTRL_REPLACE 0x3 - -#define SPFC_BUILD_RESPONSE_INFO_NON_GAP_MODE0 0 -#define SPFC_BUILD_RESPONSE_INFO_GPA_MODE1 1 -#define SPFC_CONF_SUPPORT 1 -#define SPFC_CONF_NOT_SUPPORT 0 -#define SPFC_XID_INTERVAL 2048 - -#define SPFC_DIF_ERROR_CODE_MASK 0xe -#define SPFC_DIF_ERROR_CODE_CRC 0x2 -#define SPFC_DIF_ERROR_CODE_REF 0x4 
-#define SPFC_DIF_ERROR_CODE_APP 0x8 -#define SPFC_TX_DIF_ERROR_FLAG (1 << 7) - -#define SPFC_DIF_PAYLOAD_TYPE (1 << 0) -#define SPFC_DIF_CRC_TYPE (1 << 1) -#define SPFC_DIF_APP_TYPE (1 << 2) -#define SPFC_DIF_REF_TYPE (1 << 3) - -#define SPFC_DIF_SEND_DIFERR_ALL (0) -#define SPFC_DIF_SEND_DIFERR_CRC (1) -#define SPFC_DIF_SEND_DIFERR_APP (2) -#define SPFC_DIF_SEND_DIFERR_REF (3) -#define SPFC_DIF_RECV_DIFERR_ALL (4) -#define SPFC_DIF_RECV_DIFERR_CRC (5) -#define SPFC_DIF_RECV_DIFERR_APP (6) -#define SPFC_DIF_RECV_DIFERR_REF (7) -#define SPFC_DIF_ERR_ENABLE (382855) -#define SPFC_DIF_ERR_DISABLE (0) - -#define SPFC_DIF_LENGTH (8) -#define SPFC_SECT_SIZE_512 (512) -#define SPFC_SECT_SIZE_4096 (4096) -#define SPFC_SECT_SIZE_512_8 (520) -#define SPFC_SECT_SIZE_4096_8 (4104) -#define SPFC_DIF_SECT_SIZE_APP_OFFSET (2) -#define SPFC_DIF_SECT_SIZE_LBA_OFFSET (4) - -#define SPFC_MAX_IO_TAG (2048) -#define SPFC_PRINT_WORD (8) - -extern u32 dif_protect_opcode; -extern u32 dif_sect_size; -extern u32 no_dif_sect_size; -extern u32 grd_agm_ini_ctrl; -extern u32 ref_tag_mod; -extern u32 grd_ctrl; -extern u32 grd_agm_ctrl; -extern u32 cmp_app_tag_mask; -extern u32 app_tag_ctrl; -extern u32 ref_tag_ctrl; -extern u32 rep_ref_tag; -extern u32 rx_rep_ref_tag; -extern u16 cmp_app_tag; -extern u16 rep_app_tag; - -#define spfc_fill_pkg_status(com_err_code, control, scsi_status) \ - (((u32)(com_err_code) << 16) | ((u32)(control) << 8) | \ - (u32)(scsi_status)) -#define SPFC_CTRL_MASK 0x1f - -u32 spfc_send_scsi_cmnd(void *hba, struct unf_frame_pkg *pkg); -u32 spfc_scq_recv_iresp(struct spfc_hba_info *hba, union spfc_scqe *wqe); -void spfc_process_dif_result(struct spfc_hba_info *hba, union spfc_scqe *wqe, - struct unf_frame_pkg *pkg); - -#ifdef __cplusplus -#if __cplusplus -} -#endif -#endif /* __cplusplus */ - -#endif /* __SPFC_IO_H__ */ diff --git a/drivers/scsi/spfc/hw/spfc_lld.c b/drivers/scsi/spfc/hw/spfc_lld.c deleted file mode 100644 index a35484f1c917..000000000000 --- a/drivers/scsi/spfc/hw/spfc_lld.c +++ /dev/null @@ -1,997 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/pci.h> -#include <linux/device.h> -#include <linux/io-mapping.h> -#include <linux/interrupt.h> -#include <linux/inetdevice.h> -#include <net/addrconf.h> -#include <linux/time.h> -#include <linux/timex.h> -#include <linux/rtc.h> -#include <linux/aer.h> -#include <linux/debugfs.h> - -#include "spfc_lld.h" -#include "sphw_hw.h" -#include "sphw_mt.h" -#include "sphw_hw_cfg.h" -#include "sphw_hw_comm.h" -#include "sphw_common.h" -#include "spfc_cqm_main.h" -#include "spfc_module.h" - -#define SPFC_DRV_NAME "spfc" -#define SPFC_CHIP_NAME "spfc" - -#define PCI_VENDOR_ID_RAMAXEL 0x1E81 -#define SPFC_DEV_ID_PF_STD 0x9010 -#define SPFC_DEV_ID_VF 0x9008 - -#define SPFC_VF_PCI_CFG_REG_BAR 0 -#define SPFC_PF_PCI_CFG_REG_BAR 1 - -#define SPFC_PCI_INTR_REG_BAR 2 -#define SPFC_PCI_MGMT_REG_BAR 3 -#define SPFC_PCI_DB_BAR 4 - -#define SPFC_SECOND_BASE (1000) -#define SPFC_SYNC_YEAR_OFFSET (1900) -#define SPFC_SYNC_MONTH_OFFSET (1) -#define SPFC_MINUTE_BASE (60) -#define SPFC_WAIT_TOOL_CNT_TIMEOUT 10000 - -#define SPFC_MIN_TIME_IN_USECS 900 -#define SPFC_MAX_TIME_IN_USECS 1000 -#define SPFC_MAX_LOOP_TIMES 10000 - -#define SPFC_TOOL_MIN_TIME_IN_USECS 9900 -#define SPFC_TOOL_MAX_TIME_IN_USECS 10000 - -#define SPFC_EVENT_PROCESS_TIMEOUT 10000 - -#define FIND_BIT(num, n) (((num) & (1UL << (n))) ? 
1 : 0) -#define SET_BIT(num, n) ((num) | (1UL << (n))) -#define CLEAR_BIT(num, n) ((num) & (~(1UL << (n)))) - -#define MAX_CARD_ID 64 -static unsigned long card_bit_map; -LIST_HEAD(g_spfc_chip_list); -struct spfc_uld_info g_uld_info[SERVICE_T_MAX] = { {0} }; - -struct unf_cm_handle_op spfc_cm_op_handle = {0}; - -u32 allowed_probe_num = SPFC_MAX_PORT_NUM; -u32 dif_sgl_mode; -u32 max_speed = SPFC_SPEED_32G; -u32 accum_db_num = 1; -u32 dif_type = 0x1; -u32 wqe_page_size = 4096; -u32 wqe_pre_load = 6; -u32 combo_length = 128; -u32 cos_bit_map = 0x1f; -u32 spfc_dif_type; -u32 spfc_dif_enable; -u8 spfc_guard; -int link_lose_tmo = 30; - -u32 exit_count = 4096; -u32 exit_stride = 4096; -u32 exit_base; - -/* dfx counter */ -atomic64_t rx_tx_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -atomic64_t rx_tx_err[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -atomic64_t scq_err_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -atomic64_t aeq_err_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -atomic64_t dif_err_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -atomic64_t mail_box_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -atomic64_t up_err_event_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -u64 link_event_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_LINK_EVENT_CNT]; -u64 link_reason_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_LINK_REASON_CNT]; -u64 hba_stat[SPFC_MAX_PORT_NUM][SPFC_HBA_STAT_BUTT]; -atomic64_t com_up_event_err_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; - -#ifndef MAX_SIZE -#define MAX_SIZE (16) -#endif - -struct spfc_lld_lock g_lld_lock; - -/* g_device_mutex */ -struct mutex g_device_mutex; - -/* pci device initialize lock */ -struct mutex g_pci_init_mutex; - -#define WAIT_LLD_DEV_HOLD_TIMEOUT (10 * 60 * 1000) /* 10minutes */ -#define WAIT_LLD_DEV_NODE_CHANGED (10 * 60 * 1000) /* 10minutes */ -#define WAIT_LLD_DEV_REF_CNT_EMPTY (2 * 60 * 1000) /* 2minutes */ - -void lld_dev_cnt_init(struct spfc_pcidev *pci_adapter) -{ - atomic_set(&pci_adapter->ref_cnt, 0); -} - -void lld_dev_hold(struct spfc_lld_dev *dev) -{ - struct spfc_pcidev *pci_adapter = pci_get_drvdata(dev->pdev); - - atomic_inc(&pci_adapter->ref_cnt); -} - -void lld_dev_put(struct spfc_lld_dev *dev) -{ - struct spfc_pcidev *pci_adapter = pci_get_drvdata(dev->pdev); - - atomic_dec(&pci_adapter->ref_cnt); -} - -static void spfc_sync_time_to_fmw(struct spfc_pcidev *pdev_pri) -{ - struct tm tm = {0}; - u64 tv_msec; - int err; - - tv_msec = ktime_to_ms(ktime_get_real()); - err = sphw_sync_time(pdev_pri->hwdev, tv_msec); - if (err) { - sdk_err(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware failed, errno:%d.\n", - err); - } else { - time64_to_tm(tv_msec / MSEC_PER_SEC, 0, &tm); - sdk_info(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware succeed. 
UTC time %ld-%02d-%02d %02d:%02d:%02d.\n", - tm.tm_year + 1900, tm.tm_mon + 1, - tm.tm_mday, tm.tm_hour, - tm.tm_min, tm.tm_sec); - } -} - -void wait_lld_dev_unused(struct spfc_pcidev *pci_adapter) -{ - u32 loop_cnt = 0; - - while (loop_cnt < SPFC_WAIT_TOOL_CNT_TIMEOUT) { - if (!atomic_read(&pci_adapter->ref_cnt)) - return; - - usleep_range(SPFC_TOOL_MIN_TIME_IN_USECS, SPFC_TOOL_MAX_TIME_IN_USECS); - loop_cnt++; - } -} - -static void lld_lock_chip_node(void) -{ - u32 loop_cnt; - - mutex_lock(&g_lld_lock.lld_mutex); - - loop_cnt = 0; - while (loop_cnt < WAIT_LLD_DEV_NODE_CHANGED) { - if (!test_and_set_bit(SPFC_NODE_CHANGE, &g_lld_lock.status)) - break; - - loop_cnt++; - - if (loop_cnt % SPFC_MAX_LOOP_TIMES == 0) - pr_warn("[warn]Wait for lld node change complete for %us", - loop_cnt / UNF_S_TO_MS); - - usleep_range(SPFC_MIN_TIME_IN_USECS, SPFC_MAX_TIME_IN_USECS); - } - - if (loop_cnt == WAIT_LLD_DEV_NODE_CHANGED) - pr_warn("[warn]Wait for lld node change complete timeout when try to get lld lock"); - - loop_cnt = 0; - while (loop_cnt < WAIT_LLD_DEV_REF_CNT_EMPTY) { - if (!atomic_read(&g_lld_lock.dev_ref_cnt)) - break; - - loop_cnt++; - - if (loop_cnt % SPFC_MAX_LOOP_TIMES == 0) - pr_warn("[warn]Wait for lld dev unused for %us, reference count: %d", - loop_cnt / UNF_S_TO_MS, atomic_read(&g_lld_lock.dev_ref_cnt)); - - usleep_range(SPFC_MIN_TIME_IN_USECS, SPFC_MAX_TIME_IN_USECS); - } - - if (loop_cnt == WAIT_LLD_DEV_REF_CNT_EMPTY) - pr_warn("[warn]Wait for lld dev unused timeout"); - - mutex_unlock(&g_lld_lock.lld_mutex); -} - -static void lld_unlock_chip_node(void) -{ - clear_bit(SPFC_NODE_CHANGE, &g_lld_lock.status); -} - -void lld_hold(void) -{ - u32 loop_cnt = 0; - - /* ensure there have not any chip node in changing */ - mutex_lock(&g_lld_lock.lld_mutex); - - while (loop_cnt < WAIT_LLD_DEV_HOLD_TIMEOUT) { - if (!test_bit(SPFC_NODE_CHANGE, &g_lld_lock.status)) - break; - - loop_cnt++; - - if (loop_cnt % SPFC_MAX_LOOP_TIMES == 0) - pr_warn("[warn]Wait lld node change complete for %u", - loop_cnt / UNF_S_TO_MS); - - usleep_range(SPFC_MIN_TIME_IN_USECS, SPFC_MAX_TIME_IN_USECS); - } - - if (loop_cnt == WAIT_LLD_DEV_HOLD_TIMEOUT) - pr_warn("[warn]Wait lld node change complete timeout when try to hode lld dev %u", - loop_cnt / UNF_S_TO_MS); - - atomic_inc(&g_lld_lock.dev_ref_cnt); - - mutex_unlock(&g_lld_lock.lld_mutex); -} - -void lld_put(void) -{ - atomic_dec(&g_lld_lock.dev_ref_cnt); -} - -static void spfc_lld_lock_init(void) -{ - mutex_init(&g_lld_lock.lld_mutex); - atomic_set(&g_lld_lock.dev_ref_cnt, 0); -} - -static void spfc_realease_cmo_op_handle(void) -{ - memset(&spfc_cm_op_handle, 0, sizeof(struct unf_cm_handle_op)); -} - -static void spfc_check_module_para(void) -{ - if (spfc_dif_enable) { - dif_sgl_mode = true; - spfc_dif_type = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIX_TYPE1_PROTECTION; - dix_flag = 1; - } - - if (dif_sgl_mode != 0) - dif_sgl_mode = 1; -} - -void spfc_event_process(void *adapter, struct sphw_event_info *event) -{ - struct spfc_pcidev *dev = adapter; - - if (test_and_set_bit(SERVICE_T_FC, &dev->state)) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[WARN]Event: fc is in detach"); - return; - } - - if (g_uld_info[SERVICE_T_FC].event) - g_uld_info[SERVICE_T_FC].event(&dev->lld_dev, dev->uld_dev[SERVICE_T_FC], event); - - clear_bit(SERVICE_T_FC, &dev->state); -} - -int spfc_stateful_init(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - int stateful_en; - int err; - - if (!dev) - return -EINVAL; - - if (dev->statufull_ref_cnt++) - return 0; - - stateful_en = 
IS_FT_TYPE(dev) | IS_RDMA_TYPE(dev); - if (stateful_en && SPHW_IS_PPF(dev)) { - err = sphw_ppf_ext_db_init(dev); - if (err) - goto out; - } - - err = cqm3_init(dev); - if (err) { - sdk_err(dev->dev_hdl, "Failed to init cqm, err: %d\n", err); - goto init_cqm_err; - } - - sdk_info(dev->dev_hdl, "Initialize statefull resource success\n"); - - return 0; - -init_cqm_err: - if (stateful_en) - sphw_ppf_ext_db_deinit(dev); - -out: - dev->statufull_ref_cnt--; - - return err; -} - -void spfc_stateful_deinit(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - u32 stateful_en; - - if (!dev || !dev->statufull_ref_cnt) - return; - - if (--dev->statufull_ref_cnt) - return; - - cqm3_uninit(hwdev); - - stateful_en = IS_FT_TYPE(dev) | IS_RDMA_TYPE(dev); - if (stateful_en) - sphw_ppf_ext_db_deinit(hwdev); - - sdk_info(dev->dev_hdl, "Clear statefull resource success\n"); -} - -static int attach_uld(struct spfc_pcidev *dev, struct spfc_uld_info *uld_info) -{ - void *uld_dev = NULL; - int err; - - mutex_lock(&dev->pdev_mutex); - if (dev->uld_dev[SERVICE_T_FC]) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]fc driver has attached to pcie device"); - err = 0; - goto out_unlock; - } - - err = spfc_stateful_init(dev->hwdev); - if (err) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Failed to initialize statefull resources"); - goto out_unlock; - } - - err = uld_info->probe(&dev->lld_dev, &uld_dev, - dev->uld_dev_name[SERVICE_T_FC]); - if (err || !uld_dev) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Failed to add object for fc driver to pcie device"); - goto probe_failed; - } - - dev->uld_dev[SERVICE_T_FC] = uld_dev; - mutex_unlock(&dev->pdev_mutex); - - return RETURN_OK; - -probe_failed: - spfc_stateful_deinit(dev->hwdev); - -out_unlock: - mutex_unlock(&dev->pdev_mutex); - - return err; -} - -static void detach_uld(struct spfc_pcidev *dev) -{ - struct spfc_uld_info *uld_info = &g_uld_info[SERVICE_T_FC]; - u32 cnt = 0; - - mutex_lock(&dev->pdev_mutex); - if (!dev->uld_dev[SERVICE_T_FC]) { - mutex_unlock(&dev->pdev_mutex); - return; - } - - while (cnt < SPFC_EVENT_PROCESS_TIMEOUT) { - if (!test_and_set_bit(SERVICE_T_FC, &dev->state)) - break; - usleep_range(900, 1000); - cnt++; - } - - uld_info->remove(&dev->lld_dev, dev->uld_dev[SERVICE_T_FC]); - dev->uld_dev[SERVICE_T_FC] = NULL; - spfc_stateful_deinit(dev->hwdev); - if (cnt < SPFC_EVENT_PROCESS_TIMEOUT) - clear_bit(SERVICE_T_FC, &dev->state); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "Detach fc driver from pcie device succeed"); - mutex_unlock(&dev->pdev_mutex); -} - -int spfc_register_uld(struct spfc_uld_info *uld_info) -{ - memset(g_uld_info, 0, sizeof(g_uld_info)); - spfc_lld_lock_init(); - mutex_init(&g_device_mutex); - mutex_init(&g_pci_init_mutex); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "[event]Module Init Success, wait for pci init and probe"); - - if (!uld_info || !uld_info->probe || !uld_info->remove) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Invalid information of fc driver to register"); - return -EINVAL; - } - - lld_hold(); - - if (g_uld_info[SERVICE_T_FC].probe) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]fc driver has registered"); - lld_put(); - return -EINVAL; - } - - memcpy(&g_uld_info[SERVICE_T_FC], uld_info, sizeof(*uld_info)); - - lld_put(); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "[KEVENT]Register spfc driver succeed"); - return RETURN_OK; -} - -void spfc_unregister_uld(void) -{ - struct spfc_uld_info *uld_info = NULL; - - lld_hold(); - uld_info = &g_uld_info[SERVICE_T_FC]; - 
memset(uld_info, 0, sizeof(*uld_info)); - lld_put(); -} - -static int spfc_pci_init(struct pci_dev *pdev) -{ - struct spfc_pcidev *pci_adapter = NULL; - int err = 0; - - pci_adapter = kzalloc(sizeof(*pci_adapter), GFP_KERNEL); - if (!pci_adapter) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Failed to alloc pci device adapter"); - return -ENOMEM; - } - pci_adapter->pcidev = pdev; - mutex_init(&pci_adapter->pdev_mutex); - - pci_set_drvdata(pdev, pci_adapter); - - err = pci_enable_device(pdev); - if (err) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Failed to enable PCI device"); - goto pci_enable_err; - } - - err = pci_request_regions(pdev, SPFC_DRV_NAME); - if (err) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Failed to request regions"); - goto pci_regions_err; - } - - pci_enable_pcie_error_reporting(pdev); - - pci_set_master(pdev); - - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Couldn't set 64-bit DMA mask"); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, - UNF_ERR, "[err]Failed to set DMA mask"); - goto dma_mask_err; - } - } - - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Couldn't set 64-bit coherent DMA mask"); - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, - UNF_ERR, - "[err]Failed to set coherent DMA mask"); - goto dma_consistnet_mask_err; - } - } - - return 0; - -dma_consistnet_mask_err: -dma_mask_err: - pci_clear_master(pdev); - pci_release_regions(pdev); - -pci_regions_err: - pci_disable_device(pdev); - -pci_enable_err: - pci_set_drvdata(pdev, NULL); - kfree(pci_adapter); - - return err; -} - -static void spfc_pci_deinit(struct pci_dev *pdev) -{ - struct spfc_pcidev *pci_adapter = pci_get_drvdata(pdev); - - pci_clear_master(pdev); - pci_release_regions(pdev); - pci_disable_pcie_error_reporting(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - kfree(pci_adapter); -} - -static int alloc_chip_node(struct spfc_pcidev *pci_adapter) -{ - struct card_node *chip_node = NULL; - unsigned char i; - unsigned char bus_number = 0; - - if (!pci_is_root_bus(pci_adapter->pcidev->bus)) - bus_number = pci_adapter->pcidev->bus->number; - - if (bus_number != 0) { - list_for_each_entry(chip_node, &g_spfc_chip_list, node) { - if (chip_node->bus_num == bus_number) { - pci_adapter->chip_node = chip_node; - return 0; - } - } - } else if (pci_adapter->pcidev->device == SPFC_DEV_ID_VF) { - list_for_each_entry(chip_node, &g_spfc_chip_list, node) { - if (chip_node) { - pci_adapter->chip_node = chip_node; - return 0; - } - } - } - - for (i = 0; i < MAX_CARD_ID; i++) { - if (!test_and_set_bit(i, &card_bit_map)) - break; - } - - if (i == MAX_CARD_ID) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Failed to alloc card id"); - return -EFAULT; - } - - chip_node = kzalloc(sizeof(*chip_node), GFP_KERNEL); - if (!chip_node) { - clear_bit(i, &card_bit_map); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Failed to alloc chip node"); - return -ENOMEM; - } - - /* bus number */ - chip_node->bus_num = bus_number; - - snprintf(chip_node->chip_name, IFNAMSIZ, "%s%d", SPFC_CHIP_NAME, i); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[INFO]Add new chip %s to global list succeed", - chip_node->chip_name); - - list_add_tail(&chip_node->node, &g_spfc_chip_list); - - INIT_LIST_HEAD(&chip_node->func_list); - pci_adapter->chip_node = 
chip_node; - - return 0; -} - -#ifdef CONFIG_X86 -void cfg_order_reg(struct spfc_pcidev *pci_adapter) -{ - u8 cpu_model[] = {0x3c, 0x3f, 0x45, 0x46, 0x3d, 0x47, 0x4f, 0x56}; - struct cpuinfo_x86 *cpuinfo = NULL; - u32 i; - - if (sphw_func_type(pci_adapter->hwdev) == TYPE_VF) - return; - - cpuinfo = &cpu_data(0); - - for (i = 0; i < sizeof(cpu_model); i++) { - if (cpu_model[i] == cpuinfo->x86_model) - sphw_set_pcie_order_cfg(pci_adapter->hwdev); - } -} -#endif - -static int mapping_bar(struct pci_dev *pdev, struct spfc_pcidev *pci_adapter) -{ - int cfg_bar; - - cfg_bar = pdev->is_virtfn ? SPFC_VF_PCI_CFG_REG_BAR : SPFC_PF_PCI_CFG_REG_BAR; - - pci_adapter->cfg_reg_base = pci_ioremap_bar(pdev, cfg_bar); - if (!pci_adapter->cfg_reg_base) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Failed to map configuration regs"); - return -ENOMEM; - } - - pci_adapter->intr_reg_base = pci_ioremap_bar(pdev, SPFC_PCI_INTR_REG_BAR); - if (!pci_adapter->intr_reg_base) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Failed to map interrupt regs"); - goto map_intr_bar_err; - } - - if (!pdev->is_virtfn) { - pci_adapter->mgmt_reg_base = pci_ioremap_bar(pdev, SPFC_PCI_MGMT_REG_BAR); - if (!pci_adapter->mgmt_reg_base) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, - UNF_ERR, "Failed to map mgmt regs"); - goto map_mgmt_bar_err; - } - } - - pci_adapter->db_base_phy = pci_resource_start(pdev, SPFC_PCI_DB_BAR); - pci_adapter->db_dwqe_len = pci_resource_len(pdev, SPFC_PCI_DB_BAR); - pci_adapter->db_base = pci_ioremap_bar(pdev, SPFC_PCI_DB_BAR); - if (!pci_adapter->db_base) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "Failed to map doorbell regs"); - goto map_db_err; - } - - return 0; - -map_db_err: - if (!pdev->is_virtfn) - iounmap(pci_adapter->mgmt_reg_base); - -map_mgmt_bar_err: - iounmap(pci_adapter->intr_reg_base); - -map_intr_bar_err: - iounmap(pci_adapter->cfg_reg_base); - - return -ENOMEM; -} - -static void unmapping_bar(struct spfc_pcidev *pci_adapter) -{ - iounmap(pci_adapter->db_base); - - if (!pci_adapter->pcidev->is_virtfn) - iounmap(pci_adapter->mgmt_reg_base); - - iounmap(pci_adapter->intr_reg_base); - iounmap(pci_adapter->cfg_reg_base); -} - -static int spfc_func_init(struct pci_dev *pdev, struct spfc_pcidev *pci_adapter) -{ - struct sphw_init_para init_para = {0}; - int err; - - init_para.adapter_hdl = pci_adapter; - init_para.pcidev_hdl = pdev; - init_para.dev_hdl = &pdev->dev; - init_para.cfg_reg_base = pci_adapter->cfg_reg_base; - init_para.intr_reg_base = pci_adapter->intr_reg_base; - init_para.mgmt_reg_base = pci_adapter->mgmt_reg_base; - init_para.db_base = pci_adapter->db_base; - init_para.db_base_phy = pci_adapter->db_base_phy; - init_para.db_dwqe_len = pci_adapter->db_dwqe_len; - init_para.hwdev = &pci_adapter->hwdev; - init_para.chip_node = pci_adapter->chip_node; - err = sphw_init_hwdev(&init_para); - if (err) { - pci_adapter->hwdev = NULL; - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Failed to initialize hardware device"); - return -EFAULT; - } - - pci_adapter->lld_dev.pdev = pdev; - pci_adapter->lld_dev.hwdev = pci_adapter->hwdev; - - sphw_event_register(pci_adapter->hwdev, pci_adapter, spfc_event_process); - - if (sphw_func_type(pci_adapter->hwdev) != TYPE_VF) - spfc_sync_time_to_fmw(pci_adapter); - lld_lock_chip_node(); - list_add_tail(&pci_adapter->node, &pci_adapter->chip_node->func_list); - lld_unlock_chip_node(); - err = attach_uld(pci_adapter, &g_uld_info[SERVICE_T_FC]); - - if (err) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Spfc3 attach uld fail"); - goto attach_fc_err; - } - -#ifdef 
CONFIG_X86 - cfg_order_reg(pci_adapter); -#endif - - return 0; - -attach_fc_err: - lld_lock_chip_node(); - list_del(&pci_adapter->node); - lld_unlock_chip_node(); - wait_lld_dev_unused(pci_adapter); - - return err; -} - -static void spfc_func_deinit(struct pci_dev *pdev) -{ - struct spfc_pcidev *pci_adapter = pci_get_drvdata(pdev); - - lld_lock_chip_node(); - list_del(&pci_adapter->node); - lld_unlock_chip_node(); - wait_lld_dev_unused(pci_adapter); - - detach_uld(pci_adapter); - sphw_disable_mgmt_msg_report(pci_adapter->hwdev); - sphw_flush_mgmt_workq(pci_adapter->hwdev); - sphw_event_unregister(pci_adapter->hwdev); - sphw_free_hwdev(pci_adapter->hwdev); -} - -static void free_chip_node(struct spfc_pcidev *pci_adapter) -{ - struct card_node *chip_node = pci_adapter->chip_node; - int id, err; - - if (list_empty(&chip_node->func_list)) { - list_del(&chip_node->node); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[INFO]Delete chip %s from global list succeed", - chip_node->chip_name); - err = sscanf(chip_node->chip_name, SPFC_CHIP_NAME "%d", &id); - if (err < 0) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, - UNF_ERR, "[err]Failed to get spfc id"); - } - - clear_bit(id, &card_bit_map); - - kfree(chip_node); - } -} - -static int spfc_probe(struct pci_dev *pdev, const struct pci_device_id *id) -{ - struct spfc_pcidev *pci_adapter = NULL; - int err; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "[event]Spfc3 Pcie device probe begin"); - - mutex_lock(&g_pci_init_mutex); - err = spfc_pci_init(pdev); - if (err) { - mutex_unlock(&g_pci_init_mutex); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]pci init fail, return %d", err); - return err; - } - pci_adapter = pci_get_drvdata(pdev); - err = mapping_bar(pdev, pci_adapter); - if (err) { - mutex_unlock(&g_pci_init_mutex); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Failed to map bar"); - goto map_bar_failed; - } - mutex_unlock(&g_pci_init_mutex); - pci_adapter->id = *id; - lld_dev_cnt_init(pci_adapter); - - /* if chip information of pcie function exist, add the function into chip */ - lld_lock_chip_node(); - err = alloc_chip_node(pci_adapter); - if (err) { - lld_unlock_chip_node(); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Failed to add new chip node to global list"); - goto alloc_chip_node_fail; - } - - lld_unlock_chip_node(); - err = spfc_func_init(pdev, pci_adapter); - if (err) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]spfc func init fail"); - goto func_init_err; - } - - return 0; - -func_init_err: - lld_lock_chip_node(); - free_chip_node(pci_adapter); - lld_unlock_chip_node(); - -alloc_chip_node_fail: - unmapping_bar(pci_adapter); - -map_bar_failed: - spfc_pci_deinit(pdev); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Pcie device probe failed"); - return err; -} - -static void spfc_remove(struct pci_dev *pdev) -{ - struct spfc_pcidev *pci_adapter = pci_get_drvdata(pdev); - - if (!pci_adapter) - return; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[INFO]Pcie device remove begin"); - sphw_detect_hw_present(pci_adapter->hwdev); - spfc_func_deinit(pdev); - lld_lock_chip_node(); - free_chip_node(pci_adapter); - lld_unlock_chip_node(); - unmapping_bar(pci_adapter); - mutex_lock(&g_pci_init_mutex); - spfc_pci_deinit(pdev); - mutex_unlock(&g_pci_init_mutex); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[INFO]Pcie device removed"); -} - -static void spfc_shutdown(struct pci_dev *pdev) -{ - struct spfc_pcidev *pci_adapter = pci_get_drvdata(pdev); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Shutdown device"); - - if (pci_adapter) - 
sphw_shutdown_hwdev(pci_adapter->hwdev); - - pci_disable_device(pdev); -} - -static pci_ers_result_t spfc_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) -{ - struct spfc_pcidev *pci_adapter = NULL; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Uncorrectable error detected, log and cleanup error status: 0x%08x", - state); - - pci_aer_clear_nonfatal_status(pdev); - pci_adapter = pci_get_drvdata(pdev); - - if (pci_adapter) - sphw_record_pcie_error(pci_adapter->hwdev); - - return PCI_ERS_RESULT_CAN_RECOVER; -} - -static int unf_global_value_init(void) -{ - memset(rx_tx_stat, 0, sizeof(rx_tx_stat)); - memset(rx_tx_err, 0, sizeof(rx_tx_err)); - memset(scq_err_stat, 0, sizeof(scq_err_stat)); - memset(aeq_err_stat, 0, sizeof(aeq_err_stat)); - memset(dif_err_stat, 0, sizeof(dif_err_stat)); - memset(link_event_stat, 0, sizeof(link_event_stat)); - memset(link_reason_stat, 0, sizeof(link_reason_stat)); - memset(hba_stat, 0, sizeof(hba_stat)); - memset(&spfc_cm_op_handle, 0, sizeof(struct unf_cm_handle_op)); - memset(up_err_event_stat, 0, sizeof(up_err_event_stat)); - memset(mail_box_stat, 0, sizeof(mail_box_stat)); - memset(spfc_hba, 0, sizeof(spfc_hba)); - - spin_lock_init(&probe_spin_lock); - - /* 4. Get COM Handlers used for low_level */ - if (unf_get_cm_handle_ops(&spfc_cm_op_handle) != RETURN_OK) { - spfc_realease_cmo_op_handle(); - return RETURN_ERROR_S32; - } - - return RETURN_OK; -} - -static const struct pci_device_id spfc_pci_table[] = { - {PCI_VDEVICE(RAMAXEL, SPFC_DEV_ID_PF_STD), 0}, - {0, 0} -}; - -MODULE_DEVICE_TABLE(pci, spfc_pci_table); - -static struct pci_error_handlers spfc_err_handler = { - .error_detected = spfc_io_error_detected, -}; - -static struct pci_driver spfc_driver = {.name = SPFC_DRV_NAME, - .id_table = spfc_pci_table, - .probe = spfc_probe, - .remove = spfc_remove, - .shutdown = spfc_shutdown, - .err_handler = &spfc_err_handler}; - -static __init int spfc_lld_init(void) -{ - if (unf_common_init() != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]UNF_Common_init failed"); - return RETURN_ERROR_S32; - } - - spfc_check_module_para(); - - if (unf_global_value_init() != RETURN_OK) - return RETURN_ERROR_S32; - - spfc_register_uld(&fc_uld_info); - return pci_register_driver(&spfc_driver); -} - -static __exit void spfc_lld_exit(void) -{ - pci_unregister_driver(&spfc_driver); - spfc_unregister_uld(); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[event]SPFC module removing..."); - - spfc_realease_cmo_op_handle(); - - /* 2. 
Unregister FC COM module(level) */ - unf_common_exit(); -} - -module_init(spfc_lld_init); -module_exit(spfc_lld_exit); - -MODULE_AUTHOR("Ramaxel Memory Technology, Ltd"); -MODULE_DESCRIPTION(SPFC_DRV_DESC); -MODULE_VERSION(SPFC_DRV_VERSION); -MODULE_LICENSE("GPL"); - -module_param(allowed_probe_num, uint, 0444); -module_param(dif_sgl_mode, uint, 0444); -module_param(max_speed, uint, 0444); -module_param(wqe_page_size, uint, 0444); -module_param(combo_length, uint, 0444); -module_param(cos_bit_map, uint, 0444); -module_param(spfc_dif_enable, uint, 0444); -MODULE_PARM_DESC(spfc_dif_enable, "set dif enable/disable(1/0), default is 0(disable)."); -module_param(link_lose_tmo, uint, 0444); -MODULE_PARM_DESC(link_lose_tmo, "set link time out, default is 30s."); diff --git a/drivers/scsi/spfc/hw/spfc_lld.h b/drivers/scsi/spfc/hw/spfc_lld.h deleted file mode 100644 index f7b4a5e5ce07..000000000000 --- a/drivers/scsi/spfc/hw/spfc_lld.h +++ /dev/null @@ -1,76 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPFC_LLD_H -#define SPFC_LLD_H - -#include "sphw_crm.h" - -struct spfc_lld_dev { - struct pci_dev *pdev; - void *hwdev; -}; - -struct spfc_uld_info { - /* uld_dev: should not return null even the function capability - * is not support the up layer driver - * uld_dev_name: NIC driver should copy net device name. - * FC driver could copy fc device name. - * other up layer driver don`t need copy anything - */ - int (*probe)(struct spfc_lld_dev *lld_dev, void **uld_dev, - char *uld_dev_name); - void (*remove)(struct spfc_lld_dev *lld_dev, void *uld_dev); - int (*suspend)(struct spfc_lld_dev *lld_dev, void *uld_dev, - pm_message_t state); - int (*resume)(struct spfc_lld_dev *lld_dev, void *uld_dev); - void (*event)(struct spfc_lld_dev *lld_dev, void *uld_dev, - struct sphw_event_info *event); - int (*ioctl)(void *uld_dev, u32 cmd, const void *buf_in, u32 in_size, - void *buf_out, u32 *out_size); -}; - -/* Structure pcidev private */ -struct spfc_pcidev { - struct pci_dev *pcidev; - void *hwdev; - struct card_node *chip_node; - struct spfc_lld_dev lld_dev; - /* such as fc_dev */ - void *uld_dev[SERVICE_T_MAX]; - /* Record the service object name */ - char uld_dev_name[SERVICE_T_MAX][IFNAMSIZ]; - /* It is a the global variable for driver to manage - * all function device linked list - */ - struct list_head node; - void __iomem *cfg_reg_base; - void __iomem *intr_reg_base; - void __iomem *mgmt_reg_base; - u64 db_dwqe_len; - u64 db_base_phy; - void __iomem *db_base; - /* lock for attach/detach uld */ - struct mutex pdev_mutex; - /* setted when uld driver processing event */ - unsigned long state; - struct pci_device_id id; - atomic_t ref_cnt; -}; - -enum spfc_lld_status { - SPFC_NODE_CHANGE = BIT(0), -}; - -struct spfc_lld_lock { - /* lock for chip list */ - struct mutex lld_mutex; - unsigned long status; - atomic_t dev_ref_cnt; -}; - -#ifndef MAX_SIZE -#define MAX_SIZE (16) -#endif - -#endif diff --git a/drivers/scsi/spfc/hw/spfc_module.h b/drivers/scsi/spfc/hw/spfc_module.h deleted file mode 100644 index 153d59955339..000000000000 --- a/drivers/scsi/spfc/hw/spfc_module.h +++ /dev/null @@ -1,297 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPFC_MODULE_H -#define SPFC_MODULE_H -#include "unf_type.h" -#include "unf_log.h" -#include "unf_common.h" -#include "spfc_utils.h" -#include "spfc_hba.h" - -#define SPFC_FT_ENABLE (1) -#define SPFC_FC_DISABLE (0) - -#define 
SPFC_P2P_DIRECT (0) -#define SPFC_P2P_FABRIC (1) -#define SPFC_LOOP (2) -#define SPFC_ATUOSPEED (1) -#define SPFC_FIXEDSPEED (0) -#define SPFC_AUTOTOPO (0) -#define SPFC_P2PTOPO (0x2) -#define SPFC_LOOPTOPO (0x1) -#define SPFC_SPEED_2G (0x2) -#define SPFC_SPEED_4G (0x4) -#define SPFC_SPEED_8G (0x8) -#define SPFC_SPEED_16G (0x10) -#define SPFC_SPEED_32G (0x20) - -#define SPFC_MAX_PORT_NUM SPFC_MAX_PROBE_PORT_NUM -#define SPFC_MAX_PORT_TASK_TYPE_STAT_NUM (128) -#define SPFC_MAX_LINK_EVENT_CNT (4) -#define SPFC_MAX_LINK_REASON_CNT (256) - -#define SPFC_MML_LOGCTRO_NUM (14) - -#define WWN_SIZE 8 /* Size of WWPN, WWN & WWNN */ - -/* - * Define the data type - */ -struct spfc_log_ctrl { - char *log_option; - u32 state; -}; - -/* - * Declare the global function. - */ -extern struct unf_cm_handle_op spfc_cm_op_handle; -extern struct spfc_uld_info fc_uld_info; -extern u32 allowed_probe_num; -extern u32 max_speed; -extern u32 accum_db_num; -extern u32 wqe_page_size; -extern u32 dif_type; -extern u32 wqe_pre_load; -extern u32 combo_length; -extern u32 cos_bit_map; -extern u32 exit_count; -extern u32 exit_stride; -extern u32 exit_base; - -extern atomic64_t rx_tx_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -extern atomic64_t rx_tx_err[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -extern atomic64_t scq_err_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -extern atomic64_t aeq_err_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -extern atomic64_t dif_err_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -extern atomic64_t mail_box_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -extern atomic64_t com_up_event_err_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -extern u64 link_event_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_LINK_EVENT_CNT]; -extern u64 link_reason_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_LINK_REASON_CNT]; -extern atomic64_t up_err_event_stat[SPFC_MAX_PORT_NUM][SPFC_MAX_PORT_TASK_TYPE_STAT_NUM]; -extern u64 hba_stat[SPFC_MAX_PORT_NUM][SPFC_HBA_STAT_BUTT]; -#define SPFC_LINK_EVENT_STAT(hba, link_ent) \ - (link_event_stat[(hba)->probe_index][link_ent]++) -#define SPFC_LINK_REASON_STAT(hba, link_rsn) \ - (link_reason_stat[(hba)->probe_index][link_rsn]++) -#define SPFC_HBA_STAT(hba, hba_stat_type) \ - (hba_stat[(hba)->probe_index][hba_stat_type]++) - -#define SPFC_UP_ERR_EVENT_STAT(hba, err_type) \ - (atomic64_inc(&up_err_event_stat[(hba)->probe_index][err_type])) -#define SPFC_UP_ERR_EVENT_STAT_READ(probe_index, io_type) \ - (atomic64_read(&up_err_event_stat[probe_index][io_type])) -#define SPFC_DIF_ERR_STAT(hba, dif_err) \ - (atomic64_inc(&dif_err_stat[(hba)->probe_index][dif_err])) -#define SPFC_DIF_ERR_STAT_READ(probe_index, dif_err) \ - (atomic64_read(&dif_err_stat[probe_index][dif_err])) - -#define SPFC_IO_STAT(hba, io_type) \ - (atomic64_inc(&rx_tx_stat[(hba)->probe_index][io_type])) -#define SPFC_IO_STAT_READ(probe_index, io_type) \ - (atomic64_read(&rx_tx_stat[probe_index][io_type])) - -#define SPFC_ERR_IO_STAT(hba, io_type) \ - (atomic64_inc(&rx_tx_err[(hba)->probe_index][io_type])) -#define SPFC_ERR_IO_STAT_READ(probe_index, io_type) \ - (atomic64_read(&rx_tx_err[probe_index][io_type])) - -#define SPFC_SCQ_ERR_TYPE_STAT(hba, err_type) \ - (atomic64_inc(&scq_err_stat[(hba)->probe_index][err_type])) -#define SPFC_SCQ_ERR_TYPE_STAT_READ(probe_index, io_type) \ - (atomic64_read(&scq_err_stat[probe_index][io_type])) -#define SPFC_AEQ_ERR_TYPE_STAT(hba, err_type) \ - (atomic64_inc(&aeq_err_stat[(hba)->probe_index][err_type])) 
-#define SPFC_AEQ_ERR_TYPE_STAT_READ(probe_index, io_type) \ - (atomic64_read(&aeq_err_stat[probe_index][io_type])) - -#define SPFC_MAILBOX_STAT(hba, io_type) \ - (atomic64_inc(&mail_box_stat[(hba)->probe_index][io_type])) -#define SPFC_MAILBOX_STAT_READ(probe_index, io_type) \ - (atomic64_read(&mail_box_stat[probe_index][io_type])) - -#define SPFC_COM_UP_ERR_EVENT_STAT(hba, err_type) \ - (atomic64_inc( \ - &com_up_event_err_stat[(hba)->probe_index][err_type])) -#define SPFC_COM_UP_ERR_EVENT_STAT_READ(probe_index, err_type) \ - (atomic64_read(&com_up_event_err_stat[probe_index][err_type])) - -#define UNF_LOWLEVEL_ALLOC_LPORT(lport, fc_port, low_level) \ - do { \ - if (spfc_cm_op_handle.unf_alloc_local_port) { \ - lport = spfc_cm_op_handle.unf_alloc_local_port( \ - (fc_port), (low_level)); \ - } else { \ - lport = NULL; \ - } \ - } while (0) - -#define UNF_LOWLEVEL_RECEIVE_LS_GS_PKG(ret, fc_port, pkg) \ - do { \ - if (spfc_cm_op_handle.unf_receive_ls_gs_pkg) { \ - ret = spfc_cm_op_handle.unf_receive_ls_gs_pkg( \ - (fc_port), (pkg)); \ - } else { \ - ret = UNF_RETURN_ERROR; \ - } \ - } while (0) - -#define UNF_LOWLEVEL_SEND_ELS_DONE(ret, fc_port, pkg) \ - do { \ - if (spfc_cm_op_handle.unf_send_els_done) { \ - ret = spfc_cm_op_handle.unf_send_els_done((fc_port), \ - (pkg)); \ - } else { \ - ret = UNF_RETURN_ERROR; \ - } \ - } while (0) - -#define UNF_LOWLEVEL_GET_CFG_PARMS(ret, section_name, cfg_parm, cfg_value, \ - item_num) \ - do { \ - if (spfc_cm_op_handle.unf_get_cfg_parms) { \ - ret = (u32)spfc_cm_op_handle.unf_get_cfg_parms( \ - (section_name), (cfg_parm), (cfg_value), \ - (item_num)); \ - } else { \ - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, \ - "Get config parameter function is NULL."); \ - ret = UNF_RETURN_ERROR; \ - } \ - } while (0) - -#define UNF_LOWLEVEL_RELEASE_LOCAL_PORT(ret, lport) \ - do { \ - if (unlikely(!spfc_cm_op_handle.unf_release_local_port)) { \ - ret = UNF_RETURN_ERROR; \ - } else { \ - ret = \ - spfc_cm_op_handle.unf_release_local_port(lport); \ - } \ - } while (0) - -#define UNF_CM_GET_SGL_ENTRY(ret, pkg, buf, buf_len) \ - do { \ - if (unlikely(!spfc_cm_op_handle.unf_cm_get_sgl_entry)) { \ - ret = UNF_RETURN_ERROR; \ - } else { \ - ret = spfc_cm_op_handle.unf_cm_get_sgl_entry( \ - pkg, buf, buf_len); \ - } \ - } while (0) - -#define UNF_CM_GET_DIF_SGL_ENTRY(ret, pkg, buf, buf_len) \ - do { \ - if (unlikely(!spfc_cm_op_handle.unf_cm_get_dif_sgl_entry)) { \ - ret = UNF_RETURN_ERROR; \ - } else { \ - ret = spfc_cm_op_handle.unf_cm_get_dif_sgl_entry( \ - pkg, buf, buf_len); \ - } \ - } while (0) - -#define UNF_GET_SGL_ENTRY(ret, pkg, buf, buf_len, dif_flag) \ - do { \ - if (dif_flag) { \ - UNF_CM_GET_DIF_SGL_ENTRY(ret, pkg, buf, buf_len); \ - } else { \ - UNF_CM_GET_SGL_ENTRY(ret, pkg, buf, buf_len); \ - } \ - } while (0) - -#define UNF_GET_FREE_ESGL_PAGE(ret, lport, pkg) \ - do { \ - if (unlikely( \ - !spfc_cm_op_handle.unf_get_one_free_esgl_page)) { \ - ret = NULL; \ - } else { \ - ret = \ - spfc_cm_op_handle.unf_get_one_free_esgl_page( \ - lport, pkg); \ - } \ - } while (0) - -#define UNF_LOWLEVEL_FCP_CMND_RECEIVED(ret, lport, pkg) \ - do { \ - if (unlikely(!spfc_cm_op_handle.unf_process_fcp_cmnd)) { \ - ret = UNF_RETURN_ERROR; \ - } else { \ - ret = spfc_cm_op_handle.unf_process_fcp_cmnd(lport, \ - pkg); \ - } \ - } while (0) - -#define UNF_LOWLEVEL_SCSI_COMPLETED(ret, lport, pkg) \ - do { \ - if (unlikely(NULL == \ - spfc_cm_op_handle.unf_receive_ini_response)) { \ - ret = UNF_RETURN_ERROR; \ - } else { \ - ret = spfc_cm_op_handle.unf_receive_ini_response( \ - 
lport, pkg); \ - } \ - } while (0) - -#define UNF_LOWLEVEL_PORT_EVENT(ret, lport, event, input) \ - do { \ - if (unlikely(!spfc_cm_op_handle.unf_fc_port_event)) { \ - ret = UNF_RETURN_ERROR; \ - } else { \ - ret = spfc_cm_op_handle.unf_fc_port_event( \ - lport, event, input); \ - } \ - } while (0) - -#define UNF_LOWLEVEL_RECEIVE_FC4LS_PKG(ret, fc_port, pkg) \ - do { \ - if (spfc_cm_op_handle.unf_receive_fc4ls_pkg) { \ - ret = spfc_cm_op_handle.unf_receive_fc4ls_pkg( \ - (fc_port), (pkg)); \ - } else { \ - ret = UNF_RETURN_ERROR; \ - } \ - } while (0) - -#define UNF_LOWLEVEL_SEND_FC4LS_DONE(ret, lport, pkg) \ - do { \ - if (spfc_cm_op_handle.unf_send_fc4ls_done) { \ - ret = spfc_cm_op_handle.unf_send_fc4ls_done((lport), \ - (pkg)); \ - } else { \ - ret = UNF_RETURN_ERROR; \ - } \ - } while (0) - -#define UNF_LOWLEVEL_RECEIVE_BLS_PKG(ret, lport, pkg) \ - do { \ - if (spfc_cm_op_handle.unf_receive_bls_pkg) { \ - ret = spfc_cm_op_handle.unf_receive_bls_pkg((lport), \ - (pkg)); \ - } else { \ - ret = UNF_RETURN_ERROR; \ - } \ - } while (0) - -#define UNF_LOWLEVEL_RECEIVE_MARKER_STS(ret, lport, pkg) \ - do { \ - if (spfc_cm_op_handle.unf_receive_marker_status) { \ - ret = spfc_cm_op_handle.unf_receive_marker_status( \ - (lport), (pkg)); \ - } else { \ - ret = UNF_RETURN_ERROR; \ - } \ - } while (0) - -#define UNF_LOWLEVEL_RECEIVE_ABTS_MARKER_STS(ret, lport, pkg) \ - do { \ - if (spfc_cm_op_handle.unf_receive_abts_marker_status) { \ - ret = \ - spfc_cm_op_handle.unf_receive_abts_marker_status( \ - (lport), (pkg)); \ - } else { \ - ret = UNF_RETURN_ERROR; \ - } \ - } while (0) - -#endif diff --git a/drivers/scsi/spfc/hw/spfc_parent_context.h b/drivers/scsi/spfc/hw/spfc_parent_context.h deleted file mode 100644 index dc4baffe5c44..000000000000 --- a/drivers/scsi/spfc/hw/spfc_parent_context.h +++ /dev/null @@ -1,269 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPFC_PARENT_CONTEXT_H -#define SPFC_PARENT_CONTEXT_H - -enum fc_parent_status { - FC_PARENT_STATUS_INVALID = 0, - FC_PARENT_STATUS_NORMAL, - FC_PARENT_STATUS_CLOSING -}; - -#define SPFC_PARENT_CONTEXT_KEY_ALIGN_SIZE (48) - -#define SPFC_PARENT_CONTEXT_TIMER_SIZE (32) /* 24+2*N,N=timer count */ - -#define FC_CALC_CID(_xid) \ - (((((_xid) >> 5) & 0x1ff) << 11) | ((((_xid) >> 5) & 0x1ff) << 2) | \ - (((_xid) >> 3) & 0x3)) - -#define MAX_PKT_SIZE_PER_DISPATCH (fc_child_ctx_ex->per_xmit_data_size) - -/* immediate data DIF info definition in parent context */ -struct immi_dif_info { - union { - u32 value; - struct { - u32 app_tag_ctrl : 3; /* DIF/DIX APP TAG Control */ - u32 ref_tag_mode : 2; /* Bit 0: scenario of the reference tag verify mode */ - /* Bit 1: scenario of the reference tag insert/replace - * mode 0: fixed; 1: increasement; - */ - u32 ref_tag_ctrl : 3; /* The DIF/DIX Reference tag control */ - u32 grd_agm_ini_ctrl : 3; - u32 grd_agm_ctrl : 2; /* Bit 0: DIF/DIX guard verify algorithm control */ - /* Bit 1: DIF/DIX guard replace or insert algorithm control */ - u32 grd_ctrl : 3; /* The DIF/DIX Guard control */ - u32 dif_verify_type : 2; /* verify type */ - /* Check blocks whose reference tag contains 0xFFFF flag */ - u32 difx_ref_esc : 1; - /* Check blocks whose application tag contains 0xFFFF flag */ - u32 difx_app_esc : 1; - u32 rsvd : 8; - u32 sct_size : 1; /* Sector size, 1: 4K; 0: 512 */ - u32 smd_tp : 2; - u32 difx_en : 1; - } info; - } dif_dw3; - - u32 cmp_app_tag : 16; - u32 rep_app_tag : 16; - /* The ref tag value for verify compare, do not support replace or 
insert ref tag */ - u32 cmp_ref_tag; - u32 rep_ref_tag; - - u32 rsv1 : 16; - u32 cmp_app_tag_msk : 16; -}; - -/* parent context SW section definition: SW(80B) */ -struct spfc_sw_section { - u16 scq_num_rcv_cmd; - u16 scq_num_max_scqn; - - struct { - u32 xid : 13; - u32 vport : 7; - u32 csctrl : 8; - u32 rsvd0 : 4; - } sw_ctxt_vport_xid; - - u32 scq_num_scqn_mask : 12; - u32 cid : 20; /* ucode init */ - - u16 conn_id; - u16 immi_rq_page_size; - - u16 immi_taskid_min; - u16 immi_taskid_max; - - union { - u32 pctxt_val0; - struct { - u32 srv_type : 5; /* driver init */ - u32 srr_support : 2; /* sequence retransmition support flag */ - u32 rsvd1 : 5; - u32 port_id : 4; /* driver init */ - u32 vlan_id : 16; /* driver init */ - } dw; - } sw_ctxt_misc; - - u32 rsvd2; - u32 per_xmit_data_size; - - /* RW fields */ - u32 cmd_scq_gpa_h; - u32 cmd_scq_gpa_l; - u32 e_d_tov_timer_val; /* E_D_TOV timer value: value should be set on ms by driver */ - u16 mfs_unaligned_bytes; /* mfs unalined bytes of per 64KB dispatch*/ - u16 tx_mfs; /* remote port max receive fc payload length */ - u32 xfer_rdy_dis_max_len_remote; /* max data len allowed in xfer_rdy dis scenario */ - u32 xfer_rdy_dis_max_len_local; - - union { - struct { - u32 priority : 3; /* vlan priority */ - u32 rsvd4 : 2; - u32 status : 8; /* status of flow */ - u32 cos : 3; /* doorbell cos value */ - u32 oq_cos_data : 3; /* esch oq cos for data */ - u32 oq_cos_cmd : 3; /* esch oq cos for cmd/xferrdy/rsp */ - /* used for parent context cache Consistency judgment,1: done */ - u32 flush_done : 1; - u32 work_mode : 2; /* 0:Target, 1:Initiator, 2:Target&Initiator */ - u32 seq_cnt : 1; /* seq_cnt */ - u32 e_d_tov : 1; /* E_D_TOV resolution */ - u32 vlan_enable : 1; /* Vlan enable flag */ - u32 conf_support : 1; /* Response confirm support flag */ - u32 rec_support : 1; /* REC support flag */ - u32 write_xfer_rdy : 1; /* WRITE Xfer_Rdy disable or enable */ - u32 sgl_num : 1; /* Double or single SGL, 1: double; 0: single */ - } dw; - u32 pctxt_val1; - } sw_ctxt_config; - struct immi_dif_info immi_dif_info; /* immediate data dif control info(20B) */ -}; - -struct spfc_hw_rsvd_queue { - /* bitmap[0]:255-192 */ - /* bitmap[1]:191-128 */ - /* bitmap[2]:127-64 */ - /* bitmap[3]:63-0 */ - u64 seq_id_bitmap[4]; - struct { - u64 last_req_seq_id : 8; - u64 xid : 20; - u64 rsvd0 : 36; - } wd0; -}; - -struct spfc_sq_qinfo { - u64 rsvd_0 : 10; - u64 pmsn_type : 1; /* 0: get pmsn from queue header; 1: get pmsn from ucode */ - u64 rsvd_1 : 4; - u64 cur_wqe_o : 1; /* should be opposite from loop_o */ - u64 rsvd_2 : 48; - - u64 cur_sqe_gpa; - u64 pmsn_gpa; /* sq's queue header gpa */ - - u64 sqe_dmaattr_idx : 6; - u64 sq_so_ro : 2; - u64 rsvd_3 : 2; - u64 ring : 1; /* 0: link; 1: ring */ - u64 loop_o : 1; /* init to be the first round o-bit */ - u64 rsvd_4 : 4; - u64 zerocopy_dmaattr_idx : 6; - u64 zerocopy_so_ro : 2; - u64 parity : 8; - u64 r : 1; - u64 s : 1; - u64 enable_256 : 1; - u64 rsvd_5 : 23; - u64 pcie_template : 6; -}; - -struct spfc_cq_qinfo { - u64 pcie_template_hi : 3; - u64 parity_2 : 1; - u64 cur_cqe_gpa : 60; - - u64 pi : 15; - u64 pi_o : 1; - u64 ci : 15; - u64 ci_o : 1; - u64 c_eqn_msi_x : 10; /* if init_mode = 2, is msi/msi-x; other the low-5-bit means c_eqn */ - u64 parity_1 : 1; - u64 ci_type : 1; /* 0: get ci from queue header; 1: get ci from ucode */ - u64 cq_depth : 3; /* valid when ring = 1 */ - u64 armq : 1; /* 0: IDLE state; 1: NEXT state */ - u64 cur_cqe_cnt : 8; - u64 cqe_max_cnt : 8; - - u64 cqe_dmaattr_idx : 6; - u64 cq_so_ro : 2; - u64 
init_mode : 2; /* 1: armQ; 2: msi/msi-x; others: rsvd */ - u64 next_o : 1; /* next pate valid o-bit */ - u64 loop_o : 1; /* init to be the first round o-bit */ - u64 next_cq_wqe_page_gpa : 52; - - u64 pcie_template_lo : 3; - u64 parity_0 : 1; - u64 ci_gpa : 60; /* cq's queue header gpa */ -}; - -struct spfc_scq_qinfo { - union { - struct { - u64 scq_n : 20; /* scq number */ - u64 sq_min_preld_cache_num : 4; - u64 sq_th0_preld_cache_num : 5; - u64 sq_th1_preld_cache_num : 5; - u64 sq_th2_preld_cache_num : 5; - u64 rq_min_preld_cache_num : 4; - u64 rq_th0_preld_cache_num : 5; - u64 rq_th1_preld_cache_num : 5; - u64 rq_th2_preld_cache_num : 5; - u64 parity : 6; - } info; - - u64 pctxt_val1; - } hw_scqc_config; -}; - -struct spfc_srq_qinfo { - u64 parity : 4; - u64 srqc_gpa : 60; -}; - -/* here is the layout of service type 12/13 */ -struct spfc_parent_context { - u8 key[SPFC_PARENT_CONTEXT_KEY_ALIGN_SIZE]; - struct spfc_scq_qinfo resp_scq_qinfo; - struct spfc_srq_qinfo imm_srq_info; - struct spfc_sq_qinfo sq_qinfo; - u8 timer_section[SPFC_PARENT_CONTEXT_TIMER_SIZE]; - struct spfc_hw_rsvd_queue hw_rsvdq; - struct spfc_srq_qinfo els_srq_info; - struct spfc_sw_section sw_section; -}; - -/* here is the layout of service type 13 */ -struct spfc_ssq_parent_context { - u8 rsvd0[64]; - struct spfc_sq_qinfo sq1_qinfo; - u8 rsvd1[32]; - struct spfc_sq_qinfo sq2_qinfo; - u8 rsvd2[32]; - struct spfc_sq_qinfo sq3_qinfo; - struct spfc_scq_qinfo sq_pretchinfo; - u8 rsvd3[24]; -}; - -/* FC Key Section */ -struct spfc_fc_key_section { - u32 xid_h : 4; - u32 key_size : 2; - u32 rsvd1 : 1; - u32 srv_type : 5; - u32 csize : 2; - u32 rsvd0 : 17; - u32 v : 1; - - u32 tag_fp_h : 4; - u32 rsvd2 : 12; - u32 xid_l : 16; - - u16 tag_fp_l; - u8 smac[6]; /* Source MAC */ - u8 dmac[6]; /* Dest MAC */ - u8 sid[3]; /* Source FC ID */ - u8 did[3]; /* Dest FC ID */ - u8 svlan[4]; /* Svlan */ - u8 cvlan[4]; /* Cvlan */ - - u32 next_ptr_h; -}; - -#endif diff --git a/drivers/scsi/spfc/hw/spfc_queue.c b/drivers/scsi/spfc/hw/spfc_queue.c deleted file mode 100644 index fa4295832da7..000000000000 --- a/drivers/scsi/spfc/hw/spfc_queue.c +++ /dev/null @@ -1,4852 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "spfc_queue.h" -#include "unf_log.h" -#include "unf_lport.h" -#include "spfc_module.h" -#include "spfc_utils.h" -#include "spfc_service.h" -#include "spfc_chipitf.h" -#include "spfc_parent_context.h" -#include "sphw_hw.h" -#include "sphw_crm.h" - -#define SPFC_UCODE_CMD_MODIFY_QUEUE_CONTEXT 0 - -#define SPFC_DONE_MASK (0x00000001) -#define SPFC_OWNER_MASK (0x80000000) - -#define SPFC_SQ_LINK_PRE (1 << 2) - -#define SPFC_SQ_HEADER_ADDR_ALIGN_SIZE (64) -#define SPFC_SQ_HEADER_ADDR_ALIGN_SIZE_MASK (SPFC_SQ_HEADER_ADDR_ALIGN_SIZE - 1) - -#define SPFC_ADDR_64_ALIGN(addr) \ - (((addr) + (SPFC_SQ_HEADER_ADDR_ALIGN_SIZE_MASK)) & \ - ~(SPFC_SQ_HEADER_ADDR_ALIGN_SIZE_MASK)) - -u32 spfc_get_parity_value(u64 *src_data, u32 row, u32 col) -{ - u32 i = 0; - u32 j = 0; - u32 offset = 0; - u32 group = 0; - u32 bit_offset = 0; - u32 bit_val = 0; - u32 tmp_val = 0; - u32 dest_data = 0; - - for (i = 0; i < row; i++) { - for (j = 0; j < col; j++) { - offset = (row * j + i); - group = offset / (sizeof(src_data[ARRAY_INDEX_0]) * UNF_BITS_PER_BYTE); - bit_offset = offset % (sizeof(src_data[ARRAY_INDEX_0]) * UNF_BITS_PER_BYTE); - tmp_val = (src_data[group] >> bit_offset) & SPFC_PARITY_MASK; - - if (j == 0) { - bit_val = tmp_val; - continue; - } - - bit_val ^= tmp_val; - } - - bit_val = 
(~bit_val) & SPFC_PARITY_MASK; - - dest_data |= (bit_val << i); - } - - return dest_data; -} - -static void spfc_update_producer_info(u16 q_depth, u16 *pus_pi, u16 *pus_owner) -{ - u16 current_pi = 0; - u16 next_pi = 0; - u16 owner = 0; - - current_pi = *pus_pi; - next_pi = current_pi + 1; - - if (next_pi < q_depth) { - *pus_pi = next_pi; - } else { - /* PI reversal */ - *pus_pi = 0; - - /* obit reversal */ - owner = *pus_owner; - *pus_owner = !owner; - } -} - -static void spfc_update_consumer_info(u16 q_depth, u16 *pus_ci, u16 *pus_owner) -{ - u16 current_ci = 0; - u16 next_ci = 0; - u16 owner = 0; - - current_ci = *pus_ci; - next_ci = current_ci + 1; - - if (next_ci < q_depth) { - *pus_ci = next_ci; - } else { - /* CI reversal */ - *pus_ci = 0; - - /* obit reversal */ - owner = *pus_owner; - *pus_owner = !owner; - } -} - -static inline void spfc_update_cq_header(struct ci_record *ci_record, u16 ci, - u16 owner) -{ - u32 size = 0; - struct ci_record record = {0}; - - size = sizeof(struct ci_record); - memcpy(&record, ci_record, size); - spfc_big_to_cpu64(&record, size); - record.cmsn = ci + (u16)(owner << SPFC_CQ_HEADER_OWNER_SHIFT); - record.dump_cmsn = record.cmsn; - spfc_cpu_to_big64(&record, size); - - wmb(); - memcpy(ci_record, &record, size); -} - -static void spfc_update_srq_header(struct db_record *pmsn_record, u16 pmsn) -{ - u32 size = 0; - struct db_record record = {0}; - - size = sizeof(struct db_record); - memcpy(&record, pmsn_record, size); - spfc_big_to_cpu64(&record, size); - record.pmsn = pmsn; - record.dump_pmsn = record.pmsn; - spfc_cpu_to_big64(&record, sizeof(struct db_record)); - - wmb(); - memcpy(pmsn_record, &record, size); -} - -static void spfc_set_srq_wqe_owner_be(struct spfc_wqe_ctrl *sqe_ctrl_in_wp, - u32 owner) -{ - struct spfc_wqe_ctrl_ch wqe_ctrl_ch; - - mb(); - - wqe_ctrl_ch.ctrl_ch_val = be32_to_cpu(sqe_ctrl_in_wp->ch.ctrl_ch_val); - wqe_ctrl_ch.wd0.owner = owner; - sqe_ctrl_in_wp->ch.ctrl_ch_val = cpu_to_be32(wqe_ctrl_ch.ctrl_ch_val); - - mb(); -} - -static inline void spfc_set_sq_wqe_owner_be(void *sqe) -{ - u32 *sqe_dw = (u32 *)sqe; - u32 *e_sqe_dw = (u32 *)((u8 *)sqe + SPFC_EXTEND_WQE_OFFSET); - - /* Ensure that the write of WQE is complete */ - mb(); - e_sqe_dw[SPFC_SQE_SECOND_OBIT_DW_POS] |= SPFC_SQE_OBIT_SET_MASK_BE; - e_sqe_dw[SPFC_SQE_FIRST_OBIT_DW_POS] |= SPFC_SQE_OBIT_SET_MASK_BE; - sqe_dw[SPFC_SQE_SECOND_OBIT_DW_POS] |= SPFC_SQE_OBIT_SET_MASK_BE; - sqe_dw[SPFC_SQE_FIRST_OBIT_DW_POS] |= SPFC_SQE_OBIT_SET_MASK_BE; - mb(); -} - -void spfc_clear_sq_wqe_owner_be(struct spfc_sqe *sqe) -{ - u32 *sqe_dw = (u32 *)sqe; - u32 *e_sqe_dw = (u32 *)((u8 *)sqe + SPFC_EXTEND_WQE_OFFSET); - - mb(); - sqe_dw[SPFC_SQE_SECOND_OBIT_DW_POS] &= SPFC_SQE_OBIT_CLEAR_MASK_BE; - mb(); - sqe_dw[SPFC_SQE_FIRST_OBIT_DW_POS] &= SPFC_SQE_OBIT_CLEAR_MASK_BE; - e_sqe_dw[SPFC_SQE_SECOND_OBIT_DW_POS] &= SPFC_SQE_OBIT_CLEAR_MASK_BE; - e_sqe_dw[SPFC_SQE_FIRST_OBIT_DW_POS] &= SPFC_SQE_OBIT_CLEAR_MASK_BE; -} - -static void spfc_set_direct_wqe_owner_be(void *sqe, u16 owner) -{ - if (owner) - spfc_set_sq_wqe_owner_be(sqe); - else - spfc_clear_sq_wqe_owner_be(sqe); -} - -static void spfc_set_srq_link_wqe_owner_be(struct spfc_linkwqe *link_wqe, - u32 owner, u16 pmsn) -{ - struct spfc_linkwqe local_lw; - - mb(); - local_lw.val_wd1 = be32_to_cpu(link_wqe->val_wd1); - local_lw.wd1.msn = pmsn; - local_lw.wd1.dump_msn = (local_lw.wd1.msn & SPFC_LOCAL_LW_WD1_DUMP_MSN_MASK); - link_wqe->val_wd1 = cpu_to_be32(local_lw.val_wd1); - - local_lw.val_wd0 = be32_to_cpu(link_wqe->val_wd0); - 
local_lw.wd0.o = owner; - link_wqe->val_wd0 = cpu_to_be32(local_lw.val_wd0); - mb(); -} - -static inline bool spfc_is_scq_link_wqe(struct spfc_scq_info *scq_info) -{ - u16 custom_scqe_num = 0; - - custom_scqe_num = scq_info->ci + 1; - - if ((custom_scqe_num % scq_info->wqe_num_per_buf == 0) || - scq_info->valid_wqe_num == custom_scqe_num) - return true; - else - return false; -} - -static struct spfc_wqe_page * -spfc_add_tail_wqe_page(struct spfc_parent_ssq_info *ssq) -{ - struct spfc_hba_info *hba = NULL; - struct spfc_wqe_page *esgl = NULL; - struct list_head *free_list_head = NULL; - ulong flag = 0; - - hba = (struct spfc_hba_info *)ssq->hba; - - spin_lock_irqsave(&hba->sq_wpg_pool.wpg_pool_lock, flag); - - /* Get a WqePage from hba->sq_wpg_pool.list_free_wpg_pool, and add to - * sq.list_SqTailWqePage - */ - if (!list_empty(&hba->sq_wpg_pool.list_free_wpg_pool)) { - free_list_head = UNF_OS_LIST_NEXT(&hba->sq_wpg_pool.list_free_wpg_pool); - list_del(free_list_head); - list_add_tail(free_list_head, &ssq->list_linked_list_sq); - esgl = list_entry(free_list_head, struct spfc_wqe_page, entry_wpg); - - /* WqePage Pool counter */ - atomic_inc(&hba->sq_wpg_pool.wpg_in_use); - } else { - esgl = NULL; - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]SQ pool is empty when SQ(0x%x) try to get wqe page", - ssq->sqn); - SPFC_HBA_STAT(hba, SPFC_STAT_SQ_POOL_EMPTY); - } - - spin_unlock_irqrestore(&hba->sq_wpg_pool.wpg_pool_lock, flag); - - return esgl; -} - -static inline struct spfc_sqe *spfc_get_wqe_page_entry(struct spfc_wqe_page *wpg, - u32 wqe_offset) -{ - struct spfc_sqe *sqe_wpg = NULL; - - sqe_wpg = (struct spfc_sqe *)(wpg->wpg_addr); - sqe_wpg += wqe_offset; - - return sqe_wpg; -} - -static void spfc_free_head_wqe_page(struct spfc_parent_ssq_info *ssq) -{ - struct spfc_hba_info *hba = NULL; - struct spfc_wqe_page *sq_wpg = NULL; - struct list_head *entry_head_wqe_page = NULL; - ulong flag = 0; - - atomic_dec(&ssq->wqe_page_cnt); - - hba = (struct spfc_hba_info *)ssq->hba; - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "Port(0x%x) free wqe page nowpagecnt:%d", - hba->port_cfg.port_id, - atomic_read(&ssq->wqe_page_cnt)); - sq_wpg = SPFC_GET_SQ_HEAD(ssq); - - memset((void *)sq_wpg->wpg_addr, WQE_MARKER_0, hba->sq_wpg_pool.wpg_size); - - spin_lock_irqsave(&hba->sq_wpg_pool.wpg_pool_lock, flag); - entry_head_wqe_page = &sq_wpg->entry_wpg; - list_del(entry_head_wqe_page); - list_add_tail(entry_head_wqe_page, &hba->sq_wpg_pool.list_free_wpg_pool); - - /* WqePage Pool counter */ - atomic_dec(&hba->sq_wpg_pool.wpg_in_use); - spin_unlock_irqrestore(&hba->sq_wpg_pool.wpg_pool_lock, flag); -} - -static void spfc_free_link_list_wpg(struct spfc_parent_ssq_info *ssq) -{ - ulong flag = 0; - struct spfc_hba_info *hba = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - struct list_head *entry_head_wqe_page = NULL; - struct spfc_wqe_page *sq_wpg = NULL; - - hba = (struct spfc_hba_info *)ssq->hba; - - list_for_each_safe(node, next_node, &ssq->list_linked_list_sq) { - sq_wpg = list_entry(node, struct spfc_wqe_page, entry_wpg); - memset((void *)sq_wpg->wpg_addr, WQE_MARKER_0, hba->sq_wpg_pool.wpg_size); - - spin_lock_irqsave(&hba->sq_wpg_pool.wpg_pool_lock, flag); - entry_head_wqe_page = &sq_wpg->entry_wpg; - list_del(entry_head_wqe_page); - list_add_tail(entry_head_wqe_page, &hba->sq_wpg_pool.list_free_wpg_pool); - - /* WqePage Pool counter */ - atomic_dec(&ssq->wqe_page_cnt); - atomic_dec(&hba->sq_wpg_pool.wpg_in_use); - - spin_unlock_irqrestore(&hba->sq_wpg_pool.wpg_pool_lock, flag); - 
} - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[info]Port(0x%x) RPort(0x%x) Sq(0x%x) link list destroyed, Sq.WqePageCnt=0x%x, SqWpgPool.wpg_in_use=0x%x", - hba->port_cfg.port_id, ssq->sqn, ssq->context_id, - atomic_read(&ssq->wqe_page_cnt), atomic_read(&hba->sq_wpg_pool.wpg_in_use)); -} - -struct spfc_wqe_page * -spfc_add_one_wqe_page(struct spfc_parent_ssq_info *ssq) -{ - u32 wqe_inx = 0; - struct spfc_wqe_page *wqe_page = NULL; - struct spfc_sqe *sqe_in_wp = NULL; - struct spfc_linkwqe *link_wqe_in_wpg = NULL; - struct spfc_linkwqe link_wqe; - - /* Add a new Wqe Page */ - wqe_page = spfc_add_tail_wqe_page(ssq); - - if (!wqe_page) - return NULL; - - for (wqe_inx = 0; wqe_inx <= ssq->wqe_num_per_buf; wqe_inx++) { - sqe_in_wp = spfc_get_wqe_page_entry(wqe_page, wqe_inx); - sqe_in_wp->ctrl_sl.ch.ctrl_ch_val = 0; - sqe_in_wp->ectrl_sl.ch.ctrl_ch_val = 0; - } - - /* Set last WqePage as linkwqe */ - link_wqe_in_wpg = (struct spfc_linkwqe *)spfc_get_wqe_page_entry(wqe_page, - ssq->wqe_num_per_buf); - link_wqe.val_wd0 = 0; - link_wqe.val_wd1 = 0; - link_wqe.next_page_addr_hi = (ssq->queue_style == SPFC_QUEUE_RING_STYLE) - ? SPFC_MSD(wqe_page->wpg_phy_addr) - : 0; - link_wqe.next_page_addr_lo = (ssq->queue_style == SPFC_QUEUE_RING_STYLE) - ? SPFC_LSD(wqe_page->wpg_phy_addr) - : 0; - link_wqe.wd0.wf = CQM_WQE_WF_LINK; - link_wqe.wd0.ctrlsl = CQM_LINK_WQE_CTRLSL_VALUE; - link_wqe.wd0.o = !(ssq->last_pi_owner); - link_wqe.wd1.lp = (ssq->queue_style == SPFC_QUEUE_RING_STYLE) - ? CQM_LINK_WQE_LP_VALID - : CQM_LINK_WQE_LP_INVALID; - spfc_cpu_to_big32(&link_wqe, sizeof(struct spfc_linkwqe)); - memcpy(link_wqe_in_wpg, &link_wqe, sizeof(struct spfc_linkwqe)); - memcpy((u8 *)link_wqe_in_wpg + SPFC_EXTEND_WQE_OFFSET, - &link_wqe, sizeof(struct spfc_linkwqe)); - - return wqe_page; -} - -static inline struct spfc_scqe_type * -spfc_get_scq_entry(struct spfc_scq_info *scq_info) -{ - u32 buf_id = 0; - u16 buf_offset = 0; - u16 ci = 0; - struct cqm_buf_list *buf = NULL; - - FC_CHECK_RETURN_VALUE(scq_info, NULL); - - ci = scq_info->ci; - buf_id = ci / scq_info->wqe_num_per_buf; - buf = &scq_info->cqm_scq_info->q_room_buf_1.buf_list[buf_id]; - buf_offset = (u16)(ci % scq_info->wqe_num_per_buf); - - return (struct spfc_scqe_type *)(buf->va) + buf_offset; -} - -static inline bool spfc_is_cqe_done(u32 *done, u32 *owner, u16 driver_owner) -{ - return ((((u16)(!!(*done & SPFC_DONE_MASK)) == driver_owner) && - ((u16)(!!(*owner & SPFC_OWNER_MASK)) == driver_owner)) ? 
true : false); -} - -u32 spfc_process_scq_cqe_entity(ulong info, u32 proc_cnt) -{ - u32 ret = UNF_RETURN_ERROR; - u32 index = 0; - struct wq_header *queue_header = NULL; - struct spfc_scqe_type *scqe = NULL; - struct spfc_scqe_type tmp_scqe; - struct spfc_scq_info *scq_info = (struct spfc_scq_info *)info; - - FC_CHECK_RETURN_VALUE(scq_info, ret); - SPFC_FUNCTION_ENTER; - - queue_header = (struct wq_header *)(void *)(scq_info->cqm_scq_info->q_header_vaddr); - - for (index = 0; index < proc_cnt;) { - /* If linked wqe, then update CI */ - if (spfc_is_scq_link_wqe(scq_info)) { - spfc_update_consumer_info(scq_info->valid_wqe_num, - &scq_info->ci, - &scq_info->ci_owner); - spfc_update_cq_header(&queue_header->ci_record, - scq_info->ci, scq_info->ci_owner); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, - UNF_INFO, - "[info]Current wqe is a linked wqe"); - continue; - } - - /* Get SCQE and then check obit & donebit whether been set */ - scqe = spfc_get_scq_entry(scq_info); - if (unlikely(!scqe)) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Scqe is NULL"); - break; - } - - if (!spfc_is_cqe_done((u32 *)(void *)&scqe->wd0, - (u32 *)(void *)&scqe->ch.wd0, - scq_info->ci_owner)) { - atomic_set(&scq_info->flush_stat, SPFC_QUEUE_FLUSH_DONE); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, - UNF_INFO, "[info]Now has no valid scqe"); - break; - } - - /* rmb & do memory copy */ - rmb(); - memcpy(&tmp_scqe, scqe, sizeof(struct spfc_scqe_type)); - /* process SCQ entry */ - ret = spfc_rcv_scq_entry_from_scq(scq_info->hba, (void *)&tmp_scqe, - scq_info->queue_id); - if (unlikely(ret != RETURN_OK)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]QueueId(0x%x) scqn(0x%x) scqe process error at CI(0x%x)", - scq_info->queue_id, scq_info->scqn, scq_info->ci); - } - - /* Update Driver's CI & Obit */ - spfc_update_consumer_info(scq_info->valid_wqe_num, - &scq_info->ci, &scq_info->ci_owner); - spfc_update_cq_header(&queue_header->ci_record, scq_info->ci, - scq_info->ci_owner); - index++; - } - - /* Re-schedule again if necessary */ - if (index == proc_cnt) - tasklet_schedule(&scq_info->tasklet); - - SPFC_FUNCTION_RETURN; - - return index; -} - -void spfc_set_scq_irg_cfg(struct spfc_hba_info *hba, u32 mode, u16 msix_index) -{ -#define SPFC_POLLING_MODE_ITERRUPT_PENDING_CNT 5 -#define SPFC_POLLING_MODE_ITERRUPT_COALESC_TIMER_CFG 10 - u8 pending_limt = 0; - u8 coalesc_timer_cfg = 0; - - struct interrupt_info info = {0}; - - if (mode != SPFC_SCQ_INTR_LOW_LATENCY_MODE) { - pending_limt = SPFC_POLLING_MODE_ITERRUPT_PENDING_CNT; - coalesc_timer_cfg = - SPFC_POLLING_MODE_ITERRUPT_COALESC_TIMER_CFG; - } - - memset(&info, 0, sizeof(info)); - info.interrupt_coalesc_set = 1; - info.lli_set = 0; - info.pending_limt = pending_limt; - info.coalesc_timer_cfg = coalesc_timer_cfg; - info.resend_timer_cfg = 0; - info.msix_index = msix_index; - - sphw_set_interrupt_cfg(hba->dev_handle, info, SPHW_CHANNEL_FC); -} - -void spfc_process_scq_cqe(ulong info) -{ - struct spfc_scq_info *scq_info = (struct spfc_scq_info *)info; - - FC_CHECK_RETURN_VOID(scq_info); - - spfc_process_scq_cqe_entity(info, SPFC_CQE_MAX_PROCESS_NUM_PER_INTR); -} - -irqreturn_t spfc_scq_irq(int irq, void *scq_info) -{ - SPFC_FUNCTION_ENTER; - - FC_CHECK_RETURN_VALUE(scq_info, IRQ_NONE); - - tasklet_schedule(&((struct spfc_scq_info *)scq_info)->tasklet); - - SPFC_FUNCTION_RETURN; - - return IRQ_HANDLED; -} - -static u32 spfc_alloc_scq_int(struct spfc_scq_info *scq_info) -{ - int ret = UNF_RETURN_ERROR_S32; - u16 act_num = 0; - struct irq_info irq_info; - struct spfc_hba_info *hba = NULL; 
- - FC_CHECK_RETURN_VALUE(scq_info, UNF_RETURN_ERROR); - - /* 1. Alloc & check SCQ IRQ */ - hba = (struct spfc_hba_info *)(scq_info->hba); - ret = sphw_alloc_irqs(hba->dev_handle, SERVICE_T_FC, SPFC_INT_NUM_PER_QUEUE, - &irq_info, &act_num); - if (ret != RETURN_OK || act_num != SPFC_INT_NUM_PER_QUEUE) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Allocate scq irq failed, return %d", ret); - return UNF_RETURN_ERROR; - } - - if (irq_info.msix_entry_idx >= SPFC_SCQ_INT_ID_MAX) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]SCQ irq id exceed %d, msix_entry_idx %d", - SPFC_SCQ_INT_ID_MAX, irq_info.msix_entry_idx); - sphw_free_irq(hba->dev_handle, SERVICE_T_FC, irq_info.irq_id); - return UNF_RETURN_ERROR; - } - - scq_info->irq_id = (u32)(irq_info.irq_id); - scq_info->msix_entry_idx = (u16)(irq_info.msix_entry_idx); - - snprintf(scq_info->irq_name, SPFC_IRQ_NAME_MAX, "fc_scq%u_%x_msix%u", - scq_info->queue_id, hba->port_cfg.port_id, scq_info->msix_entry_idx); - - /* 2. SCQ IRQ tasklet init */ - tasklet_init(&scq_info->tasklet, spfc_process_scq_cqe, (ulong)(uintptr_t)scq_info); - - /* 3. Request IRQ for SCQ */ - ret = request_irq(scq_info->irq_id, spfc_scq_irq, 0, scq_info->irq_name, scq_info); - - sphw_set_msix_state(hba->dev_handle, scq_info->msix_entry_idx, SPHW_MSIX_ENABLE); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Request SCQ irq failed, SCQ Index = %u, return %d", - scq_info->queue_id, ret); - sphw_free_irq(hba->dev_handle, SERVICE_T_FC, scq_info->irq_id); - memset(scq_info->irq_name, 0, SPFC_IRQ_NAME_MAX); - scq_info->irq_id = 0; - scq_info->msix_entry_idx = 0; - return UNF_RETURN_ERROR; - } - - return RETURN_OK; -} - -static void spfc_free_scq_int(struct spfc_scq_info *scq_info) -{ - struct spfc_hba_info *hba = NULL; - - FC_CHECK_RETURN_VOID(scq_info); - - hba = (struct spfc_hba_info *)(scq_info->hba); - sphw_set_msix_state(hba->dev_handle, scq_info->msix_entry_idx, SPHW_MSIX_DISABLE); - free_irq(scq_info->irq_id, scq_info); - tasklet_kill(&scq_info->tasklet); - sphw_free_irq(hba->dev_handle, SERVICE_T_FC, scq_info->irq_id); - memset(scq_info->irq_name, 0, SPFC_IRQ_NAME_MAX); - scq_info->irq_id = 0; - scq_info->msix_entry_idx = 0; -} - -static void spfc_init_scq_info(struct spfc_hba_info *hba, struct cqm_queue *cqm_scq, - u32 queue_id, struct spfc_scq_info **scq_info) -{ - FC_CHECK_RETURN_VOID(hba); - FC_CHECK_RETURN_VOID(cqm_scq); - FC_CHECK_RETURN_VOID(scq_info); - - *scq_info = &hba->scq_info[queue_id]; - (*scq_info)->queue_id = queue_id; - (*scq_info)->scqn = cqm_scq->index; - (*scq_info)->hba = (void *)hba; - - (*scq_info)->cqm_scq_info = cqm_scq; - (*scq_info)->wqe_num_per_buf = - cqm_scq->q_room_buf_1.buf_size / SPFC_SCQE_SIZE; - (*scq_info)->wqe_size = SPFC_SCQE_SIZE; - (*scq_info)->valid_wqe_num = (SPFC_SCQ_IS_STS(queue_id) ? SPFC_STS_SCQ_DEPTH - : SPFC_CMD_SCQ_DEPTH); - (*scq_info)->scqc_cq_depth = (SPFC_SCQ_IS_STS(queue_id) ? 
SPFC_STS_SCQC_CQ_DEPTH - : SPFC_CMD_SCQC_CQ_DEPTH); - (*scq_info)->scqc_ci_type = SPFC_STS_SCQ_CI_TYPE; - (*scq_info)->ci = 0; - (*scq_info)->ci_owner = 1; -} - -static void spfc_init_scq_header(struct wq_header *queue_header) -{ - FC_CHECK_RETURN_VOID(queue_header); - - memset(queue_header, 0, sizeof(struct wq_header)); - - /* Obit default is 1 */ - queue_header->db_record.pmsn = 1 << UNF_SHIFT_15; - queue_header->db_record.dump_pmsn = queue_header->db_record.pmsn; - queue_header->ci_record.cmsn = 1 << UNF_SHIFT_15; - queue_header->ci_record.dump_cmsn = queue_header->ci_record.cmsn; - - /* Big endian convert */ - spfc_cpu_to_big64((void *)queue_header, sizeof(struct wq_header)); -} - -static void spfc_cfg_scq_ctx(struct spfc_scq_info *scq_info, - struct spfc_cq_qinfo *scq_ctx) -{ - struct cqm_queue *cqm_scq_info = NULL; - struct spfc_queue_info_bus queue_bus; - u64 parity = 0; - - FC_CHECK_RETURN_VOID(scq_info); - - cqm_scq_info = scq_info->cqm_scq_info; - - scq_ctx->pcie_template_hi = 0; - scq_ctx->cur_cqe_gpa = cqm_scq_info->q_room_buf_1.buf_list->pa >> SPFC_CQE_GPA_SHIFT; - scq_ctx->pi = 0; - scq_ctx->pi_o = 1; - scq_ctx->ci = scq_info->ci; - scq_ctx->ci_o = scq_info->ci_owner; - scq_ctx->c_eqn_msi_x = scq_info->msix_entry_idx; - scq_ctx->ci_type = scq_info->scqc_ci_type; - scq_ctx->cq_depth = scq_info->scqc_cq_depth; - scq_ctx->armq = SPFC_ARMQ_IDLE; - scq_ctx->cur_cqe_cnt = 0; - scq_ctx->cqe_max_cnt = 0; - scq_ctx->cqe_dmaattr_idx = 0; - scq_ctx->cq_so_ro = 0; - scq_ctx->init_mode = SPFC_CQ_INT_MODE; - scq_ctx->next_o = 1; - scq_ctx->loop_o = 1; - scq_ctx->next_cq_wqe_page_gpa = cqm_scq_info->q_room_buf_1.buf_list[ARRAY_INDEX_1].pa >> - SPFC_NEXT_CQE_GPA_SHIFT; - scq_ctx->pcie_template_lo = 0; - - scq_ctx->ci_gpa = (cqm_scq_info->q_header_paddr + offsetof(struct wq_header, ci_record)) >> - SPFC_CQE_GPA_SHIFT; - - memset(&queue_bus, 0, sizeof(struct spfc_queue_info_bus)); - queue_bus.bus[ARRAY_INDEX_0] |= ((u64)(scq_info->scqn & SPFC_SCQN_MASK)); /* bits 20 */ - queue_bus.bus[ARRAY_INDEX_0] |= (((u64)(scq_ctx->pcie_template_lo)) << UNF_SHIFT_20); - queue_bus.bus[ARRAY_INDEX_0] |= (((u64)(scq_ctx->ci_gpa & SPFC_SCQ_CTX_CI_GPA_MASK)) << - UNF_SHIFT_23); /* bits 28 */ - queue_bus.bus[ARRAY_INDEX_0] |= (((u64)(scq_ctx->cqe_dmaattr_idx)) << UNF_SHIFT_51); - queue_bus.bus[ARRAY_INDEX_0] |= (((u64)(scq_ctx->cq_so_ro)) << UNF_SHIFT_57); /* bits 2 */ - queue_bus.bus[ARRAY_INDEX_0] |= (((u64)(scq_ctx->init_mode)) << UNF_SHIFT_59); /* bits 2 */ - queue_bus.bus[ARRAY_INDEX_0] |= (((u64)(scq_ctx->c_eqn_msi_x & - SPFC_SCQ_CTX_C_EQN_MSI_X_MASK)) << UNF_SHIFT_61); - queue_bus.bus[ARRAY_INDEX_1] |= ((u64)(scq_ctx->c_eqn_msi_x >> UNF_SHIFT_3)); /* bits 7 */ - queue_bus.bus[ARRAY_INDEX_1] |= (((u64)(scq_ctx->ci_type)) << UNF_SHIFT_7); /* bits 1 */ - queue_bus.bus[ARRAY_INDEX_1] |= (((u64)(scq_ctx->cq_depth)) << UNF_SHIFT_8); /* bits 3 */ - queue_bus.bus[ARRAY_INDEX_1] |= (((u64)(scq_ctx->cqe_max_cnt)) << UNF_SHIFT_11); - queue_bus.bus[ARRAY_INDEX_1] |= (((u64)(scq_ctx->pcie_template_hi)) << UNF_SHIFT_19); - - parity = spfc_get_parity_value(queue_bus.bus, SPFC_SCQC_BUS_ROW, SPFC_SCQC_BUS_COL); - scq_ctx->parity_0 = parity & SPFC_PARITY_MASK; - scq_ctx->parity_1 = (parity >> UNF_SHIFT_1) & SPFC_PARITY_MASK; - scq_ctx->parity_2 = (parity >> UNF_SHIFT_2) & SPFC_PARITY_MASK; - - spfc_cpu_to_big64((void *)scq_ctx, sizeof(struct spfc_cq_qinfo)); -} - -static u32 spfc_creat_scqc_via_cmdq_sync(struct spfc_hba_info *hba, - struct spfc_cq_qinfo *scqc, u32 scqn) -{ -#define SPFC_INIT_SCQC_TIMEOUT 3000 - int ret; - 
u32 covrt_size; - struct spfc_cmdqe_creat_scqc init_scqc_cmd; - struct sphw_cmd_buf *cmdq_in_buf; - - cmdq_in_buf = sphw_alloc_cmd_buf(hba->dev_handle); - if (!cmdq_in_buf) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]cmdq in_cmd_buf alloc failed"); - - SPFC_ERR_IO_STAT(hba, SPFC_TASK_T_INIT_SCQC); - return UNF_RETURN_ERROR; - } - - memset(&init_scqc_cmd, 0, sizeof(init_scqc_cmd)); - init_scqc_cmd.wd0.task_type = SPFC_TASK_T_INIT_SCQC; - init_scqc_cmd.wd1.scqn = SPFC_LSW(scqn); - covrt_size = sizeof(init_scqc_cmd) - sizeof(init_scqc_cmd.scqc); - spfc_cpu_to_big32(&init_scqc_cmd, covrt_size); - - /* scqc is already big endian */ - memcpy(init_scqc_cmd.scqc, scqc, sizeof(*scqc)); - memcpy(cmdq_in_buf->buf, &init_scqc_cmd, sizeof(init_scqc_cmd)); - cmdq_in_buf->size = sizeof(init_scqc_cmd); - - ret = sphw_cmdq_detail_resp(hba->dev_handle, COMM_MOD_FC, 0, - cmdq_in_buf, NULL, NULL, - SPFC_INIT_SCQC_TIMEOUT, SPHW_CHANNEL_FC); - sphw_free_cmd_buf(hba->dev_handle, cmdq_in_buf); - if (ret) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Send creat scqc via cmdq failed, ret=%d", - ret); - - SPFC_ERR_IO_STAT(hba, SPFC_TASK_T_INIT_SCQC); - return UNF_RETURN_ERROR; - } - - SPFC_IO_STAT(hba, SPFC_TASK_T_INIT_SCQC); - - return RETURN_OK; -} - -static u32 spfc_delete_ssqc_via_cmdq_sync(struct spfc_hba_info *hba, u32 xid, - u64 context_gpa, u32 entry_count) -{ -#define SPFC_DELETE_SSQC_TIMEOUT 3000 - int ret = RETURN_OK; - struct spfc_cmdqe_delete_ssqc delete_ssqc_cmd; - struct sphw_cmd_buf *cmdq_in_buf = NULL; - - cmdq_in_buf = sphw_alloc_cmd_buf(hba->dev_handle); - if (!cmdq_in_buf) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]cmdq in_cmd_buf alloc failed"); - return UNF_RETURN_ERROR; - } - - memset(&delete_ssqc_cmd, 0, sizeof(delete_ssqc_cmd)); - delete_ssqc_cmd.wd0.task_type = SPFC_TASK_T_CLEAR_SSQ_CONTEXT; - delete_ssqc_cmd.wd0.xid = xid; - delete_ssqc_cmd.wd0.entry_count = entry_count; - delete_ssqc_cmd.wd1.scqn = SPFC_LSW(0); - delete_ssqc_cmd.context_gpa_hi = SPFC_HIGH_32_BITS(context_gpa); - delete_ssqc_cmd.context_gpa_lo = SPFC_LOW_32_BITS(context_gpa); - spfc_cpu_to_big32(&delete_ssqc_cmd, sizeof(delete_ssqc_cmd)); - memcpy(cmdq_in_buf->buf, &delete_ssqc_cmd, sizeof(delete_ssqc_cmd)); - cmdq_in_buf->size = sizeof(delete_ssqc_cmd); - - ret = sphw_cmdq_detail_resp(hba->dev_handle, COMM_MOD_FC, 0, - cmdq_in_buf, NULL, NULL, - SPFC_DELETE_SSQC_TIMEOUT, - SPHW_CHANNEL_FC); - - sphw_free_cmd_buf(hba->dev_handle, cmdq_in_buf); - - return ret; -} - -static void spfc_free_ssq_qpc(struct spfc_hba_info *hba, u32 free_sq_num) -{ - u32 global_sq_index = 0; - u32 qid = 0; - struct spfc_parent_shared_queue_info *ssq_info = NULL; - - SPFC_FUNCTION_ENTER; - for (global_sq_index = 0; global_sq_index < free_sq_num;) { - for (qid = 1; qid <= SPFC_SQ_NUM_PER_QPC; qid++) { - ssq_info = &hba->parent_queue_mgr->shared_queue[global_sq_index]; - if (qid == SPFC_SQ_NUM_PER_QPC || - global_sq_index == free_sq_num - 1) { - if (ssq_info->parent_ctx.cqm_parent_ctx_obj) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[INFO]qid 0x%x, global_sq_index 0x%x, free_sq_num 0x%x", - qid, global_sq_index, free_sq_num); - cqm3_object_delete(&ssq_info->parent_ctx - .cqm_parent_ctx_obj->object); - ssq_info->parent_ctx.cqm_parent_ctx_obj = NULL; - } - } - global_sq_index++; - if (global_sq_index >= free_sq_num) - break; - } - } -} - -void spfc_free_ssq(void *handle, u32 free_sq_num) -{ -#define SPFC_FREE_SSQ_WAIT_MS 1000 - u32 global_sq_index = 0; - u32 qid = 0; - struct spfc_parent_shared_queue_info *ssq_info = NULL; - struct 
spfc_parent_ssq_info *sq_ctrl = NULL; - struct cqm_qpc_mpt *prnt_ctx = NULL; - u32 ret = UNF_RETURN_ERROR; - u32 entry_count = 0; - struct spfc_hba_info *hba = NULL; - - SPFC_FUNCTION_ENTER; - - hba = (struct spfc_hba_info *)handle; - for (global_sq_index = 0; global_sq_index < free_sq_num;) { - for (qid = 1; qid <= SPFC_SQ_NUM_PER_QPC; qid++) { - ssq_info = &hba->parent_queue_mgr->shared_queue[global_sq_index]; - sq_ctrl = &ssq_info->parent_ssq_info; - /* Free data cos */ - spfc_free_link_list_wpg(sq_ctrl); - if (sq_ctrl->queue_head_original) { - pci_unmap_single(hba->pci_dev, - sq_ctrl->queue_hdr_phy_addr_original, - sizeof(struct spfc_queue_header) + - SPFC_SQ_HEADER_ADDR_ALIGN_SIZE, - DMA_BIDIRECTIONAL); - kfree(sq_ctrl->queue_head_original); - sq_ctrl->queue_head_original = NULL; - } - if (qid == SPFC_SQ_NUM_PER_QPC || global_sq_index == free_sq_num - 1) { - if (ssq_info->parent_ctx.cqm_parent_ctx_obj) { - prnt_ctx = ssq_info->parent_ctx.cqm_parent_ctx_obj; - entry_count = (qid == SPFC_SQ_NUM_PER_QPC ? - SPFC_SQ_NUM_PER_QPC : - free_sq_num - global_sq_index); - ret = spfc_delete_ssqc_via_cmdq_sync(hba, prnt_ctx->xid, - prnt_ctx->paddr, - entry_count); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]ucode delete ssq fail, glbindex 0x%x, qid 0x%x, glsqindex 0x%x", - global_sq_index, qid, free_sq_num); - } - } - } - global_sq_index++; - if (global_sq_index >= free_sq_num) - break; - } - } - - msleep(SPFC_FREE_SSQ_WAIT_MS); - - spfc_free_ssq_qpc(hba, free_sq_num); -} - -u32 spfc_creat_ssqc_via_cmdq_sync(struct spfc_hba_info *hba, - struct spfc_ssq_parent_context *ssqc, - u32 xid, u64 context_gpa) -{ -#define SPFC_INIT_SSQC_TIMEOUT 3000 - int ret; - u32 covrt_size; - struct spfc_cmdqe_creat_ssqc create_ssqc_cmd; - struct sphw_cmd_buf *cmdq_in_buf = NULL; - - cmdq_in_buf = sphw_alloc_cmd_buf(hba->dev_handle); - if (!cmdq_in_buf) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]cmdq in_cmd_buf alloc failed"); - return UNF_RETURN_ERROR; - } - - memset(&create_ssqc_cmd, 0, sizeof(create_ssqc_cmd)); - create_ssqc_cmd.wd0.task_type = SPFC_TASK_T_CREATE_SSQ_CONTEXT; - create_ssqc_cmd.wd0.xid = xid; - create_ssqc_cmd.wd1.scqn = SPFC_LSW(0); - create_ssqc_cmd.context_gpa_hi = SPFC_HIGH_32_BITS(context_gpa); - create_ssqc_cmd.context_gpa_lo = SPFC_LOW_32_BITS(context_gpa); - covrt_size = sizeof(create_ssqc_cmd) - sizeof(create_ssqc_cmd.ssqc); - spfc_cpu_to_big32(&create_ssqc_cmd, covrt_size); - - /* scqc is already big endian */ - memcpy(create_ssqc_cmd.ssqc, ssqc, sizeof(*ssqc)); - memcpy(cmdq_in_buf->buf, &create_ssqc_cmd, sizeof(create_ssqc_cmd)); - cmdq_in_buf->size = sizeof(create_ssqc_cmd); - ret = sphw_cmdq_detail_resp(hba->dev_handle, COMM_MOD_FC, 0, - cmdq_in_buf, NULL, NULL, - SPFC_INIT_SSQC_TIMEOUT, SPHW_CHANNEL_FC); - sphw_free_cmd_buf(hba->dev_handle, cmdq_in_buf); - if (ret) - return UNF_RETURN_ERROR; - return RETURN_OK; -} - -void spfc_init_sq_prnt_ctxt_sq_qinfo(struct spfc_sq_qinfo *sq_info, - struct spfc_parent_ssq_info *ssq) -{ - struct spfc_wqe_page *head_wqe_page = NULL; - struct spfc_sq_qinfo *prnt_sq_ctx = NULL; - struct spfc_queue_info_bus queue_bus; - - SPFC_FUNCTION_ENTER; - - /* Obtains the Parent Context address */ - head_wqe_page = SPFC_GET_SQ_HEAD(ssq); - - prnt_sq_ctx = sq_info; - - /* The PMSN is updated by the host driver */ - prnt_sq_ctx->pmsn_type = SPFC_PMSN_CI_TYPE_FROM_HOST; - - /* Indicates the value of O of the valid SQE in the current round of SQ. - * * The value of Linked List SQ is always one, and the value of 0 is - * invalid. 
- */ - prnt_sq_ctx->loop_o = - SPFC_OWNER_DRIVER_PRODUCT; /* current valid o-bit */ - - /* should be opposite from loop_o */ - prnt_sq_ctx->cur_wqe_o = ~(prnt_sq_ctx->loop_o); - - /* the first sqe's gpa */ - prnt_sq_ctx->cur_sqe_gpa = head_wqe_page->wpg_phy_addr; - - /* Indicates the GPA of the Queue header that is initialized to the SQ - * in * the Host memory. The value must be 16-byte aligned. - */ - prnt_sq_ctx->pmsn_gpa = ssq->queue_hdr_phy_addr; - if (wqe_pre_load != 0) - prnt_sq_ctx->pmsn_gpa |= SPFC_SQ_LINK_PRE; - - /* This field is used to fill in the dmaattr_idx field of the ComboDMA. - * The default value is 0 - */ - prnt_sq_ctx->sqe_dmaattr_idx = SPFC_DMA_ATTR_OFST; - - /* This field is filled using the value of RO_SO in the SGL0 of the - * ComboDMA - */ - prnt_sq_ctx->sq_so_ro = SPFC_PCIE_RELAXED_ORDERING; - - prnt_sq_ctx->ring = ssq->queue_style; - - /* This field is used to set the SGL0 field of the Child solicDMA */ - prnt_sq_ctx->zerocopy_dmaattr_idx = SPFC_DMA_ATTR_OFST; - - prnt_sq_ctx->zerocopy_so_ro = SPFC_PCIE_RELAXED_ORDERING; - prnt_sq_ctx->enable_256 = SPFC_256BWQE_ENABLE; - - /* PCIe attribute information */ - prnt_sq_ctx->pcie_template = SPFC_PCIE_TEMPLATE; - - memset(&queue_bus, 0, sizeof(struct spfc_queue_info_bus)); - queue_bus.bus[ARRAY_INDEX_0] |= ((u64)(ssq->context_id & SPFC_SSQ_CTX_MASK)); /* bits 20 */ - queue_bus.bus[ARRAY_INDEX_0] |= (((u64)(prnt_sq_ctx->sqe_dmaattr_idx)) << UNF_SHIFT_20); - queue_bus.bus[ARRAY_INDEX_0] |= (((u64)(prnt_sq_ctx->sq_so_ro)) << UNF_SHIFT_26); - queue_bus.bus[ARRAY_INDEX_0] |= (((u64)(prnt_sq_ctx->ring)) << UNF_SHIFT_28); /* bits 1 */ - queue_bus.bus[ARRAY_INDEX_0] |= (((u64)(prnt_sq_ctx->zerocopy_dmaattr_idx)) - << UNF_SHIFT_29); /* bits 6 */ - queue_bus.bus[ARRAY_INDEX_0] |= (((u64)(prnt_sq_ctx->zerocopy_so_ro)) << UNF_SHIFT_35); - queue_bus.bus[ARRAY_INDEX_0] |= (((u64)(prnt_sq_ctx->pcie_template)) << UNF_SHIFT_37); - queue_bus.bus[ARRAY_INDEX_0] |= (((u64)(prnt_sq_ctx->pmsn_gpa >> UNF_SHIFT_4)) - << UNF_SHIFT_43); /* bits 21 */ - queue_bus.bus[ARRAY_INDEX_1] |= ((u64)(prnt_sq_ctx->pmsn_gpa >> UNF_SHIFT_25)); - queue_bus.bus[ARRAY_INDEX_1] |= (((u64)(prnt_sq_ctx->pmsn_type)) << UNF_SHIFT_39); - prnt_sq_ctx->parity = spfc_get_parity_value(queue_bus.bus, SPFC_SQC_BUS_ROW, - SPFC_SQC_BUS_COL); - spfc_cpu_to_big64(prnt_sq_ctx, sizeof(struct spfc_sq_qinfo)); - - SPFC_FUNCTION_RETURN; -} - -u32 spfc_create_ssq(void *handle) -{ - u32 ret = RETURN_OK; - u32 global_sq_index = 0; - u32 qid = 0; - struct cqm_qpc_mpt *prnt_ctx = NULL; - struct spfc_parent_shared_queue_info *ssq_info = NULL; - struct spfc_parent_ssq_info *sq_ctrl = NULL; - u32 queue_header_alloc_size = 0; - struct spfc_wqe_page *head_wpg = NULL; - struct spfc_ssq_parent_context prnt_ctx_info; - struct spfc_sq_qinfo *sq_info = NULL; - struct spfc_scq_qinfo *psq_pretchinfo = NULL; - struct spfc_queue_info_bus queue_bus; - struct spfc_fc_key_section *keysection = NULL; - struct spfc_hba_info *hba = NULL; - dma_addr_t origin_addr; - - FC_CHECK_RETURN_VALUE(handle, UNF_RETURN_ERROR); - hba = (struct spfc_hba_info *)handle; - for (global_sq_index = 0; global_sq_index < SPFC_MAX_SSQ_NUM;) { - qid = 0; - prnt_ctx = cqm3_object_qpc_mpt_create(hba->dev_handle, SERVICE_T_FC, - CQM_OBJECT_SERVICE_CTX, - SPFC_CNTX_SIZE_256B, NULL, - CQM_INDEX_INVALID); - if (!prnt_ctx) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Create ssq context failed, CQM_INDEX is 0x%x", - CQM_INDEX_INVALID); - goto ssq_ctx_create_fail; - } - memset(&prnt_ctx_info, 0, sizeof(prnt_ctx_info)); - 
keysection = (struct spfc_fc_key_section *)&prnt_ctx_info; - keysection->xid_h = (prnt_ctx->xid >> UNF_SHIFT_16) & SPFC_KEYSECTION_XID_H_MASK; - keysection->xid_l = prnt_ctx->xid & SPFC_KEYSECTION_XID_L_MASK; - spfc_cpu_to_big32(keysection, sizeof(struct spfc_fc_key_section)); - for (qid = 0; qid < SPFC_SQ_NUM_PER_QPC; qid++) { - sq_info = (struct spfc_sq_qinfo *)((u8 *)(&prnt_ctx_info) + ((qid + 1) * - SPFC_SQ_SPACE_OFFSET)); - ssq_info = &hba->parent_queue_mgr->shared_queue[global_sq_index]; - ssq_info->parent_ctx.cqm_parent_ctx_obj = prnt_ctx; - /* Initialize struct spfc_parent_sq_info */ - sq_ctrl = &ssq_info->parent_ssq_info; - sq_ctrl->hba = (void *)hba; - sq_ctrl->context_id = prnt_ctx->xid; - sq_ctrl->sq_queue_id = qid + SPFC_SQ_QID_START_PER_QPC; - sq_ctrl->cache_id = FC_CALC_CID(prnt_ctx->xid); - sq_ctrl->sqn = global_sq_index; - sq_ctrl->max_sqe_num = hba->exi_count; - /* Reduce one Link Wqe */ - sq_ctrl->wqe_num_per_buf = hba->sq_wpg_pool.wqe_per_wpg - 1; - sq_ctrl->wqe_size = SPFC_SQE_SIZE; - sq_ctrl->wqe_offset = 0; - sq_ctrl->head_start_cmsn = 0; - sq_ctrl->head_end_cmsn = SPFC_GET_WP_END_CMSN(0, sq_ctrl->wqe_num_per_buf); - sq_ctrl->last_pmsn = 0; - /* Linked List SQ Owner Bit 1 valid,0 invalid */ - sq_ctrl->last_pi_owner = 1; - atomic_set(&sq_ctrl->sq_valid, true); - sq_ctrl->accum_wqe_cnt = 0; - sq_ctrl->service_type = SPFC_SERVICE_TYPE_FC_SQ; - sq_ctrl->queue_style = (global_sq_index == SPFC_DIRECTWQE_SQ_INDEX) ? - SPFC_QUEUE_RING_STYLE : SPFC_QUEUE_LINK_STYLE; - INIT_LIST_HEAD(&sq_ctrl->list_linked_list_sq); - atomic_set(&sq_ctrl->wqe_page_cnt, 0); - atomic_set(&sq_ctrl->sq_db_cnt, 0); - atomic_set(&sq_ctrl->sqe_minus_cqe_cnt, 1); - atomic_set(&sq_ctrl->sq_wqe_cnt, 0); - atomic_set(&sq_ctrl->sq_cqe_cnt, 0); - spin_lock_init(&sq_ctrl->parent_sq_enqueue_lock); - memset(sq_ctrl->io_stat, 0, sizeof(sq_ctrl->io_stat)); - - /* Allocate and initialize the Queue Header space. 64B - * alignment is required. * Additional 64B is applied - * for alignment - */ - queue_header_alloc_size = sizeof(struct spfc_queue_header) + - SPFC_SQ_HEADER_ADDR_ALIGN_SIZE; - sq_ctrl->queue_head_original = kmalloc(queue_header_alloc_size, GFP_ATOMIC); - if (!sq_ctrl->queue_head_original) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]SQ(0x%x) create SQ queue header failed", - global_sq_index); - goto ssq_qheader_create_fail; - } - - memset((u8 *)sq_ctrl->queue_head_original, 0, queue_header_alloc_size); - - sq_ctrl->queue_hdr_phy_addr_original = - pci_map_single(hba->pci_dev, sq_ctrl->queue_head_original, - queue_header_alloc_size, DMA_BIDIRECTIONAL); - origin_addr = sq_ctrl->queue_hdr_phy_addr_original; - if (pci_dma_mapping_error(hba->pci_dev, origin_addr)) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]SQ(0x%x) SQ queue header DMA mapping failed", - global_sq_index); - goto ssq_qheader_dma_map_fail; - } - - /* Obtains the 64B alignment address */ - sq_ctrl->queue_header = (struct spfc_queue_header *)(uintptr_t) - SPFC_ADDR_64_ALIGN((u64)((uintptr_t)(sq_ctrl->queue_head_original))); - sq_ctrl->queue_hdr_phy_addr = SPFC_ADDR_64_ALIGN(origin_addr); - - /* Each SQ is allocated with a Wqe Page by default. 
The - * WqePageCnt is incremented by one - */ - head_wpg = spfc_add_one_wqe_page(sq_ctrl); - if (!head_wpg) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]SQ(0x%x) create SQ first wqe page failed", - global_sq_index); - goto ssq_headwpg_create_fail; - } - - atomic_inc(&sq_ctrl->wqe_page_cnt); - spfc_init_sq_prnt_ctxt_sq_qinfo(sq_info, sq_ctrl); - global_sq_index++; - if (global_sq_index == SPFC_MAX_SSQ_NUM) - break; - } - psq_pretchinfo = &prnt_ctx_info.sq_pretchinfo; - psq_pretchinfo->hw_scqc_config.info.rq_th2_preld_cache_num = wqe_pre_load; - psq_pretchinfo->hw_scqc_config.info.rq_th1_preld_cache_num = wqe_pre_load; - psq_pretchinfo->hw_scqc_config.info.rq_th0_preld_cache_num = wqe_pre_load; - psq_pretchinfo->hw_scqc_config.info.rq_min_preld_cache_num = wqe_pre_load; - psq_pretchinfo->hw_scqc_config.info.sq_th2_preld_cache_num = wqe_pre_load; - psq_pretchinfo->hw_scqc_config.info.sq_th1_preld_cache_num = wqe_pre_load; - psq_pretchinfo->hw_scqc_config.info.sq_th0_preld_cache_num = wqe_pre_load; - psq_pretchinfo->hw_scqc_config.info.sq_min_preld_cache_num = wqe_pre_load; - psq_pretchinfo->hw_scqc_config.info.scq_n = (u64)0; - psq_pretchinfo->hw_scqc_config.info.parity = 0; - - memset(&queue_bus, 0, sizeof(struct spfc_queue_info_bus)); - queue_bus.bus[ARRAY_INDEX_0] = psq_pretchinfo->hw_scqc_config.pctxt_val1; - psq_pretchinfo->hw_scqc_config.info.parity = - spfc_get_parity_value(queue_bus.bus, SPFC_HW_SCQC_BUS_ROW, - SPFC_HW_SCQC_BUS_COL); - spfc_cpu_to_big64(psq_pretchinfo, sizeof(struct spfc_scq_qinfo)); - ret = spfc_creat_ssqc_via_cmdq_sync(hba, &prnt_ctx_info, - prnt_ctx->xid, prnt_ctx->paddr); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]SQ(0x%x) create ssqc failed.", - global_sq_index); - goto ssq_cmdqsync_fail; - } - } - - return RETURN_OK; - -ssq_headwpg_create_fail: - pci_unmap_single(hba->pci_dev, sq_ctrl->queue_hdr_phy_addr_original, - queue_header_alloc_size, DMA_BIDIRECTIONAL); - -ssq_qheader_dma_map_fail: - kfree(sq_ctrl->queue_head_original); - sq_ctrl->queue_head_original = NULL; - -ssq_qheader_create_fail: - cqm3_object_delete(&prnt_ctx->object); - ssq_info->parent_ctx.cqm_parent_ctx_obj = NULL; - if (qid > 0) { - while (qid--) { - ssq_info = &hba->parent_queue_mgr->shared_queue[global_sq_index - qid]; - ssq_info->parent_ctx.cqm_parent_ctx_obj = NULL; - } - } - -ssq_ctx_create_fail: -ssq_cmdqsync_fail: - if (global_sq_index > 0) - spfc_free_ssq(hba, global_sq_index); - - return UNF_RETURN_ERROR; -} - -static u32 spfc_create_scq(struct spfc_hba_info *hba) -{ - u32 ret = UNF_RETURN_ERROR; - u32 scq_index = 0; - u32 scq_cfg_num = 0; - struct cqm_queue *cqm_scq = NULL; - void *handle = NULL; - struct spfc_scq_info *scq_info = NULL; - struct spfc_cq_qinfo cq_qinfo; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - handle = hba->dev_handle; - /* Create SCQ by CQM interface */ - for (scq_index = 0; scq_index < SPFC_TOTAL_SCQ_NUM; scq_index++) { - /* - * 1. Create/Allocate SCQ - * * - * Notice: SCQ[0, 2, 4 ...]--->CMD SCQ, - * SCQ[1, 3, 5 ...]--->STS SCQ, - * SCQ[SPFC_TOTAL_SCQ_NUM-1]--->Defaul SCQ - */ - cqm_scq = cqm3_object_nonrdma_queue_create(handle, SERVICE_T_FC, - CQM_OBJECT_NONRDMA_SCQ, - SPFC_SCQ_IS_STS(scq_index) ? - SPFC_STS_SCQ_DEPTH : - SPFC_CMD_SCQ_DEPTH, - SPFC_SCQE_SIZE, hba); - if (!cqm_scq) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, - UNF_WARN, "[err]Create scq failed"); - - goto free_scq; - } - - /* 2. Initialize SCQ (info) */ - spfc_init_scq_info(hba, cqm_scq, scq_index, &scq_info); - - /* 3. 
Allocate & Initialize SCQ interrupt */ - ret = spfc_alloc_scq_int(scq_info); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Allocate scq interrupt failed"); - - cqm3_object_delete(&cqm_scq->object); - memset(scq_info, 0, sizeof(struct spfc_scq_info)); - goto free_scq; - } - - /* 4. Initialize SCQ queue header */ - spfc_init_scq_header((struct wq_header *)(void *)cqm_scq->q_header_vaddr); - - /* 5. Initialize & Create SCQ CTX */ - memset(&cq_qinfo, 0, sizeof(cq_qinfo)); - spfc_cfg_scq_ctx(scq_info, &cq_qinfo); - ret = spfc_creat_scqc_via_cmdq_sync(hba, &cq_qinfo, scq_info->scqn); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Create scq context failed"); - - cqm3_object_delete(&cqm_scq->object); - memset(scq_info, 0, sizeof(struct spfc_scq_info)); - goto free_scq; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[info]Create SCQ[%u] Scqn=%u WqeNum=%u WqeSize=%u WqePerBuf=%u CqDepth=%u CiType=%u irq=%u msix=%u", - scq_info->queue_id, scq_info->scqn, - scq_info->valid_wqe_num, scq_info->wqe_size, - scq_info->wqe_num_per_buf, scq_info->scqc_cq_depth, - scq_info->scqc_ci_type, scq_info->irq_id, - scq_info->msix_entry_idx); - } - - /* Last SCQ is used to handle SCQE delivery access when clearing buffer - */ - hba->default_scqn = scq_info->scqn; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Default Scqn=%u CqmScqIndex=%u", hba->default_scqn, - cqm_scq->index); - - return RETURN_OK; - -free_scq: - spfc_flush_scq_ctx(hba); - - scq_cfg_num = scq_index; - for (scq_index = 0; scq_index < scq_cfg_num; scq_index++) { - scq_info = &hba->scq_info[scq_index]; - spfc_free_scq_int(scq_info); - cqm_scq = scq_info->cqm_scq_info; - cqm3_object_delete(&cqm_scq->object); - memset(scq_info, 0, sizeof(struct spfc_scq_info)); - } - - return UNF_RETURN_ERROR; -} - -static void spfc_destroy_scq(struct spfc_hba_info *hba) -{ - u32 scq_index = 0; - struct cqm_queue *cqm_scq = NULL; - struct spfc_scq_info *scq_info = NULL; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Start destroy total %d SCQ", SPFC_TOTAL_SCQ_NUM); - - FC_CHECK_RETURN_VOID(hba); - - /* Use CQM to delete SCQ */ - for (scq_index = 0; scq_index < SPFC_TOTAL_SCQ_NUM; scq_index++) { - scq_info = &hba->scq_info[scq_index]; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ALL, - "[info]Destroy SCQ%u, Scqn=%u, Irq=%u, msix=%u, name=%s", - scq_index, scq_info->scqn, scq_info->irq_id, - scq_info->msix_entry_idx, scq_info->irq_name); - - spfc_free_scq_int(scq_info); - cqm_scq = scq_info->cqm_scq_info; - cqm3_object_delete(&cqm_scq->object); - memset(scq_info, 0, sizeof(struct spfc_scq_info)); - } -} - -static void spfc_init_srq_info(struct spfc_hba_info *hba, struct cqm_queue *cqm_srq, - struct spfc_srq_info *srq_info) -{ - FC_CHECK_RETURN_VOID(hba); - FC_CHECK_RETURN_VOID(cqm_srq); - FC_CHECK_RETURN_VOID(srq_info); - - srq_info->hba = (void *)hba; - - srq_info->cqm_srq_info = cqm_srq; - srq_info->wqe_num_per_buf = cqm_srq->q_room_buf_1.buf_size / SPFC_SRQE_SIZE - 1; - srq_info->wqe_size = SPFC_SRQE_SIZE; - srq_info->valid_wqe_num = cqm_srq->valid_wqe_num; - srq_info->pi = 0; - srq_info->pi_owner = SPFC_SRQ_INIT_LOOP_O; - srq_info->pmsn = 0; - srq_info->srqn = cqm_srq->index; - srq_info->first_rqe_recv_dma = 0; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Init srq info(srq index 0x%x) valid wqe num 0x%x, buffer size 0x%x, wqe num per buf 0x%x", - cqm_srq->index, srq_info->valid_wqe_num, - cqm_srq->q_room_buf_1.buf_size, srq_info->wqe_num_per_buf); -} - -static void 
spfc_init_srq_header(struct wq_header *queue_header)
-{
-        FC_CHECK_RETURN_VOID(queue_header);
-
-        memset(queue_header, 0, sizeof(struct wq_header));
-}
-
-/*
- *Function Name : spfc_get_srq_entry
- *Function Description: Obtain RQE in SRQ via PI.
- *Input Parameters : *srq_info,
- * **linked_rqe,
- * position
- *Output Parameters : N/A
- *Return Type : struct spfc_rqe*
- */
-static struct spfc_rqe *spfc_get_srq_entry(struct spfc_srq_info *srq_info,
-                                           struct spfc_rqe **linked_rqe, u16 position)
-{
-        u32 buf_id = 0;
-        u32 wqe_num_per_buf = 0;
-        u16 buf_offset = 0;
-        struct cqm_buf_list *buf = NULL;
-
-        FC_CHECK_RETURN_VALUE(srq_info, NULL);
-
-        wqe_num_per_buf = srq_info->wqe_num_per_buf;
-
-        buf_id = position / wqe_num_per_buf;
-        buf = &srq_info->cqm_srq_info->q_room_buf_1.buf_list[buf_id];
-        buf_offset = position % ((u16)wqe_num_per_buf);
-
-        if (buf_offset + 1 == wqe_num_per_buf)
-                *linked_rqe = (struct spfc_rqe *)(buf->va) + wqe_num_per_buf;
-        else
-                *linked_rqe = NULL;
-
-        return (struct spfc_rqe *)(buf->va) + buf_offset;
-}
-
-void spfc_post_els_srq_wqe(struct spfc_srq_info *srq_info, u16 buf_id)
-{
-        struct spfc_rqe *rqe = NULL;
-        struct spfc_rqe tmp_rqe;
-        struct spfc_rqe *linked_rqe = NULL;
-        struct wq_header *wq_header = NULL;
-        struct spfc_drq_buff_entry *buff_entry = NULL;
-
-        FC_CHECK_RETURN_VOID(srq_info);
-        FC_CHECK_RETURN_VOID(buf_id < srq_info->valid_wqe_num);
-
-        buff_entry = srq_info->els_buff_entry_head + buf_id;
-
-        spin_lock(&srq_info->srq_spin_lock);
-
-        /* Obtain RQE, not include link wqe */
-        rqe = spfc_get_srq_entry(srq_info, &linked_rqe, srq_info->pi);
-        if (!rqe) {
-                FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR,
-                             "[err]post els srq,get srqe failed, valid wqe num 0x%x, pi 0x%x, pmsn 0x%x",
-                             srq_info->valid_wqe_num, srq_info->pi,
-                             srq_info->pmsn);
-
-                spin_unlock(&srq_info->srq_spin_lock);
-                return;
-        }
-
-        /* Initialize RQE */
-        /* cs section is not used */
-        memset(&tmp_rqe, 0, sizeof(struct spfc_rqe));
-
-        /* default Obit is invalid, and set valid finally */
-        spfc_build_srq_wqe_ctrls(&tmp_rqe, !srq_info->pi_owner, srq_info->pmsn + 1);
-
-        tmp_rqe.bds_sl.buf_addr_hi = SPFC_HIGH_32_BITS(buff_entry->buff_dma);
-        tmp_rqe.bds_sl.buf_addr_lo = SPFC_LOW_32_BITS(buff_entry->buff_dma);
-        tmp_rqe.drv_sl.wd0.user_id = buf_id;
-
-        /* convert to big endian */
-        spfc_cpu_to_big32(&tmp_rqe, sizeof(struct spfc_rqe));
-
-        memcpy(rqe, &tmp_rqe, sizeof(struct spfc_rqe));
-
-        /* reset Obit */
-        spfc_set_srq_wqe_owner_be((struct spfc_wqe_ctrl *)(void *)(&rqe->ctrl_sl),
-                                  srq_info->pi_owner);
-
-        if (linked_rqe) {
-                /* Update Obit in linked WQE */
-                spfc_set_srq_link_wqe_owner_be((struct spfc_linkwqe *)(void *)linked_rqe,
-                                               srq_info->pi_owner, srq_info->pmsn + 1);
-        }
-
-        /* Update PI and PMSN */
-        spfc_update_producer_info((u16)(srq_info->valid_wqe_num),
-                                  &srq_info->pi, &srq_info->pi_owner);
-
-        /* pmsn is 16bit. The value is added to the maximum value and is
-         * automatically reversed
-         */
-        srq_info->pmsn++;
-
-        /* Update pmsn in queue header */
-        wq_header = (struct wq_header *)(void *)srq_info->cqm_srq_info->q_header_vaddr;
-        spfc_update_srq_header(&wq_header->db_record, srq_info->pmsn);
-
-        spin_unlock(&srq_info->srq_spin_lock);
-}
-
-/*
- *Function Name : spfc_cfg_srq_ctx
- *Function Description: Initialize the CTX of the SRQ that receives the
- * immediate data. The RQE of the SRQ
- * needs to be
-initialized when the RQE is filled. Input Parameters : *srq_info, *srq_ctx,
- * sge_size,
- * rqe_gpa
- *Output Parameters : N/A
- *Return Type : void
- */
-static void spfc_cfg_srq_ctx(struct spfc_srq_info *srq_info,
-                             struct spfc_srq_ctx *ctx, u32 sge_size,
-                             u64 rqe_gpa)
-{
-        struct spfc_srq_ctx *srq_ctx = NULL;
-        struct cqm_queue *cqm_srq_info = NULL;
-        struct spfc_queue_info_bus queue_bus;
-
-        FC_CHECK_RETURN_VOID(srq_info);
-        FC_CHECK_RETURN_VOID(ctx);
-
-        cqm_srq_info = srq_info->cqm_srq_info;
-        srq_ctx = ctx;
-        srq_ctx->last_rq_pmsn = 0;
-        srq_ctx->cur_rqe_msn = 0;
-        srq_ctx->pcie_template = 0;
-        /* The value of CTX needs to be updated
-         *when RQE is configured
-         */
-        srq_ctx->cur_rqe_gpa = rqe_gpa;
-        srq_ctx->cur_sge_v = 0;
-        srq_ctx->cur_sge_l = 0;
-        /* The information received by the SRQ is reported through the
-         *SCQ. The interrupt and ArmCQ are disabled.
-         */
-        srq_ctx->int_mode = 0;
-        srq_ctx->ceqn_msix = 0;
-        srq_ctx->cur_sge_remain_len = 0;
-        srq_ctx->cur_sge_id = 0;
-        srq_ctx->consant_sge_len = sge_size;
-        srq_ctx->cur_wqe_o = 0;
-        srq_ctx->pmsn_type = SPFC_PMSN_CI_TYPE_FROM_HOST;
-        srq_ctx->bdsl = 0;
-        srq_ctx->cr = 0;
-        srq_ctx->csl = 0;
-        srq_ctx->cf = 0;
-        srq_ctx->ctrl_sl = 0;
-        srq_ctx->cur_sge_gpa = 0;
-        srq_ctx->cur_pmsn_gpa = cqm_srq_info->q_header_paddr;
-        srq_ctx->prefetch_max_masn = 0;
-        srq_ctx->cqe_max_cnt = 0;
-        srq_ctx->cur_cqe_cnt = 0;
-        srq_ctx->arm_q = 0;
-        srq_ctx->cq_so_ro = 0;
-        srq_ctx->cqe_dma_attr_idx = 0;
-        srq_ctx->rq_so_ro = 0;
-        srq_ctx->rqe_dma_attr_idx = 0;
-        srq_ctx->loop_o = SPFC_SRQ_INIT_LOOP_O;
-        srq_ctx->ring = SPFC_QUEUE_RING;
-
-        memset(&queue_bus, 0, sizeof(struct spfc_queue_info_bus));
-        queue_bus.bus[ARRAY_INDEX_0] |= ((u64)(cqm_srq_info->q_ctx_paddr >> UNF_SHIFT_4));
-        queue_bus.bus[ARRAY_INDEX_0] |= (((u64)(srq_ctx->rqe_dma_attr_idx &
-                                         SPFC_SRQ_CTX_rqe_dma_attr_idx_MASK))
-                                         << UNF_SHIFT_60); /* bits 4 */
-
-        queue_bus.bus[ARRAY_INDEX_1] |= ((u64)(srq_ctx->rqe_dma_attr_idx >> UNF_SHIFT_4));
-        queue_bus.bus[ARRAY_INDEX_1] |= (((u64)(srq_ctx->rq_so_ro)) << UNF_SHIFT_2); /* bits 2 */
-        queue_bus.bus[ARRAY_INDEX_1] |= (((u64)(srq_ctx->cur_pmsn_gpa >> UNF_SHIFT_4))
-                                         << UNF_SHIFT_4); /* bits 60 */
-
-        queue_bus.bus[ARRAY_INDEX_2] |= ((u64)(srq_ctx->consant_sge_len)); /* bits 17 */
-        queue_bus.bus[ARRAY_INDEX_2] |= (((u64)(srq_ctx->pcie_template)) << UNF_SHIFT_17);
-
-        srq_ctx->parity = spfc_get_parity_value((void *)queue_bus.bus, SPFC_SRQC_BUS_ROW,
-                                                SPFC_SRQC_BUS_COL);
-
-        spfc_cpu_to_big64((void *)srq_ctx, sizeof(struct spfc_srq_ctx));
-}
-
-static u32 spfc_creat_srqc_via_cmdq_sync(struct spfc_hba_info *hba,
-                                         struct spfc_srq_ctx *srqc,
-                                         u64 ctx_gpa)
-{
-#define SPFC_INIT_SRQC_TIMEOUT 3000
-
-        int ret;
-        u32 covrt_size;
-        struct spfc_cmdqe_creat_srqc init_srq_cmd;
-        struct sphw_cmd_buf *cmdq_in_buf;
-
-        cmdq_in_buf = sphw_alloc_cmd_buf(hba->dev_handle);
-        if (!cmdq_in_buf) {
-                FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR,
-                             "[err]cmdq in_cmd_buf alloc failed");
-
-                SPFC_ERR_IO_STAT(hba, SPFC_TASK_T_INIT_SRQC);
-                return UNF_RETURN_ERROR;
-        }
-
-        memset(&init_srq_cmd, 0, sizeof(init_srq_cmd));
-        init_srq_cmd.wd0.task_type = SPFC_TASK_T_INIT_SRQC;
-        init_srq_cmd.srqc_gpa_h = SPFC_HIGH_32_BITS(ctx_gpa);
-        init_srq_cmd.srqc_gpa_l = SPFC_LOW_32_BITS(ctx_gpa);
-        covrt_size = sizeof(init_srq_cmd) - sizeof(init_srq_cmd.srqc);
-        spfc_cpu_to_big32(&init_srq_cmd, covrt_size);
-
-        /* srqc is already big-endian */
-        memcpy(init_srq_cmd.srqc, srqc, sizeof(*srqc));
-        memcpy(cmdq_in_buf->buf, &init_srq_cmd, sizeof(init_srq_cmd));
-        cmdq_in_buf->size = sizeof(init_srq_cmd);
-
-        ret = sphw_cmdq_detail_resp(hba->dev_handle, COMM_MOD_FC, 0,
-                                    cmdq_in_buf, NULL, NULL,
-                                    SPFC_INIT_SRQC_TIMEOUT, SPHW_CHANNEL_FC);
-
-        sphw_free_cmd_buf(hba->dev_handle, cmdq_in_buf);
-
-        if (ret) {
-                FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR,
-                             "[err]Send creat srqc via cmdq failed, ret=%d",
-                             ret);
-
-                SPFC_ERR_IO_STAT(hba, SPFC_TASK_T_INIT_SRQC);
-                return UNF_RETURN_ERROR;
-        }
-
-        SPFC_IO_STAT(hba, SPFC_TASK_T_INIT_SRQC);
-
-        return RETURN_OK;
-}
-
-static void spfc_init_els_srq_wqe(struct spfc_srq_info *srq_info)
-{
-        u32 rqe_index = 0;
-        struct spfc_drq_buff_entry *buf_entry = NULL;
-
-        FC_CHECK_RETURN_VOID(srq_info);
-
-        for (rqe_index = 0; rqe_index < srq_info->valid_wqe_num - 1; rqe_index++) {
-                buf_entry = srq_info->els_buff_entry_head + rqe_index;
-                spfc_post_els_srq_wqe(srq_info, buf_entry->buff_id);
-        }
-}
-
-static void spfc_free_els_srq_buff(struct spfc_hba_info *hba, u32 srq_valid_wqe)
-{
-        u32 buff_index = 0;
-        struct spfc_srq_info *srq_info = NULL;
-        struct spfc_drq_buff_entry *buff_entry = NULL;
-
-        FC_CHECK_RETURN_VOID(hba);
-
-        srq_info = &hba->els_srq_info;
-
-        if (!srq_info->els_buff_entry_head)
-                return;
-
-        for (buff_index = 0; buff_index < srq_valid_wqe; buff_index++) {
-                buff_entry = &srq_info->els_buff_entry_head[buff_index];
-                buff_entry->buff_addr = NULL;
-        }
-
-        if (srq_info->buf_list.buflist) {
-                for (buff_index = 0; buff_index < srq_info->buf_list.buf_num;
-                     buff_index++) {
-                        if (srq_info->buf_list.buflist[buff_index].paddr != 0) {
-                                pci_unmap_single(hba->pci_dev,
-                                                 srq_info->buf_list.buflist[buff_index].paddr,
-                                                 srq_info->buf_list.buf_size,
-                                                 DMA_FROM_DEVICE);
-                                srq_info->buf_list.buflist[buff_index].paddr = 0;
-                        }
-                        kfree(srq_info->buf_list.buflist[buff_index].vaddr);
-                        srq_info->buf_list.buflist[buff_index].vaddr = NULL;
-                }
-
-                kfree(srq_info->buf_list.buflist);
-                srq_info->buf_list.buflist = NULL;
-        }
-
-        kfree(srq_info->els_buff_entry_head);
-        srq_info->els_buff_entry_head = NULL;
-}
-
-static u32 spfc_alloc_els_srq_buff(struct spfc_hba_info *hba, u32 srq_valid_wqe)
-{
-        u32 req_buff_size = 0;
-        u32 buff_index = 0;
-        struct spfc_srq_info *srq_info = NULL;
-        struct spfc_drq_buff_entry *buff_entry = NULL;
-        u32 buf_total_size;
-        u32 buf_num;
-        u32 alloc_idx;
-        u32 cur_buf_idx = 0;
-        u32 cur_buf_offset = 0;
-        u32 buf_cnt_perhugebuf;
-
-        srq_info = &hba->els_srq_info;
-
-        /* Apply for entry buffer */
-        req_buff_size = (u32)(srq_valid_wqe * sizeof(struct spfc_drq_buff_entry));
-        srq_info->els_buff_entry_head = (struct spfc_drq_buff_entry *)kmalloc(req_buff_size,
-                                                                              GFP_KERNEL);
-        if (!srq_info->els_buff_entry_head) {
-                FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN,
-                             "[err]Allocate ELS Srq receive buffer entries failed");
-
-                return UNF_RETURN_ERROR;
-        }
-        memset(srq_info->els_buff_entry_head, 0, req_buff_size);
-
-        buf_total_size = SPFC_SRQ_ELS_SGE_LEN * srq_valid_wqe;
-
-        srq_info->buf_list.buf_size = buf_total_size > BUF_LIST_PAGE_SIZE
-                                      ? BUF_LIST_PAGE_SIZE
-                                      : buf_total_size;
-        buf_cnt_perhugebuf = srq_info->buf_list.buf_size / SPFC_SRQ_ELS_SGE_LEN;
-        buf_num = srq_valid_wqe % buf_cnt_perhugebuf ?
- srq_valid_wqe / buf_cnt_perhugebuf + 1 : - srq_valid_wqe / buf_cnt_perhugebuf; - srq_info->buf_list.buflist = (struct buff_list *)kmalloc(buf_num * sizeof(struct buff_list), - GFP_KERNEL); - srq_info->buf_list.buf_num = buf_num; - - if (!srq_info->buf_list.buflist) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Allocate ELS buf list failed out of memory"); - goto free_buff; - } - memset(srq_info->buf_list.buflist, 0, buf_num * sizeof(struct buff_list)); - - for (alloc_idx = 0; alloc_idx < buf_num; alloc_idx++) { - srq_info->buf_list.buflist[alloc_idx].vaddr = kmalloc(srq_info->buf_list.buf_size, - GFP_KERNEL); - if (!srq_info->buf_list.buflist[alloc_idx].vaddr) - goto free_buff; - - memset(srq_info->buf_list.buflist[alloc_idx].vaddr, 0, srq_info->buf_list.buf_size); - - srq_info->buf_list.buflist[alloc_idx].paddr = - pci_map_single(hba->pci_dev, srq_info->buf_list.buflist[alloc_idx].vaddr, - srq_info->buf_list.buf_size, DMA_FROM_DEVICE); - if (pci_dma_mapping_error(hba->pci_dev, - srq_info->buf_list.buflist[alloc_idx].paddr)) { - srq_info->buf_list.buflist[alloc_idx].paddr = 0; - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Map els srq buffer failed"); - - goto free_buff; - } - } - - /* Apply for receiving buffer and attach it to the free linked list */ - for (buff_index = 0; buff_index < srq_valid_wqe; buff_index++) { - buff_entry = &srq_info->els_buff_entry_head[buff_index]; - cur_buf_idx = buff_index / buf_cnt_perhugebuf; - cur_buf_offset = SPFC_SRQ_ELS_SGE_LEN * (buff_index % buf_cnt_perhugebuf); - buff_entry->buff_addr = srq_info->buf_list.buflist[cur_buf_idx].vaddr + - cur_buf_offset; - buff_entry->buff_dma = srq_info->buf_list.buflist[cur_buf_idx].paddr + - cur_buf_offset; - buff_entry->buff_id = (u16)buff_index; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[EVENT]Allocate bufnum:%u,buf_total_size:%u", buf_num, - buf_total_size); - - return RETURN_OK; - -free_buff: - spfc_free_els_srq_buff(hba, srq_valid_wqe); - return UNF_RETURN_ERROR; -} - -void spfc_send_clear_srq_cmd(struct spfc_hba_info *hba, - struct spfc_srq_info *srq_info) -{ - union spfc_cmdqe cmdqe; - struct cqm_queue *cqm_fcp_srq = NULL; - ulong flag = 0; - - memset(&cmdqe, 0, sizeof(union spfc_cmdqe)); - - spin_lock_irqsave(&srq_info->srq_spin_lock, flag); - cqm_fcp_srq = srq_info->cqm_srq_info; - if (!cqm_fcp_srq) { - srq_info->state = SPFC_CLEAN_DONE; - spin_unlock_irqrestore(&srq_info->srq_spin_lock, flag); - return; - } - - cmdqe.clear_srq.wd0.task_type = SPFC_TASK_T_CLEAR_SRQ; - cmdqe.clear_srq.wd1.scqn = SPFC_LSW(hba->default_scqn); - cmdqe.clear_srq.wd1.srq_type = srq_info->srq_type; - cmdqe.clear_srq.srqc_gpa_h = SPFC_HIGH_32_BITS(cqm_fcp_srq->q_ctx_paddr); - cmdqe.clear_srq.srqc_gpa_l = SPFC_LOW_32_BITS(cqm_fcp_srq->q_ctx_paddr); - - (void)queue_delayed_work(hba->work_queue, &srq_info->del_work, - (ulong)msecs_to_jiffies(SPFC_SRQ_DEL_STAGE_TIMEOUT_MS)); - spin_unlock_irqrestore(&srq_info->srq_spin_lock, flag); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port 0x%x begin to clear srq 0x%x(0x%x,0x%llx)", - hba->port_cfg.port_id, srq_info->srq_type, - SPFC_LSW(hba->default_scqn), - (u64)cqm_fcp_srq->q_ctx_paddr); - - /* Run the ROOT CMDQ command to issue the clear srq command. If the - * command fails to be delivered, retry upon timeout. - */ - (void)spfc_root_cmdq_enqueue(hba, &cmdqe, sizeof(cmdqe.clear_srq)); -} - -/* - *Function Name : spfc_srq_clr_timeout - *Function Description: Delete srq when timeout. 
- *Input Parameters : *work - *Output Parameters : N/A - *Return Type : void - */ -static void spfc_srq_clr_timeout(struct work_struct *work) -{ -#define SPFC_MAX_DEL_SRQ_RETRY_TIMES 2 - struct spfc_srq_info *srq = NULL; - struct spfc_hba_info *hba = NULL; - struct cqm_queue *cqm_fcp_imm_srq = NULL; - ulong flag = 0; - - srq = container_of(work, struct spfc_srq_info, del_work.work); - - spin_lock_irqsave(&srq->srq_spin_lock, flag); - hba = srq->hba; - cqm_fcp_imm_srq = srq->cqm_srq_info; - spin_unlock_irqrestore(&srq->srq_spin_lock, flag); - - if (hba && cqm_fcp_imm_srq) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Port 0x%x clear srq 0x%x stat 0x%x timeout", - hba->port_cfg.port_id, srq->srq_type, srq->state); - - /* If the delivery fails or the execution times out after the - * delivery, try again once - */ - srq->del_retry_time++; - if (srq->del_retry_time < SPFC_MAX_DEL_SRQ_RETRY_TIMES) - spfc_send_clear_srq_cmd(hba, srq); - else - srq->del_retry_time = 0; - } -} - -static u32 spfc_create_els_srq(struct spfc_hba_info *hba) -{ - u32 ret = UNF_RETURN_ERROR; - struct cqm_queue *cqm_srq = NULL; - struct wq_header *wq_header = NULL; - struct spfc_srq_info *srq_info = NULL; - struct spfc_srq_ctx srq_ctx = {0}; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - - cqm_srq = cqm3_object_fc_srq_create(hba->dev_handle, SERVICE_T_FC, - CQM_OBJECT_NONRDMA_SRQ, SPFC_SRQ_ELS_DATA_DEPTH, - SPFC_SRQE_SIZE, hba); - if (!cqm_srq) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Create Els Srq failed"); - - return UNF_RETURN_ERROR; - } - - /* Initialize SRQ */ - srq_info = &hba->els_srq_info; - spfc_init_srq_info(hba, cqm_srq, srq_info); - srq_info->srq_type = SPFC_SRQ_ELS; - srq_info->enable = true; - srq_info->state = SPFC_CLEAN_DONE; - srq_info->del_retry_time = 0; - - /* The srq lock is initialized and can be created repeatedly */ - spin_lock_init(&srq_info->srq_spin_lock); - srq_info->spin_lock_init = true; - - /* Initialize queue header */ - wq_header = (struct wq_header *)(void *)cqm_srq->q_header_vaddr; - spfc_init_srq_header(wq_header); - INIT_DELAYED_WORK(&srq_info->del_work, spfc_srq_clr_timeout); - - /* Apply for RQ buffer */ - ret = spfc_alloc_els_srq_buff(hba, srq_info->valid_wqe_num); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Allocate Els Srq buffer failed"); - - cqm3_object_delete(&cqm_srq->object); - memset(srq_info, 0, sizeof(struct spfc_srq_info)); - return UNF_RETURN_ERROR; - } - - /* Fill RQE, update queue header */ - spfc_init_els_srq_wqe(srq_info); - - /* Fill SRQ CTX */ - memset(&srq_ctx, 0, sizeof(srq_ctx)); - spfc_cfg_srq_ctx(srq_info, &srq_ctx, SPFC_SRQ_ELS_SGE_LEN, - srq_info->cqm_srq_info->q_room_buf_1.buf_list->pa); - - ret = spfc_creat_srqc_via_cmdq_sync(hba, &srq_ctx, srq_info->cqm_srq_info->q_ctx_paddr); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Creat Els Srqc failed"); - - spfc_free_els_srq_buff(hba, srq_info->valid_wqe_num); - cqm3_object_delete(&cqm_srq->object); - memset(srq_info, 0, sizeof(struct spfc_srq_info)); - - return UNF_RETURN_ERROR; - } - - return RETURN_OK; -} - -void spfc_wq_destroy_els_srq(struct work_struct *work) -{ - struct spfc_hba_info *hba = NULL; - - FC_CHECK_RETURN_VOID(work); - hba = - container_of(work, struct spfc_hba_info, els_srq_clear_work); - spfc_destroy_els_srq(hba); -} - -void spfc_destroy_els_srq(void *handle) -{ - /* - * Receive clear els srq sts - * ---then--->>> destroy els srq - */ - struct spfc_srq_info *srq_info = NULL; - struct spfc_hba_info *hba = 
NULL; - - FC_CHECK_RETURN_VOID(handle); - - hba = (struct spfc_hba_info *)handle; - srq_info = &hba->els_srq_info; - - /* release receive buffer */ - spfc_free_els_srq_buff(hba, srq_info->valid_wqe_num); - - /* release srq info */ - if (srq_info->cqm_srq_info) { - cqm3_object_delete(&srq_info->cqm_srq_info->object); - srq_info->cqm_srq_info = NULL; - } - if (srq_info->spin_lock_init) - srq_info->spin_lock_init = false; - srq_info->hba = NULL; - srq_info->enable = false; - srq_info->state = SPFC_CLEAN_DONE; -} - -/* - *Function Name : spfc_create_srq - *Function Description: Create SRQ, which contains four SRQ for receiving - * instant data and a SRQ for receiving - * ELS data. - *Input Parameters : *hba Output Parameters : N/A Return Type :u32 - */ -static u32 spfc_create_srq(struct spfc_hba_info *hba) -{ - u32 ret = UNF_RETURN_ERROR; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - - /* Create ELS SRQ */ - ret = spfc_create_els_srq(hba); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Create Els Srq failed"); - return UNF_RETURN_ERROR; - } - - return RETURN_OK; -} - -/* - *Function Name : spfc_destroy_srq - *Function Description: Release the SRQ resource, including the SRQ for - * receiving the immediate data and the - * SRQ forreceiving the ELS data. - *Input Parameters : *hba Output Parameters : N/A - *Return Type : void - */ -static void spfc_destroy_srq(struct spfc_hba_info *hba) -{ - FC_CHECK_RETURN_VOID(hba); - - spfc_destroy_els_srq(hba); -} - -u32 spfc_create_common_share_queues(void *handle) -{ - u32 ret = UNF_RETURN_ERROR; - struct spfc_hba_info *hba = NULL; - - FC_CHECK_RETURN_VALUE(handle, UNF_RETURN_ERROR); - hba = (struct spfc_hba_info *)handle; - /* Create & Init 8 pairs SCQ */ - ret = spfc_create_scq(hba); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Create scq failed"); - - return UNF_RETURN_ERROR; - } - - /* Alloc SRQ resource for SIRT & ELS */ - ret = spfc_create_srq(hba); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Create srq failed"); - - spfc_flush_scq_ctx(hba); - spfc_destroy_scq(hba); - - return UNF_RETURN_ERROR; - } - - return RETURN_OK; -} - -void spfc_destroy_common_share_queues(void *hba) -{ - FC_CHECK_RETURN_VOID(hba); - - spfc_destroy_scq((struct spfc_hba_info *)hba); - spfc_destroy_srq((struct spfc_hba_info *)hba); -} - -static u8 spfc_map_fcp_data_cos(struct spfc_hba_info *hba) -{ - u8 i = 0; - u8 min_cnt_index = SPFC_PACKET_COS_FC_DATA; - bool get_init_index = false; - - for (i = 0; i < SPFC_MAX_COS_NUM; i++) { - /* Check whether the CoS is valid for the FC and cannot be - * occupied by the CMD - */ - if ((!(hba->cos_bitmap & ((u32)1 << i))) || i == SPFC_PACKET_COS_FC_CMD) - continue; - - if (!get_init_index) { - min_cnt_index = i; - get_init_index = true; - continue; - } - - if (atomic_read(&hba->cos_rport_cnt[i]) < - atomic_read(&hba->cos_rport_cnt[min_cnt_index])) - min_cnt_index = i; - } - - atomic_inc(&hba->cos_rport_cnt[min_cnt_index]); - - return min_cnt_index; -} - -static void spfc_update_cos_rport_cnt(struct spfc_hba_info *hba, u8 cos_index) -{ - if (cos_index >= SPFC_MAX_COS_NUM || - cos_index == SPFC_PACKET_COS_FC_CMD || - (!(hba->cos_bitmap & ((u32)1 << cos_index))) || - (atomic_read(&hba->cos_rport_cnt[cos_index]) == 0)) - return; - - atomic_dec(&hba->cos_rport_cnt[cos_index]); -} - -void spfc_invalid_parent_sq(struct spfc_parent_sq_info *sq_info) -{ - sq_info->rport_index = INVALID_VALUE32; - sq_info->context_id = INVALID_VALUE32; - 
sq_info->sq_queue_id = INVALID_VALUE32; - sq_info->cache_id = INVALID_VALUE32; - sq_info->local_port_id = INVALID_VALUE32; - sq_info->remote_port_id = INVALID_VALUE32; - sq_info->hba = NULL; - sq_info->del_start_jiff = INVALID_VALUE64; - sq_info->port_in_flush = false; - sq_info->sq_in_sess_rst = false; - sq_info->oqid_rd = INVALID_VALUE16; - sq_info->oqid_wr = INVALID_VALUE16; - sq_info->srq_ctx_addr = 0; - sq_info->sqn_base = 0; - atomic_set(&sq_info->sq_cached, false); - sq_info->vport_id = 0; - sq_info->sirt_dif_control.protect_opcode = UNF_DIF_ACTION_NONE; - sq_info->need_offloaded = INVALID_VALUE8; - atomic_set(&sq_info->sq_valid, false); - atomic_set(&sq_info->flush_done_wait_cnt, 0); - memset(&sq_info->delay_sqe, 0, sizeof(struct spfc_delay_sqe_ctrl_info)); - memset(sq_info->io_stat, 0, sizeof(sq_info->io_stat)); -} - -static void spfc_parent_sq_opreate_timeout(struct work_struct *work) -{ - ulong flag = 0; - struct spfc_parent_sq_info *parent_sq = NULL; - struct spfc_parent_queue_info *parent_queue = NULL; - struct spfc_hba_info *hba = NULL; - - FC_CHECK_RETURN_VOID(work); - - parent_sq = container_of(work, struct spfc_parent_sq_info, del_work.work); - parent_queue = container_of(parent_sq, struct spfc_parent_queue_info, parent_sq_info); - hba = (struct spfc_hba_info *)parent_sq->hba; - FC_CHECK_RETURN_VOID(hba); - - spin_lock_irqsave(&parent_queue->parent_queue_state_lock, flag); - if (parent_queue->offload_state == SPFC_QUEUE_STATE_DESTROYING) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "Port(0x%x) sq rport index(0x%x) local nportid(0x%x),remote nportid(0x%x) reset timeout.", - hba->port_cfg.port_id, parent_sq->rport_index, - parent_sq->local_port_id, - parent_sq->remote_port_id); - } - spin_unlock_irqrestore(&parent_queue->parent_queue_state_lock, flag); -} - -static void spfc_parent_sq_wait_flush_done_timeout(struct work_struct *work) -{ - ulong flag = 0; - struct spfc_parent_sq_info *parent_sq = NULL; - struct spfc_parent_queue_info *parent_queue = NULL; - struct spfc_hba_info *hba = NULL; - u32 ctx_flush_done; - u32 *ctx_dw = NULL; - int ret; - int sq_state = SPFC_STAT_PARENT_SQ_QUEUE_DELAYED_WORK; - spinlock_t *prtq_state_lock = NULL; - - FC_CHECK_RETURN_VOID(work); - - parent_sq = container_of(work, struct spfc_parent_sq_info, flush_done_timeout_work.work); - - FC_CHECK_RETURN_VOID(parent_sq); - - parent_queue = container_of(parent_sq, struct spfc_parent_queue_info, parent_sq_info); - prtq_state_lock = &parent_queue->parent_queue_state_lock; - hba = (struct spfc_hba_info *)parent_sq->hba; - FC_CHECK_RETURN_VOID(hba); - FC_CHECK_RETURN_VOID(parent_queue); - - spin_lock_irqsave(prtq_state_lock, flag); - if (parent_queue->offload_state != SPFC_QUEUE_STATE_DESTROYING) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) sq rport index(0x%x) is not destroying status,offloadsts is %d", - hba->port_cfg.port_id, parent_sq->rport_index, - parent_queue->offload_state); - spin_unlock_irqrestore(prtq_state_lock, flag); - return; - } - - if (parent_queue->parent_ctx.cqm_parent_ctx_obj) { - ctx_dw = (u32 *)((void *)(parent_queue->parent_ctx.cqm_parent_ctx_obj->vaddr)); - ctx_flush_done = ctx_dw[SPFC_CTXT_FLUSH_DONE_DW_POS] & SPFC_CTXT_FLUSH_DONE_MASK_BE; - if (ctx_flush_done == 0) { - spin_unlock_irqrestore(prtq_state_lock, flag); - - if (atomic_read(&parent_queue->parent_sq_info.flush_done_wait_cnt) < - SPFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_CNT) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[info]Port(0x%x) sq rport index(0x%x) wait flush done timeout %d times", - 
hba->port_cfg.port_id, parent_sq->rport_index, - atomic_read(&(parent_queue->parent_sq_info - .flush_done_wait_cnt))); - - atomic_inc(&parent_queue->parent_sq_info.flush_done_wait_cnt); - - /* Delay Free Sq info */ - ret = queue_delayed_work(hba->work_queue, - &(parent_queue->parent_sq_info - .flush_done_timeout_work), - (ulong)msecs_to_jiffies((u32) - SPFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_MS)); - if (!ret) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) rport(0x%x) queue delayed work failed ret:%d", - hba->port_cfg.port_id, - parent_sq->rport_index, ret); - SPFC_HBA_STAT(hba, sq_state); - } - - return; - } - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) sq rport index(0x%x) has wait flush done %d times,do not free sq", - hba->port_cfg.port_id, - parent_sq->rport_index, - atomic_read(&(parent_queue->parent_sq_info - .flush_done_wait_cnt))); - - SPFC_HBA_STAT(hba, SPFC_STAT_CTXT_FLUSH_DONE); - return; - } - } - - spin_unlock_irqrestore(prtq_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) sq rport index(0x%x) flush done bit is ok,free sq now", - hba->port_cfg.port_id, parent_sq->rport_index); - - spfc_free_parent_queue_info(hba, parent_queue); -} - -static void spfc_free_parent_sq(struct spfc_hba_info *hba, - struct spfc_parent_queue_info *parq_info) -{ -#define SPFC_WAIT_PRT_CTX_FUSH_DONE_LOOP_TIMES 100 - u32 ctx_flush_done = 0; - u32 *ctx_dw = NULL; - struct spfc_parent_sq_info *sq_info = NULL; - u32 uidelaycnt = 0; - struct list_head *list = NULL; - struct spfc_suspend_sqe_info *suspend_sqe = NULL; - - sq_info = &parq_info->parent_sq_info; - - while (!list_empty(&sq_info->suspend_sqe_list)) { - list = UNF_OS_LIST_NEXT(&sq_info->suspend_sqe_list); - list_del(list); - suspend_sqe = list_entry(list, struct spfc_suspend_sqe_info, list_sqe_entry); - if (suspend_sqe) { - if (!cancel_delayed_work(&suspend_sqe->timeout_work)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[warn]reset worker timer maybe timeout"); - } - - kfree(suspend_sqe); - } - } - - /* Free data cos */ - spfc_update_cos_rport_cnt(hba, parq_info->queue_data_cos); - - if (parq_info->parent_ctx.cqm_parent_ctx_obj) { - ctx_dw = (u32 *)((void *)(parq_info->parent_ctx.cqm_parent_ctx_obj->vaddr)); - ctx_flush_done = ctx_dw[SPFC_CTXT_FLUSH_DONE_DW_POS] & SPFC_CTXT_FLUSH_DONE_MASK_BE; - mb(); - if (parq_info->offload_state == SPFC_QUEUE_STATE_DESTROYING && - ctx_flush_done == 0) { - do { - ctx_flush_done = ctx_dw[SPFC_CTXT_FLUSH_DONE_DW_POS] & - SPFC_CTXT_FLUSH_DONE_MASK_BE; - mb(); - if (ctx_flush_done != 0) - break; - uidelaycnt++; - } while (uidelaycnt < SPFC_WAIT_PRT_CTX_FUSH_DONE_LOOP_TIMES); - - if (ctx_flush_done == 0) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Port(0x%x) Rport(0x%x) flush done is not set", - hba->port_cfg.port_id, - sq_info->rport_index); - } - } - - cqm3_object_delete(&parq_info->parent_ctx.cqm_parent_ctx_obj->object); - parq_info->parent_ctx.cqm_parent_ctx_obj = NULL; - } - - spfc_invalid_parent_sq(sq_info); -} - -u32 spfc_alloc_parent_sq(struct spfc_hba_info *hba, - struct spfc_parent_queue_info *parq_info, - struct unf_port_info *rport_info) -{ - struct spfc_parent_sq_info *sq_ctrl = NULL; - struct cqm_qpc_mpt *prnt_ctx = NULL; - ulong flag = 0; - - /* Craete parent context via CQM */ - prnt_ctx = cqm3_object_qpc_mpt_create(hba->dev_handle, SERVICE_T_FC, - CQM_OBJECT_SERVICE_CTX, SPFC_CNTX_SIZE_256B, - parq_info, CQM_INDEX_INVALID); - if (!prnt_ctx) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Create parent context failed, 
CQM_INDEX is 0x%x", - CQM_INDEX_INVALID); - goto parent_create_fail; - } - - parq_info->parent_ctx.cqm_parent_ctx_obj = prnt_ctx; - /* Initialize struct spfc_parent_sq_info */ - sq_ctrl = &parq_info->parent_sq_info; - sq_ctrl->hba = (void *)hba; - sq_ctrl->rport_index = rport_info->rport_index; - sq_ctrl->sqn_base = rport_info->sqn_base; - sq_ctrl->context_id = prnt_ctx->xid; - sq_ctrl->sq_queue_id = SPFC_QID_SQ; - sq_ctrl->cache_id = INVALID_VALUE32; - sq_ctrl->local_port_id = INVALID_VALUE32; - sq_ctrl->remote_port_id = INVALID_VALUE32; - sq_ctrl->sq_in_sess_rst = false; - atomic_set(&sq_ctrl->sq_valid, true); - sq_ctrl->del_start_jiff = INVALID_VALUE64; - sq_ctrl->service_type = SPFC_SERVICE_TYPE_FC; - sq_ctrl->vport_id = (u8)rport_info->qos_level; - sq_ctrl->cs_ctrl = (u8)rport_info->cs_ctrl; - sq_ctrl->sirt_dif_control.protect_opcode = UNF_DIF_ACTION_NONE; - sq_ctrl->need_offloaded = INVALID_VALUE8; - atomic_set(&sq_ctrl->flush_done_wait_cnt, 0); - - /* Check whether the HBA is in the Linkdown state. Note that - * offload_state must be in the non-FREE state. - */ - spin_lock_irqsave(&hba->flush_state_lock, flag); - sq_ctrl->port_in_flush = hba->in_flushing; - spin_unlock_irqrestore(&hba->flush_state_lock, flag); - memset(sq_ctrl->io_stat, 0, sizeof(sq_ctrl->io_stat)); - - INIT_DELAYED_WORK(&sq_ctrl->del_work, spfc_parent_sq_opreate_timeout); - INIT_DELAYED_WORK(&sq_ctrl->flush_done_timeout_work, - spfc_parent_sq_wait_flush_done_timeout); - INIT_LIST_HEAD(&sq_ctrl->suspend_sqe_list); - - memset(&sq_ctrl->delay_sqe, 0, sizeof(struct spfc_delay_sqe_ctrl_info)); - - return RETURN_OK; - -parent_create_fail: - parq_info->parent_ctx.cqm_parent_ctx_obj = NULL; - - return UNF_RETURN_ERROR; -} - -static void -spfc_init_prnt_ctxt_scq_qinfo(void *hba, - struct spfc_parent_queue_info *prnt_qinfo) -{ - u32 resp_scqn = 0; - struct spfc_parent_context *ctx = NULL; - struct spfc_scq_qinfo *resp_prnt_scq_ctxt = NULL; - struct spfc_queue_info_bus queue_bus; - - /* Obtains the queue id of the scq returned by the CQM when the SCQ is - * created - */ - resp_scqn = prnt_qinfo->parent_sts_scq_info.cqm_queue_id; - - /* Obtains the Parent Context address */ - ctx = (struct spfc_parent_context *)(prnt_qinfo->parent_ctx.parent_ctx); - - resp_prnt_scq_ctxt = &ctx->resp_scq_qinfo; - resp_prnt_scq_ctxt->hw_scqc_config.info.rq_th2_preld_cache_num = wqe_pre_load; - resp_prnt_scq_ctxt->hw_scqc_config.info.rq_th1_preld_cache_num = wqe_pre_load; - resp_prnt_scq_ctxt->hw_scqc_config.info.rq_th0_preld_cache_num = wqe_pre_load; - resp_prnt_scq_ctxt->hw_scqc_config.info.rq_min_preld_cache_num = wqe_pre_load; - resp_prnt_scq_ctxt->hw_scqc_config.info.sq_th2_preld_cache_num = wqe_pre_load; - resp_prnt_scq_ctxt->hw_scqc_config.info.sq_th1_preld_cache_num = wqe_pre_load; - resp_prnt_scq_ctxt->hw_scqc_config.info.sq_th0_preld_cache_num = wqe_pre_load; - resp_prnt_scq_ctxt->hw_scqc_config.info.sq_min_preld_cache_num = wqe_pre_load; - resp_prnt_scq_ctxt->hw_scqc_config.info.scq_n = (u64)resp_scqn; - resp_prnt_scq_ctxt->hw_scqc_config.info.parity = 0; - - memset(&queue_bus, 0, sizeof(struct spfc_queue_info_bus)); - queue_bus.bus[ARRAY_INDEX_0] = resp_prnt_scq_ctxt->hw_scqc_config.pctxt_val1; - resp_prnt_scq_ctxt->hw_scqc_config.info.parity = spfc_get_parity_value(queue_bus.bus, - SPFC_HW_SCQC_BUS_ROW, - SPFC_HW_SCQC_BUS_COL - ); - spfc_cpu_to_big64(resp_prnt_scq_ctxt, sizeof(struct spfc_scq_qinfo)); -} - -static void -spfc_init_prnt_ctxt_srq_qinfo(void *handle, struct spfc_parent_queue_info *prnt_qinfo) -{ - struct 
spfc_parent_context *ctx = NULL; - struct cqm_queue *cqm_els_srq = NULL; - struct spfc_parent_sq_info *sq = NULL; - struct spfc_queue_info_bus queue_bus; - struct spfc_hba_info *hba = NULL; - - hba = (struct spfc_hba_info *)handle; - /* Obtains the SQ address */ - sq = &prnt_qinfo->parent_sq_info; - - /* Obtains the Parent Context address */ - ctx = (struct spfc_parent_context *)(prnt_qinfo->parent_ctx.parent_ctx); - - cqm_els_srq = hba->els_srq_info.cqm_srq_info; - - /* Initialize the Parent SRQ INFO used when the ELS is received */ - ctx->els_srq_info.srqc_gpa = cqm_els_srq->q_ctx_paddr >> UNF_SHIFT_4; - - memset(&queue_bus, 0, sizeof(struct spfc_queue_info_bus)); - queue_bus.bus[ARRAY_INDEX_0] = ctx->els_srq_info.srqc_gpa; - ctx->els_srq_info.parity = spfc_get_parity_value(queue_bus.bus, SPFC_HW_SRQC_BUS_ROW, - SPFC_HW_SRQC_BUS_COL); - spfc_cpu_to_big64(&ctx->els_srq_info, sizeof(struct spfc_srq_qinfo)); - - ctx->imm_srq_info.srqc_gpa = 0; - sq->srq_ctx_addr = 0; -} - -static u16 spfc_get_max_sequence_id(void) -{ - return SPFC_HRQI_SEQ_ID_MAX; -} - -static void spfc_init_prnt_rsvd_qinfo(struct spfc_parent_queue_info *prnt_qinfo) -{ - struct spfc_parent_context *ctx = NULL; - struct spfc_hw_rsvd_queue *hw_rsvd_qinfo = NULL; - u16 max_seq = 0; - u32 each = 0, seq_index = 0; - - /* Obtains the Parent Context address */ - ctx = (struct spfc_parent_context *)(prnt_qinfo->parent_ctx.parent_ctx); - hw_rsvd_qinfo = (struct spfc_hw_rsvd_queue *)&ctx->hw_rsvdq; - memset(hw_rsvd_qinfo->seq_id_bitmap, 0, sizeof(hw_rsvd_qinfo->seq_id_bitmap)); - - max_seq = spfc_get_max_sequence_id(); - - /* special set for sequence id 0, which is always kept by ucode for - * sending fcp-cmd - */ - hw_rsvd_qinfo->seq_id_bitmap[SPFC_HRQI_SEQ_SEPCIAL_ID] = 1; - seq_index = SPFC_HRQI_SEQ_SEPCIAL_ID - (max_seq >> SPFC_HRQI_SEQ_INDEX_SHIFT); - - /* Set the unavailable mask to start from max + 1 */ - for (each = (max_seq % SPFC_HRQI_SEQ_INDEX_MAX) + 1; - each < SPFC_HRQI_SEQ_INDEX_MAX; each++) { - hw_rsvd_qinfo->seq_id_bitmap[seq_index] |= ((u64)0x1) << each; - } - - hw_rsvd_qinfo->seq_id_bitmap[seq_index] = - cpu_to_be64(hw_rsvd_qinfo->seq_id_bitmap[seq_index]); - - /* sepcial set for sequence id 0 */ - if (seq_index != SPFC_HRQI_SEQ_SEPCIAL_ID) - hw_rsvd_qinfo->seq_id_bitmap[SPFC_HRQI_SEQ_SEPCIAL_ID] = - cpu_to_be64(hw_rsvd_qinfo->seq_id_bitmap[SPFC_HRQI_SEQ_SEPCIAL_ID]); - - for (each = 0; each < seq_index; each++) - hw_rsvd_qinfo->seq_id_bitmap[each] = SPFC_HRQI_SEQ_INVALID_ID; - - /* no matter what the range of seq id, last_req_seq_id is fixed value - * 0xff - */ - hw_rsvd_qinfo->wd0.last_req_seq_id = SPFC_HRQI_SEQ_ID_MAX; - hw_rsvd_qinfo->wd0.xid = prnt_qinfo->parent_sq_info.context_id; - - *(u64 *)&hw_rsvd_qinfo->wd0 = - cpu_to_be64(*(u64 *)&hw_rsvd_qinfo->wd0); -} - -/* - *Function Name : spfc_init_prnt_sw_section_info - *Function Description: Initialize the SW Section area that can be accessed by - * the Parent Context uCode. 
- *Input Parameters : *hba, - * *prnt_qinfo - *Output Parameters : N/A - *Return Type : void - */ -static void spfc_init_prnt_sw_section_info(struct spfc_hba_info *hba, - struct spfc_parent_queue_info *prnt_qinfo) -{ -#define SPFC_VLAN_ENABLE (1) -#define SPFC_MB_PER_KB 1024 - u16 rport_index; - struct spfc_parent_context *ctx = NULL; - struct spfc_sw_section *sw_setion = NULL; - u16 total_scq_num = SPFC_TOTAL_SCQ_NUM; - u32 queue_id; - dma_addr_t queue_hdr_paddr; - - /* Obtains the Parent Context address */ - ctx = (struct spfc_parent_context *)(prnt_qinfo->parent_ctx.parent_ctx); - sw_setion = &ctx->sw_section; - - /* xid+vPortId */ - sw_setion->sw_ctxt_vport_xid.xid = prnt_qinfo->parent_sq_info.context_id; - spfc_cpu_to_big32(&sw_setion->sw_ctxt_vport_xid, sizeof(sw_setion->sw_ctxt_vport_xid)); - - /* conn_id */ - rport_index = SPFC_LSW(prnt_qinfo->parent_sq_info.rport_index); - sw_setion->conn_id = cpu_to_be16(rport_index); - - /* Immediate parameters */ - sw_setion->immi_rq_page_size = 0; - - /* Parent SCQ INFO used for sending packets to the Cmnd */ - sw_setion->scq_num_rcv_cmd = cpu_to_be16((u16)prnt_qinfo->parent_cmd_scq_info.cqm_queue_id); - sw_setion->scq_num_max_scqn = cpu_to_be16(total_scq_num); - - /* sw_ctxt_misc */ - sw_setion->sw_ctxt_misc.dw.srv_type = prnt_qinfo->parent_sq_info.service_type; - sw_setion->sw_ctxt_misc.dw.port_id = hba->port_index; - - /* only the VN2VF mode is supported */ - sw_setion->sw_ctxt_misc.dw.vlan_id = 0; - spfc_cpu_to_big32(&sw_setion->sw_ctxt_misc.pctxt_val0, - sizeof(sw_setion->sw_ctxt_misc.pctxt_val0)); - - /* Configuring the combo length */ - sw_setion->per_xmit_data_size = cpu_to_be32(combo_length * SPFC_MB_PER_KB); - sw_setion->sw_ctxt_config.dw.work_mode = SPFC_PORT_MODE_INI; - sw_setion->sw_ctxt_config.dw.status = FC_PARENT_STATUS_INVALID; - sw_setion->sw_ctxt_config.dw.cos = 0; - sw_setion->sw_ctxt_config.dw.oq_cos_cmd = SPFC_PACKET_COS_FC_CMD; - sw_setion->sw_ctxt_config.dw.oq_cos_data = prnt_qinfo->queue_data_cos; - sw_setion->sw_ctxt_config.dw.priority = 0; - sw_setion->sw_ctxt_config.dw.vlan_enable = SPFC_VLAN_ENABLE; - sw_setion->sw_ctxt_config.dw.sgl_num = dif_sgl_mode; - spfc_cpu_to_big32(&sw_setion->sw_ctxt_config.pctxt_val1, - sizeof(sw_setion->sw_ctxt_config.pctxt_val1)); - spfc_cpu_to_big32(&sw_setion->immi_dif_info, sizeof(sw_setion->immi_dif_info)); - - queue_id = prnt_qinfo->parent_cmd_scq_info.local_queue_id; - queue_hdr_paddr = hba->scq_info[queue_id].cqm_scq_info->q_header_paddr; - sw_setion->cmd_scq_gpa_h = SPFC_HIGH_32_BITS(queue_hdr_paddr); - sw_setion->cmd_scq_gpa_l = SPFC_LOW_32_BITS(queue_hdr_paddr); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[info]Port(0x%x) RPort(0x%x) CmdLocalScqn(0x%x) QheaderGpaH(0x%x) QheaderGpaL(0x%x)", - hba->port_cfg.port_id, prnt_qinfo->parent_sq_info.rport_index, queue_id, - sw_setion->cmd_scq_gpa_h, sw_setion->cmd_scq_gpa_l); - - spfc_cpu_to_big32(&sw_setion->cmd_scq_gpa_h, sizeof(sw_setion->cmd_scq_gpa_h)); - spfc_cpu_to_big32(&sw_setion->cmd_scq_gpa_l, sizeof(sw_setion->cmd_scq_gpa_l)); -} - -static void spfc_init_parent_context(void *hba, struct spfc_parent_queue_info *prnt_qinfo) -{ - struct spfc_parent_context *ctx = NULL; - - ctx = (struct spfc_parent_context *)(prnt_qinfo->parent_ctx.parent_ctx); - - /* Initialize Parent Context */ - memset(ctx, 0, SPFC_CNTX_SIZE_256B); - - /* Initialize the Queue Info hardware area */ - spfc_init_prnt_ctxt_scq_qinfo(hba, prnt_qinfo); - spfc_init_prnt_ctxt_srq_qinfo(hba, prnt_qinfo); - spfc_init_prnt_rsvd_qinfo(prnt_qinfo); - - /* Initialize 
Software Section */ - spfc_init_prnt_sw_section_info(hba, prnt_qinfo); -} - -void spfc_map_shared_queue_qid(struct spfc_hba_info *hba, - struct spfc_parent_queue_info *parent_queue_info, - u32 rport_index) -{ - u32 cmd_scqn_local = 0; - u32 sts_scqn_local = 0; - - /* The SCQ is used for each connection based on the balanced * - * distribution of commands and responses - */ - cmd_scqn_local = SPFC_RPORTID_TO_CMD_SCQN(rport_index); - sts_scqn_local = SPFC_RPORTID_TO_STS_SCQN(rport_index); - parent_queue_info->parent_cmd_scq_info.local_queue_id = cmd_scqn_local; - parent_queue_info->parent_sts_scq_info.local_queue_id = sts_scqn_local; - parent_queue_info->parent_cmd_scq_info.cqm_queue_id = - hba->scq_info[cmd_scqn_local].scqn; - parent_queue_info->parent_sts_scq_info.cqm_queue_id = - hba->scq_info[sts_scqn_local].scqn; - - /* Each session share with immediate SRQ and ElsSRQ */ - parent_queue_info->parent_els_srq_info.local_queue_id = 0; - parent_queue_info->parent_els_srq_info.cqm_queue_id = hba->els_srq_info.srqn; - - /* Allocate fcp data cos value */ - parent_queue_info->queue_data_cos = spfc_map_fcp_data_cos(hba); - - /* Allocate Parent SQ vPort */ - parent_queue_info->parent_sq_info.vport_id += parent_queue_info->queue_vport_id; -} - -u32 spfc_send_session_enable(struct spfc_hba_info *hba, struct unf_port_info *rport_info) -{ - struct spfc_parent_queue_info *parent_queue_info = NULL; - dma_addr_t ctx_phy_addr = 0; - void *ctx_addr = NULL; - union spfc_cmdqe session_enable; - u32 ret = UNF_RETURN_ERROR; - struct spfc_parent_context *ctx = NULL; - struct spfc_sw_section *sw_setion = NULL; - struct spfc_host_keys key; - u32 tx_mfs = 2048; - u32 edtov_timer = 2000; - ulong flag = 0; - spinlock_t *prtq_state_lock = NULL; - u32 index; - - memset(&session_enable, 0, sizeof(union spfc_cmdqe)); - memset(&key, 0, sizeof(struct spfc_host_keys)); - index = rport_info->rport_index; - parent_queue_info = &hba->parent_queue_mgr->parent_queue[index]; - prtq_state_lock = &parent_queue_info->parent_queue_state_lock; - spin_lock_irqsave(prtq_state_lock, flag); - - ctx = (struct spfc_parent_context *)(parent_queue_info->parent_ctx.parent_ctx); - sw_setion = &ctx->sw_section; - - sw_setion->tx_mfs = cpu_to_be16((u16)(tx_mfs)); - sw_setion->e_d_tov_timer_val = cpu_to_be32(edtov_timer); - - spfc_big_to_cpu32(&sw_setion->sw_ctxt_misc.pctxt_val0, - sizeof(sw_setion->sw_ctxt_misc.pctxt_val0)); - sw_setion->sw_ctxt_misc.dw.port_id = SPFC_GET_NETWORK_PORT_ID(hba); - spfc_cpu_to_big32(&sw_setion->sw_ctxt_misc.pctxt_val0, - sizeof(sw_setion->sw_ctxt_misc.pctxt_val0)); - - spfc_big_to_cpu32(&sw_setion->sw_ctxt_config.pctxt_val1, - sizeof(sw_setion->sw_ctxt_config.pctxt_val1)); - spfc_cpu_to_big32(&sw_setion->sw_ctxt_config.pctxt_val1, - sizeof(sw_setion->sw_ctxt_config.pctxt_val1)); - - parent_queue_info->parent_sq_info.rport_index = rport_info->rport_index; - parent_queue_info->parent_sq_info.local_port_id = rport_info->local_nport_id; - parent_queue_info->parent_sq_info.remote_port_id = rport_info->nport_id; - parent_queue_info->parent_sq_info.context_id = - parent_queue_info->parent_ctx.cqm_parent_ctx_obj->xid; - - /* Fill in contex to the chip */ - ctx_phy_addr = parent_queue_info->parent_ctx.cqm_parent_ctx_obj->paddr; - ctx_addr = parent_queue_info->parent_ctx.cqm_parent_ctx_obj->vaddr; - memcpy(ctx_addr, parent_queue_info->parent_ctx.parent_ctx, - sizeof(struct spfc_parent_context)); - session_enable.session_enable.wd0.task_type = SPFC_TASK_T_SESS_EN; - session_enable.session_enable.wd2.conn_id = 
rport_info->rport_index; - session_enable.session_enable.wd2.scqn = hba->default_scqn; - session_enable.session_enable.wd3.xid_p = - parent_queue_info->parent_ctx.cqm_parent_ctx_obj->xid; - session_enable.session_enable.context_gpa_hi = SPFC_HIGH_32_BITS(ctx_phy_addr); - session_enable.session_enable.context_gpa_lo = SPFC_LOW_32_BITS(ctx_phy_addr); - - spin_unlock_irqrestore(prtq_state_lock, flag); - - key.wd3.sid_2 = (rport_info->local_nport_id & SPFC_KEY_WD3_SID_2_MASK) >> UNF_SHIFT_16; - key.wd3.sid_1 = (rport_info->local_nport_id & SPFC_KEY_WD3_SID_1_MASK) >> UNF_SHIFT_8; - key.wd4.sid_0 = rport_info->local_nport_id & SPFC_KEY_WD3_SID_0_MASK; - key.wd4.did_0 = rport_info->nport_id & SPFC_KEY_WD4_DID_0_MASK; - key.wd4.did_1 = (rport_info->nport_id & SPFC_KEY_WD4_DID_1_MASK) >> UNF_SHIFT_8; - key.wd4.did_2 = (rport_info->nport_id & SPFC_KEY_WD4_DID_2_MASK) >> UNF_SHIFT_16; - key.wd5.host_id = 0; - key.wd5.port_id = hba->port_index; - - memcpy(&session_enable.session_enable.keys, &key, sizeof(struct spfc_host_keys)); - - memcpy((void *)(uintptr_t)session_enable.session_enable.context, - parent_queue_info->parent_ctx.parent_ctx, - sizeof(struct spfc_parent_context)); - spfc_big_to_cpu32((void *)(uintptr_t)session_enable.session_enable.context, - sizeof(struct spfc_parent_context)); - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_MAJOR, - "[info] xid:0x%x, sid:0x%x,did:0x%x parentcontext:", - parent_queue_info->parent_ctx.cqm_parent_ctx_obj->xid, - rport_info->local_nport_id, rport_info->nport_id); - - ret = spfc_root_cmdq_enqueue(hba, &session_enable, sizeof(session_enable.session_enable)); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ERR, - "[err]RootCMDQEnqueue Error, free default session parent resource"); - return UNF_RETURN_ERROR; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) send default session enable success,rport index(0x%x),context id(0x%x) SID=(0x%x), DID=(0x%x)", - hba->port_cfg.port_id, rport_info->rport_index, - parent_queue_info->parent_sq_info.context_id, - rport_info->local_nport_id, rport_info->nport_id); - - return RETURN_OK; -} - -u32 spfc_alloc_parent_resource(void *handle, struct unf_port_info *rport_info) -{ - u32 ret = UNF_RETURN_ERROR; - struct spfc_hba_info *hba = NULL; - struct spfc_parent_queue_info *parent_queue_info = NULL; - ulong flag = 0; - spinlock_t *prtq_state_lock = NULL; - u32 index; - - FC_CHECK_RETURN_VALUE(handle, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport_info, UNF_RETURN_ERROR); - - hba = (struct spfc_hba_info *)handle; - if (!hba->parent_queue_mgr) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) cannot find parent queue pool", - hba->port_cfg.port_id); - - return UNF_RETURN_ERROR; - } - - index = rport_info->rport_index; - if (index >= UNF_SPFC_MAXRPORT_NUM) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) allocate parent resource failed, invlaid rport index(0x%x),rport nportid(0x%x)", - hba->port_cfg.port_id, index, - rport_info->nport_id); - - return UNF_RETURN_ERROR; - } - - parent_queue_info = &hba->parent_queue_mgr->parent_queue[index]; - prtq_state_lock = &parent_queue_info->parent_queue_state_lock; - spin_lock_irqsave(prtq_state_lock, flag); - - if (parent_queue_info->offload_state != SPFC_QUEUE_STATE_FREE) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) allocate parent resource failed, invlaid rport index(0x%x),rport nportid(0x%x), offload state(0x%x)", - hba->port_cfg.port_id, index, rport_info->nport_id, - parent_queue_info->offload_state); - - 
spin_unlock_irqrestore(prtq_state_lock, flag); - return UNF_RETURN_ERROR; - } - - parent_queue_info->offload_state = SPFC_QUEUE_STATE_INITIALIZED; - /* Create Parent Context and Link List SQ */ - ret = spfc_alloc_parent_sq(hba, parent_queue_info, rport_info); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "Port(0x%x) alloc session resoure failed.rport index(0x%x),rport nportid(0x%x).", - hba->port_cfg.port_id, index, - rport_info->nport_id); - - parent_queue_info->offload_state = SPFC_QUEUE_STATE_FREE; - spfc_invalid_parent_sq(&parent_queue_info->parent_sq_info); - spin_unlock_irqrestore(prtq_state_lock, flag); - - return UNF_RETURN_ERROR; - } - - /* Allocate the corresponding queue xid to each parent */ - spfc_map_shared_queue_qid(hba, parent_queue_info, rport_info->rport_index); - - /* Initialize Parent Context, including hardware area and ucode area */ - spfc_init_parent_context(hba, parent_queue_info); - - spin_unlock_irqrestore(prtq_state_lock, flag); - - /* Only default enable session obviously, other will enable secertly */ - if (unlikely(rport_info->rport_index == SPFC_DEFAULT_RPORT_INDEX)) - return spfc_send_session_enable(handle, rport_info); - - parent_queue_info->parent_sq_info.local_port_id = rport_info->local_nport_id; - parent_queue_info->parent_sq_info.remote_port_id = rport_info->nport_id; - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) allocate parent sq success,rport index(0x%x),rport nportid(0x%x),context id(0x%x)", - hba->port_cfg.port_id, rport_info->rport_index, - rport_info->nport_id, - parent_queue_info->parent_sq_info.context_id); - - return ret; -} - -u32 spfc_free_parent_resource(void *handle, struct unf_port_info *rport_info) -{ - struct spfc_parent_queue_info *parent_queue_info = NULL; - ulong flag = 0; - ulong rst_flag = 0; - u32 ret = UNF_RETURN_ERROR; - enum spfc_session_reset_mode mode = SPFC_SESS_RST_DELETE_IO_CONN_BOTH; - struct spfc_hba_info *hba = NULL; - spinlock_t *prtq_state_lock = NULL; - spinlock_t *sq_enq_lock = NULL; - u32 index; - - FC_CHECK_RETURN_VALUE(handle, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(rport_info, UNF_RETURN_ERROR); - - hba = (struct spfc_hba_info *)handle; - if (!hba->parent_queue_mgr) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[warn]Port(0x%x) cannot find parent queue pool", - hba->port_cfg.port_id); - - return UNF_RETURN_ERROR; - } - - /* get parent queue info (by rport index) */ - if (rport_info->rport_index >= UNF_SPFC_MAXRPORT_NUM) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[warn]Port(0x%x) free parent resource failed, invlaid rport_index(%u) rport_nport_id(0x%x)", - hba->port_cfg.port_id, rport_info->rport_index, rport_info->nport_id); - - return UNF_RETURN_ERROR; - } - - index = rport_info->rport_index; - parent_queue_info = &hba->parent_queue_mgr->parent_queue[index]; - prtq_state_lock = &parent_queue_info->parent_queue_state_lock; - sq_enq_lock = &parent_queue_info->parent_sq_info.parent_sq_enqueue_lock; - - spin_lock_irqsave(prtq_state_lock, flag); - /* 1. 
for has been offload */ - if (parent_queue_info->offload_state == SPFC_QUEUE_STATE_OFFLOADED) { - parent_queue_info->offload_state = SPFC_QUEUE_STATE_DESTROYING; - spin_unlock_irqrestore(prtq_state_lock, flag); - - /* set reset state, in order to prevent I/O in_SQ */ - spin_lock_irqsave(sq_enq_lock, rst_flag); - parent_queue_info->parent_sq_info.sq_in_sess_rst = true; - spin_unlock_irqrestore(sq_enq_lock, rst_flag); - - /* check pcie device state */ - if (!hba->dev_present) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) hba is not present, free directly. rport_index(0x%x:0x%x) local_nportid(0x%x) remote_nportid(0x%x:0x%x)", - hba->port_cfg.port_id, rport_info->rport_index, - parent_queue_info->parent_sq_info.rport_index, - parent_queue_info->parent_sq_info.local_port_id, - rport_info->nport_id, - parent_queue_info->parent_sq_info.remote_port_id); - - spfc_free_parent_queue_info(hba, parent_queue_info); - return RETURN_OK; - } - - parent_queue_info->parent_sq_info.del_start_jiff = jiffies; - (void)queue_delayed_work(hba->work_queue, - &parent_queue_info->parent_sq_info.del_work, - (ulong)msecs_to_jiffies((u32) - SPFC_SQ_DEL_STAGE_TIMEOUT_MS)); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) begin to reset parent session, rport_index(0x%x:0x%x) local_nportid(0x%x) remote_nportid(0x%x:0x%x)", - hba->port_cfg.port_id, rport_info->rport_index, - parent_queue_info->parent_sq_info.rport_index, - parent_queue_info->parent_sq_info.local_port_id, - rport_info->nport_id, - parent_queue_info->parent_sq_info.remote_port_id); - /* Forcibly set both mode */ - mode = SPFC_SESS_RST_DELETE_IO_CONN_BOTH; - ret = spfc_send_session_rst_cmd(hba, parent_queue_info, mode); - - return ret; - } else if (parent_queue_info->offload_state == SPFC_QUEUE_STATE_INITIALIZED) { - /* 2. for resource has been alloc, but not offload */ - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) parent sq is not offloaded, free directly. rport_index(0x%x:0x%x) local_nportid(0x%x) remote_nportid(0x%x:0x%x)", - hba->port_cfg.port_id, rport_info->rport_index, - parent_queue_info->parent_sq_info.rport_index, - parent_queue_info->parent_sq_info.local_port_id, - rport_info->nport_id, - parent_queue_info->parent_sq_info.remote_port_id); - - spin_unlock_irqrestore(prtq_state_lock, flag); - spfc_free_parent_queue_info(hba, parent_queue_info); - - return RETURN_OK; - } else if (parent_queue_info->offload_state == - SPFC_QUEUE_STATE_OFFLOADING) { - /* 3. for driver has offloading CMND to uCode */ - spfc_push_destroy_parent_queue_sqe(hba, parent_queue_info, rport_info); - spin_unlock_irqrestore(prtq_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) parent sq is offloading, push to delay free. 
rport_index(0x%x:0x%x) local_nportid(0x%x) remote_nportid(0x%x:0x%x)", - hba->port_cfg.port_id, rport_info->rport_index, - parent_queue_info->parent_sq_info.rport_index, - parent_queue_info->parent_sq_info.local_port_id, - rport_info->nport_id, - parent_queue_info->parent_sq_info.remote_port_id); - - return RETURN_OK; - } - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) parent sq is not created, do not need free state(0x%x) rport_index(0x%x:0x%x) local_nportid(0x%x) remote_nportid(0x%x:0x%x)", - hba->port_cfg.port_id, parent_queue_info->offload_state, - rport_info->rport_index, - parent_queue_info->parent_sq_info.rport_index, - parent_queue_info->parent_sq_info.local_port_id, - rport_info->nport_id, - parent_queue_info->parent_sq_info.remote_port_id); - - spin_unlock_irqrestore(prtq_state_lock, flag); - - return RETURN_OK; -} - -void spfc_free_parent_queue_mgr(void *handle) -{ - u32 index = 0; - struct spfc_parent_queue_mgr *parent_queue_mgr = NULL; - struct spfc_hba_info *hba = NULL; - - FC_CHECK_RETURN_VOID(handle); - - hba = (struct spfc_hba_info *)handle; - if (!hba->parent_queue_mgr) - return; - parent_queue_mgr = hba->parent_queue_mgr; - - for (index = 0; index < UNF_SPFC_MAXRPORT_NUM; index++) { - if (parent_queue_mgr->parent_queue[index].parent_ctx.parent_ctx) - parent_queue_mgr->parent_queue[index].parent_ctx.parent_ctx = NULL; - } - - if (parent_queue_mgr->parent_sq_buf_list.buflist) { - for (index = 0; index < parent_queue_mgr->parent_sq_buf_list.buf_num; index++) { - if (parent_queue_mgr->parent_sq_buf_list.buflist[index].paddr != 0) { - pci_unmap_single(hba->pci_dev, - parent_queue_mgr->parent_sq_buf_list - .buflist[index].paddr, - parent_queue_mgr->parent_sq_buf_list.buf_size, - DMA_BIDIRECTIONAL); - parent_queue_mgr->parent_sq_buf_list.buflist[index].paddr = 0; - } - kfree(parent_queue_mgr->parent_sq_buf_list.buflist[index].vaddr); - parent_queue_mgr->parent_sq_buf_list.buflist[index].vaddr = NULL; - } - - kfree(parent_queue_mgr->parent_sq_buf_list.buflist); - parent_queue_mgr->parent_sq_buf_list.buflist = NULL; - } - - vfree(parent_queue_mgr); - hba->parent_queue_mgr = NULL; -} - -void spfc_free_parent_queues(void *handle) -{ - u32 index = 0; - ulong flag = 0; - struct spfc_parent_queue_mgr *parent_queue_mgr = NULL; - struct spfc_hba_info *hba = NULL; - spinlock_t *prtq_state_lock = NULL; - - FC_CHECK_RETURN_VOID(handle); - - hba = (struct spfc_hba_info *)handle; - parent_queue_mgr = hba->parent_queue_mgr; - - for (index = 0; index < UNF_SPFC_MAXRPORT_NUM; index++) { - prtq_state_lock = &parent_queue_mgr->parent_queue[index].parent_queue_state_lock; - spin_lock_irqsave(prtq_state_lock, flag); - - if (SPFC_QUEUE_STATE_DESTROYING == - parent_queue_mgr->parent_queue[index].offload_state) { - spin_unlock_irqrestore(prtq_state_lock, flag); - - (void)cancel_delayed_work_sync(&parent_queue_mgr->parent_queue[index] - .parent_sq_info.del_work); - (void)cancel_delayed_work_sync(&parent_queue_mgr->parent_queue[index] - .parent_sq_info.flush_done_timeout_work); - - /* free parent queue */ - spfc_free_parent_queue_info(hba, &parent_queue_mgr->parent_queue[index]); - continue; - } - - spin_unlock_irqrestore(prtq_state_lock, flag); - } -} - -/* - *Function Name : spfc_alloc_parent_queue_mgr - *Function Description: Allocate and initialize parent queue manager. 
- *Input Parameters : *handle - *Output Parameters : N/A - *Return Type : void - */ -u32 spfc_alloc_parent_queue_mgr(void *handle) -{ - u32 index = 0; - struct spfc_parent_queue_mgr *parent_queue_mgr = NULL; - u32 buf_total_size; - u32 buf_num; - u32 alloc_idx; - u32 cur_buf_idx = 0; - u32 cur_buf_offset = 0; - u32 prt_ctx_size = sizeof(struct spfc_parent_context); - u32 buf_cnt_perhugebuf; - struct spfc_hba_info *hba = NULL; - u32 init_val = INVALID_VALUE32; - dma_addr_t paddr; - - FC_CHECK_RETURN_VALUE(handle, UNF_RETURN_ERROR); - - hba = (struct spfc_hba_info *)handle; - parent_queue_mgr = (struct spfc_parent_queue_mgr *)vmalloc(sizeof - (struct spfc_parent_queue_mgr)); - if (!parent_queue_mgr) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) cannot allocate queue manager", - hba->port_cfg.port_id); - - return UNF_RETURN_ERROR; - } - - hba->parent_queue_mgr = parent_queue_mgr; - memset(parent_queue_mgr, 0, sizeof(struct spfc_parent_queue_mgr)); - - for (index = 0; index < UNF_SPFC_MAXRPORT_NUM; index++) { - spin_lock_init(&parent_queue_mgr->parent_queue[index].parent_queue_state_lock); - parent_queue_mgr->parent_queue[index].offload_state = SPFC_QUEUE_STATE_FREE; - spin_lock_init(&(parent_queue_mgr->parent_queue[index] - .parent_sq_info.parent_sq_enqueue_lock)); - parent_queue_mgr->parent_queue[index].parent_cmd_scq_info.cqm_queue_id = init_val; - parent_queue_mgr->parent_queue[index].parent_sts_scq_info.cqm_queue_id = init_val; - parent_queue_mgr->parent_queue[index].parent_els_srq_info.cqm_queue_id = init_val; - parent_queue_mgr->parent_queue[index].parent_sq_info.del_start_jiff = init_val; - parent_queue_mgr->parent_queue[index].queue_vport_id = hba->vpid_start; - } - - buf_total_size = prt_ctx_size * UNF_SPFC_MAXRPORT_NUM; - parent_queue_mgr->parent_sq_buf_list.buf_size = buf_total_size > BUF_LIST_PAGE_SIZE ? - BUF_LIST_PAGE_SIZE : buf_total_size; - buf_cnt_perhugebuf = parent_queue_mgr->parent_sq_buf_list.buf_size / prt_ctx_size; - buf_num = UNF_SPFC_MAXRPORT_NUM % buf_cnt_perhugebuf ? 
- UNF_SPFC_MAXRPORT_NUM / buf_cnt_perhugebuf + 1 : - UNF_SPFC_MAXRPORT_NUM / buf_cnt_perhugebuf; - parent_queue_mgr->parent_sq_buf_list.buflist = - (struct buff_list *)kmalloc(buf_num * sizeof(struct buff_list), GFP_KERNEL); - parent_queue_mgr->parent_sq_buf_list.buf_num = buf_num; - - if (!parent_queue_mgr->parent_sq_buf_list.buflist) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Allocate QueuMgr buf list failed out of memory"); - goto free_parent_queue; - } - memset(parent_queue_mgr->parent_sq_buf_list.buflist, 0, buf_num * sizeof(struct buff_list)); - - for (alloc_idx = 0; alloc_idx < buf_num; alloc_idx++) { - parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].vaddr = - kmalloc(parent_queue_mgr->parent_sq_buf_list.buf_size, GFP_KERNEL); - if (!parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].vaddr) - goto free_parent_queue; - - memset(parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].vaddr, 0, - parent_queue_mgr->parent_sq_buf_list.buf_size); - - parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].paddr = - pci_map_single(hba->pci_dev, - parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].vaddr, - parent_queue_mgr->parent_sq_buf_list.buf_size, - DMA_BIDIRECTIONAL); - paddr = parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].paddr; - if (pci_dma_mapping_error(hba->pci_dev, paddr)) { - parent_queue_mgr->parent_sq_buf_list.buflist[alloc_idx].paddr = 0; - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[err]Map QueuMgr address failed"); - - goto free_parent_queue; - } - } - - for (index = 0; index < UNF_SPFC_MAXRPORT_NUM; index++) { - cur_buf_idx = index / buf_cnt_perhugebuf; - cur_buf_offset = prt_ctx_size * (index % buf_cnt_perhugebuf); - - parent_queue_mgr->parent_queue[index].parent_ctx.parent_ctx = - parent_queue_mgr->parent_sq_buf_list.buflist[cur_buf_idx].vaddr + - cur_buf_offset; - parent_queue_mgr->parent_queue[index].parent_ctx.parent_ctx_addr = - parent_queue_mgr->parent_sq_buf_list.buflist[cur_buf_idx].paddr + - cur_buf_offset; - } - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_INFO, - "[EVENT]Allocate bufnum:%u,buf_total_size:%u", buf_num, buf_total_size); - - return RETURN_OK; - -free_parent_queue: - spfc_free_parent_queue_mgr(hba); - return UNF_RETURN_ERROR; -} - -static void spfc_rlease_all_wqe_pages(struct spfc_hba_info *hba) -{ - u32 index; - struct spfc_wqe_page *wpg = NULL; - - FC_CHECK_RETURN_VOID((hba)); - - wpg = hba->sq_wpg_pool.wpg_pool_addr; - - for (index = 0; index < hba->sq_wpg_pool.wpg_cnt; index++) { - if (wpg->wpg_addr) { - dma_pool_free(hba->sq_wpg_pool.wpg_dma_pool, - wpg->wpg_addr, wpg->wpg_phy_addr); - wpg->wpg_addr = NULL; - wpg->wpg_phy_addr = 0; - } - - wpg++; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port[%u] free total %u wqepages", hba->port_index, - index); -} - -u32 spfc_alloc_parent_sq_wqe_page_pool(void *handle) -{ - u32 index = 0; - struct spfc_sq_wqepage_pool *wpg_pool = NULL; - struct spfc_wqe_page *wpg = NULL; - struct spfc_hba_info *hba = NULL; - - hba = (struct spfc_hba_info *)handle; - wpg_pool = &hba->sq_wpg_pool; - - INIT_LIST_HEAD(&wpg_pool->list_free_wpg_pool); - spin_lock_init(&wpg_pool->wpg_pool_lock); - atomic_set(&wpg_pool->wpg_in_use, 0); - - /* Calculate the number of Wqe Page required in the pool */ - wpg_pool->wpg_size = wqe_page_size; - wpg_pool->wpg_cnt = SPFC_MIN_WP_NUM * SPFC_MAX_SSQ_NUM + - ((hba->exi_count * SPFC_SQE_SIZE) / wpg_pool->wpg_size); - wpg_pool->wqe_per_wpg = wpg_pool->wpg_size / SPFC_SQE_SIZE; - - /* Craete DMA POOL */ - wpg_pool->wpg_dma_pool = 
dma_pool_create("spfc_wpg_pool", - &hba->pci_dev->dev, - wpg_pool->wpg_size, - SPFC_SQE_SIZE, 0); - if (!wpg_pool->wpg_dma_pool) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Cannot allocate SQ WqePage DMA pool"); - - goto out_create_dma_pool_err; - } - - /* Allocate arrays to record all WqePage addresses */ - wpg_pool->wpg_pool_addr = (struct spfc_wqe_page *)vmalloc(wpg_pool->wpg_cnt * - sizeof(struct spfc_wqe_page)); - if (!wpg_pool->wpg_pool_addr) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Allocate SQ WqePageAddr array failed"); - - goto out_alloc_wpg_array_err; - } - wpg = wpg_pool->wpg_pool_addr; - memset(wpg, 0, wpg_pool->wpg_cnt * sizeof(struct spfc_wqe_page)); - - for (index = 0; index < wpg_pool->wpg_cnt; index++) { - wpg->wpg_addr = dma_pool_alloc(wpg_pool->wpg_dma_pool, GFP_KERNEL, - (u64 *)&wpg->wpg_phy_addr); - if (!wpg->wpg_addr) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, - UNF_ERR, "[err]Dma pool allocated failed"); - break; - } - - /* To ensure security, clear the memory */ - memset(wpg->wpg_addr, 0, wpg_pool->wpg_size); - - /* Add to the idle linked list */ - INIT_LIST_HEAD(&wpg->entry_wpg); - list_add_tail(&wpg->entry_wpg, &wpg_pool->list_free_wpg_pool); - - wpg++; - } - /* ALL allocated successfully */ - if (wpg_pool->wpg_cnt == index) - return RETURN_OK; - - spfc_rlease_all_wqe_pages(hba); - vfree(wpg_pool->wpg_pool_addr); - wpg_pool->wpg_pool_addr = NULL; - -out_alloc_wpg_array_err: - dma_pool_destroy(wpg_pool->wpg_dma_pool); - wpg_pool->wpg_dma_pool = NULL; - -out_create_dma_pool_err: - return UNF_RETURN_ERROR; -} - -void spfc_free_parent_sq_wqe_page_pool(void *handle) -{ - struct spfc_hba_info *hba = NULL; - - FC_CHECK_RETURN_VOID((handle)); - hba = (struct spfc_hba_info *)handle; - spfc_rlease_all_wqe_pages(hba); - hba->sq_wpg_pool.wpg_cnt = 0; - - if (hba->sq_wpg_pool.wpg_pool_addr) { - vfree(hba->sq_wpg_pool.wpg_pool_addr); - hba->sq_wpg_pool.wpg_pool_addr = NULL; - } - - dma_pool_destroy(hba->sq_wpg_pool.wpg_dma_pool); - hba->sq_wpg_pool.wpg_dma_pool = NULL; -} - -static u32 spfc_parent_sq_ring_direct_wqe_doorbell(struct spfc_parent_ssq_info *sq, u8 *direct_wqe) -{ - u32 ret = RETURN_OK; - int ravl; - u16 pmsn; - u64 queue_hdr_db_val; - struct spfc_hba_info *hba; - - hba = (struct spfc_hba_info *)sq->hba; - pmsn = sq->last_pmsn; - - if (sq->cache_id == INVALID_VALUE32) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]SQ(0x%x) invalid cid", sq->context_id); - return RETURN_ERROR; - } - /* Fill Doorbell Record */ - queue_hdr_db_val = sq->queue_header->door_bell_record; - queue_hdr_db_val &= (u64)(~(0xFFFFFFFF)); - queue_hdr_db_val |= (u64)((u64)pmsn << UNF_SHIFT_16 | pmsn); - sq->queue_header->door_bell_record = - cpu_to_be64(queue_hdr_db_val); - - ravl = cqm_ring_direct_wqe_db_fc(hba->dev_handle, SERVICE_T_FC, direct_wqe); - if (unlikely(ravl != CQM_SUCCESS)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]SQ(0x%x) send DB failed", sq->context_id); - - ret = RETURN_ERROR; - } - - atomic_inc(&sq->sq_db_cnt); - - return ret; -} - -u32 spfc_parent_sq_ring_doorbell(struct spfc_parent_ssq_info *sq, u8 qos_level, u32 c) -{ - u32 ret = RETURN_OK; - int ravl; - u16 pmsn; - u8 pmsn_lo; - u8 pmsn_hi; - u64 db_val_qw; - struct spfc_hba_info *hba; - struct spfc_parent_sq_db door_bell; - - hba = (struct spfc_hba_info *)sq->hba; - pmsn = sq->last_pmsn; - /* Obtain the low 8 Bit of PMSN */ - pmsn_lo = (u8)(pmsn & SPFC_PMSN_MASK); - /* Obtain the high 8 Bit of PMSN */ - pmsn_hi = (u8)((pmsn >> UNF_SHIFT_8) & SPFC_PMSN_MASK); - door_bell.wd0.service_type = 
SPFC_LSW(sq->service_type); - door_bell.wd0.cos = 0; - /* c = 0 data type, c = 1 control type, two type are different in mqm */ - door_bell.wd0.c = c; - door_bell.wd0.arm = SPFC_DB_ARM_DISABLE; - door_bell.wd0.cntx_size = SPFC_CNTX_SIZE_T_256B; - door_bell.wd0.xid = sq->context_id; - door_bell.wd1.sm_data = sq->cache_id; - door_bell.wd1.qid = sq->sq_queue_id; - door_bell.wd1.pi_hi = (u32)pmsn_hi; - - if (sq->cache_id == INVALID_VALUE32) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]SQ(0x%x) invalid cid", sq->context_id); - return UNF_RETURN_ERROR; - } - /* Fill Doorbell Record */ - db_val_qw = sq->queue_header->door_bell_record; - db_val_qw &= (u64)(~(SPFC_DB_VAL_MASK)); - db_val_qw |= (u64)((u64)pmsn << UNF_SHIFT_16 | pmsn); - sq->queue_header->door_bell_record = cpu_to_be64(db_val_qw); - - /* ring doorbell */ - db_val_qw = *(u64 *)&door_bell; - ravl = cqm3_ring_hardware_db_fc(hba->dev_handle, SERVICE_T_FC, pmsn_lo, - (qos_level & SPFC_QOS_LEVEL_MASK), - db_val_qw); - if (unlikely(ravl != CQM_SUCCESS)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]SQ(0x%x) send DB(0x%llx) failed", - sq->context_id, db_val_qw); - - ret = UNF_RETURN_ERROR; - } - - /* Doorbell success counter */ - atomic_inc(&sq->sq_db_cnt); - - return ret; -} - -u32 spfc_direct_sq_enqueue(struct spfc_parent_ssq_info *ssq, struct spfc_sqe *io_sqe, u8 wqe_type) -{ - u32 ret = RETURN_OK; - u32 msn_wd = INVALID_VALUE32; - u16 link_wqe_msn = 0; - ulong flag = 0; - struct spfc_wqe_page *tail_wpg = NULL; - struct spfc_sqe *sqe_in_wp = NULL; - struct spfc_linkwqe *link_wqe = NULL; - struct spfc_linkwqe *link_wqe_last_part = NULL; - u64 wqe_gpa; - struct spfc_direct_wqe_db dre_door_bell; - - spin_lock_irqsave(&ssq->parent_sq_enqueue_lock, flag); - tail_wpg = SPFC_GET_SQ_TAIL(ssq); - if (ssq->wqe_offset == ssq->wqe_num_per_buf) { - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_INFO, - "[info]Ssq(0x%x), xid(0x%x) qid(0x%x) add wqepage at Pmsn(0x%x), sqe_minus_cqe_cnt(0x%x)", - ssq->sqn, ssq->context_id, ssq->sq_queue_id, - ssq->last_pmsn, - atomic_read(&ssq->sqe_minus_cqe_cnt)); - - link_wqe_msn = SPFC_MSN_DEC(ssq->last_pmsn); - link_wqe = (struct spfc_linkwqe *)spfc_get_wqe_page_entry(tail_wpg, - ssq->wqe_offset); - msn_wd = be32_to_cpu(link_wqe->val_wd1); - msn_wd |= ((u32)(link_wqe_msn & SPFC_MSNWD_L_MASK)); - msn_wd |= (((u32)(link_wqe_msn & SPFC_MSNWD_H_MASK)) << UNF_SHIFT_16); - link_wqe->val_wd1 = cpu_to_be32(msn_wd); - link_wqe_last_part = (struct spfc_linkwqe *)((u8 *)link_wqe + - SPFC_EXTEND_WQE_OFFSET); - link_wqe_last_part->val_wd1 = link_wqe->val_wd1; - spfc_set_direct_wqe_owner_be(link_wqe, ssq->last_pi_owner); - ssq->wqe_offset = 0; - ssq->last_pi_owner = !ssq->last_pi_owner; - } - sqe_in_wp = - (struct spfc_sqe *)spfc_get_wqe_page_entry(tail_wpg, ssq->wqe_offset); - spfc_build_wqe_owner_pmsn(io_sqe, (ssq->last_pi_owner), ssq->last_pmsn); - SPFC_IO_STAT((struct spfc_hba_info *)ssq->hba, wqe_type); - - wqe_gpa = tail_wpg->wpg_phy_addr + (ssq->wqe_offset * sizeof(struct spfc_sqe)); - io_sqe->wqe_gpa = (wqe_gpa >> UNF_SHIFT_6); - - dre_door_bell.wd0.ddb = IWARP_FC_DDB_TYPE; - dre_door_bell.wd0.cos = 0; - dre_door_bell.wd0.c = 0; - dre_door_bell.wd0.pi_hi = - (u32)(ssq->last_pmsn >> UNF_SHIFT_12) & SPFC_DB_WD0_PI_H_MASK; - dre_door_bell.wd0.cntx_size = SPFC_CNTX_SIZE_T_256B; - dre_door_bell.wd0.xid = ssq->context_id; - dre_door_bell.wd1.sm_data = ssq->cache_id; - dre_door_bell.wd1.pi_lo = (u32)(ssq->last_pmsn & SPFC_DB_WD0_PI_L_MASK); - io_sqe->db_val = *(u64 *)&dre_door_bell; - - spfc_convert_parent_wqe_to_big_endian(io_sqe); - 
memcpy(sqe_in_wp, io_sqe, sizeof(struct spfc_sqe)); - spfc_set_direct_wqe_owner_be(sqe_in_wp, ssq->last_pi_owner); - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_INFO, - "[INFO]Ssq(0x%x) xid:0x%x,qid:0x%x wqegpa:0x%llx,o:0x%x,outstandind:0x%x,pmsn:0x%x,cmsn:0x%x", - ssq->sqn, ssq->context_id, ssq->sq_queue_id, wqe_gpa, - ssq->last_pi_owner, atomic_read(&ssq->sqe_minus_cqe_cnt), - ssq->last_pmsn, SPFC_GET_QUEUE_CMSN(ssq)); - - ssq->accum_wqe_cnt++; - if (ssq->accum_wqe_cnt == accum_db_num) { - ret = spfc_parent_sq_ring_direct_wqe_doorbell(ssq, (void *)sqe_in_wp); - if (unlikely(ret != RETURN_OK)) - SPFC_ERR_IO_STAT((struct spfc_hba_info *)ssq->hba, wqe_type); - ssq->accum_wqe_cnt = 0; - } - - ssq->wqe_offset += 1; - ssq->last_pmsn = SPFC_MSN_INC(ssq->last_pmsn); - atomic_inc(&ssq->sq_wqe_cnt); - atomic_inc(&ssq->sqe_minus_cqe_cnt); - SPFC_SQ_IO_STAT(ssq, wqe_type); - spin_unlock_irqrestore(&ssq->parent_sq_enqueue_lock, flag); - return ret; -} - -u32 spfc_parent_ssq_enqueue(struct spfc_parent_ssq_info *ssq, struct spfc_sqe *io_sqe, u8 wqe_type) -{ - u32 ret = RETURN_OK; - u32 addr_wd = INVALID_VALUE32; - u32 msn_wd = INVALID_VALUE32; - u16 link_wqe_msn = 0; - ulong flag = 0; - struct spfc_wqe_page *new_wqe_page = NULL; - struct spfc_wqe_page *tail_wpg = NULL; - struct spfc_sqe *sqe_in_wp = NULL; - struct spfc_linkwqe *link_wqe = NULL; - struct spfc_linkwqe *link_wqe_last_part = NULL; - u32 cur_cmsn = 0; - u8 qos_level = (u8)io_sqe->ts_sl.cont.icmnd.info.dif_info.wd1.vpid; - u32 c = SPFC_DB_C_BIT_CONTROL_TYPE; - - if (ssq->queue_style == SPFC_QUEUE_RING_STYLE) - return spfc_direct_sq_enqueue(ssq, io_sqe, wqe_type); - - spin_lock_irqsave(&ssq->parent_sq_enqueue_lock, flag); - tail_wpg = SPFC_GET_SQ_TAIL(ssq); - if (ssq->wqe_offset == ssq->wqe_num_per_buf) { - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_INFO, - "[info]Ssq(0x%x), xid(0x%x) qid(0x%x) add wqepage at Pmsn(0x%x), WpgCnt(0x%x)", - ssq->sqn, ssq->context_id, ssq->sq_queue_id, - ssq->last_pmsn, - atomic_read(&ssq->wqe_page_cnt)); - cur_cmsn = SPFC_GET_QUEUE_CMSN(ssq); - spfc_free_sq_wqe_page(ssq, cur_cmsn); - new_wqe_page = spfc_add_one_wqe_page(ssq); - if (unlikely(!new_wqe_page)) { - SPFC_ERR_IO_STAT((struct spfc_hba_info *)ssq->hba, wqe_type); - spin_unlock_irqrestore(&ssq->parent_sq_enqueue_lock, flag); - return UNF_RETURN_ERROR; - } - link_wqe = (struct spfc_linkwqe *)spfc_get_wqe_page_entry(tail_wpg, - ssq->wqe_offset); - addr_wd = SPFC_MSD(new_wqe_page->wpg_phy_addr); - link_wqe->next_page_addr_hi = cpu_to_be32(addr_wd); - addr_wd = SPFC_LSD(new_wqe_page->wpg_phy_addr); - link_wqe->next_page_addr_lo = cpu_to_be32(addr_wd); - link_wqe_msn = SPFC_MSN_DEC(ssq->last_pmsn); - msn_wd = be32_to_cpu(link_wqe->val_wd1); - msn_wd |= ((u32)(link_wqe_msn & SPFC_MSNWD_L_MASK)); - msn_wd |= (((u32)(link_wqe_msn & SPFC_MSNWD_H_MASK)) << UNF_SHIFT_16); - link_wqe->val_wd1 = cpu_to_be32(msn_wd); - link_wqe_last_part = (struct spfc_linkwqe *)((u8 *)link_wqe + - SPFC_EXTEND_WQE_OFFSET); - link_wqe_last_part->next_page_addr_hi = link_wqe->next_page_addr_hi; - link_wqe_last_part->next_page_addr_lo = link_wqe->next_page_addr_lo; - link_wqe_last_part->val_wd1 = link_wqe->val_wd1; - spfc_set_sq_wqe_owner_be(link_wqe); - ssq->wqe_offset = 0; - tail_wpg = SPFC_GET_SQ_TAIL(ssq); - atomic_inc(&ssq->wqe_page_cnt); - } - - spfc_build_wqe_owner_pmsn(io_sqe, !(ssq->last_pi_owner), ssq->last_pmsn); - SPFC_IO_STAT((struct spfc_hba_info *)ssq->hba, wqe_type); - spfc_convert_parent_wqe_to_big_endian(io_sqe); - sqe_in_wp = (struct spfc_sqe *)spfc_get_wqe_page_entry(tail_wpg, 
ssq->wqe_offset); - memcpy(sqe_in_wp, io_sqe, sizeof(struct spfc_sqe)); - spfc_set_sq_wqe_owner_be(sqe_in_wp); - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_INFO, - "[INFO]Ssq(0x%x) xid:0x%x,qid:0x%x wqegpa:0x%llx, qos_level:0x%x, c:0x%x", - ssq->sqn, ssq->context_id, ssq->sq_queue_id, - virt_to_phys(sqe_in_wp), qos_level, c); - - ssq->accum_wqe_cnt++; - if (ssq->accum_wqe_cnt == accum_db_num) { - ret = spfc_parent_sq_ring_doorbell(ssq, qos_level, c); - if (unlikely(ret != RETURN_OK)) - SPFC_ERR_IO_STAT((struct spfc_hba_info *)ssq->hba, wqe_type); - ssq->accum_wqe_cnt = 0; - } - ssq->wqe_offset += 1; - ssq->last_pmsn = SPFC_MSN_INC(ssq->last_pmsn); - atomic_inc(&ssq->sq_wqe_cnt); - atomic_inc(&ssq->sqe_minus_cqe_cnt); - SPFC_SQ_IO_STAT(ssq, wqe_type); - spin_unlock_irqrestore(&ssq->parent_sq_enqueue_lock, flag); - return ret; -} - -u32 spfc_parent_sq_enqueue(struct spfc_parent_sq_info *sq, struct spfc_sqe *io_sqe, u16 ssqn) -{ - u8 wqe_type = 0; - struct spfc_hba_info *hba = (struct spfc_hba_info *)sq->hba; - struct spfc_parent_ssq_info *ssq = NULL; - - if (unlikely(ssqn >= SPFC_MAX_SSQ_NUM)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Ssqn 0x%x is invalid.", ssqn); - - return UNF_RETURN_ERROR; - } - - wqe_type = (u8)SPFC_GET_WQE_TYPE(io_sqe); - - /* Serial enqueue */ - io_sqe->ts_sl.xid = sq->context_id; - io_sqe->ts_sl.cid = sq->cache_id; - io_sqe->ts_sl.sqn = ssqn; - - /* Choose SSQ */ - ssq = &hba->parent_queue_mgr->shared_queue[ssqn].parent_ssq_info; - - /* If the SQ is invalid, the wqe is discarded */ - if (unlikely(!atomic_read(&sq->sq_valid))) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]SQ is invalid, reject wqe(0x%x)", wqe_type); - - return UNF_RETURN_ERROR; - } - - /* The heartbeat detection status is 0, which allows control sessions - * enqueuing - */ - if (unlikely(!hba->heart_status && SPFC_WQE_IS_IO(io_sqe))) { - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ERR, - "[err]Heart status is false"); - - return UNF_RETURN_ERROR; - } - - if (sq->need_offloaded != SPFC_NEED_DO_OFFLOAD) { - /* Ensure to be offloaded */ - if (unlikely(!atomic_read(&sq->sq_cached))) { - SPFC_ERR_IO_STAT((struct spfc_hba_info *)sq->hba, wqe_type); - SPFC_HBA_STAT((struct spfc_hba_info *)sq->hba, - SPFC_STAT_PARENT_SQ_NOT_OFFLOADED); - - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ERR, - "[err]RPort(0x%x) Session(0x%x) is not offloaded, reject wqe(0x%x)", - sq->rport_index, sq->context_id, wqe_type); - - return UNF_RETURN_ERROR; - } - } - - /* Whether the SQ is in the flush state. Temporarily allow the control - * sessions to enqueue. 
- */ - if (unlikely(sq->port_in_flush && SPFC_WQE_IS_IO(io_sqe))) { - SPFC_ERR_IO_STAT((struct spfc_hba_info *)sq->hba, wqe_type); - SPFC_HBA_STAT((struct spfc_hba_info *)sq->hba, SPFC_STAT_PARENT_IO_FLUSHED); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Session(0x%x) in flush, Sqn(0x%x) cmsn(0x%x), reject wqe(0x%x)", - sq->context_id, ssqn, SPFC_GET_QUEUE_CMSN(ssq), - wqe_type); - - return UNF_RETURN_ERROR; - } - - /* If the SQ is in the Seesion deletion state and is the WQE of the I/O - * path, * the I/O failure is directly returned - */ - if (unlikely(sq->sq_in_sess_rst && SPFC_WQE_IS_IO(io_sqe))) { - SPFC_ERR_IO_STAT((struct spfc_hba_info *)sq->hba, wqe_type); - SPFC_HBA_STAT((struct spfc_hba_info *)sq->hba, SPFC_STAT_PARENT_IO_FLUSHED); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Session(0x%x) in session reset, reject wqe(0x%x)", - sq->context_id, wqe_type); - - return UNF_RETURN_ERROR; - } - - return spfc_parent_ssq_enqueue(ssq, io_sqe, wqe_type); -} - -static bool spfc_msn_in_wqe_page(u32 start_msn, u32 end_msn, u32 cur_cmsn) -{ - bool ret = true; - - if (end_msn >= start_msn) { - if (cur_cmsn < start_msn || cur_cmsn > end_msn) - ret = false; - else - ret = true; - } else { - if (cur_cmsn > end_msn && cur_cmsn < start_msn) - ret = false; - else - ret = true; - } - - return ret; -} - -void spfc_free_sq_wqe_page(struct spfc_parent_ssq_info *ssq, u32 cur_cmsn) -{ - u16 wpg_start_cmsn = 0; - u16 wpg_end_cmsn = 0; - bool wqe_page_in_use = false; - - /* If there is only zero or one Wqe Page, no release is required */ - if (atomic_read(&ssq->wqe_page_cnt) <= SPFC_MIN_WP_NUM) - return; - - /* Check whether the current MSN is within the MSN range covered by the - * WqePage - */ - wpg_start_cmsn = ssq->head_start_cmsn; - wpg_end_cmsn = ssq->head_end_cmsn; - wqe_page_in_use = spfc_msn_in_wqe_page(wpg_start_cmsn, wpg_end_cmsn, cur_cmsn); - - /* If the value of CMSN is within the current Wqe Page, no release is - * required - */ - if (wqe_page_in_use) - return; - - /* If the next WqePage is available and the CMSN is not in the current - * WqePage, * the current WqePage is released - */ - while (!wqe_page_in_use && - (atomic_read(&ssq->wqe_page_cnt) > SPFC_MIN_WP_NUM)) { - /* Free WqePage */ - spfc_free_head_wqe_page(ssq); - - /* Obtain the start MSN of the next WqePage */ - wpg_start_cmsn = SPFC_MSN_INC(wpg_end_cmsn); - - /* obtain the end MSN of the next WqePage */ - wpg_end_cmsn = - SPFC_GET_WP_END_CMSN(wpg_start_cmsn, ssq->wqe_num_per_buf); - - /* Set new MSN range */ - ssq->head_start_cmsn = wpg_start_cmsn; - ssq->head_end_cmsn = wpg_end_cmsn; - cur_cmsn = SPFC_GET_QUEUE_CMSN(ssq); - /* Check whether the current MSN is within the MSN range covered - * by the WqePage - */ - wqe_page_in_use = spfc_msn_in_wqe_page(wpg_start_cmsn, wpg_end_cmsn, cur_cmsn); - } -} - -/* - *Function Name : SPFC_UpdateSqCompletionStat - *Function Description: Update the calculation statistics of the CQE - *corresponding to the WQE on the connection SQ. 
- *Input Parameters : *sq, *scqe - *Output Parameters : N/A - *Return Type : void - */ -static void spfc_update_sq_wqe_completion_stat(struct spfc_parent_ssq_info *ssq, - union spfc_scqe *scqe) -{ - struct spfc_scqe_rcv_els_gs_rsp *els_gs_rsp = NULL; - - els_gs_rsp = (struct spfc_scqe_rcv_els_gs_rsp *)scqe; - - /* For the ELS/GS RSP intermediate frame and the CQE that is more than - * the ELS_GS_RSP_EXCH_CHECK_FAIL, no statistics are required - */ - if (unlikely(SPFC_GET_SCQE_TYPE(scqe) == SPFC_SCQE_ELS_RSP) || - (SPFC_GET_SCQE_TYPE(scqe) == SPFC_SCQE_GS_RSP)) { - if (!els_gs_rsp->wd3.end_rsp || !SPFC_SCQE_ERR_TO_CM(scqe)) - return; - } - - /* When the SQ statistics are updated, the PlogiAcc or PlogiAccSts that - * is * implicitly unloaded will enter here, and one more CQE count is - * added - */ - atomic_inc(&ssq->sq_cqe_cnt); - atomic_dec(&ssq->sqe_minus_cqe_cnt); - SPFC_SQ_IO_STAT(ssq, SPFC_GET_SCQE_TYPE(scqe)); -} - -/* - *Function Name : spfc_reclaim_sq_wqe_page - *Function Description: Reclaim the Wqe Pgae that has been used up in the Linked - * List SQ. - *Input Parameters : *handle, - * *scqe - *Output Parameters : N/A - *Return Type : u32 - */ -u32 spfc_reclaim_sq_wqe_page(void *handle, union spfc_scqe *scqe) -{ - u32 ret = RETURN_OK; - u32 cur_cmsn = 0; - u32 sqn = INVALID_VALUE32; - struct spfc_parent_ssq_info *ssq = NULL; - struct spfc_parent_shared_queue_info *parent_queue_info = NULL; - struct spfc_hba_info *hba = NULL; - ulong flag = 0; - - hba = (struct spfc_hba_info *)handle; - sqn = SPFC_GET_SCQE_SQN(scqe); - if (sqn >= SPFC_MAX_SSQ_NUM) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) do not have sqn: 0x%x", - hba->port_cfg.port_id, sqn); - - return UNF_RETURN_ERROR; - } - - parent_queue_info = &hba->parent_queue_mgr->shared_queue[sqn]; - ssq = &parent_queue_info->parent_ssq_info; - /* If there is only zero or one Wqe Page, no release is required */ - if (atomic_read(&ssq->wqe_page_cnt) <= SPFC_MIN_WP_NUM) { - spfc_update_sq_wqe_completion_stat(ssq, scqe); - return RETURN_OK; - } - - spin_lock_irqsave(&ssq->parent_sq_enqueue_lock, flag); - cur_cmsn = SPFC_GET_QUEUE_CMSN(ssq); - spfc_free_sq_wqe_page(ssq, cur_cmsn); - spin_unlock_irqrestore(&ssq->parent_sq_enqueue_lock, flag); - - spfc_update_sq_wqe_completion_stat(ssq, scqe); - - return ret; -} - -u32 spfc_root_cmdq_enqueue(void *handle, union spfc_cmdqe *cmdqe, u16 cmd_len) -{ -#define SPFC_ROOTCMDQ_TIMEOUT_MS 3000 - u8 wqe_type = 0; - int cmq_ret = 0; - struct sphw_cmd_buf *cmd_buf = NULL; - struct spfc_hba_info *hba = NULL; - - hba = (struct spfc_hba_info *)handle; - wqe_type = (u8)cmdqe->common.wd0.task_type; - SPFC_IO_STAT(hba, wqe_type); - - cmd_buf = sphw_alloc_cmd_buf(hba->dev_handle); - if (!cmd_buf) { - SPFC_ERR_IO_STAT(hba, wqe_type); - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) CqmHandle(0x%p) allocate cmdq buffer failed", - hba->port_cfg.port_id, hba->dev_handle); - - return UNF_RETURN_ERROR; - } - - memcpy(cmd_buf->buf, cmdqe, cmd_len); - spfc_cpu_to_big32(cmd_buf->buf, cmd_len); - cmd_buf->size = cmd_len; - - cmq_ret = sphw_cmdq_async(hba->dev_handle, COMM_MOD_FC, 0, cmd_buf, SPHW_CHANNEL_FC); - - if (cmq_ret != RETURN_OK) { - sphw_free_cmd_buf(hba->dev_handle, cmd_buf); - SPFC_ERR_IO_STAT(hba, wqe_type); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) CqmHandle(0x%p) send buff clear cmnd failed(0x%x)", - hba->port_cfg.port_id, hba->dev_handle, cmq_ret); - return UNF_RETURN_ERROR; - } - - return RETURN_OK; -} - -struct spfc_parent_queue_info * 
-spfc_find_parent_queue_info_by_pkg(void *handle, struct unf_frame_pkg *pkg) -{ - u32 rport_index = 0; - struct spfc_parent_queue_info *parent_queue_info = NULL; - struct spfc_hba_info *hba = NULL; - - hba = (struct spfc_hba_info *)handle; - rport_index = pkg->private_data[PKG_PRIVATE_XCHG_RPORT_INDEX]; - - if (unlikely(rport_index >= UNF_SPFC_MAXRPORT_NUM)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[warn]Port(0x%x) send pkg sid_did(0x%x_0x%x), but uplevel allocate invalid rport index: 0x%x", - hba->port_cfg.port_id, pkg->frame_head.csctl_sid, - pkg->frame_head.rctl_did, rport_index); - - return NULL; - } - - /* parent -->> session */ - parent_queue_info = &hba->parent_queue_mgr->parent_queue[rport_index]; - - return parent_queue_info; -} - -struct spfc_parent_queue_info *spfc_find_parent_queue_info_by_id(struct spfc_hba_info *hba, - u32 local_id, u32 remote_id) -{ - u32 index = 0; - ulong flag = 0; - struct spfc_parent_queue_mgr *parent_queue_mgr = NULL; - struct spfc_parent_queue_info *parent_queue_info = NULL; - spinlock_t *prtq_state_lock = NULL; - u32 lport_id; - u32 rport_id; - - parent_queue_mgr = hba->parent_queue_mgr; - if (!parent_queue_mgr) - return NULL; - - /* rport_number -->> parent_number -->> session_number */ - for (index = 0; index < UNF_SPFC_MAXRPORT_NUM; index++) { - prtq_state_lock = &parent_queue_mgr->parent_queue[index].parent_queue_state_lock; - lport_id = parent_queue_mgr->parent_queue[index].parent_sq_info.local_port_id; - rport_id = parent_queue_mgr->parent_queue[index].parent_sq_info.remote_port_id; - spin_lock_irqsave(prtq_state_lock, flag); - - /* local_id & remote_id & offload */ - if (local_id == lport_id && remote_id == rport_id && - parent_queue_mgr->parent_queue[index].offload_state == - SPFC_QUEUE_STATE_OFFLOADED) { - parent_queue_info = &parent_queue_mgr->parent_queue[index]; - spin_unlock_irqrestore(prtq_state_lock, flag); - - return parent_queue_info; - } - - spin_unlock_irqrestore(prtq_state_lock, flag); - } - - return NULL; -} - -struct spfc_parent_queue_info *spfc_find_offload_parent_queue(void *handle, u32 local_id, - u32 remote_id, u32 rport_index) -{ - u32 index = 0; - ulong flag = 0; - struct spfc_parent_queue_mgr *parent_queue_mgr = NULL; - struct spfc_parent_queue_info *parent_queue_info = NULL; - struct spfc_hba_info *hba = NULL; - spinlock_t *prtq_state_lock = NULL; - - hba = (struct spfc_hba_info *)handle; - parent_queue_mgr = hba->parent_queue_mgr; - if (!parent_queue_mgr) - return NULL; - - for (index = 0; index < UNF_SPFC_MAXRPORT_NUM; index++) { - if (rport_index == index) - continue; - prtq_state_lock = &parent_queue_mgr->parent_queue[index].parent_queue_state_lock; - spin_lock_irqsave(prtq_state_lock, flag); - - if (local_id == parent_queue_mgr->parent_queue[index] - .parent_sq_info.local_port_id && - remote_id == parent_queue_mgr->parent_queue[index] - .parent_sq_info.remote_port_id && - parent_queue_mgr->parent_queue[index].offload_state != - SPFC_QUEUE_STATE_FREE && - parent_queue_mgr->parent_queue[index].offload_state != - SPFC_QUEUE_STATE_INITIALIZED) { - parent_queue_info = &parent_queue_mgr->parent_queue[index]; - spin_unlock_irqrestore(prtq_state_lock, flag); - - return parent_queue_info; - } - - spin_unlock_irqrestore(prtq_state_lock, flag); - } - - return NULL; -} - -struct spfc_parent_sq_info *spfc_find_parent_sq_by_pkg(void *handle, struct unf_frame_pkg *pkg) -{ - struct spfc_parent_queue_info *parent_queue_info = NULL; - struct cqm_qpc_mpt *cqm_parent_ctxt_obj = NULL; - struct spfc_hba_info *hba = NULL; - - hba = 
(struct spfc_hba_info *)handle; - parent_queue_info = spfc_find_parent_queue_info_by_pkg(hba, pkg); - if (unlikely(!parent_queue_info)) { - parent_queue_info = spfc_find_parent_queue_info_by_id(hba, - pkg->frame_head.csctl_sid & - UNF_NPORTID_MASK, - pkg->frame_head.rctl_did & - UNF_NPORTID_MASK); - if (!parent_queue_info) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[err]Port(0x%x) send pkg sid_did(0x%x_0x%x), get a null parent queue information", - hba->port_cfg.port_id, pkg->frame_head.csctl_sid, - pkg->frame_head.rctl_did); - - return NULL; - } - } - - cqm_parent_ctxt_obj = (parent_queue_info->parent_ctx.cqm_parent_ctx_obj); - if (unlikely(!cqm_parent_ctxt_obj)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[err]Port(0x%x) send pkg sid_did(0x%x_0x%x) with this rport has not alloc parent sq information", - hba->port_cfg.port_id, pkg->frame_head.csctl_sid, - pkg->frame_head.rctl_did); - - return NULL; - } - - return &parent_queue_info->parent_sq_info; -} - -u32 spfc_check_all_parent_queue_free(struct spfc_hba_info *hba) -{ - u32 index = 0; - ulong flag = 0; - struct spfc_parent_queue_mgr *parent_queue_mgr = NULL; - spinlock_t *prtq_state_lock = NULL; - - parent_queue_mgr = hba->parent_queue_mgr; - if (!parent_queue_mgr) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[err]Port(0x%x) get a null parent queue mgr", - hba->port_cfg.port_id); - - return UNF_RETURN_ERROR; - } - - for (index = 0; index < UNF_SPFC_MAXRPORT_NUM; index++) { - prtq_state_lock = &parent_queue_mgr->parent_queue[index].parent_queue_state_lock; - spin_lock_irqsave(prtq_state_lock, flag); - - if (parent_queue_mgr->parent_queue[index].offload_state != SPFC_QUEUE_STATE_FREE) { - spin_unlock_irqrestore(prtq_state_lock, flag); - return UNF_RETURN_ERROR; - } - - spin_unlock_irqrestore(prtq_state_lock, flag); - } - - return RETURN_OK; -} - -void spfc_flush_specific_scq(struct spfc_hba_info *hba, u32 index) -{ - /* The software interrupt is scheduled and processed during the second - * timeout period - */ - struct spfc_scq_info *scq_info = NULL; - u32 flush_done_time = 0; - - scq_info = &hba->scq_info[index]; - atomic_set(&scq_info->flush_stat, SPFC_QUEUE_FLUSH_DOING); - tasklet_schedule(&scq_info->tasklet); - - /* Wait for a maximum of 2 seconds. 
If the SCQ soft interrupt is not - * scheduled * within 2 seconds, only timeout is returned - */ - while ((atomic_read(&scq_info->flush_stat) != SPFC_QUEUE_FLUSH_DONE) && - (flush_done_time < SPFC_QUEUE_FLUSH_WAIT_TIMEOUT_MS)) { - msleep(SPFC_QUEUE_FLUSH_WAIT_MS); - flush_done_time += SPFC_QUEUE_FLUSH_WAIT_MS; - tasklet_schedule(&scq_info->tasklet); - } - - if (atomic_read(&scq_info->flush_stat) != SPFC_QUEUE_FLUSH_DONE) { - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_WARN, - "[warn]Port(0x%x) special scq(0x%x) flush timeout", - hba->port_cfg.port_id, index); - } -} - -static void spfc_flush_cmd_scq(struct spfc_hba_info *hba) -{ - u32 index = 0; - - for (index = SPFC_CMD_SCQN_START; index < SPFC_SESSION_SCQ_NUM; - index += SPFC_SCQS_PER_SESSION) { - spfc_flush_specific_scq(hba, index); - } -} - -static void spfc_flush_sts_scq(struct spfc_hba_info *hba) -{ - u32 index = 0; - - /* for each STS SCQ */ - for (index = SPFC_STS_SCQN_START; index < SPFC_SESSION_SCQ_NUM; - index += SPFC_SCQS_PER_SESSION) { - spfc_flush_specific_scq(hba, index); - } -} - -static void spfc_flush_all_scq(struct spfc_hba_info *hba) -{ - spfc_flush_cmd_scq(hba); - spfc_flush_sts_scq(hba); - /* Flush Default SCQ */ - spfc_flush_specific_scq(hba, SPFC_SESSION_SCQ_NUM); -} - -void spfc_wait_all_queues_empty(struct spfc_hba_info *hba) -{ - spfc_flush_all_scq(hba); -} - -void spfc_set_rport_flush_state(void *handle, bool in_flush) -{ - u32 index = 0; - ulong flag = 0; - struct spfc_parent_queue_mgr *parent_queue_mgr = NULL; - struct spfc_hba_info *hba = NULL; - - hba = (struct spfc_hba_info *)handle; - parent_queue_mgr = hba->parent_queue_mgr; - if (!parent_queue_mgr) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) parent queue manager is empty", - hba->port_cfg.port_id); - return; - } - - /* - * for each HBA's R_Port(SQ), - * set state with been flushing or flush done - */ - for (index = 0; index < UNF_SPFC_MAXRPORT_NUM; index++) { - spin_lock_irqsave(&parent_queue_mgr->parent_queue[index] - .parent_sq_info.parent_sq_enqueue_lock, flag); - if (parent_queue_mgr->parent_queue[index].offload_state != SPFC_QUEUE_STATE_FREE) { - parent_queue_mgr->parent_queue[index] - .parent_sq_info.port_in_flush = in_flush; - } - spin_unlock_irqrestore(&parent_queue_mgr->parent_queue[index] - .parent_sq_info.parent_sq_enqueue_lock, flag); - } -} - -u32 spfc_clear_fetched_sq_wqe(void *handle) -{ - u32 ret = UNF_RETURN_ERROR; - union spfc_cmdqe cmdqe; - struct spfc_hba_info *hba = NULL; - - FC_CHECK_RETURN_VALUE(handle, UNF_RETURN_ERROR); - - hba = (struct spfc_hba_info *)handle; - /* - * The ROOT SQ cannot control the WQE in the empty queue of the ROOT SQ. - * Therefore, the ROOT SQ does not enqueue the WQE after the hardware - * obtains the. Link down after the wait mode is used. Therefore, the - * WQE of the hardware driver needs to enter the WQE of the queue after - * the Link down of the Link down is reported. 
- */ - memset(&cmdqe, 0, sizeof(union spfc_cmdqe)); - spfc_build_cmdqe_common(&cmdqe, SPFC_TASK_T_BUFFER_CLEAR, 0); - cmdqe.buffer_clear.wd1.rx_id_start = hba->exi_base; - cmdqe.buffer_clear.wd1.rx_id_end = hba->exi_base + hba->exi_count - 1; - cmdqe.buffer_clear.scqn = hba->default_scqn; - - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_MAJOR, - "[info]Port(0x%x) start clear all fetched wqe in start(0x%x) - end(0x%x) scqn(0x%x) stage(0x%x)", - hba->port_cfg.port_id, cmdqe.buffer_clear.wd1.rx_id_start, - cmdqe.buffer_clear.wd1.rx_id_end, cmdqe.buffer_clear.scqn, - hba->queue_set_stage); - - /* Send BUFFER_CLEAR command via ROOT CMDQ */ - ret = spfc_root_cmdq_enqueue(hba, &cmdqe, sizeof(cmdqe.buffer_clear)); - - return ret; -} - -u32 spfc_clear_pending_sq_wqe(void *handle) -{ - u32 ret = UNF_RETURN_ERROR; - u32 cmdqe_len = 0; - ulong flag = 0; - struct spfc_parent_ssq_info *ssq_info = NULL; - union spfc_cmdqe cmdqe; - struct spfc_hba_info *hba = NULL; - - hba = (struct spfc_hba_info *)handle; - memset(&cmdqe, 0, sizeof(union spfc_cmdqe)); - spfc_build_cmdqe_common(&cmdqe, SPFC_TASK_T_FLUSH_SQ, 0); - cmdqe.flush_sq.wd0.wqe_type = SPFC_TASK_T_FLUSH_SQ; - cmdqe.flush_sq.wd1.scqn = SPFC_LSW(hba->default_scqn); - cmdqe.flush_sq.wd1.port_id = hba->port_index; - - ssq_info = &hba->parent_queue_mgr->shared_queue[ARRAY_INDEX_0].parent_ssq_info; - - spin_lock_irqsave(&ssq_info->parent_sq_enqueue_lock, flag); - cmdqe.flush_sq.wd3.first_sq_xid = ssq_info->context_id; - spin_unlock_irqrestore(&ssq_info->parent_sq_enqueue_lock, flag); - cmdqe.flush_sq.wd0.entry_count = SPFC_MAX_SSQ_NUM; - cmdqe.flush_sq.wd3.sqqid_start_per_session = SPFC_SQ_QID_START_PER_QPC; - cmdqe.flush_sq.wd3.sqcnt_per_session = SPFC_SQ_NUM_PER_QPC; - cmdqe.flush_sq.wd1.last_wqe = 1; - - /* Clear pending Queue */ - cmdqe_len = (u32)(sizeof(cmdqe.flush_sq)); - ret = spfc_root_cmdq_enqueue(hba, &cmdqe, (u16)cmdqe_len); - - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_MAJOR, - "[info]Port(0x%x) clear total 0x%x SQ in this CMDQE(last=%u), stage (0x%x)", - hba->port_cfg.port_id, SPFC_MAX_SSQ_NUM, - cmdqe.flush_sq.wd1.last_wqe, hba->queue_set_stage); - - return ret; -} - -u32 spfc_wait_queue_set_flush_done(struct spfc_hba_info *hba) -{ - u32 flush_done_time = 0; - u32 ret = RETURN_OK; - - while ((hba->queue_set_stage != SPFC_QUEUE_SET_STAGE_FLUSHDONE) && - (flush_done_time < SPFC_QUEUE_FLUSH_WAIT_TIMEOUT_MS)) { - msleep(SPFC_QUEUE_FLUSH_WAIT_MS); - flush_done_time += SPFC_QUEUE_FLUSH_WAIT_MS; - } - - if (hba->queue_set_stage != SPFC_QUEUE_SET_STAGE_FLUSHDONE) { - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_WARN, - "[warn]Port(0x%x) queue sets flush timeout with stage(0x%x)", - hba->port_cfg.port_id, hba->queue_set_stage); - - ret = UNF_RETURN_ERROR; - } - - return ret; -} - -void spfc_disable_all_scq_schedule(struct spfc_hba_info *hba) -{ - struct spfc_scq_info *scq_info = NULL; - u32 index = 0; - - for (index = 0; index < SPFC_TOTAL_SCQ_NUM; index++) { - scq_info = &hba->scq_info[index]; - tasklet_disable(&scq_info->tasklet); - } -} - -void spfc_disable_queues_dispatch(struct spfc_hba_info *hba) -{ - spfc_disable_all_scq_schedule(hba); -} - -void spfc_enable_all_scq_schedule(struct spfc_hba_info *hba) -{ - struct spfc_scq_info *scq_info = NULL; - u32 index = 0; - - for (index = 0; index < SPFC_TOTAL_SCQ_NUM; index++) { - scq_info = &hba->scq_info[index]; - tasklet_enable(&scq_info->tasklet); - } -} - -void spfc_enalbe_queues_dispatch(void *handle) -{ - spfc_enable_all_scq_schedule((struct spfc_hba_info *)handle); -} - -/* - *Function Name : spfc_clear_els_srq - *Function 
Description: When the port is used as the remove, the resources - *related to the els srq are deleted. - *Input Parameters : *hba Output Parameters - *Return Type : void - */ -void spfc_clear_els_srq(struct spfc_hba_info *hba) -{ -#define SPFC_WAIT_CLR_SRQ_CTX_MS 500 -#define SPFC_WAIT_CLR_SRQ_CTX_LOOP_TIMES 60 - - u32 index = 0; - ulong flag = 0; - struct spfc_srq_info *srq_info = NULL; - - srq_info = &hba->els_srq_info; - - spin_lock_irqsave(&srq_info->srq_spin_lock, flag); - if (!srq_info->enable || srq_info->state == SPFC_CLEAN_DOING) { - spin_unlock_irqrestore(&srq_info->srq_spin_lock, flag); - - return; - } - srq_info->enable = false; - srq_info->state = SPFC_CLEAN_DOING; - spin_unlock_irqrestore(&srq_info->srq_spin_lock, flag); - - spfc_send_clear_srq_cmd(hba, &hba->els_srq_info); - - /* wait for uCode to clear SRQ context, the timer is 30S */ - while ((srq_info->state != SPFC_CLEAN_DONE) && - (index < SPFC_WAIT_CLR_SRQ_CTX_LOOP_TIMES)) { - msleep(SPFC_WAIT_CLR_SRQ_CTX_MS); - index++; - } - - if (srq_info->state != SPFC_CLEAN_DONE) { - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_WARN, - "[warn]SPFC Port(0x%x) clear els srq timeout", - hba->port_cfg.port_id); - } -} - -u32 spfc_wait_all_parent_queue_free(struct spfc_hba_info *hba) -{ -#define SPFC_MAX_LOOP_TIMES 6000 -#define SPFC_WAIT_ONE_TIME_MS 5 - u32 index = 0; - u32 ret = UNF_RETURN_ERROR; - - do { - ret = spfc_check_all_parent_queue_free(hba); - if (ret == RETURN_OK) - break; - - index++; - msleep(SPFC_WAIT_ONE_TIME_MS); - } while (index < SPFC_MAX_LOOP_TIMES); - - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ERR, - "[warn]Port(0x%x) wait all parent queue state free timeout", - hba->port_cfg.port_id); - } - - return ret; -} - -/* - *Function Name : spfc_queue_pre_process - *Function Description: When the port functions as the remove, the queue needs - * to be preprocessed. - *Input Parameters : *handle, - * clean - *Output Parameters : N/A - *Return Type : void - */ -void spfc_queue_pre_process(void *handle, bool clean) -{ -#define SPFC_WAIT_LINKDOWN_EVENT_MS 500 - struct spfc_hba_info *hba = NULL; - - hba = (struct spfc_hba_info *)handle; - /* From port reset & port remove */ - /* 1. Wait for 2s and wait for QUEUE to be FLUSH Done. */ - if (spfc_wait_queue_set_flush_done(hba) != RETURN_OK) { - /* - * During the process of removing the card, if the port is - * disabled and the flush done is not available, the chip is - * powered off or the pcie link is disconnected. In this case, - * you can proceed with the next step. - */ - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]SPFC Port(0x%x) clean queue sets timeout", - hba->port_cfg.port_id); - } - - /* - * 2. Port remove: - * 2.1 free parent queue - * 2.2 clear & destroy ELS/SIRT SRQ - */ - if (clean) { - if (spfc_wait_all_parent_queue_free(hba) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, - UNF_WARN, - "[warn]SPFC Port(0x%x) free all parent queue timeout", - hba->port_cfg.port_id); - } - - /* clear & than destroy ELS/SIRT SRQ */ - spfc_clear_els_srq(hba); - } - - msleep(SPFC_WAIT_LINKDOWN_EVENT_MS); - - /* - * 3. The internal resources of the port chip are flush done. However, - * there may be residual scqe or rq in the queue. The scheduling is - * forcibly refreshed once. - */ - spfc_wait_all_queues_empty(hba); - - /* 4. 
Disable tasklet scheduling for upstream queues on the software - * layer - */ - spfc_disable_queues_dispatch(hba); -} - -void spfc_queue_post_process(void *hba) -{ - spfc_enalbe_queues_dispatch((struct spfc_hba_info *)hba); -} - -/* - *Function Name : spfc_push_delay_sqe - *Function Description: Check whether there is a sq that is being deleted. - * If yes, add the sq to the sq. - *Input Parameters : *hba, - * *offload_parent_queue, - * *sqe, - * *pkg - *Output Parameters : N/A - *Return Type : u32 - */ -u32 spfc_push_delay_sqe(void *hba, - struct spfc_parent_queue_info *offload_parent_queue, - struct spfc_sqe *sqe, struct unf_frame_pkg *pkg) -{ - ulong flag = 0; - spinlock_t *prtq_state_lock = NULL; - - prtq_state_lock = &offload_parent_queue->parent_queue_state_lock; - spin_lock_irqsave(prtq_state_lock, flag); - - if (offload_parent_queue->offload_state != SPFC_QUEUE_STATE_INITIALIZED && - offload_parent_queue->offload_state != SPFC_QUEUE_STATE_FREE) { - memcpy(&offload_parent_queue->parent_sq_info.delay_sqe.sqe, - sqe, sizeof(struct spfc_sqe)); - offload_parent_queue->parent_sq_info.delay_sqe.start_jiff = jiffies; - offload_parent_queue->parent_sq_info.delay_sqe.time_out = - pkg->private_data[PKG_PRIVATE_XCHG_TIMEER]; - offload_parent_queue->parent_sq_info.delay_sqe.valid = true; - offload_parent_queue->parent_sq_info.delay_sqe.rport_index = - pkg->private_data[PKG_PRIVATE_XCHG_RPORT_INDEX]; - offload_parent_queue->parent_sq_info.delay_sqe.sid = - pkg->frame_head.csctl_sid & UNF_NPORTID_MASK; - offload_parent_queue->parent_sq_info.delay_sqe.did = - pkg->frame_head.rctl_did & UNF_NPORTID_MASK; - offload_parent_queue->parent_sq_info.delay_sqe.xid = - sqe->ts_sl.xid; - offload_parent_queue->parent_sq_info.delay_sqe.ssqn = - (u16)pkg->private_data[PKG_PRIVATE_XCHG_SSQ_INDEX]; - - spin_unlock_irqrestore(prtq_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) RPort(0x%x) delay send ELS, OXID(0x%x), RXID(0x%x)", - ((struct spfc_hba_info *)hba)->port_cfg.port_id, - pkg->private_data[PKG_PRIVATE_XCHG_RPORT_INDEX], - UNF_GET_OXID(pkg), UNF_GET_RXID(pkg)); - - return RETURN_OK; - } - - spin_unlock_irqrestore(prtq_state_lock, flag); - - return UNF_RETURN_ERROR; -} - -static u32 spfc_pop_session_valid_check(struct spfc_hba_info *hba, - struct spfc_delay_sqe_ctrl_info *sqe_info, u32 rport_index) -{ - if (!sqe_info->valid) - return UNF_RETURN_ERROR; - - if (jiffies_to_msecs(jiffies - sqe_info->start_jiff) >= sqe_info->time_out) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) pop delay enable session failed, start time 0x%llx, timeout value 0x%x", - hba->port_cfg.port_id, sqe_info->start_jiff, - sqe_info->time_out); - - return UNF_RETURN_ERROR; - } - - if (rport_index >= UNF_SPFC_MAXRPORT_NUM) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) pop delay enable session failed, rport index(0x%x) is invalid", - hba->port_cfg.port_id, rport_index); - - return UNF_RETURN_ERROR; - } - - return RETURN_OK; -} - -/* - *Function Name : spfc_pop_delay_sqe - *Function Description: The sqe that is delayed due to the deletion of the old - * connection is sent to the root sq for - *processing. 
Input Parameters : *hba, *sqe_info Output Parameters : N/A - *Return Type : void - */ -static void spfc_pop_delay_sqe(struct spfc_hba_info *hba, - struct spfc_delay_sqe_ctrl_info *sqe_info) -{ - ulong flag; - u32 delay_rport_index = INVALID_VALUE32; - struct spfc_parent_queue_info *parent_queue = NULL; - enum spfc_parent_queue_state offload_state = - SPFC_QUEUE_STATE_DESTROYING; - struct spfc_delay_destroy_ctrl_info destroy_sqe_info; - u32 ret = UNF_RETURN_ERROR; - struct spfc_parent_sq_info *sq_info = NULL; - spinlock_t *prtq_state_lock = NULL; - - memset(&destroy_sqe_info, 0, sizeof(struct spfc_delay_destroy_ctrl_info)); - delay_rport_index = sqe_info->rport_index; - - /* According to the sequence, the rport index id is reported and then - * the sqe of the new link setup request is delivered. - */ - ret = spfc_pop_session_valid_check(hba, sqe_info, delay_rport_index); - - if (ret != RETURN_OK) - return; - - parent_queue = &hba->parent_queue_mgr->parent_queue[delay_rport_index]; - sq_info = &parent_queue->parent_sq_info; - prtq_state_lock = &parent_queue->parent_queue_state_lock; - /* Before the root sq is delivered, check the status again to - * ensure that the initialization status is not uninstalled. Other - * states are not processed and are discarded directly. - */ - spin_lock_irqsave(prtq_state_lock, flag); - offload_state = parent_queue->offload_state; - - /* Before re-enqueuing the rootsq, check whether the offload status and - * connection information is consistent to prevent the old request from - * being sent after the connection status is changed. - */ - if (offload_state == SPFC_QUEUE_STATE_INITIALIZED && - parent_queue->parent_sq_info.local_port_id == sqe_info->sid && - parent_queue->parent_sq_info.remote_port_id == sqe_info->did && - SPFC_CHECK_XID_MATCHED(parent_queue->parent_sq_info.context_id, - sqe_info->sqe.ts_sl.xid)) { - parent_queue->offload_state = SPFC_QUEUE_STATE_OFFLOADING; - spin_unlock_irqrestore(prtq_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) pop up delay session enable, sqe start time 0x%llx, timeout value 0x%x, rport index 0x%x, offload state 0x%x", - hba->port_cfg.port_id, sqe_info->start_jiff, - sqe_info->time_out, delay_rport_index, offload_state); - - if (spfc_parent_sq_enqueue(sq_info, &sqe_info->sqe, sqe_info->ssqn) != RETURN_OK) { - spin_lock_irqsave(prtq_state_lock, flag); - - if (parent_queue->offload_state == SPFC_QUEUE_STATE_OFFLOADING) - parent_queue->offload_state = offload_state; - - if (parent_queue->parent_sq_info.destroy_sqe.valid) { - memcpy(&destroy_sqe_info, - &parent_queue->parent_sq_info.destroy_sqe, - sizeof(struct spfc_delay_destroy_ctrl_info)); - - parent_queue->parent_sq_info.destroy_sqe.valid = false; - } - - spin_unlock_irqrestore(prtq_state_lock, flag); - - spfc_pop_destroy_parent_queue_sqe((void *)hba, &destroy_sqe_info); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) pop up delay session enable fail, recover offload state 0x%x", - hba->port_cfg.port_id, parent_queue->offload_state); - return; - } - } else { - spin_unlock_irqrestore(prtq_state_lock, flag); - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port 0x%x pop delay session enable failed, sqe start time 0x%llx, timeout value 0x%x, rport index 0x%x, offload state 0x%x", - hba->port_cfg.port_id, sqe_info->start_jiff, - sqe_info->time_out, delay_rport_index, - offload_state); - } -} - -void spfc_push_destroy_parent_queue_sqe(void *hba, - struct spfc_parent_queue_info *offloading_parent_queue, - struct 
unf_port_info *rport_info) -{ - offloading_parent_queue->parent_sq_info.destroy_sqe.valid = true; - offloading_parent_queue->parent_sq_info.destroy_sqe.rport_index = rport_info->rport_index; - offloading_parent_queue->parent_sq_info.destroy_sqe.time_out = - SPFC_SQ_DEL_STAGE_TIMEOUT_MS; - offloading_parent_queue->parent_sq_info.destroy_sqe.start_jiff = jiffies; - offloading_parent_queue->parent_sq_info.destroy_sqe.rport_info.nport_id = - rport_info->nport_id; - offloading_parent_queue->parent_sq_info.destroy_sqe.rport_info.rport_index = - rport_info->rport_index; - offloading_parent_queue->parent_sq_info.destroy_sqe.rport_info.port_name = - rport_info->port_name; -} - -/* - *Function Name : spfc_pop_destroy_parent_queue_sqe - *Function Description: The deletion connection sqe that is delayed due to - * connection uninstallation is sent to - *the parent sq for processing. Input Parameters : *handle, *destroy_sqe_info - *Output Parameters : N/A - *Return Type : void - */ -void spfc_pop_destroy_parent_queue_sqe(void *handle, - struct spfc_delay_destroy_ctrl_info *destroy_sqe_info) -{ - u32 ret = UNF_RETURN_ERROR; - ulong flag; - u32 index = INVALID_VALUE32; - struct spfc_parent_queue_info *parent_queue = NULL; - enum spfc_parent_queue_state offload_state = - SPFC_QUEUE_STATE_DESTROYING; - struct spfc_hba_info *hba = NULL; - spinlock_t *prtq_state_lock = NULL; - - hba = (struct spfc_hba_info *)handle; - if (!destroy_sqe_info->valid) - return; - - if (jiffies_to_msecs(jiffies - destroy_sqe_info->start_jiff) < destroy_sqe_info->time_out) { - index = destroy_sqe_info->rport_index; - parent_queue = &hba->parent_queue_mgr->parent_queue[index]; - prtq_state_lock = &parent_queue->parent_queue_state_lock; - /* Before delivery, check the status again to ensure that the - * initialization status is not uninstalled. Other states are - * not processed and are discarded directly. 
- */ - spin_lock_irqsave(prtq_state_lock, flag); - - offload_state = parent_queue->offload_state; - if (offload_state == SPFC_QUEUE_STATE_OFFLOADED || - offload_state == SPFC_QUEUE_STATE_INITIALIZED) { - spin_unlock_irqrestore(prtq_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port 0x%x pop up delay destroy parent sq, sqe start time 0x%llx, timeout value 0x%x, rport index 0x%x, offload state 0x%x", - hba->port_cfg.port_id, - destroy_sqe_info->start_jiff, - destroy_sqe_info->time_out, - index, offload_state); - ret = spfc_free_parent_resource(hba, &destroy_sqe_info->rport_info); - } else { - ret = UNF_RETURN_ERROR; - spin_unlock_irqrestore(prtq_state_lock, flag); - } - } - - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port 0x%x pop delay destroy parent sq failed, sqe start time 0x%llx, timeout value 0x%x, rport index 0x%x, rport nport id 0x%x,offload state 0x%x", - hba->port_cfg.port_id, destroy_sqe_info->start_jiff, - destroy_sqe_info->time_out, index, - destroy_sqe_info->rport_info.nport_id, offload_state); - } -} - -void spfc_free_parent_queue_info(void *handle, struct spfc_parent_queue_info *parent_queue_info) -{ - ulong flag = 0; - u32 ret = UNF_RETURN_ERROR; - u32 rport_index = INVALID_VALUE32; - struct spfc_hba_info *hba = NULL; - struct spfc_delay_sqe_ctrl_info sqe_info; - spinlock_t *prtq_state_lock = NULL; - - memset(&sqe_info, 0, sizeof(struct spfc_delay_sqe_ctrl_info)); - hba = (struct spfc_hba_info *)handle; - prtq_state_lock = &parent_queue_info->parent_queue_state_lock; - spin_lock_irqsave(prtq_state_lock, flag); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) begin to free parent sq, rport_index(0x%x)", - hba->port_cfg.port_id, parent_queue_info->parent_sq_info.rport_index); - - if (parent_queue_info->offload_state == SPFC_QUEUE_STATE_FREE) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[info]Port(0x%x) duplicate free parent sq, rport_index(0x%x)", - hba->port_cfg.port_id, - parent_queue_info->parent_sq_info.rport_index); - - spin_unlock_irqrestore(prtq_state_lock, flag); - return; - } - - if (parent_queue_info->parent_sq_info.delay_sqe.valid) { - memcpy(&sqe_info, &parent_queue_info->parent_sq_info.delay_sqe, - sizeof(struct spfc_delay_sqe_ctrl_info)); - } - - rport_index = parent_queue_info->parent_sq_info.rport_index; - - /* The Parent Contexe and SQ information is released. 
After - * initialization, the Parent Contexe and SQ information is associated - * with the sq in the queue of the parent - */ - - spfc_free_parent_sq(hba, parent_queue_info); - - /* The initialization of all queue id is invalid */ - parent_queue_info->parent_cmd_scq_info.cqm_queue_id = INVALID_VALUE32; - parent_queue_info->parent_sts_scq_info.cqm_queue_id = INVALID_VALUE32; - parent_queue_info->parent_els_srq_info.cqm_queue_id = INVALID_VALUE32; - parent_queue_info->offload_state = SPFC_QUEUE_STATE_FREE; - - spin_unlock_irqrestore(prtq_state_lock, flag); - - UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, UNF_PORT_RELEASE_RPORT_INDEX, - (void *)&rport_index); - - spfc_pop_delay_sqe(hba, &sqe_info); - - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[warn]Port(0x%x) free parent sq with rport_index(0x%x) failed", - hba->port_cfg.port_id, rport_index); - } -} - -static void spfc_do_port_reset(struct work_struct *work) -{ - struct spfc_suspend_sqe_info *suspend_sqe = NULL; - struct spfc_hba_info *hba = NULL; - - FC_CHECK_RETURN_VOID(work); - - suspend_sqe = container_of(work, struct spfc_suspend_sqe_info, - timeout_work.work); - hba = (struct spfc_hba_info *)suspend_sqe->hba; - FC_CHECK_RETURN_VOID(hba); - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) magic num (0x%x)do port reset.", - hba->port_cfg.port_id, suspend_sqe->magic_num); - - spfc_port_reset(hba); -} - -static void -spfc_push_sqe_suspend(void *hba, struct spfc_parent_queue_info *parent_queue, - struct spfc_sqe *sqe, struct unf_frame_pkg *pkg, u32 magic_num) -{ -#define SPFC_SQ_NOP_TIMEOUT_MS 1000 - ulong flag = 0; - u32 sqn_base; - struct spfc_parent_sq_info *sq = NULL; - struct spfc_suspend_sqe_info *suspend_sqe = NULL; - - sq = &parent_queue->parent_sq_info; - suspend_sqe = - kmalloc(sizeof(struct spfc_suspend_sqe_info), GFP_ATOMIC); - if (!suspend_sqe) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[err]alloc suspend sqe memory failed"); - return; - } - memset(suspend_sqe, 0, sizeof(struct spfc_suspend_sqe_info)); - memcpy(&suspend_sqe->sqe, sqe, sizeof(struct spfc_sqe)); - suspend_sqe->magic_num = magic_num; - suspend_sqe->old_offload_sts = sq->need_offloaded; - suspend_sqe->hba = sq->hba; - - if (pkg) { - memcpy(&suspend_sqe->pkg, pkg, sizeof(struct unf_frame_pkg)); - } else { - sqn_base = sq->sqn_base; - suspend_sqe->pkg.private_data[PKG_PRIVATE_XCHG_SSQ_INDEX] = - sqn_base; - } - - INIT_DELAYED_WORK(&suspend_sqe->timeout_work, spfc_do_port_reset); - INIT_LIST_HEAD(&suspend_sqe->list_sqe_entry); - - spin_lock_irqsave(&parent_queue->parent_queue_state_lock, flag); - list_add_tail(&suspend_sqe->list_sqe_entry, &sq->suspend_sqe_list); - spin_unlock_irqrestore(&parent_queue->parent_queue_state_lock, flag); - - (void)queue_delayed_work(((struct spfc_hba_info *)hba)->work_queue, - &suspend_sqe->timeout_work, - (ulong)msecs_to_jiffies((u32)SPFC_SQ_NOP_TIMEOUT_MS)); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) magic num(0x%x)suspend sqe", - ((struct spfc_hba_info *)hba)->port_cfg.port_id, magic_num); -} - -u32 spfc_pop_suspend_sqe(void *handle, struct spfc_parent_queue_info *parent_queue, - struct spfc_suspend_sqe_info *suspen_sqe) -{ - ulong flag; - u32 ret = UNF_RETURN_ERROR; - struct spfc_parent_sq_info *sq = NULL; - u16 ssqn; - struct unf_frame_pkg *pkg = NULL; - struct spfc_hba_info *hba = (struct spfc_hba_info *)handle; - u8 task_type; - spinlock_t *prtq_state_lock = NULL; - - sq = &parent_queue->parent_sq_info; - task_type = suspen_sqe->sqe.ts_sl.task_type; - pkg = 
&suspen_sqe->pkg; - if (!pkg) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_MAJOR, "[error]pkt is null."); - return UNF_RETURN_ERROR; - } - - ssqn = (u16)pkg->private_data[PKG_PRIVATE_XCHG_SSQ_INDEX]; - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) pop up suspend wqe sqn (0x%x) TaskType(0x%x)", - hba->port_cfg.port_id, ssqn, task_type); - - prtq_state_lock = &parent_queue->parent_queue_state_lock; - spin_lock_irqsave(prtq_state_lock, flag); - if (SPFC_RPORT_NOT_OFFLOADED(parent_queue) && - (task_type == SPFC_SQE_ELS_RSP || - task_type == SPFC_TASK_T_ELS)) { - spin_unlock_irqrestore(prtq_state_lock, flag); - /* Send PLOGI or PLOGI ACC or SCR if session not offload */ - ret = spfc_send_els_via_default_session(hba, &suspen_sqe->sqe, pkg, parent_queue); - } else { - spin_unlock_irqrestore(prtq_state_lock, flag); - ret = spfc_parent_sq_enqueue(sq, &suspen_sqe->sqe, ssqn); - } - return ret; -} - -static void spfc_build_nop_sqe(struct spfc_hba_info *hba, struct spfc_parent_sq_info *sq, - struct spfc_sqe *sqe, u32 magic_num, u32 scqn) -{ - sqe->ts_sl.task_type = SPFC_SQE_NOP; - sqe->ts_sl.wd0.conn_id = (u16)(sq->rport_index); - sqe->ts_sl.cont.nop_sq.wd0.scqn = scqn; - sqe->ts_sl.cont.nop_sq.magic_num = magic_num; - spfc_build_common_wqe_ctrls(&sqe->ctrl_sl, - sizeof(struct spfc_sqe_ts) / SPFC_WQE_SECTION_CHUNK_SIZE); -} - -u32 spfc_send_nop_cmd(void *handle, struct spfc_parent_sq_info *parent_sq_info, - u32 magic_num, u16 sqn) -{ - struct spfc_sqe empty_sq_sqe; - struct spfc_hba_info *hba = (struct spfc_hba_info *)handle; - u32 ret; - - memset(&empty_sq_sqe, 0, sizeof(struct spfc_sqe)); - - spfc_build_nop_sqe(hba, parent_sq_info, &empty_sq_sqe, magic_num, hba->default_scqn); - ret = spfc_parent_sq_enqueue(parent_sq_info, &empty_sq_sqe, sqn); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]send nop cmd scqn(0x%x) sq(0x%x).", - hba->default_scqn, sqn); - return ret; -} - -u32 spfc_suspend_sqe_and_send_nop(void *handle, - struct spfc_parent_queue_info *parent_queue, - struct spfc_sqe *sqe, struct unf_frame_pkg *pkg) -{ - u32 ret = UNF_RETURN_ERROR; - u32 magic_num; - struct spfc_hba_info *hba = (struct spfc_hba_info *)handle; - struct spfc_parent_sq_info *parent_sq = &parent_queue->parent_sq_info; - struct unf_lport *lport = (struct unf_lport *)hba->lport; - - FC_CHECK_RETURN_VALUE(lport, UNF_RETURN_ERROR); - - if (pkg) { - magic_num = pkg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME]; - } else { - magic_num = (u32)atomic64_inc_return(&((struct unf_lport *) - lport->root_lport)->exchg_index); - } - - spfc_push_sqe_suspend(hba, parent_queue, sqe, pkg, magic_num); - if (SPFC_RPORT_NOT_OFFLOADED(parent_queue)) - parent_sq->need_offloaded = SPFC_NEED_DO_OFFLOAD; - - ret = spfc_send_nop_cmd(hba, parent_sq, magic_num, - (u16)parent_sq->sqn_base); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[err]Port(0x%x) rport_index(0x%x)send sq empty failed.", - hba->port_cfg.port_id, parent_sq->rport_index); - } - return ret; -} - -void spfc_build_session_rst_wqe(void *handle, struct spfc_parent_sq_info *sq, - struct spfc_sqe *sqe, enum spfc_session_reset_mode mode, u32 scqn) -{ - struct spfc_hba_info *hba = NULL; - - hba = (struct spfc_hba_info *)handle; - /* The reset session command does not occupy xid. Therefore, - * 0xffff can be used to align with the microcode. 
- */ - sqe->ts_sl.task_type = SPFC_SQE_SESS_RST; - sqe->ts_sl.local_xid = 0xffff; - sqe->ts_sl.wd0.conn_id = (u16)(sq->rport_index); - sqe->ts_sl.wd0.remote_xid = 0xffff; - sqe->ts_sl.cont.reset_session.wd0.reset_exch_start = hba->exi_base; - sqe->ts_sl.cont.reset_session.wd0.reset_exch_end = hba->exi_base + (hba->exi_count - 1); - sqe->ts_sl.cont.reset_session.wd1.reset_did = sq->remote_port_id; - sqe->ts_sl.cont.reset_session.wd1.mode = mode; - sqe->ts_sl.cont.reset_session.wd2.reset_sid = sq->local_port_id; - sqe->ts_sl.cont.reset_session.wd3.scqn = scqn; - - spfc_build_common_wqe_ctrls(&sqe->ctrl_sl, - sizeof(struct spfc_sqe_ts) / SPFC_WQE_SECTION_CHUNK_SIZE); -} - -u32 spfc_send_session_rst_cmd(void *handle, - struct spfc_parent_queue_info *parent_queue_info, - enum spfc_session_reset_mode mode) -{ - struct spfc_parent_sq_info *sq = NULL; - struct spfc_sqe rst_sess_sqe; - u32 ret = UNF_RETURN_ERROR; - u32 sts_scqn = 0; - struct spfc_hba_info *hba = NULL; - - hba = (struct spfc_hba_info *)handle; - memset(&rst_sess_sqe, 0, sizeof(struct spfc_sqe)); - sq = &parent_queue_info->parent_sq_info; - sts_scqn = hba->default_scqn; - - spfc_build_session_rst_wqe(hba, sq, &rst_sess_sqe, mode, sts_scqn); - ret = spfc_suspend_sqe_and_send_nop(hba, parent_queue_info, &rst_sess_sqe, NULL); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]RPort(0x%x) send SESS_RST(%d) start_exch_id(0x%x) end_exch_id(0x%x), scqn(0x%x) ctx_id(0x%x) cid(0x%x)", - sq->rport_index, mode, - rst_sess_sqe.ts_sl.cont.reset_session.wd0.reset_exch_start, - rst_sess_sqe.ts_sl.cont.reset_session.wd0.reset_exch_end, - rst_sess_sqe.ts_sl.cont.reset_session.wd3.scqn, - sq->context_id, sq->cache_id); - return ret; -} - -void spfc_rcvd_els_from_srq_timeout(struct work_struct *work) -{ - struct spfc_hba_info *hba = NULL; - - hba = container_of(work, struct spfc_hba_info, srq_delay_info.del_work.work); - - /* If the frame is not processed, the frame is pushed to the CM layer: - * The frame may have been processed when the root rq receives data. - */ - if (hba->srq_delay_info.srq_delay_flag) { - spfc_recv_els_cmnd(hba, &hba->srq_delay_info.frame_pkg, - hba->srq_delay_info.frame_pkg.unf_cmnd_pload_bl.buffer_ptr, - 0, false); - hba->srq_delay_info.srq_delay_flag = 0; - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) srq delay work timeout, send saved plgoi to CM", - hba->port_cfg.port_id); - } -} - -u32 spfc_flush_ini_resp_queue(void *handle) -{ - struct spfc_hba_info *hba = NULL; - - FC_CHECK_RETURN_VALUE(handle, UNF_RETURN_ERROR); - hba = (struct spfc_hba_info *)handle; - - spfc_flush_sts_scq(hba); - - return RETURN_OK; -} - -static void spfc_handle_aeq_queue_error(struct spfc_hba_info *hba, - struct spfc_aqe_data *aeq_msg) -{ - u32 sts_scqn_local = 0; - u32 full_ci = INVALID_VALUE32; - u32 full_ci_owner = INVALID_VALUE32; - struct spfc_scq_info *scq_info = NULL; - - sts_scqn_local = SPFC_RPORTID_TO_STS_SCQN(aeq_msg->wd0.conn_id); - scq_info = &hba->scq_info[sts_scqn_local]; - full_ci = scq_info->ci; - full_ci_owner = scq_info->ci_owner; - - /* Currently, Flush is forcibly set to StsScq. 
No matter whether scq is - * processed, AEQE is returned - */ - tasklet_schedule(&scq_info->tasklet); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) RPort(0x%x) LocalScqn(0x%x) CqmScqn(0x%x) is full, force flush CI from (%u|0x%x) to (%u|0x%x)", - hba->port_cfg.port_id, aeq_msg->wd0.conn_id, - sts_scqn_local, scq_info->scqn, full_ci_owner, full_ci, - scq_info->ci_owner, scq_info->ci); -} - -void spfc_process_aeqe(void *handle, u8 event_type, u8 *val) -{ - u32 ret = RETURN_OK; - struct spfc_hba_info *hba = (struct spfc_hba_info *)handle; - struct spfc_aqe_data aeq_msg; - u8 event_code = INVALID_VALUE8; - u64 event_val = *((u64 *)val); - - FC_CHECK_RETURN_VOID(hba); - - memcpy(&aeq_msg, (struct spfc_aqe_data *)&event_val, sizeof(struct spfc_aqe_data)); - event_code = (u8)aeq_msg.wd0.evt_code; - - switch (event_type) { - case FC_AEQ_EVENT_QUEUE_ERROR: - spfc_handle_aeq_queue_error(hba, &aeq_msg); - break; - - case FC_AEQ_EVENT_WQE_FATAL_ERROR: - UNF_LOWLEVEL_PORT_EVENT(ret, hba->lport, - UNF_PORT_ABNORMAL_RESET, NULL); - break; - - case FC_AEQ_EVENT_CTX_FATAL_ERROR: - break; - - case FC_AEQ_EVENT_OFFLOAD_ERROR: - ret = spfc_handle_aeq_off_load_err(hba, &aeq_msg); - break; - - default: - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[warn]Port(0x%x) receive a unsupported AEQ EventType(0x%x) EventVal(0x%llx).", - hba->port_cfg.port_id, event_type, (u64)event_val); - return; - } - - if (event_code < FC_AEQ_EVT_ERR_CODE_BUTT) - SPFC_AEQ_ERR_TYPE_STAT(hba, aeq_msg.wd0.evt_code); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_KEVENT, - "[info]Port(0x%x) receive AEQ EventType(0x%x) EventVal(0x%llx) EvtCode(0x%x) Conn_id(0x%x) Xid(0x%x) %s", - hba->port_cfg.port_id, event_type, (u64)event_val, event_code, - aeq_msg.wd0.conn_id, aeq_msg.wd1.xid, - (ret == UNF_RETURN_ERROR) ? 
"ERROR" : "OK"); -} - -void spfc_sess_resource_free_sync(void *handle, - struct unf_port_info *rport_info) -{ - struct spfc_parent_queue_info *parent_queue_info = NULL; - ulong flag = 0; - u32 wait_sq_cnt = 0; - struct spfc_hba_info *hba = NULL; - spinlock_t *prtq_state_lock = NULL; - u32 index = SPFC_DEFAULT_RPORT_INDEX; - - FC_CHECK_RETURN_VOID(handle); - FC_CHECK_RETURN_VOID(rport_info); - - hba = (struct spfc_hba_info *)handle; - parent_queue_info = &hba->parent_queue_mgr->parent_queue[index]; - prtq_state_lock = &parent_queue_info->parent_queue_state_lock; - (void)spfc_free_parent_resource((void *)hba, rport_info); - - for (;;) { - spin_lock_irqsave(prtq_state_lock, flag); - if (parent_queue_info->offload_state == SPFC_QUEUE_STATE_FREE) { - spin_unlock_irqrestore(prtq_state_lock, flag); - break; - } - spin_unlock_irqrestore(prtq_state_lock, flag); - msleep(SPFC_WAIT_SESS_FREE_ONE_TIME_MS); - wait_sq_cnt++; - if (wait_sq_cnt >= SPFC_MAX_WAIT_LOOP_TIMES) - break; - } -} diff --git a/drivers/scsi/spfc/hw/spfc_queue.h b/drivers/scsi/spfc/hw/spfc_queue.h deleted file mode 100644 index c09f098e7324..000000000000 --- a/drivers/scsi/spfc/hw/spfc_queue.h +++ /dev/null @@ -1,711 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPFC_QUEUE_H -#define SPFC_QUEUE_H - -#include "unf_type.h" -#include "spfc_wqe.h" -#include "spfc_cqm_main.h" -#define SPFC_MIN_WP_NUM (2) -#define SPFC_EXTEND_WQE_OFFSET (128) -#define SPFC_SQE_SIZE (256) -#define WQE_MARKER_0 (0x0) -#define WQE_MARKER_6B (0x6b) - -/* PARENT SQ & Context defines */ -#define SPFC_MAX_MSN (65535) -#define SPFC_MSN_MASK (0xffff000000000000LL) -#define SPFC_SQE_TS_SIZE (72) -#define SPFC_SQE_FIRST_OBIT_DW_POS (0) -#define SPFC_SQE_SECOND_OBIT_DW_POS (30) -#define SPFC_SQE_OBIT_SET_MASK_BE (0x80) -#define SPFC_SQE_OBIT_CLEAR_MASK_BE (0xffffff7f) -#define SPFC_MAX_SQ_TASK_TYPE_CNT (128) -#define SPFC_SQ_NUM_PER_QPC (3) -#define SPFC_SQ_QID_START_PER_QPC 0 -#define SPFC_SQ_SPACE_OFFSET (64) -#define SPFC_MAX_SSQ_NUM (SPFC_SQ_NUM_PER_QPC * 63 + 1) /* must be a multiple of 3 */ -#define SPFC_DIRECTWQE_SQ_INDEX (SPFC_MAX_SSQ_NUM - 1) - -/* Note: if the location of flush done bit changes, the definition must be - * modifyed again - */ -#define SPFC_CTXT_FLUSH_DONE_DW_POS (58) -#define SPFC_CTXT_FLUSH_DONE_MASK_BE (0x4000) -#define SPFC_CTXT_FLUSH_DONE_MASK_LE (0x400000) - -#define SPFC_PCIE_TEMPLATE (0) -#define SPFC_DMA_ATTR_OFST (0) - -/* - *When driver assembles WQE SGE, the GPA parity bit is multiplexed as follows: - * {rsvd'2,zerocopysoro'2,zerocopy_dmaattr_idx'6,pcie_template'6} - */ -#define SPFC_PCIE_TEMPLATE_OFFSET 0 -#define SPFC_PCIE_ZEROCOPY_DMAATTR_IDX_OFFSET 6 -#define SPFC_PCIE_ZEROCOPY_SO_RO_OFFSET 12 -#define SPFC_PCIE_RELAXED_ORDERING (1) -#define SPFC_ZEROCOPY_PCIE_TEMPLATE_VALUE \ - (SPFC_PCIE_RELAXED_ORDERING << SPFC_PCIE_ZEROCOPY_SO_RO_OFFSET | \ - SPFC_DMA_ATTR_OFST << SPFC_PCIE_ZEROCOPY_DMAATTR_IDX_OFFSET | \ - SPFC_PCIE_TEMPLATE) - -#define SPFC_GET_SQ_HEAD(sq) \ - list_entry(UNF_OS_LIST_NEXT(&(sq)->list_linked_list_sq), \ - struct spfc_wqe_page, entry_wpg) -#define SPFC_GET_SQ_TAIL(sq) \ - list_entry(UNF_OS_LIST_PREV(&(sq)->list_linked_list_sq), \ - struct spfc_wqe_page, entry_wpg) -#define SPFC_SQ_IO_STAT(ssq, io_type) \ - (atomic_inc(&(ssq)->io_stat[io_type])) -#define SPFC_SQ_IO_STAT_READ(ssq, io_type) \ - (atomic_read(&(ssq)->io_stat[io_type])) -#define SPFC_GET_QUEUE_CMSN(ssq) \ - ((u32)(be64_to_cpu(((((ssq)->queue_header)->ci_record) & 
SPFC_MSN_MASK)))) -#define SPFC_GET_WP_END_CMSN(head_start_cmsn, wqe_num_per_buf) \ - ((u16)(((u32)(head_start_cmsn) + (u32)(wqe_num_per_buf) - 1) % (SPFC_MAX_MSN + 1))) -#define SPFC_MSN_INC(msn) (((SPFC_MAX_MSN) == (msn)) ? 0 : ((msn) + 1)) -#define SPFC_MSN_DEC(msn) (((msn) == 0) ? (SPFC_MAX_MSN) : ((msn) - 1)) -#define SPFC_QUEUE_MSN_OFFSET(start_cmsn, end_cmsn) \ - ((u32)((((u32)(end_cmsn) + (SPFC_MAX_MSN)) - (u32)(start_cmsn)) % (SPFC_MAX_MSN + 1))) -#define SPFC_MSN32_ADD(msn, inc) (((msn) + (inc)) % (SPFC_MAX_MSN + 1)) - -/* - *SCQ defines - */ -#define SPFC_INT_NUM_PER_QUEUE (1) -#define SPFC_SCQ_INT_ID_MAX (2048) /* 11BIT */ -#define SPFC_SCQE_SIZE (64) -#define SPFC_CQE_GPA_SHIFT (4) -#define SPFC_NEXT_CQE_GPA_SHIFT (12) -/* 1-Update Ci by Tile, 0-Update Ci by Hardware */ -#define SPFC_PMSN_CI_TYPE_FROM_HOST (0) -#define SPFC_PMSN_CI_TYPE_FROM_UCODE (1) -#define SPFC_ARMQ_IDLE (0) -#define SPFC_CQ_INT_MODE (2) -#define SPFC_CQ_HEADER_OWNER_SHIFT (15) - -/* SCQC_CQ_DEPTH 0-256, 1-512, 2-1k, 3-2k, 4-4k, 5-8k, 6-16k, 7-32k. - * include LinkWqe - */ -#define SPFC_CMD_SCQ_DEPTH (4096) -#define SPFC_STS_SCQ_DEPTH (8192) - -#define SPFC_CMD_SCQC_CQ_DEPTH (spfc_log2n(SPFC_CMD_SCQ_DEPTH >> 8)) -#define SPFC_STS_SCQC_CQ_DEPTH (spfc_log2n(SPFC_STS_SCQ_DEPTH >> 8)) -#define SPFC_STS_SCQ_CI_TYPE SPFC_PMSN_CI_TYPE_FROM_HOST - -#define SPFC_CMD_SCQ_CI_TYPE SPFC_PMSN_CI_TYPE_FROM_UCODE - -#define SPFC_SCQ_INTR_LOW_LATENCY_MODE 0 -#define SPFC_SCQ_INTR_POLLING_MODE 1 -#define SPFC_SCQ_PROC_CNT_PER_SECOND_THRESHOLD (30000) - -#define SPFC_CQE_MAX_PROCESS_NUM_PER_INTR (128) -#define SPFC_SESSION_SCQ_NUM (16) - -/* SCQ[0, 2, 4 ...]CMD SCQ,SCQ[1, 3, 5 ...]STS - * SCQ,SCQ[SPFC_TOTAL_SCQ_NUM-1]Defaul SCQ - */ -#define SPFC_CMD_SCQN_START (0) -#define SPFC_STS_SCQN_START (1) -#define SPFC_SCQS_PER_SESSION (2) - -#define SPFC_TOTAL_SCQ_NUM (SPFC_SESSION_SCQ_NUM + 1) - -#define SPFC_SCQ_IS_STS(scq_index) \ - (((scq_index) % SPFC_SCQS_PER_SESSION) || ((scq_index) == SPFC_SESSION_SCQ_NUM)) -#define SPFC_SCQ_IS_CMD(scq_index) (!SPFC_SCQ_IS_STS(scq_index)) -#define SPFC_RPORTID_TO_CMD_SCQN(rport_index) \ - (((rport_index) * SPFC_SCQS_PER_SESSION) % SPFC_SESSION_SCQ_NUM) -#define SPFC_RPORTID_TO_STS_SCQN(rport_index) \ - ((((rport_index) * SPFC_SCQS_PER_SESSION) + 1) % SPFC_SESSION_SCQ_NUM) - -/* - *SRQ defines - */ -#define SPFC_SRQE_SIZE (32) -#define SPFC_SRQ_INIT_LOOP_O (1) -#define SPFC_QUEUE_RING (1) -#define SPFC_SRQ_ELS_DATA_NUM (1) -#define SPFC_SRQ_ELS_SGE_LEN (256) -#define SPFC_SRQ_ELS_DATA_DEPTH (31750) /* depth should Divide 127 */ - -#define SPFC_IRQ_NAME_MAX (30) - -/* Support 2048 sessions(xid) */ -#define SPFC_CQM_XID_MASK (0x7ff) - -#define SPFC_QUEUE_FLUSH_DOING (0) -#define SPFC_QUEUE_FLUSH_DONE (1) -#define SPFC_QUEUE_FLUSH_WAIT_TIMEOUT_MS (2000) -#define SPFC_QUEUE_FLUSH_WAIT_MS (2) - -/* - *RPort defines - */ -#define SPFC_RPORT_OFFLOADED(prnt_qinfo) \ - ((prnt_qinfo)->offload_state == SPFC_QUEUE_STATE_OFFLOADED) -#define SPFC_RPORT_NOT_OFFLOADED(prnt_qinfo) \ - ((prnt_qinfo)->offload_state != SPFC_QUEUE_STATE_OFFLOADED) -#define SPFC_RPORT_FLUSH_NOT_NEEDED(prnt_qinfo) \ - (((prnt_qinfo)->offload_state == SPFC_QUEUE_STATE_INITIALIZED) || \ - ((prnt_qinfo)->offload_state == SPFC_QUEUE_STATE_OFFLOADING) || \ - ((prnt_qinfo)->offload_state == SPFC_QUEUE_STATE_FREE)) -#define SPFC_CHECK_XID_MATCHED(sq_xid, sqe_xid) \ - (((sq_xid) & SPFC_CQM_XID_MASK) == ((sqe_xid) & SPFC_CQM_XID_MASK)) -#define SPFC_PORT_MODE_TGT (0) /* Port mode */ -#define SPFC_PORT_MODE_INI (1) -#define 
SPFC_PORT_MODE_BOTH (2) - -/* - *Hardware Reserved Queue Info defines - */ -#define SPFC_HRQI_SEQ_ID_MAX (255) -#define SPFC_HRQI_SEQ_INDEX_MAX (64) -#define SPFC_HRQI_SEQ_INDEX_SHIFT (6) -#define SPFC_HRQI_SEQ_SEPCIAL_ID (3) -#define SPFC_HRQI_SEQ_INVALID_ID (~0LL) - -enum spfc_session_reset_mode { - SPFC_SESS_RST_DELETE_IO_ONLY = 1, - SPFC_SESS_RST_DELETE_CONN_ONLY = 2, - SPFC_SESS_RST_DELETE_IO_CONN_BOTH = 3, - SPFC_SESS_RST_MODE_BUTT -}; - -/* linkwqe */ -#define CQM_LINK_WQE_CTRLSL_VALUE 2 -#define CQM_LINK_WQE_LP_VALID 1 -#define CQM_LINK_WQE_LP_INVALID 0 - -/* bit mask */ -#define SPFC_SCQN_MASK 0xfffff -#define SPFC_SCQ_CTX_CI_GPA_MASK 0xfffffff -#define SPFC_SCQ_CTX_C_EQN_MSI_X_MASK 0x7 -#define SPFC_PARITY_MASK 0x1 -#define SPFC_KEYSECTION_XID_H_MASK 0xf -#define SPFC_KEYSECTION_XID_L_MASK 0xffff -#define SPFC_SRQ_CTX_rqe_dma_attr_idx_MASK 0xf -#define SPFC_SSQ_CTX_MASK 0xfffff -#define SPFC_KEY_WD3_SID_2_MASK 0x00ff0000 -#define SPFC_KEY_WD3_SID_1_MASK 0x00ff00 -#define SPFC_KEY_WD3_SID_0_MASK 0x0000ff -#define SPFC_KEY_WD4_DID_2_MASK 0x00ff0000 -#define SPFC_KEY_WD4_DID_1_MASK 0x00ff00 -#define SPFC_KEY_WD4_DID_0_MASK 0x0000ff -#define SPFC_LOCAL_LW_WD1_DUMP_MSN_MASK 0x7fff -#define SPFC_PMSN_MASK 0xff -#define SPFC_QOS_LEVEL_MASK 0x3 -#define SPFC_DB_VAL_MASK 0xFFFFFFFF -#define SPFC_MSNWD_L_MASK 0xffff -#define SPFC_MSNWD_H_MASK 0x7fff -#define SPFC_DB_WD0_PI_H_MASK 0xf -#define SPFC_DB_WD0_PI_L_MASK 0xfff - -#define SPFC_DB_C_BIT_DATA_TYPE 0 -#define SPFC_DB_C_BIT_CONTROL_TYPE 1 - -#define SPFC_OWNER_DRIVER_PRODUCT (1) - -#define SPFC_256BWQE_ENABLE (1) -#define SPFC_DB_ARM_DISABLE (0) - -#define SPFC_CNTX_SIZE_T_256B (0) -#define SPFC_CNTX_SIZE_256B (256) - -#define SPFC_SERVICE_TYPE_FC (12) -#define SPFC_SERVICE_TYPE_FC_SQ (13) - -#define SPFC_PACKET_COS_FC_CMD (0) -#define SPFC_PACKET_COS_FC_DATA (1) - -#define SPFC_QUEUE_LINK_STYLE (0) -#define SPFC_QUEUE_RING_STYLE (1) - -#define SPFC_NEED_DO_OFFLOAD (1) -#define SPFC_QID_SQ (0) - -/* - *SCQ defines - */ -struct spfc_scq_info { - struct cqm_queue *cqm_scq_info; - u32 wqe_num_per_buf; - u32 wqe_size; - u32 scqc_cq_depth; /* 0-256, 1-512, 2-1k, 3-2k, 4-4k, 5-8k, 6-16k, 7-32k */ - u16 scqc_ci_type; - u16 valid_wqe_num; /* ScQ depth include link wqe */ - u16 ci; - u16 ci_owner; - u32 queue_id; - u32 scqn; - char irq_name[SPFC_IRQ_NAME_MAX]; - u16 msix_entry_idx; - u32 irq_id; - struct tasklet_struct tasklet; - atomic_t flush_stat; - void *hba; - u32 reserved; - struct task_struct *delay_task; - bool task_exit; - u32 intr_mode; -}; - -struct spfc_srq_ctx { - /* DW0 */ - u64 pcie_template : 6; - u64 rsvd0 : 2; - u64 parity : 8; - u64 cur_rqe_usr_id : 16; - u64 cur_rqe_msn : 16; - u64 last_rq_pmsn : 16; - - /* DW1 */ - u64 cur_rqe_gpa; - - /* DW2 */ - u64 ctrl_sl : 1; - u64 cf : 1; - u64 csl : 2; - u64 cr : 1; - u64 bdsl : 4; - u64 pmsn_type : 1; - u64 cur_wqe_o : 1; - u64 consant_sge_len : 17; - u64 cur_sge_id : 4; - u64 cur_sge_remain_len : 17; - u64 ceqn_msix : 11; - u64 int_mode : 2; - u64 cur_sge_l : 1; - u64 cur_sge_v : 1; - - /* DW3 */ - u64 cur_sge_gpa; - - /* DW4 */ - u64 cur_pmsn_gpa; - - /* DW5 */ - u64 rsvd3 : 5; - u64 ring : 1; - u64 loop_o : 1; - u64 rsvd2 : 1; - u64 rqe_dma_attr_idx : 6; - u64 rq_so_ro : 2; - u64 cqe_dma_attr_idx : 6; - u64 cq_so_ro : 2; - u64 rsvd1 : 7; - u64 arm_q : 1; - u64 cur_cqe_cnt : 8; - u64 cqe_max_cnt : 8; - u64 prefetch_max_masn : 16; - - /* DW6~DW7 */ - u64 rsvd4; - u64 rsvd5; -}; - -struct spfc_drq_buff_entry { - u16 buff_id; - void *buff_addr; - dma_addr_t buff_dma; -}; - -enum 
spfc_clean_state { SPFC_CLEAN_DONE, SPFC_CLEAN_DOING, SPFC_CLEAN_BUTT }; -enum spfc_srq_type { SPFC_SRQ_ELS = 1, SPFC_SRQ_IMMI, SPFC_SRQ_BUTT }; - -struct spfc_srq_info { - enum spfc_srq_type srq_type; - - struct cqm_queue *cqm_srq_info; - u32 wqe_num_per_buf; /* Wqe number per buf, dont't inlcude link wqe */ - u32 wqe_size; - u32 valid_wqe_num; /* valid wqe number, dont't include link wqe */ - u16 pi; - u16 pi_owner; - u16 pmsn; - u16 ci; - u16 cmsn; - u32 srqn; - - dma_addr_t first_rqe_recv_dma; - - struct spfc_drq_buff_entry *els_buff_entry_head; - struct buf_describe buf_list; - spinlock_t srq_spin_lock; - bool spin_lock_init; - bool enable; - enum spfc_clean_state state; - - atomic_t ref; - - struct delayed_work del_work; - u32 del_retry_time; - void *hba; -}; - -/* - * The doorbell record keeps PI of WQE, which will be produced next time. - * The PI is 15 bits width o-bit - */ -struct db_record { - u64 pmsn : 16; - u64 dump_pmsn : 16; - u64 rsvd0 : 32; -}; - -/* - * The ci record keeps CI of WQE, which will be consumed next time. - * The ci is 15 bits width with 1 o-bit - */ -struct ci_record { - u64 cmsn : 16; - u64 dump_cmsn : 16; - u64 rsvd0 : 32; -}; - -/* The accumulate data in WQ header */ -struct accumulate { - u64 data_2_uc; - u64 data_2_drv; -}; - -/* The WQ header structure */ -struct wq_header { - struct db_record db_record; - struct ci_record ci_record; - struct accumulate soft_data; -}; - -/* Link list Sq WqePage Pool */ -/* queue header struct */ -struct spfc_queue_header { - u64 door_bell_record; - u64 ci_record; - u64 rsv1; - u64 rsv2; -}; - -/* WPG-WQEPAGE, LLSQ-LINKED LIST SQ */ -struct spfc_wqe_page { - struct list_head entry_wpg; - - /* Wqe Page virtual addr */ - void *wpg_addr; - - /* Wqe Page physical addr */ - u64 wpg_phy_addr; -}; - -struct spfc_sq_wqepage_pool { - u32 wpg_cnt; - u32 wpg_size; - u32 wqe_per_wpg; - - /* PCI DMA Pool */ - struct dma_pool *wpg_dma_pool; - struct spfc_wqe_page *wpg_pool_addr; - struct list_head list_free_wpg_pool; - spinlock_t wpg_pool_lock; - atomic_t wpg_in_use; -}; - -#define SPFC_SQ_DEL_STAGE_TIMEOUT_MS (3 * 1000) -#define SPFC_SRQ_DEL_STAGE_TIMEOUT_MS (10 * 1000) -#define SPFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_MS (10 * 1000) -#define SPFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_CNT (3) - -#define SPFC_SRQ_PROCESS_DELAY_MS (20) - -/* PLOGI parameters */ -struct spfc_plogi_copram { - u32 seq_cnt : 1; - u32 ed_tov : 1; - u32 rsvd : 14; - u32 tx_mfs : 16; - u32 ed_tov_time; -}; - -struct spfc_delay_sqe_ctrl_info { - bool valid; - u32 rport_index; - u32 time_out; - u64 start_jiff; - u32 sid; - u32 did; - u32 xid; - u16 ssqn; - struct spfc_sqe sqe; -}; - -struct spfc_suspend_sqe_info { - void *hba; - u32 magic_num; - u8 old_offload_sts; - struct unf_frame_pkg pkg; - struct spfc_sqe sqe; - struct delayed_work timeout_work; - struct list_head list_sqe_entry; -}; - -struct spfc_delay_destroy_ctrl_info { - bool valid; - u32 rport_index; - u32 time_out; - u64 start_jiff; - struct unf_port_info rport_info; -}; - -/* PARENT SQ Info */ -struct spfc_parent_sq_info { - void *hba; - spinlock_t parent_sq_enqueue_lock; - u32 rport_index; - u32 context_id; - /* Fixed value,used for Doorbell */ - u32 sq_queue_id; - /* When a session is offloaded, tile will return the CacheId to the - * driver,which is used for Doorbell - */ - u32 cache_id; - /* service type, fc or fc */ - u32 service_type; - /* OQID */ - u16 oqid_rd; - u16 oqid_wr; - u32 local_port_id; - u32 remote_port_id; - u32 sqn_base; - bool port_in_flush; - bool sq_in_sess_rst; - atomic_t sq_valid; - /* Used 
by NPIV QoS */ - u8 vport_id; - /* Used by NPIV QoS */ - u8 cs_ctrl; - struct delayed_work del_work; - struct delayed_work flush_done_timeout_work; - u64 del_start_jiff; - dma_addr_t srq_ctx_addr; - atomic_t sq_cached; - atomic_t flush_done_wait_cnt; - struct spfc_plogi_copram plogi_co_parms; - /* dif control info for immi */ - struct unf_dif_control_info sirt_dif_control; - struct spfc_delay_sqe_ctrl_info delay_sqe; - struct spfc_delay_destroy_ctrl_info destroy_sqe; - struct list_head suspend_sqe_list; - atomic_t io_stat[SPFC_MAX_SQ_TASK_TYPE_CNT]; - u8 need_offloaded; -}; - -/* parent context doorbell */ -struct spfc_parent_sq_db { - struct { - u32 xid : 20; - u32 cntx_size : 2; - u32 arm : 1; - u32 c : 1; - u32 cos : 3; - u32 service_type : 5; - } wd0; - - struct { - u32 pi_hi : 8; - u32 sm_data : 20; - u32 qid : 4; - } wd1; -}; - -#define IWARP_FC_DDB_TYPE 3 - -/* direct wqe doorbell */ -struct spfc_direct_wqe_db { - struct { - u32 xid : 20; - u32 cntx_size : 2; - u32 pi_hi : 4; - u32 c : 1; - u32 cos : 3; - u32 ddb : 2; - } wd0; - - struct { - u32 pi_lo : 12; - u32 sm_data : 20; - } wd1; -}; - -struct spfc_parent_cmd_scq_info { - u32 cqm_queue_id; - u32 local_queue_id; -}; - -struct spfc_parent_st_scq_info { - u32 cqm_queue_id; - u32 local_queue_id; -}; - -struct spfc_parent_els_srq_info { - u32 cqm_queue_id; - u32 local_queue_id; -}; - -enum spfc_parent_queue_state { - SPFC_QUEUE_STATE_INITIALIZED = 0, - SPFC_QUEUE_STATE_OFFLOADING = 1, - SPFC_QUEUE_STATE_OFFLOADED = 2, - SPFC_QUEUE_STATE_DESTROYING = 3, - SPFC_QUEUE_STATE_FREE = 4, - SPFC_QUEUE_STATE_BUTT -}; - -struct spfc_parent_ctx { - dma_addr_t parent_ctx_addr; - void *parent_ctx; - struct cqm_qpc_mpt *cqm_parent_ctx_obj; -}; - -struct spfc_parent_queue_info { - spinlock_t parent_queue_state_lock; - struct spfc_parent_ctx parent_ctx; - enum spfc_parent_queue_state offload_state; - struct spfc_parent_sq_info parent_sq_info; - struct spfc_parent_cmd_scq_info parent_cmd_scq_info; - struct spfc_parent_st_scq_info - parent_sts_scq_info; - struct spfc_parent_els_srq_info parent_els_srq_info; - u8 queue_vport_id; - u8 queue_data_cos; -}; - -struct spfc_parent_ssq_info { - void *hba; - spinlock_t parent_sq_enqueue_lock; - atomic_t wqe_page_cnt; - u32 context_id; - u32 cache_id; - u32 sq_queue_id; - u32 sqn; - u32 service_type; - u32 max_sqe_num; /* SQ depth */ - u32 wqe_num_per_buf; - u32 wqe_size; - u32 accum_wqe_cnt; - u32 wqe_offset; - u16 head_start_cmsn; - u16 head_end_cmsn; - u16 last_pmsn; - u16 last_pi_owner; - u32 queue_style; - atomic_t sq_valid; - void *queue_head_original; - struct spfc_queue_header *queue_header; - dma_addr_t queue_hdr_phy_addr_original; - dma_addr_t queue_hdr_phy_addr; - struct list_head list_linked_list_sq; - atomic_t sq_db_cnt; - atomic_t sq_wqe_cnt; - atomic_t sq_cqe_cnt; - atomic_t sqe_minus_cqe_cnt; - atomic_t io_stat[SPFC_MAX_SQ_TASK_TYPE_CNT]; -}; - -struct spfc_parent_shared_queue_info { - struct spfc_parent_ctx parent_ctx; - struct spfc_parent_ssq_info parent_ssq_info; -}; - -struct spfc_parent_queue_mgr { - struct spfc_parent_queue_info parent_queue[UNF_SPFC_MAXRPORT_NUM]; - struct spfc_parent_shared_queue_info shared_queue[SPFC_MAX_SSQ_NUM]; - struct buf_describe parent_sq_buf_list; -}; - -#define SPFC_SRQC_BUS_ROW 8 -#define SPFC_SRQC_BUS_COL 19 -#define SPFC_SQC_BUS_ROW 8 -#define SPFC_SQC_BUS_COL 13 -#define SPFC_HW_SCQC_BUS_ROW 6 -#define SPFC_HW_SCQC_BUS_COL 10 -#define SPFC_HW_SRQC_BUS_ROW 4 -#define SPFC_HW_SRQC_BUS_COL 15 -#define SPFC_SCQC_BUS_ROW 3 -#define SPFC_SCQC_BUS_COL 29 - 
-#define SPFC_QUEUE_INFO_BUS_NUM 4 -struct spfc_queue_info_bus { - u64 bus[SPFC_QUEUE_INFO_BUS_NUM]; -}; - -u32 spfc_free_parent_resource(void *handle, struct unf_port_info *rport_info); -u32 spfc_alloc_parent_resource(void *handle, struct unf_port_info *rport_info); -u32 spfc_alloc_parent_queue_mgr(void *handle); -void spfc_free_parent_queue_mgr(void *handle); -u32 spfc_create_common_share_queues(void *handle); -u32 spfc_create_ssq(void *handle); -void spfc_destroy_common_share_queues(void *v_pstHba); -u32 spfc_alloc_parent_sq_wqe_page_pool(void *handle); -void spfc_free_parent_sq_wqe_page_pool(void *handle); -struct spfc_parent_queue_info * -spfc_find_parent_queue_info_by_pkg(void *handle, struct unf_frame_pkg *pkg); -struct spfc_parent_sq_info * -spfc_find_parent_sq_by_pkg(void *handle, struct unf_frame_pkg *pkg); -u32 spfc_root_cmdq_enqueue(void *handle, union spfc_cmdqe *cmdqe, u16 cmd_len); -void spfc_process_scq_cqe(ulong scq_info); -u32 spfc_process_scq_cqe_entity(ulong scq_info, u32 proc_cnt); -void spfc_post_els_srq_wqe(struct spfc_srq_info *srq_info, u16 buf_id); -void spfc_process_aeqe(void *handle, u8 event_type, u8 *event_val); -u32 spfc_parent_sq_enqueue(struct spfc_parent_sq_info *sq, struct spfc_sqe *io_sqe, - u16 ssqn); -u32 spfc_parent_ssq_enqueue(struct spfc_parent_ssq_info *ssq, - struct spfc_sqe *io_sqe, u8 wqe_type); -void spfc_free_sq_wqe_page(struct spfc_parent_ssq_info *ssq, u32 cur_cmsn); -u32 spfc_reclaim_sq_wqe_page(void *handle, union spfc_scqe *scqe); -void spfc_set_rport_flush_state(void *handle, bool in_flush); -u32 spfc_clear_fetched_sq_wqe(void *handle); -u32 spfc_clear_pending_sq_wqe(void *handle); -void spfc_free_parent_queues(void *handle); -void spfc_free_ssq(void *handle, u32 free_sq_num); -void spfc_enalbe_queues_dispatch(void *handle); -void spfc_queue_pre_process(void *handle, bool clean); -void spfc_queue_post_process(void *handle); -void spfc_free_parent_queue_info(void *handle, struct spfc_parent_queue_info *parent_queue_info); -u32 spfc_send_session_rst_cmd(void *handle, - struct spfc_parent_queue_info *parent_queue_info, - enum spfc_session_reset_mode mode); -u32 spfc_send_nop_cmd(void *handle, struct spfc_parent_sq_info *parent_sq_info, - u32 magic_num, u16 sqn); -void spfc_build_session_rst_wqe(void *handle, struct spfc_parent_sq_info *sq, - struct spfc_sqe *sqe, - enum spfc_session_reset_mode mode, u32 scqn); -void spfc_wq_destroy_els_srq(struct work_struct *work); -void spfc_destroy_els_srq(void *handle); -u32 spfc_push_delay_sqe(void *hba, - struct spfc_parent_queue_info *offload_parent_queue, - struct spfc_sqe *sqe, struct unf_frame_pkg *pkg); -void spfc_push_destroy_parent_queue_sqe(void *hba, - struct spfc_parent_queue_info *offloading_parent_queue, - struct unf_port_info *rport_info); -void spfc_pop_destroy_parent_queue_sqe(void *handle, - struct spfc_delay_destroy_ctrl_info *destroy_sqe_info); -struct spfc_parent_queue_info *spfc_find_offload_parent_queue(void *handle, - u32 local_id, - u32 remote_id, - u32 rport_index); -u32 spfc_flush_ini_resp_queue(void *handle); -void spfc_rcvd_els_from_srq_timeout(struct work_struct *work); -u32 spfc_send_aeq_info_via_cmdq(void *hba, u32 aeq_error_type); -u32 spfc_parent_sq_ring_doorbell(struct spfc_parent_ssq_info *sq, u8 qos_level, - u32 c); -void spfc_sess_resource_free_sync(void *handle, - struct unf_port_info *rport_info); -u32 spfc_suspend_sqe_and_send_nop(void *handle, - struct spfc_parent_queue_info *parent_queue, - struct spfc_sqe *sqe, struct unf_frame_pkg *pkg); -u32 
spfc_pop_suspend_sqe(void *handle, - struct spfc_parent_queue_info *parent_queue, - struct spfc_suspend_sqe_info *suspen_sqe); -#endif diff --git a/drivers/scsi/spfc/hw/spfc_service.c b/drivers/scsi/spfc/hw/spfc_service.c deleted file mode 100644 index 1da58e3f9fbe..000000000000 --- a/drivers/scsi/spfc/hw/spfc_service.c +++ /dev/null @@ -1,2170 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "spfc_service.h" -#include "unf_log.h" -#include "spfc_io.h" -#include "spfc_chipitf.h" - -#define SPFC_ELS_SRQ_BUF_NUM (0x9) -#define SPFC_LS_GS_USERID_LEN ((FC_LS_GS_USERID_CNT_MAX + 1) / 2) - -struct unf_scqe_handle_table { - u32 scqe_type; /* ELS type */ - bool reclaim_sq_wpg; - u32 (*scqe_handle_func)(struct spfc_hba_info *hba, union spfc_scqe *scqe); -}; - -static u32 spfc_get_els_rsp_pld_len(u16 els_type, u16 els_cmnd, - u32 *els_acc_pld_len) -{ - u32 ret = RETURN_OK; - - FC_CHECK_RETURN_VALUE(els_acc_pld_len, UNF_RETURN_ERROR); - - /* RJT */ - if (els_type == ELS_RJT) { - *els_acc_pld_len = UNF_ELS_ACC_RJT_LEN; - return RETURN_OK; - } - - /* ACC */ - switch (els_cmnd) { - /* uses the same PAYLOAD length as PLOGI. */ - case ELS_FLOGI: - case ELS_PDISC: - case ELS_PLOGI: - *els_acc_pld_len = UNF_PLOGI_ACC_PAYLOAD_LEN; - break; - - case ELS_PRLI: - /* If sirt is enabled, The PRLI ACC payload extends 12 bytes */ - *els_acc_pld_len = (UNF_PRLI_ACC_PAYLOAD_LEN - UNF_PRLI_SIRT_EXTRA_SIZE); - - break; - - case ELS_LOGO: - *els_acc_pld_len = UNF_LOGO_ACC_PAYLOAD_LEN; - break; - - case ELS_PRLO: - *els_acc_pld_len = UNF_PRLO_ACC_PAYLOAD_LEN; - break; - - case ELS_RSCN: - *els_acc_pld_len = UNF_RSCN_ACC_PAYLOAD_LEN; - break; - - case ELS_ADISC: - *els_acc_pld_len = UNF_ADISC_ACC_PAYLOAD_LEN; - break; - - case ELS_RRQ: - *els_acc_pld_len = UNF_RRQ_ACC_PAYLOAD_LEN; - break; - - case ELS_SCR: - *els_acc_pld_len = UNF_SCR_RSP_PAYLOAD_LEN; - break; - - case ELS_ECHO: - *els_acc_pld_len = UNF_ECHO_ACC_PAYLOAD_LEN; - break; - - case ELS_REC: - *els_acc_pld_len = UNF_REC_ACC_PAYLOAD_LEN; - break; - - default: - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_WARN, "[warn]Unknown ELS command(0x%x)", - els_cmnd); - ret = UNF_RETURN_ERROR; - break; - } - - return ret; -} - -struct unf_els_cmd_paylod_table { - u16 els_cmnd; /* ELS type */ - u32 els_req_pld_len; - u32 els_rsp_pld_len; -}; - -static const struct unf_els_cmd_paylod_table els_pld_table_map[] = { - {ELS_FDISC, UNF_FDISC_PAYLOAD_LEN, UNF_FDISC_ACC_PAYLOAD_LEN}, - {ELS_FLOGI, UNF_FLOGI_PAYLOAD_LEN, UNF_FLOGI_ACC_PAYLOAD_LEN}, - {ELS_PLOGI, UNF_PLOGI_PAYLOAD_LEN, UNF_PLOGI_ACC_PAYLOAD_LEN}, - {ELS_SCR, UNF_SCR_PAYLOAD_LEN, UNF_SCR_RSP_PAYLOAD_LEN}, - {ELS_PDISC, UNF_PDISC_PAYLOAD_LEN, UNF_PDISC_ACC_PAYLOAD_LEN}, - {ELS_LOGO, UNF_LOGO_PAYLOAD_LEN, UNF_LOGO_ACC_PAYLOAD_LEN}, - {ELS_PRLO, UNF_PRLO_PAYLOAD_LEN, UNF_PRLO_ACC_PAYLOAD_LEN}, - {ELS_ADISC, UNF_ADISC_PAYLOAD_LEN, UNF_ADISC_ACC_PAYLOAD_LEN}, - {ELS_RRQ, UNF_RRQ_PAYLOAD_LEN, UNF_RRQ_ACC_PAYLOAD_LEN}, - {ELS_RSCN, 0, UNF_RSCN_ACC_PAYLOAD_LEN}, - {ELS_ECHO, UNF_ECHO_PAYLOAD_LEN, UNF_ECHO_ACC_PAYLOAD_LEN}, - {ELS_REC, UNF_REC_PAYLOAD_LEN, UNF_REC_ACC_PAYLOAD_LEN} -}; - -static u32 spfc_get_els_req_acc_pld_len(u16 els_cmnd, u32 *req_pld_len, u32 *rsp_pld_len) -{ - u32 ret = RETURN_OK; - u32 i; - - FC_CHECK_RETURN_VALUE(req_pld_len, UNF_RETURN_ERROR); - - for (i = 0; i < (sizeof(els_pld_table_map) / - sizeof(struct unf_els_cmd_paylod_table)); - i++) { - if (els_pld_table_map[i].els_cmnd == els_cmnd) { - *req_pld_len = 
els_pld_table_map[i].els_req_pld_len; - *rsp_pld_len = els_pld_table_map[i].els_rsp_pld_len; - return ret; - } - } - - switch (els_cmnd) { - case ELS_PRLI: - /* If sirt is enabled, The PRLI ACC payload extends 12 bytes */ - *req_pld_len = SPFC_GET_PRLI_PAYLOAD_LEN; - *rsp_pld_len = SPFC_GET_PRLI_PAYLOAD_LEN; - - break; - - default: - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Unknown ELS_CMD(0x%x)", els_cmnd); - ret = UNF_RETURN_ERROR; - break; - } - - return ret; -} - -static u32 spfc_check_parent_qinfo_valid(struct spfc_hba_info *hba, struct unf_frame_pkg *pkg, - struct spfc_parent_queue_info **prt_qinfo) -{ - if (!*prt_qinfo) { - if (pkg->type == UNF_PKG_ELS_REQ || pkg->type == UNF_PKG_ELS_REPLY) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) send LS SID(0x%x) DID(0x%x) with null prtqinfo", - hba->port_cfg.port_id, pkg->frame_head.csctl_sid, - pkg->frame_head.rctl_did); - pkg->private_data[PKG_PRIVATE_XCHG_RPORT_INDEX] = SPFC_DEFAULT_RPORT_INDEX; - *prt_qinfo = spfc_find_parent_queue_info_by_pkg(hba, pkg); - if (!*prt_qinfo) - return UNF_RETURN_ERROR; - } else { - return UNF_RETURN_ERROR; - } - } - - if (pkg->type == UNF_PKG_GS_REQ && SPFC_RPORT_NOT_OFFLOADED(*prt_qinfo)) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[info]Port(0x%x) send GS SID(0x%x) DID(0x%x), send GS Request before PLOGI", - hba->port_cfg.port_id, pkg->frame_head.csctl_sid, - pkg->frame_head.rctl_did); - return UNF_RETURN_ERROR; - } - return RETURN_OK; -} - -static void spfc_get_pkt_cmnd_type_code(struct unf_frame_pkg *pkg, - u16 *ls_gs_cmnd_code, - u16 *ls_gs_cmnd_type) -{ - *ls_gs_cmnd_type = SPFC_GET_LS_GS_CMND_CODE(pkg->cmnd); - if (SPFC_PKG_IS_ELS_RSP(*ls_gs_cmnd_type)) { - *ls_gs_cmnd_code = SPFC_GET_ELS_RSP_CODE(pkg->cmnd); - } else if (pkg->type == UNF_PKG_GS_REQ) { - *ls_gs_cmnd_code = *ls_gs_cmnd_type; - } else { - *ls_gs_cmnd_code = *ls_gs_cmnd_type; - *ls_gs_cmnd_type = ELS_CMND; - } -} - -static u32 spfc_get_gs_req_rsp_pld_len(u16 cmnd_code, u32 *gs_pld_len, u32 *gs_rsp_pld_len) -{ - FC_CHECK_RETURN_VALUE(gs_pld_len, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(gs_rsp_pld_len, UNF_RETURN_ERROR); - - switch (cmnd_code) { - case NS_GPN_ID: - *gs_pld_len = UNF_GPNID_PAYLOAD_LEN; - *gs_rsp_pld_len = UNF_GPNID_RSP_PAYLOAD_LEN; - break; - - case NS_GNN_ID: - *gs_pld_len = UNF_GNNID_PAYLOAD_LEN; - *gs_rsp_pld_len = UNF_GNNID_RSP_PAYLOAD_LEN; - break; - - case NS_GFF_ID: - *gs_pld_len = UNF_GFFID_PAYLOAD_LEN; - *gs_rsp_pld_len = UNF_GFFID_RSP_PAYLOAD_LEN; - break; - - case NS_GID_FT: - case NS_GID_PT: - *gs_pld_len = UNF_GID_PAYLOAD_LEN; - *gs_rsp_pld_len = UNF_GID_ACC_PAYLOAD_LEN; - break; - - case NS_RFT_ID: - *gs_pld_len = UNF_RFTID_PAYLOAD_LEN; - *gs_rsp_pld_len = UNF_RFTID_RSP_PAYLOAD_LEN; - break; - - case NS_RFF_ID: - *gs_pld_len = UNF_RFFID_PAYLOAD_LEN; - *gs_rsp_pld_len = UNF_RFFID_RSP_PAYLOAD_LEN; - break; - case NS_GA_NXT: - *gs_pld_len = UNF_GID_PAYLOAD_LEN; - *gs_rsp_pld_len = UNF_GID_ACC_PAYLOAD_LEN; - break; - - case NS_GIEL: - *gs_pld_len = UNF_RFTID_RSP_PAYLOAD_LEN; - *gs_rsp_pld_len = UNF_GID_ACC_PAYLOAD_LEN; - break; - - default: - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Unknown GS commond type(0x%x)", cmnd_code); - return UNF_RETURN_ERROR; - } - - return RETURN_OK; -} - -static void *spfc_get_els_frame_addr(struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, - u16 els_cmnd_code, u16 els_cmnd_type, - u64 *phy_addr) -{ - void *frame_pld_addr = NULL; - dma_addr_t els_frame_addr = 0; - - if (els_cmnd_code == ELS_ECHO) { - frame_pld_addr = (void 
*)UNF_GET_ECHO_PAYLOAD(pkg); - els_frame_addr = UNF_GET_ECHO_PAYLOAD_PHYADDR(pkg); - } else if (els_cmnd_code == ELS_RSCN) { - if (els_cmnd_type == ELS_CMND) { - /* Not Support */ - frame_pld_addr = NULL; - els_frame_addr = 0; - } else { - frame_pld_addr = (void *)UNF_GET_RSCN_ACC_PAYLOAD(pkg); - els_frame_addr = pkg->unf_cmnd_pload_bl.buf_dma_addr + - sizeof(struct unf_fc_head); - } - } else { - frame_pld_addr = (void *)SPFC_GET_CMND_PAYLOAD_ADDR(pkg); - els_frame_addr = pkg->unf_cmnd_pload_bl.buf_dma_addr + - sizeof(struct unf_fc_head); - } - *phy_addr = els_frame_addr; - return frame_pld_addr; -} - -static u32 spfc_get_frame_info(struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, void **frame_pld_addr, - u32 *frame_pld_len, u64 *frame_phy_addr, - u32 *acc_pld_len) -{ - u32 ret = RETURN_OK; - u16 ls_gs_cmnd_code = SPFC_ZERO; - u16 ls_gs_cmnd_type = SPFC_ZERO; - - spfc_get_pkt_cmnd_type_code(pkg, &ls_gs_cmnd_code, &ls_gs_cmnd_type); - - if (pkg->type == UNF_PKG_GS_REQ) { - ret = spfc_get_gs_req_rsp_pld_len(ls_gs_cmnd_code, - frame_pld_len, acc_pld_len); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) send GS SID(0x%x) DID(0x%x), get error GS request and response payload length", - hba->port_cfg.port_id, - pkg->frame_head.csctl_sid, - pkg->frame_head.rctl_did); - - return ret; - } - *frame_pld_addr = (void *)(SPFC_GET_CMND_PAYLOAD_ADDR(pkg)); - *frame_phy_addr = pkg->unf_cmnd_pload_bl.buf_dma_addr + sizeof(struct unf_fc_head); - if (ls_gs_cmnd_code == NS_GID_FT || ls_gs_cmnd_code == NS_GID_PT) - *frame_pld_addr = (void *)(UNF_GET_GID_PAYLOAD(pkg)); - } else { - *frame_pld_addr = spfc_get_els_frame_addr(hba, pkg, ls_gs_cmnd_code, - ls_gs_cmnd_type, frame_phy_addr); - if (SPFC_PKG_IS_ELS_RSP(ls_gs_cmnd_type)) { - ret = spfc_get_els_rsp_pld_len(ls_gs_cmnd_type, ls_gs_cmnd_code, - frame_pld_len); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) get els cmd (0x%x) rsp len failed.", - hba->port_cfg.port_id, - ls_gs_cmnd_code); - return ret; - } - } else { - ret = spfc_get_els_req_acc_pld_len(ls_gs_cmnd_code, frame_pld_len, - acc_pld_len); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) get els cmd (0x%x) req and acc len failed.", - hba->port_cfg.port_id, - ls_gs_cmnd_code); - return ret; - } - } - } - return ret; -} - -static u32 -spfc_send_ls_gs_via_parent(struct spfc_hba_info *hba, struct unf_frame_pkg *pkg, - struct spfc_parent_queue_info *prt_queue_info) -{ - u32 ret = UNF_RETURN_ERROR; - u16 ls_gs_cmnd_code = SPFC_ZERO; - u16 ls_gs_cmnd_type = SPFC_ZERO; - u16 remote_exid = 0; - u16 hot_tag = 0; - struct spfc_parent_sq_info *parent_sq_info = NULL; - struct spfc_sqe tmp_sqe; - struct spfc_sqe *sqe = NULL; - void *frame_pld_addr = NULL; - u32 frame_pld_len = 0; - u32 acc_pld_len = 0; - u64 frame_pa = 0; - ulong flags = 0; - u16 ssqn = 0; - spinlock_t *prtq_state_lock = NULL; - - ssqn = (u16)pkg->private_data[PKG_PRIVATE_XCHG_SSQ_INDEX]; - sqe = &tmp_sqe; - memset(sqe, 0, sizeof(struct spfc_sqe)); - - parent_sq_info = &prt_queue_info->parent_sq_info; - hot_tag = (u16)UNF_GET_HOTPOOL_TAG(pkg) + hba->exi_base; - - spfc_get_pkt_cmnd_type_code(pkg, &ls_gs_cmnd_code, &ls_gs_cmnd_type); - - ret = spfc_get_frame_info(hba, pkg, &frame_pld_addr, &frame_pld_len, - &frame_pa, &acc_pld_len); - if (ret != RETURN_OK) - return ret; - - if (SPFC_PKG_IS_ELS_RSP(ls_gs_cmnd_type)) { - remote_exid = UNF_GET_OXID(pkg); - spfc_build_els_wqe_ts_rsp(sqe, prt_queue_info, pkg, - frame_pld_addr, 
ls_gs_cmnd_type, - ls_gs_cmnd_code); - - /* Assemble the SQE Task Section Els Common part */ - spfc_build_service_wqe_ts_common(&sqe->ts_sl, parent_sq_info->rport_index, - UNF_GET_RXID(pkg), remote_exid, - SPFC_LSW(frame_pld_len)); - } else { - remote_exid = UNF_GET_RXID(pkg); - /* send els req ,only use local_xid for hotpooltag */ - spfc_build_els_wqe_ts_req(sqe, parent_sq_info, - prt_queue_info->parent_sts_scq_info.cqm_queue_id, - frame_pld_addr, pkg); - spfc_build_service_wqe_ts_common(&sqe->ts_sl, parent_sq_info->rport_index, hot_tag, - remote_exid, SPFC_LSW(frame_pld_len)); - } - /* Assemble the SQE Control Section part */ - spfc_build_service_wqe_ctrl_section(&sqe->ctrl_sl, SPFC_BYTES_TO_QW_NUM(SPFC_SQE_TS_SIZE), - SPFC_BYTES_TO_QW_NUM(sizeof(struct spfc_variable_sge))); - - /* Build SGE */ - spfc_build_els_gs_wqe_sge(sqe, frame_pld_addr, frame_pa, frame_pld_len, - parent_sq_info->context_id, hba); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) RPort(0x%x) send ELS/GS Type(0x%x) Code(0x%x) HotTag(0x%x)", - hba->port_cfg.port_id, parent_sq_info->rport_index, ls_gs_cmnd_type, - ls_gs_cmnd_code, hot_tag); - if (ls_gs_cmnd_code == ELS_PLOGI || ls_gs_cmnd_code == ELS_LOGO) { - ret = spfc_suspend_sqe_and_send_nop(hba, prt_queue_info, sqe, pkg); - return ret; - } - prtq_state_lock = &prt_queue_info->parent_queue_state_lock; - spin_lock_irqsave(prtq_state_lock, flags); - if (SPFC_RPORT_NOT_OFFLOADED(prt_queue_info)) { - spin_unlock_irqrestore(prtq_state_lock, flags); - /* Send PLOGI or PLOGI ACC or SCR if session not offload */ - ret = spfc_send_els_via_default_session(hba, sqe, pkg, prt_queue_info); - } else { - spin_unlock_irqrestore(prtq_state_lock, flags); - ret = spfc_parent_sq_enqueue(parent_sq_info, sqe, ssqn); - } - - return ret; -} - -u32 spfc_send_ls_gs_cmnd(void *handle, struct unf_frame_pkg *pkg) -{ - u32 ret = UNF_RETURN_ERROR; - struct spfc_hba_info *hba = NULL; - struct spfc_parent_queue_info *prt_qinfo = NULL; - u16 ls_gs_cmnd_code = SPFC_ZERO; - union unf_sfs_u *sfs_entry = NULL; - struct unf_rrq *rrq_pld = NULL; - u16 ox_id = 0; - u16 rx_id = 0; - - /* Check Parameters */ - FC_CHECK_RETURN_VALUE(handle, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(UNF_GET_SFS_ENTRY(pkg), UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(SPFC_GET_CMND_PAYLOAD_ADDR(pkg), UNF_RETURN_ERROR); - - SPFC_CHECK_PKG_ALLOCTIME(pkg); - hba = (struct spfc_hba_info *)handle; - ls_gs_cmnd_code = SPFC_GET_LS_GS_CMND_CODE(pkg->cmnd); - - /* If RRQ Req, Special processing */ - if (ls_gs_cmnd_code == ELS_RRQ) { - sfs_entry = UNF_GET_SFS_ENTRY(pkg); - rrq_pld = &sfs_entry->rrq; - ox_id = (u16)(rrq_pld->oxid_rxid >> UNF_SHIFT_16); - rx_id = (u16)(rrq_pld->oxid_rxid & SPFC_RXID_MASK); - rrq_pld->oxid_rxid = (u32)ox_id << UNF_SHIFT_16 | rx_id; - } - - prt_qinfo = spfc_find_parent_queue_info_by_pkg(hba, pkg); - ret = spfc_check_parent_qinfo_valid(hba, pkg, &prt_qinfo); - - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_MAJOR, - "[error]Port(0x%x) send ELS/GS SID(0x%x) DID(0x%x) check qinfo invalid", - hba->port_cfg.port_id, pkg->frame_head.csctl_sid, - pkg->frame_head.rctl_did); - return UNF_RETURN_ERROR; - } - - ret = spfc_send_ls_gs_via_parent(hba, pkg, prt_qinfo); - - return ret; -} - -void spfc_save_login_parms_in_sq_info(struct spfc_hba_info *hba, - struct unf_port_login_parms *login_params) -{ - u32 rport_index = login_params->rport_index; - struct spfc_parent_sq_info *parent_sq_info = NULL; - - if (rport_index >= UNF_SPFC_MAXRPORT_NUM) { - 
FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[err]Port(0x%x) save login parms,but uplevel alloc invalid rport index: 0x%x", - hba->port_cfg.port_id, rport_index); - - return; - } - - parent_sq_info = &hba->parent_queue_mgr->parent_queue[rport_index].parent_sq_info; - - parent_sq_info->plogi_co_parms.seq_cnt = login_params->seq_cnt; - parent_sq_info->plogi_co_parms.ed_tov = login_params->ed_tov; - parent_sq_info->plogi_co_parms.tx_mfs = (login_params->tx_mfs < - SPFC_DEFAULT_TX_MAX_FREAM_SIZE) ? - SPFC_DEFAULT_TX_MAX_FREAM_SIZE : - login_params->tx_mfs; - parent_sq_info->plogi_co_parms.ed_tov_time = login_params->ed_tov_timer_val; -} - -static void -spfc_recover_offloading_state(struct spfc_parent_queue_info *prt_queue_info, - enum spfc_parent_queue_state offload_state) -{ - ulong flags = 0; - - spin_lock_irqsave(&prt_queue_info->parent_queue_state_lock, flags); - - if (prt_queue_info->offload_state == SPFC_QUEUE_STATE_OFFLOADING) - prt_queue_info->offload_state = offload_state; - - spin_unlock_irqrestore(&prt_queue_info->parent_queue_state_lock, flags); -} - -static bool spfc_check_need_delay_offload(void *hba, struct unf_frame_pkg *pkg, u32 rport_index, - struct spfc_parent_queue_info *cur_prt_queue_info, - struct spfc_parent_queue_info **offload_prt_queue_info) -{ - ulong flags = 0; - struct spfc_parent_queue_info *prt_queue_info = NULL; - spinlock_t *prtq_state_lock = NULL; - - prtq_state_lock = &cur_prt_queue_info->parent_queue_state_lock; - spin_lock_irqsave(prtq_state_lock, flags); - - if (cur_prt_queue_info->offload_state == SPFC_QUEUE_STATE_OFFLOADING) { - spin_unlock_irqrestore(prtq_state_lock, flags); - - prt_queue_info = spfc_find_offload_parent_queue(hba, pkg->frame_head.csctl_sid & - UNF_NPORTID_MASK, - pkg->frame_head.rctl_did & - UNF_NPORTID_MASK, rport_index); - if (prt_queue_info) { - *offload_prt_queue_info = prt_queue_info; - return true; - } - } else { - spin_unlock_irqrestore(prtq_state_lock, flags); - } - - return false; -} - -static u16 spfc_build_wqe_with_offload(struct spfc_hba_info *hba, struct spfc_sqe *sqe, - struct spfc_parent_queue_info *prt_queue_info, - struct unf_frame_pkg *pkg, - enum spfc_parent_queue_state last_offload_state) -{ - u32 tx_mfs = 2048; - u32 edtov_timer = 2000; - dma_addr_t ctx_pa = 0; - u16 els_cmnd_type = SPFC_ZERO; - u16 els_cmnd_code = SPFC_ZERO; - void *ctx_va = NULL; - struct spfc_parent_context *parent_ctx_info = NULL; - struct spfc_sw_section *sw_setction = NULL; - struct spfc_parent_sq_info *parent_sq_info = &prt_queue_info->parent_sq_info; - u16 offload_flag = 0; - - els_cmnd_type = SPFC_GET_ELS_RSP_TYPE(pkg->cmnd); - if (SPFC_PKG_IS_ELS_RSP(els_cmnd_type)) { - els_cmnd_code = SPFC_GET_ELS_RSP_CODE(pkg->cmnd); - } else { - els_cmnd_code = els_cmnd_type; - els_cmnd_type = ELS_CMND; - } - - offload_flag = SPFC_CHECK_NEED_OFFLOAD(els_cmnd_code, els_cmnd_type, last_offload_state); - - parent_ctx_info = (struct spfc_parent_context *)(prt_queue_info->parent_ctx.parent_ctx); - sw_setction = &parent_ctx_info->sw_section; - - sw_setction->tx_mfs = cpu_to_be16((u16)(tx_mfs)); - sw_setction->e_d_tov_timer_val = cpu_to_be32(edtov_timer); - - spfc_big_to_cpu32(&sw_setction->sw_ctxt_misc.pctxt_val0, - sizeof(sw_setction->sw_ctxt_misc.pctxt_val0)); - sw_setction->sw_ctxt_misc.dw.port_id = SPFC_GET_NETWORK_PORT_ID(hba); - spfc_cpu_to_big32(&sw_setction->sw_ctxt_misc.pctxt_val0, - sizeof(sw_setction->sw_ctxt_misc.pctxt_val0)); - - spfc_big_to_cpu32(&sw_setction->sw_ctxt_config.pctxt_val1, - sizeof(sw_setction->sw_ctxt_config.pctxt_val1)); - 
spfc_cpu_to_big32(&sw_setction->sw_ctxt_config.pctxt_val1, - sizeof(sw_setction->sw_ctxt_config.pctxt_val1)); - - /* Fill in contex to the chip */ - ctx_pa = prt_queue_info->parent_ctx.cqm_parent_ctx_obj->paddr; - ctx_va = prt_queue_info->parent_ctx.cqm_parent_ctx_obj->vaddr; - - /* No need write key and no need do BIG TO CPU32 */ - memcpy(ctx_va, prt_queue_info->parent_ctx.parent_ctx, sizeof(struct spfc_parent_context)); - - if (SPFC_PKG_IS_ELS_RSP(els_cmnd_type)) { - sqe->ts_sl.cont.els_rsp.context_gpa_hi = SPFC_HIGH_32_BITS(ctx_pa); - sqe->ts_sl.cont.els_rsp.context_gpa_lo = SPFC_LOW_32_BITS(ctx_pa); - sqe->ts_sl.cont.els_rsp.wd1.offload_flag = offload_flag; - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[info]sid 0x%x, did 0x%x, GPA HIGH 0x%x,GPA LOW 0x%x, scq 0x%x,offload flag 0x%x", - parent_sq_info->local_port_id, - parent_sq_info->remote_port_id, - sqe->ts_sl.cont.els_rsp.context_gpa_hi, - sqe->ts_sl.cont.els_rsp.context_gpa_lo, - prt_queue_info->parent_sts_scq_info.cqm_queue_id, - offload_flag); - } else { - sqe->ts_sl.cont.t_els_gs.context_gpa_hi = SPFC_HIGH_32_BITS(ctx_pa); - sqe->ts_sl.cont.t_els_gs.context_gpa_lo = SPFC_LOW_32_BITS(ctx_pa); - sqe->ts_sl.cont.t_els_gs.wd4.offload_flag = offload_flag; - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[info]sid 0x%x, did 0x%x, GPA HIGH 0x%x,GPA LOW 0x%x, scq 0x%x,offload flag 0x%x", - parent_sq_info->local_port_id, - parent_sq_info->remote_port_id, - sqe->ts_sl.cont.t_els_gs.context_gpa_hi, - sqe->ts_sl.cont.t_els_gs.context_gpa_lo, - prt_queue_info->parent_sts_scq_info.cqm_queue_id, - offload_flag); - } - - if (offload_flag) { - prt_queue_info->offload_state = SPFC_QUEUE_STATE_OFFLOADING; - parent_sq_info->need_offloaded = SPFC_NEED_DO_OFFLOAD; - } - - return offload_flag; -} - -u32 spfc_send_els_via_default_session(struct spfc_hba_info *hba, struct spfc_sqe *io_sqe, - struct unf_frame_pkg *pkg, - struct spfc_parent_queue_info *prt_queue_info) -{ - ulong flags = 0; - bool sqe_delay = false; - u32 ret = UNF_RETURN_ERROR; - u16 els_cmnd_code = SPFC_ZERO; - u16 els_cmnd_type = SPFC_ZERO; - u16 ssqn = (u16)pkg->private_data[PKG_PRIVATE_XCHG_SSQ_INDEX]; - u32 rport_index = pkg->private_data[PKG_PRIVATE_XCHG_RPORT_INDEX]; - struct spfc_sqe *sqe = io_sqe; - struct spfc_parent_queue_info *default_prt_queue_info = NULL; - struct spfc_parent_sq_info *parent_sq_info = &prt_queue_info->parent_sq_info; - struct spfc_parent_queue_info *offload_queue_info = NULL; - enum spfc_parent_queue_state last_offload_state = SPFC_QUEUE_STATE_INITIALIZED; - struct spfc_delay_destroy_ctrl_info delay_ctl_info; - u16 offload_flag = 0; - u32 default_index = SPFC_DEFAULT_RPORT_INDEX; - - memset(&delay_ctl_info, 0, sizeof(struct spfc_delay_destroy_ctrl_info)); - /* Determine the ELS type in pkg */ - els_cmnd_type = SPFC_GET_LS_GS_CMND_CODE(pkg->cmnd); - - if (SPFC_PKG_IS_ELS_RSP(els_cmnd_type)) { - els_cmnd_code = SPFC_GET_ELS_RSP_CODE(pkg->cmnd); - } else { - els_cmnd_code = els_cmnd_type; - els_cmnd_type = ELS_CMND; - } - - spin_lock_irqsave(&prt_queue_info->parent_queue_state_lock, flags); - - last_offload_state = prt_queue_info->offload_state; - - offload_flag = spfc_build_wqe_with_offload(hba, sqe, prt_queue_info, - pkg, last_offload_state); - - spin_unlock_irqrestore(&prt_queue_info->parent_queue_state_lock, flags); - - if (!offload_flag) { - default_prt_queue_info = &hba->parent_queue_mgr->parent_queue[default_index]; - if (!default_prt_queue_info) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_ERR, - "[ERR]cmd(0x%x), type(0x%x) send fail, default session null", - 
els_cmnd_code, els_cmnd_type); - return UNF_RETURN_ERROR; - } - parent_sq_info = &default_prt_queue_info->parent_sq_info; - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[info]cmd(0x%x), type(0x%x) send via default session", - els_cmnd_code, els_cmnd_type); - } else { - /* Need this xid to judge delay offload, when Sqe Enqueue will - * write again - */ - sqe->ts_sl.xid = parent_sq_info->context_id; - sqe_delay = spfc_check_need_delay_offload(hba, pkg, rport_index, prt_queue_info, - &offload_queue_info); - - if (sqe_delay) { - ret = spfc_push_delay_sqe(hba, offload_queue_info, sqe, pkg); - if (ret == RETURN_OK) { - spfc_recover_offloading_state(prt_queue_info, last_offload_state); - return ret; - } - } - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_INFO, - "[info]cmd(0x%x), type(0x%x) do secretly offload", - els_cmnd_code, els_cmnd_type); - } - - ret = spfc_parent_sq_enqueue(parent_sq_info, sqe, ssqn); - - if (ret != RETURN_OK) { - spfc_recover_offloading_state(prt_queue_info, last_offload_state); - - spin_lock_irqsave(&prt_queue_info->parent_queue_state_lock, - flags); - - if (prt_queue_info->parent_sq_info.destroy_sqe.valid) { - memcpy(&delay_ctl_info, &prt_queue_info->parent_sq_info.destroy_sqe, - sizeof(struct spfc_delay_destroy_ctrl_info)); - - prt_queue_info->parent_sq_info.destroy_sqe.valid = false; - } - - spin_unlock_irqrestore(&prt_queue_info->parent_queue_state_lock, flags); - - spfc_pop_destroy_parent_queue_sqe((void *)hba, &delay_ctl_info); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_WARN, - "[warn]Port(0x%x) RPort(0x%x) send ELS Type(0x%x) Code(0x%x) fail,recover offloadstatus(%u)", - hba->port_cfg.port_id, rport_index, els_cmnd_type, - els_cmnd_code, prt_queue_info->offload_state); - } - - return ret; -} - -static u32 spfc_rcv_ls_gs_rsp_payload(struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, u32 hot_tag, - u8 *els_pld_buf, u32 pld_len) -{ - u32 ret = UNF_RETURN_ERROR; - - pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = hot_tag; - if (pkg->type == UNF_PKG_GS_REQ_DONE) - spfc_big_to_cpu32(els_pld_buf, pld_len); - else - pkg->byte_orders |= SPFC_BIT_2; - - pkg->unf_cmnd_pload_bl.buffer_ptr = els_pld_buf; - pkg->unf_cmnd_pload_bl.length = pld_len; - - pkg->last_pkg_flag = UNF_PKG_NOT_LAST_RESPONSE; - - UNF_LOWLEVEL_RECEIVE_LS_GS_PKG(ret, hba->lport, pkg); - - return ret; -} - -u32 spfc_scq_recv_abts_rsp(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - /* Default path, which is sent from SCQ to the driver */ - u8 status = 0; - u32 ret = UNF_RETURN_ERROR; - u32 ox_id = INVALID_VALUE32; - u32 hot_tag = INVALID_VALUE32; - struct unf_frame_pkg pkg = {0}; - struct spfc_scqe_rcv_abts_rsp *abts_rsp = NULL; - - abts_rsp = &scqe->rcv_abts_rsp; - pkg.private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = abts_rsp->magic_num; - - ox_id = (u32)(abts_rsp->wd0.ox_id); - - hot_tag = abts_rsp->wd1.hotpooltag; - if (unlikely(hot_tag < (u32)hba->exi_base || - hot_tag >= (u32)(hba->exi_base + hba->exi_count))) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) has bad HotTag(0x%x) for bls_rsp", - hba->port_cfg.port_id, hot_tag); - - status = UNF_IO_FAILED; - hot_tag = INVALID_VALUE32; - } else { - hot_tag -= hba->exi_base; - if (unlikely(SPFC_SCQE_HAS_ERRCODE(scqe))) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) BLS response has error code(0x%x) tag(0x%x)", - hba->port_cfg.port_id, - SPFC_GET_SCQE_STATUS(scqe), (u32)hot_tag); - - status = UNF_IO_FAILED; - } else { - pkg.frame_head.rctl_did = abts_rsp->wd3.did; - pkg.frame_head.csctl_sid = abts_rsp->wd4.sid; - 
pkg.frame_head.oxid_rxid = (u32)(abts_rsp->wd0.rx_id) | ox_id << - UNF_SHIFT_16; - - /* BLS_ACC/BLS_RJT: IO_succeed */ - if (abts_rsp->wd2.fh_rctrl == SPFC_RCTL_BLS_ACC) { - status = UNF_IO_SUCCESS; - } else if (abts_rsp->wd2.fh_rctrl == SPFC_RCTL_BLS_RJT) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) ABTS RJT: %08x-%08x-%08x", - hba->port_cfg.port_id, - abts_rsp->payload[ARRAY_INDEX_0], - abts_rsp->payload[ARRAY_INDEX_1], - abts_rsp->payload[ARRAY_INDEX_2]); - - status = UNF_IO_SUCCESS; - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) BLS response RCTL is error", - hba->port_cfg.port_id); - SPFC_ERR_IO_STAT(hba, SPFC_SCQE_ABTS_RSP); - status = UNF_IO_FAILED; - } - } - } - - /* Set PKG/exchange status & Process BLS_RSP */ - pkg.status = status; - ret = spfc_rcv_bls_rsp(hba, &pkg, hot_tag); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) recv ABTS rsp OX_ID(0x%x) RX_ID(0x%x) HotTag(0x%x) SID(0x%x) DID(0x%x) %s", - hba->port_cfg.port_id, ox_id, abts_rsp->wd0.rx_id, hot_tag, - abts_rsp->wd4.sid, abts_rsp->wd3.did, - (ret == RETURN_OK) ? "OK" : "ERROR"); - - return ret; -} - -u32 spfc_recv_els_cmnd(const struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, u8 *els_pld, u32 pld_len, - bool first) -{ - u32 ret = UNF_RETURN_ERROR; - - /* Convert Payload to small endian */ - spfc_big_to_cpu32(els_pld, pld_len); - - pkg->type = UNF_PKG_ELS_REQ; - - pkg->unf_cmnd_pload_bl.buffer_ptr = els_pld; - - /* Payload length */ - pkg->unf_cmnd_pload_bl.length = pld_len; - - /* Obtain the Cmnd type from the Paylaod. The Cmnd is in small endian */ - if (first) - pkg->cmnd = UNF_GET_FC_PAYLOAD_ELS_CMND(pkg->unf_cmnd_pload_bl.buffer_ptr); - - /* Errors have been processed in SPFC_RecvElsError */ - pkg->status = UNF_IO_SUCCESS; - - /* Send PKG to the CM layer */ - UNF_LOWLEVEL_RECEIVE_LS_GS_PKG(ret, hba->lport, pkg); - - if (ret != RETURN_OK) { - pkg->rx_or_ox_id = UNF_PKG_FREE_RXID; - pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = INVALID_VALUE32; - pkg->private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = INVALID_VALUE32; - ret = spfc_free_xid((void *)hba, pkg); - - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) recv %s ox_id(0x%x) RXID(0x%x) PldLen(0x%x) failed, Free xid %s", - hba->port_cfg.port_id, - UNF_GET_FC_HEADER_RCTL(&pkg->frame_head) == SPFC_FC_RCTL_ELS_REQ ? - "ELS REQ" : "ELS RSP", - UNF_GET_OXID(pkg), UNF_GET_RXID(pkg), pld_len, - (ret == RETURN_OK) ? "OK" : "ERROR"); - } - - return ret; -} - -u32 spfc_rcv_ls_gs_rsp(const struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, u32 hot_tag) -{ - u32 ret = UNF_RETURN_ERROR; - - pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = hot_tag; - if (pkg->type == UNF_PKG_ELS_REQ_DONE) - pkg->byte_orders |= SPFC_BIT_2; - - pkg->last_pkg_flag = UNF_PKG_LAST_RESPONSE; - - UNF_LOWLEVEL_RECEIVE_LS_GS_PKG(ret, hba->lport, pkg); - - return ret; -} - -u32 spfc_rcv_els_rsp_sts(const struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, u32 hot_tag) -{ - u32 ret = UNF_RETURN_ERROR; - - pkg->type = UNF_PKG_ELS_REPLY_DONE; - pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = hot_tag; - - UNF_LOWLEVEL_SEND_ELS_DONE(ret, hba->lport, pkg); - - return ret; -} - -u32 spfc_rcv_bls_rsp(const struct spfc_hba_info *hba, struct unf_frame_pkg *pkg, - u32 hot_tag) -{ - /* - * 1. SCQ (normal) - * 2. 
from Root RQ (parent no existence) - * * - * single frame, single sequence - */ - u32 ret = UNF_RETURN_ERROR; - - pkg->type = UNF_PKG_BLS_REQ_DONE; - pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = hot_tag; - pkg->last_pkg_flag = UNF_PKG_LAST_RESPONSE; - - UNF_LOWLEVEL_RECEIVE_BLS_PKG(ret, hba->lport, pkg); - - return ret; -} - -u32 spfc_rsv_bls_rsp_sts(const struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, u32 rx_id) -{ - u32 ret = UNF_RETURN_ERROR; - - pkg->type = UNF_PKG_BLS_REPLY_DONE; - pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = rx_id; - - UNF_LOWLEVEL_RECEIVE_BLS_PKG(ret, hba->lport, pkg); - - return ret; -} - -u32 spfc_rcv_tmf_marker_sts(const struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, u32 hot_tag) -{ - u32 ret = UNF_RETURN_ERROR; - - pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = hot_tag; - - /* Send PKG info to COM */ - UNF_LOWLEVEL_RECEIVE_MARKER_STS(ret, hba->lport, pkg); - - return ret; -} - -u32 spfc_rcv_abts_marker_sts(const struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, u32 hot_tag) -{ - u32 ret = UNF_RETURN_ERROR; - - pkg->private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = hot_tag; - - UNF_LOWLEVEL_RECEIVE_ABTS_MARKER_STS(ret, hba->lport, pkg); - - return ret; -} - -static void spfc_scqe_error_pre_proc(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - /* Currently, only printing and statistics collection are performed */ - SPFC_ERR_IO_STAT(hba, SPFC_GET_SCQE_TYPE(scqe)); - SPFC_SCQ_ERR_TYPE_STAT(hba, SPFC_GET_SCQE_STATUS(scqe)); - - FC_DRV_PRINT(UNF_LOG_ABNORMAL, UNF_WARN, - "[warn]Port(0x%x)-Task_type(%u) SCQE contain error code(%u),additional info(0x%x)", - hba->port_cfg.port_id, scqe->common.ch.wd0.task_type, - scqe->common.ch.wd0.err_code, scqe->common.conn_id); -} - -void *spfc_get_els_buf_by_user_id(struct spfc_hba_info *hba, u16 user_id) -{ - struct spfc_drq_buff_entry *srq_buf_entry = NULL; - struct spfc_srq_info *srq_info = NULL; - - FC_CHECK_RETURN_VALUE(hba, NULL); - - srq_info = &hba->els_srq_info; - FC_CHECK_RETURN_VALUE(user_id < srq_info->valid_wqe_num, NULL); - - srq_buf_entry = &srq_info->els_buff_entry_head[user_id]; - - return srq_buf_entry->buff_addr; -} - -static u32 spfc_check_srq_buf_valid(struct spfc_hba_info *hba, - u16 *buf_id_array, u32 buf_num) -{ - u32 index = 0; - u32 buf_id = 0; - void *srq_buf = NULL; - - for (index = 0; index < buf_num; index++) { - buf_id = buf_id_array[index]; - - if (buf_id < hba->els_srq_info.valid_wqe_num) - srq_buf = spfc_get_els_buf_by_user_id(hba, (u16)buf_id); - else - srq_buf = NULL; - - if (!srq_buf) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) get srq buffer user id(0x%x) is null", - hba->port_cfg.port_id, buf_id); - - return UNF_RETURN_ERROR; - } - } - - return RETURN_OK; -} - -static void spfc_reclaim_srq_buf(struct spfc_hba_info *hba, u16 *buf_id_array, - u32 buf_num) -{ - u32 index = 0; - u32 buf_id = 0; - void *srq_buf = NULL; - - for (index = 0; index < buf_num; index++) { - buf_id = buf_id_array[index]; - if (buf_id < hba->els_srq_info.valid_wqe_num) - srq_buf = spfc_get_els_buf_by_user_id(hba, (u16)buf_id); - else - srq_buf = NULL; - - /* If the value of buffer is NULL, it indicates that the value - * of buffer is invalid. In this case, exit directly. 
- */ - if (!srq_buf) - break; - - spfc_post_els_srq_wqe(&hba->els_srq_info, (u16)buf_id); - } -} - -static u32 spfc_check_ls_gs_valid(struct spfc_hba_info *hba, union spfc_scqe *scqe, - struct unf_frame_pkg *pkg, u16 *buf_id_array, - u32 buf_num, u32 frame_len) -{ - u32 hot_tag; - - hot_tag = UNF_GET_HOTPOOL_TAG(pkg); - - /* The ELS CMD returns an error code and discards it directly */ - if ((sizeof(struct spfc_fc_frame_header) > frame_len) || - (SPFC_SCQE_HAS_ERRCODE(scqe)) || buf_num > SPFC_ELS_SRQ_BUF_NUM) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) get scqe type(0x%x) payload len(0x%x),scq status(0x%x),user id num(0x%x) abnormal", - hba->port_cfg.port_id, SPFC_GET_SCQE_TYPE(scqe), frame_len, - SPFC_GET_SCQE_STATUS(scqe), buf_num); - - /* ELS RSP Special Processing */ - if (SPFC_GET_SCQE_TYPE(scqe) == SPFC_SCQE_ELS_RSP || - SPFC_GET_SCQE_TYPE(scqe) == SPFC_SCQE_GS_RSP) { - if (SPFC_SCQE_ERR_TO_CM(scqe)) { - pkg->status = UNF_IO_FAILED; - (void)spfc_rcv_ls_gs_rsp(hba, pkg, hot_tag); - } else { - if (SPFC_GET_SCQE_TYPE(scqe) == SPFC_SCQE_ELS_RSP) - SPFC_HBA_STAT(hba, SPFC_STAT_ELS_RSP_EXCH_REUSE); - else - SPFC_HBA_STAT(hba, SPFC_STAT_GS_RSP_EXCH_REUSE); - } - } - - /* Reclaim srq */ - if (buf_num <= SPFC_ELS_SRQ_BUF_NUM) - spfc_reclaim_srq_buf(hba, buf_id_array, buf_num); - - return UNF_RETURN_ERROR; - } - - /* ELS CMD Check the validity of the buffer sent by the ucode */ - if (SPFC_GET_SCQE_TYPE(scqe) == SPFC_SCQE_ELS_CMND) { - if (spfc_check_srq_buf_valid(hba, buf_id_array, buf_num) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) get els cmnd scqe user id num(0x%x) abnormal, as some srq buff is null", - hba->port_cfg.port_id, buf_num); - - spfc_reclaim_srq_buf(hba, buf_id_array, buf_num); - - return UNF_RETURN_ERROR; - } - } - - return RETURN_OK; -} - -u32 spfc_scq_recv_els_cmnd(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - u32 ret = RETURN_OK; - u32 pld_len = 0; - u32 header_len = 0; - u32 frame_len = 0; - u32 rcv_data_len = 0; - u32 max_buf_num = 0; - u16 buf_id = 0; - u32 index = 0; - u8 *pld_addr = NULL; - struct unf_frame_pkg pkg = {0}; - struct spfc_scqe_rcv_els_cmd *els_cmd = NULL; - struct spfc_fc_frame_header *els_frame = NULL; - struct spfc_fc_frame_header tmp_frame = {0}; - void *els_buf = NULL; - bool first = false; - - els_cmd = &scqe->rcv_els_cmd; - frame_len = els_cmd->wd3.data_len; - max_buf_num = els_cmd->wd3.user_id_num; - spfc_swap_16_in_32((u32 *)els_cmd->user_id, SPFC_LS_GS_USERID_LEN); - - pkg.xchg_contex = NULL; - pkg.status = UNF_IO_SUCCESS; - - /* Check the validity of error codes and buff. 
If an exception occurs, - * discard the error code - */ - ret = spfc_check_ls_gs_valid(hba, scqe, &pkg, els_cmd->user_id, - max_buf_num, frame_len); - if (ret != RETURN_OK) { - pkg.rx_or_ox_id = UNF_PKG_FREE_RXID; - pkg.frame_head.oxid_rxid = - (u32)(els_cmd->wd2.rx_id) | (u32)(els_cmd->wd2.ox_id) << UNF_SHIFT_16; - pkg.private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = INVALID_VALUE32; - pkg.private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = INVALID_VALUE32; - pkg.frame_head.csctl_sid = els_cmd->wd1.sid; - pkg.frame_head.rctl_did = els_cmd->wd0.did; - spfc_free_xid((void *)hba, &pkg); - return RETURN_OK; - } - - /* Send data to COM cyclically */ - for (index = 0; index < max_buf_num; index++) { - /* Exception record, which is not processed currently */ - if (rcv_data_len >= frame_len) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) get els cmd date len(0x%x) is bigger than fream len(0x%x)", - hba->port_cfg.port_id, rcv_data_len, frame_len); - } - - buf_id = (u16)els_cmd->user_id[index]; - els_buf = spfc_get_els_buf_by_user_id(hba, buf_id); - - /* Obtain playload address */ - pld_addr = (u8 *)(els_buf); - header_len = 0; - first = false; - if (index == 0) { - els_frame = (struct spfc_fc_frame_header *)els_buf; - pld_addr = (u8 *)(els_frame + 1); - - header_len = sizeof(struct spfc_fc_frame_header); - first = true; - - memcpy(&tmp_frame, els_frame, sizeof(struct spfc_fc_frame_header)); - spfc_big_to_cpu32(&tmp_frame, sizeof(struct spfc_fc_frame_header)); - memcpy(&pkg.frame_head, &tmp_frame, sizeof(pkg.frame_head)); - pkg.frame_head.oxid_rxid = (u32)((pkg.frame_head.oxid_rxid & - SPFC_OXID_MASK) | (els_cmd->wd2.rx_id)); - } - - /* Calculate the playload length */ - pkg.last_pkg_flag = 0; - pld_len = SPFC_SRQ_ELS_SGE_LEN; - - if ((rcv_data_len + SPFC_SRQ_ELS_SGE_LEN) >= frame_len) { - pkg.last_pkg_flag = 1; - pld_len = frame_len - rcv_data_len; - } - - pkg.class_mode = els_cmd->wd0.class_mode; - - /* Push data to COM */ - if (ret == RETURN_OK) { - ret = spfc_recv_els_cmnd(hba, &pkg, pld_addr, - (pld_len - header_len), first); - } - - /* Reclaim srq buffer */ - spfc_post_els_srq_wqe(&hba->els_srq_info, buf_id); - - rcv_data_len += pld_len; - } - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) recv ELS Type(0x%x) Cmnd(0x%x) ox_id(0x%x) RXID(0x%x) SID(0x%x) DID(0x%x) %u", - hba->port_cfg.port_id, pkg.type, pkg.cmnd, els_cmd->wd2.ox_id, - els_cmd->wd2.rx_id, els_cmd->wd1.sid, els_cmd->wd0.did, ret); - - return ret; -} - -static u32 spfc_get_ls_gs_pld_len(struct spfc_hba_info *hba, u32 rcv_data_len, u32 frame_len) -{ - u32 pld_len; - - /* Exception record, which is not processed currently */ - if (rcv_data_len >= frame_len) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) get els rsp data len(0x%x) is bigger than fream len(0x%x)", - hba->port_cfg.port_id, rcv_data_len, frame_len); - } - - pld_len = SPFC_SRQ_ELS_SGE_LEN; - if ((rcv_data_len + SPFC_SRQ_ELS_SGE_LEN) >= frame_len) - pld_len = frame_len - rcv_data_len; - - return pld_len; -} - -u32 spfc_scq_recv_ls_gs_rsp(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - u32 ret = RETURN_OK; - u32 pld_len = 0; - u32 header_len = 0; - u32 frame_len = 0; - u32 rcv_data_len = 0; - u32 max_buf_num = 0; - u16 buf_id = 0; - u32 hot_tag = INVALID_VALUE32; - u32 index = 0; - u32 ox_id = (~0); - struct unf_frame_pkg pkg = {0}; - struct spfc_scqe_rcv_els_gs_rsp *ls_gs_rsp_scqe = NULL; - struct spfc_fc_frame_header *els_frame = NULL; - void *ls_gs_buf = NULL; - u8 *pld_addr = NULL; - u8 task_type; - - ls_gs_rsp_scqe = 
&scqe->rcv_els_gs_rsp; - frame_len = ls_gs_rsp_scqe->wd2.data_len; - max_buf_num = ls_gs_rsp_scqe->wd4.user_id_num; - spfc_swap_16_in_32((u32 *)ls_gs_rsp_scqe->user_id, SPFC_LS_GS_USERID_LEN); - - ox_id = ls_gs_rsp_scqe->wd1.ox_id; - hot_tag = ((u16)ls_gs_rsp_scqe->wd5.hotpooltag) - hba->exi_base; - pkg.frame_head.oxid_rxid = (u32)(ls_gs_rsp_scqe->wd1.rx_id) | ox_id << UNF_SHIFT_16; - pkg.private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = ls_gs_rsp_scqe->magic_num; - pkg.private_data[PKG_PRIVATE_XCHG_HOT_POOL_INDEX] = hot_tag; - pkg.frame_head.csctl_sid = ls_gs_rsp_scqe->wd4.sid; - pkg.frame_head.rctl_did = ls_gs_rsp_scqe->wd3.did; - pkg.status = UNF_IO_SUCCESS; - pkg.type = UNF_PKG_ELS_REQ_DONE; - - task_type = SPFC_GET_SCQE_TYPE(scqe); - if (task_type == SPFC_SCQE_GS_RSP) { - if (ls_gs_rsp_scqe->wd3.end_rsp) - SPFC_HBA_STAT(hba, SPFC_STAT_LAST_GS_SCQE); - pkg.type = UNF_PKG_GS_REQ_DONE; - } - - /* Handle the exception first. The LS/GS RSP returns the error code. - * Only the ox_id can submit the error code to the CM layer. - */ - ret = spfc_check_ls_gs_valid(hba, scqe, &pkg, ls_gs_rsp_scqe->user_id, - max_buf_num, frame_len); - if (ret != RETURN_OK) - return RETURN_OK; - - if (ls_gs_rsp_scqe->wd3.echo_rsp) { - pkg.private_data[PKG_PRIVATE_ECHO_CMD_RCV_TIME] = - ls_gs_rsp_scqe->user_id[ARRAY_INDEX_5]; - pkg.private_data[PKG_PRIVATE_ECHO_RSP_SND_TIME] = - ls_gs_rsp_scqe->user_id[ARRAY_INDEX_6]; - pkg.private_data[PKG_PRIVATE_ECHO_CMD_SND_TIME] = - ls_gs_rsp_scqe->user_id[ARRAY_INDEX_7]; - pkg.private_data[PKG_PRIVATE_ECHO_ACC_RCV_TIME] = - ls_gs_rsp_scqe->user_id[ARRAY_INDEX_8]; - } - - /* Send data to COM cyclically */ - for (index = 0; index < max_buf_num; index++) { - /* Obtain buffer address */ - ls_gs_buf = NULL; - buf_id = (u16)ls_gs_rsp_scqe->user_id[index]; - ls_gs_buf = spfc_get_els_buf_by_user_id(hba, buf_id); - - if (unlikely(!ls_gs_buf)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) ox_id(0x%x) RXID(0x%x) SID(0x%x) DID(0x%x) Index(0x%x) get els rsp buff user id(0x%x) abnormal", - hba->port_cfg.port_id, ox_id, - ls_gs_rsp_scqe->wd1.rx_id, ls_gs_rsp_scqe->wd4.sid, - ls_gs_rsp_scqe->wd3.did, index, buf_id); - - if (index == 0) { - pkg.status = UNF_IO_FAILED; - ret = spfc_rcv_ls_gs_rsp(hba, &pkg, hot_tag); - } - - return ret; - } - - header_len = 0; - pld_addr = (u8 *)(ls_gs_buf); - if (index == 0) { - header_len = sizeof(struct spfc_fc_frame_header); - els_frame = (struct spfc_fc_frame_header *)ls_gs_buf; - pld_addr = (u8 *)(els_frame + 1); - } - - /* Calculate the playload length */ - pld_len = spfc_get_ls_gs_pld_len(hba, rcv_data_len, frame_len); - - /* Push data to COM */ - if (ret == RETURN_OK) { - ret = spfc_rcv_ls_gs_rsp_payload(hba, &pkg, hot_tag, pld_addr, - (pld_len - header_len)); - } - - /* Reclaim srq buffer */ - spfc_post_els_srq_wqe(&hba->els_srq_info, buf_id); - - rcv_data_len += pld_len; - } - - if (ls_gs_rsp_scqe->wd3.end_rsp && ret == RETURN_OK) - ret = spfc_rcv_ls_gs_rsp(hba, &pkg, hot_tag); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) receive LS/GS RSP ox_id(0x%x) RXID(0x%x) SID(0x%x) DID(0x%x) end_rsp(0x%x) user_num(0x%x)", - hba->port_cfg.port_id, ox_id, ls_gs_rsp_scqe->wd1.rx_id, - ls_gs_rsp_scqe->wd4.sid, ls_gs_rsp_scqe->wd3.did, - ls_gs_rsp_scqe->wd3.end_rsp, - ls_gs_rsp_scqe->wd4.user_id_num); - - return ret; -} - -u32 spfc_scq_recv_els_rsp_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - u32 ret = UNF_RETURN_ERROR; - u32 rx_id = INVALID_VALUE32; - u32 hot_tag = INVALID_VALUE32; - struct unf_frame_pkg pkg = {0}; - 
struct spfc_scqe_comm_rsp_sts *els_rsp_sts_scqe = NULL; - - els_rsp_sts_scqe = &scqe->comm_sts; - rx_id = (u32)els_rsp_sts_scqe->wd0.rx_id; - - pkg.private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = - els_rsp_sts_scqe->magic_num; - pkg.frame_head.oxid_rxid = rx_id | (u32)(els_rsp_sts_scqe->wd0.ox_id) << UNF_SHIFT_16; - hot_tag = (u32)(els_rsp_sts_scqe->wd1.hotpooltag - hba->exi_base); - - if (unlikely(SPFC_SCQE_HAS_ERRCODE(scqe))) - pkg.status = UNF_IO_FAILED; - else - pkg.status = UNF_IO_SUCCESS; - - ret = spfc_rcv_els_rsp_sts(hba, &pkg, hot_tag); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) recv ELS RSP STS ox_id(0x%x) RXID(0x%x) HotTag(0x%x) %s", - hba->port_cfg.port_id, els_rsp_sts_scqe->wd0.ox_id, rx_id, - hot_tag, (ret == RETURN_OK) ? "OK" : "ERROR"); - - return ret; -} - -static u32 spfc_check_rport_valid(const struct spfc_parent_queue_info *prt_queue_info, u32 scqe_xid) -{ - if (prt_queue_info->parent_ctx.cqm_parent_ctx_obj) { - if ((prt_queue_info->parent_sq_info.context_id & SPFC_CQM_XID_MASK) == - (scqe_xid & SPFC_CQM_XID_MASK)) { - return RETURN_OK; - } - } - - return UNF_RETURN_ERROR; -} - -u32 spfc_scq_recv_offload_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - u32 valid = UNF_RETURN_ERROR; - u32 rport_index = 0; - u32 cid = 0; - u32 xid = 0; - ulong flags = 0; - struct spfc_parent_queue_info *prt_qinfo = NULL; - struct spfc_parent_sq_info *parent_sq_info = NULL; - struct spfc_scqe_sess_sts *offload_sts_scqe = NULL; - struct spfc_delay_destroy_ctrl_info delay_ctl_info; - - memset(&delay_ctl_info, 0, sizeof(struct spfc_delay_destroy_ctrl_info)); - offload_sts_scqe = &scqe->sess_sts; - rport_index = offload_sts_scqe->wd1.conn_id; - cid = offload_sts_scqe->wd2.cid; - xid = offload_sts_scqe->wd0.xid_qpn; - - if (rport_index >= UNF_SPFC_MAXRPORT_NUM) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) receive an error offload status: rport(0x%x) is invalid, cacheid(0x%x)", - hba->port_cfg.port_id, rport_index, cid); - - return UNF_RETURN_ERROR; - } - - if (rport_index == SPFC_DEFAULT_RPORT_INDEX && - hba->default_sq_info.default_sq_flag == 0xF) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) default session timeout: rport(0x%x) cacheid(0x%x)", - hba->port_cfg.port_id, rport_index, cid); - return UNF_RETURN_ERROR; - } - - prt_qinfo = &hba->parent_queue_mgr->parent_queue[rport_index]; - parent_sq_info = &prt_qinfo->parent_sq_info; - - valid = spfc_check_rport_valid(prt_qinfo, xid); - if (valid != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) receive an error offload status: rport(0x%x), context id(0x%x) is invalid", - hba->port_cfg.port_id, rport_index, xid); - - return UNF_RETURN_ERROR; - } - - /* Offload failed */ - if (SPFC_GET_SCQE_STATUS(scqe) != SPFC_COMPLETION_STATUS_SUCCESS) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x), rport(0x%x), context id(0x%x), cache id(0x%x), offload failed", - hba->port_cfg.port_id, rport_index, xid, cid); - - spin_lock_irqsave(&prt_qinfo->parent_queue_state_lock, flags); - if (prt_qinfo->offload_state != SPFC_QUEUE_STATE_OFFLOADED) { - prt_qinfo->offload_state = SPFC_QUEUE_STATE_INITIALIZED; - parent_sq_info->need_offloaded = INVALID_VALUE8; - } - spin_unlock_irqrestore(&prt_qinfo->parent_queue_state_lock, - flags); - - return UNF_RETURN_ERROR; - } - - spin_lock_irqsave(&prt_qinfo->parent_queue_state_lock, flags); - prt_qinfo->parent_sq_info.cache_id = cid; - prt_qinfo->offload_state = SPFC_QUEUE_STATE_OFFLOADED; - parent_sq_info->need_offloaded = 
SPFC_HAVE_OFFLOAD; - atomic_set(&prt_qinfo->parent_sq_info.sq_cached, true); - - if (prt_qinfo->parent_sq_info.destroy_sqe.valid) { - delay_ctl_info.valid = prt_qinfo->parent_sq_info.destroy_sqe.valid; - delay_ctl_info.rport_index = prt_qinfo->parent_sq_info.destroy_sqe.rport_index; - delay_ctl_info.time_out = prt_qinfo->parent_sq_info.destroy_sqe.time_out; - delay_ctl_info.start_jiff = prt_qinfo->parent_sq_info.destroy_sqe.start_jiff; - delay_ctl_info.rport_info.nport_id = - prt_qinfo->parent_sq_info.destroy_sqe.rport_info.nport_id; - delay_ctl_info.rport_info.rport_index = - prt_qinfo->parent_sq_info.destroy_sqe.rport_info.rport_index; - delay_ctl_info.rport_info.port_name = - prt_qinfo->parent_sq_info.destroy_sqe.rport_info.port_name; - prt_qinfo->parent_sq_info.destroy_sqe.valid = false; - } - spin_unlock_irqrestore(&prt_qinfo->parent_queue_state_lock, flags); - - if (rport_index == SPFC_DEFAULT_RPORT_INDEX) { - hba->default_sq_info.sq_cid = cid; - hba->default_sq_info.sq_xid = xid; - hba->default_sq_info.default_sq_flag = 1; - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_MAJOR, "[info]Receive default Session info"); - } - - spfc_pop_destroy_parent_queue_sqe((void *)hba, &delay_ctl_info); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) offload success: rport index(0x%x),rport nportid(0x%x),context id(0x%x),cache id(0x%x).", - hba->port_cfg.port_id, rport_index, - prt_qinfo->parent_sq_info.remote_port_id, xid, cid); - - return RETURN_OK; -} - -static u32 spfc_send_bls_via_parent(struct spfc_hba_info *hba, struct unf_frame_pkg *pkg) -{ - u32 ret = UNF_RETURN_ERROR; - u16 ox_id = INVALID_VALUE16; - u16 rx_id = INVALID_VALUE16; - struct spfc_sqe tmp_sqe; - struct spfc_sqe *sqe = NULL; - struct spfc_parent_sq_info *parent_sq_info = NULL; - struct spfc_parent_queue_info *prt_qinfo = NULL; - u16 ssqn; - - FC_CHECK_RETURN_VALUE((pkg->type == UNF_PKG_BLS_REQ), UNF_RETURN_ERROR); - - sqe = &tmp_sqe; - memset(sqe, 0, sizeof(struct spfc_sqe)); - - prt_qinfo = spfc_find_parent_queue_info_by_pkg(hba, pkg); - if (!prt_qinfo) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) send BLS SID_DID(0x%x_0x%x) with null parent queue information", - hba->port_cfg.port_id, pkg->frame_head.csctl_sid, - pkg->frame_head.rctl_did); - - return ret; - } - - parent_sq_info = spfc_find_parent_sq_by_pkg(hba, pkg); - if (!parent_sq_info) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) send ABTS SID_DID(0x%x_0x%x) with null parent queue information", - hba->port_cfg.port_id, pkg->frame_head.csctl_sid, - pkg->frame_head.rctl_did); - - return ret; - } - - rx_id = UNF_GET_RXID(pkg); - ox_id = UNF_GET_OXID(pkg); - - /* Assemble the SQE Control Section part. The ABTS does not have - * Payload. bdsl=0 - */ - spfc_build_service_wqe_ctrl_section(&sqe->ctrl_sl, SPFC_BYTES_TO_QW_NUM(SPFC_SQE_TS_SIZE), - 0); - - /* Assemble the SQE Task Section BLS Common part. 
The value of DW2 of - * BLS WQE is Rsvd, and the value of DW2 is 0 - */ - spfc_build_service_wqe_ts_common(&sqe->ts_sl, parent_sq_info->rport_index, ox_id, rx_id, 0); - - /* Assemble the special part of the ABTS */ - spfc_build_bls_wqe_ts_req(sqe, pkg, hba); - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) RPort(0x%x) send ABTS_REQ ox_id(0x%x) RXID(0x%x), HotTag(0x%x)", - hba->port_cfg.port_id, parent_sq_info->rport_index, ox_id, - rx_id, (u16)(UNF_GET_HOTPOOL_TAG(pkg) + hba->exi_base)); - - ssqn = (u16)pkg->private_data[PKG_PRIVATE_XCHG_SSQ_INDEX]; - ret = spfc_parent_sq_enqueue(parent_sq_info, sqe, ssqn); - - return ret; -} - -u32 spfc_send_bls_cmnd(void *handle, struct unf_frame_pkg *pkg) -{ - u32 ret = UNF_RETURN_ERROR; - struct spfc_hba_info *hba = NULL; - ulong flags = 0; - struct spfc_parent_queue_info *prt_qinfo = NULL; - - FC_CHECK_RETURN_VALUE(handle, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg->type == UNF_PKG_BLS_REQ || pkg->type == UNF_PKG_BLS_REPLY, - UNF_RETURN_ERROR); - - SPFC_CHECK_PKG_ALLOCTIME(pkg); - hba = (struct spfc_hba_info *)handle; - - prt_qinfo = spfc_find_parent_queue_info_by_pkg(hba, pkg); - if (!prt_qinfo) { - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[warn]Port(0x%x) send BLS SID_DID(0x%x_0x%x) with null parent queue information", - hba->port_cfg.port_id, pkg->frame_head.csctl_sid, - pkg->frame_head.rctl_did); - - return ret; - } - - spin_lock_irqsave(&prt_qinfo->parent_queue_state_lock, flags); - - if (SPFC_RPORT_OFFLOADED(prt_qinfo)) { - spin_unlock_irqrestore(&prt_qinfo->parent_queue_state_lock, flags); - ret = spfc_send_bls_via_parent(hba, pkg); - } else { - spin_unlock_irqrestore(&prt_qinfo->parent_queue_state_lock, flags); - FC_DRV_PRINT(UNF_LOG_IO_ATT, UNF_WARN, - "[error]Port(0x%x) send BLS SID_DID(0x%x_0x%x) with no offloaded, do noting", - hba->port_cfg.port_id, pkg->frame_head.csctl_sid, - pkg->frame_head.rctl_did); - } - - return ret; -} - -static u32 spfc_scq_rcv_flush_sq_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - /* - * RCVD sq flush sts - * --->>> continue flush or clear done - */ - u32 ret = UNF_RETURN_ERROR; - - if (scqe->flush_sts.wd0.port_id != hba->port_index) { - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_CRITICAL, - "[err]Port(0x%x) clear_sts_port_idx(0x%x) not match hba_port_idx(0x%x), stage(0x%x)", - hba->port_cfg.port_id, scqe->clear_sts.wd0.port_id, - hba->port_index, hba->queue_set_stage); - - return UNF_RETURN_ERROR; - } - - if (scqe->flush_sts.wd0.last_flush) { - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_INFO, - "[info]Port(0x%x) flush sq(0x%x) done, stage(0x%x)", - hba->port_cfg.port_id, hba->next_clear_sq, hba->queue_set_stage); - - /* If the Flush STS is last one, send cmd done */ - ret = spfc_clear_sq_wqe_done(hba); - } else { - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_MAJOR, - "[info]Port(0x%x) continue flush sq(0x%x), stage(0x%x)", - hba->port_cfg.port_id, hba->next_clear_sq, hba->queue_set_stage); - - ret = spfc_clear_pending_sq_wqe(hba); - } - - return ret; -} - -static u32 spfc_scq_rcv_buf_clear_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - /* - * clear: fetched sq wqe - * ---to--->>> pending sq wqe - */ - u32 ret = UNF_RETURN_ERROR; - - if (scqe->clear_sts.wd0.port_id != hba->port_index) { - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_CRITICAL, - "[err]Port(0x%x) clear_sts_port_idx(0x%x) not match hba_port_idx(0x%x), stage(0x%x)", - hba->port_cfg.port_id, scqe->clear_sts.wd0.port_id, - hba->port_index, hba->queue_set_stage); - - return UNF_RETURN_ERROR; - } - - /* set 
port with I/O cleared state */ - spfc_set_hba_clear_state(hba, true); - - FC_DRV_PRINT(UNF_LOG_EVENT, UNF_KEVENT, - "[info]Port(0x%x) cleared all fetched wqe, start clear sq pending wqe, stage (0x%x)", - hba->port_cfg.port_id, hba->queue_set_stage); - - hba->queue_set_stage = SPFC_QUEUE_SET_STAGE_FLUSHING; - ret = spfc_clear_pending_sq_wqe(hba); - - return ret; -} - -u32 spfc_scq_recv_sess_rst_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - u32 rport_index = INVALID_VALUE32; - ulong flags = 0; - struct spfc_parent_queue_info *parent_queue_info = NULL; - struct spfc_scqe_sess_sts *sess_sts_scqe = (struct spfc_scqe_sess_sts *)(void *)scqe; - u32 flush_done; - u32 *ctx_array = NULL; - int ret; - spinlock_t *prtq_state_lock = NULL; - - rport_index = sess_sts_scqe->wd1.conn_id; - if (rport_index >= UNF_SPFC_MAXRPORT_NUM) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) receive reset session cmd sts failed, invlaid rport(0x%x) status_code(0x%x) remain_cnt(0x%x)", - hba->port_cfg.port_id, rport_index, - sess_sts_scqe->ch.wd0.err_code, - sess_sts_scqe->ch.wd0.cqe_remain_cnt); - - return UNF_RETURN_ERROR; - } - - parent_queue_info = &hba->parent_queue_mgr->parent_queue[rport_index]; - prtq_state_lock = &parent_queue_info->parent_queue_state_lock; - /* - * If only session reset is used, the offload status of sq remains - * unchanged. If a link is deleted, the offload status is set to - * destroying and is irreversible. - */ - spin_lock_irqsave(prtq_state_lock, flags); - - /* - * According to the fault tolerance principle, even if the connection - * deletion times out and the sts returns to delete the connection, one - * indicates that the cancel timer is successful, and 0 indicates that - * the timer is being processed. - */ - if (!cancel_delayed_work(&parent_queue_info->parent_sq_info.del_work)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) rport_index(0x%x) delete rport timer maybe timeout", - hba->port_cfg.port_id, rport_index); - } - - /* - * If the SessRstSts is returned too late and the Parent Queue Info - * resource is released, OK is returned. 
- */ - if (parent_queue_info->offload_state != SPFC_QUEUE_STATE_DESTROYING) { - spin_unlock_irqrestore(prtq_state_lock, flags); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[info]Port(0x%x) reset session cmd complete, no need to free parent qinfo, rport(0x%x) status_code(0x%x) remain_cnt(0x%x)", - hba->port_cfg.port_id, rport_index, - sess_sts_scqe->ch.wd0.err_code, - sess_sts_scqe->ch.wd0.cqe_remain_cnt); - - return RETURN_OK; - } - - if (parent_queue_info->parent_ctx.cqm_parent_ctx_obj) { - ctx_array = (u32 *)((void *)(parent_queue_info->parent_ctx - .cqm_parent_ctx_obj->vaddr)); - flush_done = ctx_array[SPFC_CTXT_FLUSH_DONE_DW_POS] & SPFC_CTXT_FLUSH_DONE_MASK_BE; - mb(); - if (flush_done == 0) { - spin_unlock_irqrestore(prtq_state_lock, flags); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) rport(0x%x) flushdone is not set, delay to free parent session", - hba->port_cfg.port_id, rport_index); - - /* If flushdone bit is not set,delay free Sq info */ - ret = queue_delayed_work(hba->work_queue, - &(parent_queue_info->parent_sq_info - .flush_done_timeout_work), - (ulong)msecs_to_jiffies((u32) - SPFC_SQ_WAIT_FLUSH_DONE_TIMEOUT_MS)); - if (!ret) { - SPFC_HBA_STAT(hba, SPFC_STAT_PARENT_SQ_QUEUE_DELAYED_WORK); - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) rport(0x%x) queue delayed work failed ret:%d", - hba->port_cfg.port_id, rport_index, - ret); - } - - return RETURN_OK; - } - } - - spin_unlock_irqrestore(prtq_state_lock, flags); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) begin to free parent session with rport(0x%x)", - hba->port_cfg.port_id, rport_index); - - spfc_free_parent_queue_info(hba, parent_queue_info); - - return RETURN_OK; -} - -static u32 spfc_scq_rcv_clear_srq_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - /* - * clear ELS/Immi SRQ - * ---then--->>> Destroy SRQ - */ - struct spfc_srq_info *srq_info = NULL; - - if (SPFC_GET_SCQE_STATUS(scqe) != 0) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) clear srq failed, status(0x%x)", - hba->port_cfg.port_id, SPFC_GET_SCQE_STATUS(scqe)); - - return RETURN_OK; - } - - srq_info = &hba->els_srq_info; - - /* - * 1: cancel timer succeed - * 0: the timer is being processed, the SQ is released when the timer - * times out - */ - if (cancel_delayed_work(&srq_info->del_work)) - queue_work(hba->work_queue, &hba->els_srq_clear_work); - - return RETURN_OK; -} - -u32 spfc_scq_recv_marker_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - u32 ret = UNF_RETURN_ERROR; - u32 ox_id = INVALID_VALUE32; - u32 rx_id = INVALID_VALUE32; - u32 hot_tag = INVALID_VALUE32; - struct unf_frame_pkg pkg = {0}; - struct spfc_scqe_itmf_marker_sts *tmf_marker_sts_scqe = NULL; - - tmf_marker_sts_scqe = &scqe->itmf_marker_sts; - ox_id = (u32)tmf_marker_sts_scqe->wd1.ox_id; - rx_id = (u32)tmf_marker_sts_scqe->wd1.rx_id; - hot_tag = tmf_marker_sts_scqe->wd4.hotpooltag - hba->exi_base; - pkg.frame_head.oxid_rxid = rx_id | (u32)(ox_id) << UNF_SHIFT_16; - pkg.private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = tmf_marker_sts_scqe->magic_num; - pkg.frame_head.csctl_sid = tmf_marker_sts_scqe->wd3.sid; - pkg.frame_head.rctl_did = tmf_marker_sts_scqe->wd2.did; - - /* 1. 
set pkg status */ - if (unlikely(SPFC_SCQE_HAS_ERRCODE(scqe))) - pkg.status = UNF_IO_FAILED; - else - pkg.status = UNF_IO_SUCCESS; - - /* 2 .process rcvd marker STS: set exchange state */ - ret = spfc_rcv_tmf_marker_sts(hba, &pkg, hot_tag); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[event]Port(0x%x) recv marker STS OX_ID(0x%x) RX_ID(0x%x) HotTag(0x%x) result %s", - hba->port_cfg.port_id, ox_id, rx_id, hot_tag, - (ret == RETURN_OK) ? "succeed" : "failed"); - - return ret; -} - -u32 spfc_scq_recv_abts_marker_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - u32 ret = UNF_RETURN_ERROR; - u32 ox_id = INVALID_VALUE32; - u32 rx_id = INVALID_VALUE32; - u32 hot_tag = INVALID_VALUE32; - struct unf_frame_pkg pkg = {0}; - struct spfc_scqe_abts_marker_sts *abts_marker_sts_scqe = NULL; - - abts_marker_sts_scqe = &scqe->abts_marker_sts; - if (!abts_marker_sts_scqe) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]ABTS marker STS is NULL"); - return ret; - } - - ox_id = (u32)abts_marker_sts_scqe->wd1.ox_id; - rx_id = (u32)abts_marker_sts_scqe->wd1.rx_id; - hot_tag = abts_marker_sts_scqe->wd4.hotpooltag - hba->exi_base; - pkg.frame_head.oxid_rxid = rx_id | (u32)(ox_id) << UNF_SHIFT_16; - pkg.frame_head.csctl_sid = abts_marker_sts_scqe->wd3.sid; - pkg.frame_head.rctl_did = abts_marker_sts_scqe->wd2.did; - pkg.abts_maker_status = (u32)abts_marker_sts_scqe->wd3.io_state; - pkg.private_data[PKG_PRIVATE_XCHG_ALLOC_TIME] = abts_marker_sts_scqe->magic_num; - - if (unlikely(SPFC_SCQE_HAS_ERRCODE(scqe))) - pkg.status = UNF_IO_FAILED; - else - pkg.status = UNF_IO_SUCCESS; - - ret = spfc_rcv_abts_marker_sts(hba, &pkg, hot_tag); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_MAJOR, - "[info]Port(0x%x) recv abts marker STS ox_id(0x%x) RXID(0x%x) HotTag(0x%x) %s", - hba->port_cfg.port_id, ox_id, rx_id, hot_tag, - (ret == RETURN_OK) ? "SUCCEED" : "FAILED"); - - return ret; -} - -u32 spfc_handle_aeq_off_load_err(struct spfc_hba_info *hba, struct spfc_aqe_data *aeq_msg) -{ - u32 ret = RETURN_OK; - u32 rport_index = 0; - u32 xid = 0; - struct spfc_parent_queue_info *prt_qinfo = NULL; - struct spfc_delay_destroy_ctrl_info delay_ctl_info; - ulong flags = 0; - - memset(&delay_ctl_info, 0, sizeof(struct spfc_delay_destroy_ctrl_info)); - - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) receive Offload Err Event, EvtCode(0x%x) Conn_id(0x%x) Xid(0x%x)", - hba->port_cfg.port_id, aeq_msg->wd0.evt_code, - aeq_msg->wd0.conn_id, aeq_msg->wd1.xid); - - /* Currently, only the offload failure caused by insufficient scqe is - * processed. Other errors are not processed temporarily. 
- */ - if (unlikely(aeq_msg->wd0.evt_code != FC_ERROR_OFFLOAD_LACKOF_SCQE_FAIL)) { - FC_DRV_PRINT(UNF_LOG_REG_ATT, UNF_ERR, - "[err]Port(0x%x) receive an unsupported error code of AEQ Event,EvtCode(0x%x) Conn_id(0x%x)", - hba->port_cfg.port_id, aeq_msg->wd0.evt_code, - aeq_msg->wd0.conn_id); - - return UNF_RETURN_ERROR; - } - SPFC_SCQ_ERR_TYPE_STAT(hba, FC_ERROR_OFFLOAD_LACKOF_SCQE_FAIL); - - rport_index = aeq_msg->wd0.conn_id; - xid = aeq_msg->wd1.xid; - - if (rport_index >= UNF_SPFC_MAXRPORT_NUM) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) receive an error offload status: rport(0x%x) is invalid, Xid(0x%x)", - hba->port_cfg.port_id, rport_index, aeq_msg->wd1.xid); - - return UNF_RETURN_ERROR; - } - - prt_qinfo = &hba->parent_queue_mgr->parent_queue[rport_index]; - if (spfc_check_rport_valid(prt_qinfo, xid) != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) receive an error offload status: rport(0x%x), context id(0x%x) is invalid", - hba->port_cfg.port_id, rport_index, xid); - - return UNF_RETURN_ERROR; - } - - /* The offload status is restored only when the offload status is offloading */ - spin_lock_irqsave(&prt_qinfo->parent_queue_state_lock, flags); - if (prt_qinfo->offload_state == SPFC_QUEUE_STATE_OFFLOADING) - prt_qinfo->offload_state = SPFC_QUEUE_STATE_INITIALIZED; - spin_unlock_irqrestore(&prt_qinfo->parent_queue_state_lock, flags); - - if (prt_qinfo->parent_sq_info.destroy_sqe.valid) { - delay_ctl_info.valid = prt_qinfo->parent_sq_info.destroy_sqe.valid; - delay_ctl_info.rport_index = prt_qinfo->parent_sq_info.destroy_sqe.rport_index; - delay_ctl_info.time_out = prt_qinfo->parent_sq_info.destroy_sqe.time_out; - delay_ctl_info.start_jiff = prt_qinfo->parent_sq_info.destroy_sqe.start_jiff; - delay_ctl_info.rport_info.nport_id = - prt_qinfo->parent_sq_info.destroy_sqe.rport_info.nport_id; - delay_ctl_info.rport_info.rport_index = - prt_qinfo->parent_sq_info.destroy_sqe.rport_info.rport_index; - delay_ctl_info.rport_info.port_name = - prt_qinfo->parent_sq_info.destroy_sqe.rport_info.port_name; - prt_qinfo->parent_sq_info.destroy_sqe.valid = false; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[info]Port(0x%x) pop up delay sqe, start:0x%llx, timeout:0x%x, rport:0x%x, offload state:0x%x", - hba->port_cfg.port_id, delay_ctl_info.start_jiff, - delay_ctl_info.time_out, - prt_qinfo->parent_sq_info.destroy_sqe.rport_info.rport_index, - SPFC_QUEUE_STATE_INITIALIZED); - - ret = spfc_free_parent_resource(hba, &delay_ctl_info.rport_info); - if (ret != RETURN_OK) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[err]Port(0x%x) pop delay destroy parent sq failed, rport(0x%x), rport nport id 0x%x", - hba->port_cfg.port_id, - delay_ctl_info.rport_info.rport_index, - delay_ctl_info.rport_info.nport_id); - } - } - - return ret; -} - -u32 spfc_free_xid(void *handle, struct unf_frame_pkg *pkg) -{ - u32 ret = RETURN_ERROR; - u16 rx_id = INVALID_VALUE16; - u16 ox_id = INVALID_VALUE16; - u16 hot_tag = INVALID_VALUE16; - struct spfc_hba_info *hba = (struct spfc_hba_info *)handle; - union spfc_cmdqe tmp_cmd_wqe; - union spfc_cmdqe *cmd_wqe = NULL; - - FC_CHECK_RETURN_VALUE(hba, RETURN_ERROR); - FC_CHECK_RETURN_VALUE(pkg, RETURN_ERROR); - SPFC_CHECK_PKG_ALLOCTIME(pkg); - - cmd_wqe = &tmp_cmd_wqe; - memset(cmd_wqe, 0, sizeof(union spfc_cmdqe)); - - rx_id = UNF_GET_RXID(pkg); - ox_id = UNF_GET_OXID(pkg); - if (UNF_GET_HOTPOOL_TAG(pkg) != INVALID_VALUE32) - hot_tag = (u16)UNF_GET_HOTPOOL_TAG(pkg) + hba->exi_base; - - spfc_build_cmdqe_common(cmd_wqe, 
SPFC_TASK_T_EXCH_ID_FREE, rx_id); - cmd_wqe->xid_free.wd2.hotpool_tag = hot_tag; - cmd_wqe->xid_free.magic_num = UNF_GETXCHGALLOCTIME(pkg); - cmd_wqe->xid_free.sid = pkg->frame_head.csctl_sid; - cmd_wqe->xid_free.did = pkg->frame_head.rctl_did; - cmd_wqe->xid_free.type = pkg->type; - - if (pkg->rx_or_ox_id == UNF_PKG_FREE_OXID) - cmd_wqe->xid_free.wd0.task_id = ox_id; - else - cmd_wqe->xid_free.wd0.task_id = rx_id; - - cmd_wqe->xid_free.wd0.port_id = hba->port_index; - cmd_wqe->xid_free.wd2.scqn = hba->default_scqn; - ret = spfc_root_cmdq_enqueue(hba, cmd_wqe, sizeof(cmd_wqe->xid_free)); - - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_INFO, - "[info]Port(0x%x) ox_id(0x%x) RXID(0x%x) hottag(0x%x) magic_num(0x%x) Sid(0x%x) Did(0x%x), send free xid %s", - hba->port_cfg.port_id, ox_id, rx_id, hot_tag, - cmd_wqe->xid_free.magic_num, cmd_wqe->xid_free.sid, - cmd_wqe->xid_free.did, - (ret == RETURN_OK) ? "OK" : "ERROR"); - - return ret; -} - -u32 spfc_scq_free_xid_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - u32 hot_tag = INVALID_VALUE32; - u32 magic_num = INVALID_VALUE32; - u32 ox_id = INVALID_VALUE32; - u32 rx_id = INVALID_VALUE32; - struct spfc_scqe_comm_rsp_sts *free_xid_sts_scqe = NULL; - - free_xid_sts_scqe = &scqe->comm_sts; - magic_num = free_xid_sts_scqe->magic_num; - ox_id = (u32)free_xid_sts_scqe->wd0.ox_id; - rx_id = (u32)free_xid_sts_scqe->wd0.rx_id; - - if (free_xid_sts_scqe->wd1.hotpooltag != INVALID_VALUE16) { - hot_tag = free_xid_sts_scqe->wd1.hotpooltag - hba->exi_base; - } - - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_INFO, - "Port(0x%x) hottag(0x%x) magicnum(0x%x) ox_id(0x%x) rxid(0x%x) sts(%d)", - hba->port_cfg.port_id, hot_tag, magic_num, ox_id, rx_id, - SPFC_GET_SCQE_STATUS(scqe)); - - return RETURN_OK; -} - -u32 spfc_scq_exchg_timeout_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - u32 hot_tag = INVALID_VALUE32; - u32 magic_num = INVALID_VALUE32; - u32 ox_id = INVALID_VALUE32; - u32 rx_id = INVALID_VALUE32; - struct spfc_scqe_comm_rsp_sts *time_out_scqe = NULL; - - time_out_scqe = &scqe->comm_sts; - magic_num = time_out_scqe->magic_num; - ox_id = (u32)time_out_scqe->wd0.ox_id; - rx_id = (u32)time_out_scqe->wd0.rx_id; - - if (time_out_scqe->wd1.hotpooltag != INVALID_VALUE16) - hot_tag = time_out_scqe->wd1.hotpooltag - hba->exi_base; - - FC_DRV_PRINT(UNF_LOG_EQUIP_ATT, UNF_INFO, - "Port(0x%x) recv timer time out sts hotpooltag(0x%x) magicnum(0x%x) ox_id(0x%x) rxid(0x%x) sts(%d)", - hba->port_cfg.port_id, hot_tag, magic_num, ox_id, rx_id, - SPFC_GET_SCQE_STATUS(scqe)); - - return RETURN_OK; -} - -u32 spfc_scq_rcv_sq_nop_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe) -{ - struct spfc_scqe_sq_nop_sts *sq_nop_scqe = NULL; - struct spfc_parent_queue_info *prt_qinfo = NULL; - struct spfc_parent_sq_info *parent_sq_info = NULL; - struct list_head *node = NULL; - struct list_head *next_node = NULL; - struct spfc_suspend_sqe_info *suspend_sqe = NULL; - struct spfc_suspend_sqe_info *sqe = NULL; - u32 rport_index = 0; - u32 magic_num; - u16 sqn; - u32 sqn_base; - u32 sqn_max; - u32 ret = RETURN_OK; - ulong flags = 0; - - sq_nop_scqe = &scqe->sq_nop_sts; - rport_index = sq_nop_scqe->wd1.conn_id; - magic_num = sq_nop_scqe->magic_num; - sqn = sq_nop_scqe->wd0.sqn; - prt_qinfo = &hba->parent_queue_mgr->parent_queue[rport_index]; - parent_sq_info = &prt_qinfo->parent_sq_info; - sqn_base = parent_sq_info->sqn_base; - sqn_max = sqn_base + UNF_SQ_NUM_PER_SESSION - 1; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) rport(0x%x), magic_num(0x%x) receive nop sq 
sts form sq(0x%x)", - hba->port_cfg.port_id, rport_index, magic_num, sqn); - - spin_lock_irqsave(&prt_qinfo->parent_queue_state_lock, flags); - list_for_each_safe(node, next_node, &parent_sq_info->suspend_sqe_list) { - sqe = list_entry(node, struct spfc_suspend_sqe_info, list_sqe_entry); - if (sqe->magic_num != magic_num) - continue; - suspend_sqe = sqe; - if (sqn == sqn_max) - list_del(node); - break; - } - spin_unlock_irqrestore(&prt_qinfo->parent_queue_state_lock, flags); - - if (suspend_sqe) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) rport_index(0x%x) find suspend sqe.", - hba->port_cfg.port_id, rport_index); - if ((sqn < sqn_max) && (sqn >= sqn_base)) { - ret = spfc_send_nop_cmd(hba, parent_sq_info, magic_num, sqn + 1); - } else if (sqn == sqn_max) { - if (!cancel_delayed_work(&suspend_sqe->timeout_work)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "[warn]Port(0x%x) rport(0x%x) reset worker timer maybe timeout", - hba->port_cfg.port_id, rport_index); - } - parent_sq_info->need_offloaded = suspend_sqe->old_offload_sts; - ret = spfc_pop_suspend_sqe(hba, prt_qinfo, suspend_sqe); - kfree(suspend_sqe); - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) rport(0x%x) rcv error sqn(0x%x)", - hba->port_cfg.port_id, rport_index, sqn); - } - } else { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_WARN, - "[warn]Port(0x%x) rport(0x%x) magicnum(0x%x)can't find suspend sqe", - hba->port_cfg.port_id, rport_index, magic_num); - } - return ret; -} - -static const struct unf_scqe_handle_table scqe_handle_table[] = { - {/* INI rcvd FCP RSP */ - SPFC_SCQE_FCP_IRSP, true, spfc_scq_recv_iresp}, - {/* INI/TGT rcvd ELS_CMND */ - SPFC_SCQE_ELS_CMND, false, spfc_scq_recv_els_cmnd}, - {/* INI/TGT rcvd ELS_RSP */ - SPFC_SCQE_ELS_RSP, true, spfc_scq_recv_ls_gs_rsp}, - {/* INI/TGT rcvd GS_RSP */ - SPFC_SCQE_GS_RSP, true, spfc_scq_recv_ls_gs_rsp}, - {/* INI rcvd BLS_RSP */ - SPFC_SCQE_ABTS_RSP, true, spfc_scq_recv_abts_rsp}, - {/* INI/TGT rcvd ELS_RSP STS(Done) */ - SPFC_SCQE_ELS_RSP_STS, true, spfc_scq_recv_els_rsp_sts}, - {/* INI or TGT rcvd Session enable STS */ - SPFC_SCQE_SESS_EN_STS, false, spfc_scq_recv_offload_sts}, - {/* INI or TGT rcvd flush (pending) SQ STS */ - SPFC_SCQE_FLUSH_SQ_STS, false, spfc_scq_rcv_flush_sq_sts}, - {/* INI or TGT rcvd Buffer clear STS */ - SPFC_SCQE_BUF_CLEAR_STS, false, spfc_scq_rcv_buf_clear_sts}, - {/* INI or TGT rcvd session reset STS */ - SPFC_SCQE_SESS_RST_STS, false, spfc_scq_recv_sess_rst_sts}, - {/* ELS/IMMI SRQ */ - SPFC_SCQE_CLEAR_SRQ_STS, false, spfc_scq_rcv_clear_srq_sts}, - {/* INI rcvd TMF RSP */ - SPFC_SCQE_FCP_ITMF_RSP, true, spfc_scq_recv_iresp}, - {/* INI rcvd TMF Marker STS */ - SPFC_SCQE_ITMF_MARKER_STS, false, spfc_scq_recv_marker_sts}, - {/* INI rcvd ABTS Marker STS */ - SPFC_SCQE_ABTS_MARKER_STS, false, spfc_scq_recv_abts_marker_sts}, - {SPFC_SCQE_XID_FREE_ABORT_STS, false, spfc_scq_free_xid_sts}, - {SPFC_SCQE_EXCHID_TIMEOUT_STS, false, spfc_scq_exchg_timeout_sts}, - {SPFC_SQE_NOP_STS, true, spfc_scq_rcv_sq_nop_sts}, - -}; - -u32 spfc_rcv_scq_entry_from_scq(struct spfc_hba_info *hba, union spfc_scqe *scqe, u32 scqn) -{ - u32 ret = UNF_RETURN_ERROR; - bool reclaim = false; - u32 index = 0; - u32 total = 0; - - FC_CHECK_RETURN_VALUE(hba, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(scqe, UNF_RETURN_ERROR); - FC_CHECK_RETURN_VALUE(scqn < SPFC_TOTAL_SCQ_NUM, UNF_RETURN_ERROR); - - SPFC_IO_STAT(hba, SPFC_GET_SCQE_TYPE(scqe)); - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_INFO, - "[info]Port(0x%x) receive scqe type %d from 
SCQ[%u]", - hba->port_cfg.port_id, SPFC_GET_SCQE_TYPE(scqe), scqn); - - /* 1. error code cheking */ - if (unlikely(SPFC_SCQE_HAS_ERRCODE(scqe))) { - /* So far, just print & counter */ - spfc_scqe_error_pre_proc(hba, scqe); - } - - /* 2. Process SCQE by corresponding processer */ - total = sizeof(scqe_handle_table) / sizeof(struct unf_scqe_handle_table); - while (index < total) { - if (SPFC_GET_SCQE_TYPE(scqe) == scqe_handle_table[index].scqe_type) { - ret = scqe_handle_table[index].scqe_handle_func(hba, scqe); - reclaim = scqe_handle_table[index].reclaim_sq_wpg; - - break; - } - - index++; - } - - /* 3. SCQE type check */ - if (unlikely(total == index)) { - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_ERR, - "[warn]Unknown SCQE type %d", - SPFC_GET_SCQE_TYPE(scqe)); - - UNF_PRINT_SFS_LIMIT(UNF_ERR, hba->port_cfg.port_id, scqe, sizeof(union spfc_scqe)); - } - - /* 4. If SCQE is for SQ-WQE then recovery Link List SQ free page */ - if (reclaim) { - if (SPFC_GET_SCQE_SQN(scqe) < SPFC_MAX_SSQ_NUM) { - ret = spfc_reclaim_sq_wqe_page(hba, scqe); - } else { - /* NOTE: for buffer clear, the SCQE conn_id is 0xFFFF,count with HBA */ - SPFC_HBA_STAT((struct spfc_hba_info *)hba, SPFC_STAT_SQ_IO_BUFFER_CLEARED); - } - } - - return ret; -} diff --git a/drivers/scsi/spfc/hw/spfc_service.h b/drivers/scsi/spfc/hw/spfc_service.h deleted file mode 100644 index e2555c55f4d1..000000000000 --- a/drivers/scsi/spfc/hw/spfc_service.h +++ /dev/null @@ -1,282 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPFC_SERVICE_H -#define SPFC_SERVICE_H - -#include "unf_type.h" -#include "unf_common.h" -#include "unf_scsi_common.h" -#include "spfc_hba.h" - -#define SPFC_HAVE_OFFLOAD (0) - -/* FC txmfs */ -#define SPFC_DEFAULT_TX_MAX_FREAM_SIZE (256) - -#define SPFC_GET_NETWORK_PORT_ID(hba) \ - (((hba)->port_index > 1) ? 
((hba)->port_index + 2) : (hba)->port_index) - -#define SPFC_GET_PRLI_PAYLOAD_LEN \ - (UNF_PRLI_PAYLOAD_LEN - UNF_PRLI_SIRT_EXTRA_SIZE) -/* Start addr of the header/payloed of the cmnd buffer in the pkg */ -#define SPFC_FC_HEAD_LEN (sizeof(struct unf_fc_head)) -#define SPFC_PAYLOAD_OFFSET (sizeof(struct unf_fc_head)) -#define SPFC_GET_CMND_PAYLOAD_ADDR(pkg) UNF_GET_FLOGI_PAYLOAD(pkg) -#define SPFC_GET_CMND_HEADER_ADDR(pkg) \ - ((pkg)->unf_cmnd_pload_bl.buffer_ptr) -#define SPFC_GET_RSP_HEADER_ADDR(pkg) \ - ((pkg)->unf_rsp_pload_bl.buffer_ptr) -#define SPFC_GET_RSP_PAYLOAD_ADDR(pkg) \ - ((pkg)->unf_rsp_pload_bl.buffer_ptr + SPFC_PAYLOAD_OFFSET) -#define SPFC_GET_CMND_FC_HEADER(pkg) \ - (&(UNF_GET_SFS_ENTRY(pkg)->sfs_common.frame_head)) -#define SPFC_PKG_IS_ELS_RSP(cmd_type) \ - (((cmd_type) == ELS_ACC) || ((cmd_type) == ELS_RJT)) -#define SPFC_XID_IS_VALID(exid, base, exi_count) \ - (((exid) >= (base)) && ((exid) < ((base) + (exi_count)))) -#define SPFC_CHECK_NEED_OFFLOAD(cmd_code, cmd_type, offload_state) \ - (((cmd_code) == ELS_PLOGI) && ((cmd_type) != ELS_RJT) && \ - ((offload_state) == SPFC_QUEUE_STATE_INITIALIZED)) - -#define UNF_FC_PAYLOAD_ELS_MASK (0xFF000000) -#define UNF_FC_PAYLOAD_ELS_SHIFT (24) -#define UNF_FC_PAYLOAD_ELS_DWORD (0) - -/* Note: this pfcpayload is little endian */ -#define UNF_GET_FC_PAYLOAD_ELS_CMND(pfcpayload) \ - UNF_GET_SHIFTMASK(((u32 *)(void *)(pfcpayload))[UNF_FC_PAYLOAD_ELS_DWORD], \ - UNF_FC_PAYLOAD_ELS_SHIFT, UNF_FC_PAYLOAD_ELS_MASK) - -/* Note: this pfcpayload is big endian */ -#define SPFC_GET_FC_PAYLOAD_ELS_CMND(pfcpayload) \ - UNF_GET_SHIFTMASK(be32_to_cpu(((u32 *)(void *)(pfcpayload))[UNF_FC_PAYLOAD_ELS_DWORD]), \ - UNF_FC_PAYLOAD_ELS_SHIFT, UNF_FC_PAYLOAD_ELS_MASK) - -#define UNF_FC_PAYLOAD_RX_SZ_MASK (0x00000FFF) -#define UNF_FC_PAYLOAD_RX_SZ_SHIFT (16) -#define UNF_FC_PAYLOAD_RX_SZ_DWORD (2) - -/* Note: this pfcpayload is little endian */ -#define UNF_GET_FC_PAYLOAD_RX_SZ(pfcpayload) \ - ((u16)(((u32 *)(void *)(pfcpayload))[UNF_FC_PAYLOAD_RX_SZ_DWORD] & \ - UNF_FC_PAYLOAD_RX_SZ_MASK)) - -/* Note: this pfcpayload is big endian */ -#define SPFC_GET_FC_PAYLOAD_RX_SZ(pfcpayload) \ - (be32_to_cpu((u16)(((u32 *)(void *)(pfcpayload))[UNF_FC_PAYLOAD_RX_SZ_DWORD]) & \ - UNF_FC_PAYLOAD_RX_SZ_MASK)) - -#define SPFC_GET_RA_TOV_FROM_PAYLOAD(pfcpayload) \ - (((struct unf_flogi_fdisc_payload *)(pfcpayload))->fabric_parms.co_parms.r_a_tov) -#define SPFC_GET_RT_TOV_FROM_PAYLOAD(pfcpayload) \ - (((struct unf_flogi_fdisc_payload *)(pfcpayload))->fabric_parms.co_parms.r_t_tov) -#define SPFC_GET_E_D_TOV_FROM_PAYLOAD(pfcpayload) \ - (((struct unf_flogi_fdisc_payload *)(pfcpayload))->fabric_parms.co_parms.e_d_tov) -#define SPFC_GET_E_D_TOV_RESOLUTION_FROM_PAYLOAD(pfcpayload) \ - (((struct unf_flogi_fdisc_payload *)(pfcpayload))->fabric_parms.co_parms.e_d_tov_resolution) -#define SPFC_GET_BB_SC_N_FROM_PAYLOAD(pfcpayload) \ - (((struct unf_flogi_fdisc_payload *)(pfcpayload))->fabric_parms.co_parms.bbscn) -#define SPFC_GET_BB_CREDIT_FROM_PAYLOAD(pfcpayload) \ - (((struct unf_flogi_fdisc_payload *)(pfcpayload))->fabric_parms.co_parms.bb_credit) - -#define SPFC_GET_RA_TOV_FROM_PARAMS(pfcparams) \ - (((struct unf_fabric_parm *)(pfcparams))->co_parms.r_a_tov) -#define SPFC_GET_RT_TOV_FROM_PARAMS(pfcparams) \ - (((struct unf_fabric_parm *)(pfcparams))->co_parms.r_t_tov) -#define SPFC_GET_E_D_TOV_FROM_PARAMS(pfcparams) \ - (((struct unf_fabric_parm *)(pfcparams))->co_parms.e_d_tov) -#define SPFC_GET_E_D_TOV_RESOLUTION_FROM_PARAMS(pfcparams) \ - (((struct unf_fabric_parm 
*)(pfcparams))->co_parms.e_d_tov_resolution) -#define SPFC_GET_BB_SC_N_FROM_PARAMS(pfcparams) \ - (((struct unf_fabric_parm *)(pfcparams))->co_parms.bbscn) -#define SPFC_GET_BB_CREDIT_FROM_PARAMS(pfcparams) \ - (((struct unf_fabric_parm *)(pfcparams))->co_parms.bb_credit) -#define SPFC_CHECK_NPORT_FPORT_BIT(pfcparams) \ - (((struct unf_fabric_parm *)(pfcparams))->co_parms.nport) - -#define UNF_FC_RCTL_BLS_MASK (0x80) -#define SPFC_UNSOLICITED_FRAME_IS_BLS(hdr) (UNF_GET_FC_HEADER_RCTL(hdr) & UNF_FC_RCTL_BLS_MASK) - -#define SPFC_LOW_SEQ_CNT (0) -#define SPFC_HIGH_SEQ_CNT (0xFFFF) - -/* struct unf_frame_pkg.cmnd meaning: - * The least significant 16 bits indicate whether to send ELS CMND or ELS RSP - * (ACC or RJT). The most significant 16 bits indicate the corresponding ELS - * CMND when the lower 16 bits are ELS RSP. - */ -#define SPFC_ELS_CMND_MASK (0xffff) -#define SPFC_ELS_CMND__RELEVANT_SHIFT (16UL) -#define SPFC_GET_LS_GS_CMND_CODE(cmnd) ((u16)((cmnd) & SPFC_ELS_CMND_MASK)) -#define SPFC_GET_ELS_RSP_TYPE(cmnd) ((u16)((cmnd) & SPFC_ELS_CMND_MASK)) -#define SPFC_GET_ELS_RSP_CODE(cmnd) \ - ((u16)((cmnd) >> SPFC_ELS_CMND__RELEVANT_SHIFT & SPFC_ELS_CMND_MASK)) - -/* ELS CMND Request */ -#define ELS_CMND (0) - -/* fh_f_ctl - Frame control flags. */ -#define SPFC_FC_EX_CTX BIT(23) /* sent by responder to exchange */ -#define SPFC_FC_SEQ_CTX BIT(22) /* sent by responder to sequence */ -#define SPFC_FC_FIRST_SEQ BIT(21) /* first sequence of this exchange */ -#define SPFC_FC_LAST_SEQ BIT(20) /* last sequence of this exchange */ -#define SPFC_FC_END_SEQ BIT(19) /* last frame of sequence */ -#define SPFC_FC_END_CONN BIT(18) /* end of class 1 connection pending */ -#define SPFC_FC_RES_B17 BIT(17) /* reserved */ -#define SPFC_FC_SEQ_INIT BIT(16) /* transfer of sequence initiative */ -#define SPFC_FC_X_ID_REASS BIT(15) /* exchange ID has been changed */ -#define SPFC_FC_X_ID_INVAL BIT(14) /* exchange ID invalidated */ -#define SPFC_FC_ACK_1 BIT(12) /* 13:12 = 1: ACK_1 expected */ -#define SPFC_FC_ACK_N (2 << 12) /* 13:12 = 2: ACK_N expected */ -#define SPFC_FC_ACK_0 (3 << 12) /* 13:12 = 3: ACK_0 expected */ -#define SPFC_FC_RES_B11 BIT(11) /* reserved */ -#define SPFC_FC_RES_B10 BIT(10) /* reserved */ -#define SPFC_FC_RETX_SEQ BIT(9) /* retransmitted sequence */ -#define SPFC_FC_UNI_TX BIT(8) /* unidirectional transmit (class 1) */ -#define SPFC_FC_CONT_SEQ(i) ((i) << 6) -#define SPFC_FC_ABT_SEQ(i) ((i) << 4) -#define SPFC_FC_REL_OFF BIT(3) /* parameter is relative offset */ -#define SPFC_FC_RES2 BIT(2) /* reserved */ -#define SPFC_FC_FILL(i) ((i) & 3) /* 1:0: bytes of trailing fill */ - -#define SPFC_FCTL_REQ (SPFC_FC_FIRST_SEQ | SPFC_FC_END_SEQ | SPFC_FC_SEQ_INIT) -#define SPFC_FCTL_RESP \ - (SPFC_FC_EX_CTX | SPFC_FC_LAST_SEQ | SPFC_FC_END_SEQ | SPFC_FC_SEQ_INIT) -#define SPFC_RCTL_BLS_REQ (0x81) -#define SPFC_RCTL_BLS_ACC (0x84) -#define SPFC_RCTL_BLS_RJT (0x85) - -#define PHY_PORT_TYPE_FC 0x1 /* Physical port type of FC */ -#define PHY_PORT_TYPE_FCOE 0x2 /* Physical port type of FCoE */ -#define SPFC_FC_COS_VALUE (0X4) - -#define SPFC_CDB16_LBA_MASK 0xffff -#define SPFC_CDB16_TRANSFERLEN_MASK 0xff -#define SPFC_RXID_MASK 0xffff -#define SPFC_OXID_MASK 0xffff0000 - -enum spfc_fc_fh_type { - SPFC_FC_TYPE_BLS = 0x00, /* basic link service */ - SPFC_FC_TYPE_ELS = 0x01, /* extended link service */ - SPFC_FC_TYPE_IP = 0x05, /* IP over FC, RFC 4338 */ - SPFC_FC_TYPE_FCP = 0x08, /* SCSI FCP */ - SPFC_FC_TYPE_CT = 0x20, /* Fibre Channel Services (FC-CT) */ - SPFC_FC_TYPE_ILS = 0x22 /* internal link 
service */ -}; - -enum spfc_fc_fh_rctl { - SPFC_FC_RCTL_DD_UNCAT = 0x00, /* uncategorized information */ - SPFC_FC_RCTL_DD_SOL_DATA = 0x01, /* solicited data */ - SPFC_FC_RCTL_DD_UNSOL_CTL = 0x02, /* unsolicited control */ - SPFC_FC_RCTL_DD_SOL_CTL = 0x03, /* solicited control or reply */ - SPFC_FC_RCTL_DD_UNSOL_DATA = 0x04, /* unsolicited data */ - SPFC_FC_RCTL_DD_DATA_DESC = 0x05, /* data descriptor */ - SPFC_FC_RCTL_DD_UNSOL_CMD = 0x06, /* unsolicited command */ - SPFC_FC_RCTL_DD_CMD_STATUS = 0x07, /* command status */ - -#define SPFC_FC_RCTL_ILS_REQ SPFC_FC_RCTL_DD_UNSOL_CTL /* ILS request */ -#define SPFC_FC_RCTL_ILS_REP SPFC_FC_RCTL_DD_SOL_CTL /* ILS reply */ - - /* - * Extended Link_Data - */ - SPFC_FC_RCTL_ELS_REQ = 0x22, /* extended link services request */ - SPFC_FC_RCTL_ELS_RSP = 0x23, /* extended link services reply */ - SPFC_FC_RCTL_ELS4_REQ = 0x32, /* FC-4 ELS request */ - SPFC_FC_RCTL_ELS4_RSP = 0x33, /* FC-4 ELS reply */ - /* - * Optional Extended Headers - */ - SPFC_FC_RCTL_VFTH = 0x50, /* virtual fabric tagging header */ - SPFC_FC_RCTL_IFRH = 0x51, /* inter-fabric routing header */ - SPFC_FC_RCTL_ENCH = 0x52, /* encapsulation header */ - /* - * Basic Link Services fh_r_ctl values. - */ - SPFC_FC_RCTL_BA_NOP = 0x80, /* basic link service NOP */ - SPFC_FC_RCTL_BA_ABTS = 0x81, /* basic link service abort */ - SPFC_FC_RCTL_BA_RMC = 0x82, /* remove connection */ - SPFC_FC_RCTL_BA_ACC = 0x84, /* basic accept */ - SPFC_FC_RCTL_BA_RJT = 0x85, /* basic reject */ - SPFC_FC_RCTL_BA_PRMT = 0x86, /* dedicated connection preempted */ - /* - * Link Control Information. - */ - SPFC_FC_RCTL_ACK_1 = 0xc0, /* acknowledge_1 */ - SPFC_FC_RCTL_ACK_0 = 0xc1, /* acknowledge_0 */ - SPFC_FC_RCTL_P_RJT = 0xc2, /* port reject */ - SPFC_FC_RCTL_F_RJT = 0xc3, /* fabric reject */ - SPFC_FC_RCTL_P_BSY = 0xc4, /* port busy */ - SPFC_FC_RCTL_F_BSY = 0xc5, /* fabric busy to data frame */ - SPFC_FC_RCTL_F_BSYL = 0xc6, /* fabric busy to link control frame */ - SPFC_FC_RCTL_LCR = 0xc7, /* link credit reset */ - SPFC_FC_RCTL_END = 0xc9 /* end */ -}; - -struct spfc_fc_frame_header { - u8 rctl; /* routing control */ - u8 did[ARRAY_INDEX_3]; /* Destination ID */ - - u8 cs_ctrl; /* class of service control / pri */ - u8 sid[ARRAY_INDEX_3]; /* Source ID */ - - u8 type; /* see enum fc_fh_type below */ - u8 frame_ctrl[ARRAY_INDEX_3]; /* frame control */ - - u8 seq_id; /* sequence ID */ - u8 df_ctrl; /* data field control */ - u16 seq_cnt; /* sequence count */ - - u16 oxid; /* originator exchange ID */ - u16 rxid; /* responder exchange ID */ - u32 param_offset; /* parameter or relative offset */ -}; - -u32 spfc_recv_els_cmnd(const struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, u8 *els_pld, u32 pld_len, - bool first); -u32 spfc_rcv_ls_gs_rsp(const struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, u32 hot_tag); -u32 spfc_rcv_els_rsp_sts(const struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, u32 hot_tag); -u32 spfc_rcv_bls_rsp(const struct spfc_hba_info *hba, struct unf_frame_pkg *pkg, - u32 hot_tag); -u32 spfc_rsv_bls_rsp_sts(const struct spfc_hba_info *hba, - struct unf_frame_pkg *pkg, u32 rx_id); -void spfc_save_login_parms_in_sq_info(struct spfc_hba_info *hba, - struct unf_port_login_parms *login_params); -u32 spfc_handle_aeq_off_load_err(struct spfc_hba_info *hba, - struct spfc_aqe_data *aeq_msg); -u32 spfc_free_xid(void *handle, struct unf_frame_pkg *pkg); -u32 spfc_scq_free_xid_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe); -u32 spfc_scq_exchg_timeout_sts(struct spfc_hba_info *hba, 
union spfc_scqe *scqe); -u32 spfc_scq_rcv_sq_nop_sts(struct spfc_hba_info *hba, union spfc_scqe *scqe); -u32 spfc_send_els_via_default_session(struct spfc_hba_info *hba, struct spfc_sqe *io_sqe, - struct unf_frame_pkg *pkg, - struct spfc_parent_queue_info *prt_queue_info); -u32 spfc_send_ls_gs_cmnd(void *handle, struct unf_frame_pkg *pkg); -u32 spfc_send_bls_cmnd(void *handle, struct unf_frame_pkg *pkg); - -/* Receive Frame from SCQ */ -u32 spfc_rcv_scq_entry_from_scq(struct spfc_hba_info *hba, - union spfc_scqe *scqe, u32 scqn); -void *spfc_get_els_buf_by_user_id(struct spfc_hba_info *hba, u16 user_id); - -#define SPFC_CHECK_PKG_ALLOCTIME(pkg) \ - do { \ - if (unlikely(UNF_GETXCHGALLOCTIME(pkg) == 0)) { \ - FC_DRV_PRINT(UNF_LOG_NORMAL, \ - UNF_WARN, \ - "[warn]Invalid MagicNum,S_ID(0x%x) " \ - "D_ID(0x%x) OXID(0x%x) " \ - "RX_ID(0x%x) Pkg type(0x%x) hot " \ - "pooltag(0x%x)", \ - UNF_GET_SID(pkg), UNF_GET_DID(pkg), \ - UNF_GET_OXID(pkg), UNF_GET_RXID(pkg), \ - ((struct unf_frame_pkg *)(pkg))->type, \ - UNF_GET_XCHG_TAG(pkg)); \ - } \ - } while (0) - -#endif diff --git a/drivers/scsi/spfc/hw/spfc_utils.c b/drivers/scsi/spfc/hw/spfc_utils.c deleted file mode 100644 index 328c388c95fe..000000000000 --- a/drivers/scsi/spfc/hw/spfc_utils.c +++ /dev/null @@ -1,102 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "spfc_utils.h" -#include "unf_log.h" -#include "unf_common.h" - -void spfc_cpu_to_big64(void *addr, u32 size) -{ - u32 index = 0; - u32 cnt = 0; - u64 *temp = NULL; - - FC_CHECK_VALID(addr, dump_stack(); return); - FC_CHECK_VALID((size % SPFC_QWORD_BYTE) == 0, dump_stack(); return); - - temp = (u64 *)addr; - cnt = SPFC_SHIFT_TO_U64(size); - - for (index = 0; index < cnt; index++) { - *temp = cpu_to_be64(*temp); - temp++; - } -} - -void spfc_big_to_cpu64(void *addr, u32 size) -{ - u32 index = 0; - u32 cnt = 0; - u64 *temp = NULL; - - FC_CHECK_VALID(addr, dump_stack(); return); - FC_CHECK_VALID((size % SPFC_QWORD_BYTE) == 0, dump_stack(); return); - - temp = (u64 *)addr; - cnt = SPFC_SHIFT_TO_U64(size); - - for (index = 0; index < cnt; index++) { - *temp = be64_to_cpu(*temp); - temp++; - } -} - -void spfc_cpu_to_big32(void *addr, u32 size) -{ - unf_cpu_to_big_end(addr, size); -} - -void spfc_big_to_cpu32(void *addr, u32 size) -{ - if (size % UNF_BYTES_OF_DWORD) - dump_stack(); - - unf_big_end_to_cpu(addr, size); -} - -void spfc_cpu_to_be24(u8 *data, u32 value) -{ - data[ARRAY_INDEX_0] = (value >> UNF_SHIFT_16) & UNF_MASK_BIT_7_0; - data[ARRAY_INDEX_1] = (value >> UNF_SHIFT_8) & UNF_MASK_BIT_7_0; - data[ARRAY_INDEX_2] = value & UNF_MASK_BIT_7_0; -} - -u32 spfc_big_to_cpu24(u8 *data) -{ - return (data[ARRAY_INDEX_0] << UNF_SHIFT_16) | - (data[ARRAY_INDEX_1] << UNF_SHIFT_8) | data[ARRAY_INDEX_2]; -} - -void spfc_print_buff(u32 dbg_level, void *buff, u32 size) -{ - u32 *spfc_buff = NULL; - u32 loop = 0; - u32 index = 0; - - FC_CHECK_VALID(buff, dump_stack(); return); - FC_CHECK_VALID(0 == (size % SPFC_DWORD_BYTE), dump_stack(); return); - - if ((dbg_level) <= unf_dgb_level) { - spfc_buff = (u32 *)buff; - loop = size / SPFC_DWORD_BYTE; - - for (index = 0; index < loop; index++) { - spfc_buff = (u32 *)buff + index; - FC_DRV_PRINT(UNF_LOG_NORMAL, - UNF_MAJOR, "Buff DW%u 0x%08x.", index, *spfc_buff); - } - } -} - -u32 spfc_log2n(u32 val) -{ - u32 result = 0; - u32 logn = (val >> UNF_SHIFT_1); - - while (logn) { - logn >>= UNF_SHIFT_1; - result++; - } - - return result; -} diff --git a/drivers/scsi/spfc/hw/spfc_utils.h 
b/drivers/scsi/spfc/hw/spfc_utils.h deleted file mode 100644 index 6b4330da3f1d..000000000000 --- a/drivers/scsi/spfc/hw/spfc_utils.h +++ /dev/null @@ -1,202 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPFC_UTILS_H -#define SPFC_UTILS_H - -#include "unf_type.h" -#include "unf_log.h" - -#define SPFC_ZERO (0) - -#define SPFC_BIT(n) (0x1UL << (n)) -#define SPFC_BIT_0 SPFC_BIT(0) -#define SPFC_BIT_1 SPFC_BIT(1) -#define SPFC_BIT_2 SPFC_BIT(2) -#define SPFC_BIT_3 SPFC_BIT(3) -#define SPFC_BIT_4 SPFC_BIT(4) -#define SPFC_BIT_5 SPFC_BIT(5) -#define SPFC_BIT_6 SPFC_BIT(6) -#define SPFC_BIT_7 SPFC_BIT(7) -#define SPFC_BIT_8 SPFC_BIT(8) -#define SPFC_BIT_9 SPFC_BIT(9) -#define SPFC_BIT_10 SPFC_BIT(10) -#define SPFC_BIT_11 SPFC_BIT(11) -#define SPFC_BIT_12 SPFC_BIT(12) -#define SPFC_BIT_13 SPFC_BIT(13) -#define SPFC_BIT_14 SPFC_BIT(14) -#define SPFC_BIT_15 SPFC_BIT(15) -#define SPFC_BIT_16 SPFC_BIT(16) -#define SPFC_BIT_17 SPFC_BIT(17) -#define SPFC_BIT_18 SPFC_BIT(18) -#define SPFC_BIT_19 SPFC_BIT(19) -#define SPFC_BIT_20 SPFC_BIT(20) -#define SPFC_BIT_21 SPFC_BIT(21) -#define SPFC_BIT_22 SPFC_BIT(22) -#define SPFC_BIT_23 SPFC_BIT(23) -#define SPFC_BIT_24 SPFC_BIT(24) -#define SPFC_BIT_25 SPFC_BIT(25) -#define SPFC_BIT_26 SPFC_BIT(26) -#define SPFC_BIT_27 SPFC_BIT(27) -#define SPFC_BIT_28 SPFC_BIT(28) -#define SPFC_BIT_29 SPFC_BIT(29) -#define SPFC_BIT_30 SPFC_BIT(30) -#define SPFC_BIT_31 SPFC_BIT(31) - -#define SPFC_GET_BITS(data, mask) ((data) & (mask)) /* Obtains the bit */ -#define SPFC_SET_BITS(data, mask) ((data) |= (mask)) /* set the bit */ -#define SPFC_CLR_BITS(data, mask) ((data) &= ~(mask)) /* clear the bit */ - -#define SPFC_LSB(x) ((u8)(x)) -#define SPFC_MSB(x) ((u8)((u16)(x) >> 8)) - -#define SPFC_LSW(x) ((u16)(x)) -#define SPFC_MSW(x) ((u16)((u32)(x) >> 16)) - -#define SPFC_LSD(x) ((u32)((u64)(x))) -#define SPFC_MSD(x) ((u32)((((u64)(x)) >> 16) >> 16)) - -#define SPFC_BYTES_TO_QW_NUM(x) ((x) >> 3) -#define SPFC_BYTES_TO_DW_NUM(x) ((x) >> 2) - -#define UNF_GET_SHIFTMASK(__src, __shift, __mask) (((__src) & (__mask)) >> (__shift)) -#define UNF_FC_SET_SHIFTMASK(__des, __val, __shift, __mask) \ - ((__des) = (((__des) & ~(__mask)) | (((__val) << (__shift)) & (__mask)))) - -/* R_CTL */ -#define UNF_FC_HEADER_RCTL_MASK (0xFF000000) -#define UNF_FC_HEADER_RCTL_SHIFT (24) -#define UNF_FC_HEADER_RCTL_DWORD (0) -#define UNF_GET_FC_HEADER_RCTL(__pfcheader) \ - UNF_GET_SHIFTMASK(((u32 *)(void *)(__pfcheader))[UNF_FC_HEADER_RCTL_DWORD], \ - UNF_FC_HEADER_RCTL_SHIFT, UNF_FC_HEADER_RCTL_MASK) - -#define UNF_SET_FC_HEADER_RCTL(__pfcheader, __val) \ - do { \ - UNF_FC_SET_SHIFTMASK(((u32 *)(void *)(__pfcheader)[UNF_FC_HEADER_RCTL_DWORD], \ - __val, UNF_FC_HEADER_RCTL_SHIFT, UNF_FC_HEADER_RCTL_MASK) \ - } while (0) - -/* PRLI PARAM 3 */ -#define SPFC_PRLI_PARAM_WXFER_ENABLE_MASK (0x00000001) -#define SPFC_PRLI_PARAM_WXFER_ENABLE_SHIFT (0) -#define SPFC_PRLI_PARAM_WXFER_DWORD (3) -#define SPFC_GET_PRLI_PARAM_WXFER(__pfcheader) \ - UNF_GET_SHIFTMASK(((u32 *)(void *)(__pfcheader))[SPFC_PRLI_PARAM_WXFER_DWORD], \ - SPFC_PRLI_PARAM_WXFER_ENABLE_SHIFT, \ - SPFC_PRLI_PARAM_WXFER_ENABLE_MASK) - -#define SPFC_PRLI_PARAM_CONF_ENABLE_MASK (0x00000080) -#define SPFC_PRLI_PARAM_CONF_ENABLE_SHIFT (7) -#define SPFC_PRLI_PARAM_CONF_DWORD (3) -#define SPFC_GET_PRLI_PARAM_CONF(__pfcheader) \ - UNF_GET_SHIFTMASK(((u32 *)(void *)(__pfcheader))[SPFC_PRLI_PARAM_CONF_DWORD], \ - SPFC_PRLI_PARAM_CONF_ENABLE_SHIFT, \ - SPFC_PRLI_PARAM_CONF_ENABLE_MASK) - 
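For reference only: the UNF_GET_SHIFTMASK and SPFC_GET_PRLI_PARAM_* getters above all follow the same mask-then-shift pattern for pulling a bit field out of one 32-bit word of a payload. A minimal standalone sketch of that pattern in plain C is shown below; the helper names and the example wrapper are illustrative and are not part of the removed driver, only the dword index, mask, and shift values are taken from the macros above.

    #include <stdint.h>

    /* Mask-then-shift field extraction: ((word & mask) >> shift), taken from
     * dword index 'dw' of a payload viewed as an array of 32-bit words.
     */
    static inline uint32_t get_shiftmask(const uint32_t *payload, uint32_t dw,
                                         uint32_t shift, uint32_t mask)
    {
            return (payload[dw] & mask) >> shift;
    }

    /* Example: the PRLI "CONF" flag sits in service-parameter dword 3,
     * mask 0x00000080, shift 7 (the values defined by the macros above).
     */
    static inline uint32_t get_prli_conf(const uint32_t *prli_parms)
    {
            return get_shiftmask(prli_parms, 3, 7, 0x00000080);
    }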
-#define SPFC_PRLI_PARAM_REC_ENABLE_MASK (0x00000400) -#define SPFC_PRLI_PARAM_REC_ENABLE_SHIFT (10) -#define SPFC_PRLI_PARAM_CONF_REC (3) -#define SPFC_GET_PRLI_PARAM_REC(__pfcheader) \ - UNF_GET_SHIFTMASK(((u32 *)(void *)(__pfcheader))[SPFC_PRLI_PARAM_CONF_REC], \ - SPFC_PRLI_PARAM_REC_ENABLE_SHIFT, SPFC_PRLI_PARAM_REC_ENABLE_MASK) - -#define SPFC_FUNCTION_ENTER \ - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ALL, \ - "%s Enter.", __func__) -#define SPFC_FUNCTION_RETURN \ - FC_DRV_PRINT(UNF_LOG_NORMAL, UNF_ALL, \ - "%s Return.", __func__) - -#define SPFC_SPIN_LOCK_IRQSAVE(interrupt, hw_adapt_lock, flags) \ - do { \ - if ((interrupt) == false) { \ - spin_lock_irqsave(&(hw_adapt_lock), flags); \ - } \ - } while (0) - -#define SPFC_SPIN_UNLOCK_IRQRESTORE(interrupt, hw_adapt_lock, flags) \ - do { \ - if ((interrupt) == false) { \ - spin_unlock_irqrestore(&(hw_adapt_lock), flags); \ - } \ - } while (0) - -#define FC_CHECK_VALID(condition, fail_do) \ - do { \ - if (unlikely(!(condition))) { \ - FC_DRV_PRINT(UNF_LOG_REG_ATT, \ - UNF_ERR, "Para check(%s) invalid", \ - #condition); \ - fail_do; \ - } \ - } while (0) - -#define RETURN_ERROR_S32 (-1) -#define UNF_RETURN_ERROR_S32 (-1) - -enum SPFC_LOG_CTRL_E { - SPFC_LOG_ALL = 0, - SPFC_LOG_SCQE_RX, - SPFC_LOG_ELS_TX, - SPFC_LOG_ELS_RX, - SPFC_LOG_GS_TX, - SPFC_LOG_GS_RX, - SPFC_LOG_BLS_TX, - SPFC_LOG_BLS_RX, - SPFC_LOG_FCP_TX, - SPFC_LOG_FCP_RX, - SPFC_LOG_SESS_TX, - SPFC_LOG_SESS_RX, - SPFC_LOG_DIF_TX, - SPFC_LOG_DIF_RX -}; - -extern u32 spfc_log_en; -#define SPFC_LOG_EN(hba, log_ctrl) (spfc_log_en + (log_ctrl)) - -enum SPFC_HBA_ERR_STAT_E { - SPFC_STAT_CTXT_FLUSH_DONE = 0, - SPFC_STAT_SQ_WAIT_EMPTY, - SPFC_STAT_LAST_GS_SCQE, - SPFC_STAT_SQ_POOL_EMPTY, - SPFC_STAT_PARENT_IO_FLUSHED, - SPFC_STAT_ROOT_IO_FLUSHED, /* 5 */ - SPFC_STAT_ROOT_SQ_FULL, - SPFC_STAT_ELS_RSP_EXCH_REUSE, - SPFC_STAT_GS_RSP_EXCH_REUSE, - SPFC_STAT_SQ_IO_BUFFER_CLEARED, - SPFC_STAT_PARENT_SQ_NOT_OFFLOADED, /* 10 */ - SPFC_STAT_PARENT_SQ_QUEUE_DELAYED_WORK, - SPFC_STAT_PARENT_SQ_INVALID_CACHED_ID, - SPFC_HBA_STAT_BUTT -}; - -#define SPFC_DWORD_BYTE (4) -#define SPFC_QWORD_BYTE (8) -#define SPFC_SHIFT_TO_U64(x) ((x) >> 3) -#define SPFC_SHIFT_TO_U32(x) ((x) >> 2) - -void spfc_cpu_to_big64(void *addr, u32 size); -void spfc_big_to_cpu64(void *addr, u32 size); -void spfc_cpu_to_big32(void *addr, u32 size); -void spfc_big_to_cpu32(void *addr, u32 size); -void spfc_cpu_to_be24(u8 *data, u32 value); -u32 spfc_big_to_cpu24(u8 *data); - -void spfc_print_buff(u32 dbg_level, void *buff, u32 size); - -u32 spfc_log2n(u32 val); - -static inline void spfc_swap_16_in_32(u32 *paddr, u32 length) -{ - u32 i; - - for (i = 0; i < length; i++) { - paddr[i] = - ((((paddr[i]) & UNF_MASK_BIT_31_16) >> UNF_SHIFT_16) | - (((paddr[i]) & UNF_MASK_BIT_15_0) << UNF_SHIFT_16)); - } -} - -#endif /* __SPFC_UTILS_H__ */ diff --git a/drivers/scsi/spfc/hw/spfc_wqe.c b/drivers/scsi/spfc/hw/spfc_wqe.c deleted file mode 100644 index 61909c51bc8c..000000000000 --- a/drivers/scsi/spfc/hw/spfc_wqe.c +++ /dev/null @@ -1,646 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include "spfc_wqe.h" -#include "spfc_module.h" -#include "spfc_service.h" - -void spfc_build_tmf_rsp_wqe_ts_header(struct unf_frame_pkg *pkg, - struct spfc_sqe_tmf_rsp *sqe, u16 exi_base, - u32 scqn) -{ - sqe->ts_sl.task_type = SPFC_SQE_FCP_TMF_TRSP; - sqe->ts_sl.wd0.conn_id = - (u16)(pkg->private_data[PKG_PRIVATE_XCHG_RPORT_INDEX]); - - if (UNF_GET_RXID(pkg) == INVALID_VALUE16) - sqe->ts_sl.local_xid = 
INVALID_VALUE16; - else - sqe->ts_sl.local_xid = UNF_GET_RXID(pkg) + exi_base; - - sqe->ts_sl.tmf_rsp.wd0.scqn = scqn; - sqe->ts_sl.magic_num = UNF_GETXCHGALLOCTIME(pkg); -} - -void spfc_build_common_wqe_ctrls(struct spfc_wqe_ctrl *ctrl_sl, u8 task_len) -{ - /* "BDSL" field of CtrlS - defines the size of BDS, which varies from 0 - * to 2040 bytes (8 bits of 8 bytes' chunk) - */ - ctrl_sl->ch.wd0.bdsl = 0; - - /* "DrvSL" field of CtrlS - defines the size of DrvS, which varies from - * 0 to 24 bytes - */ - ctrl_sl->ch.wd0.drv_sl = 0; - - /* a. - * b1 - linking WQE, which will be only used in linked page architecture - * instead of ring, it's a special control WQE which does not contain - * any buffer or inline data information, and will only be consumed by - * hardware. The size is aligned to WQEBB/WQE b0 - normal WQE, either - * normal SEG WQE or inline data WQE - */ - ctrl_sl->ch.wd0.wf = 0; - - /* - * "CF" field of CtrlS - Completion Format - defines the format of CS. - * a. b0 - Status information is embedded inside of Completion Section - * b. b1 - Completion Section keeps SGL, where Status information - * should be written. (For the definition of SGLs see ?4.1 - * .) - */ - ctrl_sl->ch.wd0.cf = 0; - - /* "TSL" field of CtrlS - defines the size of TS, which varies from 0 to - * 248 bytes - */ - ctrl_sl->ch.wd0.tsl = task_len; - - /* - * Variable length SGE (vSGE). The size of SGE is 16 bytes. The vSGE - * format is of two types, which are defined by "VA " field of CtrlS. - * "VA" stands for Virtual Address: o b0. SGE comprises 64-bits - * buffer's pointer and 31-bits Length, each SGE can only support up to - * 2G-1B, it can guarantee each single SGE length can not exceed 2GB by - * nature, A byte count value of zero means a 0byte data transfer. o b1. - * SGE comprises 64-bits buffer's pointer, 31-bits Length and 30-bits - * Key of the Translation table , each SGE can only support up to 2G-1B, - * it can guarantee each single SGE length can not exceed 2GB by nature, - * A byte count value of zero means a 0byte data transfer - */ - ctrl_sl->ch.wd0.va = 0; - - /* - * "DF" field of CtrlS - Data Format - defines the format of BDS - * a. b0 - BDS carries the list of SGEs (SGL) - * b. b1 - BDS carries the inline data - */ - ctrl_sl->ch.wd0.df = 0; - - /* "CR" - Completion is Required - marks CQE generation request per WQE - */ - ctrl_sl->ch.wd0.cr = 1; - - /* "DIFSL" field of CtrlS - defines the size of DIFS, which varies from - * 0 to 56 bytes - */ - ctrl_sl->ch.wd0.dif_sl = 0; - - /* "CSL" field of CtrlS - defines the size of CS, which varies from 0 to - * 24 bytes - */ - ctrl_sl->ch.wd0.csl = 0; - - /* CtrlSL describes the size of CtrlS in 8 bytes chunks. The - * value Zero is not valid - */ - ctrl_sl->ch.wd0.ctrl_sl = 1; - - /* "O" - Owner - marks ownership of WQE */ - ctrl_sl->ch.wd0.owner = 0; -} - -void spfc_build_trd_twr_wqe_ctrls(struct unf_frame_pkg *pkg, struct spfc_sqe *sqe) -{ - /* "BDSL" field of CtrlS - defines the size of BDS, which varies from 0 - * to 2040 bytes (8 bits of 8 bytes' chunk) - */ - /* TrdWqe carry 2 SGE defaultly, 4DW per SGE, the value is 4 because - * unit is 2DW, in double SGL mode, bdsl is 2 - */ - sqe->ctrl_sl.ch.wd0.bdsl = SPFC_T_RD_WR_WQE_CTR_BDSL_SIZE; - - /* "DrvSL" field of CtrlS - defines the size of DrvS, which varies from - * 0 to 24 bytes - */ - /* DrvSL = 0 */ - sqe->ctrl_sl.ch.wd0.drv_sl = 0; - - /* a. 
- * b1 - linking WQE, which will be only used in linked page architecture - * instead of ring, it's a special control WQE which does not contain - * any buffer or inline data information, and will only be consumed by - * hardware. The size is aligned to WQEBB/WQE b0 - normal WQE, either - * normal SEG WQE or inline data WQE - */ - /* normal wqe */ - sqe->ctrl_sl.ch.wd0.wf = 0; - - /* - * "CF" field of CtrlS - Completion Format - defines the format of CS. - * a. b0 - Status information is embedded inside of Completion Section - * b. b1 - Completion Section keeps SGL, where Status information - * should be written. (For the definition of SGLs see ?4.1) - */ - /* by SCQE mode, the value is ignored */ - sqe->ctrl_sl.ch.wd0.cf = 0; - - /* "TSL" field of CtrlS - defines the size of TS, which varies from 0 to - * 248 bytes - */ - /* TSL is configured by 56 bytes */ - sqe->ctrl_sl.ch.wd0.tsl = - sizeof(struct spfc_sqe_ts) / SPFC_WQE_SECTION_CHUNK_SIZE; - - /* - * Variable length SGE (vSGE). The size of SGE is 16 bytes. The vSGE - * format is of two types, which are defined by "VA " field of CtrlS. - * "VA" stands for Virtual Address: o b0. SGE comprises 64-bits buffer's - * pointer and 31-bits Length, each SGE can only support up to 2G-1B, it - * can guarantee each single SGE length can not exceed 2GB by nature, A - * byte count value of zero means a 0byte data transfer. o b1. SGE - * comprises 64-bits buffer's pointer, 31-bits Length and 30-bits Key of - * the Translation table , each SGE can only support up to 2G-1B, it can - * guarantee each single SGE length can not exceed 2GB by nature, A byte - * count value of zero means a 0byte data transfer - */ - sqe->ctrl_sl.ch.wd0.va = 0; - - /* - * "DF" field of CtrlS - Data Format - defines the format of BDS - * a. b0 - BDS carries the list of SGEs (SGL) - * b. b1 - BDS carries the inline data - */ - sqe->ctrl_sl.ch.wd0.df = 0; - - /* "CR" - Completion is Required - marks CQE generation request per WQE - */ - /* by SCQE mode, this value is ignored */ - sqe->ctrl_sl.ch.wd0.cr = 1; - - /* "DIFSL" field of CtrlS - defines the size of DIFS, which varies from - * 0 to 56 bytes. - */ - sqe->ctrl_sl.ch.wd0.dif_sl = 0; - - /* "CSL" field of CtrlS - defines the size of CS, which varies from 0 to - * 24 bytes - */ - sqe->ctrl_sl.ch.wd0.csl = 0; - - /* CtrlSL describes the size of CtrlS in 8 bytes chunks. The - * value Zero is not valid. - */ - sqe->ctrl_sl.ch.wd0.ctrl_sl = SPFC_T_RD_WR_WQE_CTR_CTRLSL_SIZE; - - /* "O" - Owner - marks ownership of WQE */ - sqe->ctrl_sl.ch.wd0.owner = 0; -} - -/* **************************************************************************** - * Function Name : spfc_build_service_wqe_ts_common - * Function Description : Construct the DW1~DW3 field in the Parent SQ WQE - * request of the ELS and ELS_RSP requests. 
- * Input Parameters : struct spfc_sqe_ts *sqe_ts u32 rport_index u16 local_xid - * u16 remote_xid u16 data_len - * Output Parameters : N/A - * Return Type : void - **************************************************************************** - */ -void spfc_build_service_wqe_ts_common(struct spfc_sqe_ts *sqe_ts, u32 rport_index, - u16 local_xid, u16 remote_xid, u16 data_len) -{ - sqe_ts->local_xid = local_xid; - - sqe_ts->wd0.conn_id = (u16)rport_index; - sqe_ts->wd0.remote_xid = remote_xid; - - sqe_ts->cont.els_gs_elsrsp_comm.data_len = data_len; -} - -/* **************************************************************************** - * Function Name : spfc_build_els_gs_wqe_sge - * Function Description : Construct the SGE field of the ELS and ELS_RSP WQE. - * The SGE and frame content have been converted to large ends in this - * function. - * Input Parameters: struct spfc_sqe *sqe void *buf_addr u32 buf_len u32 xid - * Output Parameters : N/A - * Return Type : void - **************************************************************************** - */ -void spfc_build_els_gs_wqe_sge(struct spfc_sqe *sqe, void *buf_addr, u64 phy_addr, - u32 buf_len, u32 xid, void *handle) -{ - u64 els_rsp_phy_addr; - struct spfc_variable_sge *sge = NULL; - - /* Fill in SGE and convert it to big-endian. */ - sge = &sqe->sge[ARRAY_INDEX_0]; - els_rsp_phy_addr = phy_addr; - sge->buf_addr_hi = SPFC_HIGH_32_BITS(els_rsp_phy_addr); - sge->buf_addr_lo = SPFC_LOW_32_BITS(els_rsp_phy_addr); - sge->wd0.buf_len = buf_len; - sge->wd0.r_flag = 0; - sge->wd1.extension_flag = SPFC_WQE_SGE_NOT_EXTEND_FLAG; - sge->wd1.buf_addr_gpa = SPFC_ZEROCOPY_PCIE_TEMPLATE_VALUE; - sge->wd1.xid = 0; - sge->wd1.last_flag = SPFC_WQE_SGE_LAST_FLAG; - spfc_cpu_to_big32(sge, sizeof(*sge)); - - /* Converts the payload of an FC frame into a big end. */ - if (buf_addr) - spfc_cpu_to_big32(buf_addr, buf_len); -} - -/* **************************************************************************** - * Function Name : spfc_build_els_wqe_ts_rsp - * Function Description : Construct the DW2~DW6 field in the Parent SQ WQE - * of the ELS_RSP request. - * Input Parameters : struct spfc_sqe *sqe void *sq_info void *frame_pld - * u16 type u16 cmnd u32 scqn - * Output Parameters: N/A - * Return Type : void - **************************************************************************** - */ -void spfc_build_els_wqe_ts_rsp(struct spfc_sqe *sqe, void *info, - struct unf_frame_pkg *pkg, void *frame_pld, - u16 type, u16 cmnd) -{ - struct unf_prli_payload *prli_acc_pld = NULL; - struct spfc_sqe_els_rsp *els_rsp = NULL; - struct spfc_sqe_ts *sqe_ts = NULL; - struct spfc_parent_sq_info *sq_info = NULL; - struct spfc_hba_info *hba = NULL; - struct unf_fc_head *pkg_fc_hdr_info = NULL; - struct spfc_parent_queue_info *prnt_q_info = (struct spfc_parent_queue_info *)info; - - FC_CHECK_RETURN_VOID(sqe); - FC_CHECK_RETURN_VOID(frame_pld); - - sqe_ts = &sqe->ts_sl; - els_rsp = &sqe_ts->cont.els_rsp; - sqe_ts->task_type = SPFC_SQE_ELS_RSP; - - /* The default chip does not need to update parameters. 
*/ - els_rsp->wd1.para_update = 0x0; - - sq_info = &prnt_q_info->parent_sq_info; - hba = (struct spfc_hba_info *)sq_info->hba; - - pkg_fc_hdr_info = &pkg->frame_head; - els_rsp->sid = pkg_fc_hdr_info->csctl_sid; - els_rsp->did = pkg_fc_hdr_info->rctl_did; - els_rsp->wd7.hotpooltag = UNF_GET_HOTPOOL_TAG(pkg) + hba->exi_base; - els_rsp->wd2.class_mode = FC_PROTOCOL_CLASS_3; - - if (type == ELS_RJT) - els_rsp->wd2.class_mode = pkg->class_mode; - - /* When the PLOGI request is sent, the microcode needs to be instructed - * to clear the I/O related to the link to avoid data inconsistency - * caused by the disorder of the IO. - */ - if ((cmnd == ELS_LOGO || cmnd == ELS_PLOGI)) { - els_rsp->wd1.clr_io = 1; - els_rsp->wd6.reset_exch_start = hba->exi_base; - els_rsp->wd6.reset_exch_end = - hba->exi_base + (hba->exi_count - 1); - els_rsp->wd7.scqn = - prnt_q_info->parent_sts_scq_info.cqm_queue_id; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "Port(0x%x) send cmd(0x%x) to RPort(0x%x),rport index(0x%x), notify clean io start 0x%x, end 0x%x, scqn 0x%x.", - sq_info->local_port_id, cmnd, sq_info->remote_port_id, - sq_info->rport_index, els_rsp->wd6.reset_exch_start, - els_rsp->wd6.reset_exch_end, els_rsp->wd7.scqn); - - return; - } - - if (type == ELS_RJT) - return; - - /* Enter WQE in the PrliAcc negotiation parameter, and fill in the - * Update flag in WQE. - */ - if (cmnd == ELS_PRLI) { - /* The chip updates the PLOGI ACC negotiation parameters. */ - els_rsp->wd2.seq_cnt = sq_info->plogi_co_parms.seq_cnt; - els_rsp->wd2.e_d_tov = sq_info->plogi_co_parms.ed_tov; - els_rsp->wd2.tx_mfs = sq_info->plogi_co_parms.tx_mfs; - els_rsp->e_d_tov_timer_val = sq_info->plogi_co_parms.ed_tov_time; - - /* The chip updates the PRLI ACC parameter. */ - prli_acc_pld = (struct unf_prli_payload *)frame_pld; - els_rsp->wd4.xfer_dis = SPFC_GET_PRLI_PARAM_WXFER(prli_acc_pld->parms); - els_rsp->wd4.conf = SPFC_GET_PRLI_PARAM_CONF(prli_acc_pld->parms); - els_rsp->wd4.rec = SPFC_GET_PRLI_PARAM_REC(prli_acc_pld->parms); - - els_rsp->wd1.para_update = 0x03; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "Port(0x%x) save rport index(0x%x) login parms,seqcnt:0x%x,e_d_tov:0x%x,txmfs:0x%x,e_d_tovtimerval:0x%x, xfer_dis:0x%x,conf:0x%x,rec:0x%x.", - sq_info->local_port_id, sq_info->rport_index, - els_rsp->wd2.seq_cnt, els_rsp->wd2.e_d_tov, - els_rsp->wd2.tx_mfs, els_rsp->e_d_tov_timer_val, - els_rsp->wd4.xfer_dis, els_rsp->wd4.conf, els_rsp->wd4.rec); - } -} - -/* **************************************************************************** - * Function Name : spfc_build_els_wqe_ts_req - * Function Description: Construct the DW2~DW4 field in the Parent SQ WQE - * of the ELS request. 
- * Input Parameters: struct spfc_sqe *sqe void *sq_info u16 cmnd u32 scqn - * Output Parameters: N/A - * Return Type: void - **************************************************************************** - */ -void spfc_build_els_wqe_ts_req(struct spfc_sqe *sqe, void *info, u32 scqn, - void *frame_pld, struct unf_frame_pkg *pkg) -{ - struct spfc_sqe_ts *sqe_ts = NULL; - struct spfc_sqe_t_els_gs *els_req = NULL; - struct spfc_parent_sq_info *sq_info = NULL; - struct spfc_hba_info *hba = NULL; - struct unf_fc_head *pkg_fc_hdr_info = NULL; - u16 cmnd; - - cmnd = SPFC_GET_LS_GS_CMND_CODE(pkg->cmnd); - - sqe_ts = &sqe->ts_sl; - if (pkg->type == UNF_PKG_GS_REQ) - sqe_ts->task_type = SPFC_SQE_GS_CMND; - else - sqe_ts->task_type = SPFC_SQE_ELS_CMND; - - sqe_ts->magic_num = UNF_GETXCHGALLOCTIME(pkg); - - els_req = &sqe_ts->cont.t_els_gs; - pkg_fc_hdr_info = &pkg->frame_head; - - sq_info = (struct spfc_parent_sq_info *)info; - hba = (struct spfc_hba_info *)sq_info->hba; - els_req->sid = pkg_fc_hdr_info->csctl_sid; - els_req->did = pkg_fc_hdr_info->rctl_did; - - /* When the PLOGI request is sent, the microcode needs to be instructed - * to clear the I/O related to the link to avoid data inconsistency - * caused by the disorder of the IO. - */ - if ((cmnd == ELS_LOGO || cmnd == ELS_PLOGI) && hba) { - els_req->wd4.clr_io = 1; - els_req->wd6.reset_exch_start = hba->exi_base; - els_req->wd6.reset_exch_end = hba->exi_base + (hba->exi_count - 1); - els_req->wd7.scqn = scqn; - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "Port(0x%x) Rport(0x%x) SID(0x%x) send %s to DID(0x%x), notify clean io start 0x%x, end 0x%x, scqn 0x%x.", - hba->port_cfg.port_id, sq_info->rport_index, - sq_info->local_port_id, (cmnd == ELS_PLOGI) ? "PLOGI" : "LOGO", - sq_info->remote_port_id, els_req->wd6.reset_exch_start, - els_req->wd6.reset_exch_end, scqn); - - return; - } - - /* The chip updates the PLOGI ACC negotiation parameters. */ - if (cmnd == ELS_PRLI) { - els_req->wd5.seq_cnt = sq_info->plogi_co_parms.seq_cnt; - els_req->wd5.e_d_tov = sq_info->plogi_co_parms.ed_tov; - els_req->wd5.tx_mfs = sq_info->plogi_co_parms.tx_mfs; - els_req->e_d_tov_timer_val = sq_info->plogi_co_parms.ed_tov_time; - - els_req->wd4.rec_support = hba->port_cfg.tape_support ? 1 : 0; - els_req->wd4.para_update = 0x01; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, - UNF_INFO, - "Port(0x%x) save rport index(0x%x) login parms,seqcnt:0x%x,e_d_tov:0x%x,txmfs:0x%x,e_d_tovtimerval:0x%x.", - sq_info->local_port_id, sq_info->rport_index, - els_req->wd5.seq_cnt, els_req->wd5.e_d_tov, - els_req->wd5.tx_mfs, els_req->e_d_tov_timer_val); - } - - if (cmnd == ELS_ECHO) - els_req->echo_flag = true; - - if (cmnd == ELS_REC) { - els_req->wd4.rec_flag = 1; - els_req->wd4.origin_hottag = pkg->origin_hottag + hba->exi_base; - els_req->origin_magicnum = pkg->origin_magicnum; - - FC_DRV_PRINT(UNF_LOG_LOGIN_ATT, UNF_MAJOR, - "Port(0x%x) Rport(0x%x) SID(0x%x) send Rec to DID(0x%x), origin_hottag 0x%x", - hba->port_cfg.port_id, sq_info->rport_index, - sq_info->local_port_id, sq_info->remote_port_id, - els_req->wd4.origin_hottag); - } -} - -/* **************************************************************************** - * Function Name : spfc_build_bls_wqe_ts_req - * Function Description: Construct the DW2 field in the Parent SQ WQE of - * the ELS request, that is, ABTS parameter. 
- * Input Parameters:struct unf_frame_pkg *pkg void *hba - * Output Parameters: N/A - * Return Type: void - **************************************************************************** - */ -void spfc_build_bls_wqe_ts_req(struct spfc_sqe *sqe, struct unf_frame_pkg *pkg, void *handle) -{ - struct spfc_sqe_abts *abts; - - sqe->ts_sl.task_type = SPFC_SQE_BLS_CMND; - sqe->ts_sl.magic_num = UNF_GETXCHGALLOCTIME(pkg); - - abts = &sqe->ts_sl.cont.abts; - abts->fh_parm_abts = pkg->frame_head.parameter; - abts->hotpooltag = UNF_GET_HOTPOOL_TAG(pkg) + - ((struct spfc_hba_info *)handle)->exi_base; - abts->release_timer = UNF_GET_XID_RELEASE_TIMER(pkg); -} - -/* **************************************************************************** - * Function Name : spfc_build_service_wqe_ctrl_section - * Function Description: fill Parent SQ WQE and Root SQ WQE's Control Section - * Input Parameters : struct spfc_wqe_ctrl *wqe_cs u32 ts_size u32 bdsl - * Output Parameters : N/A - * Return Type : void - **************************************************************************** - */ -void spfc_build_service_wqe_ctrl_section(struct spfc_wqe_ctrl *wqe_cs, u32 ts_size, - u32 bdsl) -{ - wqe_cs->ch.wd0.bdsl = bdsl; - wqe_cs->ch.wd0.drv_sl = 0; - wqe_cs->ch.wd0.rsvd0 = 0; - wqe_cs->ch.wd0.wf = 0; - wqe_cs->ch.wd0.cf = 0; - wqe_cs->ch.wd0.tsl = ts_size; - wqe_cs->ch.wd0.va = 0; - wqe_cs->ch.wd0.df = 0; - wqe_cs->ch.wd0.cr = 1; - wqe_cs->ch.wd0.dif_sl = 0; - wqe_cs->ch.wd0.csl = 0; - wqe_cs->ch.wd0.ctrl_sl = SPFC_BYTES_TO_QW_NUM(sizeof(*wqe_cs)); /* divided by 8 */ - wqe_cs->ch.wd0.owner = 0; -} - -/* **************************************************************************** - * Function Name : spfc_build_wqe_owner_pmsn - * Function Description: This field is filled using the value of Control - * Section of Parent SQ WQE. - * Input Parameters: struct spfc_wqe_ctrl *wqe_cs u16 owner u16 pmsn - * Output Parameters : N/A - * Return Type: void - **************************************************************************** - */ -void spfc_build_wqe_owner_pmsn(struct spfc_sqe *io_sqe, u16 owner, u16 pmsn) -{ - struct spfc_wqe_ctrl *wqe_cs = &io_sqe->ctrl_sl; - struct spfc_wqe_ctrl *wqee_cs = &io_sqe->ectrl_sl; - - wqe_cs->qsf.wqe_sn = pmsn; - wqe_cs->qsf.dump_wqe_sn = wqe_cs->qsf.wqe_sn; - wqe_cs->ch.wd0.owner = (u32)owner; - wqee_cs->ch.ctrl_ch_val = wqe_cs->ch.ctrl_ch_val; - wqee_cs->qsf.wqe_sn = wqe_cs->qsf.wqe_sn; - wqee_cs->qsf.dump_wqe_sn = wqe_cs->qsf.dump_wqe_sn; -} - -/* **************************************************************************** - * Function Name : spfc_convert_parent_wqe_to_big_endian - * Function Description: Set the Done field of Parent SQ WQE and convert - * Control Section and Task Section to big-endian. - * Input Parameters:struct spfc_sqe *sqe - * Output Parameters : N/A - * Return Type : void - **************************************************************************** - */ -void spfc_convert_parent_wqe_to_big_endian(struct spfc_sqe *sqe) -{ - if (likely(sqe->ts_sl.task_type != SPFC_TASK_T_TRESP && - sqe->ts_sl.task_type != SPFC_TASK_T_TMF_RESP)) { - /* Convert Control Secton and Task Section to big-endian. Before - * the SGE enters the queue, the upper-layer driver converts the - * SGE and Task Section to the big-endian mode. 
- */ - spfc_cpu_to_big32(&sqe->ctrl_sl, sizeof(sqe->ctrl_sl)); - spfc_cpu_to_big32(&sqe->ts_sl, sizeof(sqe->ts_sl)); - spfc_cpu_to_big32(&sqe->ectrl_sl, sizeof(sqe->ectrl_sl)); - spfc_cpu_to_big32(&sqe->sid, sizeof(sqe->sid)); - spfc_cpu_to_big32(&sqe->did, sizeof(sqe->did)); - spfc_cpu_to_big32(&sqe->wqe_gpa, sizeof(sqe->wqe_gpa)); - spfc_cpu_to_big32(&sqe->db_val, sizeof(sqe->db_val)); - } else { - /* The SPFC_TASK_T_TRESP may use the SGE as the Task Section to - * convert the entire SQE into a large end. - */ - spfc_cpu_to_big32(sqe, sizeof(struct spfc_sqe_tresp)); - } -} - -/* **************************************************************************** - * Function Name : spfc_build_cmdqe_common - * Function Description : Assemble the Cmdqe Common part. - * Input Parameters: union spfc_cmdqe *cmd_qe enum spfc_task_type task_type u16 rxid - * Output Parameters : N/A - * Return Type: void - **************************************************************************** - */ -void spfc_build_cmdqe_common(union spfc_cmdqe *cmd_qe, enum spfc_task_type task_type, - u16 rxid) -{ - cmd_qe->common.wd0.task_type = task_type; - cmd_qe->common.wd0.rx_id = rxid; - cmd_qe->common.wd0.rsvd0 = 0; -} - -#define SPFC_STANDARD_SIRT_ENABLE (1) -#define SPFC_STANDARD_SIRT_DISABLE (0) -#define SPFC_UNKNOWN_ID (0xFFFF) - -void spfc_build_icmnd_wqe_ts_header(struct unf_frame_pkg *pkg, struct spfc_sqe *sqe, - u8 task_type, u16 exi_base, u8 port_idx) -{ - sqe->ts_sl.local_xid = (u16)UNF_GET_HOTPOOL_TAG(pkg) + exi_base; - sqe->ts_sl.task_type = task_type; - sqe->ts_sl.wd0.conn_id = - (u16)(pkg->private_data[PKG_PRIVATE_XCHG_RPORT_INDEX]); - - sqe->ts_sl.wd0.remote_xid = SPFC_UNKNOWN_ID; - sqe->ts_sl.magic_num = UNF_GETXCHGALLOCTIME(pkg); -} - -/* **************************************************************************** - * Function Name : spfc_build_icmnd_wqe_ts - * Function Description : Constructing the TS Domain of the ICmnd - * Input Parameters: void *hba struct unf_frame_pkg *pkg - * struct spfc_sqe_ts *sqe_ts - * Output Parameters :N/A - * Return Type : void - **************************************************************************** - */ -void spfc_build_icmnd_wqe_ts(void *handle, struct unf_frame_pkg *pkg, - struct spfc_sqe_ts *sqe_ts, union spfc_sqe_ts_ex *sqe_tsex) -{ - struct spfc_sqe_icmnd *icmnd = &sqe_ts->cont.icmnd; - struct spfc_hba_info *hba = NULL; - - hba = (struct spfc_hba_info *)handle; - - sqe_ts->cdb_type = 0; - memcpy(icmnd->fcp_cmnd_iu, pkg->fcp_cmnd, sizeof(struct unf_fcp_cmnd)); - - if (sqe_ts->task_type == SPFC_SQE_FCP_ITMF) { - icmnd->info.tmf.w0.bs.reset_exch_start = hba->exi_base; - icmnd->info.tmf.w0.bs.reset_exch_end = hba->exi_base + hba->exi_count - 1; - - icmnd->info.tmf.w1.bs.reset_did = UNF_GET_DID(pkg); - /* delivers the marker status flag to the microcode. */ - icmnd->info.tmf.w1.bs.marker_sts = 1; - SPFC_GET_RESET_TYPE(UNF_GET_TASK_MGMT_FLAGS(pkg->fcp_cmnd->control), - icmnd->info.tmf.w1.bs.reset_type); - - icmnd->info.tmf.w2.bs.reset_sid = UNF_GET_SID(pkg); - - memcpy(icmnd->info.tmf.reset_lun, pkg->fcp_cmnd->lun, - sizeof(icmnd->info.tmf.reset_lun)); - } -} - -/* **************************************************************************** - * Function Name : spfc_build_icmnd_wqe_ctrls - * Function Description : The CtrlS domain of the ICmnd is constructed. The - * analysis result is the same as that of the TWTR. 
- * Input Parameters: struct unf_frame_pkg *pkg struct spfc_sqe *sqe - * Output Parameters: N/A - * Return Type: void - **************************************************************************** - */ -void spfc_build_icmnd_wqe_ctrls(struct unf_frame_pkg *pkg, struct spfc_sqe *sqe) -{ - spfc_build_trd_twr_wqe_ctrls(pkg, sqe); -} - -/* **************************************************************************** - * Function Name : spfc_build_srq_wqe_ctrls - * Function Description : Construct the CtrlS domain of the ICmnd. The analysis - * result is the same as that of the TWTR. - * Input Parameters : struct spfc_rqe *rqe u16 owner u16 pmsn - * Output Parameters : N/A - * Return Type : void - **************************************************************************** - */ -void spfc_build_srq_wqe_ctrls(struct spfc_rqe *rqe, u16 owner, u16 pmsn) -{ - struct spfc_wqe_ctrl_ch *wqe_ctrls = NULL; - - wqe_ctrls = &rqe->ctrl_sl.ch; - wqe_ctrls->wd0.owner = owner; - wqe_ctrls->wd0.ctrl_sl = sizeof(struct spfc_wqe_ctrl) >> UNF_SHIFT_3; - wqe_ctrls->wd0.csl = 1; - wqe_ctrls->wd0.dif_sl = 0; - wqe_ctrls->wd0.cr = 1; - wqe_ctrls->wd0.df = 0; - wqe_ctrls->wd0.va = 0; - wqe_ctrls->wd0.tsl = 0; - wqe_ctrls->wd0.cf = 0; - wqe_ctrls->wd0.wf = 0; - wqe_ctrls->wd0.drv_sl = sizeof(struct spfc_rqe_drv) >> UNF_SHIFT_3; - wqe_ctrls->wd0.bdsl = sizeof(struct spfc_constant_sge) >> UNF_SHIFT_3; - - rqe->ctrl_sl.wd0.wqe_msn = pmsn; - rqe->ctrl_sl.wd0.dump_wqe_msn = rqe->ctrl_sl.wd0.wqe_msn; -} diff --git a/drivers/scsi/spfc/hw/spfc_wqe.h b/drivers/scsi/spfc/hw/spfc_wqe.h deleted file mode 100644 index ec6d7bbdf8f9..000000000000 --- a/drivers/scsi/spfc/hw/spfc_wqe.h +++ /dev/null @@ -1,239 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPFC_WQE_H -#define SPFC_WQE_H - -#include "unf_type.h" -#include "unf_common.h" -#include "spfc_hw_wqe.h" -#include "spfc_parent_context.h" - -/* TGT WQE type */ -/* DRV->uCode via Parent SQ */ -#define SPFC_SQE_FCP_TRD SPFC_TASK_T_TREAD -#define SPFC_SQE_FCP_TWR SPFC_TASK_T_TWRITE -#define SPFC_SQE_FCP_TRSP SPFC_TASK_T_TRESP -#define SPFC_SQE_FCP_TACK SPFC_TASK_T_TACK -#define SPFC_SQE_ELS_CMND SPFC_TASK_T_ELS -#define SPFC_SQE_ELS_RSP SPFC_TASK_T_ELS_RSP -#define SPFC_SQE_GS_CMND SPFC_TASK_T_GS -#define SPFC_SQE_BLS_CMND SPFC_TASK_T_ABTS -#define SPFC_SQE_FCP_IREAD SPFC_TASK_T_IREAD -#define SPFC_SQE_FCP_IWRITE SPFC_TASK_T_IWRITE -#define SPFC_SQE_FCP_ITMF SPFC_TASK_T_ITMF -#define SPFC_SQE_SESS_RST SPFC_TASK_T_SESS_RESET -#define SPFC_SQE_FCP_TMF_TRSP SPFC_TASK_T_TMF_RESP -#define SPFC_SQE_NOP SPFC_TASK_T_NOP -/* DRV->uCode Via CMDQ */ -#define SPFC_CMDQE_ABTS_RSP SPFC_TASK_T_ABTS_RSP -#define SPFC_CMDQE_ABORT SPFC_TASK_T_ABORT -#define SPFC_CMDQE_SESS_DIS SPFC_TASK_T_SESS_DIS -#define SPFC_CMDQE_SESS_DEL SPFC_TASK_T_SESS_DEL - -/* uCode->Drv Via CMD SCQ */ -#define SPFC_SCQE_FCP_TCMND SPFC_TASK_T_RCV_TCMND -#define SPFC_SCQE_ELS_CMND SPFC_TASK_T_RCV_ELS_CMD -#define SPFC_SCQE_ABTS_CMD SPFC_TASK_T_RCV_ABTS_CMD -#define SPFC_SCQE_FCP_IRSP SPFC_TASK_T_IRESP -#define SPFC_SCQE_FCP_ITMF_RSP SPFC_TASK_T_ITMF_RESP - -/* uCode->Drv Via STS SCQ */ -#define SPFC_SCQE_FCP_TSTS SPFC_TASK_T_TSTS -#define SPFC_SCQE_GS_RSP SPFC_TASK_T_RCV_GS_RSP -#define SPFC_SCQE_ELS_RSP SPFC_TASK_T_RCV_ELS_RSP -#define SPFC_SCQE_ABTS_RSP SPFC_TASK_T_RCV_ABTS_RSP -#define SPFC_SCQE_ELS_RSP_STS SPFC_TASK_T_ELS_RSP_STS -#define SPFC_SCQE_ABORT_STS SPFC_TASK_T_ABORT_STS -#define SPFC_SCQE_SESS_EN_STS SPFC_TASK_T_SESS_EN_STS -#define 
SPFC_SCQE_SESS_DIS_STS SPFC_TASK_T_SESS_DIS_STS -#define SPFC_SCQE_SESS_DEL_STS SPFC_TASK_T_SESS_DEL_STS -#define SPFC_SCQE_SESS_RST_STS SPFC_TASK_T_SESS_RESET_STS -#define SPFC_SCQE_ITMF_MARKER_STS SPFC_TASK_T_ITMF_MARKER_STS -#define SPFC_SCQE_ABTS_MARKER_STS SPFC_TASK_T_ABTS_MARKER_STS -#define SPFC_SCQE_FLUSH_SQ_STS SPFC_TASK_T_FLUSH_SQ_STS -#define SPFC_SCQE_BUF_CLEAR_STS SPFC_TASK_T_BUFFER_CLEAR_STS -#define SPFC_SCQE_CLEAR_SRQ_STS SPFC_TASK_T_CLEAR_SRQ_STS -#define SPFC_SCQE_DIFX_RESULT_STS SPFC_TASK_T_DIFX_RESULT_STS -#define SPFC_SCQE_XID_FREE_ABORT_STS SPFC_TASK_T_EXCH_ID_FREE_ABORT_STS -#define SPFC_SCQE_EXCHID_TIMEOUT_STS SPFC_TASK_T_EXCHID_TIMEOUT_STS -#define SPFC_SQE_NOP_STS SPFC_TASK_T_NOP_STS - -#define SPFC_LOW_32_BITS(__addr) ((u32)((u64)(__addr) & 0xffffffff)) -#define SPFC_HIGH_32_BITS(__addr) ((u32)(((u64)(__addr) >> 32) & 0xffffffff)) - -/* Error Code from SCQ */ -#define SPFC_COMPLETION_STATUS_SUCCESS FC_CQE_COMPLETED -#define SPFC_COMPLETION_STATUS_ABORTED_SETUP_FAIL FC_IMMI_CMDPKT_SETUP_FAIL - -#define SPFC_COMPLETION_STATUS_TIMEOUT FC_ERROR_CODE_E_D_TIMER_EXPIRE -#define SPFC_COMPLETION_STATUS_DIF_ERROR FC_ERROR_CODE_DATA_DIFX_FAILED -#define SPFC_COMPLETION_STATUS_DATA_OOO FC_ERROR_CODE_DATA_OOO_RO -#define SPFC_COMPLETION_STATUS_DATA_OVERFLOW \ - FC_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS - -#define SPFC_SCQE_INVALID_CONN_ID (0xffff) -#define SPFC_GET_SCQE_TYPE(scqe) ((scqe)->common.ch.wd0.task_type) -#define SPFC_GET_SCQE_STATUS(scqe) ((scqe)->common.ch.wd0.err_code) -#define SPFC_GET_SCQE_REMAIN_CNT(scqe) ((scqe)->common.ch.wd0.cqe_remain_cnt) -#define SPFC_GET_SCQE_CONN_ID(scqe) ((scqe)->common.conn_id) -#define SPFC_GET_SCQE_SQN(scqe) ((scqe)->common.ch.wd0.sqn) -#define SPFC_GET_WQE_TYPE(wqe) ((wqe)->ts_sl.task_type) - -#define SPFC_WQE_IS_IO(wqe) \ - ((SPFC_GET_WQE_TYPE(wqe) != SPFC_SQE_SESS_RST) && \ - (SPFC_GET_WQE_TYPE(wqe) != SPFC_SQE_NOP)) -#define SPFC_SCQE_HAS_ERRCODE(scqe) \ - (SPFC_GET_SCQE_STATUS(scqe) != SPFC_COMPLETION_STATUS_SUCCESS) -#define SPFC_SCQE_ERR_TO_CM(scqe) \ - (SPFC_GET_SCQE_STATUS(scqe) != FC_ELS_GS_RSP_EXCH_CHECK_FAIL) -#define SPFC_SCQE_EXCH_ABORTED(scqe) \ - ((SPFC_GET_SCQE_STATUS(scqe) >= \ - FC_CQE_BUFFER_CLEAR_IO_COMPLETED) && \ - (SPFC_GET_SCQE_STATUS(scqe) <= FC_CQE_WQE_FLUSH_IO_COMPLETED)) -#define SPFC_SCQE_CONN_ID_VALID(scqe) \ - (SPFC_GET_SCQE_CONN_ID(scqe) != SPFC_SCQE_INVALID_CONN_ID) - -/* - * checksum error bitmap define - */ -#define NIC_RX_CSUM_HW_BYPASS_ERR (1) -#define NIC_RX_CSUM_IP_CSUM_ERR (1 << 1) -#define NIC_RX_CSUM_TCP_CSUM_ERR (1 << 2) -#define NIC_RX_CSUM_UDP_CSUM_ERR (1 << 3) -#define NIC_RX_CSUM_SCTP_CRC_ERR (1 << 4) - -#define SPFC_WQE_SECTION_CHUNK_SIZE 8 /* 8 bytes' chunk */ -#define SPFC_T_RESP_WQE_CTR_TSL_SIZE 15 /* 8 bytes' chunk */ -#define SPFC_T_RD_WR_WQE_CTR_TSL_SIZE 9 /* 8 bytes' chunk */ -#define SPFC_T_RD_WR_WQE_CTR_BDSL_SIZE 4 /* 8 bytes' chunk */ -#define SPFC_T_RD_WR_WQE_CTR_CTRLSL_SIZE 1 /* 8 bytes' chunk */ - -#define SPFC_WQE_MAX_ESGE_NUM 3 /* 3 ESGE In Extended wqe */ -#define SPFC_WQE_SGE_ENTRY_NUM 2 /* BD SGE and DIF SGE count */ -#define SPFC_WQE_SGE_DIF_ENTRY_NUM 1 /* DIF SGE count */ -#define SPFC_WQE_SGE_LAST_FLAG 1 -#define SPFC_WQE_SGE_NOT_LAST_FLAG 0 -#define SPFC_WQE_SGE_EXTEND_FLAG 1 -#define SPFC_WQE_SGE_NOT_EXTEND_FLAG 0 - -#define SPFC_FCP_TMF_PORT_RESET (0) -#define SPFC_FCP_TMF_LUN_RESET (1) -#define SPFC_FCP_TMF_TGT_RESET (2) -#define SPFC_FCP_TMF_RSVD (3) - -#define SPFC_ADJUST_DATA(old_va, new_va) \ - { \ - (old_va) = new_va; \ - } - -#define 
SPFC_GET_RESET_TYPE(tmf_flag, reset_flag) \ - { \ - switch (tmf_flag) { \ - case UNF_FCP_TM_ABORT_TASK_SET: \ - case UNF_FCP_TM_LOGICAL_UNIT_RESET: \ - (reset_flag) = SPFC_FCP_TMF_LUN_RESET; \ - break; \ - case UNF_FCP_TM_TARGET_RESET: \ - (reset_flag) = SPFC_FCP_TMF_TGT_RESET; \ - break; \ - case UNF_FCP_TM_CLEAR_TASK_SET: \ - (reset_flag) = SPFC_FCP_TMF_PORT_RESET; \ - break; \ - default: \ - (reset_flag) = SPFC_FCP_TMF_RSVD; \ - } \ - } - -/* Link WQE structure */ -struct spfc_linkwqe { - union { - struct { - u32 rsv1 : 14; - u32 wf : 1; - u32 rsv2 : 14; - u32 ctrlsl : 2; - u32 o : 1; - } wd0; - - u32 val_wd0; - }; - - union { - struct { - u32 msn : 16; - u32 dump_msn : 15; - u32 lp : 1; /* lp means whether O bit is overturn */ - } wd1; - - u32 val_wd1; - }; - - u32 next_page_addr_hi; - u32 next_page_addr_lo; -}; - -/* Session Enable */ -struct spfc_host_keys { - struct { - u32 smac1 : 8; - u32 smac0 : 8; - u32 rsv : 16; - } wd0; - - u8 smac[ARRAY_INDEX_4]; - - u8 dmac[ARRAY_INDEX_4]; - struct { - u8 sid_1; - u8 sid_2; - u8 dmac_rvd[ARRAY_INDEX_2]; - } wd3; - struct { - u8 did_0; - u8 did_1; - u8 did_2; - u8 sid_0; - } wd4; - - struct { - u32 port_id : 3; - u32 host_id : 2; - u32 rsvd : 27; - } wd5; - u32 rsvd; -}; - -/* Parent SQ WQE Related function */ -void spfc_build_service_wqe_ctrl_section(struct spfc_wqe_ctrl *wqe_cs, u32 ts_size, - u32 bdsl); -void spfc_build_service_wqe_ts_common(struct spfc_sqe_ts *sqe_ts, u32 rport_index, - u16 local_xid, u16 remote_xid, - u16 data_len); -void spfc_build_els_gs_wqe_sge(struct spfc_sqe *sqe, void *buf_addr, u64 phy_addr, - u32 buf_len, u32 xid, void *handle); -void spfc_build_els_wqe_ts_req(struct spfc_sqe *sqe, void *info, u32 scqn, - void *frame_pld, struct unf_frame_pkg *pkg); -void spfc_build_els_wqe_ts_rsp(struct spfc_sqe *sqe, void *info, - struct unf_frame_pkg *pkg, void *frame_pld, - u16 type, u16 cmnd); -void spfc_build_bls_wqe_ts_req(struct spfc_sqe *sqe, struct unf_frame_pkg *pkg, - void *handle); -void spfc_build_trd_twr_wqe_ctrls(struct unf_frame_pkg *pkg, struct spfc_sqe *sqe); -void spfc_build_wqe_owner_pmsn(struct spfc_sqe *io_sqe, u16 owner, u16 pmsn); -void spfc_convert_parent_wqe_to_big_endian(struct spfc_sqe *sqe); -void spfc_build_icmnd_wqe_ctrls(struct unf_frame_pkg *pkg, struct spfc_sqe *sqe); -void spfc_build_icmnd_wqe_ts(void *handle, struct unf_frame_pkg *pkg, - struct spfc_sqe_ts *sqe_ts, union spfc_sqe_ts_ex *sqe_tsex); -void spfc_build_icmnd_wqe_ts_header(struct unf_frame_pkg *pkg, struct spfc_sqe *sqe, - u8 task_type, u16 exi_base, u8 port_idx); - -void spfc_build_cmdqe_common(union spfc_cmdqe *cmd_qe, enum spfc_task_type task_type, - u16 rxid); -void spfc_build_srq_wqe_ctrls(struct spfc_rqe *rqe, u16 owner, u16 pmsn); -void spfc_build_common_wqe_ctrls(struct spfc_wqe_ctrl *ctrl_sl, u8 task_len); -void spfc_build_tmf_rsp_wqe_ts_header(struct unf_frame_pkg *pkg, - struct spfc_sqe_tmf_rsp *sqe, u16 exi_base, - u32 scqn); - -#endif diff --git a/drivers/scsi/spfc/sphw_api_cmd.c b/drivers/scsi/spfc/sphw_api_cmd.c deleted file mode 120000 index 27c7c0770fa3..000000000000 --- a/drivers/scsi/spfc/sphw_api_cmd.c +++ /dev/null @@ -1 +0,0 @@ -../../net/ethernet/ramaxel/spnic/hw/sphw_api_cmd.c \ No newline at end of file diff --git a/drivers/scsi/spfc/sphw_cmdq.c b/drivers/scsi/spfc/sphw_cmdq.c deleted file mode 120000 index 5ac779ba274b..000000000000 --- a/drivers/scsi/spfc/sphw_cmdq.c +++ /dev/null @@ -1 +0,0 @@ -../../net/ethernet/ramaxel/spnic/hw/sphw_cmdq.c \ No newline at end of file diff --git 
a/drivers/scsi/spfc/sphw_common.c b/drivers/scsi/spfc/sphw_common.c deleted file mode 120000 index a1a30a4840e1..000000000000 --- a/drivers/scsi/spfc/sphw_common.c +++ /dev/null @@ -1 +0,0 @@ -../../net/ethernet/ramaxel/spnic/hw/sphw_common.c \ No newline at end of file diff --git a/drivers/scsi/spfc/sphw_eqs.c b/drivers/scsi/spfc/sphw_eqs.c deleted file mode 120000 index 74430dcb9dc5..000000000000 --- a/drivers/scsi/spfc/sphw_eqs.c +++ /dev/null @@ -1 +0,0 @@ -../../net/ethernet/ramaxel/spnic/hw/sphw_eqs.c \ No newline at end of file diff --git a/drivers/scsi/spfc/sphw_hw_cfg.c b/drivers/scsi/spfc/sphw_hw_cfg.c deleted file mode 120000 index 4f43d68624c1..000000000000 --- a/drivers/scsi/spfc/sphw_hw_cfg.c +++ /dev/null @@ -1 +0,0 @@ -../../net/ethernet/ramaxel/spnic/hw/sphw_hw_cfg.c \ No newline at end of file diff --git a/drivers/scsi/spfc/sphw_hw_comm.c b/drivers/scsi/spfc/sphw_hw_comm.c deleted file mode 120000 index c943b3b2933a..000000000000 --- a/drivers/scsi/spfc/sphw_hw_comm.c +++ /dev/null @@ -1 +0,0 @@ -../../net/ethernet/ramaxel/spnic/hw/sphw_hw_comm.c \ No newline at end of file diff --git a/drivers/scsi/spfc/sphw_hwdev.c b/drivers/scsi/spfc/sphw_hwdev.c deleted file mode 120000 index b7279f17eaa2..000000000000 --- a/drivers/scsi/spfc/sphw_hwdev.c +++ /dev/null @@ -1 +0,0 @@ -../../net/ethernet/ramaxel/spnic/hw/sphw_hwdev.c \ No newline at end of file diff --git a/drivers/scsi/spfc/sphw_hwif.c b/drivers/scsi/spfc/sphw_hwif.c deleted file mode 120000 index d40ef71f9033..000000000000 --- a/drivers/scsi/spfc/sphw_hwif.c +++ /dev/null @@ -1 +0,0 @@ -../../net/ethernet/ramaxel/spnic/hw/sphw_hwif.c \ No newline at end of file diff --git a/drivers/scsi/spfc/sphw_mbox.c b/drivers/scsi/spfc/sphw_mbox.c deleted file mode 120000 index 1b00fe7289cc..000000000000 --- a/drivers/scsi/spfc/sphw_mbox.c +++ /dev/null @@ -1 +0,0 @@ -../../net/ethernet/ramaxel/spnic/hw/sphw_mbox.c \ No newline at end of file diff --git a/drivers/scsi/spfc/sphw_mgmt.c b/drivers/scsi/spfc/sphw_mgmt.c deleted file mode 120000 index fd18a73e9d3a..000000000000 --- a/drivers/scsi/spfc/sphw_mgmt.c +++ /dev/null @@ -1 +0,0 @@ -../../net/ethernet/ramaxel/spnic/hw/sphw_mgmt.c \ No newline at end of file diff --git a/drivers/scsi/spfc/sphw_prof_adap.c b/drivers/scsi/spfc/sphw_prof_adap.c deleted file mode 120000 index fbc7db05dd27..000000000000 --- a/drivers/scsi/spfc/sphw_prof_adap.c +++ /dev/null @@ -1 +0,0 @@ -../../net/ethernet/ramaxel/spnic/hw/sphw_prof_adap.c \ No newline at end of file diff --git a/drivers/scsi/spfc/sphw_wq.c b/drivers/scsi/spfc/sphw_wq.c deleted file mode 120000 index cdfcb3a610c0..000000000000 --- a/drivers/scsi/spfc/sphw_wq.c +++ /dev/null @@ -1 +0,0 @@ -../../net/ethernet/ramaxel/spnic/hw/sphw_wq.c \ No newline at end of file
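For reference, the removed builders above hand descriptors to hardware in big-endian form: spfc_convert_parent_wqe_to_big_endian() calls spfc_cpu_to_big32() on the control and task sections before the WQE is issued. Below is a minimal, standalone user-space sketch of that 32-bit byte-order pattern; the helper name, sample data, and use of htonl() are illustrative assumptions and not the driver's own implementation.

	/* Sketch: convert each 32-bit word of a descriptor buffer to
	 * big-endian, the pattern the removed spfc_cpu_to_big32() calls
	 * rely on before a WQE is handed to hardware. Illustrative only. */
	#include <stdint.h>
	#include <stddef.h>
	#include <stdio.h>
	#include <arpa/inet.h>	/* htonl(): host order to big-endian */

	static void cpu_to_big32_sketch(void *buf, size_t len_bytes)
	{
		uint32_t *word = buf;
		size_t i;

		/* WQE sections are multiples of 4 bytes; each 32-bit word
		 * is converted independently. */
		for (i = 0; i < len_bytes / sizeof(uint32_t); i++)
			word[i] = htonl(word[i]);
	}

	int main(void)
	{
		uint32_t ctrl_section[2] = { 0x11223344u, 0xaabbccddu };

		cpu_to_big32_sketch(ctrl_section, sizeof(ctrl_section));
		printf("0x%08x 0x%08x\n",
		       (unsigned)ctrl_section[0], (unsigned)ctrl_section[1]);
		return 0;
	}

On a little-endian host this prints 0x44332211 0xddccbbaa; on a big-endian host the words are already in the on-wire format and htonl() leaves them unchanged.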
Ramaxel inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4ZR0O
CVE: NA
There are some issues with the driver that cannot be fixed now. The driver is not good enough for the LTS quality requirements of openEuler, so remove it.
Signed-off-by: Yanling Song songyl@ramaxel.com Reviewed-by: Yang Gan yanggan@ramaxel.com --- arch/arm64/configs/openeuler_defconfig | 2 - arch/x86/configs/openeuler_defconfig | 2 - drivers/net/ethernet/Kconfig | 1 - drivers/net/ethernet/Makefile | 1 - drivers/net/ethernet/ramaxel/Kconfig | 20 - drivers/net/ethernet/ramaxel/Makefile | 6 - drivers/net/ethernet/ramaxel/spnic/Kconfig | 14 - drivers/net/ethernet/ramaxel/spnic/Makefile | 39 - .../ethernet/ramaxel/spnic/hw/sphw_api_cmd.c | 1165 ----------- .../ethernet/ramaxel/spnic/hw/sphw_api_cmd.h | 277 --- .../ethernet/ramaxel/spnic/hw/sphw_cfg_cmd.h | 127 -- .../net/ethernet/ramaxel/spnic/hw/sphw_cmdq.c | 1573 --------------- .../net/ethernet/ramaxel/spnic/hw/sphw_cmdq.h | 195 -- .../ethernet/ramaxel/spnic/hw/sphw_comm_cmd.h | 60 - .../ramaxel/spnic/hw/sphw_comm_msg_intf.h | 273 --- .../ethernet/ramaxel/spnic/hw/sphw_common.c | 88 - .../ethernet/ramaxel/spnic/hw/sphw_common.h | 106 - .../net/ethernet/ramaxel/spnic/hw/sphw_crm.h | 982 --------- .../net/ethernet/ramaxel/spnic/hw/sphw_csr.h | 158 -- .../net/ethernet/ramaxel/spnic/hw/sphw_eqs.c | 1272 ------------ .../net/ethernet/ramaxel/spnic/hw/sphw_eqs.h | 157 -- .../net/ethernet/ramaxel/spnic/hw/sphw_hw.h | 643 ------ .../ethernet/ramaxel/spnic/hw/sphw_hw_cfg.c | 1341 ------------ .../ethernet/ramaxel/spnic/hw/sphw_hw_cfg.h | 329 --- .../ethernet/ramaxel/spnic/hw/sphw_hw_comm.c | 1280 ------------ .../ethernet/ramaxel/spnic/hw/sphw_hw_comm.h | 45 - .../ethernet/ramaxel/spnic/hw/sphw_hwdev.c | 1324 ------------ .../ethernet/ramaxel/spnic/hw/sphw_hwdev.h | 93 - .../net/ethernet/ramaxel/spnic/hw/sphw_hwif.c | 886 -------- .../net/ethernet/ramaxel/spnic/hw/sphw_hwif.h | 102 - .../net/ethernet/ramaxel/spnic/hw/sphw_mbox.c | 1792 ----------------- .../net/ethernet/ramaxel/spnic/hw/sphw_mbox.h | 271 --- .../net/ethernet/ramaxel/spnic/hw/sphw_mgmt.c | 895 -------- .../net/ethernet/ramaxel/spnic/hw/sphw_mgmt.h | 106 - .../ramaxel/spnic/hw/sphw_mgmt_msg_base.h | 19 - .../net/ethernet/ramaxel/spnic/hw/sphw_mt.h | 533 ----- .../ramaxel/spnic/hw/sphw_prof_adap.c | 94 - .../ramaxel/spnic/hw/sphw_prof_adap.h | 49 - .../ethernet/ramaxel/spnic/hw/sphw_profile.h | 36 - .../net/ethernet/ramaxel/spnic/hw/sphw_wq.c | 152 -- .../net/ethernet/ramaxel/spnic/hw/sphw_wq.h | 119 -- .../net/ethernet/ramaxel/spnic/spnic_dbg.c | 752 ------- .../net/ethernet/ramaxel/spnic/spnic_dcb.c | 965 --------- .../net/ethernet/ramaxel/spnic/spnic_dcb.h | 56 - .../ethernet/ramaxel/spnic/spnic_dev_mgmt.c | 811 -------- .../ethernet/ramaxel/spnic/spnic_dev_mgmt.h | 78 - .../ethernet/ramaxel/spnic/spnic_ethtool.c | 994 --------- .../ramaxel/spnic/spnic_ethtool_stats.c | 1035 ---------- .../net/ethernet/ramaxel/spnic/spnic_filter.c | 411 ---- .../net/ethernet/ramaxel/spnic/spnic_irq.c | 178 -- .../net/ethernet/ramaxel/spnic/spnic_lld.c | 937 --------- .../net/ethernet/ramaxel/spnic/spnic_lld.h | 75 - .../ethernet/ramaxel/spnic/spnic_mag_cfg.c | 778 ------- .../ethernet/ramaxel/spnic/spnic_mag_cmd.h | 643 ------ .../net/ethernet/ramaxel/spnic/spnic_main.c | 924 --------- .../ramaxel/spnic/spnic_mgmt_interface.h | 605 ------ .../ethernet/ramaxel/spnic/spnic_netdev_ops.c | 1526 -------------- .../net/ethernet/ramaxel/spnic/spnic_nic.h | 148 -- .../ethernet/ramaxel/spnic/spnic_nic_cfg.c | 1334 ------------ .../ethernet/ramaxel/spnic/spnic_nic_cfg.h | 709 ------- .../ethernet/ramaxel/spnic/spnic_nic_cfg_vf.c | 658 ------ .../ethernet/ramaxel/spnic/spnic_nic_cmd.h | 105 - .../ethernet/ramaxel/spnic/spnic_nic_dbg.c | 151 -- 
.../ethernet/ramaxel/spnic/spnic_nic_dbg.h | 16 - .../ethernet/ramaxel/spnic/spnic_nic_dev.h | 354 ---- .../ethernet/ramaxel/spnic/spnic_nic_event.c | 506 ----- .../net/ethernet/ramaxel/spnic/spnic_nic_io.c | 1123 ----------- .../net/ethernet/ramaxel/spnic/spnic_nic_io.h | 305 --- .../net/ethernet/ramaxel/spnic/spnic_nic_qp.h | 416 ---- .../net/ethernet/ramaxel/spnic/spnic_ntuple.c | 841 -------- .../ethernet/ramaxel/spnic/spnic_pci_id_tbl.h | 12 - .../net/ethernet/ramaxel/spnic/spnic_rss.c | 741 ------- .../net/ethernet/ramaxel/spnic/spnic_rss.h | 50 - .../ethernet/ramaxel/spnic/spnic_rss_cfg.c | 333 --- drivers/net/ethernet/ramaxel/spnic/spnic_rx.c | 1238 ------------ drivers/net/ethernet/ramaxel/spnic/spnic_rx.h | 118 -- .../net/ethernet/ramaxel/spnic/spnic_sriov.c | 200 -- .../net/ethernet/ramaxel/spnic/spnic_sriov.h | 24 - drivers/net/ethernet/ramaxel/spnic/spnic_tx.c | 877 -------- drivers/net/ethernet/ramaxel/spnic/spnic_tx.h | 129 -- 80 files changed, 37783 deletions(-) delete mode 100644 drivers/net/ethernet/ramaxel/Kconfig delete mode 100644 drivers/net/ethernet/ramaxel/Makefile delete mode 100644 drivers/net/ethernet/ramaxel/spnic/Kconfig delete mode 100644 drivers/net/ethernet/ramaxel/spnic/Makefile delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_api_cmd.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_api_cmd.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_cfg_cmd.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_cmdq.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_cmdq.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_comm_cmd.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_comm_msg_intf.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_common.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_common.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_crm.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_csr.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_eqs.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_eqs.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_cfg.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_cfg.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_comm.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_comm.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwdev.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwdev.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwif.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwif.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_mbox.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_mbox.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt_msg_base.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_mt.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_prof_adap.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_prof_adap.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_profile.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/hw/sphw_wq.c delete mode 100644 
drivers/net/ethernet/ramaxel/spnic/hw/sphw_wq.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_dbg.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_dcb.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_dcb.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_dev_mgmt.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_dev_mgmt.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_ethtool.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_ethtool_stats.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_filter.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_irq.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_lld.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_lld.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_mag_cfg.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_mag_cmd.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_main.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_mgmt_interface.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_netdev_ops.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg_vf.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_cmd.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_dbg.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_dbg.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_dev.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_event.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_io.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_io.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_nic_qp.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_ntuple.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_pci_id_tbl.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_rss.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_rss.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_rss_cfg.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_rx.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_rx.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_sriov.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_sriov.h delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_tx.c delete mode 100644 drivers/net/ethernet/ramaxel/spnic/spnic_tx.h
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig index 1fd250cc103c..5020f94eea34 100644 --- a/arch/arm64/configs/openeuler_defconfig +++ b/arch/arm64/configs/openeuler_defconfig @@ -2815,8 +2815,6 @@ CONFIG_NET_VENDOR_QUALCOMM=y # CONFIG_QCA7000_SPI is not set CONFIG_QCOM_EMAC=m # CONFIG_RMNET is not set -CONFIG_NET_VENDOR_RAMAXEL=y -CONFIG_SPNIC=m # CONFIG_NET_VENDOR_RDC is not set CONFIG_NET_VENDOR_REALTEK=y CONFIG_8139CP=m diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig index cd9aaba18fa4..351a89b54d7f 100644 --- a/arch/x86/configs/openeuler_defconfig +++ b/arch/x86/configs/openeuler_defconfig @@ -2792,8 +2792,6 @@ CONFIG_QED_ISCSI=y CONFIG_QED_FCOE=y CONFIG_QED_OOO=y # CONFIG_NET_VENDOR_QUALCOMM is not set -CONFIG_NET_VENDOR_RAMAXEL=y -CONFIG_SPNIC=m # CONFIG_NET_VENDOR_RDC is not set CONFIG_NET_VENDOR_REALTEK=y # CONFIG_ATP is not set diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 7d7f6a972c28..6998a8cb3faa 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -160,7 +160,6 @@ source "drivers/net/ethernet/pasemi/Kconfig" source "drivers/net/ethernet/pensando/Kconfig" source "drivers/net/ethernet/qlogic/Kconfig" source "drivers/net/ethernet/qualcomm/Kconfig" -source "drivers/net/ethernet/ramaxel/Kconfig" source "drivers/net/ethernet/rdc/Kconfig" source "drivers/net/ethernet/realtek/Kconfig" source "drivers/net/ethernet/renesas/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 5e375d5a6230..6a7d68ea63ed 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -71,7 +71,6 @@ obj-$(CONFIG_NET_VENDOR_PACKET_ENGINES) += packetengines/ obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/ obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/ obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/ -obj-$(CONFIG_NET_VENDOR_RAMAXEL) += ramaxel/ obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/ obj-$(CONFIG_NET_VENDOR_RENESAS) += renesas/ obj-$(CONFIG_NET_VENDOR_RDC) += rdc/ diff --git a/drivers/net/ethernet/ramaxel/Kconfig b/drivers/net/ethernet/ramaxel/Kconfig deleted file mode 100644 index 987c7eb4880b..000000000000 --- a/drivers/net/ethernet/ramaxel/Kconfig +++ /dev/null @@ -1,20 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -# -# Ramaxel driver configuration -# - -config NET_VENDOR_RAMAXEL - bool "Ramaxel devices" - default y - help - If you have a network (Ethernet) card belonging to this class, say Y. - Note that the answer to this question doesn't directly affect the - kernel: saying N will just cause the configurator to skip all - the questions about Ramaxel cards. If you say Y, you will be asked - for your specific card in the following questions. - -if NET_VENDOR_RAMAXEL - - source "drivers/net/ethernet/ramaxel/spnic/Kconfig" - -endif # NET_VENDOR_RAMAXEL diff --git a/drivers/net/ethernet/ramaxel/Makefile b/drivers/net/ethernet/ramaxel/Makefile deleted file mode 100644 index 087f570c2257..000000000000 --- a/drivers/net/ethernet/ramaxel/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -# -# Makefile for the Ramaxel device drivers. 
-# - -obj-$(CONFIG_SPNIC) += spnic/ \ No newline at end of file diff --git a/drivers/net/ethernet/ramaxel/spnic/Kconfig b/drivers/net/ethernet/ramaxel/spnic/Kconfig deleted file mode 100644 index d0cb81041e8c..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/Kconfig +++ /dev/null @@ -1,14 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -# -# Ramaxel SPNIC driver configuration -# - -config SPNIC - tristate "Ramaxel PCIE Network Interface Card" - default n - depends on PCI_MSI && NUMA && PCI_IOV && (X86 || ARM64) - help - This driver supports Ramaxel PCIE Ethernet cards. - To compile this driver as part of the kernel, choose Y here. - If unsure, choose N. - The default is N. diff --git a/drivers/net/ethernet/ramaxel/spnic/Makefile b/drivers/net/ethernet/ramaxel/spnic/Makefile deleted file mode 100644 index 207e1d9c431a..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/Makefile +++ /dev/null @@ -1,39 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_SPNIC) += spnic.o - -subdir-ccflags-y += -I$(srctree)/$(src)/hw - -spnic-objs := hw/sphw_common.o \ - hw/sphw_hwif.o \ - hw/sphw_eqs.o \ - hw/sphw_mbox.o \ - hw/sphw_api_cmd.o \ - hw/sphw_mgmt.o \ - hw/sphw_wq.o \ - hw/sphw_cmdq.o \ - hw/sphw_prof_adap.o \ - hw/sphw_hw_cfg.o \ - hw/sphw_hw_comm.o \ - hw/sphw_hwdev.o \ - spnic_sriov.o \ - spnic_lld.o \ - spnic_dev_mgmt.o \ - spnic_main.o \ - spnic_tx.o \ - spnic_rx.o \ - spnic_rss.o \ - spnic_ntuple.o \ - spnic_dcb.o \ - spnic_ethtool.o \ - spnic_ethtool_stats.o \ - spnic_dbg.o \ - spnic_irq.o \ - spnic_filter.o \ - spnic_netdev_ops.o \ - spnic_nic_cfg.o \ - spnic_mag_cfg.o \ - spnic_nic_cfg_vf.o \ - spnic_rss_cfg.o \ - spnic_nic_event.o \ - spnic_nic_io.o \ - spnic_nic_dbg.o diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_api_cmd.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_api_cmd.c deleted file mode 100644 index b459ca322515..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_api_cmd.c +++ /dev/null @@ -1,1165 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt - -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/completion.h> -#include <linux/kernel.h> -#include <linux/device.h> -#include <linux/pci.h> -#include <linux/dma-mapping.h> -#include <linux/semaphore.h> -#include <linux/jiffies.h> -#include <linux/delay.h> - -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "sphw_common.h" -#include "sphw_hwdev.h" -#include "sphw_csr.h" -#include "sphw_hwif.h" -#include "sphw_api_cmd.h" - -#define API_CMD_CHAIN_CELL_SIZE_SHIFT 6U - -#define API_CMD_CELL_DESC_SIZE 8 -#define API_CMD_CELL_DATA_ADDR_SIZE 8 - -#define API_CHAIN_NUM_CELLS 32 -#define API_CHAIN_CELL_SIZE 128 -#define API_CHAIN_RSP_DATA_SIZE 128 - -#define API_CMD_CELL_WB_ADDR_SIZE 8 - -#define API_CHAIN_CELL_ALIGNMENT 8 - -#define API_CMD_TIMEOUT 10000 -#define API_CMD_STATUS_TIMEOUT 100000 - -#define API_CMD_BUF_SIZE 2048ULL - -#define API_CMD_NODE_ALIGN_SIZE 512ULL -#define API_PAYLOAD_ALIGN_SIZE 64ULL - -#define API_CHAIN_RESP_ALIGNMENT 128ULL - -#define COMPLETION_TIMEOUT_DEFAULT 1000UL -#define POLLING_COMPLETION_TIMEOUT_DEFAULT 1000U - -#define API_CMD_RESPONSE_DATA_PADDR(val) be64_to_cpu(*((u64 *)(val))) - -#define READ_API_CMD_PRIV_DATA(id, token) ((((u32)(id)) << 16) + (token)) -#define WRITE_API_CMD_PRIV_DATA(id) (((u8)(id)) << 16) - -#define MASKED_IDX(chain, idx) ((idx) & ((chain)->num_cells - 1)) - -#define SIZE_4BYTES(size) (ALIGN((u32)(size), 4U) >> 2) 
-#define SIZE_8BYTES(size) (ALIGN((u32)(size), 8U) >> 3) - -enum api_cmd_data_format { - SGL_DATA = 1, -}; - -enum api_cmd_type { - API_CMD_WRITE_TYPE = 0, - API_CMD_READ_TYPE = 1, -}; - -enum api_cmd_bypass { - NOT_BYPASS = 0, - BYPASS = 1, -}; - -enum api_cmd_resp_aeq { - NOT_TRIGGER = 0, - TRIGGER = 1, -}; - -enum api_cmd_chn_code { - APICHN_0 = 0, -}; - -enum api_cmd_chn_rsvd { - APICHN_VALID = 0, - APICHN_INVALID = 1, -}; - -#define API_DESC_LEN 7 - -static u8 xor_chksum_set(void *data) -{ - int idx; - u8 checksum = 0; - u8 *val = data; - - for (idx = 0; idx < API_DESC_LEN; idx++) - checksum ^= val[idx]; - - return checksum; -} - -static void set_prod_idx(struct sphw_api_cmd_chain *chain) -{ - enum sphw_api_cmd_chain_type chain_type = chain->chain_type; - struct sphw_hwif *hwif = chain->hwdev->hwif; - u32 hw_prod_idx_addr = SPHW_CSR_API_CMD_CHAIN_PI_ADDR(chain_type); - u32 prod_idx = chain->prod_idx; - - sphw_hwif_write_reg(hwif, hw_prod_idx_addr, prod_idx); -} - -static u32 get_hw_cons_idx(struct sphw_api_cmd_chain *chain) -{ - u32 addr, val; - - addr = SPHW_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type); - val = sphw_hwif_read_reg(chain->hwdev->hwif, addr); - - return SPHW_API_CMD_STATUS_GET(val, CONS_IDX); -} - -static void dump_api_chain_reg(struct sphw_api_cmd_chain *chain) -{ - void *dev = chain->hwdev->dev_hdl; - u32 addr, val; - - addr = SPHW_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type); - val = sphw_hwif_read_reg(chain->hwdev->hwif, addr); - - sdk_err(dev, "Chain type: 0x%x, cpld error: 0x%x, check error: 0x%x, current fsm: 0x%x\n", - chain->chain_type, SPHW_API_CMD_STATUS_GET(val, CPLD_ERR), - SPHW_API_CMD_STATUS_GET(val, CHKSUM_ERR), - SPHW_API_CMD_STATUS_GET(val, FSM)); - - sdk_err(dev, "Chain hw current ci: 0x%x\n", - SPHW_API_CMD_STATUS_GET(val, CONS_IDX)); - - addr = SPHW_CSR_API_CMD_CHAIN_PI_ADDR(chain->chain_type); - val = sphw_hwif_read_reg(chain->hwdev->hwif, addr); - sdk_err(dev, "Chain hw current pi: 0x%x\n", val); -} - -/** - * chain_busy - check if the chain is still processing last requests - * @chain: chain to check - **/ -static int chain_busy(struct sphw_api_cmd_chain *chain) -{ - void *dev = chain->hwdev->dev_hdl; - struct sphw_api_cmd_cell_ctxt *ctxt; - u64 resp_header; - - ctxt = &chain->cell_ctxt[chain->prod_idx]; - - switch (chain->chain_type) { - case SPHW_API_CMD_MULTI_READ: - case SPHW_API_CMD_POLL_READ: - resp_header = be64_to_cpu(ctxt->resp->header); - if (ctxt->status && - !SPHW_API_CMD_RESP_HEADER_VALID(resp_header)) { - sdk_err(dev, "Context(0x%x) busy!, pi: %u, resp_header: 0x%08x%08x\n", - ctxt->status, chain->prod_idx, - upper_32_bits(resp_header), - lower_32_bits(resp_header)); - dump_api_chain_reg(chain); - return -EBUSY; - } - break; - case SPHW_API_CMD_POLL_WRITE: - case SPHW_API_CMD_WRITE_TO_MGMT_CPU: - case SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: - chain->cons_idx = get_hw_cons_idx(chain); - - if (chain->cons_idx == MASKED_IDX(chain, chain->prod_idx + 1)) { - sdk_err(dev, "API CMD chain %d is busy, cons_idx = %u, prod_idx = %u\n", - chain->chain_type, chain->cons_idx, - chain->prod_idx); - dump_api_chain_reg(chain); - return -EBUSY; - } - break; - default: - sdk_err(dev, "Unknown Chain type %d\n", chain->chain_type); - return -EINVAL; - } - - return 0; -} - -/** - * get_cell_data_size - get the data size of specific cell type - * @type: chain type - **/ -static u16 get_cell_data_size(enum sphw_api_cmd_chain_type type) -{ - u16 cell_data_size = 0; - - switch (type) { - case SPHW_API_CMD_POLL_READ: - cell_data_size = 
ALIGN(API_CMD_CELL_DESC_SIZE + - API_CMD_CELL_WB_ADDR_SIZE + - API_CMD_CELL_DATA_ADDR_SIZE, - API_CHAIN_CELL_ALIGNMENT); - break; - - case SPHW_API_CMD_WRITE_TO_MGMT_CPU: - case SPHW_API_CMD_POLL_WRITE: - case SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: - cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE + - API_CMD_CELL_DATA_ADDR_SIZE, - API_CHAIN_CELL_ALIGNMENT); - break; - default: - break; - } - - return cell_data_size; -} - -/** - * prepare_cell_ctrl - prepare the ctrl of the cell for the command - * @cell_ctrl: the control of the cell to set the control into it - * @cell_len: the size of the cell - **/ -static void prepare_cell_ctrl(u64 *cell_ctrl, u16 cell_len) -{ - u64 ctrl; - u8 chksum; - - ctrl = SPHW_API_CMD_CELL_CTRL_SET(SIZE_8BYTES(cell_len), CELL_LEN) | - SPHW_API_CMD_CELL_CTRL_SET(0ULL, RD_DMA_ATTR_OFF) | - SPHW_API_CMD_CELL_CTRL_SET(0ULL, WR_DMA_ATTR_OFF); - - chksum = xor_chksum_set(&ctrl); - - ctrl |= SPHW_API_CMD_CELL_CTRL_SET(chksum, XOR_CHKSUM); - - /* The data in the HW should be in Big Endian Format */ - *cell_ctrl = cpu_to_be64(ctrl); -} - -/** - * prepare_api_cmd - prepare API CMD command - * @chain: chain for the command - * @cell: the cell of the command - * @node_id: destination node on the card that will receive the command - * @cmd: command data - * @cmd_size: the command size - **/ -static void prepare_api_cmd(struct sphw_api_cmd_chain *chain, - struct sphw_api_cmd_cell *cell, u8 node_id, - const void *cmd, u16 cmd_size) -{ - struct sphw_api_cmd_cell_ctxt *cell_ctxt; - u32 priv; - - cell_ctxt = &chain->cell_ctxt[chain->prod_idx]; - - switch (chain->chain_type) { - case SPHW_API_CMD_POLL_READ: - priv = READ_API_CMD_PRIV_DATA(chain->chain_type, - cell_ctxt->saved_prod_idx); - cell->desc = SPHW_API_CMD_DESC_SET(SGL_DATA, API_TYPE) | - SPHW_API_CMD_DESC_SET(API_CMD_READ_TYPE, RD_WR) | - SPHW_API_CMD_DESC_SET(BYPASS, MGMT_BYPASS) | - SPHW_API_CMD_DESC_SET(NOT_TRIGGER, RESP_AEQE_EN) | - SPHW_API_CMD_DESC_SET(priv, PRIV_DATA); - break; - case SPHW_API_CMD_POLL_WRITE: - priv = WRITE_API_CMD_PRIV_DATA(chain->chain_type); - cell->desc = SPHW_API_CMD_DESC_SET(SGL_DATA, API_TYPE) | - SPHW_API_CMD_DESC_SET(API_CMD_WRITE_TYPE, RD_WR) | - SPHW_API_CMD_DESC_SET(BYPASS, MGMT_BYPASS) | - SPHW_API_CMD_DESC_SET(NOT_TRIGGER, RESP_AEQE_EN) | - SPHW_API_CMD_DESC_SET(priv, PRIV_DATA); - break; - case SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: - case SPHW_API_CMD_WRITE_TO_MGMT_CPU: - priv = WRITE_API_CMD_PRIV_DATA(chain->chain_type); - cell->desc = SPHW_API_CMD_DESC_SET(SGL_DATA, API_TYPE) | - SPHW_API_CMD_DESC_SET(API_CMD_WRITE_TYPE, RD_WR) | - SPHW_API_CMD_DESC_SET(NOT_BYPASS, MGMT_BYPASS) | - SPHW_API_CMD_DESC_SET(TRIGGER, RESP_AEQE_EN) | - SPHW_API_CMD_DESC_SET(priv, PRIV_DATA); - break; - default: - sdk_err(chain->hwdev->dev_hdl, "Unknown Chain type: %d\n", - chain->chain_type); - return; - } - - cell->desc |= SPHW_API_CMD_DESC_SET(APICHN_0, APICHN_CODE) | - SPHW_API_CMD_DESC_SET(APICHN_VALID, APICHN_RSVD); - - cell->desc |= SPHW_API_CMD_DESC_SET(node_id, DEST) | - SPHW_API_CMD_DESC_SET(SIZE_4BYTES(cmd_size), SIZE); - - cell->desc |= SPHW_API_CMD_DESC_SET(xor_chksum_set(&cell->desc), - XOR_CHKSUM); - - /* The data in the HW should be in Big Endian Format */ - cell->desc = cpu_to_be64(cell->desc); - - memcpy(cell_ctxt->api_cmd_vaddr, cmd, cmd_size); -} - -/** - * prepare_cell - prepare cell ctrl and cmd in the current producer cell - * @chain: chain for the command - * @node_id: destination node on the card that will receive the command - * @cmd: command data - * @cmd_size: the command size - * 
Return: 0 - success, negative - failure - **/ -static void prepare_cell(struct sphw_api_cmd_chain *chain, u8 node_id, - const void *cmd, u16 cmd_size) -{ - struct sphw_api_cmd_cell *curr_node; - u16 cell_size; - - curr_node = chain->curr_node; - - cell_size = get_cell_data_size(chain->chain_type); - - prepare_cell_ctrl(&curr_node->ctrl, cell_size); - prepare_api_cmd(chain, curr_node, node_id, cmd, cmd_size); -} - -static inline void cmd_chain_prod_idx_inc(struct sphw_api_cmd_chain *chain) -{ - chain->prod_idx = MASKED_IDX(chain, chain->prod_idx + 1); -} - -static void issue_api_cmd(struct sphw_api_cmd_chain *chain) -{ - set_prod_idx(chain); -} - -/** - * api_cmd_status_update - update the status of the chain - * @chain: chain to update - **/ -static void api_cmd_status_update(struct sphw_api_cmd_chain *chain) -{ - struct sphw_api_cmd_status *wb_status; - enum sphw_api_cmd_chain_type chain_type; - u64 status_header; - u32 buf_desc; - - wb_status = chain->wb_status; - - buf_desc = be32_to_cpu(wb_status->buf_desc); - if (SPHW_API_CMD_STATUS_GET(buf_desc, CHKSUM_ERR)) - return; - - status_header = be64_to_cpu(wb_status->header); - chain_type = SPHW_API_CMD_STATUS_HEADER_GET(status_header, CHAIN_ID); - if (chain_type >= SPHW_API_CMD_MAX) - return; - - if (chain_type != chain->chain_type) - return; - - chain->cons_idx = SPHW_API_CMD_STATUS_GET(buf_desc, CONS_IDX); -} - -static enum sphw_wait_return wait_for_status_poll_handler(void *priv_data) -{ - struct sphw_api_cmd_chain *chain = priv_data; - - if (!chain->hwdev->chip_present_flag) - return WAIT_PROCESS_ERR; - - api_cmd_status_update(chain); - /* SYNC API CMD cmd should start after prev cmd finished */ - if (chain->cons_idx == chain->prod_idx) - return WAIT_PROCESS_CPL; - - return WAIT_PROCESS_WAITING; -} - -/** - * wait_for_status_poll - wait for write to mgmt command to complete - * @chain: the chain of the command - * Return: 0 - success, negative - failure - **/ -static int wait_for_status_poll(struct sphw_api_cmd_chain *chain) -{ - return sphw_wait_for_timeout(chain, wait_for_status_poll_handler, - API_CMD_STATUS_TIMEOUT, 100); -} - -static void copy_resp_data(struct sphw_api_cmd_cell_ctxt *ctxt, void *ack, - u16 ack_size) -{ - struct sphw_api_cmd_resp_fmt *resp = ctxt->resp; - - memcpy(ack, &resp->resp_data, ack_size); - ctxt->status = 0; -} - -static enum sphw_wait_return check_cmd_resp_handler(void *priv_data) -{ - struct sphw_api_cmd_cell_ctxt *ctxt = priv_data; - u64 resp_header; - u8 resp_status; - - resp_header = be64_to_cpu(ctxt->resp->header); - rmb(); /* read the latest header */ - - if (SPHW_API_CMD_RESP_HEADER_VALID(resp_header)) { - resp_status = SPHW_API_CMD_RESP_HEAD_GET(resp_header, STATUS); - if (resp_status) { - pr_err("Api chain response data err, status: %u\n", - resp_status); - return WAIT_PROCESS_ERR; - } - - return WAIT_PROCESS_CPL; - } - - return WAIT_PROCESS_WAITING; -} - -/** - * prepare_cell - polling for respense data of the read api-command - * @chain: pointer to api cmd chain - * - * Return: 0 - success, negative - failure - **/ -static int wait_for_resp_polling(struct sphw_api_cmd_cell_ctxt *ctxt) -{ - return sphw_wait_for_timeout(ctxt, check_cmd_resp_handler, - POLLING_COMPLETION_TIMEOUT_DEFAULT, - USEC_PER_MSEC); -} - -/** - * wait_for_api_cmd_completion - wait for command to complete - * @chain: chain for the command - * Return: 0 - success, negative - failure - **/ -static int wait_for_api_cmd_completion(struct sphw_api_cmd_chain *chain, - struct sphw_api_cmd_cell_ctxt *ctxt, - void *ack, u16 ack_size) 
-{ - void *dev = chain->hwdev->dev_hdl; - int err = 0; - - switch (chain->chain_type) { - case SPHW_API_CMD_POLL_READ: - err = wait_for_resp_polling(ctxt); - if (!err) - copy_resp_data(ctxt, ack, ack_size); - else - sdk_err(dev, "API CMD poll response timeout\n"); - break; - case SPHW_API_CMD_POLL_WRITE: - case SPHW_API_CMD_WRITE_TO_MGMT_CPU: - err = wait_for_status_poll(chain); - if (err) { - sdk_err(dev, "API CMD Poll status timeout, chain type: %d\n", - chain->chain_type); - break; - } - break; - case SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: - /* No need to wait */ - break; - default: - sdk_err(dev, "Unknown API CMD Chain type: %d\n", - chain->chain_type); - err = -EINVAL; - break; - } - - if (err) - dump_api_chain_reg(chain); - - return err; -} - -static inline void update_api_cmd_ctxt(struct sphw_api_cmd_chain *chain, - struct sphw_api_cmd_cell_ctxt *ctxt) -{ - ctxt->status = 1; - ctxt->saved_prod_idx = chain->prod_idx; - if (ctxt->resp) { - ctxt->resp->header = 0; - - /* make sure "header" was cleared */ - wmb(); - } -} - -/** - * api_cmd - API CMD command - * @chain: chain for the command - * @node_id: destination node on the card that will receive the command - * @cmd: command data - * @size: the command size - * Return: 0 - success, negative - failure - **/ -static int api_cmd(struct sphw_api_cmd_chain *chain, u8 node_id, - const void *cmd, u16 cmd_size, void *ack, u16 ack_size) -{ - struct sphw_api_cmd_cell_ctxt *ctxt = NULL; - - if (chain->chain_type == SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) - spin_lock(&chain->async_lock); - else - down(&chain->sem); - ctxt = &chain->cell_ctxt[chain->prod_idx]; - if (chain_busy(chain)) { - if (chain->chain_type == SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) - spin_unlock(&chain->async_lock); - else - up(&chain->sem); - return -EBUSY; - } - update_api_cmd_ctxt(chain, ctxt); - - prepare_cell(chain, node_id, cmd, cmd_size); - - cmd_chain_prod_idx_inc(chain); - - wmb(); /* issue the command */ - - issue_api_cmd(chain); - - /* incremented prod idx, update ctxt */ - - chain->curr_node = chain->cell_ctxt[chain->prod_idx].cell_vaddr; - if (chain->chain_type == SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) - spin_unlock(&chain->async_lock); - else - up(&chain->sem); - - return wait_for_api_cmd_completion(chain, ctxt, ack, ack_size); -} - -/** - * sphw_api_cmd_write - Write API CMD command - * @chain: chain for write command - * @node_id: destination node on the card that will receive the command - * @cmd: command data - * @size: the command size - * Return: 0 - success, negative - failure - **/ -int sphw_api_cmd_write(struct sphw_api_cmd_chain *chain, u8 node_id, const void *cmd, u16 size) -{ - /* Verify the chain type */ - return api_cmd(chain, node_id, cmd, size, NULL, 0); -} - -/** - * sphw_api_cmd_read - Read API CMD command - * @chain: chain for read command - * @node_id: destination node on the card that will receive the command - * @cmd: command data - * @size: the command size - * Return: 0 - success, negative - failure - **/ -int sphw_api_cmd_read(struct sphw_api_cmd_chain *chain, u8 node_id, const void *cmd, u16 size, - void *ack, u16 ack_size) -{ - return api_cmd(chain, node_id, cmd, size, ack, ack_size); -} - -static enum sphw_wait_return check_chain_restart_handler(void *priv_data) -{ - struct sphw_api_cmd_chain *cmd_chain = priv_data; - u32 reg_addr, val; - - reg_addr = SPHW_CSR_API_CMD_CHAIN_REQ_ADDR(cmd_chain->chain_type); - val = sphw_hwif_read_reg(cmd_chain->hwdev->hwif, reg_addr); - if (!SPHW_API_CMD_CHAIN_REQ_GET(val, RESTART)) - return 
WAIT_PROCESS_CPL; - - return WAIT_PROCESS_WAITING; -} - -/** - * api_cmd_hw_restart - restart the chain in the HW - * @chain: the API CMD specific chain to restart - **/ -static int api_cmd_hw_restart(struct sphw_api_cmd_chain *cmd_chain) -{ - struct sphw_hwif *hwif = cmd_chain->hwdev->hwif; - u32 reg_addr, val; - - /* Read Modify Write */ - reg_addr = SPHW_CSR_API_CMD_CHAIN_REQ_ADDR(cmd_chain->chain_type); - val = sphw_hwif_read_reg(hwif, reg_addr); - - val = SPHW_API_CMD_CHAIN_REQ_CLEAR(val, RESTART); - val |= SPHW_API_CMD_CHAIN_REQ_SET(1, RESTART); - - sphw_hwif_write_reg(hwif, reg_addr, val); - - return sphw_wait_for_timeout(cmd_chain, check_chain_restart_handler, - API_CMD_TIMEOUT, USEC_PER_MSEC); -} - -/** - * api_cmd_ctrl_init - set the control register of a chain - * @chain: the API CMD specific chain to set control register for - **/ -static void api_cmd_ctrl_init(struct sphw_api_cmd_chain *chain) -{ - struct sphw_hwif *hwif = chain->hwdev->hwif; - u32 reg_addr, ctrl; - u32 size; - - /* Read Modify Write */ - reg_addr = SPHW_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type); - - size = (u32)ilog2(chain->cell_size >> API_CMD_CHAIN_CELL_SIZE_SHIFT); - - ctrl = sphw_hwif_read_reg(hwif, reg_addr); - - ctrl = SPHW_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) & - SPHW_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE); - - ctrl |= SPHW_API_CMD_CHAIN_CTRL_SET(0, AEQE_EN) | - SPHW_API_CMD_CHAIN_CTRL_SET(size, CELL_SIZE); - - sphw_hwif_write_reg(hwif, reg_addr, ctrl); -} - -/** - * api_cmd_set_status_addr - set the status address of a chain in the HW - * @chain: the API CMD specific chain to set status address for - **/ -static void api_cmd_set_status_addr(struct sphw_api_cmd_chain *chain) -{ - struct sphw_hwif *hwif = chain->hwdev->hwif; - u32 addr, val; - - addr = SPHW_CSR_API_CMD_STATUS_HI_ADDR(chain->chain_type); - val = upper_32_bits(chain->wb_status_paddr); - sphw_hwif_write_reg(hwif, addr, val); - - addr = SPHW_CSR_API_CMD_STATUS_LO_ADDR(chain->chain_type); - val = lower_32_bits(chain->wb_status_paddr); - sphw_hwif_write_reg(hwif, addr, val); -} - -/** - * api_cmd_set_num_cells - set the number cells of a chain in the HW - * @chain: the API CMD specific chain to set the number of cells for - **/ -static void api_cmd_set_num_cells(struct sphw_api_cmd_chain *chain) -{ - struct sphw_hwif *hwif = chain->hwdev->hwif; - u32 addr, val; - - addr = SPHW_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(chain->chain_type); - val = chain->num_cells; - sphw_hwif_write_reg(hwif, addr, val); -} - -/** - * api_cmd_head_init - set the head cell of a chain in the HW - * @chain: the API CMD specific chain to set the head for - **/ -static void api_cmd_head_init(struct sphw_api_cmd_chain *chain) -{ - struct sphw_hwif *hwif = chain->hwdev->hwif; - u32 addr, val; - - addr = SPHW_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(chain->chain_type); - val = upper_32_bits(chain->head_cell_paddr); - sphw_hwif_write_reg(hwif, addr, val); - - addr = SPHW_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(chain->chain_type); - val = lower_32_bits(chain->head_cell_paddr); - sphw_hwif_write_reg(hwif, addr, val); -} - -static enum sphw_wait_return check_chain_ready_handler(void *priv_data) -{ - struct sphw_api_cmd_chain *chain = priv_data; - u32 addr, val; - u32 hw_cons_idx; - - addr = SPHW_CSR_API_CMD_STATUS_0_ADDR(chain->chain_type); - val = sphw_hwif_read_reg(chain->hwdev->hwif, addr); - hw_cons_idx = SPHW_API_CMD_STATUS_GET(val, CONS_IDX); - - /* wait for HW cons idx to be updated */ - if (hw_cons_idx == chain->cons_idx) - return WAIT_PROCESS_CPL; - return WAIT_PROCESS_WAITING; -} 
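/* Illustrative sketch only, not part of the removed driver code above.
 * The restart/ready handlers in this file all follow the same contract:
 * sphw_wait_for_timeout() (defined elsewhere in the removed driver, not
 * shown in this hunk, so its exact behaviour is assumed here) repeatedly
 * invokes a handler that returns WAIT_PROCESS_CPL, WAIT_PROCESS_WAITING
 * or WAIT_PROCESS_ERR until completion, error or timeout. The demo_*
 * names below are hypothetical and exist only to model that contract.
 */
#include <stdio.h>

enum demo_wait_return {
	DEMO_WAIT_PROCESS_CPL,     /* condition met, stop polling        */
	DEMO_WAIT_PROCESS_WAITING, /* not ready yet, poll again          */
	DEMO_WAIT_PROCESS_ERR,     /* unrecoverable error, stop polling  */
};

typedef enum demo_wait_return (*demo_wait_handler)(void *priv);

/* Poll @handler up to @max_iters times; each iteration stands in for one
 * sleep interval of the real helper between hardware register reads.
 */
static int demo_wait_for_timeout(void *priv, demo_wait_handler handler,
				 unsigned int max_iters)
{
	unsigned int i;

	for (i = 0; i < max_iters; i++) {
		switch (handler(priv)) {
		case DEMO_WAIT_PROCESS_CPL:
			return 0;
		case DEMO_WAIT_PROCESS_ERR:
			return -1;
		case DEMO_WAIT_PROCESS_WAITING:
			break;
		}
	}
	return -1; /* timed out */
}

/* Hypothetical handler: pretends the hardware consumer index catches up
 * with the driver's after a few polls, like check_chain_ready_handler.
 */
static enum demo_wait_return demo_chain_ready(void *priv)
{
	unsigned int *pending_polls = priv;

	if (*pending_polls == 0)
		return DEMO_WAIT_PROCESS_CPL;

	(*pending_polls)--;
	return DEMO_WAIT_PROCESS_WAITING;
}

int main(void)
{
	unsigned int pending_polls = 3;
	int err = demo_wait_for_timeout(&pending_polls, demo_chain_ready, 10);

	printf("chain ready: %s\n", err ? "timeout" : "ok");
	return 0;
}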
- -/** - * wait_for_ready_chain - wait for the chain to be ready - * @chain: the API CMD specific chain to wait for - * Return: 0 - success, negative - failure - **/ -static int wait_for_ready_chain(struct sphw_api_cmd_chain *chain) -{ - return sphw_wait_for_timeout(chain, check_chain_ready_handler, - API_CMD_TIMEOUT, USEC_PER_MSEC); -} - -/** - * api_cmd_chain_hw_clean - clean the HW - * @chain: the API CMD specific chain - **/ -static void api_cmd_chain_hw_clean(struct sphw_api_cmd_chain *chain) -{ - struct sphw_hwif *hwif = chain->hwdev->hwif; - u32 addr, ctrl; - - addr = SPHW_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type); - - ctrl = sphw_hwif_read_reg(hwif, addr); - ctrl = SPHW_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_EN) & - SPHW_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) & - SPHW_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) & - SPHW_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) & - SPHW_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE); - - sphw_hwif_write_reg(hwif, addr, ctrl); -} - -/** - * api_cmd_chain_hw_init - initialize the chain in the HW - * @chain: the API CMD specific chain to initialize in HW - * Return: 0 - success, negative - failure - **/ -static int api_cmd_chain_hw_init(struct sphw_api_cmd_chain *chain) -{ - api_cmd_chain_hw_clean(chain); - - api_cmd_set_status_addr(chain); - - if (api_cmd_hw_restart(chain)) { - sdk_err(chain->hwdev->dev_hdl, "Failed to restart api_cmd_hw\n"); - return -EBUSY; - } - - api_cmd_ctrl_init(chain); - api_cmd_set_num_cells(chain); - api_cmd_head_init(chain); - - return wait_for_ready_chain(chain); -} - -/** - * alloc_cmd_buf - allocate a dma buffer for API CMD command - * @chain: the API CMD specific chain for the cmd - * @cell: the cell in the HW for the cmd - * @cell_idx: the index of the cell - * Return: 0 - success, negative - failure - **/ -static int alloc_cmd_buf(struct sphw_api_cmd_chain *chain, - struct sphw_api_cmd_cell *cell, u32 cell_idx) -{ - struct sphw_api_cmd_cell_ctxt *cell_ctxt; - void *dev = chain->hwdev->dev_hdl; - void *buf_vaddr; - u64 buf_paddr; - int err = 0; - - buf_vaddr = (u8 *)((u64)chain->buf_vaddr_base + - chain->buf_size_align * cell_idx); - buf_paddr = chain->buf_paddr_base + - chain->buf_size_align * cell_idx; - - cell_ctxt = &chain->cell_ctxt[cell_idx]; - - cell_ctxt->api_cmd_vaddr = buf_vaddr; - - /* set the cmd DMA address in the cell */ - switch (chain->chain_type) { - case SPHW_API_CMD_POLL_READ: - cell->read.hw_cmd_paddr = cpu_to_be64(buf_paddr); - break; - case SPHW_API_CMD_WRITE_TO_MGMT_CPU: - case SPHW_API_CMD_POLL_WRITE: - case SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: - /* The data in the HW should be in Big Endian Format */ - cell->write.hw_cmd_paddr = cpu_to_be64(buf_paddr); - break; - default: - sdk_err(dev, "Unknown API CMD Chain type: %d\n", - chain->chain_type); - err = -EINVAL; - break; - } - - return err; -} - -/** - * alloc_cmd_buf - allocate a resp buffer for API CMD command - * @chain: the API CMD specific chain for the cmd - * @cell: the cell in the HW for the cmd - * @cell_idx: the index of the cell - **/ -static void alloc_resp_buf(struct sphw_api_cmd_chain *chain, - struct sphw_api_cmd_cell *cell, u32 cell_idx) -{ - struct sphw_api_cmd_cell_ctxt *cell_ctxt; - void *resp_vaddr; - u64 resp_paddr; - - resp_vaddr = (u8 *)((u64)chain->rsp_vaddr_base + - chain->rsp_size_align * cell_idx); - resp_paddr = chain->rsp_paddr_base + - chain->rsp_size_align * cell_idx; - - cell_ctxt = &chain->cell_ctxt[cell_idx]; - - cell_ctxt->resp = resp_vaddr; - cell->read.hw_wb_resp_paddr = cpu_to_be64(resp_paddr); -} - -static 
int sphw_alloc_api_cmd_cell_buf(struct sphw_api_cmd_chain *chain, u32 cell_idx, - struct sphw_api_cmd_cell *node) -{ - void *dev = chain->hwdev->dev_hdl; - int err; - - /* For read chain, we should allocate buffer for the response data */ - if (chain->chain_type == SPHW_API_CMD_MULTI_READ || - chain->chain_type == SPHW_API_CMD_POLL_READ) - alloc_resp_buf(chain, node, cell_idx); - - switch (chain->chain_type) { - case SPHW_API_CMD_WRITE_TO_MGMT_CPU: - case SPHW_API_CMD_POLL_WRITE: - case SPHW_API_CMD_POLL_READ: - case SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: - err = alloc_cmd_buf(chain, node, cell_idx); - if (err) { - sdk_err(dev, "Failed to allocate cmd buffer\n"); - goto alloc_cmd_buf_err; - } - break; - /* For api command write and api command read, the data section - * is directly inserted in the cell, so no need to allocate. - */ - case SPHW_API_CMD_MULTI_READ: - chain->cell_ctxt[cell_idx].api_cmd_vaddr = - &node->read.hw_cmd_paddr; /* to do: who int this*/ - break; - default: - sdk_err(dev, "Unsupported API CMD chain type\n"); - err = -EINVAL; - goto alloc_cmd_buf_err; - } - - return 0; - -alloc_cmd_buf_err: - - return err; -} - -/** - * api_cmd_create_cell - create API CMD cell of specific chain - * @chain: the API CMD specific chain to create its cell - * @cell_idx: the cell index to create - * @pre_node: previous cell - * @node_vaddr: the virt addr of the cell - * Return: 0 - success, negative - failure - **/ -static int api_cmd_create_cell(struct sphw_api_cmd_chain *chain, u32 cell_idx, - struct sphw_api_cmd_cell *pre_node, - struct sphw_api_cmd_cell **node_vaddr) -{ - struct sphw_api_cmd_cell_ctxt *cell_ctxt; - struct sphw_api_cmd_cell *node; - void *cell_vaddr; - u64 cell_paddr; - int err; - - cell_vaddr = (void *)((u64)chain->cell_vaddr_base + - chain->cell_size_align * cell_idx); - cell_paddr = chain->cell_paddr_base + - chain->cell_size_align * cell_idx; - - cell_ctxt = &chain->cell_ctxt[cell_idx]; - cell_ctxt->cell_vaddr = cell_vaddr; - node = cell_ctxt->cell_vaddr; - - if (!pre_node) { - chain->head_node = cell_vaddr; - chain->head_cell_paddr = cell_paddr; - } else { - /* The data in the HW should be in Big Endian Format */ - pre_node->next_cell_paddr = cpu_to_be64(cell_paddr); - } - - /* Driver software should make sure that there is an empty API - * command cell at the end the chain - */ - node->next_cell_paddr = 0; - - err = sphw_alloc_api_cmd_cell_buf(chain, cell_idx, node); - if (err) - return err; - - *node_vaddr = node; - - return 0; -} - -/** - * api_cmd_create_cells - create API CMD cells for specific chain - * @chain: the API CMD specific chain - * Return: 0 - success, negative - failure - **/ -static int api_cmd_create_cells(struct sphw_api_cmd_chain *chain) -{ - struct sphw_api_cmd_cell *node = NULL, *pre_node = NULL; - void *dev = chain->hwdev->dev_hdl; - u32 cell_idx; - int err; - - for (cell_idx = 0; cell_idx < chain->num_cells; cell_idx++) { - err = api_cmd_create_cell(chain, cell_idx, pre_node, &node); - if (err) { - sdk_err(dev, "Failed to create API CMD cell\n"); - return err; - } - - pre_node = node; - } - - if (!node) - return -EFAULT; - - /* set the Final node to point on the start */ - node->next_cell_paddr = cpu_to_be64(chain->head_cell_paddr); - - /* set the current node to be the head */ - chain->curr_node = chain->head_node; - return 0; -} - -/** - * api_chain_init - initialize API CMD specific chain - * @chain: the API CMD specific chain to initialize - * @attr: attributes to set in the chain - * Return: 0 - success, negative - failure - **/ -static 
int api_chain_init(struct sphw_api_cmd_chain *chain, - struct sphw_api_cmd_chain_attr *attr) -{ - void *dev = chain->hwdev->dev_hdl; - size_t cell_ctxt_size; - size_t cells_buf_size; - int err; - - chain->chain_type = attr->chain_type; - chain->num_cells = attr->num_cells; - chain->cell_size = attr->cell_size; - chain->rsp_size = attr->rsp_size; - - chain->prod_idx = 0; - chain->cons_idx = 0; - - if (chain->chain_type == SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU) - spin_lock_init(&chain->async_lock); - else - sema_init(&chain->sem, 1); - - cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt); - if (!cell_ctxt_size) { - sdk_err(dev, "Api chain cell size cannot be zero\n"); - return -EINVAL; - } - - chain->cell_ctxt = kzalloc(cell_ctxt_size, GFP_KERNEL); - if (!chain->cell_ctxt) { - sdk_err(dev, "Failed to allocate cell contexts for a chain\n"); - return -ENOMEM; - } - - chain->wb_status = dma_alloc_coherent(dev, sizeof(*chain->wb_status), - &chain->wb_status_paddr, GFP_KERNEL); - if (!chain->wb_status) { - sdk_err(dev, "Failed to allocate DMA wb status\n"); - err = -ENOMEM; - goto alloc_wb_status_err; - } - - chain->cell_size_align = ALIGN((u64)chain->cell_size, - API_CMD_NODE_ALIGN_SIZE); - chain->rsp_size_align = ALIGN((u64)chain->rsp_size, - API_CHAIN_RESP_ALIGNMENT); - chain->buf_size_align = ALIGN(API_CMD_BUF_SIZE, API_PAYLOAD_ALIGN_SIZE); - - cells_buf_size = (chain->cell_size_align + chain->rsp_size_align + - chain->buf_size_align) * chain->num_cells; - - err = sphw_dma_alloc_coherent_align(dev, cells_buf_size, API_CMD_NODE_ALIGN_SIZE, - GFP_KERNEL, &chain->cells_addr); - if (err) { - sdk_err(dev, "Failed to allocate API CMD cells buffer\n"); - goto alloc_cells_buf_err; - } - - chain->cell_vaddr_base = chain->cells_addr.align_vaddr; - chain->cell_paddr_base = chain->cells_addr.align_paddr; - - chain->rsp_vaddr_base = (u8 *)((u64)chain->cell_vaddr_base + - chain->cell_size_align * chain->num_cells); - chain->rsp_paddr_base = chain->cell_paddr_base + - chain->cell_size_align * chain->num_cells; - - chain->buf_vaddr_base = (u8 *)((u64)chain->rsp_vaddr_base + - chain->rsp_size_align * chain->num_cells); - chain->buf_paddr_base = chain->rsp_paddr_base + - chain->rsp_size_align * chain->num_cells; - - return 0; - -alloc_cells_buf_err: - dma_free_coherent(dev, sizeof(*chain->wb_status), - chain->wb_status, chain->wb_status_paddr); - -alloc_wb_status_err: - kfree(chain->cell_ctxt); - - return err; -} - -/** - * api_chain_free - free API CMD specific chain - * @chain: the API CMD specific chain to free - **/ -static void api_chain_free(struct sphw_api_cmd_chain *chain) -{ - void *dev = chain->hwdev->dev_hdl; - - sphw_dma_free_coherent_align(dev, &chain->cells_addr); - - dma_free_coherent(dev, sizeof(*chain->wb_status), - chain->wb_status, chain->wb_status_paddr); - kfree(chain->cell_ctxt); -} - -/** - * api_cmd_create_chain - create API CMD specific chain - * @chain: the API CMD specific chain to create - * @attr: attributes to set in the chain - * Return: 0 - success, negative - failure - **/ -static int api_cmd_create_chain(struct sphw_api_cmd_chain **cmd_chain, - struct sphw_api_cmd_chain_attr *attr) -{ - struct sphw_hwdev *hwdev = attr->hwdev; - struct sphw_api_cmd_chain *chain = NULL; - int err; - - if (attr->num_cells & (attr->num_cells - 1)) { - sdk_err(hwdev->dev_hdl, "Invalid number of cells, must be power of 2\n"); - return -EINVAL; - } - - chain = kzalloc(sizeof(*chain), GFP_KERNEL); - if (!chain) - return -ENOMEM; - - chain->hwdev = hwdev; - - err = api_chain_init(chain, 
attr); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to initialize chain\n"); - goto chain_init_err; - } - - err = api_cmd_create_cells(chain); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to create cells for API CMD chain\n"); - goto create_cells_err; - } - - err = api_cmd_chain_hw_init(chain); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to initialize chain HW\n"); - goto chain_hw_init_err; - } - - *cmd_chain = chain; - return 0; - -chain_hw_init_err: -create_cells_err: - api_chain_free(chain); - -chain_init_err: - kfree(chain); - return err; -} - -/** - * api_cmd_destroy_chain - destroy API CMD specific chain - * @chain: the API CMD specific chain to destroy - **/ -static void api_cmd_destroy_chain(struct sphw_api_cmd_chain *chain) -{ - api_chain_free(chain); - kfree(chain); -} - -/** - * sphw_api_cmd_init - Initialize all the API CMD chains - * @hwif: the hardware interface of a pci function device - * @chain: the API CMD chains that will be initialized - * Return: 0 - success, negative - failure - **/ -int sphw_api_cmd_init(struct sphw_hwdev *hwdev, struct sphw_api_cmd_chain **chain) -{ - void *dev = hwdev->dev_hdl; - struct sphw_api_cmd_chain_attr attr; - enum sphw_api_cmd_chain_type chain_type, i; - int err; - - attr.hwdev = hwdev; - attr.num_cells = API_CHAIN_NUM_CELLS; - attr.cell_size = API_CHAIN_CELL_SIZE; - attr.rsp_size = API_CHAIN_RSP_DATA_SIZE; - - chain_type = SPHW_API_CMD_WRITE_TO_MGMT_CPU; - for (; chain_type < SPHW_API_CMD_MAX; chain_type++) { - attr.chain_type = chain_type; - - err = api_cmd_create_chain(&chain[chain_type], &attr); - if (err) { - sdk_err(dev, "Failed to create chain %d\n", chain_type); - goto create_chain_err; - } - } - - return 0; - -create_chain_err: - i = SPHW_API_CMD_WRITE_TO_MGMT_CPU; - for (; i < chain_type; i++) - api_cmd_destroy_chain(chain[i]); - - return err; -} - -/** - * sphw_api_cmd_free - free the API CMD chains - * @chain: the API CMD chains that will be freed - **/ -void sphw_api_cmd_free(struct sphw_api_cmd_chain **chain) -{ - enum sphw_api_cmd_chain_type chain_type; - - chain_type = SPHW_API_CMD_WRITE_TO_MGMT_CPU; - - for (; chain_type < SPHW_API_CMD_MAX; chain_type++) - api_cmd_destroy_chain(chain[chain_type]); -} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_api_cmd.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_api_cmd.h deleted file mode 100644 index 14a6c0b50e17..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_api_cmd.h +++ /dev/null @@ -1,277 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_API_CMD_H -#define SPHW_API_CMD_H - -#include "sphw_hwif.h" - -/*api_cmd_cell.ctrl structure*/ -#define SPHW_API_CMD_CELL_CTRL_CELL_LEN_SHIFT 0 -#define SPHW_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_SHIFT 16 -#define SPHW_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_SHIFT 24 -#define SPHW_API_CMD_CELL_CTRL_XOR_CHKSUM_SHIFT 56 - -#define SPHW_API_CMD_CELL_CTRL_CELL_LEN_MASK 0x3FU -#define SPHW_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_MASK 0x3FU -#define SPHW_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_MASK 0x3FU -#define SPHW_API_CMD_CELL_CTRL_XOR_CHKSUM_MASK 0xFFU - -#define SPHW_API_CMD_CELL_CTRL_SET(val, member) \ - ((((u64)(val)) & SPHW_API_CMD_CELL_CTRL_##member##_MASK) << \ - SPHW_API_CMD_CELL_CTRL_##member##_SHIFT) - -/*api_cmd_cell.desc structure*/ -#define SPHW_API_CMD_DESC_API_TYPE_SHIFT 0 -#define SPHW_API_CMD_DESC_RD_WR_SHIFT 1 -#define SPHW_API_CMD_DESC_MGMT_BYPASS_SHIFT 2 -#define SPHW_API_CMD_DESC_RESP_AEQE_EN_SHIFT 3 -#define SPHW_API_CMD_DESC_APICHN_RSVD_SHIFT 4 
-#define SPHW_API_CMD_DESC_APICHN_CODE_SHIFT 6 -#define SPHW_API_CMD_DESC_PRIV_DATA_SHIFT 8 -#define SPHW_API_CMD_DESC_DEST_SHIFT 32 -#define SPHW_API_CMD_DESC_SIZE_SHIFT 40 -#define SPHW_API_CMD_DESC_XOR_CHKSUM_SHIFT 56 - -#define SPHW_API_CMD_DESC_API_TYPE_MASK 0x1U -#define SPHW_API_CMD_DESC_RD_WR_MASK 0x1U -#define SPHW_API_CMD_DESC_MGMT_BYPASS_MASK 0x1U -#define SPHW_API_CMD_DESC_RESP_AEQE_EN_MASK 0x1U -#define SPHW_API_CMD_DESC_APICHN_RSVD_MASK 0x3U -#define SPHW_API_CMD_DESC_APICHN_CODE_MASK 0x3U -#define SPHW_API_CMD_DESC_PRIV_DATA_MASK 0xFFFFFFU -#define SPHW_API_CMD_DESC_DEST_MASK 0x1FU -#define SPHW_API_CMD_DESC_SIZE_MASK 0x7FFU -#define SPHW_API_CMD_DESC_XOR_CHKSUM_MASK 0xFFU - -#define SPHW_API_CMD_DESC_SET(val, member) \ - ((((u64)(val)) & SPHW_API_CMD_DESC_##member##_MASK) << \ - SPHW_API_CMD_DESC_##member##_SHIFT) - -/*api_cmd_status header*/ -#define SPHW_API_CMD_STATUS_HEADER_VALID_SHIFT 0 -#define SPHW_API_CMD_STATUS_HEADER_CHAIN_ID_SHIFT 16 - -#define SPHW_API_CMD_STATUS_HEADER_VALID_MASK 0xFFU -#define SPHW_API_CMD_STATUS_HEADER_CHAIN_ID_MASK 0xFFU - -#define SPHW_API_CMD_STATUS_HEADER_GET(val, member) \ - (((val) >> SPHW_API_CMD_STATUS_HEADER_##member##_SHIFT) & \ - SPHW_API_CMD_STATUS_HEADER_##member##_MASK) - -/*API_CHAIN_REQ CSR: 0x0020+api_idx*0x080*/ -#define SPHW_API_CMD_CHAIN_REQ_RESTART_SHIFT 1 -#define SPHW_API_CMD_CHAIN_REQ_WB_TRIGGER_SHIFT 2 - -#define SPHW_API_CMD_CHAIN_REQ_RESTART_MASK 0x1U -#define SPHW_API_CMD_CHAIN_REQ_WB_TRIGGER_MASK 0x1U - -#define SPHW_API_CMD_CHAIN_REQ_SET(val, member) \ - (((val) & SPHW_API_CMD_CHAIN_REQ_##member##_MASK) << \ - SPHW_API_CMD_CHAIN_REQ_##member##_SHIFT) - -#define SPHW_API_CMD_CHAIN_REQ_GET(val, member) \ - (((val) >> SPHW_API_CMD_CHAIN_REQ_##member##_SHIFT) & \ - SPHW_API_CMD_CHAIN_REQ_##member##_MASK) - -#define SPHW_API_CMD_CHAIN_REQ_CLEAR(val, member) \ - ((val) & (~(SPHW_API_CMD_CHAIN_REQ_##member##_MASK \ - << SPHW_API_CMD_CHAIN_REQ_##member##_SHIFT))) - -/*API_CHAIN_CTL CSR: 0x0014+api_idx*0x080*/ -#define SPHW_API_CMD_CHAIN_CTRL_RESTART_EN_SHIFT 1 -#define SPHW_API_CMD_CHAIN_CTRL_XOR_ERR_SHIFT 2 -#define SPHW_API_CMD_CHAIN_CTRL_AEQE_EN_SHIFT 4 -#define SPHW_API_CMD_CHAIN_CTRL_AEQ_ID_SHIFT 8 -#define SPHW_API_CMD_CHAIN_CTRL_XOR_CHK_EN_SHIFT 28 -#define SPHW_API_CMD_CHAIN_CTRL_CELL_SIZE_SHIFT 30 - -#define SPHW_API_CMD_CHAIN_CTRL_RESTART_EN_MASK 0x1U -#define SPHW_API_CMD_CHAIN_CTRL_XOR_ERR_MASK 0x1U -#define SPHW_API_CMD_CHAIN_CTRL_AEQE_EN_MASK 0x1U -#define SPHW_API_CMD_CHAIN_CTRL_AEQ_ID_MASK 0x3U -#define SPHW_API_CMD_CHAIN_CTRL_XOR_CHK_EN_MASK 0x3U -#define SPHW_API_CMD_CHAIN_CTRL_CELL_SIZE_MASK 0x3U - -#define SPHW_API_CMD_CHAIN_CTRL_SET(val, member) \ - (((val) & SPHW_API_CMD_CHAIN_CTRL_##member##_MASK) << \ - SPHW_API_CMD_CHAIN_CTRL_##member##_SHIFT) - -#define SPHW_API_CMD_CHAIN_CTRL_CLEAR(val, member) \ - ((val) & (~(SPHW_API_CMD_CHAIN_CTRL_##member##_MASK \ - << SPHW_API_CMD_CHAIN_CTRL_##member##_SHIFT))) - -/*api_cmd rsp header*/ -#define SPHW_API_CMD_RESP_HEAD_VALID_SHIFT 0 -#define SPHW_API_CMD_RESP_HEAD_STATUS_SHIFT 8 -#define SPHW_API_CMD_RESP_HEAD_CHAIN_ID_SHIFT 16 -#define SPHW_API_CMD_RESP_HEAD_RESP_LEN_SHIFT 24 -#define SPHW_API_CMD_RESP_HEAD_DRIVER_PRIV_SHIFT 40 - -#define SPHW_API_CMD_RESP_HEAD_VALID_MASK 0xFF -#define SPHW_API_CMD_RESP_HEAD_STATUS_MASK 0xFFU -#define SPHW_API_CMD_RESP_HEAD_CHAIN_ID_MASK 0xFFU -#define SPHW_API_CMD_RESP_HEAD_RESP_LEN_MASK 0x1FFU -#define SPHW_API_CMD_RESP_HEAD_DRIVER_PRIV_MASK 0xFFFFFFU - -#define SPHW_API_CMD_RESP_HEAD_VALID_CODE 0xFF - -#define 
SPHW_API_CMD_RESP_HEADER_VALID(val) \ - (((val) & SPHW_API_CMD_RESP_HEAD_VALID_MASK) == \ - SPHW_API_CMD_RESP_HEAD_VALID_CODE) - -#define SPHW_API_CMD_RESP_HEAD_GET(val, member) \ - (((val) >> SPHW_API_CMD_RESP_HEAD_##member##_SHIFT) & \ - SPHW_API_CMD_RESP_HEAD_##member##_MASK) - -#define SPHW_API_CMD_RESP_HEAD_CHAIN_ID(val) \ - (((val) >> SPHW_API_CMD_RESP_HEAD_CHAIN_ID_SHIFT) & \ - SPHW_API_CMD_RESP_HEAD_CHAIN_ID_MASK) - -#define SPHW_API_CMD_RESP_HEAD_DRIVER_PRIV(val) \ - ((u16)(((val) >> SPHW_API_CMD_RESP_HEAD_DRIVER_PRIV_SHIFT) & \ - SPHW_API_CMD_RESP_HEAD_DRIVER_PRIV_MASK)) -/*API_STATUS_0 CSR: 0x0030+api_idx*0x080*/ -#define SPHW_API_CMD_STATUS_CONS_IDX_MASK 0xFFFFFFU -#define SPHW_API_CMD_STATUS_CONS_IDX_SHIFT 0 - -#define SPHW_API_CMD_STATUS_FSM_MASK 0xFU -#define SPHW_API_CMD_STATUS_FSM_SHIFT 24 - -#define SPHW_API_CMD_STATUS_CHKSUM_ERR_MASK 0x3U -#define SPHW_API_CMD_STATUS_CHKSUM_ERR_SHIFT 28 - -#define SPHW_API_CMD_STATUS_CPLD_ERR_MASK 0x1U -#define SPHW_API_CMD_STATUS_CPLD_ERR_SHIFT 30 - -#define SPHW_API_CMD_STATUS_CONS_IDX(val) \ - ((val) & SPHW_API_CMD_STATUS_CONS_IDX_MASK) - -#define SPHW_API_CMD_STATUS_CHKSUM_ERR(val) \ - (((val) >> SPHW_API_CMD_STATUS_CHKSUM_ERR_SHIFT) & \ - SPHW_API_CMD_STATUS_CHKSUM_ERR_MASK) - -#define SPHW_API_CMD_STATUS_GET(val, member) \ - (((val) >> SPHW_API_CMD_STATUS_##member##_SHIFT) & \ - SPHW_API_CMD_STATUS_##member##_MASK) - -enum sphw_api_cmd_chain_type { - /* write to mgmt cpu command with completion */ - SPHW_API_CMD_WRITE_TO_MGMT_CPU = 2, - /* multi read command with completion notification - not used */ - SPHW_API_CMD_MULTI_READ = 3, - /* write command without completion notification */ - SPHW_API_CMD_POLL_WRITE = 4, - /* read command without completion notification */ - SPHW_API_CMD_POLL_READ = 5, - /* read from mgmt cpu command with completion */ - SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU = 6, - SPHW_API_CMD_MAX, -}; - -struct sphw_api_cmd_status { - u64 header; - u32 buf_desc; - u32 cell_addr_hi; - u32 cell_addr_lo; - u32 rsvd0; - u64 rsvd1; -}; - -/* HW struct */ -struct sphw_api_cmd_cell { - u64 ctrl; - - /* address is 64 bit in HW struct */ - u64 next_cell_paddr; - - u64 desc; - - /* HW struct */ - union { - struct { - u64 hw_cmd_paddr; - } write; - - struct { - u64 hw_wb_resp_paddr; - u64 hw_cmd_paddr; - } read; - }; -}; - -struct sphw_api_cmd_resp_fmt { - u64 header; - u64 resp_data; -}; - -struct sphw_api_cmd_cell_ctxt { - struct sphw_api_cmd_cell *cell_vaddr; - - void *api_cmd_vaddr; - - struct sphw_api_cmd_resp_fmt *resp; - - struct completion done; - int status; - - u32 saved_prod_idx; -}; - -struct sphw_api_cmd_chain_attr { - struct sphw_hwdev *hwdev; - enum sphw_api_cmd_chain_type chain_type; - - u32 num_cells; - u16 rsp_size; - u16 cell_size; -}; - -struct sphw_api_cmd_chain { - struct sphw_hwdev *hwdev; - enum sphw_api_cmd_chain_type chain_type; - - u32 num_cells; - u16 cell_size; - u16 rsp_size; - - /* HW members is 24 bit format */ - u32 prod_idx; - u32 cons_idx; - - struct semaphore sem; - /* Async cmd can not be scheduling */ - spinlock_t async_lock; - - dma_addr_t wb_status_paddr; - struct sphw_api_cmd_status *wb_status; - - dma_addr_t head_cell_paddr; - struct sphw_api_cmd_cell *head_node; - - struct sphw_api_cmd_cell_ctxt *cell_ctxt; - struct sphw_api_cmd_cell *curr_node; - - struct sphw_dma_addr_align cells_addr; - - u8 *cell_vaddr_base; - u64 cell_paddr_base; - u8 *rsp_vaddr_base; - u64 rsp_paddr_base; - u8 *buf_vaddr_base; - u64 buf_paddr_base; - u64 cell_size_align; - u64 rsp_size_align; - u64 buf_size_align; 
-}; - -int sphw_api_cmd_write(struct sphw_api_cmd_chain *chain, u8 node_id, const void *cmd, u16 size); - -int sphw_api_cmd_read(struct sphw_api_cmd_chain *chain, u8 node_id, const void *cmd, u16 size, - void *ack, u16 ack_size); - -int sphw_api_cmd_init(struct sphw_hwdev *hwdev, struct sphw_api_cmd_chain **chain); - -void sphw_api_cmd_free(struct sphw_api_cmd_chain **chain); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_cfg_cmd.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_cfg_cmd.h deleted file mode 100644 index 63b89e71c552..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_cfg_cmd.h +++ /dev/null @@ -1,127 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_CFG_CMD_H -#define SPHW_CFG_CMD_H - -#include "sphw_mgmt_msg_base.h" - -enum cfg_cmd { - CFG_CMD_GET_DEV_CAP = 0, -}; - -struct cfg_cmd_dev_cap { - struct mgmt_msg_head head; - - u16 func_id; - u16 rsvd1; - - /* Public resources */ - u8 host_id; - u8 ep_id; - u8 er_id; - u8 port_id; - - u16 host_total_func; - u8 host_pf_num; - u8 pf_id_start; - u16 host_vf_num; - u16 vf_id_start; - u8 host_oq_id_mask_val; - u8 rsvd_host[3]; - - u16 svc_cap_en; - u16 max_vf; - u8 flexq_en; - u8 valid_cos_bitmap; - /* Reserved for func_valid_cos_bitmap */ - u16 rsvd_func1; - u32 rsvd_func2; - - u8 sf_svc_attr; - u8 func_sf_en; - u8 lb_mode; - u8 smf_pg; - - u32 max_conn_num; - u16 max_stick2cache_num; - u16 max_bfilter_start_addr; - u16 bfilter_len; - u16 hash_bucket_num; - - /* shared resource */ - u8 host_sf_en; - u8 rsvd2_sr[3]; - u32 host_pctx_num; - u32 host_ccxt_num; - u32 host_scq_num; - u32 host_srq_num; - u32 host_mpt_num; - - /* l2nic */ - u16 nic_max_sq_id; - u16 nic_max_rq_id; - u32 rsvd_nic[3]; - - /* RoCE */ - u32 roce_max_qp; - u32 roce_max_cq; - u32 roce_max_srq; - u32 roce_max_mpt; - u32 roce_max_drc_qp; - - u32 roce_cmtt_cl_start; - u32 roce_cmtt_cl_end; - u32 roce_cmtt_cl_size; - - u32 roce_dmtt_cl_start; - u32 roce_dmtt_cl_end; - u32 roce_dmtt_cl_size; - - u32 roce_wqe_cl_start; - u32 roce_wqe_cl_end; - u32 roce_wqe_cl_size; - u8 roce_srq_container_mode; - u8 rsvd_roce1[3]; - u32 rsvd_roce2[5]; - - /* IPsec */ - u32 ipsec_max_sactx; - u32 rsvd_ipsec[3]; - - /* OVS */ - u32 ovs_max_qpc; - u16 fake_vf_start_id; - u8 fake_vf_num; - u8 rsvd_ovs1; - u32 rsvd_ovs2[2]; - - /* ToE */ - u32 toe_max_pctx; - u32 toe_max_cq; - u16 toe_max_srq; - u16 toe_srq_id_start; - u16 toe_max_mpt; - u16 toe_max_cctxt; - u32 rsvd_toe[2]; - - /* FC */ - u32 fc_max_pctx; - u32 fc_max_scq; - u32 fc_max_srq; - - u32 fc_max_cctx; - u32 fc_cctx_id_start; - - u8 fc_vp_id_start; - u8 fc_vp_id_end; - u8 rsvd_fc1[2]; - u32 rsvd_fc2[5]; - - /* VBS */ - u32 rsvd_vbs[4]; - - u32 rsvd_glb[11]; -}; - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_cmdq.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_cmdq.c deleted file mode 100644 index 9ebff6f8ac97..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_cmdq.c +++ /dev/null @@ -1,1573 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt - -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/device.h> -#include <linux/pci.h> -#include <linux/errno.h> -#include <linux/completion.h> -#include <linux/interrupt.h> -#include <linux/io.h> -#include <linux/spinlock.h> -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/delay.h> - -#include "sphw_crm.h" -#include "sphw_hw.h" 
-#include "sphw_hwdev.h" -#include "sphw_eqs.h" -#include "sphw_common.h" -#include "sphw_wq.h" -#include "sphw_hw_comm.h" -#include "sphw_cmdq.h" - -#define SPHW_CMDQ_BUF_SIZE 2048U -#define SPHW_CMDQ_BUF_HW_RSVD 8 -#define SPHW_CMDQ_MAX_DATA_SIZE \ - (SPHW_CMDQ_BUF_SIZE - SPHW_CMDQ_BUF_HW_RSVD) - -#define CMDQ_CMD_TIMEOUT 5000 /* millisecond */ - -#define UPPER_8_BITS(data) (((data) >> 8) & 0xFF) -#define LOWER_8_BITS(data) ((data) & 0xFF) - -#define CMDQ_DB_INFO_HI_PROD_IDX_SHIFT 0 -#define CMDQ_DB_INFO_HI_PROD_IDX_MASK 0xFFU -#define CMDQ_DB_INFO_SET(val, member) \ - ((((u32)(val)) & CMDQ_DB_INFO_##member##_MASK) << \ - CMDQ_DB_INFO_##member##_SHIFT) - -#define CMDQ_DB_HEAD_QUEUE_TYPE_SHIFT 23 -#define CMDQ_DB_HEAD_CMDQ_TYPE_SHIFT 24 -#define CMDQ_DB_HEAD_SRC_TYPE_SHIFT 27 -#define CMDQ_DB_HEAD_QUEUE_TYPE_MASK 0x1U -#define CMDQ_DB_HEAD_CMDQ_TYPE_MASK 0x7U -#define CMDQ_DB_HEAD_SRC_TYPE_MASK 0x1FU -#define CMDQ_DB_HEAD_SET(val, member) \ - ((((u32)(val)) & CMDQ_DB_HEAD_##member##_MASK) << \ - CMDQ_DB_HEAD_##member##_SHIFT) - -#define CMDQ_CTRL_PI_SHIFT 0 -#define CMDQ_CTRL_CMD_SHIFT 16 -#define CMDQ_CTRL_MOD_SHIFT 24 -#define CMDQ_CTRL_ACK_TYPE_SHIFT 29 -#define CMDQ_CTRL_HW_BUSY_BIT_SHIFT 31 - -#define CMDQ_CTRL_PI_MASK 0xFFFFU -#define CMDQ_CTRL_CMD_MASK 0xFFU -#define CMDQ_CTRL_MOD_MASK 0x1FU -#define CMDQ_CTRL_ACK_TYPE_MASK 0x3U -#define CMDQ_CTRL_HW_BUSY_BIT_MASK 0x1U - -#define CMDQ_CTRL_SET(val, member) \ - ((((u32)(val)) & CMDQ_CTRL_##member##_MASK) << \ - CMDQ_CTRL_##member##_SHIFT) - -#define CMDQ_CTRL_GET(val, member) \ - (((val) >> CMDQ_CTRL_##member##_SHIFT) & \ - CMDQ_CTRL_##member##_MASK) - -#define CMDQ_WQE_HEADER_BUFDESC_LEN_SHIFT 0 -#define CMDQ_WQE_HEADER_COMPLETE_FMT_SHIFT 15 -#define CMDQ_WQE_HEADER_DATA_FMT_SHIFT 22 -#define CMDQ_WQE_HEADER_COMPLETE_REQ_SHIFT 23 -#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_SHIFT 27 -#define CMDQ_WQE_HEADER_CTRL_LEN_SHIFT 29 -#define CMDQ_WQE_HEADER_HW_BUSY_BIT_SHIFT 31 - -#define CMDQ_WQE_HEADER_BUFDESC_LEN_MASK 0xFFU -#define CMDQ_WQE_HEADER_COMPLETE_FMT_MASK 0x1U -#define CMDQ_WQE_HEADER_DATA_FMT_MASK 0x1U -#define CMDQ_WQE_HEADER_COMPLETE_REQ_MASK 0x1U -#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_MASK 0x3U -#define CMDQ_WQE_HEADER_CTRL_LEN_MASK 0x3U -#define CMDQ_WQE_HEADER_HW_BUSY_BIT_MASK 0x1U - -#define CMDQ_WQE_HEADER_SET(val, member) \ - ((((u32)(val)) & CMDQ_WQE_HEADER_##member##_MASK) << \ - CMDQ_WQE_HEADER_##member##_SHIFT) - -#define CMDQ_WQE_HEADER_GET(val, member) \ - (((val) >> CMDQ_WQE_HEADER_##member##_SHIFT) & \ - CMDQ_WQE_HEADER_##member##_MASK) - -#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT 0 -#define CMDQ_CTXT_EQ_ID_SHIFT 53 -#define CMDQ_CTXT_CEQ_ARM_SHIFT 61 -#define CMDQ_CTXT_CEQ_EN_SHIFT 62 -#define CMDQ_CTXT_HW_BUSY_BIT_SHIFT 63 - -#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK 0xFFFFFFFFFFFFF -#define CMDQ_CTXT_EQ_ID_MASK 0xFF -#define CMDQ_CTXT_CEQ_ARM_MASK 0x1 -#define CMDQ_CTXT_CEQ_EN_MASK 0x1 -#define CMDQ_CTXT_HW_BUSY_BIT_MASK 0x1 - -#define CMDQ_CTXT_PAGE_INFO_SET(val, member) \ - (((u64)(val) & CMDQ_CTXT_##member##_MASK) << \ - CMDQ_CTXT_##member##_SHIFT) - -#define CMDQ_CTXT_PAGE_INFO_GET(val, member) \ - (((u64)(val) >> CMDQ_CTXT_##member##_SHIFT) & \ - CMDQ_CTXT_##member##_MASK) - -#define CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT 0 -#define CMDQ_CTXT_CI_SHIFT 52 - -#define CMDQ_CTXT_WQ_BLOCK_PFN_MASK 0xFFFFFFFFFFFFF -#define CMDQ_CTXT_CI_MASK 0xFFF - -#define CMDQ_CTXT_BLOCK_INFO_SET(val, member) \ - (((u64)(val) & CMDQ_CTXT_##member##_MASK) << \ - CMDQ_CTXT_##member##_SHIFT) - -#define CMDQ_CTXT_BLOCK_INFO_GET(val, 
member) \ - (((u64)(val) >> CMDQ_CTXT_##member##_SHIFT) & \ - CMDQ_CTXT_##member##_MASK) - -#define SAVED_DATA_ARM_SHIFT 31 - -#define SAVED_DATA_ARM_MASK 0x1U - -#define SAVED_DATA_SET(val, member) \ - (((val) & SAVED_DATA_##member##_MASK) << \ - SAVED_DATA_##member##_SHIFT) - -#define SAVED_DATA_CLEAR(val, member) \ - ((val) & (~(SAVED_DATA_##member##_MASK << \ - SAVED_DATA_##member##_SHIFT))) - -#define WQE_ERRCODE_VAL_SHIFT 0 - -#define WQE_ERRCODE_VAL_MASK 0x7FFFFFFF - -#define WQE_ERRCODE_GET(val, member) \ - (((val) >> WQE_ERRCODE_##member##_SHIFT) & \ - WQE_ERRCODE_##member##_MASK) - -#define CEQE_CMDQ_TYPE_SHIFT 0 - -#define CEQE_CMDQ_TYPE_MASK 0x7 - -#define CEQE_CMDQ_GET(val, member) \ - (((val) >> CEQE_CMDQ_##member##_SHIFT) & \ - CEQE_CMDQ_##member##_MASK) - -#define WQE_COMPLETED(ctrl_info) CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT) - -#define WQE_HEADER(wqe) ((struct sphw_cmdq_header *)(wqe)) - -#define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3) - -#define CMDQ_DB_ADDR(db_base, pi) \ - (((u8 *)(db_base)) + CMDQ_DB_PI_OFF(pi)) - -#define CMDQ_PFN_SHIFT 12 -#define CMDQ_PFN(addr) ((addr) >> CMDQ_PFN_SHIFT) - -#define FIRST_DATA_TO_WRITE_LAST sizeof(u64) - -#define WQE_LCMD_SIZE 64 -#define WQE_SCMD_SIZE 64 - -#define COMPLETE_LEN 3 - -#define CMDQ_WQEBB_SIZE 64 -#define CMDQ_WQE_SIZE 64 - -#define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \ - struct sphw_cmdqs, cmdq[0]) - -#define CMDQ_SEND_CMPT_CODE 10 -#define CMDQ_COMPLETE_CMPT_CODE 11 -#define CMDQ_FORCE_STOP_CMPT_CODE 12 - -enum cmdq_scmd_type { - CMDQ_SET_ARM_CMD = 2, -}; - -enum cmdq_wqe_type { - WQE_LCMD_TYPE, - WQE_SCMD_TYPE, -}; - -enum ctrl_sect_len { - CTRL_SECT_LEN = 1, - CTRL_DIRECT_SECT_LEN = 2, -}; - -enum bufdesc_len { - BUFDESC_LCMD_LEN = 2, - BUFDESC_SCMD_LEN = 3, -}; - -enum data_format { - DATA_SGE, - DATA_DIRECT, -}; - -enum completion_format { - COMPLETE_DIRECT, - COMPLETE_SGE, -}; - -enum completion_request { - CEQ_SET = 1, -}; - -enum cmdq_cmd_type { - SYNC_CMD_DIRECT_RESP, - SYNC_CMD_SGE_RESP, - ASYNC_CMD, -}; - -#define NUM_WQEBBS_FOR_CMDQ_WQE 1 - -bool sphw_cmdq_idle(struct sphw_cmdq *cmdq) -{ - return sphw_wq_is_empty(&cmdq->wq); -} - -static void *cmdq_read_wqe(struct sphw_wq *wq, u16 *ci) -{ - if (sphw_wq_is_empty(wq)) - return NULL; - - return sphw_wq_read_one_wqebb(wq, ci); -} - -static void *cmdq_get_wqe(struct sphw_wq *wq, u16 *pi) -{ - if (!sphw_wq_free_wqebbs(wq)) - return NULL; - - return sphw_wq_get_one_wqebb(wq, pi); -} - -struct sphw_cmd_buf *sphw_alloc_cmd_buf(void *hwdev) -{ - struct sphw_cmdqs *cmdqs = NULL; - struct sphw_cmd_buf *cmd_buf = NULL; - void *dev = NULL; - - if (!hwdev) { - pr_err("Failed to alloc cmd buf, Invalid hwdev\n"); - return NULL; - } - - cmdqs = ((struct sphw_hwdev *)hwdev)->cmdqs; - dev = ((struct sphw_hwdev *)hwdev)->dev_hdl; - - cmd_buf = kzalloc(sizeof(*cmd_buf), GFP_ATOMIC); - if (!cmd_buf) { - sdk_err(dev, "Failed to allocate cmd buf\n"); - return NULL; - } - - cmd_buf->buf = pci_pool_alloc(cmdqs->cmd_buf_pool, GFP_ATOMIC, - &cmd_buf->dma_addr); - if (!cmd_buf->buf) { - sdk_err(dev, "Failed to allocate cmdq cmd buf from the pool\n"); - goto alloc_pci_buf_err; - } - - atomic_set(&cmd_buf->ref_cnt, 1); - - return cmd_buf; - -alloc_pci_buf_err: - kfree(cmd_buf); - return NULL; -} - -void sphw_free_cmd_buf(void *hwdev, struct sphw_cmd_buf *cmd_buf) -{ - struct sphw_cmdqs *cmdqs = NULL; - - if (!hwdev || !cmd_buf) { - pr_err("Failed to free cmd buf: hwdev: %p, cmd_buf: %p\n", - hwdev, cmd_buf); - return; - } - - if 
(!atomic_dec_and_test(&cmd_buf->ref_cnt)) - return; - - cmdqs = ((struct sphw_hwdev *)hwdev)->cmdqs; - - pci_pool_free(cmdqs->cmd_buf_pool, cmd_buf->buf, cmd_buf->dma_addr); - kfree(cmd_buf); -} - -static void cmdq_set_completion(struct sphw_cmdq_completion *complete, - struct sphw_cmd_buf *buf_out) -{ - struct sphw_sge_resp *sge_resp = &complete->sge_resp; - - sphw_set_sge(&sge_resp->sge, buf_out->dma_addr, SPHW_CMDQ_BUF_SIZE); -} - -static void cmdq_set_lcmd_bufdesc(struct sphw_cmdq_wqe_lcmd *wqe, - struct sphw_cmd_buf *buf_in) -{ - sphw_set_sge(&wqe->buf_desc.sge, buf_in->dma_addr, buf_in->size); -} - -static void cmdq_set_inline_wqe_data(struct sphw_cmdq_inline_wqe *wqe, - const void *buf_in, u32 in_size) -{ - struct sphw_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd; - - wqe_scmd->buf_desc.buf_len = in_size; - memcpy(wqe_scmd->buf_desc.data, buf_in, in_size); -} - -static void cmdq_fill_db(struct sphw_cmdq_db *db, - enum sphw_cmdq_type cmdq_type, u16 prod_idx) -{ - db->db_info = CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX); - - db->db_head = CMDQ_DB_HEAD_SET(SPHW_DB_CMDQ_TYPE, QUEUE_TYPE) | - CMDQ_DB_HEAD_SET(cmdq_type, CMDQ_TYPE) | - CMDQ_DB_HEAD_SET(SPHW_DB_SRC_CMDQ_TYPE, SRC_TYPE); -} - -static void cmdq_set_db(struct sphw_cmdq *cmdq, - enum sphw_cmdq_type cmdq_type, u16 prod_idx) -{ - struct sphw_cmdq_db db = {0}; - - cmdq_fill_db(&db, cmdq_type, prod_idx); - - wmb(); /* write all before the doorbell */ - writeq(*((u64 *)&db), CMDQ_DB_ADDR(cmdq->db_base, prod_idx)); -} - -static void cmdq_wqe_fill(void *dst, const void *src) -{ - memcpy((u8 *)dst + FIRST_DATA_TO_WRITE_LAST, - (u8 *)src + FIRST_DATA_TO_WRITE_LAST, - CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST); - - wmb(); /* The first 8 bytes should be written last */ - - *(u64 *)dst = *(u64 *)src; -} - -static void cmdq_prepare_wqe_ctrl(struct sphw_cmdq_wqe *wqe, int wrapped, - u8 mod, u8 cmd, u16 prod_idx, - enum completion_format complete_format, - enum data_format data_format, - enum bufdesc_len buf_len) -{ - struct sphw_ctrl *ctrl = NULL; - enum ctrl_sect_len ctrl_len; - struct sphw_cmdq_wqe_lcmd *wqe_lcmd = NULL; - struct sphw_cmdq_wqe_scmd *wqe_scmd = NULL; - u32 saved_data = WQE_HEADER(wqe)->saved_data; - - if (data_format == DATA_SGE) { - wqe_lcmd = &wqe->wqe_lcmd; - - wqe_lcmd->status.status_info = 0; - ctrl = &wqe_lcmd->ctrl; - ctrl_len = CTRL_SECT_LEN; - } else { - wqe_scmd = &wqe->inline_wqe.wqe_scmd; - - wqe_scmd->status.status_info = 0; - ctrl = &wqe_scmd->ctrl; - ctrl_len = CTRL_DIRECT_SECT_LEN; - } - - ctrl->ctrl_info = CMDQ_CTRL_SET(prod_idx, PI) | - CMDQ_CTRL_SET(cmd, CMD) | - CMDQ_CTRL_SET(mod, MOD); - - WQE_HEADER(wqe)->header_info = - CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) | - CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) | - CMDQ_WQE_HEADER_SET(data_format, DATA_FMT) | - CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) | - CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) | - CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) | - CMDQ_WQE_HEADER_SET((u32)wrapped, HW_BUSY_BIT); - - if (cmd == CMDQ_SET_ARM_CMD && mod == SPHW_MOD_COMM) { - saved_data &= SAVED_DATA_CLEAR(saved_data, ARM); - WQE_HEADER(wqe)->saved_data = saved_data | - SAVED_DATA_SET(1, ARM); - } else { - saved_data &= SAVED_DATA_CLEAR(saved_data, ARM); - WQE_HEADER(wqe)->saved_data = saved_data; - } -} - -static void cmdq_set_lcmd_wqe(struct sphw_cmdq_wqe *wqe, - enum cmdq_cmd_type cmd_type, - struct sphw_cmd_buf *buf_in, - struct sphw_cmd_buf *buf_out, int wrapped, - u8 mod, u8 cmd, u16 prod_idx) -{ - struct sphw_cmdq_wqe_lcmd *wqe_lcmd = 
&wqe->wqe_lcmd; - enum completion_format complete_format = COMPLETE_DIRECT; - - switch (cmd_type) { - case SYNC_CMD_DIRECT_RESP: - wqe_lcmd->completion.direct_resp = 0; - break; - case SYNC_CMD_SGE_RESP: - if (buf_out) { - complete_format = COMPLETE_SGE; - cmdq_set_completion(&wqe_lcmd->completion, - buf_out); - } - break; - case ASYNC_CMD: - wqe_lcmd->completion.direct_resp = 0; - wqe_lcmd->buf_desc.saved_async_buf = (u64)(buf_in); - break; - } - - cmdq_prepare_wqe_ctrl(wqe, wrapped, mod, cmd, prod_idx, complete_format, - DATA_SGE, BUFDESC_LCMD_LEN); - - cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in); -} - -static void cmdq_set_inline_wqe(struct sphw_cmdq_wqe *wqe, - enum cmdq_cmd_type cmd_type, - const void *buf_in, u16 in_size, - struct sphw_cmd_buf *buf_out, int wrapped, - u8 mod, u8 cmd, u16 prod_idx) -{ - struct sphw_cmdq_wqe_scmd *wqe_scmd = &wqe->inline_wqe.wqe_scmd; - enum completion_format complete_format = COMPLETE_DIRECT; - - switch (cmd_type) { - case SYNC_CMD_SGE_RESP: - complete_format = COMPLETE_SGE; - cmdq_set_completion(&wqe_scmd->completion, buf_out); - break; - case SYNC_CMD_DIRECT_RESP: - complete_format = COMPLETE_DIRECT; - wqe_scmd->completion.direct_resp = 0; - break; - default: - break; - } - - cmdq_prepare_wqe_ctrl(wqe, wrapped, mod, cmd, prod_idx, - complete_format, DATA_DIRECT, BUFDESC_SCMD_LEN); - - cmdq_set_inline_wqe_data(&wqe->inline_wqe, buf_in, in_size); -} - -static void cmdq_update_cmd_status(struct sphw_cmdq *cmdq, u16 prod_idx, - struct sphw_cmdq_wqe *wqe) -{ - struct sphw_cmdq_cmd_info *cmd_info; - struct sphw_cmdq_wqe_lcmd *wqe_lcmd; - u32 status_info; - - wqe_lcmd = &wqe->wqe_lcmd; - cmd_info = &cmdq->cmd_infos[prod_idx]; - - if (cmd_info->errcode) { - status_info = wqe_lcmd->status.status_info; - *cmd_info->errcode = WQE_ERRCODE_GET(status_info, VAL); - } - - if (cmd_info->direct_resp) - *cmd_info->direct_resp = wqe_lcmd->completion.direct_resp; -} - -static int sphw_cmdq_sync_timeout_check(struct sphw_cmdq *cmdq, struct sphw_cmdq_wqe *wqe, u16 pi) -{ - struct sphw_cmdq_wqe_lcmd *wqe_lcmd; - u32 ctrl_info; - - wqe_lcmd = &wqe->wqe_lcmd; - ctrl_info = wqe_lcmd->ctrl.ctrl_info; - if (!WQE_COMPLETED(ctrl_info)) { - sdk_info(cmdq->hwdev->dev_hdl, "Cmdq sync command check busy bit not set\n"); - return -EFAULT; - } - - cmdq_update_cmd_status(cmdq, pi, wqe); - - sdk_info(cmdq->hwdev->dev_hdl, "Cmdq sync command check succeed\n"); - return 0; -} - -static void clear_cmd_info(struct sphw_cmdq_cmd_info *cmd_info, - struct sphw_cmdq_cmd_info *saved_cmd_info) -{ - if (cmd_info->errcode == saved_cmd_info->errcode) - cmd_info->errcode = NULL; - - if (cmd_info->done == saved_cmd_info->done) - cmd_info->done = NULL; - - if (cmd_info->direct_resp == saved_cmd_info->direct_resp) - cmd_info->direct_resp = NULL; -} - -static int cmdq_ceq_handler_status(struct sphw_cmdq *cmdq, - struct sphw_cmdq_cmd_info *cmd_info, - struct sphw_cmdq_cmd_info *saved_cmd_info, - u64 curr_msg_id, u16 curr_prod_idx, - struct sphw_cmdq_wqe *curr_wqe, - u32 timeout) -{ - ulong timeo; - int err; - - timeo = msecs_to_jiffies(timeout); - if (wait_for_completion_timeout(saved_cmd_info->done, timeo)) - return 0; - - spin_lock_bh(&cmdq->cmdq_lock); - - if (cmd_info->cmpt_code == saved_cmd_info->cmpt_code) - cmd_info->cmpt_code = NULL; - - if (*saved_cmd_info->cmpt_code == CMDQ_COMPLETE_CMPT_CODE) { - sdk_info(cmdq->hwdev->dev_hdl, "Cmdq direct sync command has been completed\n"); - spin_unlock_bh(&cmdq->cmdq_lock); - return 0; - } - - if (curr_msg_id == cmd_info->cmdq_msg_id) { - err = 
sphw_cmdq_sync_timeout_check(cmdq, curr_wqe, curr_prod_idx); - if (err) - cmd_info->cmd_type = SPHW_CMD_TYPE_TIMEOUT; - else - cmd_info->cmd_type = SPHW_CMD_TYPE_FAKE_TIMEOUT; - } else { - err = -ETIMEDOUT; - sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync command current msg id dismatch with cmd_info msg id\n"); - } - - clear_cmd_info(cmd_info, saved_cmd_info); - - spin_unlock_bh(&cmdq->cmdq_lock); - - if (!err) - return 0; - - sphw_dump_ceq_info(cmdq->hwdev); - - return -ETIMEDOUT; -} - -static int wait_cmdq_sync_cmd_completion(struct sphw_cmdq *cmdq, - struct sphw_cmdq_cmd_info *cmd_info, - struct sphw_cmdq_cmd_info *saved_cmd_info, - u64 curr_msg_id, u16 curr_prod_idx, - struct sphw_cmdq_wqe *curr_wqe, u32 timeout) -{ - return cmdq_ceq_handler_status(cmdq, cmd_info, saved_cmd_info, - curr_msg_id, curr_prod_idx, - curr_wqe, timeout); -} - -static int cmdq_msg_lock(struct sphw_cmdq *cmdq, u16 channel) -{ - struct sphw_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq); - - /* Keep wrapped and doorbell index correct. bh - for tasklet(ceq) */ - spin_lock_bh(&cmdq->cmdq_lock); - - if (cmdqs->lock_channel_en && test_bit(channel, &cmdqs->channel_stop)) { - spin_unlock_bh(&cmdq->cmdq_lock); - return -EAGAIN; - } - - return 0; -} - -static void cmdq_msg_unlock(struct sphw_cmdq *cmdq) -{ - spin_unlock_bh(&cmdq->cmdq_lock); -} - -static void cmdq_clear_cmd_buf(struct sphw_cmdq_cmd_info *cmd_info, - struct sphw_hwdev *hwdev) -{ - if (cmd_info->buf_in) - sphw_free_cmd_buf(hwdev, cmd_info->buf_in); - - if (cmd_info->buf_out) - sphw_free_cmd_buf(hwdev, cmd_info->buf_out); - - cmd_info->buf_in = NULL; - cmd_info->buf_out = NULL; -} - -static void cmdq_set_cmd_buf(struct sphw_cmdq_cmd_info *cmd_info, - struct sphw_hwdev *hwdev, - struct sphw_cmd_buf *buf_in, - struct sphw_cmd_buf *buf_out) -{ - cmd_info->buf_in = buf_in; - cmd_info->buf_out = buf_out; - - if (buf_in) - atomic_inc(&buf_in->ref_cnt); - - if (buf_out) - atomic_inc(&buf_out->ref_cnt); -} - -static int cmdq_sync_cmd_direct_resp(struct sphw_cmdq *cmdq, u8 mod, - u8 cmd, struct sphw_cmd_buf *buf_in, - u64 *out_param, u32 timeout, u16 channel) -{ - struct sphw_wq *wq = &cmdq->wq; - struct sphw_cmdq_wqe *curr_wqe = NULL, wqe; - struct sphw_cmdq_cmd_info *cmd_info = NULL, saved_cmd_info; - struct completion done; - u16 curr_prod_idx, next_prod_idx; - int wrapped, errcode = 0; - int cmpt_code = CMDQ_SEND_CMPT_CODE; - u64 curr_msg_id; - int err; - - err = cmdq_msg_lock(cmdq, channel); - if (err) - return err; - - curr_wqe = cmdq_get_wqe(wq, &curr_prod_idx); - if (!curr_wqe) { - cmdq_msg_unlock(cmdq); - return -EBUSY; - } - - memset(&wqe, 0, sizeof(wqe)); - - wrapped = cmdq->wrapped; - - next_prod_idx = curr_prod_idx + NUM_WQEBBS_FOR_CMDQ_WQE; - if (next_prod_idx >= wq->q_depth) { - cmdq->wrapped = !cmdq->wrapped; - next_prod_idx -= (u16)wq->q_depth; - } - - cmd_info = &cmdq->cmd_infos[curr_prod_idx]; - - init_completion(&done); - - cmd_info->cmd_type = SPHW_CMD_TYPE_DIRECT_RESP; - cmd_info->done = &done; - cmd_info->errcode = &errcode; - cmd_info->direct_resp = out_param; - cmd_info->cmpt_code = &cmpt_code; - cmd_info->channel = channel; - cmdq_set_cmd_buf(cmd_info, cmdq->hwdev, buf_in, NULL); - - memcpy(&saved_cmd_info, cmd_info, sizeof(*cmd_info)); - - cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, NULL, - wrapped, mod, cmd, curr_prod_idx); - - /* CMDQ WQE is not shadow, therefore wqe will be written to wq */ - cmdq_wqe_fill(curr_wqe, &wqe); - - (cmd_info->cmdq_msg_id)++; - curr_msg_id = cmd_info->cmdq_msg_id; - - cmdq_set_db(cmdq, SPHW_CMDQ_SYNC, 
next_prod_idx); - - cmdq_msg_unlock(cmdq); - - timeout = timeout ? timeout : CMDQ_CMD_TIMEOUT; - err = wait_cmdq_sync_cmd_completion(cmdq, cmd_info, &saved_cmd_info, - curr_msg_id, curr_prod_idx, - curr_wqe, timeout); - if (err) { - sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync command(mod: %u, cmd: %u) timeout, prod idx: 0x%x\n", - mod, cmd, curr_prod_idx); - err = -ETIMEDOUT; - } - - if (cmpt_code == CMDQ_FORCE_STOP_CMPT_CODE) { - sdk_info(cmdq->hwdev->dev_hdl, "Force stop cmdq cmd, mod: %u, cmd: %u\n", - mod, cmd); - err = -EAGAIN; - } - - smp_rmb(); /* read error code after completion */ - - return err ? err : errcode; -} - -static int cmdq_sync_cmd_detail_resp(struct sphw_cmdq *cmdq, u8 mod, u8 cmd, - struct sphw_cmd_buf *buf_in, - struct sphw_cmd_buf *buf_out, - u64 *out_param, u32 timeout, u16 channel) -{ - struct sphw_wq *wq = &cmdq->wq; - struct sphw_cmdq_wqe *curr_wqe = NULL, wqe; - struct sphw_cmdq_cmd_info *cmd_info = NULL, saved_cmd_info; - struct completion done; - u16 curr_prod_idx, next_prod_idx; - int wrapped, errcode = 0; - int cmpt_code = CMDQ_SEND_CMPT_CODE; - u64 curr_msg_id; - int err; - - err = cmdq_msg_lock(cmdq, channel); - if (err) - return err; - - curr_wqe = cmdq_get_wqe(wq, &curr_prod_idx); - if (!curr_wqe) { - cmdq_msg_unlock(cmdq); - return -EBUSY; - } - - memset(&wqe, 0, sizeof(wqe)); - - wrapped = cmdq->wrapped; - - next_prod_idx = curr_prod_idx + NUM_WQEBBS_FOR_CMDQ_WQE; - if (next_prod_idx >= wq->q_depth) { - cmdq->wrapped = !cmdq->wrapped; - next_prod_idx -= (u16)wq->q_depth; - } - - cmd_info = &cmdq->cmd_infos[curr_prod_idx]; - - init_completion(&done); - - cmd_info->cmd_type = SPHW_CMD_TYPE_SGE_RESP; - cmd_info->done = &done; - cmd_info->errcode = &errcode; - cmd_info->direct_resp = out_param; - cmd_info->cmpt_code = &cmpt_code; - cmd_info->channel = channel; - cmdq_set_cmd_buf(cmd_info, cmdq->hwdev, buf_in, buf_out); - - memcpy(&saved_cmd_info, cmd_info, sizeof(*cmd_info)); - - cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_SGE_RESP, buf_in, buf_out, - wrapped, mod, cmd, curr_prod_idx); - - cmdq_wqe_fill(curr_wqe, &wqe); - - (cmd_info->cmdq_msg_id)++; - curr_msg_id = cmd_info->cmdq_msg_id; - - cmdq_set_db(cmdq, cmdq->cmdq_type, next_prod_idx); - - cmdq_msg_unlock(cmdq); - - timeout = timeout ? timeout : CMDQ_CMD_TIMEOUT; - err = wait_cmdq_sync_cmd_completion(cmdq, cmd_info, &saved_cmd_info, - curr_msg_id, curr_prod_idx, - curr_wqe, timeout); - if (err) { - sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync command(mod: %u, cmd: %u) timeout, prod idx: 0x%x\n", - mod, cmd, curr_prod_idx); - err = -ETIMEDOUT; - } - - if (cmpt_code == CMDQ_FORCE_STOP_CMPT_CODE) { - sdk_info(cmdq->hwdev->dev_hdl, "Force stop cmdq cmd, mod: %u, cmd: %u\n", - mod, cmd); - err = -EAGAIN; - } - - smp_rmb(); /* read error code after completion */ - - return err ? 
err : errcode; -} - -static int cmdq_async_cmd(struct sphw_cmdq *cmdq, u8 mod, u8 cmd, - struct sphw_cmd_buf *buf_in, u16 channel) -{ - struct sphw_cmdq_cmd_info *cmd_info = NULL; - struct sphw_wq *wq = &cmdq->wq; - u16 curr_prod_idx, next_prod_idx; - struct sphw_cmdq_wqe *curr_wqe = NULL, wqe; - int wrapped, err; - - err = cmdq_msg_lock(cmdq, channel); - if (err) - return err; - - curr_wqe = cmdq_get_wqe(wq, &curr_prod_idx); - if (!curr_wqe) { - cmdq_msg_unlock(cmdq); - return -EBUSY; - } - - memset(&wqe, 0, sizeof(wqe)); - - wrapped = cmdq->wrapped; - next_prod_idx = curr_prod_idx + NUM_WQEBBS_FOR_CMDQ_WQE; - if (next_prod_idx >= wq->q_depth) { - cmdq->wrapped = !cmdq->wrapped; - next_prod_idx -= (u16)wq->q_depth; - } - - cmdq_set_lcmd_wqe(&wqe, ASYNC_CMD, buf_in, NULL, wrapped, - mod, cmd, curr_prod_idx); - - cmdq_wqe_fill(curr_wqe, &wqe); - - cmd_info = &cmdq->cmd_infos[curr_prod_idx]; - cmd_info->cmd_type = SPHW_CMD_TYPE_ASYNC; - cmd_info->channel = channel; - /* The caller will not free the cmd_buf of the asynchronous command, - * so there is no need to increase the reference count here - */ - cmd_info->buf_in = buf_in; - - /* LB mode 1 compatible, cmdq 0 also for async, which is sync_no_wait */ - cmdq_set_db(cmdq, SPHW_CMDQ_SYNC, next_prod_idx); - - cmdq_msg_unlock(cmdq); - - return 0; -} - -int cmdq_set_arm_bit(struct sphw_cmdq *cmdq, const void *buf_in, - u16 in_size) -{ - struct sphw_wq *wq = &cmdq->wq; - struct sphw_cmdq_wqe *curr_wqe = NULL, wqe; - u16 curr_prod_idx, next_prod_idx; - int wrapped; - - /* Keep wrapped and doorbell index correct. bh - for tasklet(ceq) */ - spin_lock_bh(&cmdq->cmdq_lock); - - curr_wqe = cmdq_get_wqe(wq, &curr_prod_idx); - if (!curr_wqe) { - spin_unlock_bh(&cmdq->cmdq_lock); - return -EBUSY; - } - - memset(&wqe, 0, sizeof(wqe)); - - wrapped = cmdq->wrapped; - - next_prod_idx = curr_prod_idx + NUM_WQEBBS_FOR_CMDQ_WQE; - if (next_prod_idx >= wq->q_depth) { - cmdq->wrapped = !cmdq->wrapped; - next_prod_idx -= (u16)wq->q_depth; - } - - cmdq_set_inline_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, in_size, NULL, - wrapped, SPHW_MOD_COMM, CMDQ_SET_ARM_CMD, - curr_prod_idx); - - /* cmdq wqe is not shadow, therefore wqe will be written to wq */ - cmdq_wqe_fill(curr_wqe, &wqe); - - cmdq->cmd_infos[curr_prod_idx].cmd_type = SPHW_CMD_TYPE_SET_ARM; - - cmdq_set_db(cmdq, cmdq->cmdq_type, next_prod_idx); - - spin_unlock_bh(&cmdq->cmdq_lock); - - return 0; -} - -static int cmdq_params_valid(void *hwdev, struct sphw_cmd_buf *buf_in) -{ - if (!buf_in || !hwdev) { - pr_err("Invalid CMDQ buffer addr: %p or hwdev: %p\n", - buf_in, hwdev); - return -EINVAL; - } - - if (!buf_in->size || buf_in->size > SPHW_CMDQ_MAX_DATA_SIZE) { - pr_err("Invalid CMDQ buffer size: 0x%x\n", buf_in->size); - return -EINVAL; - } - - return 0; -} - -#define WAIT_CMDQ_ENABLE_TIMEOUT 300 -static int wait_cmdqs_enable(struct sphw_cmdqs *cmdqs) -{ - unsigned long end; - - end = jiffies + msecs_to_jiffies(WAIT_CMDQ_ENABLE_TIMEOUT); - do { - if (cmdqs->status & SPHW_CMDQ_ENABLE) - return 0; - } while (time_before(jiffies, end) && cmdqs->hwdev->chip_present_flag && - !cmdqs->disable_flag); - - cmdqs->disable_flag = 1; - - return -EBUSY; -} - -int sphw_cmdq_direct_resp(void *hwdev, u8 mod, u8 cmd, struct sphw_cmd_buf *buf_in, - u64 *out_param, u32 timeout, u16 channel) -{ - struct sphw_cmdqs *cmdqs = NULL; - int err = cmdq_params_valid(hwdev, buf_in); - - if (err) { - pr_err("Invalid CMDQ parameters\n"); - return err; - } - - /* to do : support send cmdq only when cmdq init*/ - if (!(((struct sphw_hwdev 
*)hwdev)->chip_present_flag)) - return -EPERM; - - cmdqs = ((struct sphw_hwdev *)hwdev)->cmdqs; - err = wait_cmdqs_enable(cmdqs); - if (err) { - sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disable\n"); - return err; - } - - err = cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[SPHW_CMDQ_SYNC], - mod, cmd, buf_in, out_param, - timeout, channel); - - if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag)) - return -ETIMEDOUT; - else - return err; -} - -int sphw_cmdq_detail_resp(void *hwdev, u8 mod, u8 cmd, struct sphw_cmd_buf *buf_in, - struct sphw_cmd_buf *buf_out, u64 *out_param, u32 timeout, u16 channel) -{ - struct sphw_cmdqs *cmdqs = NULL; - int err = cmdq_params_valid(hwdev, buf_in); - - if (err) - return err; - - cmdqs = ((struct sphw_hwdev *)hwdev)->cmdqs; - - if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag)) /*to do*/ - return -EPERM; - - err = wait_cmdqs_enable(cmdqs); - if (err) { - sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disable\n"); - return err; - } - - err = cmdq_sync_cmd_detail_resp(&cmdqs->cmdq[SPHW_CMDQ_SYNC], - mod, cmd, buf_in, buf_out, out_param, - timeout, channel); - if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag)) - return -ETIMEDOUT; - else - return err; -} - -int sphw_cos_id_detail_resp(void *hwdev, u8 mod, u8 cmd, u8 cos_id, struct sphw_cmd_buf *buf_in, - struct sphw_cmd_buf *buf_out, u64 *out_param, u32 timeout, u16 channel) -{ - struct sphw_cmdqs *cmdqs = NULL; - int err = cmdq_params_valid(hwdev, buf_in); - - if (err) - return err; - - cmdqs = ((struct sphw_hwdev *)hwdev)->cmdqs; - - if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag)) /*to do*/ - return -EPERM; - - err = wait_cmdqs_enable(cmdqs); - if (err) { - sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disable\n"); - return err; - } - - if (cos_id >= SPHW_MAX_CMDQ_TYPES) { - sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq id is invalid\n"); - return -EINVAL; - } - - err = cmdq_sync_cmd_detail_resp(&cmdqs->cmdq[cos_id], mod, cmd, - buf_in, buf_out, out_param, - timeout, channel); - if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag)) - return -ETIMEDOUT; - else - return err; -} - -int sphw_cmdq_async(void *hwdev, u8 mod, u8 cmd, struct sphw_cmd_buf *buf_in, u16 channel) -{ - struct sphw_cmdqs *cmdqs = NULL; - int err = cmdq_params_valid(hwdev, buf_in); - - if (err) - return err; - - cmdqs = ((struct sphw_hwdev *)hwdev)->cmdqs; - - if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag)) /*to do*/ - return -EPERM; - - err = wait_cmdqs_enable(cmdqs); - if (err) { - sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disable\n"); - return err; - } - /* LB mode 1 compatible, cmdq 0 also for async, which is sync_no_wait*/ - return cmdq_async_cmd(&cmdqs->cmdq[SPHW_CMDQ_SYNC], mod, - cmd, buf_in, channel); -} - -int sphw_set_arm_bit(void *hwdev, enum sphw_set_arm_type q_type, u16 q_id) -{ - struct sphw_cmdqs *cmdqs = NULL; - struct sphw_cmdq *cmdq = NULL; - struct sphw_cmdq_arm_bit arm_bit; - enum sphw_cmdq_type cmdq_type = SPHW_CMDQ_SYNC; - u16 in_size; - int err; - - if (!hwdev) - return -EINVAL; - - if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag))/* to do*/ - return -EPERM; - - cmdqs = ((struct sphw_hwdev *)hwdev)->cmdqs; - - if (!(cmdqs->status & SPHW_CMDQ_ENABLE)) - return -EBUSY; - - if (q_type == SPHW_SET_ARM_CMDQ) { - if (q_id >= SPHW_MAX_CMDQ_TYPES) - return -EFAULT; - - cmdq_type = q_id; - } - /* sq is using interrupt now, so we only need to set arm bit for cmdq, - * remove comment below if need to set sq arm bit - * else - * cmdq_type = SPHW_CMDQ_SYNC; - */ - - cmdq = &cmdqs->cmdq[cmdq_type]; - - arm_bit.q_type = q_type; 
- arm_bit.q_id = q_id; - in_size = sizeof(arm_bit); - - err = cmdq_set_arm_bit(cmdq, &arm_bit, in_size); - if (err) { - sdk_err(cmdqs->hwdev->dev_hdl, - "Failed to set arm for q_type: %d, qid %d\n", - q_type, q_id); - return err; - } - - return 0; -} - -static void clear_wqe_complete_bit(struct sphw_cmdq *cmdq, - struct sphw_cmdq_wqe *wqe, u16 ci) -{ - struct sphw_ctrl *ctrl = NULL; - u32 header_info = WQE_HEADER(wqe)->header_info; - enum data_format df = CMDQ_WQE_HEADER_GET(header_info, DATA_FMT); - - if (df == DATA_SGE) - ctrl = &wqe->wqe_lcmd.ctrl; - else - ctrl = &wqe->inline_wqe.wqe_scmd.ctrl; - - /* clear HW busy bit */ - ctrl->ctrl_info = 0; - cmdq->cmd_infos[ci].cmd_type = SPHW_CMD_TYPE_NONE; - - wmb(); /* verify wqe is clear */ - - sphw_wq_put_wqebbs(&cmdq->wq, NUM_WQEBBS_FOR_CMDQ_WQE); -} - -static void cmdq_sync_cmd_handler(struct sphw_cmdq *cmdq, - struct sphw_cmdq_wqe *wqe, u16 ci) -{ - spin_lock(&cmdq->cmdq_lock); - - cmdq_update_cmd_status(cmdq, ci, wqe); - - if (cmdq->cmd_infos[ci].cmpt_code) { - *cmdq->cmd_infos[ci].cmpt_code = CMDQ_COMPLETE_CMPT_CODE; - cmdq->cmd_infos[ci].cmpt_code = NULL; - } - - /* make sure cmpt_code operation before done operation */ - smp_rmb(); - - if (cmdq->cmd_infos[ci].done) { - complete(cmdq->cmd_infos[ci].done); - cmdq->cmd_infos[ci].done = NULL; - } - - spin_unlock(&cmdq->cmdq_lock); - - cmdq_clear_cmd_buf(&cmdq->cmd_infos[ci], cmdq->hwdev); - clear_wqe_complete_bit(cmdq, wqe, ci); -} - -static void cmdq_async_cmd_handler(struct sphw_hwdev *hwdev, - struct sphw_cmdq *cmdq, - struct sphw_cmdq_wqe *wqe, u16 ci) -{ - cmdq_clear_cmd_buf(&cmdq->cmd_infos[ci], hwdev); - clear_wqe_complete_bit(cmdq, wqe, ci); -} - -static int cmdq_arm_ceq_handler(struct sphw_cmdq *cmdq, - struct sphw_cmdq_wqe *wqe, u16 ci) -{ - struct sphw_ctrl *ctrl = &wqe->inline_wqe.wqe_scmd.ctrl; - u32 ctrl_info = ctrl->ctrl_info; - - if (!WQE_COMPLETED(ctrl_info)) - return -EBUSY; - - clear_wqe_complete_bit(cmdq, wqe, ci); - - return 0; -} - -#define SPHW_CMDQ_WQE_HEAD_LEN 32 -static void sphw_dump_cmdq_wqe_head(struct sphw_hwdev *hwdev, struct sphw_cmdq_wqe *wqe) -{ - u32 i; - u32 *data = (u32 *)wqe; - - for (i = 0; i < (SPHW_CMDQ_WQE_HEAD_LEN / sizeof(u32)); i += 4) { - sdk_info(hwdev->dev_hdl, "wqe data: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", - *(data + i), *(data + i + 1), *(data + i + 2), - *(data + i + 3)); - } -} - -void sphw_cmdq_ceq_handler(void *handle, u32 ceqe_data) -{ - struct sphw_cmdqs *cmdqs = ((struct sphw_hwdev *)handle)->cmdqs; - enum sphw_cmdq_type cmdq_type = CEQE_CMDQ_GET(ceqe_data, TYPE); - struct sphw_cmdq *cmdq = &cmdqs->cmdq[cmdq_type]; - struct sphw_hwdev *hwdev = cmdqs->hwdev; - struct sphw_cmdq_wqe *wqe = NULL; - struct sphw_cmdq_wqe_lcmd *wqe_lcmd = NULL; - struct sphw_cmdq_cmd_info *cmd_info = NULL; - u32 ctrl_info; - u16 ci; - - while ((wqe = cmdq_read_wqe(&cmdq->wq, &ci)) != NULL) { - cmd_info = &cmdq->cmd_infos[ci]; - - switch (cmd_info->cmd_type) { - case SPHW_CMD_TYPE_NONE: - return; - case SPHW_CMD_TYPE_TIMEOUT: - sdk_warn(hwdev->dev_hdl, "Cmdq timeout, q_id: %u, ci: %u\n", - cmdq_type, ci); - sphw_dump_cmdq_wqe_head(hwdev, wqe); - fallthrough; - case SPHW_CMD_TYPE_FAKE_TIMEOUT: - cmdq_clear_cmd_buf(cmd_info, hwdev); - clear_wqe_complete_bit(cmdq, wqe, ci); - break; - case SPHW_CMD_TYPE_SET_ARM: - if (cmdq_arm_ceq_handler(cmdq, wqe, ci)) - return; - - break; - default: - /* only arm bit is using scmd wqe, the wqe is lcmd */ - wqe_lcmd = &wqe->wqe_lcmd; - ctrl_info = wqe_lcmd->ctrl.ctrl_info; - - if (!WQE_COMPLETED(ctrl_info)) - return; - - /* 
For FORCE_STOP cmd_type, we also need to wait for - * the firmware processing to complete to prevent the - * firmware from accessing the released cmd_buf - */ - if (cmd_info->cmd_type == SPHW_CMD_TYPE_FORCE_STOP) { - cmdq_clear_cmd_buf(cmd_info, hwdev); - clear_wqe_complete_bit(cmdq, wqe, ci); - } else if (cmd_info->cmd_type == SPHW_CMD_TYPE_ASYNC) { - cmdq_async_cmd_handler(hwdev, cmdq, wqe, ci); - } else { - cmdq_sync_cmd_handler(cmdq, wqe, ci); - } - - break; - } - } -} - -static void cmdq_init_queue_ctxt(struct sphw_cmdqs *cmdqs, - struct sphw_cmdq *cmdq, - struct cmdq_ctxt_info *ctxt_info) -{ - struct sphw_wq *wq = &cmdq->wq; - u64 cmdq_first_block_paddr, pfn; - u16 start_ci = (u16)wq->cons_idx; - - pfn = CMDQ_PFN(sphw_wq_get_first_wqe_page_addr(wq)); - - ctxt_info->curr_wqe_page_pfn = - CMDQ_CTXT_PAGE_INFO_SET(1, HW_BUSY_BIT) | - CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) | - CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM) | - CMDQ_CTXT_PAGE_INFO_SET(SPHW_CEQ_ID_CMDQ, EQ_ID) | - CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN); - - if (!WQ_IS_0_LEVEL_CLA(wq)) { - cmdq_first_block_paddr = cmdqs->wq_block_paddr; - pfn = CMDQ_PFN(cmdq_first_block_paddr); - } - - ctxt_info->wq_block_pfn = CMDQ_CTXT_BLOCK_INFO_SET(start_ci, CI) | - CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN); -} - -static int init_cmdq(struct sphw_cmdq *cmdq, struct sphw_hwdev *hwdev, - enum sphw_cmdq_type q_type) -{ - void __iomem *db_base; - int err; - - cmdq->cmdq_type = q_type; - cmdq->wrapped = 1; - cmdq->hwdev = hwdev; - - spin_lock_init(&cmdq->cmdq_lock); - - cmdq->cmd_infos = kcalloc(cmdq->wq.q_depth, sizeof(*cmdq->cmd_infos), - GFP_KERNEL); - if (!cmdq->cmd_infos) { - sdk_err(hwdev->dev_hdl, "Failed to allocate cmdq infos\n"); - err = -ENOMEM; - goto cmd_infos_err; - } - - err = sphw_alloc_db_addr(hwdev, &db_base, NULL); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to allocate doorbell address\n"); - goto alloc_db_err; - } - - cmdq->db_base = (u8 *)db_base; - return 0; - -alloc_db_err: - kfree(cmdq->cmd_infos); - -cmd_infos_err: - - return err; -} - -static void free_cmdq(struct sphw_hwdev *hwdev, struct sphw_cmdq *cmdq) -{ - sphw_free_db_addr(hwdev, cmdq->db_base, NULL); - kfree(cmdq->cmd_infos); -} - -static int sphw_set_cmdq_ctxts(struct sphw_hwdev *hwdev) -{ - struct sphw_cmdqs *cmdqs = hwdev->cmdqs; - enum sphw_cmdq_type cmdq_type; - int err; - - cmdq_type = SPHW_CMDQ_SYNC; - for (; cmdq_type < SPHW_MAX_CMDQ_TYPES; cmdq_type++) { - err = sphw_set_cmdq_ctxt(hwdev, (u8)cmdq_type, &cmdqs->cmdq[cmdq_type].cmdq_ctxt); - if (err) - return err; - } - - cmdqs->status |= SPHW_CMDQ_ENABLE; - cmdqs->disable_flag = 0; - - return 0; -} - -static void cmdq_flush_sync_cmd(struct sphw_cmdq_cmd_info *cmd_info) -{ - if (cmd_info->cmd_type != SPHW_CMD_TYPE_DIRECT_RESP && - cmd_info->cmd_type != SPHW_CMD_TYPE_SGE_RESP) - return; - - cmd_info->cmd_type = SPHW_CMD_TYPE_FORCE_STOP; - - if (cmd_info->cmpt_code && - *cmd_info->cmpt_code == CMDQ_SEND_CMPT_CODE) - *cmd_info->cmpt_code = CMDQ_FORCE_STOP_CMPT_CODE; - - if (cmd_info->done) { - complete(cmd_info->done); - cmd_info->done = NULL; - cmd_info->cmpt_code = NULL; - cmd_info->direct_resp = NULL; - cmd_info->errcode = NULL; - } -} - -void sphw_cmdq_flush_cmd(struct sphw_hwdev *hwdev, struct sphw_cmdq *cmdq) -{ - struct sphw_cmdq_cmd_info *cmd_info = NULL; - u16 ci = 0; - - spin_lock_bh(&cmdq->cmdq_lock); - - while (cmdq_read_wqe(&cmdq->wq, &ci)) { - sphw_wq_put_wqebbs(&cmdq->wq, NUM_WQEBBS_FOR_CMDQ_WQE); - cmd_info = &cmdq->cmd_infos[ci]; - - if (cmd_info->cmd_type == SPHW_CMD_TYPE_DIRECT_RESP || - 
cmd_info->cmd_type == SPHW_CMD_TYPE_SGE_RESP) - cmdq_flush_sync_cmd(cmd_info); - } - - spin_unlock_bh(&cmdq->cmdq_lock); -} - -void sphw_cmdq_flush_channel_sync_cmd(struct sphw_hwdev *hwdev, u16 channel) -{ - struct sphw_cmdq_cmd_info *cmd_info = NULL; - struct sphw_cmdq *cmdq = NULL; - struct sphw_wq *wq = NULL; - u16 wqe_cnt, ci, i; - - if (channel >= SPHW_CHANNEL_MAX) - return; - - cmdq = &hwdev->cmdqs->cmdq[SPHW_CMDQ_SYNC]; - - spin_lock_bh(&cmdq->cmdq_lock); - - wq = &cmdq->wq; - ci = wq->cons_idx; - wqe_cnt = (u16)WQ_MASK_IDX(wq, wq->prod_idx + - wq->q_depth - wq->cons_idx); - for (i = 0; i < wqe_cnt; i++) { - cmd_info = &cmdq->cmd_infos[WQ_MASK_IDX(wq, ci + i)]; - - if (cmd_info->channel == channel) - cmdq_flush_sync_cmd(cmd_info); - } - - spin_unlock_bh(&cmdq->cmdq_lock); -} - -static void cmdq_reset_all_cmd_buff(struct sphw_cmdq *cmdq) -{ - u16 i; - - for (i = 0; i < cmdq->wq.q_depth; i++) - cmdq_clear_cmd_buf(&cmdq->cmd_infos[i], cmdq->hwdev); -} - -int sphw_cmdq_set_channel_status(struct sphw_hwdev *hwdev, u16 channel, bool enable) -{ - if (channel >= SPHW_CHANNEL_MAX) - return -EINVAL; - - if (enable) { - clear_bit(channel, &hwdev->cmdqs->channel_stop); - } else { - set_bit(channel, &hwdev->cmdqs->channel_stop); - sphw_cmdq_flush_channel_sync_cmd(hwdev, channel); - } - - sdk_info(hwdev->dev_hdl, "%s cmdq channel 0x%x\n", - enable ? "Enable" : "Disable", channel); - - return 0; -} - -void sphw_cmdq_enable_channel_lock(struct sphw_hwdev *hwdev, bool enable) -{ - hwdev->cmdqs->lock_channel_en = enable; - - sdk_info(hwdev->dev_hdl, "%s cmdq channel lock\n", - enable ? "Enable" : "Disable"); -} - -int sphw_reinit_cmdq_ctxts(struct sphw_hwdev *hwdev) -{ - struct sphw_cmdqs *cmdqs = hwdev->cmdqs; - enum sphw_cmdq_type cmdq_type; - - cmdq_type = SPHW_CMDQ_SYNC; - for (; cmdq_type < SPHW_MAX_CMDQ_TYPES; cmdq_type++) { - sphw_cmdq_flush_cmd(hwdev, &cmdqs->cmdq[cmdq_type]); - cmdq_reset_all_cmd_buff(&cmdqs->cmdq[cmdq_type]); - cmdqs->cmdq[cmdq_type].wrapped = 1; - sphw_wq_reset(&cmdqs->cmdq[cmdq_type].wq); - } - - return sphw_set_cmdq_ctxts(hwdev); -} - -static int create_cmdq_wq(struct sphw_cmdqs *cmdqs) -{ - enum sphw_cmdq_type type, cmdq_type; - int err; - - cmdq_type = SPHW_CMDQ_SYNC; - for (; cmdq_type < SPHW_MAX_CMDQ_TYPES; cmdq_type++) { - err = sphw_wq_create(cmdqs->hwdev, &cmdqs->cmdq[cmdq_type].wq, SPHW_CMDQ_DEPTH, - CMDQ_WQEBB_SIZE); - if (err) { - sdk_err(cmdqs->hwdev->dev_hdl, "Failed to create cmdq wq\n"); - goto destroy_wq; - } - } - - /* 1-level CLA must put all cmdq's wq page addr in one wq block */ - if (!WQ_IS_0_LEVEL_CLA(&cmdqs->cmdq[SPHW_CMDQ_SYNC].wq)) { - /* cmdq wq's CLA table is up to 512B */ -#define CMDQ_WQ_CLA_SIZE 512 - if (cmdqs->cmdq[SPHW_CMDQ_SYNC].wq.num_wq_pages > - CMDQ_WQ_CLA_SIZE / sizeof(u64)) { - sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq wq page exceed limit: %lu\n", - CMDQ_WQ_CLA_SIZE / sizeof(u64)); - goto destroy_wq; - } - - cmdqs->wq_block_vaddr = - dma_alloc_coherent(cmdqs->hwdev->dev_hdl, PAGE_SIZE, - &cmdqs->wq_block_paddr, GFP_KERNEL); - if (!cmdqs->wq_block_vaddr) { - sdk_err(cmdqs->hwdev->dev_hdl, "Failed to alloc cmdq wq block\n"); - goto destroy_wq; - } - - type = SPHW_CMDQ_SYNC; - for (; type < SPHW_MAX_CMDQ_TYPES; type++) - memcpy((u8 *)cmdqs->wq_block_vaddr + - CMDQ_WQ_CLA_SIZE * type, - cmdqs->cmdq[type].wq.wq_block_vaddr, - cmdqs->cmdq[type].wq.num_wq_pages * sizeof(u64)); - } - - return 0; - -destroy_wq: - type = SPHW_CMDQ_SYNC; - for (; type < cmdq_type; type++) - sphw_wq_destroy(&cmdqs->cmdq[type].wq); - - return err; -} - -static void 
destroy_cmdq_wq(struct sphw_cmdqs *cmdqs) -{ - enum sphw_cmdq_type cmdq_type; - - if (cmdqs->wq_block_vaddr) - dma_free_coherent(cmdqs->hwdev->dev_hdl, PAGE_SIZE, - cmdqs->wq_block_vaddr, cmdqs->wq_block_paddr); - - cmdq_type = SPHW_CMDQ_SYNC; - for (; cmdq_type < SPHW_MAX_CMDQ_TYPES; cmdq_type++) - sphw_wq_destroy(&cmdqs->cmdq[cmdq_type].wq); -} - -int sphw_cmdqs_init(struct sphw_hwdev *hwdev) -{ - struct sphw_cmdqs *cmdqs = NULL; - enum sphw_cmdq_type type, cmdq_type; - int err; - - cmdqs = kzalloc(sizeof(*cmdqs), GFP_KERNEL); - if (!cmdqs) - return -ENOMEM; - - hwdev->cmdqs = cmdqs; - cmdqs->hwdev = hwdev; - - cmdqs->cmd_buf_pool = dma_pool_create("sphw_cmdq", hwdev->dev_hdl, - SPHW_CMDQ_BUF_SIZE, - SPHW_CMDQ_BUF_SIZE, 0ULL); - if (!cmdqs->cmd_buf_pool) { - sdk_err(hwdev->dev_hdl, "Failed to create cmdq buffer pool\n"); - err = -ENOMEM; - goto pool_create_err; - } - - err = create_cmdq_wq(cmdqs); - if (err) - goto create_wq_err; - - cmdq_type = SPHW_CMDQ_SYNC; - for (; cmdq_type < SPHW_MAX_CMDQ_TYPES; cmdq_type++) { - err = init_cmdq(&cmdqs->cmdq[cmdq_type], hwdev, cmdq_type); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to initialize cmdq type :%d\n", - cmdq_type); - goto init_cmdq_err; - } - - cmdq_init_queue_ctxt(cmdqs, &cmdqs->cmdq[cmdq_type], - &cmdqs->cmdq[cmdq_type].cmdq_ctxt); - } - - err = sphw_set_cmdq_ctxts(hwdev); - if (err) - goto init_cmdq_err; - - return 0; - -init_cmdq_err: - type = SPHW_CMDQ_SYNC; - for (; type < cmdq_type; type++) - free_cmdq(hwdev, &cmdqs->cmdq[type]); - - destroy_cmdq_wq(cmdqs); - -create_wq_err: - dma_pool_destroy(cmdqs->cmd_buf_pool); - -pool_create_err: - kfree(cmdqs); - - return err; -} - -void sphw_cmdqs_free(struct sphw_hwdev *hwdev) -{ - struct sphw_cmdqs *cmdqs = hwdev->cmdqs; - enum sphw_cmdq_type cmdq_type = SPHW_CMDQ_SYNC; - - cmdqs->status &= ~SPHW_CMDQ_ENABLE; - - for (; cmdq_type < SPHW_MAX_CMDQ_TYPES; cmdq_type++) { - sphw_cmdq_flush_cmd(hwdev, &cmdqs->cmdq[cmdq_type]); - cmdq_reset_all_cmd_buff(&cmdqs->cmdq[cmdq_type]); - free_cmdq(cmdqs->hwdev, &cmdqs->cmdq[cmdq_type]); - } - - destroy_cmdq_wq(cmdqs); - - dma_pool_destroy(cmdqs->cmd_buf_pool); - - kfree(cmdqs); -} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_cmdq.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_cmdq.h deleted file mode 100644 index 2c1f1bbda4ad..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_cmdq.h +++ /dev/null @@ -1,195 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_CMDQ_H -#define SPHW_CMDQ_H - -#include "sphw_comm_msg_intf.h" - -#define SPHW_SCMD_DATA_LEN 16 - -#define SPHW_CMDQ_DEPTH 4096 - -enum sphw_cmdq_type { - SPHW_CMDQ_SYNC, - SPHW_CMDQ_ASYNC, - SPHW_MAX_CMDQ_TYPES, -}; - -enum sphw_db_src_type { - SPHW_DB_SRC_CMDQ_TYPE, - SPHW_DB_SRC_L2NIC_SQ_TYPE, -}; - -enum sphw_cmdq_db_type { - SPHW_DB_SQ_RQ_TYPE, - SPHW_DB_CMDQ_TYPE, -}; - -/*hardware define: cmdq wqe*/ -struct sphw_cmdq_header { - u32 header_info; - u32 saved_data; -}; - -struct sphw_scmd_bufdesc { - u32 buf_len; - u32 rsvd; - u8 data[SPHW_SCMD_DATA_LEN]; -}; - -struct sphw_lcmd_bufdesc { - struct sphw_sge sge; - u32 rsvd1; - u64 saved_async_buf; - u64 rsvd3; -}; - -struct sphw_cmdq_db { - u32 db_head; - u32 db_info; -}; - -struct sphw_status { - u32 status_info; -}; - -struct sphw_ctrl { - u32 ctrl_info; -}; - -struct sphw_sge_resp { - struct sphw_sge sge; - u32 rsvd; -}; - -struct sphw_cmdq_completion { - union { - struct sphw_sge_resp sge_resp; - u64 direct_resp; - }; -}; - -struct 
sphw_cmdq_wqe_scmd { - struct sphw_cmdq_header header; - u64 rsvd; - struct sphw_status status; - struct sphw_ctrl ctrl; - struct sphw_cmdq_completion completion; - struct sphw_scmd_bufdesc buf_desc; -}; - -struct sphw_cmdq_wqe_lcmd { - struct sphw_cmdq_header header; - struct sphw_status status; - struct sphw_ctrl ctrl; - struct sphw_cmdq_completion completion; - struct sphw_lcmd_bufdesc buf_desc; -}; - -struct sphw_cmdq_inline_wqe { - struct sphw_cmdq_wqe_scmd wqe_scmd; -}; - -struct sphw_cmdq_wqe { - union { - struct sphw_cmdq_inline_wqe inline_wqe; - struct sphw_cmdq_wqe_lcmd wqe_lcmd; - }; -}; - -struct sphw_cmdq_arm_bit { - u32 q_type; - u32 q_id; -}; - -enum sphw_cmdq_status { - SPHW_CMDQ_ENABLE = BIT(0), -}; - -enum sphw_cmdq_cmd_type { - SPHW_CMD_TYPE_NONE, - SPHW_CMD_TYPE_SET_ARM, - SPHW_CMD_TYPE_DIRECT_RESP, - SPHW_CMD_TYPE_SGE_RESP, - SPHW_CMD_TYPE_ASYNC, - SPHW_CMD_TYPE_FAKE_TIMEOUT, - SPHW_CMD_TYPE_TIMEOUT, - SPHW_CMD_TYPE_FORCE_STOP, -}; - -struct sphw_cmdq_cmd_info { - enum sphw_cmdq_cmd_type cmd_type; - u16 channel; - - struct completion *done; - int *errcode; - int *cmpt_code; - u64 *direct_resp; - u64 cmdq_msg_id; - - struct sphw_cmd_buf *buf_in; - struct sphw_cmd_buf *buf_out; -}; - -struct sphw_cmdq { - struct sphw_wq wq; - - enum sphw_cmdq_type cmdq_type; - int wrapped; - - /* spinlock for send cmdq commands */ - spinlock_t cmdq_lock; - - /* doorbell area */ - u8 __iomem *db_base; - - struct cmdq_ctxt_info cmdq_ctxt; - - struct sphw_cmdq_cmd_info *cmd_infos; - - struct sphw_hwdev *hwdev; -}; - -struct sphw_cmdqs { - struct sphw_hwdev *hwdev; - - struct pci_pool *cmd_buf_pool; - - /* All cmdq's CLA of a VF occupy a PAGE when cmdq wq is 1-level CLA */ - dma_addr_t wq_block_paddr; - void *wq_block_vaddr; - struct sphw_cmdq cmdq[SPHW_MAX_CMDQ_TYPES]; - - u32 status; - u32 disable_flag; - - bool lock_channel_en; - unsigned long channel_stop; -}; - -enum sphw_set_arm_type { - SPHW_SET_ARM_CMDQ, - SPHW_SET_ARM_SQ, - SPHW_SET_ARM_TYPE_NUM, -}; - -int sphw_set_arm_bit(void *hwdev, enum sphw_set_arm_type q_type, u16 q_id); - -void sphw_cmdq_ceq_handler(void *hwdev, u32 ceqe_data); - -int sphw_reinit_cmdq_ctxts(struct sphw_hwdev *hwdev); - -bool sphw_cmdq_idle(struct sphw_cmdq *cmdq); - -int sphw_cmdqs_init(struct sphw_hwdev *hwdev); - -void sphw_cmdqs_free(struct sphw_hwdev *hwdev); - -void sphw_cmdq_flush_cmd(struct sphw_hwdev *hwdev, struct sphw_cmdq *cmdq); - -int sphw_cmdq_set_channel_status(struct sphw_hwdev *hwdev, u16 channel, bool enable); - -void sphw_cmdq_enable_channel_lock(struct sphw_hwdev *hwdev, bool enable); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_comm_cmd.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_comm_cmd.h deleted file mode 100644 index d0e4c87942b5..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_comm_cmd.h +++ /dev/null @@ -1,60 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_COMMON_CMD_H -#define SPHW_COMMON_CMD_H - -/* COMM Commands between Driver to MPU */ -enum sphw_mgmt_cmd { - COMM_MGMT_CMD_FUNC_RESET = 0, - COMM_MGMT_CMD_FEATURE_NEGO, - COMM_MGMT_CMD_FLUSH_DOORBELL, - COMM_MGMT_CMD_START_FLUSH, - COMM_MGMT_CMD_SET_FUNC_FLR, - COMM_MGMT_CMD_GET_GLOBAL_ATTR, - - COMM_MGMT_CMD_SET_CMDQ_CTXT = 20, - COMM_MGMT_CMD_SET_VAT, - COMM_MGMT_CMD_CFG_PAGESIZE, - COMM_MGMT_CMD_CFG_MSIX_CTRL_REG, - COMM_MGMT_CMD_SET_CEQ_CTRL_REG, - COMM_MGMT_CMD_SET_DMA_ATTR, - - COMM_MGMT_CMD_GET_MQM_FIX_INFO = 40, - COMM_MGMT_CMD_SET_MQM_CFG_INFO, - 
COMM_MGMT_CMD_SET_MQM_SRCH_GPA, - COMM_MGMT_CMD_SET_PPF_TMR, - COMM_MGMT_CMD_SET_PPF_HT_GPA, - COMM_MGMT_CMD_SET_FUNC_TMR_BITMAT, - - COMM_MGMT_CMD_GET_FW_VERSION = 60, - COMM_MGMT_CMD_GET_BOARD_INFO, - COMM_MGMT_CMD_SYNC_TIME, - COMM_MGMT_CMD_GET_HW_PF_INFOS, - COMM_MGMT_CMD_SEND_BDF_INFO, - COMM_MGMT_CMD_GET_VIRTIO_BDF_INFO, - - COMM_MGMT_CMD_UPDATE_FW = 80, - COMM_MGMT_CMD_ACTIVE_FW, - COMM_MGMT_CMD_HOT_ACTIVE_FW, - COMM_MGMT_CMD_HOT_ACTIVE_DONE_NOTICE, - COMM_MGMT_CMD_SWITCH_CFG, - COMM_MGMT_CMD_CHECK_FLASH, - COMM_MGMT_CMD_CHECK_FLASH_RW, - COMM_MGMT_CMD_RESOURCE_CFG, - - COMM_MGMT_CMD_FAULT_REPORT = 100, - COMM_MGMT_CMD_WATCHDOG_INFO, - COMM_MGMT_CMD_MGMT_RESET, - COMM_MGMT_CMD_FFM_SET, - - COMM_MGMT_CMD_GET_LOG = 120, - COMM_MGMT_CMD_TEMP_OP, - COMM_MGMT_CMD_EN_AUTO_RST_CHIP, - COMM_MGMT_CMD_CFG_REG, - COMM_MGMT_CMD_GET_CHIP_ID, - COMM_MGMT_CMD_SYSINFO_DFX, - COMM_MGMT_CMD_PCIE_DFX_NTC, -}; - -#endif /* SPHW_COMMON_CMD_H */ diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_comm_msg_intf.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_comm_msg_intf.h deleted file mode 100644 index fd12a47e5bb5..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_comm_msg_intf.h +++ /dev/null @@ -1,273 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_COMM_MSG_INTF_H -#define SPHW_COMM_MSG_INTF_H - -#include "sphw_mgmt_msg_base.h" - -#define FUNC_RESET_FLAG_MAX_VALUE ((1U << (RES_TYPE_IPSEC + 1)) - 1) -struct comm_cmd_func_reset { - struct mgmt_msg_head head; - - u16 func_id; - u16 rsvd1[3]; - u64 reset_flag; -}; - -enum { - COMM_F_API_CHAIN = 1U << 0, -}; - -#define COMM_MAX_FEATURE_QWORD 4 -struct comm_cmd_feature_nego { - struct mgmt_msg_head head; - - u16 func_id; - u8 opcode; /* 1: set, 0: get */ - u8 rsvd; - u64 s_feature[COMM_MAX_FEATURE_QWORD]; -}; - -struct comm_cmd_clear_doorbell { - struct mgmt_msg_head head; - - u16 func_id; - u16 rsvd1[3]; -}; - -struct comm_cmd_clear_resource { - struct mgmt_msg_head head; - - u16 func_id; - u16 rsvd1[3]; -}; - -struct comm_global_attr { - u8 max_host_num; - u8 max_pf_num; - u16 vf_id_start; - - u8 mgmt_host_node_id; /* for api cmd to mgmt cpu */ - u8 rsvd1[3]; - - u32 rsvd2[8]; -}; - -struct comm_cmd_get_glb_attr { - struct mgmt_msg_head head; - - struct comm_global_attr attr; -}; - -enum sphw_fw_ver_type { - SPHW_FW_VER_TYPE_BOOT, - SPHW_FW_VER_TYPE_MPU, - SPHW_FW_VER_TYPE_NPU, - SPHW_FW_VER_TYPE_SMU, - SPHW_FW_VER_TYPE_CFG, -}; - -#define SPHW_FW_VERSION_LEN 16 -#define SPHW_FW_COMPILE_TIME_LEN 20 -struct comm_cmd_get_fw_version { - struct mgmt_msg_head head; - - u16 fw_type; - u16 rsvd1; - u8 ver[SPHW_FW_VERSION_LEN]; - u8 time[SPHW_FW_COMPILE_TIME_LEN]; -}; - -/* hardware define: cmdq context */ -struct cmdq_ctxt_info { - u64 curr_wqe_page_pfn; - u64 wq_block_pfn; -}; - -struct comm_cmd_cmdq_ctxt { - struct mgmt_msg_head head; - - u16 func_id; - u8 cmdq_id; - u8 rsvd1[5]; - - struct cmdq_ctxt_info ctxt; -}; - -struct comm_cmd_root_ctxt { - struct mgmt_msg_head head; - - u16 func_id; - u8 set_cmdq_depth; - u8 cmdq_depth; - u16 rx_buf_sz; - u8 lro_en; - u8 rsvd1; - u16 sq_depth; - u16 rq_depth; - u64 rsvd2; -}; - -struct comm_cmd_wq_page_size { - struct mgmt_msg_head head; - - u16 func_id; - u8 opcode; - /* real_size=4KB*2^page_size, range(0~20) must be checked by driver */ - u8 page_size; - - u32 rsvd1; -}; - -struct comm_cmd_msix_config { - struct mgmt_msg_head head; - - u16 func_id; - u8 opcode; - u8 rsvd1; - u16 msix_index; - u8 pending_cnt; - u8 
coalesce_timer_cnt; - u8 resend_timer_cnt; - u8 lli_timer_cnt; - u8 lli_credit_cnt; - u8 rsvd2[5]; -}; - -struct comm_cmd_dma_attr_config { - struct mgmt_msg_head head; - - u16 func_id; - u8 entry_idx; - u8 st; - u8 at; - u8 ph; - u8 no_snooping; - u8 tph_en; - u32 resv1; -}; - -struct comm_cmd_ceq_ctrl_reg { - struct mgmt_msg_head head; - - u16 func_id; - u16 q_id; - u32 ctrl0; - u32 ctrl1; - u32 rsvd1; -}; - -struct comm_cmd_func_tmr_bitmap_op { - struct mgmt_msg_head head; - - u16 func_id; - u8 opcode; /* 1: start, 0: stop */ - u8 rsvd1[5]; -}; - -struct comm_cmd_ppf_tmr_op { - struct mgmt_msg_head head; - - u8 ppf_id; - u8 opcode; /* 1: start, 0: stop */ - u8 rsvd1[6]; -}; - -struct comm_cmd_ht_gpa { - struct mgmt_msg_head head; - - u8 host_id; - u32 rsvd1[7]; - u64 page_pa0; - u64 page_pa1; -}; - -struct comm_cmd_get_eqm_num { - struct mgmt_msg_head head; - - u8 host_id; - u8 rsvd1[3]; - u32 chunk_num; - u32 search_gpa_num; -}; - -struct comm_cmd_eqm_cfg { - struct mgmt_msg_head head; - - u8 host_id; - u8 valid; - u16 rsvd1; - u32 page_size; - u32 rsvd2; -}; - -struct comm_cmd_eqm_search_gpa { - struct mgmt_msg_head head; - - u8 host_id; - u8 rsvd1[3]; - u32 start_idx; - u32 num; - u32 rsvd2; - u64 gpa_hi52[0]; -}; - -struct comm_cmd_ffm_info { - struct mgmt_msg_head head; - - u8 node_id; - /* error level of the interrupt source */ - u8 err_level; - /* Classification by interrupt source properties */ - u16 err_type; - u32 err_csr_addr; - u32 err_csr_value; - u32 rsvd1; -}; - -struct sphw_board_info { - u8 board_type; - u8 port_num; - u8 port_speed; - u8 pcie_width; - u8 host_num; - u8 pf_num; - u16 vf_total_num; - u8 tile_num; - u8 qcm_num; - u8 core_num; - u8 work_mode; - u8 service_mode; - u8 pcie_mode; - u8 boot_sel; - u8 board_id; - u32 cfg_addr; -}; - -struct comm_cmd_board_info { - struct mgmt_msg_head head; - - struct sphw_board_info info; - u32 rsvd[25]; -}; - -struct comm_cmd_sync_time { - struct mgmt_msg_head head; - - u64 mstime; - u64 rsvd1; -}; - -struct comm_cmd_bdf_info { - struct mgmt_msg_head head; - - u16 function_idx; - u8 rsvd1[2]; - u8 bus; - u8 device; - u8 function; - u8 rsvd2[5]; -}; - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_common.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_common.c deleted file mode 100644 index aaba9e68ba31..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_common.c +++ /dev/null @@ -1,88 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include <linux/kernel.h> -#include <linux/dma-mapping.h> -#include <linux/delay.h> - -#include "sphw_common.h" - -int sphw_dma_alloc_coherent_align(void *dev_hdl, u64 size, u64 align, unsigned int flag, - struct sphw_dma_addr_align *mem_align) -{ - void *vaddr = NULL, *align_vaddr = NULL; - dma_addr_t paddr, align_paddr; - u64 real_size = size; - - vaddr = dma_alloc_coherent(dev_hdl, real_size, &paddr, flag); - if (!vaddr) - return -ENOMEM; - - align_paddr = ALIGN(paddr, align); - /* align */ - if (align_paddr == paddr) { - align_vaddr = vaddr; - goto out; - } - - dma_free_coherent(dev_hdl, real_size, vaddr, paddr); - - /* realloc memory for align */ - real_size = size + align; - vaddr = dma_alloc_coherent(dev_hdl, real_size, &paddr, flag); - if (!vaddr) - return -ENOMEM; - - align_paddr = ALIGN(paddr, align); - align_vaddr = (void *)((u64)vaddr + (align_paddr - paddr)); - -out: - mem_align->real_size = (u32)real_size; - mem_align->ori_vaddr = vaddr; - mem_align->ori_paddr = paddr; - mem_align->align_vaddr = 
align_vaddr; - mem_align->align_paddr = align_paddr; - - return 0; -} - -void sphw_dma_free_coherent_align(void *dev_hdl, struct sphw_dma_addr_align *mem_align) -{ - dma_free_coherent(dev_hdl, mem_align->real_size, - mem_align->ori_vaddr, mem_align->ori_paddr); -} - -int sphw_wait_for_timeout(void *priv_data, wait_cpl_handler handler, - u32 wait_total_ms, u32 wait_once_us) -{ - enum sphw_wait_return ret; - unsigned long end; - /* Take 9/10 * wait_once_us as the minimum sleep time of usleep_range */ - u32 usleep_min = wait_once_us - wait_once_us / 10; - - if (!handler) - return -EINVAL; - - end = jiffies + msecs_to_jiffies(wait_total_ms); - do { - ret = handler(priv_data); - if (ret == WAIT_PROCESS_CPL) - return 0; - else if (ret == WAIT_PROCESS_ERR) - return -EIO; - - /* Sleep more than 20ms using msleep is accurate */ - if (wait_once_us >= 20 * USEC_PER_MSEC) - msleep(wait_once_us / USEC_PER_MSEC); - else - usleep_range(usleep_min, wait_once_us); - } while (time_before(jiffies, end)); - - ret = handler(priv_data); - if (ret == WAIT_PROCESS_CPL) - return 0; - else if (ret == WAIT_PROCESS_ERR) - return -EIO; - - return -ETIMEDOUT; -} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_common.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_common.h deleted file mode 100644 index 05327bd4bcfe..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_common.h +++ /dev/null @@ -1,106 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_COMMON_H -#define SPHW_COMMON_H - -#include <linux/types.h> - -struct sphw_dma_addr_align { - u32 real_size; - - void *ori_vaddr; - dma_addr_t ori_paddr; - - void *align_vaddr; - dma_addr_t align_paddr; -}; - -int sphw_dma_alloc_coherent_align(void *dev_hdl, u64 size, u64 align, unsigned int flag, - struct sphw_dma_addr_align *mem_align); - -void sphw_dma_free_coherent_align(void *dev_hdl, struct sphw_dma_addr_align *mem_align); - -enum sphw_wait_return { - WAIT_PROCESS_CPL = 0, - WAIT_PROCESS_WAITING = 1, - WAIT_PROCESS_ERR = 2, -}; - -typedef enum sphw_wait_return (*wait_cpl_handler)(void *priv_data); - -int sphw_wait_for_timeout(void *priv_data, wait_cpl_handler handler, - u32 wait_total_ms, u32 wait_once_us); - -/* * - * sphw_cpu_to_be32 - convert data to big endian 32 bit format - * @data: the data to convert - * @len: length of data to convert, must be Multiple of 4B - */ -static inline void sphw_cpu_to_be32(void *data, int len) -{ - int i, chunk_sz = sizeof(u32); - u32 *mem = data; - - if (!data) - return; - - len = len / chunk_sz; - - for (i = 0; i < len; i++) { - *mem = cpu_to_be32(*mem); - mem++; - } -} - -/* * - * sphw_cpu_to_be32 - convert data from big endian 32 bit format - * @data: the data to convert - * @len: length of data to convert - */ -static inline void sphw_be32_to_cpu(void *data, int len) -{ - int i, chunk_sz = sizeof(u32); - u32 *mem = data; - - if (!data) - return; - - len = len / chunk_sz; - - for (i = 0; i < len; i++) { - *mem = be32_to_cpu(*mem); - mem++; - } -} - -struct sphw_sge { - u32 hi_addr; - u32 lo_addr; - u32 len; -}; - -/* * - * sphw_set_sge - set dma area in scatter gather entry - * @sge: scatter gather entry - * @addr: dma address - * @len: length of relevant data in the dma address - */ -static inline void sphw_set_sge(struct sphw_sge *sge, dma_addr_t addr, int len) -{ - sge->hi_addr = upper_32_bits(addr); - sge->lo_addr = lower_32_bits(addr); - sge->len = len; -} - -#define sdk_err(dev, format, ...) 
dev_err(dev, "[COMM]" format, ##__VA_ARGS__) -#define sdk_warn(dev, format, ...) dev_warn(dev, "[COMM]" format, ##__VA_ARGS__) -#define sdk_notice(dev, format, ...) dev_notice(dev, "[COMM]" format, ##__VA_ARGS__) -#define sdk_info(dev, format, ...) dev_info(dev, "[COMM]" format, ##__VA_ARGS__) - -#define nic_err(dev, format, ...) dev_err(dev, "[NIC]" format, ##__VA_ARGS__) -#define nic_warn(dev, format, ...) dev_warn(dev, "[NIC]" format, ##__VA_ARGS__) -#define nic_notice(dev, format, ...) dev_notice(dev, "[NIC]" format, ##__VA_ARGS__) -#define nic_info(dev, format, ...) dev_info(dev, "[NIC]" format, ##__VA_ARGS__) - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_crm.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_crm.h deleted file mode 100644 index 8cce36698e3d..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_crm.h +++ /dev/null @@ -1,982 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_CRM_H -#define SPHW_CRM_H - -#define ARRAY_LEN(arr) ((int)((int)sizeof(arr) / (int)sizeof((arr)[0]))) - -#define SPHW_MGMT_VERSION_MAX_LEN 32 - -#define SPHW_FW_VERSION_NAME 16 -#define SPHW_FW_VERSION_SECTION_CNT 4 -#define SPHW_FW_VERSION_SECTION_BORDER 0xFF -struct sphw_fw_version { - u8 mgmt_ver[SPHW_FW_VERSION_NAME]; - u8 microcode_ver[SPHW_FW_VERSION_NAME]; - u8 boot_ver[SPHW_FW_VERSION_NAME]; -}; - -#define SPHW_MGMT_CMD_UNSUPPORTED 0xFF - -/* show each drivers only such as nic_service_cap, - * toe_service_cap structure, but not show service_cap - */ -enum sphw_service_type { - SERVICE_T_NIC = 0, - SERVICE_T_OVS, - SERVICE_T_ROCE, - SERVICE_T_TOE, - SERVICE_T_IOE, - SERVICE_T_FC, - SERVICE_T_VBS, - SERVICE_T_IPSEC, - SERVICE_T_VIRTIO, - SERVICE_T_MIGRATE, - SERVICE_T_MAX, - - /* Only used for interruption resource management, - * mark the request module - */ - SERVICE_T_INTF = (1 << 15), - SERVICE_T_CQM = (1 << 16), -}; - -struct nic_service_cap { - u16 max_sqs; - u16 max_rqs; -}; - -/* PF/VF ToE service resource structure */ -struct dev_toe_svc_cap { - /* PF resources */ - u32 max_pctxs; /* Parent Context: max specifications 1M */ - u32 max_cqs; - u16 max_srqs; - u32 srq_id_start; - u32 max_mpts; -}; - -/* ToE services */ -struct toe_service_cap { - struct dev_toe_svc_cap dev_toe_cap; - - bool alloc_flag; - u32 pctx_sz; /* 1KB */ - u32 scqc_sz; /* 64B */ -}; - -/* PF FC service resource structure defined */ -struct dev_fc_svc_cap { - /* PF Parent QPC */ - u32 max_parent_qpc_num; /* max number is 2048 */ - - /* PF Child QPC */ - u32 max_child_qpc_num; /* max number is 2048 */ - u32 child_qpc_id_start; - - /* PF SCQ */ - u32 scq_num; /* 16 */ - - /* PF supports SRQ */ - u32 srq_num; /* Number of SRQ is 2 */ - - u8 vp_id_start; - u8 vp_id_end; -}; - -/* FC services */ -struct fc_service_cap { - struct dev_fc_svc_cap dev_fc_cap; - - /* Parent QPC */ - u32 parent_qpc_size; /* 256B */ - - /* Child QPC */ - u32 child_qpc_size; /* 256B */ - - /* SQ */ - u32 sqe_size; /* 128B(in linked list mode) */ - - /* SCQ */ - u32 scqc_size; /* Size of the Context 32B */ - u32 scqe_size; /* 64B */ - - /* SRQ */ - u32 srqc_size; /* Size of SRQ Context (64B) */ - u32 srqe_size; /* 32B */ -}; - -struct dev_roce_svc_own_cap { - u32 max_qps; - u32 max_cqs; - u32 max_srqs; - u32 max_mpts; - u32 max_drc_qps; - - u32 cmtt_cl_start; - u32 cmtt_cl_end; - u32 cmtt_cl_sz; - - u32 dmtt_cl_start; - u32 dmtt_cl_end; - u32 dmtt_cl_sz; - - u32 wqe_cl_start; - u32 wqe_cl_end; - u32 wqe_cl_sz; - - u32 qpc_entry_sz; - u32 max_wqes; - u32 
max_rq_sg; - u32 max_sq_inline_data_sz; - u32 max_rq_desc_sz; - - u32 rdmarc_entry_sz; - u32 max_qp_init_rdma; - u32 max_qp_dest_rdma; - - u32 max_srq_wqes; - u32 reserved_srqs; - u32 max_srq_sge; - u32 srqc_entry_sz; - - u32 max_msg_sz; /* Message size 2GB */ -}; - -/* RDMA service capability structure */ -struct dev_rdma_svc_cap { - /* ROCE service unique parameter structure */ - struct dev_roce_svc_own_cap roce_own_cap; -}; - -/* Defines the RDMA service capability flag */ -enum { - RDMA_BMME_FLAG_LOCAL_INV = (1 << 0), - RDMA_BMME_FLAG_REMOTE_INV = (1 << 1), - RDMA_BMME_FLAG_FAST_REG_WR = (1 << 2), - RDMA_BMME_FLAG_RESERVED_LKEY = (1 << 3), - RDMA_BMME_FLAG_TYPE_2_WIN = (1 << 4), - RDMA_BMME_FLAG_WIN_TYPE_2B = (1 << 5), - - RDMA_DEV_CAP_FLAG_XRC = (1 << 6), - RDMA_DEV_CAP_FLAG_MEM_WINDOW = (1 << 7), - RDMA_DEV_CAP_FLAG_ATOMIC = (1 << 8), - RDMA_DEV_CAP_FLAG_APM = (1 << 9), -}; - -/* RDMA services */ -struct rdma_service_cap { - struct dev_rdma_svc_cap dev_rdma_cap; - - u8 log_mtt; /* 1. the number of MTT PA must be integer power of 2 - * 2. represented by logarithm. Each MTT table can - * contain 1, 2, 4, 8, and 16 PA) - */ - /* todo: need to check whether related to max_mtt_seg */ - u32 num_mtts; /* Number of MTT table (4M), - * is actually MTT seg number - */ - u32 log_mtt_seg; - u32 mtt_entry_sz; /* MTT table size 8B, including 1 PA(64bits) */ - u32 mpt_entry_sz; /* MPT table size (64B) */ - - u32 dmtt_cl_start; - u32 dmtt_cl_end; - u32 dmtt_cl_sz; - - u8 log_rdmarc; /* 1. the number of RDMArc PA must be integer power of 2 - * 2. represented by logarithm. Each MTT table can - * contain 1, 2, 4, 8, and 16 PA) - */ - - u32 reserved_qps; /* Number of reserved QP */ - u32 max_sq_sg; /* Maximum SGE number of SQ (8) */ - u32 max_sq_desc_sz; /* WQE maximum size of SQ(1024B), inline maximum - * size if 960B(944B aligned to the 960B), - * 960B=>wqebb alignment=>1024B - */ - u32 wqebb_size; /* Currently, the supports 64B and 128B, - * defined as 64Bytes - */ - - u32 max_cqes; /* Size of the depth of the CQ (64K-1) */ - u32 reserved_cqs; /* Number of reserved CQ */ - u32 cqc_entry_sz; /* Size of the CQC (64B/128B) */ - u32 cqe_size; /* Size of CQE (32B) */ - - u32 reserved_mrws; /* Number of reserved MR/MR Window */ - - u32 max_fmr_maps; /* max MAP of FMR, - * (1 << (32-ilog2(num_mpt)))-1; - */ - - /* todo: max value needs to be confirmed */ - /* MTT table number of Each MTT seg(3) */ - - u32 log_rdmarc_seg; /* table number of each RDMArc seg(3) */ - - /* Timeout time. 
Formula:Tr=4.096us*2(local_ca_ack_delay), [Tr,4Tr] */ - u32 local_ca_ack_delay; - u32 num_ports; /* Physical port number */ - - u32 db_page_size; /* Size of the DB (4KB) */ - u32 direct_wqe_size; /* Size of the DWQE (256B) */ - - u32 num_pds; /* Maximum number of PD (128K) */ - u32 reserved_pds; /* Number of reserved PD */ - u32 max_xrcds; /* Maximum number of xrcd (64K) */ - u32 reserved_xrcds; /* Number of reserved xrcd */ - - u32 max_gid_per_port; /* gid number (16) of each port */ - u32 gid_entry_sz; /* RoCE v2 GID table is 32B, - * compatible RoCE v1 expansion - */ - - u32 reserved_lkey; /* local_dma_lkey */ - u32 num_comp_vectors; /* Number of complete vector (32) */ - u32 page_size_cap; /* Supports 4K,8K,64K,256K,1M and 4M page_size */ - - u32 flags; /* RDMA some identity */ - u32 max_frpl_len; /* Maximum number of pages frmr registration */ - u32 max_pkeys; /* Number of supported pkey group */ -}; - -/* PF OVS service resource structure defined */ -struct dev_ovs_svc_cap { - u32 max_pctxs; /* Parent Context: max specifications 1M */ - u8 dynamic_qp_en; - u8 fake_vf_num; - u16 fake_vf_start_id; -}; - -/* OVS services */ -struct ovs_service_cap { - struct dev_ovs_svc_cap dev_ovs_cap; - - u32 pctx_sz; /* 512B */ -}; - -/* PF IPsec service resource structure defined */ -struct dev_ipsec_svc_cap { - /* PF resources */ - u32 max_sa_ctxs; /* Parent Context: max specifications 8192 */ -}; - -/* IPsec services */ -struct ipsec_service_cap { - struct dev_ipsec_svc_cap dev_ipsec_cap; - u32 sactx_sz; /* 512B */ -}; - -/* Defines the IRQ information structure */ -struct irq_info { - u16 msix_entry_idx; /* IRQ corresponding index number */ - u32 irq_id; /* the IRQ number from OS */ -}; - -struct interrupt_info { - u32 lli_set; - u32 interrupt_coalesc_set; - u16 msix_index; - u8 lli_credit_limit; - u8 lli_timer_cfg; - u8 pending_limt; - u8 coalesc_timer_cfg; - u8 resend_timer_cfg; -}; - -enum sphw_msix_state { - SPHW_MSIX_ENABLE, - SPHW_MSIX_DISABLE, -}; - -enum sphw_msix_auto_mask { - SPHW_SET_MSIX_AUTO_MASK, - SPHW_CLR_MSIX_AUTO_MASK, -}; - -enum func_type { - TYPE_PF, - TYPE_VF, - TYPE_PPF, - TYPE_UNKNOWN, -}; - -struct sphw_init_para { - /* Record spnic_pcidev or NDIS_Adapter pointer address */ - void *adapter_hdl; - /* Record pcidev or Handler pointer address - * for example: ioremap interface input parameter - */ - void *pcidev_hdl; - /* Record pcidev->dev or Handler pointer address which used to - * dma address application or dev_err print the parameter - */ - void *dev_hdl; - - /* Configure virtual address, PF is bar1, VF is bar0/1 */ - void *cfg_reg_base; - /* interrupt configuration register address, PF is bar2, VF is bar2/3 - */ - void *intr_reg_base; - /* for PF bar3 virtual address, if function is VF should set to NULL */ - void *mgmt_reg_base; - - u64 db_dwqe_len; - u64 db_base_phy; - /* the doorbell address, bar4/5 higher 4M space */ - void *db_base; - /* direct wqe 4M, follow the doorbell address space */ - void *dwqe_mapping; - void **hwdev; - void *chip_node; - /* In bmgw x86 host, driver can't send message to mgmt cpu directly, - * need to trasmit message ppf mbox to bmgw arm host. 
- */ - void *ppf_hwdev; -}; - -/* B200 config BAR45 4MB, DB & DWQE both 2MB */ -#define SPHW_DB_DWQE_SIZE 0x00400000 - -/* db/dwqe page size: 4K */ -#define SPHW_DB_PAGE_SIZE 0x00001000ULL -#define SPHW_DWQE_OFFSET 0x00000800ULL - -#define SPHW_DB_MAX_AREAS (SPHW_DB_DWQE_SIZE / SPHW_DB_PAGE_SIZE) - -#ifndef IFNAMSIZ -#define IFNAMSIZ 16 -#endif -#define MAX_FUNCTION_NUM 4096 -#define SPHW_MAX_COS 8 - -struct card_node { - struct list_head node; - struct list_head func_list; - char chip_name[IFNAMSIZ]; - void *log_info; - void *dbgtool_info; - void *func_handle_array[MAX_FUNCTION_NUM]; - unsigned char bus_num; - u8 func_num; - bool up_bitmap_setted; - u8 valid_up_bitmap; -}; - -#define FAULT_SHOW_STR_LEN 16 - -enum sphw_fault_source_type { - /* same as FAULT_TYPE_CHIP */ - SPHW_FAULT_SRC_HW_MGMT_CHIP = 0, - /* same as FAULT_TYPE_UCODE */ - SPHW_FAULT_SRC_HW_MGMT_UCODE, - /* same as FAULT_TYPE_MEM_RD_TIMEOUT */ - SPHW_FAULT_SRC_HW_MGMT_MEM_RD_TIMEOUT, - /* same as FAULT_TYPE_MEM_WR_TIMEOUT */ - SPHW_FAULT_SRC_HW_MGMT_MEM_WR_TIMEOUT, - /* same as FAULT_TYPE_REG_RD_TIMEOUT */ - SPHW_FAULT_SRC_HW_MGMT_REG_RD_TIMEOUT, - /* same as FAULT_TYPE_REG_WR_TIMEOUT */ - SPHW_FAULT_SRC_HW_MGMT_REG_WR_TIMEOUT, - SPHW_FAULT_SRC_SW_MGMT_UCODE, - SPHW_FAULT_SRC_MGMT_WATCHDOG, - SPHW_FAULT_SRC_MGMT_RESET = 8, - SPHW_FAULT_SRC_HW_PHY_FAULT, - SPHW_FAULT_SRC_TX_PAUSE_EXCP, - SPHW_FAULT_SRC_PCIE_LINK_DOWN = 20, - SPHW_FAULT_SRC_HOST_HEARTBEAT_LOST = 21, - SPHW_FAULT_SRC_TX_TIMEOUT, - SPHW_FAULT_SRC_TYPE_MAX, -}; - -union sphw_fault_hw_mgmt { - u32 val[4]; - /* valid only type == FAULT_TYPE_CHIP */ - struct { - u8 node_id; - /* enum sphw_fault_err_level */ - u8 err_level; - u16 err_type; - u32 err_csr_addr; - u32 err_csr_value; - /* func_id valid only if err_level == FAULT_LEVEL_SERIOUS_FLR */ - u16 func_id; - u16 rsvd2; - } chip; - - /* valid only if type == FAULT_TYPE_UCODE */ - struct { - u8 cause_id; - u8 core_id; - u8 c_id; - u8 rsvd3; - u32 epc; - u32 rsvd4; - u32 rsvd5; - } ucode; - - /* valid only if type == FAULT_TYPE_MEM_RD_TIMEOUT || - * FAULT_TYPE_MEM_WR_TIMEOUT - */ - struct { - u32 err_csr_ctrl; - u32 err_csr_data; - u32 ctrl_tab; - u32 mem_index; - } mem_timeout; - - /* valid only if type == FAULT_TYPE_REG_RD_TIMEOUT || - * FAULT_TYPE_REG_WR_TIMEOUT - */ - struct { - u32 err_csr; - u32 rsvd6; - u32 rsvd7; - u32 rsvd8; - } reg_timeout; - - struct { - /* 0: read; 1: write */ - u8 op_type; - u8 port_id; - u8 dev_ad; - u8 rsvd9; - u32 csr_addr; - u32 op_data; - u32 rsvd10; - } phy_fault; -}; - -/* defined by chip */ -struct sphw_fault_event { - /* enum sphw_fault_type */ - u8 type; - u8 fault_level; /* sdk write fault level for uld event */ - u8 rsvd0[2]; - union sphw_fault_hw_mgmt event; -}; - -struct sphw_cmd_fault_event { - u8 status; - u8 version; - u8 rsvd0[6]; - struct sphw_fault_event event; -}; - -enum sphw_event_type { - SPHW_EVENT_LINK_DOWN = 0, - SPHW_EVENT_LINK_UP = 1, - SPHW_EVENT_FAULT = 3, - SPHW_EVENT_DCB_STATE_CHANGE = 5, - SPHW_EVENT_INIT_MIGRATE_PF, - SPHW_EVENT_SRIOV_STATE_CHANGE, - SPHW_EVENT_PORT_MODULE_EVENT, - SPHW_EVENT_PCIE_LINK_DOWN, - SPHW_EVENT_HEART_LOST, -}; - -struct sphw_event_link_info { - u8 valid; - u8 port_type; - u8 autoneg_cap; - u8 autoneg_state; - u8 duplex; - u8 speed; -}; - -struct sphw_dcb_info { - u8 dcb_on; - u8 default_cos; - u8 up_cos[SPHW_MAX_COS]; -}; - -struct sphw_sriov_state_info { - u8 enable; - u16 num_vfs; -}; - -enum link_err_type { - LINK_ERR_MODULE_UNRECOGENIZED, - LINK_ERR_NUM, -}; - -enum port_module_event_type { - 
SPHW_PORT_MODULE_CABLE_PLUGGED, - SPHW_PORT_MODULE_CABLE_UNPLUGGED, - SPHW_PORT_MODULE_LINK_ERR, - SPHW_PORT_MODULE_MAX_EVENT, -}; - -struct sphw_port_module_event { - enum port_module_event_type type; - enum link_err_type err_type; -}; - -struct sphw_event_info { - enum sphw_event_type type; - union { - struct sphw_event_link_info link_info; - struct sphw_fault_event info; - struct sphw_dcb_info dcb_state; - struct sphw_sriov_state_info sriov_state; - struct sphw_port_module_event module_event; - }; -}; - -typedef void (*sphw_event_handler)(void *handle, struct sphw_event_info *event); - -/* * - * @brief sphw_event_register - register hardware event - * @param dev: device pointer to hwdev - * @param pri_handle: private data will be used by the callback - * @param callback: callback function - */ -void sphw_event_register(void *dev, void *pri_handle, sphw_event_handler callback); - -/* * - * @brief sphw_event_unregister - unregister hardware event - * @param dev: device pointer to hwdev - */ -void sphw_event_unregister(void *dev); - -/* * - * @brief sphw_set_msix_auto_mask_state - set msix auto mask function - * @param hwdev: device pointer to hwdev - * @param msix_idx: msix id - * @param flag: msix auto_mask flag, 1-enable, 2-clear - */ -void sphw_set_msix_auto_mask_state(void *hwdev, u16 msix_idx, enum sphw_msix_auto_mask flag); - -/* * - * @brief sphw_set_msix_state - set msix state - * @param hwdev: device pointer to hwdev - * @param msix_idx: msix id - * @param flag: msix state flag, 0-enable, 1-disable - */ -void sphw_set_msix_state(void *hwdev, u16 msix_idx, enum sphw_msix_state flag); - -/* * - * @brief sphw_misx_intr_clear_resend_bit - clear msix resend bit - * @param hwdev: device pointer to hwdev - * @param msix_idx: msix id - * @param clear_resend_en: 1-clear - */ -void sphw_misx_intr_clear_resend_bit(void *hwdev, u16 msix_idx, u8 clear_resend_en); - -/* * - * @brief sphw_set_interrupt_cfg_direct - set interrupt cfg - * @param hwdev: device pointer to hwdev - * @param interrupt_para: interrupt info - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_set_interrupt_cfg_direct(void *hwdev, struct interrupt_info *interrupt_para, u16 channel); - -int sphw_set_interrupt_cfg(void *hwdev, struct interrupt_info interrupt_info, u16 channel); - -/* * - * @brief sphw_get_interrupt_cfg - get interrupt cfg - * @param dev: device pointer to hwdev - * @param info: interrupt info - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_get_interrupt_cfg(void *dev, struct interrupt_info *info, u16 channel); - -/* * - * @brief sphw_alloc_irqs - alloc irq - * @param hwdev: device pointer to hwdev - * @param type: service type - * @param num: alloc number - * @param irq_info_array: alloc irq info - * @param act_num: alloc actual number - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_alloc_irqs(void *hwdev, enum sphw_service_type type, u16 num, - struct irq_info *irq_info_array, u16 *act_num); - -/* * - * @brief sphw_free_irq - free irq - * @param hwdev: device pointer to hwdev - * @param type: service type - * @param irq_id: irq id - */ -void sphw_free_irq(void *hwdev, enum sphw_service_type type, u32 irq_id); - -/* * - * @brief sphw_alloc_ceqs - alloc ceqs - * @param hwdev: device pointer to hwdev - * @param type: service type - * @param num: alloc ceq number - * @param ceq_id_array: alloc ceq_id_array - * @param act_num: alloc actual number - * @retval zero: success - * @retval 
non-zero: failure - */ -int sphw_alloc_ceqs(void *hwdev, enum sphw_service_type type, int num, int *ceq_id_array, - int *act_num); - -/* * - * @brief sphw_free_irq - free ceq - * @param hwdev: device pointer to hwdev - * @param type: service type - * @param irq_id: ceq id - */ -void sphw_free_ceq(void *hwdev, enum sphw_service_type type, int ceq_id); - -/* * - * @brief sphw_get_pcidev_hdl - get pcidev_hdl - * @param hwdev: device pointer to hwdev - * @retval non-null: success - * @retval null: failure - */ -void *sphw_get_pcidev_hdl(void *hwdev); - -/* * - * @brief sphw_ppf_idx - get ppf id - * @param hwdev: device pointer to hwdev - * @retval ppf id - */ -u8 sphw_ppf_idx(void *hwdev); - -/* * - * @brief sphw_get_chip_present_flag - get chip present flag - * @param hwdev: device pointer to hwdev - * @retval 1: chip is present - * @retval 0: chip is absent - */ -int sphw_get_chip_present_flag(const void *hwdev); - -/* * - * @brief sphw_support_nic - function support nic - * @param hwdev: device pointer to hwdev - * @param cap: nic service capbility - * @retval true: function support nic - * @retval false: function not support nic - */ -bool sphw_support_nic(void *hwdev, struct nic_service_cap *cap); - -/* * - * @brief sphw_support_ipsec - function support ipsec - * @param hwdev: device pointer to hwdev - * @param cap: ipsec service capbility - * @retval true: function support ipsec - * @retval false: function not support ipsec - */ -bool sphw_support_ipsec(void *hwdev, struct ipsec_service_cap *cap); - -/* * - * @brief sphw_support_roce - function support roce - * @param hwdev: device pointer to hwdev - * @param cap: roce service capbility - * @retval true: function support roce - * @retval false: function not support roce - */ -bool sphw_support_roce(void *hwdev, struct rdma_service_cap *cap); - -/* * - * @brief sphw_support_fc - function support fc - * @param hwdev: device pointer to hwdev - * @param cap: fc service capbility - * @retval true: function support fc - * @retval false: function not support fc - */ -bool sphw_support_fc(void *hwdev, struct fc_service_cap *cap); - -/* * - * @brief sphw_support_rdma - function support rdma - * @param hwdev: device pointer to hwdev - * @param cap: rdma service capbility - * @retval true: function support rdma - * @retval false: function not support rdma - */ -bool sphw_support_rdma(void *hwdev, struct rdma_service_cap *cap); - -/* * - * @brief sphw_support_ovs - function support ovs - * @param hwdev: device pointer to hwdev - * @param cap: ovs service capbility - * @retval true: function support ovs - * @retval false: function not support ovs - */ -bool sphw_support_ovs(void *hwdev, struct ovs_service_cap *cap); - -/* * - * @brief sphw_support_toe - sync time to hardware - * @param hwdev: device pointer to hwdev - * @param cap: toe service capbility - * @retval zero: success - * @retval non-zero: failure - */ -bool sphw_support_toe(void *hwdev, struct toe_service_cap *cap); - -/* * - * @brief sphw_sync_time - sync time to hardware - * @param hwdev: device pointer to hwdev - * @param time: time to sync - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_sync_time(void *hwdev, u64 time); - -/* * - * @brief sphw_disable_mgmt_msg_report - disable mgmt report msg - * @param hwdev: device pointer to hwdev - */ -void sphw_disable_mgmt_msg_report(void *hwdev); - -/* * - * @brief sphw_func_for_mgmt - get function service type - * @param hwdev: device pointer to hwdev - * @retval true: function for mgmt - * @retval false: function is not 
for mgmt - */ -bool sphw_func_for_mgmt(void *hwdev); - -/* * - * @brief sphw_set_pcie_order_cfg - set pcie order cfg - * @param handle: device pointer to hwdev - */ -void sphw_set_pcie_order_cfg(void *handle); - -/* * - * @brief sphw_init_hwdev - call to init hwdev - * @param para: device pointer to para - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_init_hwdev(struct sphw_init_para *para); - -/* * - * @brief sphw_free_hwdev - free hwdev - * @param hwdev: device pointer to hwdev - */ -void sphw_free_hwdev(void *hwdev); - -/* * - * @brief sphw_detect_hw_present - detect hardware present - * @param hwdev: device pointer to hwdev - */ -void sphw_detect_hw_present(void *hwdev); - -/* * - * @brief sphw_record_pcie_error - record pcie error - * @param hwdev: device pointer to hwdev - */ -void sphw_record_pcie_error(void *hwdev); - -/* * - * @brief sphw_shutdown_hwdev - shutdown hwdev - * @param hwdev: device pointer to hwdev - */ -void sphw_shutdown_hwdev(void *hwdev); - -/* * - * @brief sphw_get_mgmt_version - get management cpu version - * @param hwdev: device pointer to hwdev - * @param mgmt_ver: output management version - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_get_mgmt_version(void *hwdev, u8 *mgmt_ver, u8 version_size, u16 channel); - -/* * - * @brief sphw_get_fw_version - get firmware version - * @param hwdev: device pointer to hwdev - * @param fw_ver: firmware version - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_get_fw_version(void *hwdev, struct sphw_fw_version *fw_ver, u16 channel); - -/* * - * @brief sphw_global_func_id - get global function id - * @param hwdev: device pointer to hwdev - * @retval global function id - */ -u16 sphw_global_func_id(void *hwdev); - -/* * - * @brief sphw_vector_to_eqn - vector to eq id - * @param hwdev: device pointer to hwdev - * @param type: service type - * @param vector: vertor - * @retval eq id - */ -int sphw_vector_to_eqn(void *hwdev, enum sphw_service_type type, int vector); - -/* * - * @brief sphw_glb_pf_vf_offset - get vf offset id of pf - * @param hwdev: device pointer to hwdev - * @retval vf offset id - */ -u16 sphw_glb_pf_vf_offset(void *hwdev); - -/* * - * @brief sphw_pf_id_of_vf - get pf id of vf - * @param hwdev: device pointer to hwdev - * @retval pf id - */ -u8 sphw_pf_id_of_vf(void *hwdev); - -/* * - * @brief sphw_func_type - get function type - * @param hwdev: device pointer to hwdev - * @retval function type - */ -enum func_type sphw_func_type(void *hwdev); - -/* * - * @brief sphw_host_oq_id_mask - get oq id - * @param hwdev: device pointer to hwdev - * @retval oq id - */ -u8 sphw_host_oq_id_mask(void *hwdev); - -/* * - * @brief sphw_host_id - get host id - * @param hwdev: device pointer to hwdev - * @retval host id - */ -u8 sphw_host_id(void *hwdev); - -/* * - * @brief sphw_func_max_qnum - get host total function number - * @param hwdev: device pointer to hwdev - * @retval non-zero: host total function number - * @retval zero: failure - */ -u16 sphw_host_total_func(void *hwdev); - -/* * - * @brief sphw_func_max_qnum - get max nic queue number - * @param hwdev: device pointer to hwdev - * @retval non-zero: max nic queue number - * @retval zero: failure - */ -u16 sphw_func_max_nic_qnum(void *hwdev); - -/* * - * @brief sphw_func_max_qnum - get max queue number - * @param hwdev: device pointer to hwdev - * @retval non-zero: max queue number - * @retval zero: failure - */ -u16 sphw_func_max_qnum(void 
*hwdev); - -/* * - * @brief sphw_er_id - get ep id - * @param hwdev: device pointer to hwdev - * @retval ep id - */ -u8 sphw_ep_id(void *hwdev); /* Obtain service_cap.ep_id */ - -/* * - * @brief sphw_er_id - get er id - * @param hwdev: device pointer to hwdev - * @retval er id - */ -u8 sphw_er_id(void *hwdev); /* Obtain service_cap.er_id */ - -/* * - * @brief sphw_physical_port_id - get physical port id - * @param hwdev: device pointer to hwdev - * @retval physical port id - */ -u8 sphw_physical_port_id(void *hwdev); /* Obtain service_cap.port_id */ - -/* * - * @brief sphw_func_max_vf - get vf number - * @param hwdev: device pointer to hwdev - * @retval non-zero: vf number - * @retval zero: failure - */ -u16 sphw_func_max_vf(void *hwdev); /* Obtain service_cap.max_vf */ - -/* @brief sphw_max_pf_num - get global max pf number - */ -u8 sphw_max_pf_num(void *hwdev); - -/* * - * @brief sphw_host_pf_num - get current host pf number - * @param hwdev: device pointer to hwdev - * @retval non-zero: pf number - * @retval zero: failure - */ -u32 sphw_host_pf_num(void *hwdev); /* Obtain service_cap.pf_num */ - -/* * - * @brief sphw_pcie_itf_id - get pcie port id - * @param hwdev: device pointer to hwdev - * @retval pcie port id - */ -u8 sphw_pcie_itf_id(void *hwdev); - -/* * - * @brief sphw_vf_in_pf - get vf offset in pf - * @param hwdev: device pointer to hwdev - * @retval vf offset in pf - */ -u8 sphw_vf_in_pf(void *hwdev); - -/* * - * @brief sphw_cos_valid_bitmap - get cos valid bitmap - * @param hwdev: device pointer to hwdev - * @retval non-zero: valid cos bit map - * @retval zero: failure - */ -u8 sphw_cos_valid_bitmap(void *hwdev); - -/* * - * @brief sphw_get_card_present_state - get card present state - * @param hwdev: device pointer to hwdev - * @param card_present_state: return card present state - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_get_card_present_state(void *hwdev, bool *card_present_state); - -/* * - * @brief sphw_func_rx_tx_flush - function flush - * @param hwdev: device pointer to hwdev - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_func_rx_tx_flush(void *hwdev, u16 channel); - -/* * - * @brief sphw_flush_mgmt_workq - when remove function should flush work queue - * @param hwdev: device pointer to hwdev - */ -void sphw_flush_mgmt_workq(void *hwdev); - -/* @brief sphw_ceq_num get toe ceq num - */ -u8 sphw_ceq_num(void *hwdev); - -/* * - * @brief sphw_intr_num get intr num - */ -u16 sphw_intr_num(void *hwdev); - -/* @brief sphw_flexq_en get flexq en - */ -u8 sphw_flexq_en(void *hwdev); - -/** - * @brief sphw_fault_event_report - report fault event - * @param hwdev: device pointer to hwdev - * @param src: fault event source, reference to enum sphw_fault_source_type - * @param level: fault level, reference to enum sphw_fault_err_level - */ -void sphw_fault_event_report(void *hwdev, u16 src, u16 level); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_csr.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_csr.h deleted file mode 100644 index d283c1456615..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_csr.h +++ /dev/null @@ -1,158 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_CSR_H -#define SPHW_CSR_H - -/* bit30/bit31 for bar index flag - * 00: bar0 - * 01: bar1 - * 10: bar2 - * 11: bar3 - */ -#define SPHW_CFG_REGS_FLAG 0x40000000 - -#define SPHW_MGMT_REGS_FLAG 0xC0000000 - -#define SPHW_REGS_FLAG_MAKS 
0x3FFFFFFF - -#define SPHW_VF_CFG_REG_OFFSET 0x2000 - -#define SPHW_HOST_CSR_BASE_ADDR (SPHW_MGMT_REGS_FLAG + 0x6000) -#define SPHW_CSR_GLOBAL_BASE_ADDR (SPHW_MGMT_REGS_FLAG + 0x6400) - -/* HW interface registers */ -#define SPHW_CSR_FUNC_ATTR0_ADDR (SPHW_CFG_REGS_FLAG + 0x0) -#define SPHW_CSR_FUNC_ATTR1_ADDR (SPHW_CFG_REGS_FLAG + 0x4) -#define SPHW_CSR_FUNC_ATTR2_ADDR (SPHW_CFG_REGS_FLAG + 0x8) -#define SPHW_CSR_FUNC_ATTR3_ADDR (SPHW_CFG_REGS_FLAG + 0xC) -#define SPHW_CSR_FUNC_ATTR4_ADDR (SPHW_CFG_REGS_FLAG + 0x10) -#define SPHW_CSR_FUNC_ATTR5_ADDR (SPHW_CFG_REGS_FLAG + 0x14) -#define SPHW_CSR_FUNC_ATTR6_ADDR (SPHW_CFG_REGS_FLAG + 0x18) - -#define SPHW_FUNC_CSR_MAILBOX_DATA_OFF 0x80 -#define SPHW_FUNC_CSR_MAILBOX_CONTROL_OFF \ - (SPHW_CFG_REGS_FLAG + 0x0100) -#define SPHW_FUNC_CSR_MAILBOX_INT_OFFSET_OFF \ - (SPHW_CFG_REGS_FLAG + 0x0104) -#define SPHW_FUNC_CSR_MAILBOX_RESULT_H_OFF \ - (SPHW_CFG_REGS_FLAG + 0x0108) -#define SPHW_FUNC_CSR_MAILBOX_RESULT_L_OFF \ - (SPHW_CFG_REGS_FLAG + 0x010C) - -#define SPHW_PPF_ELECTION_OFFSET 0x0 -#define SPHW_MPF_ELECTION_OFFSET 0x20 - -#define SPHW_CSR_PPF_ELECTION_ADDR \ - (SPHW_HOST_CSR_BASE_ADDR + SPHW_PPF_ELECTION_OFFSET) - -#define SPHW_CSR_GLOBAL_MPF_ELECTION_ADDR \ - (SPHW_HOST_CSR_BASE_ADDR + SPHW_MPF_ELECTION_OFFSET) - -#define SPHW_CSR_FUNC_PPF_ELECT_BASE_ADDR (SPHW_CFG_REGS_FLAG + 0x60) -#define SPHW_CSR_FUNC_PPF_ELECT_PORT_STRIDE 0x4 - -#define SPHW_CSR_FUNC_PPF_ELECT(host_idx) \ - (SPHW_CSR_FUNC_PPF_ELECT_BASE_ADDR + \ - (host_idx) * SPHW_CSR_FUNC_PPF_ELECT_PORT_STRIDE) - -#define SPHW_CSR_DMA_ATTR_TBL_ADDR (SPHW_CFG_REGS_FLAG + 0x380) -#define SPHW_CSR_DMA_ATTR_INDIR_IDX_ADDR (SPHW_CFG_REGS_FLAG + 0x390) - -/* MSI-X registers */ -#define SPHW_CSR_MSIX_INDIR_IDX_ADDR (SPHW_CFG_REGS_FLAG + 0x310) -#define SPHW_CSR_MSIX_CTRL_ADDR (SPHW_CFG_REGS_FLAG + 0x300) -#define SPHW_CSR_MSIX_CNT_ADDR (SPHW_CFG_REGS_FLAG + 0x304) -#define SPHW_CSR_FUNC_MSI_CLR_WR_ADDR (SPHW_CFG_REGS_FLAG + 0x58) - -#define SPHW_MSI_CLR_INDIR_RESEND_TIMER_CLR_SHIFT 0 -#define SPHW_MSI_CLR_INDIR_INT_MSK_SET_SHIFT 1 -#define SPHW_MSI_CLR_INDIR_INT_MSK_CLR_SHIFT 2 -#define SPHW_MSI_CLR_INDIR_AUTO_MSK_SET_SHIFT 3 -#define SPHW_MSI_CLR_INDIR_AUTO_MSK_CLR_SHIFT 4 -#define SPHW_MSI_CLR_INDIR_SIMPLE_INDIR_IDX_SHIFT 22 - -#define SPHW_MSI_CLR_INDIR_RESEND_TIMER_CLR_MASK 0x1U -#define SPHW_MSI_CLR_INDIR_INT_MSK_SET_MASK 0x1U -#define SPHW_MSI_CLR_INDIR_INT_MSK_CLR_MASK 0x1U -#define SPHW_MSI_CLR_INDIR_AUTO_MSK_SET_MASK 0x1U -#define SPHW_MSI_CLR_INDIR_AUTO_MSK_CLR_MASK 0x1U -#define SPHW_MSI_CLR_INDIR_SIMPLE_INDIR_IDX_MASK 0x3FFU - -#define SPHW_MSI_CLR_INDIR_SET(val, member) \ - (((val) & SPHW_MSI_CLR_INDIR_##member##_MASK) << \ - SPHW_MSI_CLR_INDIR_##member##_SHIFT) - -/* EQ registers */ -#define SPHW_AEQ_INDIR_IDX_ADDR (SPHW_CFG_REGS_FLAG + 0x210) -#define SPHW_CEQ_INDIR_IDX_ADDR (SPHW_CFG_REGS_FLAG + 0x290) - -#define SPHW_EQ_INDIR_IDX_ADDR(type) \ - ((type == SPHW_AEQ) ? 
SPHW_AEQ_INDIR_IDX_ADDR : SPHW_CEQ_INDIR_IDX_ADDR) - -#define SPHW_AEQ_MTT_OFF_BASE_ADDR (SPHW_CFG_REGS_FLAG + 0x240) -#define SPHW_CEQ_MTT_OFF_BASE_ADDR (SPHW_CFG_REGS_FLAG + 0x2C0) - -#define SPHW_CSR_EQ_PAGE_OFF_STRIDE 8 - -#define SPHW_AEQ_HI_PHYS_ADDR_REG(pg_num) \ - (SPHW_AEQ_MTT_OFF_BASE_ADDR + \ - (pg_num) * SPHW_CSR_EQ_PAGE_OFF_STRIDE) - -#define SPHW_AEQ_LO_PHYS_ADDR_REG(pg_num) \ - (SPHW_AEQ_MTT_OFF_BASE_ADDR + \ - (pg_num) * SPHW_CSR_EQ_PAGE_OFF_STRIDE + 4) - -#define SPHW_CEQ_HI_PHYS_ADDR_REG(pg_num) \ - (SPHW_CEQ_MTT_OFF_BASE_ADDR + \ - (pg_num) * SPHW_CSR_EQ_PAGE_OFF_STRIDE) - -#define SPHW_CEQ_LO_PHYS_ADDR_REG(pg_num) \ - (SPHW_CEQ_MTT_OFF_BASE_ADDR + \ - (pg_num) * SPHW_CSR_EQ_PAGE_OFF_STRIDE + 4) - -#define SPHW_CSR_AEQ_CTRL_0_ADDR (SPHW_CFG_REGS_FLAG + 0x200) -#define SPHW_CSR_AEQ_CTRL_1_ADDR (SPHW_CFG_REGS_FLAG + 0x204) -#define SPHW_CSR_AEQ_CONS_IDX_ADDR (SPHW_CFG_REGS_FLAG + 0x208) -#define SPHW_CSR_AEQ_PROD_IDX_ADDR (SPHW_CFG_REGS_FLAG + 0x20C) -#define SPHW_CSR_AEQ_CI_SIMPLE_INDIR_ADDR (SPHW_CFG_REGS_FLAG + 0x50) - -#define SPHW_CSR_CEQ_CTRL_0_ADDR (SPHW_CFG_REGS_FLAG + 0x280) -#define SPHW_CSR_CEQ_CTRL_1_ADDR (SPHW_CFG_REGS_FLAG + 0x284) -#define SPHW_CSR_CEQ_CONS_IDX_ADDR (SPHW_CFG_REGS_FLAG + 0x288) -#define SPHW_CSR_CEQ_PROD_IDX_ADDR (SPHW_CFG_REGS_FLAG + 0x28c) -#define SPHW_CSR_CEQ_CI_SIMPLE_INDIR_ADDR (SPHW_CFG_REGS_FLAG + 0x54) - -/* API CMD registers */ -#define SPHW_CSR_API_CMD_BASE (SPHW_MGMT_REGS_FLAG + 0x2000) - -#define SPHW_CSR_API_CMD_STRIDE 0x80 - -#define SPHW_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(idx) \ - (SPHW_CSR_API_CMD_BASE + 0x0 + (idx) * SPHW_CSR_API_CMD_STRIDE) - -#define SPHW_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(idx) \ - (SPHW_CSR_API_CMD_BASE + 0x4 + (idx) * SPHW_CSR_API_CMD_STRIDE) - -#define SPHW_CSR_API_CMD_STATUS_HI_ADDR(idx) \ - (SPHW_CSR_API_CMD_BASE + 0x8 + (idx) * SPHW_CSR_API_CMD_STRIDE) - -#define SPHW_CSR_API_CMD_STATUS_LO_ADDR(idx) \ - (SPHW_CSR_API_CMD_BASE + 0xC + (idx) * SPHW_CSR_API_CMD_STRIDE) - -#define SPHW_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(idx) \ - (SPHW_CSR_API_CMD_BASE + 0x10 + (idx) * SPHW_CSR_API_CMD_STRIDE) - -#define SPHW_CSR_API_CMD_CHAIN_CTRL_ADDR(idx) \ - (SPHW_CSR_API_CMD_BASE + 0x14 + (idx) * SPHW_CSR_API_CMD_STRIDE) - -#define SPHW_CSR_API_CMD_CHAIN_PI_ADDR(idx) \ - (SPHW_CSR_API_CMD_BASE + 0x1C + (idx) * SPHW_CSR_API_CMD_STRIDE) - -#define SPHW_CSR_API_CMD_CHAIN_REQ_ADDR(idx) \ - (SPHW_CSR_API_CMD_BASE + 0x20 + (idx) * SPHW_CSR_API_CMD_STRIDE) - -#define SPHW_CSR_API_CMD_STATUS_0_ADDR(idx) \ - (SPHW_CSR_API_CMD_BASE + 0x30 + (idx) * SPHW_CSR_API_CMD_STRIDE) - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_eqs.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_eqs.c deleted file mode 100644 index 24c55d656f9c..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_eqs.c +++ /dev/null @@ -1,1272 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt - -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/interrupt.h> -#include <linux/workqueue.h> -#include <linux/pci.h> -#include <linux/kernel.h> -#include <linux/device.h> -#include <linux/dma-mapping.h> -#include <linux/module.h> -#include <linux/spinlock.h> -#include <linux/delay.h> - -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "sphw_common.h" -#include "sphw_hwdev.h" -#include "sphw_hwif.h" -#include "sphw_hw.h" -#include "sphw_csr.h" -#include "sphw_hw_comm.h" -#include "sphw_prof_adap.h" -#include "sphw_eqs.h" - -#define 
SPHW_EQS_WQ_NAME "sphw_eqs" - -#define AEQ_CTRL_0_INTR_IDX_SHIFT 0 -#define AEQ_CTRL_0_DMA_ATTR_SHIFT 12 -#define AEQ_CTRL_0_PCI_INTF_IDX_SHIFT 20 -#define AEQ_CTRL_0_INTR_MODE_SHIFT 31 - -#define AEQ_CTRL_0_INTR_IDX_MASK 0x3FFU -#define AEQ_CTRL_0_DMA_ATTR_MASK 0x3FU -#define AEQ_CTRL_0_PCI_INTF_IDX_MASK 0x7U -#define AEQ_CTRL_0_INTR_MODE_MASK 0x1U - -#define AEQ_CTRL_0_SET(val, member) \ - (((val) & AEQ_CTRL_0_##member##_MASK) << \ - AEQ_CTRL_0_##member##_SHIFT) - -#define AEQ_CTRL_0_CLEAR(val, member) \ - ((val) & (~(AEQ_CTRL_0_##member##_MASK << \ - AEQ_CTRL_0_##member##_SHIFT))) - -#define AEQ_CTRL_1_LEN_SHIFT 0 -#define AEQ_CTRL_1_ELEM_SIZE_SHIFT 24 -#define AEQ_CTRL_1_PAGE_SIZE_SHIFT 28 - -#define AEQ_CTRL_1_LEN_MASK 0x1FFFFFU -#define AEQ_CTRL_1_ELEM_SIZE_MASK 0x3U -#define AEQ_CTRL_1_PAGE_SIZE_MASK 0xFU - -#define AEQ_CTRL_1_SET(val, member) \ - (((val) & AEQ_CTRL_1_##member##_MASK) << \ - AEQ_CTRL_1_##member##_SHIFT) - -#define AEQ_CTRL_1_CLEAR(val, member) \ - ((val) & (~(AEQ_CTRL_1_##member##_MASK << \ - AEQ_CTRL_1_##member##_SHIFT))) - -#define SPHW_EQ_PROD_IDX_MASK 0xFFFFF -#define SPHW_TASK_PROCESS_EQE_LIMIT 1024 -#define SPHW_EQ_UPDATE_CI_STEP 64 - -static uint g_aeq_len = SPHW_DEFAULT_AEQ_LEN; -module_param(g_aeq_len, uint, 0444); -MODULE_PARM_DESC(g_aeq_len, - "aeq depth, valid range is " __stringify(SPHW_MIN_AEQ_LEN) - " - " __stringify(SPHW_MAX_AEQ_LEN)); - -static uint g_ceq_len = SPHW_DEFAULT_CEQ_LEN; -module_param(g_ceq_len, uint, 0444); -MODULE_PARM_DESC(g_ceq_len, - "ceq depth, valid range is " __stringify(SPHW_MIN_CEQ_LEN) - " - " __stringify(SPHW_MAX_CEQ_LEN)); - -static uint g_num_ceqe_in_tasklet = SPHW_TASK_PROCESS_EQE_LIMIT; -module_param(g_num_ceqe_in_tasklet, uint, 0444); -MODULE_PARM_DESC(g_num_ceqe_in_tasklet, - "The max number of ceqe can be processed in tasklet, default = 1024"); - -#define CEQ_CTRL_0_INTR_IDX_SHIFT 0 -#define CEQ_CTRL_0_DMA_ATTR_SHIFT 12 -#define CEQ_CTRL_0_LIMIT_KICK_SHIFT 20 -#define CEQ_CTRL_0_PCI_INTF_IDX_SHIFT 24 -#define CEQ_CTRL_0_PAGE_SIZE_SHIFT 27 -#define CEQ_CTRL_0_INTR_MODE_SHIFT 31 - -#define CEQ_CTRL_0_INTR_IDX_MASK 0x3FFU -#define CEQ_CTRL_0_DMA_ATTR_MASK 0x3FU -#define CEQ_CTRL_0_LIMIT_KICK_MASK 0xFU -#define CEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3U -#define CEQ_CTRL_0_PAGE_SIZE_MASK 0xF -#define CEQ_CTRL_0_INTR_MODE_MASK 0x1U - -#define CEQ_CTRL_0_SET(val, member) \ - (((val) & CEQ_CTRL_0_##member##_MASK) << \ - CEQ_CTRL_0_##member##_SHIFT) - -#define CEQ_CTRL_1_LEN_SHIFT 0 -#define CEQ_CTRL_1_GLB_FUNC_ID_SHIFT 20 - -#define CEQ_CTRL_1_LEN_MASK 0xFFFFFU -#define CEQ_CTRL_1_GLB_FUNC_ID_MASK 0xFFFU - -#define CEQ_CTRL_1_SET(val, member) \ - (((val) & CEQ_CTRL_1_##member##_MASK) << \ - CEQ_CTRL_1_##member##_SHIFT) - -#define EQ_ELEM_DESC_TYPE_SHIFT 0 -#define EQ_ELEM_DESC_SRC_SHIFT 7 -#define EQ_ELEM_DESC_SIZE_SHIFT 8 -#define EQ_ELEM_DESC_WRAPPED_SHIFT 31 - -#define EQ_ELEM_DESC_TYPE_MASK 0x7FU -#define EQ_ELEM_DESC_SRC_MASK 0x1U -#define EQ_ELEM_DESC_SIZE_MASK 0xFFU -#define EQ_ELEM_DESC_WRAPPED_MASK 0x1U - -#define EQ_ELEM_DESC_GET(val, member) \ - (((val) >> EQ_ELEM_DESC_##member##_SHIFT) & \ - EQ_ELEM_DESC_##member##_MASK) - -#define EQ_CONS_IDX_CONS_IDX_SHIFT 0 -#define EQ_CONS_IDX_INT_ARMED_SHIFT 31 - -#define EQ_CONS_IDX_CONS_IDX_MASK 0x1FFFFFU -#define EQ_CONS_IDX_INT_ARMED_MASK 0x1U - -#define EQ_CONS_IDX_SET(val, member) \ - (((val) & EQ_CONS_IDX_##member##_MASK) << \ - EQ_CONS_IDX_##member##_SHIFT) - -#define EQ_CONS_IDX_CLEAR(val, member) \ - ((val) & (~(EQ_CONS_IDX_##member##_MASK << \ - 
EQ_CONS_IDX_##member##_SHIFT))) - -#define EQ_CI_SIMPLE_INDIR_CI_SHIFT 0 -#define EQ_CI_SIMPLE_INDIR_ARMED_SHIFT 21 -#define EQ_CI_SIMPLE_INDIR_AEQ_IDX_SHIFT 30 -#define EQ_CI_SIMPLE_INDIR_CEQ_IDX_SHIFT 24 - -#define EQ_CI_SIMPLE_INDIR_CI_MASK 0x1FFFFFU -#define EQ_CI_SIMPLE_INDIR_ARMED_MASK 0x1U -#define EQ_CI_SIMPLE_INDIR_AEQ_IDX_MASK 0x3U -#define EQ_CI_SIMPLE_INDIR_CEQ_IDX_MASK 0xFFU - -#define EQ_CI_SIMPLE_INDIR_SET(val, member) \ - (((val) & EQ_CI_SIMPLE_INDIR_##member##_MASK) << \ - EQ_CI_SIMPLE_INDIR_##member##_SHIFT) - -#define EQ_CI_SIMPLE_INDIR_CLEAR(val, member) \ - ((val) & (~(EQ_CI_SIMPLE_INDIR_##member##_MASK << \ - EQ_CI_SIMPLE_INDIR_##member##_SHIFT))) - -#define EQ_WRAPPED(eq) ((u32)(eq)->wrapped << EQ_VALID_SHIFT) - -#define EQ_CONS_IDX(eq) ((eq)->cons_idx | \ - ((u32)(eq)->wrapped << EQ_WRAPPED_SHIFT)) - -#define EQ_CONS_IDX_REG_ADDR(eq) \ - (((eq)->type == SPHW_AEQ) ? \ - SPHW_CSR_AEQ_CONS_IDX_ADDR : \ - SPHW_CSR_CEQ_CONS_IDX_ADDR) -#define EQ_CI_SIMPLE_INDIR_REG_ADDR(eq) \ - (((eq)->type == SPHW_AEQ) ? \ - SPHW_CSR_AEQ_CI_SIMPLE_INDIR_ADDR : \ - SPHW_CSR_CEQ_CI_SIMPLE_INDIR_ADDR) - -#define EQ_PROD_IDX_REG_ADDR(eq) \ - (((eq)->type == SPHW_AEQ) ? \ - SPHW_CSR_AEQ_PROD_IDX_ADDR : \ - SPHW_CSR_CEQ_PROD_IDX_ADDR) - -#define SPHW_EQ_HI_PHYS_ADDR_REG(type, pg_num) \ - ((u32)((type == SPHW_AEQ) ? \ - SPHW_AEQ_HI_PHYS_ADDR_REG(pg_num) : \ - SPHW_CEQ_HI_PHYS_ADDR_REG(pg_num))) - -#define SPHW_EQ_LO_PHYS_ADDR_REG(type, pg_num) \ - ((u32)((type == SPHW_AEQ) ? \ - SPHW_AEQ_LO_PHYS_ADDR_REG(pg_num) : \ - SPHW_CEQ_LO_PHYS_ADDR_REG(pg_num))) - -#define GET_EQ_NUM_PAGES(eq, size) \ - ((u16)(ALIGN((u32)((eq)->eq_len * (eq)->elem_size), \ - (size)) / (size))) - -#define SPHW_EQ_MAX_PAGES(eq) \ - ((eq)->type == SPHW_AEQ ? SPHW_AEQ_MAX_PAGES : \ - SPHW_CEQ_MAX_PAGES) - -#define GET_EQ_NUM_ELEMS(eq, pg_size) ((pg_size) / (u32)(eq)->elem_size) - -#define GET_EQ_ELEMENT(eq, idx) \ - (((u8 *)(eq)->eq_pages[(idx) / (eq)->num_elem_in_pg].align_vaddr) + \ - (u32)(((idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size)) - -#define GET_AEQ_ELEM(eq, idx) \ - ((struct sphw_aeq_elem *)GET_EQ_ELEMENT((eq), (idx))) - -#define GET_CEQ_ELEM(eq, idx) ((u32 *)GET_EQ_ELEMENT((eq), (idx))) - -#define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM((eq), (eq)->cons_idx) - -#define GET_CURR_CEQ_ELEM(eq) GET_CEQ_ELEM((eq), (eq)->cons_idx) - -#define PAGE_IN_4K(page_size) ((page_size) >> 12) -#define EQ_SET_HW_PAGE_SIZE_VAL(eq) \ - ((u32)ilog2(PAGE_IN_4K((eq)->page_size))) - -#define ELEMENT_SIZE_IN_32B(eq) (((eq)->elem_size) >> 5) -#define EQ_SET_HW_ELEM_SIZE_VAL(eq) ((u32)ilog2(ELEMENT_SIZE_IN_32B(eq))) - -#define AEQ_DMA_ATTR_DEFAULT 0 -#define CEQ_DMA_ATTR_DEFAULT 0 - -#define CEQ_LMT_KICK_DEFAULT 0 - -#define EQ_MSIX_RESEND_TIMER_CLEAR 1 - -#define EQ_WRAPPED_SHIFT 20 - -#define EQ_VALID_SHIFT 31 - -#define CEQE_TYPE_SHIFT 23 -#define CEQE_TYPE_MASK 0x7 - -#define CEQE_TYPE(type) (((type) >> CEQE_TYPE_SHIFT) & \ - CEQE_TYPE_MASK) - -#define CEQE_DATA_MASK 0x3FFFFFF -#define CEQE_DATA(data) ((data) & CEQE_DATA_MASK) - -#define aeq_to_aeqs(eq) \ - container_of((eq) - (eq)->q_id, struct sphw_aeqs, aeq[0]) - -#define ceq_to_ceqs(eq) \ - container_of((eq) - (eq)->q_id, struct sphw_ceqs, ceq[0]) - -static irqreturn_t ceq_interrupt(int irq, void *data); -static irqreturn_t aeq_interrupt(int irq, void *data); - -static void ceq_tasklet(ulong eq_tasklet); - -/** - * sphw_aeq_register_hw_cb - register aeq callback for specific event - * @hwdev: the pointer to hw device - * @event: event for the handler - * @hw_cb: callback 
function - **/ -int sphw_aeq_register_hw_cb(void *hwdev, enum sphw_aeq_type event, sphw_aeq_hwe_cb hwe_cb) -{ - struct sphw_aeqs *aeqs = NULL; - - if (!hwdev || !hwe_cb || event >= SPHW_MAX_AEQ_EVENTS) - return -EINVAL; - - aeqs = ((struct sphw_hwdev *)hwdev)->aeqs; - - aeqs->aeq_hwe_cb[event] = hwe_cb; - - set_bit(SPHW_AEQ_HW_CB_REG, &aeqs->aeq_hw_cb_state[event]); - - return 0; -} - -/** - * sphw_aeq_unregister_hw_cb - unregister the aeq callback for specific event - * @hwdev: the pointer to hw device - * @event: event for the handler - **/ -void sphw_aeq_unregister_hw_cb(void *hwdev, enum sphw_aeq_type event) -{ - struct sphw_aeqs *aeqs = NULL; - - if (!hwdev || event >= SPHW_MAX_AEQ_EVENTS) - return; - - aeqs = ((struct sphw_hwdev *)hwdev)->aeqs; - - clear_bit(SPHW_AEQ_HW_CB_REG, &aeqs->aeq_hw_cb_state[event]); - - while (test_bit(SPHW_AEQ_HW_CB_RUNNING, - &aeqs->aeq_hw_cb_state[event])) - usleep_range(EQ_USLEEP_LOW_BOUND, EQ_USLEEP_HIG_BOUND); - - aeqs->aeq_hwe_cb[event] = NULL; -} - -/** - * sphw_aeq_register_swe_cb - register aeq callback for sw event - * @hwdev: the pointer to hw device - * @event: soft event for the handler - * @sw_cb: callback function - **/ -int sphw_aeq_register_swe_cb(void *hwdev, enum sphw_aeq_sw_type event, sphw_aeq_swe_cb aeq_swe_cb) -{ - struct sphw_aeqs *aeqs = NULL; - - if (!hwdev || !aeq_swe_cb || event >= SPHW_MAX_AEQ_SW_EVENTS) - return -EINVAL; - - aeqs = ((struct sphw_hwdev *)hwdev)->aeqs; - - aeqs->aeq_swe_cb[event] = aeq_swe_cb; - - set_bit(SPHW_AEQ_SW_CB_REG, &aeqs->aeq_sw_cb_state[event]); - - return 0; -} - -/** - * sphw_aeq_unregister_swe_cb - unregister the aeq callback for sw event - * @hwdev: the pointer to hw device - * @event: soft event for the handler - **/ -void sphw_aeq_unregister_swe_cb(void *hwdev, enum sphw_aeq_sw_type event) -{ - struct sphw_aeqs *aeqs = NULL; - - if (!hwdev || event >= SPHW_MAX_AEQ_SW_EVENTS) - return; - - aeqs = ((struct sphw_hwdev *)hwdev)->aeqs; - - clear_bit(SPHW_AEQ_SW_CB_REG, &aeqs->aeq_sw_cb_state[event]); - - while (test_bit(SPHW_AEQ_SW_CB_RUNNING, - &aeqs->aeq_sw_cb_state[event])) - usleep_range(EQ_USLEEP_LOW_BOUND, EQ_USLEEP_HIG_BOUND); - - aeqs->aeq_swe_cb[event] = NULL; -} - -/** - * sphw_ceq_register_cb - register ceq callback for specific event - * @hwdev: the pointer to hw device - * @event: event for the handler - * @ceq_cb: callback function - **/ -int sphw_ceq_register_cb(void *hwdev, enum sphw_ceq_event event, sphw_ceq_event_cb callback) -{ - struct sphw_ceqs *ceqs = NULL; - - if (!hwdev || event >= SPHW_MAX_CEQ_EVENTS) - return -EINVAL; - - ceqs = ((struct sphw_hwdev *)hwdev)->ceqs; - - ceqs->ceq_cb[event] = callback; - - set_bit(SPHW_CEQ_CB_REG, &ceqs->ceq_cb_state[event]); - - return 0; -} - -/** - * sphw_ceq_unregister_cb - unregister ceq callback for specific event - * @hwdev: the pointer to hw device - * @event: event for the handler - **/ -void sphw_ceq_unregister_cb(void *hwdev, enum sphw_ceq_event event) -{ - struct sphw_ceqs *ceqs = NULL; - - if (!hwdev || event >= SPHW_MAX_CEQ_EVENTS) - return; - - ceqs = ((struct sphw_hwdev *)hwdev)->ceqs; - - clear_bit(SPHW_CEQ_CB_REG, &ceqs->ceq_cb_state[event]); - - while (test_bit(SPHW_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event])) - usleep_range(EQ_USLEEP_LOW_BOUND, EQ_USLEEP_HIG_BOUND); - - ceqs->ceq_cb[event] = NULL; -} - -/** - * set_eq_cons_idx - write the cons idx to the hw - * @eq: The event queue to update the cons idx for - * @cons idx: consumer index value - **/ -static void set_eq_cons_idx(struct sphw_eq *eq, u32 arm_state) -{ - u32 
eq_wrap_ci, val; - u32 addr = EQ_CI_SIMPLE_INDIR_REG_ADDR(eq); - - eq_wrap_ci = EQ_CONS_IDX(eq); - val = EQ_CI_SIMPLE_INDIR_SET(arm_state, ARMED); - if (eq->type == SPHW_AEQ) { - val = val | - EQ_CI_SIMPLE_INDIR_SET(eq_wrap_ci, CI) | - EQ_CI_SIMPLE_INDIR_SET(eq->q_id, AEQ_IDX); - } else { - val = val | - EQ_CI_SIMPLE_INDIR_SET(eq_wrap_ci, CI) | - EQ_CI_SIMPLE_INDIR_SET(eq->q_id, CEQ_IDX); - } - - sphw_hwif_write_reg(eq->hwdev->hwif, addr, val); -} - -/** - * ceq_event_handler - handle for the ceq events - * @ceqs: ceqs part of the chip - * @ceqe: ceq element of the event - **/ -static void ceq_event_handler(struct sphw_ceqs *ceqs, u32 ceqe) -{ - struct sphw_hwdev *hwdev = ceqs->hwdev; - enum sphw_ceq_event event = CEQE_TYPE(ceqe); - u32 ceqe_data = CEQE_DATA(ceqe); - - if (event >= SPHW_MAX_CEQ_EVENTS) { - sdk_err(hwdev->dev_hdl, "Ceq unknown event:%d, ceqe date: 0x%x\n", - event, ceqe_data); - return; - } - - set_bit(SPHW_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]); - - if (ceqs->ceq_cb[event] && - test_bit(SPHW_CEQ_CB_REG, &ceqs->ceq_cb_state[event])) - ceqs->ceq_cb[event](hwdev, ceqe_data); - - clear_bit(SPHW_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]); -} - -static void aeq_elem_handler(struct sphw_eq *eq, u32 aeqe_desc) -{ - struct sphw_aeqs *aeqs = aeq_to_aeqs(eq); - struct sphw_aeq_elem *aeqe_pos; - enum sphw_aeq_type event; - enum sphw_aeq_sw_type sw_type; - u32 sw_event; - u8 data[SPHW_AEQE_DATA_SIZE], size; - - aeqe_pos = GET_CURR_AEQ_ELEM(eq); - - event = EQ_ELEM_DESC_GET(aeqe_desc, TYPE); - if (EQ_ELEM_DESC_GET(aeqe_desc, SRC)) { - sw_event = event; - sw_type = sw_event >= SPHW_NIC_FATAL_ERROR_MAX ? - SPHW_STATEFULL_EVENT : - SPHW_STATELESS_EVENT; - /* SW event uses only the first 8B */ - memcpy(data, aeqe_pos->aeqe_data, SPHW_AEQE_DATA_SIZE); - sphw_be32_to_cpu(data, SPHW_AEQE_DATA_SIZE); - set_bit(SPHW_AEQ_SW_CB_RUNNING, - &aeqs->aeq_sw_cb_state[sw_type]); - if (aeqs->aeq_swe_cb[sw_type] && - test_bit(SPHW_AEQ_SW_CB_REG, - &aeqs->aeq_sw_cb_state[sw_type])) - aeqs->aeq_swe_cb[sw_type](aeqs->hwdev, event, data); - - clear_bit(SPHW_AEQ_SW_CB_RUNNING, - &aeqs->aeq_sw_cb_state[sw_type]); - return; - } - - if (event < SPHW_MAX_AEQ_EVENTS) { - memcpy(data, aeqe_pos->aeqe_data, SPHW_AEQE_DATA_SIZE); - sphw_be32_to_cpu(data, SPHW_AEQE_DATA_SIZE); - - size = EQ_ELEM_DESC_GET(aeqe_desc, SIZE); - set_bit(SPHW_AEQ_HW_CB_RUNNING, - &aeqs->aeq_hw_cb_state[event]); - if (aeqs->aeq_hwe_cb[event] && - test_bit(SPHW_AEQ_HW_CB_REG, - &aeqs->aeq_hw_cb_state[event])) - aeqs->aeq_hwe_cb[event](aeqs->hwdev, data, size); - clear_bit(SPHW_AEQ_HW_CB_RUNNING, - &aeqs->aeq_hw_cb_state[event]); - return; - } - sdk_warn(eq->hwdev->dev_hdl, "Unknown aeq hw event %d\n", event); -} - -/** - * aeq_irq_handler - handler for the aeq event - * @eq: the async event queue of the event - **/ -static bool aeq_irq_handler(struct sphw_eq *eq) -{ - struct sphw_aeq_elem *aeqe_pos = NULL; - u32 aeqe_desc; - u32 i, eqe_cnt = 0; - - for (i = 0; i < SPHW_TASK_PROCESS_EQE_LIMIT; i++) { - aeqe_pos = GET_CURR_AEQ_ELEM(eq); - - /* Data in HW is in Big endian Format */ - aeqe_desc = be32_to_cpu(aeqe_pos->desc); - - /* HW updates wrapped bit, when it adds eq element event */ - if (EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped) - return false; - - aeq_elem_handler(eq, aeqe_desc); - - eq->cons_idx++; - - if (eq->cons_idx == eq->eq_len) { - eq->cons_idx = 0; - eq->wrapped = !eq->wrapped; - } - - if (++eqe_cnt >= SPHW_EQ_UPDATE_CI_STEP) { - eqe_cnt = 0; - set_eq_cons_idx(eq, SPHW_EQ_NOT_ARMED); - } - } - - return true; -} - -/** 
- * ceq_irq_handler - handler for the ceq event - * @eq: the completion event queue of the event - **/ -static bool ceq_irq_handler(struct sphw_eq *eq) -{ - struct sphw_ceqs *ceqs = ceq_to_ceqs(eq); - u32 ceqe, eqe_cnt = 0; - u32 i; - - for (i = 0; i < g_num_ceqe_in_tasklet; i++) { - ceqe = *(GET_CURR_CEQ_ELEM(eq)); - ceqe = be32_to_cpu(ceqe); - - /* HW updates wrapped bit, when it adds eq element event */ - if (EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped) - return false; - - ceq_event_handler(ceqs, ceqe); - - eq->cons_idx++; - - if (eq->cons_idx == eq->eq_len) { - eq->cons_idx = 0; - eq->wrapped = !eq->wrapped; - } - - if (++eqe_cnt >= SPHW_EQ_UPDATE_CI_STEP) { - eqe_cnt = 0; - set_eq_cons_idx(eq, SPHW_EQ_NOT_ARMED); - } - } - - return true; -} - -static void reschedule_eq_handler(struct sphw_eq *eq) -{ - if (eq->type == SPHW_AEQ) { - struct sphw_aeqs *aeqs = aeq_to_aeqs(eq); - struct workqueue_struct *workq = aeqs->workq; - struct sphw_eq_work *aeq_work = &eq->aeq_work; - - queue_work_on(sphw_get_work_cpu_affinity(eq->hwdev, WORK_TYPE_AEQ), - workq, &aeq_work->work); - } else { - tasklet_schedule(&eq->ceq_tasklet); - } -} - -/** - * eq_irq_handler - handler for the eq event - * @data: the event queue of the event - **/ -static bool eq_irq_handler(void *data) -{ - struct sphw_eq *eq = (struct sphw_eq *)data; - bool uncompleted = false; - - if (eq->type == SPHW_AEQ) - uncompleted = aeq_irq_handler(eq); - else - uncompleted = ceq_irq_handler(eq); - - set_eq_cons_idx(eq, uncompleted ? SPHW_EQ_NOT_ARMED : - SPHW_EQ_ARMED); - - return uncompleted; -} - -/** - * eq_irq_work - eq work for the event - * @work: the work that is associated with the eq - **/ -static void eq_irq_work(struct work_struct *work) -{ - struct sphw_eq_work *aeq_work = - container_of(work, struct sphw_eq_work, work); - - if (eq_irq_handler(aeq_work->data)) - reschedule_eq_handler(aeq_work->data); -} - -/** - * aeq_interrupt - aeq interrupt handler - * @irq: irq number - * @data: the async event queue of the event - **/ -static irqreturn_t aeq_interrupt(int irq, void *data) -{ - struct sphw_eq *aeq = (struct sphw_eq *)data; - struct sphw_hwdev *hwdev = aeq->hwdev; - struct sphw_aeqs *aeqs = aeq_to_aeqs(aeq); - struct workqueue_struct *workq = aeqs->workq; - struct sphw_eq_work *aeq_work = NULL; - - /* clear resend timer cnt register */ - sphw_misx_intr_clear_resend_bit(hwdev, aeq->eq_irq.msix_entry_idx, - EQ_MSIX_RESEND_TIMER_CLEAR); - - aeq_work = &aeq->aeq_work; - aeq_work->data = aeq; - - queue_work_on(sphw_get_work_cpu_affinity(hwdev, WORK_TYPE_AEQ), - workq, &aeq_work->work); - - return IRQ_HANDLED; -} - -/** - * ceq_tasklet - ceq tasklet for the event - * @ceq_data: data that will be used by the tasklet(ceq) - **/ -static void ceq_tasklet(ulong ceq_data) -{ - struct sphw_ceq_tasklet_data *ceq_tasklet_data = - (struct sphw_ceq_tasklet_data *)ceq_data; - struct sphw_eq *eq = (struct sphw_eq *)ceq_tasklet_data->data; - - eq->soft_intr_jif = jiffies; - - if (eq_irq_handler(ceq_tasklet_data->data)) - reschedule_eq_handler(ceq_tasklet_data->data); -} - -/** - * ceq_interrupt - ceq interrupt handler - * @irq: irq number - * @data: the completion event queue of the event - **/ -static irqreturn_t ceq_interrupt(int irq, void *data) -{ - struct sphw_eq *ceq = (struct sphw_eq *)data; - struct sphw_ceq_tasklet_data *ceq_tasklet_data = NULL; - - ceq->hard_intr_jif = jiffies; - - /* clear resend timer counters */ - sphw_misx_intr_clear_resend_bit(ceq->hwdev, ceq->eq_irq.msix_entry_idx, - EQ_MSIX_RESEND_TIMER_CLEAR); - - 
ceq_tasklet_data = &ceq->ceq_tasklet_data; - ceq_tasklet_data->data = data; - tasklet_schedule(&ceq->ceq_tasklet); - - return IRQ_HANDLED; -} - -/** - * set_eq_ctrls - setting eq's ctrls registers - * @eq: the event queue for setting - **/ -static int set_eq_ctrls(struct sphw_eq *eq) -{ - enum sphw_eq_type type = eq->type; - struct sphw_hwif *hwif = eq->hwdev->hwif; - struct irq_info *eq_irq = &eq->eq_irq; - u32 addr, val, ctrl0, ctrl1, page_size_val, elem_size; - u32 pci_intf_idx = SPHW_PCI_INTF_IDX(hwif); - int err; - - if (type == SPHW_AEQ) { - /* set ctrl0 */ - addr = SPHW_CSR_AEQ_CTRL_0_ADDR; - - val = sphw_hwif_read_reg(hwif, addr); - - val = AEQ_CTRL_0_CLEAR(val, INTR_IDX) & - AEQ_CTRL_0_CLEAR(val, DMA_ATTR) & - AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) & - AEQ_CTRL_0_CLEAR(val, INTR_MODE); - - ctrl0 = AEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) | - AEQ_CTRL_0_SET(AEQ_DMA_ATTR_DEFAULT, DMA_ATTR) | - AEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) | - AEQ_CTRL_0_SET(SPHW_INTR_MODE_ARMED, INTR_MODE); - - val |= ctrl0; - - sphw_hwif_write_reg(hwif, addr, val); - - /* set ctrl1 */ - addr = SPHW_CSR_AEQ_CTRL_1_ADDR; - - page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); - elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq); - - ctrl1 = AEQ_CTRL_1_SET(eq->eq_len, LEN) | - AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) | - AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE); - - sphw_hwif_write_reg(hwif, addr, ctrl1); - - } else { - page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); - ctrl0 = CEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) | - CEQ_CTRL_0_SET(CEQ_DMA_ATTR_DEFAULT, DMA_ATTR) | - CEQ_CTRL_0_SET(CEQ_LMT_KICK_DEFAULT, LIMIT_KICK) | - CEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) | - CEQ_CTRL_0_SET(page_size_val, PAGE_SIZE) | - CEQ_CTRL_0_SET(SPHW_INTR_MODE_ARMED, INTR_MODE); - - ctrl1 = CEQ_CTRL_1_SET(eq->eq_len, LEN); - - /* set ceq ctrl reg through mgmt cpu */ - err = sphw_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, ctrl0, ctrl1); - if (err) - return err; - } - - return 0; -} - -/** - * ceq_elements_init - Initialize all the elements in the ceq - * @eq: the event queue - * @init_val: value to init with it the elements - **/ -static void ceq_elements_init(struct sphw_eq *eq, u32 init_val) -{ - u32 *ceqe = NULL; - u32 i; - - for (i = 0; i < eq->eq_len; i++) { - ceqe = GET_CEQ_ELEM(eq, i); - *(ceqe) = cpu_to_be32(init_val); - } - - wmb(); /* Write the init values */ -} - -/** - * aeq_elements_init - initialize all the elements in the aeq - * @eq: the event queue - * @init_val: value to init with it the elements - **/ -static void aeq_elements_init(struct sphw_eq *eq, u32 init_val) -{ - struct sphw_aeq_elem *aeqe = NULL; - u32 i; - - for (i = 0; i < eq->eq_len; i++) { - aeqe = GET_AEQ_ELEM(eq, i); - aeqe->desc = cpu_to_be32(init_val); - } - - wmb(); /* Write the init values */ -} - -static void eq_elements_init(struct sphw_eq *eq, u32 init_val) -{ - if (eq->type == SPHW_AEQ) - aeq_elements_init(eq, init_val); - else - ceq_elements_init(eq, init_val); -} - -/** - * alloc_eq_pages - allocate the pages for the queue - * @eq: the event queue - **/ -static int alloc_eq_pages(struct sphw_eq *eq) -{ - struct sphw_hwif *hwif = eq->hwdev->hwif; - struct sphw_dma_addr_align *eq_page = NULL; - u32 reg, init_val; - u16 pg_idx, i; - int err; - - eq->eq_pages = kcalloc(eq->num_pages, sizeof(*eq->eq_pages), - GFP_KERNEL); - if (!eq->eq_pages) { - sdk_err(eq->hwdev->dev_hdl, "Failed to alloc eq pages description\n"); - return -ENOMEM; - } - - for (pg_idx = 0; pg_idx < eq->num_pages; pg_idx++) { - eq_page = &eq->eq_pages[pg_idx]; - err = 
sphw_dma_alloc_coherent_align(eq->hwdev->dev_hdl, eq->page_size, - SPHW_MIN_EQ_PAGE_SIZE, GFP_KERNEL, eq_page); - if (err) { - sdk_err(eq->hwdev->dev_hdl, "Failed to alloc eq page, page index: %hu\n", - pg_idx); - goto dma_alloc_err; - } - - reg = SPHW_EQ_HI_PHYS_ADDR_REG(eq->type, pg_idx); - sphw_hwif_write_reg(hwif, reg, upper_32_bits(eq_page->align_paddr)); - - reg = SPHW_EQ_LO_PHYS_ADDR_REG(eq->type, pg_idx); - sphw_hwif_write_reg(hwif, reg, lower_32_bits(eq_page->align_paddr)); - } - - eq->num_elem_in_pg = GET_EQ_NUM_ELEMS(eq, eq->page_size); - if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) { - sdk_err(eq->hwdev->dev_hdl, "Number element in eq page != power of 2\n"); - err = -EINVAL; - goto dma_alloc_err; - } - init_val = EQ_WRAPPED(eq); - - eq_elements_init(eq, init_val); - - return 0; - -dma_alloc_err: - for (i = 0; i < pg_idx; i++) - sphw_dma_free_coherent_align(eq->hwdev->dev_hdl, &eq->eq_pages[i]); - - kfree(eq->eq_pages); - - return err; -} - -/** - * free_eq_pages - free the pages of the queue - * @eq: the event queue - **/ -static void free_eq_pages(struct sphw_eq *eq) -{ - u16 pg_idx; - - for (pg_idx = 0; pg_idx < eq->num_pages; pg_idx++) - sphw_dma_free_coherent_align(eq->hwdev->dev_hdl, &eq->eq_pages[pg_idx]); - - kfree(eq->eq_pages); -} - -static inline u32 get_page_size(struct sphw_eq *eq) -{ - u32 total_size; - u32 count; - - total_size = ALIGN((eq->eq_len * eq->elem_size), - SPHW_MIN_EQ_PAGE_SIZE); - - if (total_size <= (SPHW_EQ_MAX_PAGES(eq) * SPHW_MIN_EQ_PAGE_SIZE)) - return SPHW_MIN_EQ_PAGE_SIZE; - - count = (u32)(ALIGN((total_size / SPHW_EQ_MAX_PAGES(eq)), - SPHW_MIN_EQ_PAGE_SIZE) / SPHW_MIN_EQ_PAGE_SIZE); - - /* round up to nearest power of two */ - count = 1U << (u8)fls((int)(count - 1)); - - return ((u32)SPHW_MIN_EQ_PAGE_SIZE) * count; -} - -static int request_eq_irq(struct sphw_eq *eq, struct irq_info *entry) -{ - int err = 0; - - if (eq->type == SPHW_AEQ) { - struct sphw_eq_work *aeq_work = &eq->aeq_work; - - INIT_WORK(&aeq_work->work, eq_irq_work); - } else { - tasklet_init(&eq->ceq_tasklet, ceq_tasklet, - (ulong)(&eq->ceq_tasklet_data)); - } - - if (eq->type == SPHW_AEQ) { - snprintf(eq->irq_name, sizeof(eq->irq_name), - "sphw_aeq%u@pci:%s", eq->q_id, - pci_name(eq->hwdev->pcidev_hdl)); - - err = request_irq(entry->irq_id, aeq_interrupt, 0UL, - eq->irq_name, eq); - } else { - snprintf(eq->irq_name, sizeof(eq->irq_name), - "sphw_ceq%u@pci:%s", eq->q_id, - pci_name(eq->hwdev->pcidev_hdl)); - err = request_irq(entry->irq_id, ceq_interrupt, 0UL, - eq->irq_name, eq); - } - - return err; -} - -static void reset_eq(struct sphw_eq *eq) -{ - /* clear eq_len to force eqe drop in hardware */ - if (eq->type == SPHW_AEQ) - sphw_hwif_write_reg(eq->hwdev->hwif, SPHW_CSR_AEQ_CTRL_1_ADDR, 0); - else - sphw_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0); - - wmb(); /* clear eq_len before clear prod idx */ - - sphw_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0); -} - -/** - * init_eq - initialize eq - * @eq: the event queue - * @hwdev: the pointer to hw device - * @q_id: Queue id number - * @q_len: the number of EQ elements - * @type: the type of the event queue, ceq or aeq - * @entry: msix entry associated with the event queue - * Return: 0 - Success, Negative - failure - **/ -static int init_eq(struct sphw_eq *eq, struct sphw_hwdev *hwdev, u16 q_id, - u32 q_len, enum sphw_eq_type type, struct irq_info *entry) -{ - int err = 0; - - eq->hwdev = hwdev; - eq->q_id = q_id; - eq->type = type; - eq->eq_len = q_len; - - /* Indirect access should set q_id first */ - 
sphw_hwif_write_reg(hwdev->hwif, SPHW_EQ_INDIR_IDX_ADDR(eq->type), eq->q_id); - wmb(); /* write index before config */ - - reset_eq(eq); - - eq->cons_idx = 0; - eq->wrapped = 0; - - eq->elem_size = (type == SPHW_AEQ) ? SPHW_AEQE_SIZE : SPHW_CEQE_SIZE; - - eq->page_size = get_page_size(eq); - eq->orig_page_size = eq->page_size; - eq->num_pages = GET_EQ_NUM_PAGES(eq, eq->page_size); - if (eq->num_pages > SPHW_EQ_MAX_PAGES(eq)) { - sdk_err(hwdev->dev_hdl, "Number pages: %u too many pages for eq\n", - eq->num_pages); - return -EINVAL; - } - - err = alloc_eq_pages(eq); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to allocate pages for eq\n"); - return err; - } - - eq->eq_irq.msix_entry_idx = entry->msix_entry_idx; - eq->eq_irq.irq_id = entry->irq_id; - - err = set_eq_ctrls(eq); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to set ctrls for eq\n"); - goto init_eq_ctrls_err; - } - - set_eq_cons_idx(eq, SPHW_EQ_ARMED); - - err = request_eq_irq(eq, entry); - if (err) { - sdk_err(hwdev->dev_hdl, - "Failed to request irq for the eq, err: %d\n", err); - goto req_irq_err; - } - - sphw_set_msix_state(hwdev, entry->msix_entry_idx, SPHW_MSIX_DISABLE); - - return 0; - -init_eq_ctrls_err: -req_irq_err: - free_eq_pages(eq); - return err; -} - -/** - * remove_eq - remove eq - * @eq: the event queue - **/ -static void remove_eq(struct sphw_eq *eq) -{ - struct irq_info *entry = &eq->eq_irq; - - sphw_set_msix_state(eq->hwdev, entry->msix_entry_idx, SPHW_MSIX_DISABLE); - synchronize_irq(entry->irq_id); - - free_irq(entry->irq_id, eq); - - /* Indirect access should set q_id first */ - sphw_hwif_write_reg(eq->hwdev->hwif, SPHW_EQ_INDIR_IDX_ADDR(eq->type), eq->q_id); - - wmb(); /* write index before config */ - - if (eq->type == SPHW_AEQ) { - struct sphw_eq_work *aeq_work = &eq->aeq_work; - - cancel_work_sync(&aeq_work->work); - - /* clear eq_len to avoid hw access host memory */ - sphw_hwif_write_reg(eq->hwdev->hwif, SPHW_CSR_AEQ_CTRL_1_ADDR, 0); - } else { - tasklet_kill(&eq->ceq_tasklet); - - sphw_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0); - } - - /* update cons_idx to avoid invalid interrupt */ - eq->cons_idx = sphw_hwif_read_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq)); - set_eq_cons_idx(eq, SPHW_EQ_NOT_ARMED); - - free_eq_pages(eq); -} - -/** - * sphw_aeqs_init - init all the aeqs - * @hwdev: the pointer to hw device - * @num_aeqs: number of AEQs - * @msix_entries: msix entries associated with the event queues - * Return: 0 - Success, Negative - failure - **/ -int sphw_aeqs_init(struct sphw_hwdev *hwdev, u16 num_aeqs, struct irq_info *msix_entries) -{ - struct sphw_aeqs *aeqs = NULL; - int err; - u16 i, q_id; - - if (!hwdev) - return -EINVAL; - - aeqs = kzalloc(sizeof(*aeqs), GFP_KERNEL); - if (!aeqs) - return -ENOMEM; - - hwdev->aeqs = aeqs; - aeqs->hwdev = hwdev; - aeqs->num_aeqs = num_aeqs; - aeqs->workq = alloc_workqueue(SPHW_EQS_WQ_NAME, WQ_MEM_RECLAIM, SPHW_MAX_AEQS); - if (!aeqs->workq) { - sdk_err(hwdev->dev_hdl, "Failed to initialize aeq workqueue\n"); - err = -ENOMEM; - goto create_work_err; - } - - if (g_aeq_len < SPHW_MIN_AEQ_LEN || g_aeq_len > SPHW_MAX_AEQ_LEN) { - sdk_warn(hwdev->dev_hdl, "Module Parameter g_aeq_len value %u out of range, resetting to %d\n", - g_aeq_len, SPHW_DEFAULT_AEQ_LEN); - g_aeq_len = SPHW_DEFAULT_AEQ_LEN; - } - - for (q_id = 0; q_id < num_aeqs; q_id++) { - err = init_eq(&aeqs->aeq[q_id], hwdev, q_id, g_aeq_len, - SPHW_AEQ, &msix_entries[q_id]); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init aeq %u\n", - q_id); - goto init_aeq_err; - } - } - for (q_id = 0; 
q_id < num_aeqs; q_id++) - sphw_set_msix_state(hwdev, msix_entries[q_id].msix_entry_idx, SPHW_MSIX_ENABLE); - - return 0; - -init_aeq_err: - for (i = 0; i < q_id; i++) - remove_eq(&aeqs->aeq[i]); - - destroy_workqueue(aeqs->workq); - -create_work_err: - kfree(aeqs); - - return err; -} - -/** - * sphw_aeqs_free - free all the aeqs - * @hwdev: the pointer to hw device - **/ -void sphw_aeqs_free(struct sphw_hwdev *hwdev) -{ - struct sphw_aeqs *aeqs = hwdev->aeqs; - enum sphw_aeq_type aeq_event = SPHW_HW_INTER_INT; - enum sphw_aeq_sw_type sw_aeq_event = SPHW_STATELESS_EVENT; - u16 q_id; - - for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) - remove_eq(&aeqs->aeq[q_id]); - - for (; sw_aeq_event < SPHW_MAX_AEQ_SW_EVENTS; sw_aeq_event++) - sphw_aeq_unregister_swe_cb(hwdev, sw_aeq_event); - - for (; aeq_event < SPHW_MAX_AEQ_EVENTS; aeq_event++) - sphw_aeq_unregister_hw_cb(hwdev, aeq_event); - - destroy_workqueue(aeqs->workq); - - kfree(aeqs); -} - -/** - * sphw_ceqs_init - init all the ceqs - * @hwdev: the pointer to hw device - * @num_ceqs: number of CEQs - * @msix_entries: msix entries associated with the event queues - * Return: 0 - Success, Negative - failure - **/ -int sphw_ceqs_init(struct sphw_hwdev *hwdev, u16 num_ceqs, struct irq_info *msix_entries) -{ - struct sphw_ceqs *ceqs; - int err; - u16 i, q_id; - - ceqs = kzalloc(sizeof(*ceqs), GFP_KERNEL); - if (!ceqs) - return -ENOMEM; - - hwdev->ceqs = ceqs; - - ceqs->hwdev = hwdev; - ceqs->num_ceqs = num_ceqs; - - if (g_ceq_len < SPHW_MIN_CEQ_LEN || g_ceq_len > SPHW_MAX_CEQ_LEN) { - sdk_warn(hwdev->dev_hdl, "Module Parameter g_ceq_len value %u out of range, resetting to %d\n", - g_ceq_len, SPHW_DEFAULT_CEQ_LEN); - g_ceq_len = SPHW_DEFAULT_CEQ_LEN; - } - - if (!g_num_ceqe_in_tasklet) { - sdk_warn(hwdev->dev_hdl, "Module Parameter g_num_ceqe_in_tasklet can not be zero, resetting to %d\n", - SPHW_TASK_PROCESS_EQE_LIMIT); - g_num_ceqe_in_tasklet = SPHW_TASK_PROCESS_EQE_LIMIT; - } - for (q_id = 0; q_id < num_ceqs; q_id++) { - err = init_eq(&ceqs->ceq[q_id], hwdev, q_id, g_ceq_len, - SPHW_CEQ, &msix_entries[q_id]); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init ceq %u\n", - q_id); - goto init_ceq_err; - } - } - for (q_id = 0; q_id < num_ceqs; q_id++) - sphw_set_msix_state(hwdev, msix_entries[q_id].msix_entry_idx, SPHW_MSIX_ENABLE); - - return 0; - -init_ceq_err: - for (i = 0; i < q_id; i++) - remove_eq(&ceqs->ceq[i]); - - kfree(ceqs); - - return err; -} - -/** - * sphw_ceqs_free - free all the ceqs - * @hwdev: the pointer to hw device - **/ -void sphw_ceqs_free(struct sphw_hwdev *hwdev) -{ - struct sphw_ceqs *ceqs = hwdev->ceqs; - enum sphw_ceq_event ceq_event = SPHW_CMDQ; - u16 q_id; - - for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) - remove_eq(&ceqs->ceq[q_id]); - - for (; ceq_event < SPHW_MAX_CEQ_EVENTS; ceq_event++) - sphw_ceq_unregister_cb(hwdev, ceq_event); - - kfree(ceqs); -} - -void sphw_get_ceq_irqs(struct sphw_hwdev *hwdev, struct irq_info *irqs, u16 *num_irqs) -{ - struct sphw_ceqs *ceqs = hwdev->ceqs; - u16 q_id; - - for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) { - irqs[q_id].irq_id = ceqs->ceq[q_id].eq_irq.irq_id; - irqs[q_id].msix_entry_idx = - ceqs->ceq[q_id].eq_irq.msix_entry_idx; - } - - *num_irqs = ceqs->num_ceqs; -} - -void sphw_get_aeq_irqs(struct sphw_hwdev *hwdev, struct irq_info *irqs, u16 *num_irqs) -{ - struct sphw_aeqs *aeqs = hwdev->aeqs; - u16 q_id; - - for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) { - irqs[q_id].irq_id = aeqs->aeq[q_id].eq_irq.irq_id; - irqs[q_id].msix_entry_idx = - 
aeqs->aeq[q_id].eq_irq.msix_entry_idx; - } - - *num_irqs = aeqs->num_aeqs; -} - -void sphw_dump_aeq_info(struct sphw_hwdev *hwdev) -{ - struct sphw_aeq_elem *aeqe_pos = NULL; - struct sphw_eq *eq = NULL; - u32 addr, ci, pi, ctrl0, idx; - int q_id; - - for (q_id = 0; q_id < hwdev->aeqs->num_aeqs; q_id++) { - eq = &hwdev->aeqs->aeq[q_id]; - /* Indirect access should set q_id first */ - sphw_hwif_write_reg(eq->hwdev->hwif, SPHW_EQ_INDIR_IDX_ADDR(eq->type), eq->q_id); - wmb(); /* write index before config */ - - addr = SPHW_CSR_AEQ_CTRL_0_ADDR; - - ctrl0 = sphw_hwif_read_reg(hwdev->hwif, addr); - - idx = sphw_hwif_read_reg(hwdev->hwif, SPHW_EQ_INDIR_IDX_ADDR(eq->type)); - - addr = EQ_CONS_IDX_REG_ADDR(eq); - ci = sphw_hwif_read_reg(hwdev->hwif, addr); - addr = EQ_PROD_IDX_REG_ADDR(eq); - pi = sphw_hwif_read_reg(hwdev->hwif, addr); - aeqe_pos = GET_CURR_AEQ_ELEM(eq); - sdk_err(hwdev->dev_hdl, "Aeq id: %d, idx: %u, ctrl0: 0x%08x, ci: 0x%08x, pi: 0x%x, work_state: 0x%x, wrap: %u, desc: 0x%x\n", - q_id, idx, ctrl0, ci, pi, work_busy(&eq->aeq_work.work), - eq->wrapped, be32_to_cpu(aeqe_pos->desc)); - } -} - -void sphw_dump_ceq_info(struct sphw_hwdev *hwdev) -{ - struct sphw_eq *eq = NULL; - u32 addr, ci, pi; - int q_id; - - for (q_id = 0; q_id < hwdev->ceqs->num_ceqs; q_id++) { - eq = &hwdev->ceqs->ceq[q_id]; - /* Indirect access should set q_id first */ - sphw_hwif_write_reg(eq->hwdev->hwif, SPHW_EQ_INDIR_IDX_ADDR(eq->type), eq->q_id); - wmb(); /* write index before config */ - - addr = EQ_CONS_IDX_REG_ADDR(eq); - ci = sphw_hwif_read_reg(hwdev->hwif, addr); - addr = EQ_PROD_IDX_REG_ADDR(eq); - pi = sphw_hwif_read_reg(hwdev->hwif, addr); - sdk_err(hwdev->dev_hdl, "Ceq id: %d, ci: 0x%08x, sw_ci: 0x%08x, pi: 0x%x, tasklet_state: 0x%lx, wrap: %u, ceqe: 0x%x\n", - q_id, ci, eq->cons_idx, pi, eq->ceq_tasklet.state, - eq->wrapped, be32_to_cpu(*(GET_CURR_CEQ_ELEM(eq)))); - - sdk_err(hwdev->dev_hdl, "Ceq last response hard interrupt time: %u\n", - jiffies_to_msecs(jiffies - eq->hard_intr_jif)); - sdk_err(hwdev->dev_hdl, "Ceq last response soft interrupt time: %u\n", - jiffies_to_msecs(jiffies - eq->soft_intr_jif)); - } -} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_eqs.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_eqs.h deleted file mode 100644 index df25b5a5fdcf..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_eqs.h +++ /dev/null @@ -1,157 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_EQS_H -#define SPHW_EQS_H - -#define SPHW_MAX_AEQS 4 -#define SPHW_MAX_CEQS 32 - -#define SPHW_AEQ_MAX_PAGES 4 -#define SPHW_CEQ_MAX_PAGES 8 - -#define SPHW_AEQE_SIZE 64 -#define SPHW_CEQE_SIZE 4 - -#define SPHW_AEQE_DESC_SIZE 4 -#define SPHW_AEQE_DATA_SIZE \ - (SPHW_AEQE_SIZE - SPHW_AEQE_DESC_SIZE) - -#define SPHW_DEFAULT_AEQ_LEN 4096 -#define SPHW_DEFAULT_CEQ_LEN 8192 - -#define SPHW_MIN_EQ_PAGE_SIZE 0x1000 /* min eq page size 4K Bytes */ -#define SPHW_MAX_EQ_PAGE_SIZE 0x400000 /* max eq page size 4M Bytes */ - -#define SPHW_MIN_AEQ_LEN 64 -#define SPHW_MAX_AEQ_LEN \ - ((SPHW_MAX_EQ_PAGE_SIZE / SPHW_AEQE_SIZE) * SPHW_AEQ_MAX_PAGES) - -#define SPHW_MIN_CEQ_LEN 64 -#define SPHW_MAX_CEQ_LEN \ - ((SPHW_MAX_EQ_PAGE_SIZE / SPHW_CEQE_SIZE) * SPHW_CEQ_MAX_PAGES) -#define SPHW_CEQ_ID_CMDQ 0 - -#define EQ_IRQ_NAME_LEN 64 - -#define EQ_USLEEP_LOW_BOUND 900 -#define EQ_USLEEP_HIG_BOUND 1000 - -enum sphw_eq_type { - SPHW_AEQ, - SPHW_CEQ -}; - -enum sphw_eq_intr_mode { - SPHW_INTR_MODE_ARMED, - SPHW_INTR_MODE_ALWAYS, -}; - -enum 
sphw_eq_ci_arm_state { - SPHW_EQ_NOT_ARMED, - SPHW_EQ_ARMED, -}; - -struct sphw_eq_work { - struct work_struct work; - void *data; -}; - -struct sphw_ceq_tasklet_data { - void *data; -}; - -struct sphw_eq { - struct sphw_hwdev *hwdev; - u16 q_id; - enum sphw_eq_type type; - u32 page_size; - u32 orig_page_size; - u32 eq_len; - - u32 cons_idx; - u16 wrapped; - - u16 elem_size; - u16 num_pages; - u32 num_elem_in_pg; - - struct irq_info eq_irq; - char irq_name[EQ_IRQ_NAME_LEN]; - - struct sphw_dma_addr_align *eq_pages; - - struct sphw_eq_work aeq_work; - struct tasklet_struct ceq_tasklet; - struct sphw_ceq_tasklet_data ceq_tasklet_data; - - u64 hard_intr_jif; - u64 soft_intr_jif; -}; - -struct sphw_aeq_elem { - u8 aeqe_data[SPHW_AEQE_DATA_SIZE]; - u32 desc; -}; - -enum sphw_aeq_cb_state { - SPHW_AEQ_HW_CB_REG = 0, - SPHW_AEQ_HW_CB_RUNNING, - SPHW_AEQ_SW_CB_REG, - SPHW_AEQ_SW_CB_RUNNING, -}; - -struct sphw_aeqs { - struct sphw_hwdev *hwdev; - - sphw_aeq_hwe_cb aeq_hwe_cb[SPHW_MAX_AEQ_EVENTS]; - sphw_aeq_swe_cb aeq_swe_cb[SPHW_MAX_AEQ_SW_EVENTS]; - unsigned long aeq_hw_cb_state[SPHW_MAX_AEQ_EVENTS]; - unsigned long aeq_sw_cb_state[SPHW_MAX_AEQ_SW_EVENTS]; - - struct sphw_eq aeq[SPHW_MAX_AEQS]; - u16 num_aeqs; - struct workqueue_struct *workq; -}; - -enum sphw_ceq_cb_state { - SPHW_CEQ_CB_REG = 0, - SPHW_CEQ_CB_RUNNING, -}; - -struct sphw_ceqs { - struct sphw_hwdev *hwdev; - - sphw_ceq_event_cb ceq_cb[SPHW_MAX_CEQ_EVENTS]; - void *ceq_data[SPHW_MAX_CEQ_EVENTS]; - unsigned long ceq_cb_state[SPHW_MAX_CEQ_EVENTS]; - - struct sphw_eq ceq[SPHW_MAX_CEQS]; - u16 num_ceqs; -}; - -struct sphw_ceq_info { - u32 q_len; - u32 page_size; - u16 elem_size; - u16 num_pages; - u32 num_elem_in_pg; -}; - -int sphw_aeqs_init(struct sphw_hwdev *hwdev, u16 num_aeqs, struct irq_info *msix_entries); - -void sphw_aeqs_free(struct sphw_hwdev *hwdev); - -int sphw_ceqs_init(struct sphw_hwdev *hwdev, u16 num_ceqs, struct irq_info *msix_entries); - -void sphw_ceqs_free(struct sphw_hwdev *hwdev); - -void sphw_get_ceq_irqs(struct sphw_hwdev *hwdev, struct irq_info *irqs, u16 *num_irqs); - -void sphw_get_aeq_irqs(struct sphw_hwdev *hwdev, struct irq_info *irqs, u16 *num_irqs); - -void sphw_dump_ceq_info(struct sphw_hwdev *hwdev); - -void sphw_dump_aeq_info(struct sphw_hwdev *hwdev); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw.h deleted file mode 100644 index 41945efe86d8..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw.h +++ /dev/null @@ -1,643 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_HW_H -#define SPHW_HW_H - -#include "sphw_comm_cmd.h" -#include "sphw_comm_msg_intf.h" -#include "sphw_crm.h" - -enum sphw_mod_type { - SPHW_MOD_COMM = 0, /* HW communication module */ - SPHW_MOD_L2NIC = 1, /* L2NIC module */ - SPHW_MOD_ROCE = 2, - SPHW_MOD_PLOG = 3, - SPHW_MOD_TOE = 4, - SPHW_MOD_FLR = 5, - SPHW_MOD_FC = 6, - SPHW_MOD_CFGM = 7, /* Configuration module */ - SPHW_MOD_CQM = 8, - SPHW_MOD_VSWITCH = 9, - COMM_MOD_FC = 10, - SPHW_MOD_OVS = 11, - SPHW_MOD_DSW = 12, - SPHW_MOD_MIGRATE = 13, - SPHW_MOD_HILINK = 14, - SPHW_MOD_CRYPT = 15, /* secure crypto module */ - SPHW_MOD_VIO = 16, - SPHW_MOD_DFT = 17, /* DFT */ - SPHW_MOD_HW_MAX = 18, /* hardware max module id */ - /* Software module id, for PF/VF and multi-host */ - SPHW_MOD_SW_FUNC = 19, - SPHW_MOD_IOE = 20, - SPHW_MOD_MAX, -}; - -/* to use 0-level CLA, page size must be: SQ 16B(wqe) * 64k(max_q_depth) */ -#define 
SPHW_DEFAULT_WQ_PAGE_SIZE 0x100000 -#define SPHW_HW_WQ_PAGE_SIZE 0x1000 -#define SPHW_MAX_WQ_PAGE_SIZE_ORDER 8 - -enum sphw_channel_id { - SPHW_CHANNEL_DEFAULT, - SPHW_CHANNEL_COMM, - SPHW_CHANNEL_NIC, - SPHW_CHANNEL_ROCE, - SPHW_CHANNEL_TOE, - SPHW_CHANNEL_FC, - SPHW_CHANNEL_OVS, - SPHW_CHANNEL_DSW, - SPHW_CHANNEL_MIG, - SPHW_CHANNEL_CRYPT, - - SPHW_CHANNEL_MAX = 32, -}; - -struct sphw_cmd_buf { - void *buf; - dma_addr_t dma_addr; - u16 size; - /* Usage count, USERS DO NOT USE */ - atomic_t ref_cnt; -}; - -enum sphw_aeq_type { - SPHW_HW_INTER_INT = 0, - SPHW_MBX_FROM_FUNC = 1, - SPHW_MSG_FROM_MGMT_CPU = 2, - SPHW_API_RSP = 3, - SPHW_API_CHAIN_STS = 4, - SPHW_MBX_SEND_RSLT = 5, - SPHW_MAX_AEQ_EVENTS -}; - -#define SPHW_NIC_FATAL_ERROR_MAX 0x8U - -enum sphw_aeq_sw_type { - SPHW_STATELESS_EVENT = 0, - SPHW_STATEFULL_EVENT = 1, - SPHW_MAX_AEQ_SW_EVENTS -}; - -typedef void (*sphw_aeq_hwe_cb)(void *handle, u8 *data, u8 size); -typedef u8 (*sphw_aeq_swe_cb)(void *handle, u8 event, u8 *data); - -/** - * @brief sphw_aeq_register_hw_cb - register aeq hardware callback - * @param hwdev: device pointer to hwdev - * @param event: event type - * @param hwe_cb: callback function - * @retval zero: success - * @retval non-zero: failure - **/ -int sphw_aeq_register_hw_cb(void *hwdev, enum sphw_aeq_type event, sphw_aeq_hwe_cb hwe_cb); - -/** - * @brief sphw_aeq_unregister_hw_cb - unregister aeq hardware callback - * @param hwdev: device pointer to hwdev - * @param event: event type - **/ -void sphw_aeq_unregister_hw_cb(void *hwdev, enum sphw_aeq_type event); - -/** - * @brief sphw_aeq_register_swe_cb - register aeq soft event callback - * @param hwdev: device pointer to hwdev - * @param event: event type - * @param aeq_swe_cb: callback function - * @retval zero: success - * @retval non-zero: failure - **/ -int sphw_aeq_register_swe_cb(void *hwdev, enum sphw_aeq_sw_type event, sphw_aeq_swe_cb aeq_swe_cb); - -/** - * @brief sphw_aeq_unregister_swe_cb - unregister aeq soft event callback - * @param hwdev: device pointer to hwdev - * @param event: event type - **/ -void sphw_aeq_unregister_swe_cb(void *hwdev, enum sphw_aeq_sw_type event); - -enum sphw_ceq_event { - SPHW_NON_L2NIC_SCQ, - SPHW_NON_L2NIC_ECQ, - SPHW_NON_L2NIC_NO_CQ_EQ, - SPHW_CMDQ, - SPHW_L2NIC_SQ, - SPHW_L2NIC_RQ, - SPHW_MAX_CEQ_EVENTS, -}; - -typedef void (*sphw_ceq_event_cb)(void *handle, u32 ceqe_data); - -/** - * @brief sphw_ceq_register_cb - register ceq callback - * @param hwdev: device pointer to hwdev - * @param event: event type - * @param callback: callback function - * @retval zero: success - * @retval non-zero: failure - **/ -int sphw_ceq_register_cb(void *hwdev, enum sphw_ceq_event event, sphw_ceq_event_cb callback); -/** - * @brief sphw_ceq_unregister_cb - unregister ceq callback - * @param hwdev: device pointer to hwdev - * @param event: event type - **/ -void sphw_ceq_unregister_cb(void *hwdev, enum sphw_ceq_event event); - -typedef int (*sphw_vf_mbox_cb)(void *handle, void *pri_handle, u16 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size); - -typedef int (*sphw_pf_mbox_cb)(void *handle, void *pri_handle, u16 vf_id, u16 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size); - -typedef int (*sphw_ppf_mbox_cb)(void *handle, void *pri_handle, - u16 pf_idx, u16 vf_id, u16 cmd, - void *buf_in, u16 in_size, void *buf_out, - u16 *out_size); - -typedef int (*sphw_pf_recv_from_ppf_mbox_cb)(void *handle, void *pri_handle, u16 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size); - -/** - * @brief 
sphw_register_ppf_mbox_cb - ppf register mbox msg callback - * @param hwdev: device pointer to hwdev - * @param mod: mod type - * @param pri_handle: private data will be used by the callback - * @param callback: callback function - * @retval zero: success - * @retval non-zero: failure - **/ -int sphw_register_ppf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, sphw_ppf_mbox_cb callback); - -/** - * @brief sphw_register_pf_mbox_cb - pf register mbox msg callback - * @param hwdev: device pointer to hwdev - * @param mod: mod type - * @param pri_handle: private data will be used by the callback - * @param callback: callback function - * @retval zero: success - * @retval non-zero: failure - **/ -int sphw_register_pf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, sphw_pf_mbox_cb callback); -/** - * @brief sphw_register_vf_mbox_cb - vf register mbox msg callback - * @param hwdev: device pointer to hwdev - * @param mod: mod type - * @param pri_handle: private data will be used by the callback - * @param callback: callback function - * @retval zero: success - * @retval non-zero: failure - **/ -int sphw_register_vf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, sphw_vf_mbox_cb callback); -/** - * @brief sphw_register_ppf_to_pf_mbox_cb - register mbox msg callback - * @param hwdev: device pointer to hwdev - * @param mod: mod type - * @param pri_handle: private data will be used by the callback - * @param callback: callback function - * @retval zero: success - * @retval non-zero: failure - **/ -int sphw_register_ppf_to_pf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, - sphw_pf_recv_from_ppf_mbox_cb callback); - -/** - * @brief sphw_unregister_ppf_mbox_cb - ppf unregister mbox msg callback - * @param hwdev: device pointer to hwdev - * @param mod: mod type - **/ -void sphw_unregister_ppf_mbox_cb(void *hwdev, u8 mod); - -/** - * @brief sphw_unregister_pf_mbox_cb - pf register mbox msg callback - * @param hwdev: device pointer to hwdev - * @param mod: mod type - **/ -void sphw_unregister_pf_mbox_cb(void *hwdev, u8 mod); - -/** - * @brief sphw_unregister_vf_mbox_cb - pf register mbox msg callback - * @param hwdev: device pointer to hwdev - * @param mod: mod type - **/ -void sphw_unregister_vf_mbox_cb(void *hwdev, u8 mod); - -/** - * @brief sphw_unregister_ppf_to_pf_mbox_cb - unregister mbox msg callback - * @param hwdev: device pointer to hwdev - * @param mod: mod type - **/ -void sphw_unregister_ppf_to_pf_mbox_cb(void *hwdev, u8 mod); - -typedef void (*sphw_mgmt_msg_cb)(void *hwdev, void *pri_handle, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size); - -/** - * @brief sphw_register_mgmt_msg_cb - register mgmt msg callback - * @param hwdev: device pointer to hwdev - * @param mod: mod type - * @param pri_handle: private data will be used by the callback - * @param callback: callback function - * @retval zero: success - * @retval non-zero: failure - **/ -int sphw_register_mgmt_msg_cb(void *hwdev, u8 mod, void *pri_handle, sphw_mgmt_msg_cb callback); - -/** - * @brief sphw_unregister_mgmt_msg_cb - unregister mgmt msg callback - * @param hwdev: device pointer to hwdev - * @param mod: mod type - **/ -void sphw_unregister_mgmt_msg_cb(void *hwdev, u8 mod); - -/** - * @brief sphw_register_service_adapter - register service adapter - * @param hwdev: device pointer to hwdev - * @param service_adapter: service adapter - * @param type: service type - * @retval zero: success - * @retval non-zero: failure - **/ -int sphw_register_service_adapter(void *hwdev, void *service_adapter, - enum 
sphw_service_type type); - -/** - * @brief sphw_unregister_service_adapter - unregister service adapter - * @param hwdev: device pointer to hwdev - * @param type: service type - **/ -void sphw_unregister_service_adapter(void *hwdev, enum sphw_service_type type); - -/** - * @brief sphw_get_service_adapter - get service adapter - * @param hwdev: device pointer to hwdev - * @param type: service type - * @retval non-zero: success - * @retval null: failure - **/ -void *sphw_get_service_adapter(void *hwdev, enum sphw_service_type type); - -/** - * @brief sphw_alloc_db_phy_addr - alloc doorbell & direct wqe pyhsical addr - * @param hwdev: device pointer to hwdev - * @param db_base: pointer to alloc doorbell base address - * @param dwqe_base: pointer to alloc direct base address - * @retval zero: success - * @retval non-zero: failure - **/ -int sphw_alloc_db_phy_addr(void *hwdev, u64 *db_base, u64 *dwqe_base); - -/** - * @brief sphw_free_db_phy_addr - free doorbell & direct wqe physical address - * @param hwdev: device pointer to hwdev - * @param db_base: pointer to free doorbell base address - * @param dwqe_base: pointer to free direct base address - **/ -void sphw_free_db_phy_addr(void *hwdev, u64 db_base, u64 dwqe_base); - -/** - * @brief sphw_alloc_db_addr - alloc doorbell & direct wqe - * @param hwdev: device pointer to hwdev - * @param db_base: pointer to alloc doorbell base address - * @param dwqe_base: pointer to alloc direct base address - * @retval zero: success - * @retval non-zero: failure - **/ -int sphw_alloc_db_addr(void *hwdev, void __iomem **db_base, void __iomem **dwqe_base); - -/** - * @brief sphw_free_db_addr - free doorbell & direct wqe - * @param hwdev: device pointer to hwdev - * @param db_base: pointer to free doorbell base address - * @param dwqe_base: pointer to free direct base address - **/ -void sphw_free_db_addr(void *hwdev, const void __iomem *db_base, void __iomem *dwqe_base); - -/** - * @brief sphw_alloc_db_phy_addr - alloc physical doorbell & direct wqe - * @param hwdev: device pointer to hwdev - * @param db_base: pointer to alloc doorbell base address - * @param dwqe_base: pointer to alloc direct base address - * @retval zero: success - * @retval non-zero: failure - **/ -int sphw_alloc_db_phy_addr(void *hwdev, u64 *db_base, u64 *dwqe_base); - -/** - * @brief sphw_free_db_phy_addr - free physical doorbell & direct wqe - * @param hwdev: device pointer to hwdev - * @param db_base: free doorbell base address - * @param dwqe_base: free direct base address - **/ - -void sphw_free_db_phy_addr(void *hwdev, u64 db_base, u64 dwqe_base); - -/** - * @brief sphw_set_root_ctxt - set root context - * @param hwdev: device pointer to hwdev - * @param rq_depth: rq depth - * @param sq_depth: sq depth - * @param rx_buf_sz: rx buffer size - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - **/ -int sphw_set_root_ctxt(void *hwdev, u32 rq_depth, u32 sq_depth, int rx_buf_sz, u16 channel); - -/** - * @brief sphw_clean_root_ctxt - clean root context - * @param hwdev: device pointer to hwdev - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - **/ -int sphw_clean_root_ctxt(void *hwdev, u16 channel); - -/** - * @brief sphw_alloc_cmd_buf - alloc cmd buffer - * @param hwdev: device pointer to hwdev - * @retval non-zero: success - * @retval null: failure - **/ -struct sphw_cmd_buf *sphw_alloc_cmd_buf(void *hwdev); - -/** - * @brief sphw_free_cmd_buf - free cmd buffer - * @param hwdev: device pointer to hwdev - * @param 
cmd_buf: cmd buffer to free - **/ -void sphw_free_cmd_buf(void *hwdev, struct sphw_cmd_buf *cmd_buf); - -/** - * @brief sphw_dbg_get_hw_stats - get hardware stats - * @param hwdev: device pointer to hwdev - * @param hw_stats: pointer to memory caller to alloc - * @param out_size: out size - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_dbg_get_hw_stats(const void *hwdev, u8 *hw_stats, u16 *out_size); - -/** - * @brief sphw_dbg_clear_hw_stats - clear hardware stats - * @param hwdev: device pointer to hwdev - * @retval clear hardware size - */ -u16 sphw_dbg_clear_hw_stats(void *hwdev); - -/** - * @brief sphw_get_chip_fault_stats - get chip fault stats - * @param hwdev: device pointer to hwdev - * @param chip_fault_stats: pointer to memory caller to alloc - * @param offset: offset - */ -void sphw_get_chip_fault_stats(const void *hwdev, u8 *chip_fault_stats, u32 offset); - -/** - * @brief sphw_msg_to_mgmt_sync - msg to management cpu - * @param hwdev: device pointer to hwdev - * @param mod: mod type - * @param cmd: cmd - * @param buf_in: message buffer in - * @param in_size: in buffer size - * @param buf_out: message buffer out - * @param out_size: out buffer size - * @param timeout: timeout - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_msg_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, void *buf_out, - u16 *out_size, u32 timeout, u16 channel); - -/** - * @brief sphw_msg_to_mgmt_async - msg to management cpu async - * @param hwdev: device pointer to hwdev - * @param mod: mod type - * @param cmd: cmd - * @param buf_in: message buffer in - * @param in_size: in buffer size - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - * - * The function does not sleep inside, allowing use in irq context - */ -int sphw_msg_to_mgmt_async(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, u16 channel); - -/** - * @brief sphw_msg_to_mgmt_no_ack - msg to management cpu don't need no ack - * @param hwdev: device pointer to hwdev - * @param mod: mod type - * @param cmd: cmd - * @param buf_in: message buffer in - * @param in_size: in buffer size - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - * - * The function will sleep inside, and it is not allowed to be used in - * interrupt context - */ -int sphw_msg_to_mgmt_no_ack(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, u16 channel); - -int sphw_msg_to_mgmt_api_chain_async(void *hwdev, u8 mod, u16 cmd, const void *buf_in, u16 in_size); - -int sphw_msg_to_mgmt_api_chain_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size, u32 timeout); - -/** - * @brief sphw_mbox_to_pf - vf mbox message to pf - * @param hwdev: device pointer to hwdev - * @param mod: mod type - * @param cmd: cmd - * @param buf_in: message buffer in - * @param in_size: in buffer size - * @param buf_out: message buffer out - * @param out_size: out buffer size - * @param timeout: timeout - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_mbox_to_pf(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, void *buf_out, - u16 *out_size, u32 timeout, u16 channel); - -/** - * @brief sphw_mbox_to_vf - mbox message to vf - * @param hwdev: device pointer to hwdev - * @param vf_id: vf index - * @param mod: mod type - * @param cmd: cmd - * @param buf_in: message buffer in - * @param in_size: in buffer size - * @param buf_out: 
message buffer out - * @param out_size: out buffer size - * @param timeout: timeout - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_mbox_to_vf(void *hwdev, u16 vf_id, u8 mod, u16 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel); - -/** - * @brief sphw_cmdq_async - cmdq asynchronous message - * @param hwdev: device pointer to hwdev - * @param mod: mod type - * @param cmd: cmd - * @param buf_in: message buffer in - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_cmdq_async(void *hwdev, u8 mod, u8 cmd, struct sphw_cmd_buf *buf_in, u16 channel); - -/** - * @brief sphw_cmdq_direct_resp - cmdq direct message response - * @param hwdev: device pointer to hwdev - * @param mod: mod type - * @param cmd: cmd - * @param buf_in: message buffer in - * @param out_param: message out - * @param timeout: timeout - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_cmdq_direct_resp(void *hwdev, u8 mod, u8 cmd, struct sphw_cmd_buf *buf_in, - u64 *out_param, u32 timeout, u16 channel); - -/** - * @brief sphw_cmdq_detail_resp - cmdq detail message response - * @param hwdev: device pointer to hwdev - * @param mod: mod type - * @param cmd: cmd - * @param buf_in: message buffer in - * @param buf_out: message buffer out - * @param out_param: inline output data - * @param timeout: timeout - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_cmdq_detail_resp(void *hwdev, u8 mod, u8 cmd, struct sphw_cmd_buf *buf_in, - struct sphw_cmd_buf *buf_out, u64 *out_param, u32 timeout, u16 channel); - -/** - * @brief sphw_cos_id_detail_resp - cmdq detail message response - * @param hwdev: device pointer to hwdev - * @param mod: mod type - * @param cmd: cmd - * @param cos_id: cos id - * @param buf_in: message buffer in - * @param buf_out: message buffer out - * @param out_param: inline output data - * @param timeout: timeout - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_cos_id_detail_resp(void *hwdev, u8 mod, u8 cmd, u8 cos_id, struct sphw_cmd_buf *buf_in, - struct sphw_cmd_buf *buf_out, u64 *out_param, u32 timeout, u16 channel); - -/** - * @brief sphw_ppf_tmr_start - start ppf timer - * @param hwdev: device pointer to hwdev - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_ppf_tmr_start(void *hwdev); - -/** - * @brief sphw_ppf_tmr_stop - stop ppf timer - * @param hwdev: device pointer to hwdev - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_ppf_tmr_stop(void *hwdev); - -/** - * @brief sphw_func_tmr_bitmap_set - set timer bitmap status - * @param hwdev: device pointer to hwdev - * @param enable: 0-disable, 1-enable - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_func_tmr_bitmap_set(void *hwdev, bool enable); - -/** - * @brief sphw_get_board_info - get board info - * @param hwdev: device pointer to hwdev - * @param info: board info - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int sphw_get_board_info(void *hwdev, struct sphw_board_info *info, u16 channel); - -/** - * @brief sphw_set_wq_page_size - set work queue page size - * @param hwdev: device pointer to hwdev - * @param func_idx: function id - * @param page_size: page size - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int 
sphw_set_wq_page_size(void *hwdev, u16 func_idx, u32 page_size, u16 channel); - -/** - * @brief sphw_event_callback - evnet callback to notify service driver - * @param hwdev: device pointer to hwdev - * @param event: event info to service driver - */ -void sphw_event_callback(void *hwdev, struct sphw_event_info *event); - -/** - * @brief sphw_link_event_stats - link event stats - * @param hwdev: device pointer to hwdev - * @param link: link status - */ -void sphw_link_event_stats(void *dev, u8 link); - -enum func_reset_flag { - RES_TYPE_FLUSH_BIT = 0, - RES_TYPE_MQM, - RES_TYPE_SMF, - - RES_TYPE_COMM = 10, - RES_TYPE_COMM_MGMT_CH, - RES_TYPE_COMM_CMD_CH, - RES_TYPE_NIC, - RES_TYPE_OVS, - RES_TYPE_VBS, - RES_TYPE_ROCE, - RES_TYPE_FC, - RES_TYPE_TOE, - RES_TYPE_IPSEC, -}; - -#define SPHW_COMM_RES (BIT(RES_TYPE_COMM) | BIT(RES_TYPE_FLUSH_BIT) | BIT(RES_TYPE_MQM) | \ - BIT(RES_TYPE_SMF) | BIT(RES_TYPE_COMM_CMD_CH)) - -#define SPHW_NIC_RES BIT(RES_TYPE_NIC) -#define SPHW_FC_RES BIT(RES_TYPE_FC) - -/** - * @brief sphw_func_reset - reset func - * @param hwdev: device pointer to hwdev - * @param func_id: global function index - * @param reset_flag: reset flag - * @param channel: channel id - */ -int sphw_func_reset(void *dev, u16 func_id, u64 reset_flag, u16 channel); - -int sphw_get_dev_cap(void *hwdev); - -int sphw_set_bdf_ctxt(void *hwdev, u8 bus, u8 device, u8 function); - -int sphw_init_func_mbox_msg_channel(void *hwdev, u16 num_func); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_cfg.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_cfg.c deleted file mode 100644 index 4b2674ec66f0..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_cfg.c +++ /dev/null @@ -1,1341 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt - -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/mutex.h> -#include <linux/device.h> -#include <linux/pci.h> -#include <linux/module.h> -#include <linux/semaphore.h> - -#include "sphw_common.h" -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "sphw_hwdev.h" -#include "sphw_hwif.h" -#include "sphw_cfg_cmd.h" -#include "sphw_hw_cfg.h" - -uint g_rdma_mtts_num; - -uint intr_mode; -uint timer_enable = 1; -uint bloomfilter_enable; -uint g_test_mode; -uint g_test_qpc_num; -uint g_test_qpc_resvd_num; -uint g_test_pagesize_reorder; -uint g_test_xid_alloc_mode = 1; -uint g_test_gpa_check_enable = 1; -uint g_test_qpc_alloc_mode = 2; -uint g_test_scqc_alloc_mode = 2; -uint g_test_max_conn; -uint g_test_max_cache_conn; -uint g_test_scqc_num; -uint g_test_mpt_num; -uint g_test_mpt_resvd; -uint g_test_scq_resvd; -uint g_test_hash_num; -uint g_test_reorder_num; - -static void set_cfg_test_param(struct cfg_mgmt_info *cfg_mgmt) -{ - cfg_mgmt->svc_cap.test_mode = g_test_mode; - if (cfg_mgmt->svc_cap.test_mode == 0) - return; - - cfg_mgmt->svc_cap.timer_en = (u8)timer_enable; - cfg_mgmt->svc_cap.bloomfilter_en = (u8)bloomfilter_enable; - cfg_mgmt->svc_cap.test_qpc_num = g_test_qpc_num; - cfg_mgmt->svc_cap.test_qpc_resvd_num = g_test_qpc_resvd_num; - cfg_mgmt->svc_cap.test_page_size_reorder = g_test_pagesize_reorder; - cfg_mgmt->svc_cap.test_xid_alloc_mode = (bool)g_test_xid_alloc_mode; - cfg_mgmt->svc_cap.test_gpa_check_enable = (bool)g_test_gpa_check_enable; - cfg_mgmt->svc_cap.test_qpc_alloc_mode = (u8)g_test_qpc_alloc_mode; - cfg_mgmt->svc_cap.test_scqc_alloc_mode = (u8)g_test_scqc_alloc_mode; - cfg_mgmt->svc_cap.test_max_conn_num = 
g_test_max_conn; - cfg_mgmt->svc_cap.test_max_cache_conn_num = g_test_max_cache_conn; - cfg_mgmt->svc_cap.test_scqc_num = g_test_scqc_num; - cfg_mgmt->svc_cap.test_mpt_num = g_test_mpt_num; - cfg_mgmt->svc_cap.test_scq_resvd_num = g_test_scq_resvd; - cfg_mgmt->svc_cap.test_mpt_recvd_num = g_test_mpt_resvd; - cfg_mgmt->svc_cap.test_hash_num = g_test_hash_num; - cfg_mgmt->svc_cap.test_reorder_num = g_test_reorder_num; -} - -static void parse_pub_res_cap(struct sphw_hwdev *hwdev, - struct service_cap *cap, - struct cfg_cmd_dev_cap *dev_cap, - enum func_type type) -{ - struct dev_sf_svc_attr *attr = &cap->sf_svc_attr; - - cap->host_id = dev_cap->host_id; - cap->ep_id = dev_cap->ep_id; - cap->er_id = dev_cap->er_id; - cap->port_id = dev_cap->port_id; - - cap->svc_type = dev_cap->svc_cap_en; - cap->chip_svc_type = cap->svc_type; - - cap->cos_valid_bitmap = dev_cap->valid_cos_bitmap; - cap->flexq_en = dev_cap->flexq_en; - - cap->host_total_function = dev_cap->host_total_func; - - if (type != TYPE_VF) { - cap->max_vf = dev_cap->max_vf; - cap->pf_num = dev_cap->host_pf_num; - cap->pf_id_start = dev_cap->pf_id_start; - cap->vf_num = dev_cap->host_vf_num; - cap->vf_id_start = dev_cap->vf_id_start; - } else { - cap->max_vf = 0; - } - - if (dev_cap->sf_svc_attr & SF_SVC_FT_BIT) - attr->ft_en = true; - else - attr->ft_en = false; - - if (dev_cap->sf_svc_attr & SF_SVC_RDMA_BIT) - attr->rdma_en = true; - else - attr->rdma_en = false; - - /* PPF will overwrite it when parse dynamic resource */ - if (dev_cap->func_sf_en) - cap->sf_en = true; - else - cap->sf_en = false; - - cap->lb_mode = dev_cap->lb_mode; - cap->smf_pg = dev_cap->smf_pg; - - cap->timer_en = (u8)timer_enable; /* timer enable */ - cap->host_oq_id_mask_val = dev_cap->host_oq_id_mask_val; - cap->max_connect_num = dev_cap->max_conn_num; - cap->max_stick2cache_num = dev_cap->max_stick2cache_num; - cap->bfilter_start_addr = dev_cap->max_bfilter_start_addr; - cap->bfilter_len = dev_cap->bfilter_len; - cap->hash_bucket_num = dev_cap->hash_bucket_num; - - sdk_info(hwdev->dev_hdl, "Get public resource capbility: svc_cap_en: 0x%x\n", - cap->svc_type); - sdk_info(hwdev->dev_hdl, "Host_id: 0x%x, ep_id: 0x%x, er_id: 0x%x, port_id: 0x%x, cos_bitmap: 0x%x, flexq: 0x%x\n", - cap->host_id, cap->ep_id, cap->er_id, cap->port_id, - cap->cos_valid_bitmap, cap->flexq_en); - sdk_info(hwdev->dev_hdl, "Host_total_function: 0x%x, host_oq_id_mask_val: 0x%x, max_vf: 0x%x\n", - cap->host_total_function, cap->host_oq_id_mask_val, - cap->max_vf); - sdk_info(hwdev->dev_hdl, "Host_pf_num: 0x%x, pf_id_start: 0x%x, host_vf_num: 0x%x, vf_id_start: 0x%x\n", - cap->pf_num, cap->pf_id_start, cap->vf_num, cap->vf_id_start); -} - -static void parse_dynamic_share_res_cap(struct sphw_hwdev *hwdev, - struct service_cap *cap, - struct cfg_cmd_dev_cap *dev_cap, - enum func_type type) -{ - struct host_shared_resource_cap *shared_cap = &cap->shared_res_cap; - - if (dev_cap->host_sf_en) - cap->sf_en = true; - else - cap->sf_en = false; - - shared_cap->host_pctxs = dev_cap->host_pctx_num; - shared_cap->host_cctxs = dev_cap->host_ccxt_num; - shared_cap->host_scqs = dev_cap->host_scq_num; - shared_cap->host_srqs = dev_cap->host_srq_num; - shared_cap->host_mpts = dev_cap->host_mpt_num; - - sdk_info(hwdev->dev_hdl, "Dynamic share resource capbility:\n"); - sdk_info(hwdev->dev_hdl, "host_pctxs: 0x%x, host_cctxs: 0x%x, host_scqs: 0x%x, host_srqs: 0x%x, host_mpts: 0x%x\n", - shared_cap->host_pctxs, shared_cap->host_cctxs, - shared_cap->host_scqs, shared_cap->host_srqs, - shared_cap->host_mpts); -} 
- -static void parse_l2nic_res_cap(struct sphw_hwdev *hwdev, - struct service_cap *cap, - struct cfg_cmd_dev_cap *dev_cap, - enum func_type type) -{ - struct nic_service_cap *nic_cap = &cap->nic_cap; - - nic_cap->max_sqs = dev_cap->nic_max_sq_id + 1; - nic_cap->max_rqs = dev_cap->nic_max_rq_id + 1; - - sdk_info(hwdev->dev_hdl, "L2nic resource capbility, max_sqs: 0x%x, max_rqs: 0x%x\n", - nic_cap->max_sqs, nic_cap->max_rqs); - - /* Check parameters from firmware */ - if (nic_cap->max_sqs > SPHW_CFG_MAX_QP || - nic_cap->max_rqs > SPHW_CFG_MAX_QP) { - sdk_info(hwdev->dev_hdl, "Number of qp exceed limit[1-%d]: sq: %u, rq: %u\n", - SPHW_CFG_MAX_QP, nic_cap->max_sqs, nic_cap->max_rqs); - nic_cap->max_sqs = SPHW_CFG_MAX_QP; - nic_cap->max_rqs = SPHW_CFG_MAX_QP; - } -} - -static void parse_fc_res_cap(struct sphw_hwdev *hwdev, - struct service_cap *cap, - struct cfg_cmd_dev_cap *dev_cap, - enum func_type type) -{ - struct dev_fc_svc_cap *fc_cap = &cap->fc_cap.dev_fc_cap; - - fc_cap->max_parent_qpc_num = dev_cap->fc_max_pctx; - fc_cap->scq_num = dev_cap->fc_max_scq; - fc_cap->srq_num = dev_cap->fc_max_srq; - fc_cap->max_child_qpc_num = dev_cap->fc_max_cctx; - fc_cap->child_qpc_id_start = dev_cap->fc_cctx_id_start; - fc_cap->vp_id_start = dev_cap->fc_vp_id_start; - fc_cap->vp_id_end = dev_cap->fc_vp_id_end; - - sdk_info(hwdev->dev_hdl, "Get fc resource capbility\n"); - sdk_info(hwdev->dev_hdl, "Max_parent_qpc_num: 0x%x, scq_num: 0x%x, srq_num: 0x%x, max_child_qpc_num: 0x%x, child_qpc_id_start: 0x%x\n", - fc_cap->max_parent_qpc_num, fc_cap->scq_num, fc_cap->srq_num, - fc_cap->max_child_qpc_num, fc_cap->child_qpc_id_start); - sdk_info(hwdev->dev_hdl, "Vp_id_start: 0x%x, vp_id_end: 0x%x\n", - fc_cap->vp_id_start, fc_cap->vp_id_end); -} - -static void parse_roce_res_cap(struct sphw_hwdev *hwdev, - struct service_cap *cap, - struct cfg_cmd_dev_cap *dev_cap, - enum func_type type) -{ - struct dev_roce_svc_own_cap *roce_cap = - &cap->rdma_cap.dev_rdma_cap.roce_own_cap; - - roce_cap->max_qps = dev_cap->roce_max_qp; - roce_cap->max_cqs = dev_cap->roce_max_cq; - roce_cap->max_srqs = dev_cap->roce_max_srq; - roce_cap->max_mpts = dev_cap->roce_max_mpt; - roce_cap->max_drc_qps = dev_cap->roce_max_drc_qp; - - roce_cap->wqe_cl_start = dev_cap->roce_wqe_cl_start; - roce_cap->wqe_cl_end = dev_cap->roce_wqe_cl_end; - roce_cap->wqe_cl_sz = dev_cap->roce_wqe_cl_size; - - sdk_info(hwdev->dev_hdl, "Get roce resource capbility, type: 0x%x\n", - type); - sdk_info(hwdev->dev_hdl, "Max_qps: 0x%x, max_cqs: 0x%x, max_srqs: 0x%x, max_mpts: 0x%x, max_drcts: 0x%x\n", - roce_cap->max_qps, roce_cap->max_cqs, roce_cap->max_srqs, - roce_cap->max_mpts, roce_cap->max_drc_qps); - - sdk_info(hwdev->dev_hdl, "Wqe_start: 0x%x, wqe_end: 0x%x, wqe_sz: 0x%x\n", - roce_cap->wqe_cl_start, roce_cap->wqe_cl_end, - roce_cap->wqe_cl_sz); - - if (roce_cap->max_qps == 0) { - if (type == TYPE_PF || type == TYPE_PPF) { - roce_cap->max_qps = 1024; - roce_cap->max_cqs = 2048; - roce_cap->max_srqs = 1024; - roce_cap->max_mpts = 1024; - roce_cap->max_drc_qps = 64; - } else { - roce_cap->max_qps = 512; - roce_cap->max_cqs = 1024; - roce_cap->max_srqs = 512; - roce_cap->max_mpts = 512; - roce_cap->max_drc_qps = 64; - } - } -} - -static void parse_rdma_res_cap(struct sphw_hwdev *hwdev, - struct service_cap *cap, - struct cfg_cmd_dev_cap *dev_cap, - enum func_type type) -{ - struct dev_roce_svc_own_cap *roce_cap = - &cap->rdma_cap.dev_rdma_cap.roce_own_cap; - - roce_cap->cmtt_cl_start = dev_cap->roce_cmtt_cl_start; - roce_cap->cmtt_cl_end = 
dev_cap->roce_cmtt_cl_end; - roce_cap->cmtt_cl_sz = dev_cap->roce_cmtt_cl_size; - - roce_cap->dmtt_cl_start = dev_cap->roce_dmtt_cl_start; - roce_cap->dmtt_cl_end = dev_cap->roce_dmtt_cl_end; - roce_cap->dmtt_cl_sz = dev_cap->roce_dmtt_cl_size; - - sdk_info(hwdev->dev_hdl, "Get rdma resource capbility, Cmtt_start: 0x%x, cmtt_end: 0x%x, cmtt_sz: 0x%x\n", - roce_cap->cmtt_cl_start, roce_cap->cmtt_cl_end, - roce_cap->cmtt_cl_sz); - - sdk_info(hwdev->dev_hdl, "Dmtt_start: 0x%x, dmtt_end: 0x%x, dmtt_sz: 0x%x\n", - roce_cap->dmtt_cl_start, roce_cap->dmtt_cl_end, - roce_cap->dmtt_cl_sz); -} - -static void parse_ovs_res_cap(struct sphw_hwdev *hwdev, - struct service_cap *cap, - struct cfg_cmd_dev_cap *dev_cap, - enum func_type type) -{ - struct ovs_service_cap *ovs_cap = &cap->ovs_cap; - - ovs_cap->dev_ovs_cap.max_pctxs = dev_cap->ovs_max_qpc; - ovs_cap->dev_ovs_cap.fake_vf_start_id = dev_cap->fake_vf_start_id; - ovs_cap->dev_ovs_cap.fake_vf_num = dev_cap->fake_vf_num; - ovs_cap->dev_ovs_cap.dynamic_qp_en = dev_cap->flexq_en; - - sdk_info(hwdev->dev_hdl, "Get ovs resource capbility, max_qpc: 0x%x, fake_vf_start_id: 0x%x, fake_vf_num: 0x%x, dynamic_qp_en: 0x%x\n", - ovs_cap->dev_ovs_cap.max_pctxs, - ovs_cap->dev_ovs_cap.fake_vf_start_id, - ovs_cap->dev_ovs_cap.fake_vf_num, - ovs_cap->dev_ovs_cap.dynamic_qp_en); -} - -static void parse_toe_res_cap(struct sphw_hwdev *hwdev, - struct service_cap *cap, - struct cfg_cmd_dev_cap *dev_cap, - enum func_type type) -{ - struct dev_toe_svc_cap *toe_cap = &cap->toe_cap.dev_toe_cap; - - toe_cap->max_pctxs = dev_cap->toe_max_pctx; - toe_cap->max_cqs = dev_cap->toe_max_cq; - toe_cap->max_srqs = dev_cap->toe_max_srq; - toe_cap->srq_id_start = dev_cap->toe_srq_id_start; - toe_cap->max_mpts = dev_cap->toe_max_mpt; - - sdk_info(hwdev->dev_hdl, "Get toe resource capbility, max_pctxs: 0x%x, max_cqs: 0x%x, max_srqs: 0x%x, srq_id_start: 0x%x, max_mpts: 0x%x\n", - toe_cap->max_pctxs, toe_cap->max_cqs, toe_cap->max_srqs, - toe_cap->srq_id_start, toe_cap->max_mpts); -} - -static void parse_ipsec_res_cap(struct sphw_hwdev *hwdev, - struct service_cap *cap, - struct cfg_cmd_dev_cap *dev_cap, - enum func_type type) -{ - struct ipsec_service_cap *ipsec_cap = &cap->ipsec_cap; - - ipsec_cap->dev_ipsec_cap.max_sa_ctxs = dev_cap->ipsec_max_sactx; - - sdk_info(hwdev->dev_hdl, "Get IPsec resource capbility, max_sa_ctxs: 0x%x\n", - ipsec_cap->dev_ipsec_cap.max_sa_ctxs); -} - -static void parse_dev_cap(struct sphw_hwdev *dev, - struct cfg_cmd_dev_cap *dev_cap, enum func_type type) -{ - struct service_cap *cap = &dev->cfg_mgmt->svc_cap; - - /* Public resource */ - parse_pub_res_cap(dev, cap, dev_cap, type); - - /* PPF managed dynamic resource */ - if (type == TYPE_PPF) - parse_dynamic_share_res_cap(dev, cap, dev_cap, type); - - /* L2 NIC resource */ - if (IS_NIC_TYPE(dev)) - parse_l2nic_res_cap(dev, cap, dev_cap, type); - - /* FC without virtulization */ - if (type == TYPE_PF || type == TYPE_PPF) { - if (IS_FC_TYPE(dev)) - parse_fc_res_cap(dev, cap, dev_cap, type); - } - - /* toe resource */ - if (IS_TOE_TYPE(dev)) - parse_toe_res_cap(dev, cap, dev_cap, type); - - /* mtt cache line */ - if (IS_RDMA_ENABLE(dev)) - parse_rdma_res_cap(dev, cap, dev_cap, type); - - /* RoCE resource */ - if (IS_ROCE_TYPE(dev)) - parse_roce_res_cap(dev, cap, dev_cap, type); - - if (IS_OVS_TYPE(dev)) - parse_ovs_res_cap(dev, cap, dev_cap, type); - - if (IS_IPSEC_TYPE(dev)) - parse_ipsec_res_cap(dev, cap, dev_cap, type); -} - -static int get_cap_from_fw(struct sphw_hwdev *dev, enum func_type type) -{ - struct 
cfg_cmd_dev_cap dev_cap; - u16 out_len = sizeof(dev_cap); - int err; - - memset(&dev_cap, 0, sizeof(dev_cap)); - dev_cap.func_id = sphw_global_func_id(dev); - sdk_info(dev->dev_hdl, "Get cap from fw, func_idx: %u\n", - dev_cap.func_id); - - err = sphw_msg_to_mgmt_sync(dev, SPHW_MOD_CFGM, CFG_CMD_GET_DEV_CAP, - &dev_cap, sizeof(dev_cap), &dev_cap, &out_len, 0, - SPHW_CHANNEL_COMM); - if (err || dev_cap.head.status || !out_len) { - sdk_err(dev->dev_hdl, - "Failed to get capability from FW, err: %d, status: 0x%x, out size: 0x%x\n", - err, dev_cap.head.status, out_len); - return -EIO; - } - - parse_dev_cap(dev, &dev_cap, type); - - return 0; -} - -int sphw_get_dev_cap(void *dev) -{ - struct sphw_hwdev *hwdev = dev; - enum func_type type = SPHW_FUNC_TYPE(hwdev); - int err; - - switch (type) { - case TYPE_PF: - case TYPE_PPF: - case TYPE_VF: - err = get_cap_from_fw(hwdev, type); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to get PF/PPF capability\n"); - return err; - } - break; - default: - sdk_err(hwdev->dev_hdl, "Unsupported PCI Function type: %d\n", - type); - return -EINVAL; - } - - return 0; -} - -static void nic_param_fix(struct sphw_hwdev *dev) -{ -} - -static void rdma_mtt_fix(struct sphw_hwdev *dev) -{ - struct service_cap *cap = &dev->cfg_mgmt->svc_cap; - struct rdma_service_cap *rdma_cap = &cap->rdma_cap; - - rdma_cap->log_mtt = LOG_MTT_SEG; - rdma_cap->log_mtt_seg = LOG_MTT_SEG; - rdma_cap->mtt_entry_sz = MTT_ENTRY_SZ; - rdma_cap->mpt_entry_sz = RDMA_MPT_ENTRY_SZ; - rdma_cap->num_mtts = (g_rdma_mtts_num > 0 ? - g_rdma_mtts_num : RDMA_NUM_MTTS); -} - -static void rdma_param_fix(struct sphw_hwdev *dev) -{ - struct service_cap *cap = &dev->cfg_mgmt->svc_cap; - struct rdma_service_cap *rdma_cap = &cap->rdma_cap; - struct dev_roce_svc_own_cap *roce_cap = - &rdma_cap->dev_rdma_cap.roce_own_cap; - - rdma_cap->log_mtt = LOG_MTT_SEG; - rdma_cap->log_rdmarc = LOG_RDMARC_SEG; - rdma_cap->reserved_qps = RDMA_RSVD_QPS; - rdma_cap->max_sq_sg = RDMA_MAX_SQ_SGE; - - /* RoCE */ - if (IS_ROCE_TYPE(dev)) { - roce_cap->qpc_entry_sz = ROCE_QPC_ENTRY_SZ; - roce_cap->max_wqes = ROCE_MAX_WQES; - roce_cap->max_rq_sg = ROCE_MAX_RQ_SGE; - roce_cap->max_sq_inline_data_sz = ROCE_MAX_SQ_INLINE_DATA_SZ; - roce_cap->max_rq_desc_sz = ROCE_MAX_RQ_DESC_SZ; - roce_cap->rdmarc_entry_sz = ROCE_RDMARC_ENTRY_SZ; - roce_cap->max_qp_init_rdma = ROCE_MAX_QP_INIT_RDMA; - roce_cap->max_qp_dest_rdma = ROCE_MAX_QP_DEST_RDMA; - roce_cap->max_srq_wqes = ROCE_MAX_SRQ_WQES; - roce_cap->reserved_srqs = ROCE_RSVD_SRQS; - roce_cap->max_srq_sge = ROCE_MAX_SRQ_SGE; - roce_cap->srqc_entry_sz = ROCE_SRQC_ENTERY_SZ; - roce_cap->max_msg_sz = ROCE_MAX_MSG_SZ; - } - - rdma_cap->max_sq_desc_sz = RDMA_MAX_SQ_DESC_SZ; - rdma_cap->wqebb_size = WQEBB_SZ; - rdma_cap->max_cqes = RDMA_MAX_CQES; - rdma_cap->reserved_cqs = RDMA_RSVD_CQS; - rdma_cap->cqc_entry_sz = RDMA_CQC_ENTRY_SZ; - rdma_cap->cqe_size = RDMA_CQE_SZ; - rdma_cap->reserved_mrws = RDMA_RSVD_MRWS; - rdma_cap->mpt_entry_sz = RDMA_MPT_ENTRY_SZ; - - /* 2^8 - 1 - * +------------------------+-----------+ - * | 4B | 1M(20b) | Key(8b) | - * +------------------------+-----------+ - * key = 8bit key + 24bit index, - * now Lkey of SGE uses 2bit(bit31 and bit30), so key only have 10bit, - * we use original 8bits directly for simpilification - */ - rdma_cap->max_fmr_maps = 255; - rdma_cap->num_mtts = (g_rdma_mtts_num > 0 ? 
- g_rdma_mtts_num : RDMA_NUM_MTTS); - rdma_cap->log_mtt_seg = LOG_MTT_SEG; - rdma_cap->mtt_entry_sz = MTT_ENTRY_SZ; - rdma_cap->log_rdmarc_seg = LOG_RDMARC_SEG; - rdma_cap->local_ca_ack_delay = LOCAL_ACK_DELAY; - rdma_cap->num_ports = RDMA_NUM_PORTS; - rdma_cap->db_page_size = DB_PAGE_SZ; - rdma_cap->direct_wqe_size = DWQE_SZ; - rdma_cap->num_pds = NUM_PD; - rdma_cap->reserved_pds = RSVD_PD; - rdma_cap->max_xrcds = MAX_XRCDS; - rdma_cap->reserved_xrcds = RSVD_XRCDS; - rdma_cap->max_gid_per_port = MAX_GID_PER_PORT; - rdma_cap->gid_entry_sz = GID_ENTRY_SZ; - rdma_cap->reserved_lkey = RSVD_LKEY; - rdma_cap->num_comp_vectors = (u32)dev->cfg_mgmt->eq_info.num_ceq; - rdma_cap->page_size_cap = PAGE_SZ_CAP; - rdma_cap->flags = (RDMA_BMME_FLAG_LOCAL_INV | - RDMA_BMME_FLAG_REMOTE_INV | - RDMA_BMME_FLAG_FAST_REG_WR | - RDMA_DEV_CAP_FLAG_XRC | - RDMA_DEV_CAP_FLAG_MEM_WINDOW | - RDMA_BMME_FLAG_TYPE_2_WIN | - RDMA_BMME_FLAG_WIN_TYPE_2B | - RDMA_DEV_CAP_FLAG_ATOMIC); - rdma_cap->max_frpl_len = MAX_FRPL_LEN; - rdma_cap->max_pkeys = MAX_PKEYS; -} - -static void toe_param_fix(struct sphw_hwdev *dev) -{ - struct service_cap *cap = &dev->cfg_mgmt->svc_cap; - struct toe_service_cap *toe_cap = &cap->toe_cap; - - toe_cap->pctx_sz = TOE_PCTX_SZ; - toe_cap->scqc_sz = TOE_CQC_SZ; -} - -static void ovs_param_fix(struct sphw_hwdev *dev) -{ - struct service_cap *cap = &dev->cfg_mgmt->svc_cap; - struct ovs_service_cap *ovs_cap = &cap->ovs_cap; - - ovs_cap->pctx_sz = OVS_PCTX_SZ; -} - -static void fc_param_fix(struct sphw_hwdev *dev) -{ - struct service_cap *cap = &dev->cfg_mgmt->svc_cap; - struct fc_service_cap *fc_cap = &cap->fc_cap; - - fc_cap->parent_qpc_size = FC_PCTX_SZ; - fc_cap->child_qpc_size = FC_CCTX_SZ; - fc_cap->sqe_size = FC_SQE_SZ; - - fc_cap->scqc_size = FC_SCQC_SZ; - fc_cap->scqe_size = FC_SCQE_SZ; - - fc_cap->srqc_size = FC_SRQC_SZ; - fc_cap->srqe_size = FC_SRQE_SZ; -} - -static void ipsec_param_fix(struct sphw_hwdev *dev) -{ - struct service_cap *cap = &dev->cfg_mgmt->svc_cap; - struct ipsec_service_cap *ipsec_cap = &cap->ipsec_cap; - - ipsec_cap->sactx_sz = IPSEC_SACTX_SZ; -} - -static void init_service_param(struct sphw_hwdev *dev) -{ - if (IS_NIC_TYPE(dev)) - nic_param_fix(dev); - if (IS_RDMA_ENABLE(dev)) - rdma_mtt_fix(dev); - if (IS_ROCE_TYPE(dev)) - rdma_param_fix(dev); - if (IS_FC_TYPE(dev)) - fc_param_fix(dev); - if (IS_TOE_TYPE(dev)) - toe_param_fix(dev); - if (IS_OVS_TYPE(dev)) - ovs_param_fix(dev); - if (IS_IPSEC_TYPE(dev)) - ipsec_param_fix(dev); -} - -static void cfg_get_eq_num(struct sphw_hwdev *dev) -{ - struct cfg_eq_info *eq_info = &dev->cfg_mgmt->eq_info; - - eq_info->num_ceq = dev->hwif->attr.num_ceqs; - eq_info->num_ceq_remain = eq_info->num_ceq; -} - -static int cfg_init_eq(struct sphw_hwdev *dev) -{ - struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; - struct cfg_eq *eq = NULL; - u8 num_ceq, i = 0; - - cfg_get_eq_num(dev); - num_ceq = cfg_mgmt->eq_info.num_ceq; - - sdk_info(dev->dev_hdl, "Cfg mgmt: ceqs=0x%x, remain=0x%x\n", - cfg_mgmt->eq_info.num_ceq, cfg_mgmt->eq_info.num_ceq_remain); - - if (!num_ceq) { - sdk_err(dev->dev_hdl, "Ceq num cfg in fw is zero\n"); - return -EFAULT; - } - - eq = kcalloc(num_ceq, sizeof(*eq), GFP_KERNEL); - if (!eq) - return -ENOMEM; - - for (i = 0; i < num_ceq; ++i) { - eq[i].eqn = i; - eq[i].free = CFG_FREE; - eq[i].type = SERVICE_T_MAX; - } - - cfg_mgmt->eq_info.eq = eq; - - mutex_init(&cfg_mgmt->eq_info.eq_mutex); - - return 0; -} - -int sphw_vector_to_eqn(void *hwdev, enum sphw_service_type type, int vector) -{ - struct sphw_hwdev *dev = hwdev; - 
struct cfg_mgmt_info *cfg_mgmt = NULL; - struct cfg_eq *eq = NULL; - int eqn = -EINVAL; - - if (!hwdev || vector < 0) - return -EINVAL; - - if (type != SERVICE_T_ROCE) { - sdk_err(dev->dev_hdl, - "Service type :%d, only RDMA service could get eqn by vector.\n", - type); - return -EINVAL; - } - - cfg_mgmt = dev->cfg_mgmt; - vector = (vector % cfg_mgmt->eq_info.num_ceq) + CFG_RDMA_CEQ_BASE; - - eq = cfg_mgmt->eq_info.eq; - if (eq[vector].type == SERVICE_T_ROCE && eq[vector].free == CFG_BUSY) - eqn = eq[vector].eqn; - - return eqn; -} - -static int cfg_init_interrupt(struct sphw_hwdev *dev) -{ - struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; - struct cfg_irq_info *irq_info = &cfg_mgmt->irq_param_info; - u16 intr_num = dev->hwif->attr.num_irqs; - - if (!intr_num) { - sdk_err(dev->dev_hdl, "Irq num cfg in fw is zero\n"); - return -EFAULT; - } - irq_info->alloc_info = kcalloc(intr_num, sizeof(*irq_info->alloc_info), - GFP_KERNEL); - if (!irq_info->alloc_info) - return -ENOMEM; - - irq_info->num_irq_hw = intr_num; - - /* Production requires VF only surppots MSI-X */ - if (SPHW_FUNC_TYPE(dev) == TYPE_VF) - cfg_mgmt->svc_cap.interrupt_type = INTR_TYPE_MSIX; - else - cfg_mgmt->svc_cap.interrupt_type = intr_mode; - - mutex_init(&irq_info->irq_mutex); - return 0; -} - -static int cfg_enable_interrupt(struct sphw_hwdev *dev) -{ - struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; - u16 nreq = cfg_mgmt->irq_param_info.num_irq_hw; - - void *pcidev = dev->pcidev_hdl; - struct irq_alloc_info_st *irq_info = NULL; - struct msix_entry *entry = NULL; - u16 i = 0; - int actual_irq; - - irq_info = cfg_mgmt->irq_param_info.alloc_info; - - sdk_info(dev->dev_hdl, "Interrupt type: %u, irq num: %u.\n", - cfg_mgmt->svc_cap.interrupt_type, nreq); - - switch (cfg_mgmt->svc_cap.interrupt_type) { - case INTR_TYPE_MSIX: - if (!nreq) { - sdk_err(dev->dev_hdl, "Interrupt number cannot be zero\n"); - return -EINVAL; - } - entry = kcalloc(nreq, sizeof(*entry), GFP_KERNEL); - if (!entry) - return -ENOMEM; - - for (i = 0; i < nreq; i++) - entry[i].entry = i; - - actual_irq = pci_enable_msix_range(pcidev, entry, - VECTOR_THRESHOLD, nreq); - if (actual_irq < 0) { - sdk_err(dev->dev_hdl, "Alloc msix entries with threshold 2 failed. 
actual_irq: %d\n", - actual_irq); - kfree(entry); - return -ENOMEM; - } - - nreq = (u16)actual_irq; - cfg_mgmt->irq_param_info.num_total = nreq; - cfg_mgmt->irq_param_info.num_irq_remain = nreq; - sdk_info(dev->dev_hdl, "Request %u msix vector success.\n", - nreq); - - for (i = 0; i < nreq; ++i) { - /* u16 driver uses to specify entry, OS writes */ - irq_info[i].info.msix_entry_idx = entry[i].entry; - /* u32 kernel uses to write allocated vector */ - irq_info[i].info.irq_id = entry[i].vector; - irq_info[i].type = SERVICE_T_MAX; - irq_info[i].free = CFG_FREE; - } - - kfree(entry); - - break; - - default: - sdk_err(dev->dev_hdl, "Unsupport interrupt type %d\n", - cfg_mgmt->svc_cap.interrupt_type); - break; - } - - return 0; -} - -int sphw_alloc_irqs(void *hwdev, enum sphw_service_type type, u16 num, - struct irq_info *irq_info_array, u16 *act_num) -{ - struct sphw_hwdev *dev = hwdev; - struct cfg_mgmt_info *cfg_mgmt = NULL; - struct cfg_irq_info *irq_info = NULL; - struct irq_alloc_info_st *alloc_info = NULL; - int max_num_irq; - u16 free_num_irq; - int i, j; - - if (!hwdev || !irq_info_array || !act_num) - return -EINVAL; - - cfg_mgmt = dev->cfg_mgmt; - irq_info = &cfg_mgmt->irq_param_info; - alloc_info = irq_info->alloc_info; - max_num_irq = irq_info->num_total; - free_num_irq = irq_info->num_irq_remain; - - mutex_lock(&irq_info->irq_mutex); - - if (num > free_num_irq) { - if (free_num_irq == 0) { - sdk_err(dev->dev_hdl, - "no free irq resource in cfg mgmt.\n"); - mutex_unlock(&irq_info->irq_mutex); - return -ENOMEM; - } - - sdk_warn(dev->dev_hdl, "only %u irq resource in cfg mgmt.\n", - free_num_irq); - num = free_num_irq; - } - - *act_num = 0; - - for (i = 0; i < num; i++) { - for (j = 0; j < max_num_irq; j++) { - if (alloc_info[j].free == CFG_FREE) { - if (irq_info->num_irq_remain == 0) { - sdk_err(dev->dev_hdl, "No free irq resource in cfg mgmt\n"); - mutex_unlock(&irq_info->irq_mutex); - return -EINVAL; - } - alloc_info[j].type = type; - alloc_info[j].free = CFG_BUSY; - - irq_info_array[i].msix_entry_idx = - alloc_info[j].info.msix_entry_idx; - irq_info_array[i].irq_id = - alloc_info[j].info.irq_id; - (*act_num)++; - irq_info->num_irq_remain--; - - break; - } - } - } - - mutex_unlock(&irq_info->irq_mutex); - return 0; -} - -void sphw_free_irq(void *hwdev, enum sphw_service_type type, u32 irq_id) -{ - struct sphw_hwdev *dev = hwdev; - struct cfg_mgmt_info *cfg_mgmt = NULL; - struct cfg_irq_info *irq_info = NULL; - struct irq_alloc_info_st *alloc_info = NULL; - int max_num_irq; - int i; - - if (!hwdev) - return; - - cfg_mgmt = dev->cfg_mgmt; - irq_info = &cfg_mgmt->irq_param_info; - alloc_info = irq_info->alloc_info; - max_num_irq = irq_info->num_total; - - mutex_lock(&irq_info->irq_mutex); - - for (i = 0; i < max_num_irq; i++) { - if (irq_id == alloc_info[i].info.irq_id && - type == alloc_info[i].type) { - if (alloc_info[i].free == CFG_BUSY) { - alloc_info[i].free = CFG_FREE; - irq_info->num_irq_remain++; - if (irq_info->num_irq_remain > max_num_irq) { - sdk_err(dev->dev_hdl, "Find target,but over range\n"); - mutex_unlock(&irq_info->irq_mutex); - return; - } - break; - } - } - } - - if (i >= max_num_irq) - sdk_warn(dev->dev_hdl, "Irq %u don`t need to free\n", irq_id); - - mutex_unlock(&irq_info->irq_mutex); -} - -int sphw_vector_to_irq(void *hwdev, enum sphw_service_type type, int vector) -{ - struct sphw_hwdev *dev = hwdev; - struct cfg_mgmt_info *cfg_mgmt = NULL; - struct irq_alloc_info_st *irq_info = NULL; - int irq = -EINVAL; - - if (!hwdev) - return -EINVAL; - - cfg_mgmt = 
dev->cfg_mgmt; - if (type != SERVICE_T_ROCE) { - sdk_err(dev->dev_hdl, - "Service type: %u, only RDMA service could get eqn by vector\n", - type); - return -EINVAL; - } - - /* Current RDMA CEQ are 2 - 31, will change in the future */ - vector = ((vector % cfg_mgmt->eq_info.num_ceq) + CFG_RDMA_CEQ_BASE); - - irq_info = cfg_mgmt->irq_param_info.alloc_info; - if (irq_info[vector].type == SERVICE_T_ROCE) - if (irq_info[vector].free == CFG_BUSY) - irq = (int)irq_info[vector].info.irq_id; - - return irq; -} - -int sphw_alloc_ceqs(void *hwdev, enum sphw_service_type type, int num, int *ceq_id_array, - int *act_num) -{ - struct sphw_hwdev *dev = hwdev; - struct cfg_mgmt_info *cfg_mgmt = NULL; - struct cfg_eq_info *eq = NULL; - int free_ceq; - int i, j; - - if (!hwdev || !ceq_id_array || !act_num) - return -EINVAL; - - cfg_mgmt = dev->cfg_mgmt; - eq = &cfg_mgmt->eq_info; - free_ceq = eq->num_ceq_remain; - - mutex_lock(&eq->eq_mutex); - - if (num > free_ceq) { - if (free_ceq <= 0) { - sdk_err(dev->dev_hdl, "No free ceq resource in cfg mgmt\n"); - mutex_unlock(&eq->eq_mutex); - return -ENOMEM; - } - - sdk_warn(dev->dev_hdl, "Only %d ceq resource in cfg mgmt\n", - free_ceq); - } - - *act_num = 0; - - num = min(num, eq->num_ceq - CFG_RDMA_CEQ_BASE); - for (i = 0; i < num; i++) { - if (eq->num_ceq_remain == 0) { - sdk_warn(dev->dev_hdl, "Alloc %d ceqs, less than required %d ceqs\n", - *act_num, num); - mutex_unlock(&eq->eq_mutex); - return 0; - } - - for (j = CFG_RDMA_CEQ_BASE; j < eq->num_ceq; j++) { - if (eq->eq[j].free == CFG_FREE) { - eq->eq[j].type = type; - eq->eq[j].free = CFG_BUSY; - eq->num_ceq_remain--; - ceq_id_array[i] = eq->eq[j].eqn; - (*act_num)++; - break; - } - } - } - - mutex_unlock(&eq->eq_mutex); - return 0; -} - -void sphw_free_ceq(void *hwdev, enum sphw_service_type type, int ceq_id) -{ - struct sphw_hwdev *dev = hwdev; - struct cfg_mgmt_info *cfg_mgmt = NULL; - struct cfg_eq_info *eq = NULL; - u8 num_ceq; - u8 i = 0; - - if (!hwdev) - return; - - cfg_mgmt = dev->cfg_mgmt; - eq = &cfg_mgmt->eq_info; - num_ceq = eq->num_ceq; - - mutex_lock(&eq->eq_mutex); - - for (i = 0; i < num_ceq; i++) { - if (ceq_id == eq->eq[i].eqn && - type == cfg_mgmt->eq_info.eq[i].type) { - if (eq->eq[i].free == CFG_BUSY) { - eq->eq[i].free = CFG_FREE; - eq->num_ceq_remain++; - if (eq->num_ceq_remain > num_ceq) - eq->num_ceq_remain %= num_ceq; - - mutex_unlock(&eq->eq_mutex); - return; - } - } - } - - if (i >= num_ceq) - sdk_warn(dev->dev_hdl, "ceq %d don`t need to free.\n", ceq_id); - - mutex_unlock(&eq->eq_mutex); -} - -int init_cfg_mgmt(struct sphw_hwdev *dev) -{ - int err; - struct cfg_mgmt_info *cfg_mgmt; - - cfg_mgmt = kzalloc(sizeof(*cfg_mgmt), GFP_KERNEL); - if (!cfg_mgmt) - return -ENOMEM; - - dev->cfg_mgmt = cfg_mgmt; - cfg_mgmt->hwdev = dev; - - err = cfg_init_eq(dev); - if (err) { - sdk_err(dev->dev_hdl, "Failed to init cfg event queue, err: %d\n", - err); - goto free_mgmt_mem; - } - - err = cfg_init_interrupt(dev); - if (err) { - sdk_err(dev->dev_hdl, "Failed to init cfg interrupt, err: %d\n", - err); - goto free_eq_mem; - } - - err = cfg_enable_interrupt(dev); - if (err) { - sdk_err(dev->dev_hdl, "Failed to enable cfg interrupt, err: %d\n", - err); - goto free_interrupt_mem; - } - - return 0; - -free_interrupt_mem: - kfree(cfg_mgmt->irq_param_info.alloc_info); - cfg_mgmt->irq_param_info.alloc_info = NULL; - -free_eq_mem: - kfree(cfg_mgmt->eq_info.eq); - cfg_mgmt->eq_info.eq = NULL; - -free_mgmt_mem: - kfree(cfg_mgmt); - return err; -} - -void free_cfg_mgmt(struct sphw_hwdev *dev) -{ - struct 
cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; - - /* if the allocated resource were recycled */ - if (cfg_mgmt->irq_param_info.num_irq_remain != - cfg_mgmt->irq_param_info.num_total || - cfg_mgmt->eq_info.num_ceq_remain != cfg_mgmt->eq_info.num_ceq) - sdk_err(dev->dev_hdl, "Can't reclaim all irq and event queue, please check\n"); - - switch (cfg_mgmt->svc_cap.interrupt_type) { - case INTR_TYPE_MSIX: - pci_disable_msix(dev->pcidev_hdl); - break; - - case INTR_TYPE_MSI: - pci_disable_msi(dev->pcidev_hdl); - break; - - case INTR_TYPE_INT: - default: - break; - } - - kfree(cfg_mgmt->irq_param_info.alloc_info); - cfg_mgmt->irq_param_info.alloc_info = NULL; - - kfree(cfg_mgmt->eq_info.eq); - cfg_mgmt->eq_info.eq = NULL; - - kfree(cfg_mgmt); -} - -int init_capability(struct sphw_hwdev *dev) -{ - int err; - struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; - - set_cfg_test_param(cfg_mgmt); - - cfg_mgmt->svc_cap.sf_svc_attr.ft_pf_en = false; - cfg_mgmt->svc_cap.sf_svc_attr.rdma_pf_en = false; - - err = sphw_get_dev_cap(dev); - if (err) - return err; - - init_service_param(dev); - - sdk_info(dev->dev_hdl, "Init capability success\n"); - return 0; -} - -void free_capability(struct sphw_hwdev *dev) -{ - sdk_info(dev->dev_hdl, "Free capability success"); -} - -bool sphw_support_nic(void *hwdev, struct nic_service_cap *cap) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev) - return false; - - if (!IS_NIC_TYPE(dev)) - return false; - - if (cap) - memcpy(cap, &dev->cfg_mgmt->svc_cap.nic_cap, sizeof(*cap)); - - return true; -} - -bool sphw_support_ipsec(void *hwdev, struct ipsec_service_cap *cap) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev) - return false; - - if (!IS_IPSEC_TYPE(dev)) - return false; - - if (cap) - memcpy(cap, &dev->cfg_mgmt->svc_cap.ipsec_cap, sizeof(*cap)); - - return true; -} - -bool sphw_support_roce(void *hwdev, struct rdma_service_cap *cap) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev) - return false; - - if (!IS_ROCE_TYPE(dev)) - return false; - - if (cap) - memcpy(cap, &dev->cfg_mgmt->svc_cap.rdma_cap, sizeof(*cap)); - - return true; -} - -bool sphw_support_fc(void *hwdev, struct fc_service_cap *cap) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev) - return false; - - if (!IS_FC_TYPE(dev)) - return false; - - if (cap) - memcpy(cap, &dev->cfg_mgmt->svc_cap.fc_cap, sizeof(*cap)); - - return true; -} - -bool sphw_support_rdma(void *hwdev, struct rdma_service_cap *cap) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev) - return false; - - if (!IS_RDMA_TYPE(dev)) - return false; - - if (cap) - memcpy(cap, &dev->cfg_mgmt->svc_cap.rdma_cap, sizeof(*cap)); - - return true; -} - -bool sphw_support_ovs(void *hwdev, struct ovs_service_cap *cap) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev) - return false; - - if (!IS_OVS_TYPE(dev)) - return false; - - if (cap) - memcpy(cap, &dev->cfg_mgmt->svc_cap.ovs_cap, sizeof(*cap)); - - return true; -} - -/* Only PPF support it, PF is not */ -bool sphw_support_toe(void *hwdev, struct toe_service_cap *cap) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev) - return false; - - if (!IS_TOE_TYPE(dev)) - return false; - - if (cap) - memcpy(cap, &dev->cfg_mgmt->svc_cap.toe_cap, sizeof(*cap)); - - return true; -} - -bool sphw_func_for_mgmt(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev) - return false; - - if (dev->cfg_mgmt->svc_cap.chip_svc_type >= CFG_SVC_NIC_BIT0) - return false; - else - return true; -} - -bool sphw_get_stateful_enable(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev) - return 
false; - - return dev->cfg_mgmt->svc_cap.sf_en; -} - -u8 sphw_host_oq_id_mask(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - if (!dev) { - pr_err("Hwdev pointer is NULL for getting host oq id mask\n"); - return 0; - } - return dev->cfg_mgmt->svc_cap.host_oq_id_mask_val; -} - -u8 sphw_host_id(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - if (!dev) { - pr_err("Hwdev pointer is NULL for getting host id\n"); - return 0; - } - return dev->cfg_mgmt->svc_cap.host_id; -} - -u16 sphw_host_total_func(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - if (!dev) { - pr_err("Hwdev pointer is NULL for getting host total function number\n"); - return 0; - } - return dev->cfg_mgmt->svc_cap.host_total_function; -} - -u16 sphw_func_max_qnum(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - if (!dev) { - pr_err("Hwdev pointer is NULL for getting function max queue number\n"); - return 0; - } - return dev->cfg_mgmt->svc_cap.nic_cap.max_sqs; -} - -u16 sphw_func_max_nic_qnum(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - if (!dev) { - pr_err("Hwdev pointer is NULL for getting function max queue number\n"); - return 0; - } - return dev->cfg_mgmt->svc_cap.nic_cap.max_sqs; -} - -u8 sphw_ep_id(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - if (!dev) { - pr_err("Hwdev pointer is NULL for getting ep id\n"); - return 0; - } - return dev->cfg_mgmt->svc_cap.ep_id; -} - -u8 sphw_er_id(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - if (!dev) { - pr_err("Hwdev pointer is NULL for getting er id\n"); - return 0; - } - return dev->cfg_mgmt->svc_cap.er_id; -} - -u8 sphw_physical_port_id(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - if (!dev) { - pr_err("Hwdev pointer is NULL for getting physical port id\n"); - return 0; - } - return dev->cfg_mgmt->svc_cap.port_id; -} - -u16 sphw_func_max_vf(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - if (!dev) { - pr_err("Hwdev pointer is NULL for getting max vf number\n"); - return 0; - } - return dev->cfg_mgmt->svc_cap.max_vf; -} - -u8 sphw_cos_valid_bitmap(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - if (!dev) { - pr_err("Hwdev pointer is NULL for getting cos valid bitmap\n"); - return 0; - } - return (u8)(dev->cfg_mgmt->svc_cap.cos_valid_bitmap); -} - -void sphw_shutdown_hwdev(void *hwdev) -{ - /* to do : if IS_SLAVE_HOST*/ -} - -u32 sphw_host_pf_num(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - if (!dev) { - pr_err("Hwdev pointer is NULL for getting pf number capability\n"); - return 0; - } - - return dev->cfg_mgmt->svc_cap.pf_num; -} - -u8 sphw_flexq_en(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev) - return 0; - - return dev->cfg_mgmt->svc_cap.flexq_en; -} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_cfg.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_cfg.h deleted file mode 100644 index 1b48e0991563..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_cfg.h +++ /dev/null @@ -1,329 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_HW_CFG_H -#define SPHW_HW_CFG_H - -#include <linux/types.h> - -#define CFG_MAX_CMD_TIMEOUT 30000 /* ms */ - -enum { - CFG_FREE = 0, - CFG_BUSY = 1 -}; - -/* start position for CEQs allocation, Max number of CEQs is 32 */ -enum { - CFG_RDMA_CEQ_BASE = 0 -}; - -enum { - SF_SVC_FT_BIT = (1 << 0), - SF_SVC_RDMA_BIT = (1 << 1), -}; - -/* RDMA resource */ -#define K_UNIT BIT(10) -#define M_UNIT BIT(20) -#define G_UNIT BIT(30) - -/* L2NIC */ -#define SPHW_CFG_MAX_QP 256 - 
-/* RDMA */ -#define RDMA_RSVD_QPS 2 -#define ROCE_MAX_WQES (32 * K_UNIT - 1) -#define IWARP_MAX_WQES (8 * K_UNIT) - -#define RDMA_MAX_SQ_SGE 32 - -#define ROCE_MAX_RQ_SGE 16 - -#define RDMA_MAX_SQ_DESC_SZ (1 * K_UNIT) - -/* (256B(cache_line_len) - 16B(ctrl_seg_len) - 64B(max_task_seg_len)) */ -#define ROCE_MAX_SQ_INLINE_DATA_SZ 912 - -#define ROCE_MAX_RQ_DESC_SZ 256 - -#define ROCE_QPC_ENTRY_SZ 512 - -#define WQEBB_SZ 64 - -#define ROCE_RDMARC_ENTRY_SZ 32 -#define ROCE_MAX_QP_INIT_RDMA 128 -#define ROCE_MAX_QP_DEST_RDMA 128 - -#define ROCE_MAX_SRQ_WQES (16 * K_UNIT - 1) -#define ROCE_RSVD_SRQS 0 -#define ROCE_MAX_SRQ_SGE 15 -#define ROCE_SRQC_ENTERY_SZ 64 - -#define RDMA_MAX_CQES (8 * M_UNIT - 1) -#define RDMA_RSVD_CQS 0 - -#define RDMA_CQC_ENTRY_SZ 128 - -#define RDMA_CQE_SZ 64 -#define RDMA_RSVD_MRWS 128 -#define RDMA_MPT_ENTRY_SZ 64 -#define RDMA_NUM_MTTS (1 * G_UNIT) -#define LOG_MTT_SEG 5 -#define MTT_ENTRY_SZ 8 -#define LOG_RDMARC_SEG 3 - -#define LOCAL_ACK_DELAY 15 -#define RDMA_NUM_PORTS 1 -#define ROCE_MAX_MSG_SZ (2 * G_UNIT) - -#define DB_PAGE_SZ (4 * K_UNIT) -#define DWQE_SZ 256 - -#define NUM_PD (128 * K_UNIT) -#define RSVD_PD 0 - -#define MAX_XRCDS (64 * K_UNIT) -#define RSVD_XRCDS 0 - -#define MAX_GID_PER_PORT 128 -#define GID_ENTRY_SZ 32 -#define RSVD_LKEY ((RDMA_RSVD_MRWS - 1) << 8) -#define NUM_COMP_VECTORS 32 -#define PAGE_SZ_CAP ((1UL << 12) | (1UL << 16) | (1UL << 21)) -#define ROCE_MODE 1 - -#define MAX_FRPL_LEN 511 -#define MAX_PKEYS 1 - -/* ToE */ -#define TOE_PCTX_SZ 1024 -#define TOE_CQC_SZ 64 - -/* IoE */ -#define IOE_PCTX_SZ 512 - -/* FC */ -#define FC_PCTX_SZ 256 -#define FC_CCTX_SZ 256 -#define FC_SQE_SZ 128 -#define FC_SCQC_SZ 64 -#define FC_SCQE_SZ 64 -#define FC_SRQC_SZ 64 -#define FC_SRQE_SZ 32 - -/* OVS */ -#define OVS_PCTX_SZ 512 - -/* IPsec */ -#define IPSEC_SACTX_SZ 512 - -struct dev_sf_svc_attr { - bool ft_en; /* business enable flag (not include RDMA) */ - bool ft_pf_en; /* In FPGA Test VF resource is in PF or not, - * 0 - VF, 1 - PF, VF doesn't need this bit. - */ - bool rdma_en; - bool rdma_pf_en;/* In FPGA Test VF RDMA resource is in PF or not, - * 0 - VF, 1 - PF, VF doesn't need this bit. 
- */ -}; - -struct host_shared_resource_cap { - u32 host_pctxs; /* Parent Context max 1M, IOE and FCoE max 8K flows */ - u32 host_cctxs; /* Child Context: max 8K */ - u32 host_scqs; /* shared CQ, chip interface module uses 1 SCQ - * TOE/IOE/FCoE each uses 1 SCQ - * RoCE/IWARP uses multiple SCQs - * So 6 SCQ least - */ - u32 host_srqs; /* SRQ number: 256K */ - u32 host_mpts; /* MR number:1M */ -}; - -enum intr_type { - INTR_TYPE_MSIX, - INTR_TYPE_MSI, - INTR_TYPE_INT, - INTR_TYPE_NONE, - /* PXE,OVS need single thread processing, - * synchronization messages must use poll wait mechanism interface - */ -}; - -/* service type relates define */ -enum cfg_svc_type_en { - CFG_SVC_NIC_BIT0 = (1 << 0), - CFG_SVC_ROCE_BIT1 = (1 << 1), - CFG_SVC_VBS_BIT2 = (1 << 2), - CFG_SVC_TOE_BIT3 = (1 << 3), - CFG_SVC_IPSEC_BIT4 = (1 << 4), - CFG_SVC_FC_BIT5 = (1 << 5), - CFG_SVC_VIRTIO_BIT6 = (1 << 6), - CFG_SVC_OVS_BIT7 = (1 << 7), - CFG_SVC_RSV2_BIT8 = (1 << 8), - CFG_SVC_IOE_BIT9 = (1 << 9), - - CFG_SVC_FT_EN = (CFG_SVC_VBS_BIT2 | CFG_SVC_TOE_BIT3 | - CFG_SVC_IPSEC_BIT4 | CFG_SVC_FC_BIT5 | - CFG_SVC_VIRTIO_BIT6 | CFG_SVC_OVS_BIT7 | - CFG_SVC_IOE_BIT9), - CFG_SVC_RDMA_EN = CFG_SVC_ROCE_BIT1 -}; - -/* device capability */ -struct service_cap { - struct dev_sf_svc_attr sf_svc_attr; - enum cfg_svc_type_en svc_type; /* user input service type */ - enum cfg_svc_type_en chip_svc_type; /* HW supported service type */ - - u8 host_id; - u8 ep_id; - u8 er_id; /* PF/VF's ER */ - u8 port_id; /* PF/VF's physical port */ - - /* Host global resources */ - u16 host_total_function; - u8 pf_num; - u8 pf_id_start; - u16 vf_num; /* max numbers of vf in current host */ - u16 vf_id_start; - u8 host_oq_id_mask_val; - - u8 flexq_en; - u8 cos_valid_bitmap; - u16 max_vf; /* max VF number that PF supported */ - - /* DO NOT get interrupt_type from firmware */ - enum intr_type interrupt_type; - - bool sf_en; /* stateful business status */ - u8 timer_en; /* 0:disable, 1:enable */ - u8 bloomfilter_en; /* 0:disable, 1:enable*/ - - u8 lb_mode; - u8 smf_pg; - - /* For test */ - u32 test_mode; - u32 test_qpc_num; - u32 test_qpc_resvd_num; - u32 test_page_size_reorder; - bool test_xid_alloc_mode; - bool test_gpa_check_enable; - u8 test_qpc_alloc_mode; - u8 test_scqc_alloc_mode; - - u32 test_max_conn_num; - u32 test_max_cache_conn_num; - u32 test_scqc_num; - u32 test_mpt_num; - u32 test_scq_resvd_num; - u32 test_mpt_recvd_num; - u32 test_hash_num; - u32 test_reorder_num; - - u32 max_connect_num; /* PF/VF maximum connection number(1M) */ - /* The maximum connections which can be stick to cache memory, max 1K */ - u16 max_stick2cache_num; - /* Starting address in cache memory for bloom filter, 64Bytes aligned */ - u16 bfilter_start_addr; - /* Length for bloom filter, aligned on 64Bytes. The size is length*64B. - * Bloom filter memory size + 1 must be power of 2. - * The maximum memory size of bloom filter is 4M - */ - u16 bfilter_len; - /* The size of hash bucket tables, align on 64 entries. - * Be used to AND (&) the hash value. Bucket Size +1 must be power of 2. 
- * The maximum number of hash bucket is 4M - */ - u16 hash_bucket_num; - - struct host_shared_resource_cap shared_res_cap; /* shared capability */ - struct nic_service_cap nic_cap; /* NIC capability */ - struct rdma_service_cap rdma_cap; /* RDMA capability */ - struct fc_service_cap fc_cap; /* FC capability */ - struct toe_service_cap toe_cap; /* ToE capability */ - struct ovs_service_cap ovs_cap; /* OVS capability */ - struct ipsec_service_cap ipsec_cap; /* IPsec capability */ -}; - -struct cfg_eq { - enum sphw_service_type type; - int eqn; - int free; /* 1 - alocated, 0- freed */ -}; - -struct cfg_eq_info { - struct cfg_eq *eq; - - u8 num_ceq; - - u8 num_ceq_remain; - - /* mutex used for allocate EQs */ - struct mutex eq_mutex; -}; - -struct irq_alloc_info_st { - enum sphw_service_type type; - int free; /* 1 - alocated, 0- freed */ - struct irq_info info; -}; - -struct cfg_irq_info { - struct irq_alloc_info_st *alloc_info; - u16 num_total; - u16 num_irq_remain; - u16 num_irq_hw; /* device max irq number */ - - /* mutex used for allocate EQs */ - struct mutex irq_mutex; -}; - -#define VECTOR_THRESHOLD 2 - -struct cfg_mgmt_info { - struct sphw_hwdev *hwdev; - struct service_cap svc_cap; - struct cfg_eq_info eq_info; /* EQ */ - struct cfg_irq_info irq_param_info; /* IRQ */ - u32 func_seq_num; /* temporary */ -}; - -#define IS_NIC_TYPE(dev) \ - (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_NIC_BIT0) -#define IS_ROCE_TYPE(dev) \ - (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_ROCE_BIT1) -#define IS_FCOE_TYPE(dev) \ - (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_FCOE_BIT2) -#define IS_TOE_TYPE(dev) \ - (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_TOE_BIT3) -#define IS_IPSEC_TYPE(dev) \ - (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_IPSEC_BIT4) -#define IS_FC_TYPE(dev) \ - (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_FC_BIT5) -#define IS_FIC_TYPE(dev) \ - (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_FIC_BIT6) -#define IS_OVS_TYPE(dev) \ - (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_OVS_BIT7) -#define IS_ACL_TYPE(dev) \ - (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_ACL_BIT8) -#define IS_IOE_TYPE(dev) \ - (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_IOE_BIT9) -#define IS_FT_TYPE(dev) \ - (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_FT_EN) -#define IS_RDMA_TYPE(dev) \ - (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SVC_RDMA_EN) -#define IS_RDMA_ENABLE(dev) \ - ((dev)->cfg_mgmt->svc_cap.sf_svc_attr.rdma_en) - -int init_cfg_mgmt(struct sphw_hwdev *dev); - -void free_cfg_mgmt(struct sphw_hwdev *dev); - -int init_capability(struct sphw_hwdev *dev); - -void free_capability(struct sphw_hwdev *dev); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_comm.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_comm.c deleted file mode 100644 index b868bf8ed1cb..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_comm.c +++ /dev/null @@ -1,1280 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/types.h> -#include <linux/delay.h> -#include <linux/module.h> -#include <linux/completion.h> -#include <linux/semaphore.h> -#include <linux/interrupt.h> - -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "sphw_common.h" -#include "sphw_csr.h" -#include "sphw_hwdev.h" -#include "sphw_hwif.h" -#include "sphw_wq.h" -#include 
"sphw_cmdq.h" -#include "sphw_comm_msg_intf.h" -#include "sphw_hw_comm.h" - -#define SPHW_MSIX_CNT_LLI_TIMER_SHIFT 0 -#define SPHW_MSIX_CNT_LLI_CREDIT_SHIFT 8 -#define SPHW_MSIX_CNT_COALESC_TIMER_SHIFT 8 -#define SPHW_MSIX_CNT_PENDING_SHIFT 8 -#define SPHW_MSIX_CNT_RESEND_TIMER_SHIFT 29 - -#define SPHW_MSIX_CNT_LLI_TIMER_MASK 0xFFU -#define SPHW_MSIX_CNT_LLI_CREDIT_MASK 0xFFU -#define SPHW_MSIX_CNT_COALESC_TIMER_MASK 0xFFU -#define SPHW_MSIX_CNT_PENDING_MASK 0x1FU -#define SPHW_MSIX_CNT_RESEND_TIMER_MASK 0x7U - -#define SPHW_MSIX_CNT_SET(val, member) \ - (((val) & SPHW_MSIX_CNT_##member##_MASK) << \ - SPHW_MSIX_CNT_##member##_SHIFT) - -#define DEFAULT_RX_BUF_SIZE ((u16)0xB) - -enum sphw_rx_buf_size { - SPHW_RX_BUF_SIZE_32B = 0x20, - SPHW_RX_BUF_SIZE_64B = 0x40, - SPHW_RX_BUF_SIZE_96B = 0x60, - SPHW_RX_BUF_SIZE_128B = 0x80, - SPHW_RX_BUF_SIZE_192B = 0xC0, - SPHW_RX_BUF_SIZE_256B = 0x100, - SPHW_RX_BUF_SIZE_384B = 0x180, - SPHW_RX_BUF_SIZE_512B = 0x200, - SPHW_RX_BUF_SIZE_768B = 0x300, - SPHW_RX_BUF_SIZE_1K = 0x400, - SPHW_RX_BUF_SIZE_1_5K = 0x600, - SPHW_RX_BUF_SIZE_2K = 0x800, - SPHW_RX_BUF_SIZE_3K = 0xC00, - SPHW_RX_BUF_SIZE_4K = 0x1000, - SPHW_RX_BUF_SIZE_8K = 0x2000, - SPHW_RX_BUF_SIZE_16K = 0x4000, -}; - -const int sphw_hw_rx_buf_size[] = { - SPHW_RX_BUF_SIZE_32B, - SPHW_RX_BUF_SIZE_64B, - SPHW_RX_BUF_SIZE_96B, - SPHW_RX_BUF_SIZE_128B, - SPHW_RX_BUF_SIZE_192B, - SPHW_RX_BUF_SIZE_256B, - SPHW_RX_BUF_SIZE_384B, - SPHW_RX_BUF_SIZE_512B, - SPHW_RX_BUF_SIZE_768B, - SPHW_RX_BUF_SIZE_1K, - SPHW_RX_BUF_SIZE_1_5K, - SPHW_RX_BUF_SIZE_2K, - SPHW_RX_BUF_SIZE_3K, - SPHW_RX_BUF_SIZE_4K, - SPHW_RX_BUF_SIZE_8K, - SPHW_RX_BUF_SIZE_16K, -}; - -static inline int comm_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - return sphw_msg_to_mgmt_sync(hwdev, SPHW_MOD_COMM, cmd, buf_in, in_size, buf_out, - out_size, 0, SPHW_CHANNEL_COMM); -} - -static inline int comm_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size, u16 channel) -{ - return sphw_msg_to_mgmt_sync(hwdev, SPHW_MOD_COMM, cmd, buf_in, - in_size, buf_out, out_size, 0, channel); -} - -int sphw_get_interrupt_cfg(void *dev, struct interrupt_info *info, u16 channel) -{ - struct sphw_hwdev *hwdev = dev; - struct comm_cmd_msix_config msix_cfg; - u16 out_size = sizeof(msix_cfg); - int err; - - if (!hwdev || !info) - return -EINVAL; - - memset(&msix_cfg, 0, sizeof(msix_cfg)); - msix_cfg.func_id = sphw_global_func_id(hwdev); - msix_cfg.msix_index = info->msix_index; - msix_cfg.opcode = MGMT_MSG_CMD_OP_GET; - - err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_CFG_MSIX_CTRL_REG, - &msix_cfg, sizeof(msix_cfg), &msix_cfg, - &out_size, channel); - if (err || !out_size || msix_cfg.head.status) { - sdk_err(hwdev->dev_hdl, "Failed to get interrupt config, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", - err, msix_cfg.head.status, out_size, channel); - return -EINVAL; - } - - info->lli_credit_limit = msix_cfg.lli_credit_cnt; - info->lli_timer_cfg = msix_cfg.lli_timer_cnt; - info->pending_limt = msix_cfg.pending_cnt; - info->coalesc_timer_cfg = msix_cfg.coalesce_timer_cnt; - info->resend_timer_cfg = msix_cfg.resend_timer_cnt; - - return 0; -} - -int sphw_set_interrupt_cfg_direct(void *hwdev, struct interrupt_info *info, u16 channel) -{ - struct comm_cmd_msix_config msix_cfg; - u16 out_size = sizeof(msix_cfg); - int err; - - if (!hwdev) - return -EINVAL; - - memset(&msix_cfg, 0, sizeof(msix_cfg)); - msix_cfg.func_id = sphw_global_func_id(hwdev); - 
msix_cfg.msix_index = (u16)info->msix_index; - msix_cfg.opcode = MGMT_MSG_CMD_OP_SET; - - msix_cfg.lli_credit_cnt = info->lli_credit_limit; - msix_cfg.lli_timer_cnt = info->lli_timer_cfg; - msix_cfg.pending_cnt = info->pending_limt; - msix_cfg.coalesce_timer_cnt = info->coalesc_timer_cfg; - msix_cfg.resend_timer_cnt = info->resend_timer_cfg; - - err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_CFG_MSIX_CTRL_REG, - &msix_cfg, sizeof(msix_cfg), &msix_cfg, - &out_size, channel); - if (err || !out_size || msix_cfg.head.status) { - sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, - "Failed to set interrupt config, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", - err, msix_cfg.head.status, out_size, channel); - return -EINVAL; - } - - return 0; -} - -int sphw_set_interrupt_cfg(void *dev, struct interrupt_info info, u16 channel) -{ - struct interrupt_info temp_info; - struct sphw_hwdev *hwdev = dev; - int err; - - if (!hwdev) - return -EINVAL; - - temp_info.msix_index = info.msix_index; - - err = sphw_get_interrupt_cfg(hwdev, &temp_info, channel); - if (err) - return -EINVAL; - - if (!info.lli_set) { - info.lli_credit_limit = temp_info.lli_credit_limit; - info.lli_timer_cfg = temp_info.lli_timer_cfg; - } - - if (!info.interrupt_coalesc_set) { - info.pending_limt = temp_info.pending_limt; - info.coalesc_timer_cfg = temp_info.coalesc_timer_cfg; - info.resend_timer_cfg = temp_info.resend_timer_cfg; - } - - return sphw_set_interrupt_cfg_direct(hwdev, &info, channel); -} - -void sphw_misx_intr_clear_resend_bit(void *hwdev, u16 msix_idx, u8 clear_resend_en) -{ - struct sphw_hwif *hwif = NULL; - u32 msix_ctrl = 0, addr; - - if (!hwdev) - return; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - msix_ctrl = SPHW_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX) | - SPHW_MSI_CLR_INDIR_SET(clear_resend_en, RESEND_TIMER_CLR); - - addr = SPHW_CSR_FUNC_MSI_CLR_WR_ADDR; - sphw_hwif_write_reg(hwif, addr, msix_ctrl); -} - -int sphw_set_wq_page_size(void *hwdev, u16 func_idx, u32 page_size, u16 channel) -{ - struct comm_cmd_wq_page_size page_size_info; - u16 out_size = sizeof(page_size_info); - int err; - - memset(&page_size_info, 0, sizeof(page_size_info)); - page_size_info.func_id = func_idx; - page_size_info.page_size = SPHW_PAGE_SIZE_HW(page_size); - page_size_info.opcode = MGMT_MSG_CMD_OP_SET; - - err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_CFG_PAGESIZE, - &page_size_info, sizeof(page_size_info), - &page_size_info, &out_size, channel); - if (err || !out_size || page_size_info.head.status) { - sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, "Failed to set wq page size, err: %d, status: 0x%x, out_size: 0x%0x, channel: 0x%x\n", - err, page_size_info.head.status, out_size, channel); - return -EFAULT; - } - - return 0; -} - -int sphw_func_reset(void *dev, u16 func_id, u64 reset_flag, u16 channel) -{ - struct comm_cmd_func_reset func_reset; - struct sphw_hwdev *hwdev = dev; - u16 out_size = sizeof(func_reset); - int err = 0; - - if (!dev) { - pr_err("Invalid para: dev is null.\n"); - return -EINVAL; - } - - sdk_info(hwdev->dev_hdl, "Function is reset, flag: 0x%llx, channel:0x%x\n", - reset_flag, channel); - - memset(&func_reset, 0, sizeof(func_reset)); - func_reset.func_id = func_id; - func_reset.reset_flag = reset_flag; - err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_FUNC_RESET, - &func_reset, sizeof(func_reset), - &func_reset, &out_size, channel); - if (err || !out_size || func_reset.head.status) { - sdk_err(hwdev->dev_hdl, "Failed to reset func resources, reset_flag 0x%llx, err: %d, status: 
0x%x, out_size: 0x%x\n", - reset_flag, err, func_reset.head.status, out_size); - return -EIO; - } - - return 0; -} - -static u16 get_hw_rx_buf_size(int rx_buf_sz) -{ - u16 num_hw_types = - sizeof(sphw_hw_rx_buf_size) / - sizeof(sphw_hw_rx_buf_size[0]); - u16 i; - - for (i = 0; i < num_hw_types; i++) { - if (sphw_hw_rx_buf_size[i] == rx_buf_sz) - return i; - } - - pr_err("Chip can't support rx buf size of %d\n", rx_buf_sz); - - return DEFAULT_RX_BUF_SIZE; /* default 2K */ -} - -int sphw_set_root_ctxt(void *hwdev, u32 rq_depth, u32 sq_depth, int rx_buf_sz, u16 channel) -{ - struct comm_cmd_root_ctxt root_ctxt; - u16 out_size = sizeof(root_ctxt); - int err; - - if (!hwdev) - return -EINVAL; - - memset(&root_ctxt, 0, sizeof(root_ctxt)); - root_ctxt.func_id = sphw_global_func_id(hwdev); - - root_ctxt.set_cmdq_depth = 0; - root_ctxt.cmdq_depth = 0; - - root_ctxt.lro_en = 1; - - root_ctxt.rq_depth = (u16)ilog2(rq_depth); - root_ctxt.rx_buf_sz = get_hw_rx_buf_size(rx_buf_sz); - root_ctxt.sq_depth = (u16)ilog2(sq_depth); - - err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_SET_VAT, - &root_ctxt, sizeof(root_ctxt), - &root_ctxt, &out_size, channel); - if (err || !out_size || root_ctxt.head.status) { - sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, - "Failed to set root context, err: %d, status: 0x%x, out_size: 0x%x, channel: 0x%x\n", - err, root_ctxt.head.status, out_size, channel); - return -EFAULT; - } - - return 0; -} - -int sphw_clean_root_ctxt(void *hwdev, u16 channel) -{ - struct comm_cmd_root_ctxt root_ctxt; - u16 out_size = sizeof(root_ctxt); - int err; - - if (!hwdev) - return -EINVAL; - - memset(&root_ctxt, 0, sizeof(root_ctxt)); - root_ctxt.func_id = sphw_global_func_id(hwdev); - - err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_SET_VAT, - &root_ctxt, sizeof(root_ctxt), - &root_ctxt, &out_size, channel); - if (err || !out_size || root_ctxt.head.status) { - sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, - "Failed to set root context, err: %d, status: 0x%x, out_size: 0x%x, channel: 0x%x\n", - err, root_ctxt.head.status, out_size, channel); - return -EFAULT; - } - - return 0; -} - -int sphw_set_cmdq_depth(void *hwdev, u16 cmdq_depth) -{ - struct comm_cmd_root_ctxt root_ctxt; - u16 out_size = sizeof(root_ctxt); - int err; - - memset(&root_ctxt, 0, sizeof(root_ctxt)); - root_ctxt.func_id = sphw_global_func_id(hwdev); - - root_ctxt.set_cmdq_depth = 1; - root_ctxt.cmdq_depth = (u8)ilog2(cmdq_depth); - - err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_VAT, &root_ctxt, - sizeof(root_ctxt), &root_ctxt, &out_size); - if (err || !out_size || root_ctxt.head.status) { - sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, - "Failed to set cmdq depth, err: %d, status: 0x%x, out_size: 0x%x\n", - err, root_ctxt.head.status, out_size); - return -EFAULT; - } - - return 0; -} - -int sphw_set_cmdq_ctxt(struct sphw_hwdev *hwdev, u8 cmdq_id, struct cmdq_ctxt_info *ctxt) -{ - struct comm_cmd_cmdq_ctxt cmdq_ctxt; - u16 out_size = sizeof(cmdq_ctxt); - int err; - - memset(&cmdq_ctxt, 0, sizeof(cmdq_ctxt)); - memcpy(&cmdq_ctxt.ctxt, ctxt, sizeof(*ctxt)); - cmdq_ctxt.func_id = sphw_global_func_id(hwdev); - cmdq_ctxt.cmdq_id = cmdq_id; - - err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_CMDQ_CTXT, - &cmdq_ctxt, sizeof(cmdq_ctxt), - &cmdq_ctxt, &out_size); - if (err || !out_size || cmdq_ctxt.head.status) { - sdk_err(hwdev->dev_hdl, "Failed to set cmdq ctxt, err: %d, status: 0x%x, out_size: 0x%x\n", - err, cmdq_ctxt.head.status, out_size); - return -EFAULT; - } - - return 0; -} - -int 
sphw_set_ceq_ctrl_reg(struct sphw_hwdev *hwdev, u16 q_id, u32 ctrl0, u32 ctrl1) -{ - struct comm_cmd_ceq_ctrl_reg ceq_ctrl; - u16 out_size = sizeof(ceq_ctrl); - int err; - - memset(&ceq_ctrl, 0, sizeof(ceq_ctrl)); - ceq_ctrl.func_id = sphw_global_func_id(hwdev); - ceq_ctrl.q_id = q_id; - ceq_ctrl.ctrl0 = ctrl0; - ceq_ctrl.ctrl1 = ctrl1; - - err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_CEQ_CTRL_REG, - &ceq_ctrl, sizeof(ceq_ctrl), - &ceq_ctrl, &out_size); - if (err || !out_size || ceq_ctrl.head.status) { - sdk_err(hwdev->dev_hdl, "Failed to set ceq %u ctrl reg, err: %d status: 0x%x, out_size: 0x%x\n", - q_id, err, ceq_ctrl.head.status, out_size); - return -EFAULT; - } - - return 0; -} - -int sphw_set_dma_attr_tbl(struct sphw_hwdev *hwdev, u8 entry_idx, u8 st, u8 at, u8 ph, - u8 no_snooping, u8 tph_en) -{ - struct comm_cmd_dma_attr_config dma_attr; - u16 out_size = sizeof(dma_attr); - int err; - - memset(&dma_attr, 0, sizeof(dma_attr)); - dma_attr.func_id = sphw_global_func_id(hwdev); - dma_attr.entry_idx = entry_idx; - dma_attr.st = st; - dma_attr.at = at; - dma_attr.ph = ph; - dma_attr.no_snooping = no_snooping; - dma_attr.tph_en = tph_en; - - err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_DMA_ATTR, &dma_attr, sizeof(dma_attr), - &dma_attr, &out_size); - if (err || !out_size || dma_attr.head.status) { - sdk_err(hwdev->dev_hdl, "Failed to set dma_attr, err: %d, status: 0x%x, out_size: 0x%x\n", - err, dma_attr.head.status, out_size); - return -EIO; - } - - return 0; -} - -int sphw_set_bdf_ctxt(void *hwdev, u8 bus, u8 device, u8 function) -{ - struct comm_cmd_bdf_info bdf_info; - u16 out_size = sizeof(bdf_info); - int err; - - if (!hwdev) - return -EINVAL; - - memset(&bdf_info, 0, sizeof(bdf_info)); - bdf_info.function_idx = sphw_global_func_id(hwdev); - bdf_info.bus = bus; - bdf_info.device = device; - bdf_info.function = function; - - err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SEND_BDF_INFO, - &bdf_info, sizeof(bdf_info), - &bdf_info, &out_size); - if (err || !out_size || bdf_info.head.status) { - sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, - "Failed to set bdf info to MPU, err: %d, status: 0x%x, out_size: 0x%x\n", - err, bdf_info.head.status, out_size); - return -EIO; - } - - return 0; -} - -int sphw_sync_time(void *hwdev, u64 time) -{ - struct comm_cmd_sync_time time_info; - u16 out_size = sizeof(time_info); - int err; - - memset(&time_info, 0, sizeof(time_info)); - time_info.mstime = time; - err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SYNC_TIME, &time_info, - sizeof(time_info), &time_info, &out_size); - if (err || time_info.head.status || !out_size) { - sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, - "Failed to sync time to mgmt, err: %d, status: 0x%x, out size: 0x%x\n", - err, time_info.head.status, out_size); - return -EIO; - } - - return 0; -} - -int sphw_get_fw_ver(void *hwdev, enum sphw_fw_ver_type type, u8 *mgmt_ver, - u8 version_size, u16 channel) -{ - struct comm_cmd_get_fw_version fw_ver; - struct sphw_hwdev *dev = hwdev; - u16 out_size = sizeof(fw_ver); - int err; - - if (!hwdev || !mgmt_ver) - return -EINVAL; - - memset(&fw_ver, 0, sizeof(fw_ver)); - fw_ver.fw_type = type; - err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_GET_FW_VERSION, - &fw_ver, sizeof(fw_ver), &fw_ver, - &out_size, channel); - if (err || !out_size || fw_ver.head.status) { - sdk_err(dev->dev_hdl, "Failed to get fw version, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", - err, fw_ver.head.status, out_size, channel); - return -EIO; - } - - snprintf(mgmt_ver, 
version_size, "%s", fw_ver.ver); - - return 0; -} - -int sphw_get_mgmt_version(void *hwdev, u8 *mgmt_ver, u8 version_size, u16 channel) -{ - return sphw_get_fw_ver(hwdev, SPHW_FW_VER_TYPE_MPU, mgmt_ver, version_size, channel); -} - -int sphw_get_fw_version(void *hwdev, struct sphw_fw_version *fw_ver, u16 channel) -{ - int err; - - if (!hwdev || !fw_ver) - return -EINVAL; - - err = sphw_get_fw_ver(hwdev, SPHW_FW_VER_TYPE_MPU, fw_ver->mgmt_ver, - sizeof(fw_ver->mgmt_ver), channel); - if (err) - return err; - - err = sphw_get_fw_ver(hwdev, SPHW_FW_VER_TYPE_NPU, fw_ver->microcode_ver, - sizeof(fw_ver->microcode_ver), channel); - if (err) - return err; - - return sphw_get_fw_ver(hwdev, SPHW_FW_VER_TYPE_BOOT, fw_ver->boot_ver, - sizeof(fw_ver->boot_ver), channel); -} - -static int sphw_comm_features_nego(void *hwdev, u8 opcode, u64 *s_feature, u16 size) -{ - struct comm_cmd_feature_nego feature_nego; - u16 out_size = sizeof(feature_nego); - struct sphw_hwdev *dev = hwdev; - int err; - - if (!hwdev || !s_feature || size > COMM_MAX_FEATURE_QWORD) - return -EINVAL; - - memset(&feature_nego, 0, sizeof(feature_nego)); - feature_nego.func_id = sphw_global_func_id(hwdev); - feature_nego.opcode = opcode; - if (opcode == MGMT_MSG_CMD_OP_SET) - memcpy(feature_nego.s_feature, s_feature, (size * sizeof(u64))); - - err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_FEATURE_NEGO, - &feature_nego, sizeof(feature_nego), - &feature_nego, &out_size); - if (err || !out_size || feature_nego.head.status) { - sdk_err(dev->dev_hdl, "Failed to negotiate feature, err: %d, status: 0x%x, out size: 0x%x\n", - err, feature_nego.head.status, out_size); - return -EINVAL; - } - - if (opcode == MGMT_MSG_CMD_OP_GET) - memcpy(s_feature, feature_nego.s_feature, (size * sizeof(u64))); - - return 0; -} - -int sphw_get_comm_features(void *hwdev, u64 *s_feature, u16 size) -{ - return sphw_comm_features_nego(hwdev, MGMT_MSG_CMD_OP_GET, s_feature, size); -} - -int sphw_set_comm_features(void *hwdev, u64 *s_feature, u16 size) -{ - return sphw_comm_features_nego(hwdev, MGMT_MSG_CMD_OP_SET, s_feature, size); -} - -int sphw_func_tmr_bitmap_set(void *hwdev, bool en) -{ - struct comm_cmd_func_tmr_bitmap_op bitmap_op; - u16 out_size = sizeof(bitmap_op); - int err; - - if (!hwdev) - return -EINVAL; - - memset(&bitmap_op, 0, sizeof(bitmap_op)); - bitmap_op.func_id = sphw_global_func_id(hwdev); - bitmap_op.opcode = en ? 
FUNC_TMR_BITMAP_ENABLE : - FUNC_TMR_BITMAP_DISABLE; - - err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_FUNC_TMR_BITMAT, - &bitmap_op, sizeof(bitmap_op), - &bitmap_op, &out_size); - if (err || !out_size || bitmap_op.head.status) { - sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, - "Failed to set timer bitmap, err: %d, status: 0x%x, out_size: 0x%x\n", - err, bitmap_op.head.status, out_size); - return -EFAULT; - } - - return 0; -} - -int ppf_ht_gpa_set(struct sphw_hwdev *hwdev, struct sphw_page_addr *pg0, struct sphw_page_addr *pg1) -{ - struct comm_cmd_ht_gpa ht_gpa_set; - u16 out_size = sizeof(ht_gpa_set); - int ret; - - memset(&ht_gpa_set, 0, sizeof(ht_gpa_set)); - pg0->virt_addr = dma_alloc_coherent(hwdev->dev_hdl, SPHW_HT_GPA_PAGE_SIZE, - &pg0->phys_addr, GFP_KERNEL); - if (!pg0->virt_addr) { - sdk_err(hwdev->dev_hdl, "Alloc pg0 page addr failed\n"); - return -EFAULT; - } - - pg1->virt_addr = dma_alloc_coherent(hwdev->dev_hdl, SPHW_HT_GPA_PAGE_SIZE, - &pg1->phys_addr, GFP_KERNEL); - if (!pg1->virt_addr) { - sdk_err(hwdev->dev_hdl, "Alloc pg1 page addr failed\n"); - return -EFAULT; - } - - ht_gpa_set.host_id = sphw_host_id(hwdev); - ht_gpa_set.page_pa0 = pg0->phys_addr; - ht_gpa_set.page_pa1 = pg1->phys_addr; - sdk_info(hwdev->dev_hdl, "PPF ht gpa set: page_addr0.pa=0x%llx, page_addr1.pa=0x%llx\n", - pg0->phys_addr, pg1->phys_addr); - ret = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_PPF_HT_GPA, - &ht_gpa_set, sizeof(ht_gpa_set), - &ht_gpa_set, &out_size); - if (ret || !out_size || ht_gpa_set.head.status) { - sdk_warn(hwdev->dev_hdl, "PPF ht gpa set failed, ret: %d, status: 0x%x, out_size: 0x%x\n", - ret, ht_gpa_set.head.status, out_size); - return -EFAULT; - } - - hwdev->page_pa0.phys_addr = pg0->phys_addr; - hwdev->page_pa0.virt_addr = pg0->virt_addr; - - hwdev->page_pa1.phys_addr = pg1->phys_addr; - hwdev->page_pa1.virt_addr = pg1->virt_addr; - - return 0; -} - -int sphw_ppf_ht_gpa_init(struct sphw_hwdev *hwdev) -{ - int ret; - int i; - int j; - int size; - - struct sphw_page_addr page_addr0[SPHW_PPF_HT_GPA_SET_RETRY_TIMES]; - struct sphw_page_addr page_addr1[SPHW_PPF_HT_GPA_SET_RETRY_TIMES]; - - size = SPHW_PPF_HT_GPA_SET_RETRY_TIMES * sizeof(page_addr0[0]); - memset(page_addr0, 0, size); - memset(page_addr1, 0, size); - - for (i = 0; i < SPHW_PPF_HT_GPA_SET_RETRY_TIMES; i++) { - ret = ppf_ht_gpa_set(hwdev, &page_addr0[i], &page_addr1[i]); - if (!ret) - break; - } - - for (j = 0; j < i; j++) { - if (page_addr0[j].virt_addr) { - dma_free_coherent(hwdev->dev_hdl, - SPHW_HT_GPA_PAGE_SIZE, - page_addr0[j].virt_addr, - page_addr0[j].phys_addr); - page_addr0[j].virt_addr = NULL; - } - if (page_addr1[j].virt_addr) { - dma_free_coherent(hwdev->dev_hdl, - SPHW_HT_GPA_PAGE_SIZE, - page_addr1[j].virt_addr, - page_addr1[j].phys_addr); - page_addr1[j].virt_addr = NULL; - } - } - - if (i >= SPHW_PPF_HT_GPA_SET_RETRY_TIMES) { - sdk_err(hwdev->dev_hdl, "PPF ht gpa init failed, retry times: %d\n", - i); - return -EFAULT; - } - - return 0; -} - -void sphw_ppf_ht_gpa_deinit(struct sphw_hwdev *hwdev) -{ - if (hwdev->page_pa0.virt_addr) { - dma_free_coherent(hwdev->dev_hdl, SPHW_HT_GPA_PAGE_SIZE, - hwdev->page_pa0.virt_addr, - hwdev->page_pa0.phys_addr); - hwdev->page_pa0.virt_addr = NULL; - } - - if (hwdev->page_pa1.virt_addr) { - dma_free_coherent(hwdev->dev_hdl, SPHW_HT_GPA_PAGE_SIZE, - hwdev->page_pa1.virt_addr, - hwdev->page_pa1.phys_addr); - hwdev->page_pa1.virt_addr = NULL; - } -} - -static int set_ppf_tmr_status(struct sphw_hwdev *hwdev, - enum ppf_tmr_status status) -{ - struct 
comm_cmd_ppf_tmr_op op; - u16 out_size = sizeof(op); - int err = 0; - - if (!hwdev) - return -EINVAL; - - memset(&op, 0, sizeof(op)); - - if (sphw_func_type(hwdev) != TYPE_PPF) - return -EFAULT; - - if (status == SPHW_PPF_TMR_FLAG_START) { - err = sphw_ppf_ht_gpa_init(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "PPF ht gpa init fail!\n"); - return -EFAULT; - } - } else { - sphw_ppf_ht_gpa_deinit(hwdev); - } - - op.opcode = status; - op.ppf_id = sphw_ppf_idx(hwdev); - - err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_PPF_TMR, &op, - sizeof(op), &op, &out_size); - if (err || !out_size || op.head.status) { - sdk_err(hwdev->dev_hdl, "Failed to set ppf timer, err: %d, status: 0x%x, out_size: 0x%x\n", - err, op.head.status, out_size); - return -EFAULT; - } - - return 0; -} - -int sphw_ppf_tmr_start(void *hwdev) -{ - if (!hwdev) { - pr_err("Hwdev pointer is NULL for starting ppf timer\n"); - return -EINVAL; - } - - return set_ppf_tmr_status(hwdev, SPHW_PPF_TMR_FLAG_START); -} - -int sphw_ppf_tmr_stop(void *hwdev) -{ - if (!hwdev) { - pr_err("Hwdev pointer is NULL for stop ppf timer\n"); - return -EINVAL; - } - - return set_ppf_tmr_status(hwdev, SPHW_PPF_TMR_FLAG_STOP); -} - -int mqm_eqm_try_alloc_mem(struct sphw_hwdev *hwdev, u32 page_size, - u32 page_num) -{ - struct sphw_page_addr *page_addr = hwdev->mqm_att.brm_srch_page_addr; - u32 valid_num = 0; - u32 flag = 1; - u32 i = 0; - - for (i = 0; i < page_num; i++) { - page_addr->virt_addr = - dma_alloc_coherent(hwdev->dev_hdl, page_size, - &page_addr->phys_addr, GFP_KERNEL); - if (!page_addr->virt_addr) { - flag = 0; - break; - } - valid_num++; - page_addr++; - } - - if (flag == 1) { - hwdev->mqm_att.page_size = page_size; - hwdev->mqm_att.page_num = page_num; - } else { - page_addr = hwdev->mqm_att.brm_srch_page_addr; - for (i = 0; i < valid_num; i++) { - dma_free_coherent(hwdev->dev_hdl, page_size, - page_addr->virt_addr, - page_addr->phys_addr); - page_addr++; - } - return -EFAULT; - } - - return 0; -} - -int mqm_eqm_alloc_page_mem(struct sphw_hwdev *hwdev) -{ - int ret = 0; - int page_num; - - /* apply for 2M page, page number is chunk_num/1024 */ - page_num = (hwdev->mqm_att.chunk_num + 1023) >> 10; - ret = mqm_eqm_try_alloc_mem(hwdev, 2 * 1024 * 1024, page_num); - if (!ret) { - sdk_info(hwdev->dev_hdl, "[mqm_eqm_init] Alloc page_size 2M OK\n"); - return 0; - } - - /* apply for 64KB page, page number is chunk_num/32 */ - page_num = (hwdev->mqm_att.chunk_num + 31) >> 5; - ret = mqm_eqm_try_alloc_mem(hwdev, 64 * 1024, page_num); - if (!ret) { - sdk_info(hwdev->dev_hdl, "[mqm_eqm_init] Alloc page_size 64K OK\n"); - return 0; - } - - /* apply for 4KB page, page number is chunk_num/2 */ - page_num = (hwdev->mqm_att.chunk_num + 1) >> 1; - ret = mqm_eqm_try_alloc_mem(hwdev, 4 * 1024, page_num); - if (!ret) { - sdk_info(hwdev->dev_hdl, "[mqm_eqm_init] Alloc page_size 4K OK\n"); - return 0; - } - - return ret; -} - -void mqm_eqm_free_page_mem(struct sphw_hwdev *hwdev) -{ - u32 i; - struct sphw_page_addr *page_addr; - u32 page_size; - - page_size = hwdev->mqm_att.page_size; - page_addr = hwdev->mqm_att.brm_srch_page_addr; - - for (i = 0; i < hwdev->mqm_att.page_num; i++) { - dma_free_coherent(hwdev->dev_hdl, page_size, - page_addr->virt_addr, page_addr->phys_addr); - page_addr++; - } -} - -int mqm_eqm_set_cfg_2_hw(struct sphw_hwdev *hwdev, u8 valid) -{ - struct comm_cmd_eqm_cfg info_eqm_cfg; - u16 out_size = sizeof(info_eqm_cfg); - int err; - - memset(&info_eqm_cfg, 0, sizeof(info_eqm_cfg)); - - info_eqm_cfg.host_id = sphw_host_id(hwdev); - 
info_eqm_cfg.page_size = hwdev->mqm_att.page_size; - info_eqm_cfg.valid = valid; - err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_MQM_CFG_INFO, - &info_eqm_cfg, sizeof(info_eqm_cfg), - &info_eqm_cfg, &out_size); - if (err || !out_size || info_eqm_cfg.head.status) { - sdk_err(hwdev->dev_hdl, "Failed to init func table, err: %d, status: 0x%x, out_size: 0x%x\n", - err, info_eqm_cfg.head.status, out_size); - return -EFAULT; - } - - return 0; -} - -#define EQM_DATA_BUF_SIZE 1024 -#define MQM_ATT_PAGE_NUM 128 - -int mqm_eqm_set_page_2_hw(struct sphw_hwdev *hwdev) -{ - struct comm_cmd_eqm_search_gpa *info = NULL; - struct sphw_page_addr *page_addr = NULL; - void *send_buf = NULL; - u16 send_buf_size; - u32 i; - u64 *gpa_hi52 = NULL; - u64 gpa; - u32 num; - u32 start_idx; - int err = 0; - u16 out_size; - u8 cmd; - - send_buf_size = sizeof(struct comm_cmd_eqm_search_gpa) + - EQM_DATA_BUF_SIZE; - send_buf = kzalloc(send_buf_size, GFP_KERNEL); - if (!send_buf) { - sdk_err(hwdev->dev_hdl, "Alloc virtual mem failed\r\n"); - return -EFAULT; - } - - page_addr = hwdev->mqm_att.brm_srch_page_addr; - info = (struct comm_cmd_eqm_search_gpa *)send_buf; - - gpa_hi52 = info->gpa_hi52; - num = 0; - start_idx = 0; - cmd = COMM_MGMT_CMD_SET_MQM_SRCH_GPA; - for (i = 0; i < hwdev->mqm_att.page_num; i++) { - /* gpa align to 4K, save gpa[31:12] */ - gpa = page_addr->phys_addr >> 12; - gpa_hi52[num] = gpa; - num++; - if (num == MQM_ATT_PAGE_NUM) { - info->num = num; - info->start_idx = start_idx; - info->host_id = sphw_host_id(hwdev); - out_size = send_buf_size; - err = comm_msg_to_mgmt_sync(hwdev, cmd, info, - (u16)send_buf_size, - info, &out_size); - if (MSG_TO_MGMT_SYNC_RETURN_ERR(err, out_size, - info->head.status)) { - sdk_err(hwdev->dev_hdl, "Set mqm srch gpa fail, err: %d, status: 0x%x, out_size: 0x%x\n", - err, info->head.status, out_size); - err = -EFAULT; - goto set_page_2_hw_end; - } - - gpa_hi52 = info->gpa_hi52; - num = 0; - start_idx = i + 1; - } - page_addr++; - } - - if (num != 0) { - info->num = num; - info->start_idx = start_idx; - info->host_id = sphw_host_id(hwdev); - out_size = send_buf_size; - err = comm_msg_to_mgmt_sync(hwdev, cmd, info, - (u16)send_buf_size, info, - &out_size); - if (MSG_TO_MGMT_SYNC_RETURN_ERR(err, out_size, - info->head.status)) { - sdk_err(hwdev->dev_hdl, "Set mqm srch gpa fail, err: %d, status: 0x%x, out_size: 0x%x\n", - err, info->head.status, out_size); - err = -EFAULT; - goto set_page_2_hw_end; - } - } - -set_page_2_hw_end: - kfree(send_buf); - return err; -} - -int mqm_eqm_init(struct sphw_hwdev *hwdev) -{ - struct comm_cmd_get_eqm_num info_eqm_fix; - u16 len = sizeof(info_eqm_fix); - int ret; - - if (hwdev->hwif->attr.func_type != TYPE_PPF) - return 0; - - memset(&info_eqm_fix, 0, sizeof(info_eqm_fix)); - - ret = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_GET_MQM_FIX_INFO, - &info_eqm_fix, sizeof(info_eqm_fix), - &info_eqm_fix, &len); - if (ret || !len || info_eqm_fix.head.status) { - sdk_err(hwdev->dev_hdl, "Get mqm fix info fail,err: %d, status: 0x%x, out_size: 0x%x\n", - ret, info_eqm_fix.head.status, len); - return -EFAULT; - } - sdk_info(hwdev->dev_hdl, "get chunk_num: 0x%x, search_gpa_num: 0x%08x\n", - info_eqm_fix.chunk_num, info_eqm_fix.search_gpa_num); - if (!(info_eqm_fix.chunk_num)) - return 0; - - hwdev->mqm_att.chunk_num = info_eqm_fix.chunk_num; - hwdev->mqm_att.search_gpa_num = info_eqm_fix.search_gpa_num; - hwdev->mqm_att.page_size = 0; - hwdev->mqm_att.page_num = 0; - - hwdev->mqm_att.brm_srch_page_addr = - kcalloc(hwdev->mqm_att.chunk_num, - 
sizeof(struct sphw_page_addr), GFP_KERNEL); - if (!(hwdev->mqm_att.brm_srch_page_addr)) { - sdk_err(hwdev->dev_hdl, "Alloc virtual mem failed\r\n"); - return -EFAULT; - } - - ret = mqm_eqm_alloc_page_mem(hwdev); - if (ret) { - sdk_err(hwdev->dev_hdl, "Alloc eqm page mem failed\r\n"); - goto err_page; - } - - ret = mqm_eqm_set_page_2_hw(hwdev); - if (ret) { - sdk_err(hwdev->dev_hdl, "Set page to hw failed\r\n"); - goto err_ecmd; - } - - ret = mqm_eqm_set_cfg_2_hw(hwdev, 1); - if (ret) { - sdk_err(hwdev->dev_hdl, "Set page to hw failed\r\n"); - goto err_ecmd; - } - - return 0; - -err_ecmd: - mqm_eqm_free_page_mem(hwdev); - -err_page: - kfree(hwdev->mqm_att.brm_srch_page_addr); - - return ret; -} - -void mqm_eqm_deinit(struct sphw_hwdev *hwdev) -{ - int ret; - - if (hwdev->hwif->attr.func_type != TYPE_PPF) - return; - - if (!(hwdev->mqm_att.chunk_num)) - return; - - mqm_eqm_free_page_mem(hwdev); - kfree(hwdev->mqm_att.brm_srch_page_addr); - - ret = mqm_eqm_set_cfg_2_hw(hwdev, 0); - if (ret) { - sdk_err(hwdev->dev_hdl, "Set mqm eqm cfg to chip fail! err: %d\n", - ret); - return; - } - - hwdev->mqm_att.chunk_num = 0; - hwdev->mqm_att.search_gpa_num = 0; - hwdev->mqm_att.page_num = 0; - hwdev->mqm_att.page_size = 0; -} - -int sphw_ppf_ext_db_init(void *dev) -{ - struct sphw_hwdev *hwdev = dev; - int ret; - - /* IS OVS MODE SURPORT EXT DB NEEDED */ - - ret = mqm_eqm_init(hwdev); - if (ret) { - sdk_err(hwdev->dev_hdl, "MQM eqm init fail!\n"); - return -EFAULT; - } - sdk_info(hwdev->dev_hdl, "ppf_ext_db_init ok\r\n"); - - return 0; -} - -int sphw_ppf_ext_db_deinit(void *dev) -{ - struct sphw_hwdev *hwdev = dev; - - if (!dev) - return -EINVAL; - - if (hwdev->hwif->attr.func_type != TYPE_PPF) - return 0; - - mqm_eqm_deinit(hwdev); - - return 0; -} - -#define SPHW_FLR_TIMEOUT 1000 - -static enum sphw_wait_return check_flr_finish_handler(void *priv_data) -{ - struct sphw_hwif *hwif = priv_data; - enum sphw_pf_status status; - - status = sphw_get_pf_status(hwif); - if (status == SPHW_PF_STATUS_FLR_FINISH_FLAG) { - sphw_set_pf_status(hwif, SPHW_PF_STATUS_ACTIVE_FLAG); - return WAIT_PROCESS_CPL; - } - - return WAIT_PROCESS_WAITING; -} - -static int wait_for_flr_finish(struct sphw_hwif *hwif) -{ - return sphw_wait_for_timeout(hwif, check_flr_finish_handler, - SPHW_FLR_TIMEOUT, 10 * USEC_PER_MSEC); -} - -#define SPHW_WAIT_CMDQ_IDLE_TIMEOUT 5000 - -static enum sphw_wait_return check_cmdq_stop_handler(void *priv_data) -{ - struct sphw_hwdev *hwdev = priv_data; - struct sphw_cmdqs *cmdqs = hwdev->cmdqs; - enum sphw_cmdq_type cmdq_type; - - /* Stop waiting when card unpresent */ - if (!hwdev->chip_present_flag) - return WAIT_PROCESS_CPL; - - cmdq_type = SPHW_CMDQ_SYNC; - for (; cmdq_type < SPHW_MAX_CMDQ_TYPES; cmdq_type++) { - if (!sphw_cmdq_idle(&cmdqs->cmdq[cmdq_type])) - return WAIT_PROCESS_WAITING; - } - - return WAIT_PROCESS_CPL; -} - -static int wait_cmdq_stop(struct sphw_hwdev *hwdev) -{ - enum sphw_cmdq_type cmdq_type; - struct sphw_cmdqs *cmdqs = hwdev->cmdqs; - int err; - - if (!(cmdqs->status & SPHW_CMDQ_ENABLE)) - return 0; - - cmdqs->status &= ~SPHW_CMDQ_ENABLE; - - err = sphw_wait_for_timeout(hwdev, check_cmdq_stop_handler, - SPHW_WAIT_CMDQ_IDLE_TIMEOUT, USEC_PER_MSEC); - if (!err) - return 0; - - cmdq_type = SPHW_CMDQ_SYNC; - for (; cmdq_type < SPHW_MAX_CMDQ_TYPES; cmdq_type++) { - if (!sphw_cmdq_idle(&cmdqs->cmdq[cmdq_type])) - sdk_err(hwdev->dev_hdl, "Cmdq %d is busy\n", cmdq_type); - } - - cmdqs->status |= SPHW_CMDQ_ENABLE; - - return err; -} - -static int sphw_pf_rx_tx_flush(struct sphw_hwdev 
*hwdev, u16 channel) -{ - struct sphw_hwif *hwif = hwdev->hwif; - struct comm_cmd_clear_doorbell clear_db; - struct comm_cmd_clear_resource clr_res; - u16 out_size; - int err; - int ret = 0; - - /*wait ucode stop I/O */ - msleep(100); - - err = wait_cmdq_stop(hwdev); - if (err) { - sdk_warn(hwdev->dev_hdl, "CMDQ is still working, please check CMDQ timeout value is reasonable\n"); - ret = err; - } - - sphw_disable_doorbell(hwif); - - out_size = sizeof(clear_db); - memset(&clear_db, 0, sizeof(clear_db)); - clear_db.func_id = SPHW_HWIF_GLOBAL_IDX(hwif); - - err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_FLUSH_DOORBELL, - &clear_db, sizeof(clear_db), - &clear_db, &out_size, channel); - if (err || !out_size || clear_db.head.status) { - sdk_warn(hwdev->dev_hdl, "Failed to flush doorbell, err: %d, status: 0x%x, out_size: 0x%x, channel: 0x%x\n", - err, clear_db.head.status, out_size, channel); - if (err) - ret = err; - else - ret = -EFAULT; - } - - sphw_set_pf_status(hwif, SPHW_PF_STATUS_FLR_START_FLAG); - - memset(&clr_res, 0, sizeof(clr_res)); - clr_res.func_id = SPHW_HWIF_GLOBAL_IDX(hwif); - - err = sphw_msg_to_mgmt_no_ack(hwdev, SPHW_MOD_COMM, COMM_MGMT_CMD_START_FLUSH, &clr_res, - sizeof(clr_res), channel); - if (err) { - sdk_warn(hwdev->dev_hdl, "Failed to notice flush message, err: %d, channel: 0x%x\n", - err, channel); - ret = err; - } - - err = wait_for_flr_finish(hwif); - if (err) { - sdk_warn(hwdev->dev_hdl, "Wait firmware FLR timeout\n"); - ret = err; - } - - sphw_enable_doorbell(hwif); - - err = sphw_reinit_cmdq_ctxts(hwdev); - if (err) { - sdk_warn(hwdev->dev_hdl, "Failed to reinit cmdq\n"); - ret = err; - } - - return ret; -} - -int sphw_func_rx_tx_flush(void *hwdev, u16 channel) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev) - return -EINVAL; - - if (!dev->chip_present_flag) - return 0; - - if (SPHW_FUNC_TYPE(dev) == TYPE_VF) - /* TO DO */ - return 0; - else - return sphw_pf_rx_tx_flush(dev, channel); -} - -int sphw_get_board_info(void *hwdev, struct sphw_board_info *info, u16 channel) -{ - struct comm_cmd_board_info board_info; - u16 out_size = sizeof(board_info); - int err; - - if (!hwdev || !info) - return -EINVAL; - - memset(&board_info, 0, sizeof(board_info)); - err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_GET_BOARD_INFO, - &board_info, sizeof(board_info), - &board_info, &out_size, channel); - if (err || board_info.head.status || !out_size) { - sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, - "Failed to get board info, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", - err, board_info.head.status, out_size, channel); - return -EIO; - } - - memcpy(info, &board_info.info, sizeof(*info)); - - return 0; -} - -int sphw_get_global_attr(void *hwdev, struct comm_global_attr *attr) -{ - struct comm_cmd_get_glb_attr get_attr; - u16 out_size = sizeof(get_attr); - int err = 0; - - err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_GET_GLOBAL_ATTR, - &get_attr, sizeof(get_attr), &get_attr, - &out_size); - if (err || !out_size || get_attr.head.status) { - sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, - "Failed to get global attribute, err: %d, status: 0x%x, out size: 0x%x\n", - err, get_attr.head.status, out_size); - return -EIO; - } - - memcpy(attr, &get_attr.attr, sizeof(*attr)); - - return 0; -} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_comm.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_comm.h deleted file mode 100644 index 4e0cf2dfb21e..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hw_comm.h +++ /dev/null @@ -1,45 +0,0 @@ -/* 
SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_HW_COMM_H -#define SPHW_HW_COMM_H - -#include "sphw_comm_msg_intf.h" - -#define MSG_TO_MGMT_SYNC_RETURN_ERR(err, out_size, status) \ - ((err) || (status) || !(out_size)) - -#define SPHW_PAGE_SIZE_HW(pg_size) ((u8)ilog2((u32)((pg_size) >> 12))) - -enum func_tmr_bitmap_status { - FUNC_TMR_BITMAP_DISABLE, - FUNC_TMR_BITMAP_ENABLE, -}; - -enum ppf_tmr_status { - SPHW_PPF_TMR_FLAG_STOP, - SPHW_PPF_TMR_FLAG_START, -}; - -#define SPHW_HT_GPA_PAGE_SIZE 4096UL -#define SPHW_PPF_HT_GPA_SET_RETRY_TIMES 10 - -int sphw_set_cmdq_depth(void *hwdev, u16 cmdq_depth); - -int sphw_set_cmdq_ctxt(struct sphw_hwdev *hwdev, u8 cmdq_id, struct cmdq_ctxt_info *ctxt); - -int sphw_ppf_ext_db_init(void *dev); - -int sphw_ppf_ext_db_deinit(void *dev); - -int sphw_set_ceq_ctrl_reg(struct sphw_hwdev *hwdev, u16 q_id, u32 ctrl0, u32 ctrl1); - -int sphw_set_dma_attr_tbl(struct sphw_hwdev *hwdevm, u8 entry_idx, u8 st, u8 at, u8 ph, - u8 no_snooping, u8 tph_en); - -int sphw_get_comm_features(void *hwdev, u64 *s_feature, u16 size); -int sphw_set_comm_features(void *hwdev, u64 *s_feature, u16 size); - -int sphw_get_global_attr(void *hwdev, struct comm_global_attr *attr); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwdev.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwdev.c deleted file mode 100644 index 783fa46bcfe5..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwdev.c +++ /dev/null @@ -1,1324 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt - -#include <linux/time.h> -#include <linux/timex.h> -#include <linux/rtc.h> -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/types.h> -#include <linux/module.h> -#include <linux/completion.h> -#include <linux/semaphore.h> -#include <linux/interrupt.h> -#include <linux/vmalloc.h> - -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "sphw_common.h" -#include "sphw_hwdev.h" -#include "sphw_csr.h" -#include "sphw_hwif.h" -#include "sphw_eqs.h" -#include "sphw_api_cmd.h" -#include "sphw_mgmt.h" -#include "sphw_mbox.h" -#include "sphw_wq.h" -#include "sphw_cmdq.h" -#include "sphw_hw_cfg.h" -#include "sphw_hw_comm.h" -#include "sphw_prof_adap.h" - -static bool disable_stateful_load; -module_param(disable_stateful_load, bool, 0444); -MODULE_PARM_DESC(disable_stateful_load, "Disable stateful load - default is false"); - -static bool disable_cfg_comm; -module_param(disable_cfg_comm, bool, 0444); -MODULE_PARM_DESC(disable_cfg_comm, "disable_cfg_comm or not - default is false"); - -static unsigned int wq_page_order = SPHW_MAX_WQ_PAGE_SIZE_ORDER; -module_param(wq_page_order, uint, 0444); -MODULE_PARM_DESC(wq_page_order, "Set wq page size order, wq page size is 4K * (2 ^ wq_page_order) - default is 8"); - -enum sphw_pcie_nosnoop { - SPHW_PCIE_SNOOP = 0, - SPHW_PCIE_NO_SNOOP = 1, -}; - -enum sphw_pcie_tph { - SPHW_PCIE_TPH_DISABLE = 0, - SPHW_PCIE_TPH_ENABLE = 1, -}; - -#define SPHW_DMA_ATTR_INDIR_IDX_SHIFT 0 - -#define SPHW_DMA_ATTR_INDIR_IDX_MASK 0x3FF - -#define SPHW_DMA_ATTR_INDIR_IDX_SET(val, member) \ - (((u32)(val) & SPHW_DMA_ATTR_INDIR_##member##_MASK) << \ - SPHW_DMA_ATTR_INDIR_##member##_SHIFT) - -#define SPHW_DMA_ATTR_INDIR_IDX_CLEAR(val, member) \ - ((val) & (~(SPHW_DMA_ATTR_INDIR_##member##_MASK \ - << SPHW_DMA_ATTR_INDIR_##member##_SHIFT))) - -#define SPHW_DMA_ATTR_ENTRY_ST_SHIFT 0 -#define 
SPHW_DMA_ATTR_ENTRY_AT_SHIFT 8 -#define SPHW_DMA_ATTR_ENTRY_PH_SHIFT 10 -#define SPHW_DMA_ATTR_ENTRY_NO_SNOOPING_SHIFT 12 -#define SPHW_DMA_ATTR_ENTRY_TPH_EN_SHIFT 13 - -#define SPHW_DMA_ATTR_ENTRY_ST_MASK 0xFF -#define SPHW_DMA_ATTR_ENTRY_AT_MASK 0x3 -#define SPHW_DMA_ATTR_ENTRY_PH_MASK 0x3 -#define SPHW_DMA_ATTR_ENTRY_NO_SNOOPING_MASK 0x1 -#define SPHW_DMA_ATTR_ENTRY_TPH_EN_MASK 0x1 - -#define SPHW_DMA_ATTR_ENTRY_SET(val, member) \ - (((u32)(val) & SPHW_DMA_ATTR_ENTRY_##member##_MASK) << \ - SPHW_DMA_ATTR_ENTRY_##member##_SHIFT) - -#define SPHW_DMA_ATTR_ENTRY_CLEAR(val, member) \ - ((val) & (~(SPHW_DMA_ATTR_ENTRY_##member##_MASK \ - << SPHW_DMA_ATTR_ENTRY_##member##_SHIFT))) - -#define SPHW_PCIE_ST_DISABLE 0 -#define SPHW_PCIE_AT_DISABLE 0 -#define SPHW_PCIE_PH_DISABLE 0 - -#define PCIE_MSIX_ATTR_ENTRY 0 - -#define SPHW_CHIP_PRESENT 1 -#define SPHW_CHIP_ABSENT 0 - -#define SPHW_DEAULT_EQ_MSIX_PENDING_LIMIT 0 -#define SPHW_DEAULT_EQ_MSIX_COALESC_TIMER_CFG 0xFF -#define SPHW_DEAULT_EQ_MSIX_RESEND_TIMER_CFG 7 - -#define SPHW_HWDEV_WQ_NAME "sphw_hardware" -#define SPHW_WQ_MAX_REQ 10 - -static void sphw_init_heartbeat_detect(struct sphw_hwdev *hwdev); -static void sphw_destroy_heartbeat_detect(struct sphw_hwdev *hwdev); - -typedef void (*mgmt_event_cb)(void *handle, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size); - -struct mgmt_event_handle { - u16 cmd; - mgmt_event_cb proc; -}; - -int pf_handle_vf_comm_mbox(void *handle, void *pri_handle, - u16 vf_id, u16 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size) -{ - struct sphw_hwdev *hwdev = handle; - - if (!hwdev) - return -EINVAL; - - sdk_warn(hwdev->dev_hdl, "Unsupported vf mbox event %u to process\n", - cmd); - - return 0; -} - -int vf_handle_pf_comm_mbox(void *handle, void *pri_handle, u16 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size) -{ - struct sphw_hwdev *hwdev = handle; - - if (!hwdev) - return -EINVAL; - - sdk_warn(hwdev->dev_hdl, "Unsupported pf mbox event %u to process\n", - cmd); - return 0; -} - -static void chip_fault_show(struct sphw_hwdev *hwdev, struct sphw_fault_event *event) -{ - char fault_level[FAULT_LEVEL_MAX][FAULT_SHOW_STR_LEN + 1] = { - "fatal", "reset", "host", "flr", "general", "suggestion"}; - char level_str[FAULT_SHOW_STR_LEN + 1]; - u8 level; - - memset(level_str, 0, FAULT_SHOW_STR_LEN + 1); - level = event->event.chip.err_level; - if (level < FAULT_LEVEL_MAX) - strncpy(level_str, fault_level[level], - FAULT_SHOW_STR_LEN); - else - strncpy(level_str, "Unknown", FAULT_SHOW_STR_LEN); - - if (level == FAULT_LEVEL_SERIOUS_FLR) - dev_err(hwdev->dev_hdl, "err_level: %u [%s], flr func_id: %u\n", - level, level_str, event->event.chip.func_id); - - dev_err(hwdev->dev_hdl, "Module_id: 0x%x, err_type: 0x%x, err_level: %u[%s], err_csr_addr: 0x%08x, err_csr_value: 0x%08x\n", - event->event.chip.node_id, - event->event.chip.err_type, level, level_str, - event->event.chip.err_csr_addr, - event->event.chip.err_csr_value); -} - -static void fault_report_show(struct sphw_hwdev *hwdev, - struct sphw_fault_event *event) -{ - char fault_type[FAULT_TYPE_MAX][FAULT_SHOW_STR_LEN + 1] = { - "chip", "ucode", "mem rd timeout", "mem wr timeout", - "reg rd timeout", "reg wr timeout", "phy fault" - }; - char type_str[FAULT_SHOW_STR_LEN + 1]; - struct fault_event_stats *fault = NULL; - - sdk_err(hwdev->dev_hdl, "Fault event report received, func_id: %u\n", - sphw_global_func_id(hwdev)); - - memset(type_str, 0, FAULT_SHOW_STR_LEN + 1); - if (event->type < FAULT_TYPE_MAX) - strncpy(type_str, 
fault_type[event->type], - strlen(fault_type[event->type])); - else - strncpy(type_str, "Unknown", strlen("Unknown")); - - sdk_err(hwdev->dev_hdl, "Fault type: %u [%s]\n", event->type, type_str); - /* 0, 1, 2 and 3 word Represents array event->event.val index */ - sdk_err(hwdev->dev_hdl, "Fault val[0]: 0x%08x, val[1]: 0x%08x, val[2]: 0x%08x, val[3]: 0x%08x\n", - event->event.val[0], event->event.val[1], event->event.val[2], - event->event.val[3]); - - fault = &hwdev->hw_stats.fault_event_stats; - - switch (event->type) { - case FAULT_TYPE_CHIP: - chip_fault_show(hwdev, event); - break; - case FAULT_TYPE_UCODE: - atomic_inc(&fault->fault_type_stat[event->type]); - sdk_err(hwdev->dev_hdl, "Cause_id: %u, core_id: %u, c_id: %u, epc: 0x%08x\n", - event->event.ucode.cause_id, event->event.ucode.core_id, - event->event.ucode.c_id, event->event.ucode.epc); - break; - case FAULT_TYPE_MEM_RD_TIMEOUT: - case FAULT_TYPE_MEM_WR_TIMEOUT: - atomic_inc(&fault->fault_type_stat[event->type]); - sdk_err(hwdev->dev_hdl, "Err_csr_ctrl: 0x%08x, err_csr_data: 0x%08x, ctrl_tab: 0x%08x, mem_index: 0x%08x\n", - event->event.mem_timeout.err_csr_ctrl, - event->event.mem_timeout.err_csr_data, - event->event.mem_timeout.ctrl_tab, - event->event.mem_timeout.mem_index); - break; - case FAULT_TYPE_REG_RD_TIMEOUT: - case FAULT_TYPE_REG_WR_TIMEOUT: - atomic_inc(&fault->fault_type_stat[event->type]); - sdk_err(hwdev->dev_hdl, "Err_csr: 0x%08x\n", - event->event.reg_timeout.err_csr); - break; - case FAULT_TYPE_PHY_FAULT: - atomic_inc(&fault->fault_type_stat[event->type]); - sdk_err(hwdev->dev_hdl, "Op_type: %u, port_id: %u, dev_ad: %u, csr_addr: 0x%08x, op_data: 0x%08x\n", - event->event.phy_fault.op_type, - event->event.phy_fault.port_id, - event->event.phy_fault.dev_ad, - event->event.phy_fault.csr_addr, - event->event.phy_fault.op_data); - break; - default: - break; - } -} - -static void fault_event_handler(void *dev, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - struct sphw_cmd_fault_event *fault_event = NULL; - struct sphw_event_info event_info; - struct sphw_hwdev *hwdev = dev; - u8 fault_src = SPHW_FAULT_SRC_TYPE_MAX; - u8 fault_level; - - if (in_size != sizeof(*fault_event)) { - sdk_err(hwdev->dev_hdl, "Invalid fault event report, length: %u, should be %ld\n", - in_size, sizeof(*fault_event)); - return; - } - - fault_event = buf_in; - fault_report_show(hwdev, &fault_event->event); - - if (fault_event->event.type == FAULT_TYPE_CHIP) - fault_level = fault_event->event.event.chip.err_level; - else - fault_level = FAULT_LEVEL_FATAL; - - if (hwdev->event_callback) { - event_info.type = SPHW_EVENT_FAULT; - memcpy(&event_info.info, &fault_event->event, - sizeof(struct sphw_fault_event)); - event_info.info.fault_level = fault_level; - hwdev->event_callback(hwdev->event_pri_handle, &event_info); - } - - if (fault_event->event.type <= FAULT_TYPE_REG_WR_TIMEOUT) - fault_src = fault_event->event.type; - else if (fault_event->event.type == FAULT_TYPE_PHY_FAULT) - fault_src = SPHW_FAULT_SRC_HW_PHY_FAULT; - - sphw_fault_post_process(hwdev, fault_src, fault_level); -} - -static void ffm_event_msg_handler(void *hwdev, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - struct ffm_intr_info *intr = NULL; - struct sphw_hwdev *dev = hwdev; - - if (in_size != sizeof(*intr)) { - sdk_err(dev->dev_hdl, "Invalid fault event report, length: %u, should be %ld.\n", - in_size, sizeof(*intr)); - return; - } - - intr = buf_in; - - sdk_err(dev->dev_hdl, "node_id: 0x%x, err_type: 0x%x, err_level: %u, err_csr_addr: 
0x%08x, err_csr_value: 0x%08x\n", - intr->node_id, intr->err_type, intr->err_level, - intr->err_csr_addr, intr->err_csr_value); -} - -const struct mgmt_event_handle mgmt_event_proc[] = { - { - .cmd = COMM_MGMT_CMD_FAULT_REPORT, - .proc = fault_event_handler, - }, - - { - .cmd = COMM_MGMT_CMD_FFM_SET, - .proc = ffm_event_msg_handler, - }, -}; - -void pf_handle_mgmt_comm_event(void *handle, void *pri_handle, u16 cmd, - void *buf_in, u16 in_size, void *buf_out, - u16 *out_size) -{ - struct sphw_hwdev *hwdev = handle; - u32 i, event_num = ARRAY_LEN(mgmt_event_proc); - - if (!hwdev) - return; - - for (i = 0; i < event_num; i++) { - if (cmd == mgmt_event_proc[i].cmd) { - if (mgmt_event_proc[i].proc) - mgmt_event_proc[i].proc(handle, buf_in, in_size, - buf_out, out_size); - - return; - } - } - - sdk_warn(hwdev->dev_hdl, "Unsupported mgmt cpu event %u to process\n", - cmd); -} - -void sphw_set_chip_present(void *hwdev) -{ - ((struct sphw_hwdev *)hwdev)->chip_present_flag = SPHW_CHIP_PRESENT; -} - -void sphw_set_chip_absent(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - sdk_err(dev->dev_hdl, "Card not present\n"); - dev->chip_present_flag = SPHW_CHIP_ABSENT; -} - -int sphw_get_chip_present_flag(const void *hwdev) -{ - if (!hwdev) - return 0; - - return ((struct sphw_hwdev *)hwdev)->chip_present_flag; -} - -/* TODO */ -void sphw_force_complete_all(void *hwdev) -{ -} - -void sphw_detect_hw_present(void *hwdev) -{ - u32 addr, attr1; - - addr = SPHW_CSR_FUNC_ATTR1_ADDR; - attr1 = sphw_hwif_read_reg(((struct sphw_hwdev *)hwdev)->hwif, addr); - if (attr1 == SPHW_PCIE_LINK_DOWN) { - sphw_set_chip_absent(hwdev); - sphw_force_complete_all(hwdev); - } -} - -/** - * dma_attr_table_init - initialize the default dma attributes - * @hwdev: the pointer to hw device - **/ -static int dma_attr_table_init(struct sphw_hwdev *hwdev) -{ - u32 addr, val, dst_attr; - - /* Use indirect access should set entry_idx first*/ - addr = SPHW_CSR_DMA_ATTR_INDIR_IDX_ADDR; - val = sphw_hwif_read_reg(hwdev->hwif, addr); - val = SPHW_DMA_ATTR_INDIR_IDX_CLEAR(val, IDX); - - val |= SPHW_DMA_ATTR_INDIR_IDX_SET(PCIE_MSIX_ATTR_ENTRY, IDX); - - sphw_hwif_write_reg(hwdev->hwif, addr, val); - - wmb(); /* write index before config */ - - addr = SPHW_CSR_DMA_ATTR_TBL_ADDR; - val = sphw_hwif_read_reg(hwdev->hwif, addr); - dst_attr = SPHW_DMA_ATTR_ENTRY_SET(SPHW_PCIE_ST_DISABLE, ST) | - SPHW_DMA_ATTR_ENTRY_SET(SPHW_PCIE_AT_DISABLE, AT) | - SPHW_DMA_ATTR_ENTRY_SET(SPHW_PCIE_PH_DISABLE, PH) | - SPHW_DMA_ATTR_ENTRY_SET(SPHW_PCIE_SNOOP, NO_SNOOPING) | - SPHW_DMA_ATTR_ENTRY_SET(SPHW_PCIE_TPH_DISABLE, TPH_EN); - if (dst_attr == val) - return 0; - - return sphw_set_dma_attr_tbl(hwdev, PCIE_MSIX_ATTR_ENTRY, SPHW_PCIE_ST_DISABLE, - SPHW_PCIE_AT_DISABLE, SPHW_PCIE_PH_DISABLE, - SPHW_PCIE_SNOOP, SPHW_PCIE_TPH_DISABLE); -} - -static int init_aeqs_msix_attr(struct sphw_hwdev *hwdev) -{ - struct sphw_aeqs *aeqs = hwdev->aeqs; - struct interrupt_info info = {0}; - struct sphw_eq *eq = NULL; - int q_id; - int err; - - info.lli_set = 0; - info.interrupt_coalesc_set = 1; - info.pending_limt = SPHW_DEAULT_EQ_MSIX_PENDING_LIMIT; - info.coalesc_timer_cfg = SPHW_DEAULT_EQ_MSIX_COALESC_TIMER_CFG; - info.resend_timer_cfg = SPHW_DEAULT_EQ_MSIX_RESEND_TIMER_CFG; - - for (q_id = aeqs->num_aeqs - 1; q_id >= 0; q_id--) { - eq = &aeqs->aeq[q_id]; - info.msix_index = eq->eq_irq.msix_entry_idx; - err = sphw_set_interrupt_cfg_direct(hwdev, &info, SPHW_CHANNEL_COMM); - if (err) { - sdk_err(hwdev->dev_hdl, "Set msix attr for aeq %d failed\n", - q_id); - return -EFAULT; - } 
- } - - return 0; -} - -static int init_ceqs_msix_attr(struct sphw_hwdev *hwdev) -{ - struct sphw_ceqs *ceqs = hwdev->ceqs; - struct interrupt_info info = {0}; - struct sphw_eq *eq = NULL; - u16 q_id; - int err; - - info.lli_set = 0; - info.interrupt_coalesc_set = 1; - info.pending_limt = SPHW_DEAULT_EQ_MSIX_PENDING_LIMIT; - info.coalesc_timer_cfg = SPHW_DEAULT_EQ_MSIX_COALESC_TIMER_CFG; - info.resend_timer_cfg = SPHW_DEAULT_EQ_MSIX_RESEND_TIMER_CFG; - - for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) { - eq = &ceqs->ceq[q_id]; - info.msix_index = eq->eq_irq.msix_entry_idx; - err = sphw_set_interrupt_cfg(hwdev, info, SPHW_CHANNEL_COMM); - if (err) { - sdk_err(hwdev->dev_hdl, "Set msix attr for ceq %u failed\n", - q_id); - return -EFAULT; - } - } - - return 0; -} - -static int sphw_comm_aeqs_init(struct sphw_hwdev *hwdev) -{ - struct irq_info aeq_irqs[SPHW_MAX_AEQS] = {{0} }; - u16 num_aeqs, resp_num_irq = 0, i; - int err; - - num_aeqs = SPHW_HWIF_NUM_AEQS(hwdev->hwif); - if (num_aeqs > SPHW_MAX_AEQS) { - sdk_warn(hwdev->dev_hdl, "Adjust aeq num to %d\n", - SPHW_MAX_AEQS); - num_aeqs = SPHW_MAX_AEQS; - } - err = sphw_alloc_irqs(hwdev, SERVICE_T_INTF, num_aeqs, aeq_irqs, &resp_num_irq); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to alloc aeq irqs, num_aeqs: %u\n", - num_aeqs); - return err; - } - - if (resp_num_irq < num_aeqs) { - sdk_warn(hwdev->dev_hdl, "Adjust aeq num to %u\n", - resp_num_irq); - num_aeqs = resp_num_irq; - } - - err = sphw_aeqs_init(hwdev, num_aeqs, aeq_irqs); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init aeqs\n"); - goto aeqs_init_err; - } - - return 0; - -aeqs_init_err: - for (i = 0; i < num_aeqs; i++) - sphw_free_irq(hwdev, SERVICE_T_INTF, aeq_irqs[i].irq_id); - - return err; -} - -static void sphw_comm_aeqs_free(struct sphw_hwdev *hwdev) -{ - struct irq_info aeq_irqs[SPHW_MAX_AEQS] = {{0} }; - u16 num_irqs, i; - - sphw_get_aeq_irqs(hwdev, aeq_irqs, &num_irqs); - - sphw_aeqs_free(hwdev); - - for (i = 0; i < num_irqs; i++) - sphw_free_irq(hwdev, SERVICE_T_INTF, aeq_irqs[i].irq_id); -} - -static int sphw_comm_ceqs_init(struct sphw_hwdev *hwdev) -{ - struct irq_info ceq_irqs[SPHW_MAX_CEQS] = {{0} }; - u16 num_ceqs, resp_num_irq = 0, i; - int err; - - num_ceqs = SPHW_HWIF_NUM_CEQS(hwdev->hwif); - if (num_ceqs > SPHW_MAX_CEQS) { - sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %d\n", - SPHW_MAX_CEQS); - num_ceqs = SPHW_MAX_CEQS; - } - - err = sphw_alloc_irqs(hwdev, SERVICE_T_INTF, num_ceqs, ceq_irqs, &resp_num_irq); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to alloc ceq irqs, num_ceqs: %u\n", - num_ceqs); - return err; - } - - if (resp_num_irq < num_ceqs) { - sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %u\n", - resp_num_irq); - num_ceqs = resp_num_irq; - } - - err = sphw_ceqs_init(hwdev, num_ceqs, ceq_irqs); - if (err) { - sdk_err(hwdev->dev_hdl, - "Failed to init ceqs, err:%d\n", err); - goto ceqs_init_err; - } - - return 0; - -ceqs_init_err: - for (i = 0; i < num_ceqs; i++) - sphw_free_irq(hwdev, SERVICE_T_INTF, ceq_irqs[i].irq_id); - - return err; -} - -static void sphw_comm_ceqs_free(struct sphw_hwdev *hwdev) -{ - struct irq_info ceq_irqs[SPHW_MAX_CEQS] = {{0} }; - u16 num_irqs; - int i; - - sphw_get_ceq_irqs(hwdev, ceq_irqs, &num_irqs); - - sphw_ceqs_free(hwdev); - - for (i = 0; i < num_irqs; i++) - sphw_free_irq(hwdev, SERVICE_T_INTF, ceq_irqs[i].irq_id); -} - -static int sphw_comm_func_to_func_init(struct sphw_hwdev *hwdev) -{ - int err; - - err = sphw_func_to_func_init(hwdev); - if (err) - return err; - - sphw_aeq_register_hw_cb(hwdev, 
SPHW_MBX_FROM_FUNC, sphw_mbox_func_aeqe_handler); - sphw_aeq_register_hw_cb(hwdev, SPHW_MSG_FROM_MGMT_CPU, sphw_mgmt_msg_aeqe_handler); - - if (!SPHW_IS_VF(hwdev)) - sphw_register_pf_mbox_cb(hwdev, SPHW_MOD_COMM, hwdev->func_to_func, - pf_handle_vf_comm_mbox); - else - sphw_register_vf_mbox_cb(hwdev, SPHW_MOD_COMM, hwdev->func_to_func, - vf_handle_pf_comm_mbox); - - return 0; -} - -static void sphw_comm_func_to_func_free(struct sphw_hwdev *hwdev) -{ - sphw_aeq_unregister_hw_cb(hwdev, SPHW_MBX_FROM_FUNC); - - if (!SPHW_IS_VF(hwdev)) { - sphw_unregister_pf_mbox_cb(hwdev, SPHW_MOD_COMM); - } else { - sphw_unregister_vf_mbox_cb(hwdev, SPHW_MOD_COMM); - - sphw_aeq_unregister_hw_cb(hwdev, SPHW_MSG_FROM_MGMT_CPU); - } - - sphw_func_to_func_free(hwdev); -} - -static int sphw_comm_pf_to_mgmt_init(struct sphw_hwdev *hwdev) -{ - int err; - - /* VF do not support api chain */ - if (sphw_func_type(hwdev) == TYPE_VF || - !COMM_SUPPORT_API_CHAIN(hwdev)) - return 0; - - err = sphw_pf_to_mgmt_init(hwdev); - if (err) - return err; - - sphw_register_mgmt_msg_cb(hwdev, SPHW_MOD_COMM, hwdev->pf_to_mgmt, - pf_handle_mgmt_comm_event); - - return 0; -} - -static void sphw_comm_pf_to_mgmt_free(struct sphw_hwdev *hwdev) -{ - /* VF do not support api chain */ - if (sphw_func_type(hwdev) == TYPE_VF || - !COMM_SUPPORT_API_CHAIN(hwdev)) - return; - - sphw_unregister_mgmt_msg_cb(hwdev, SPHW_MOD_COMM); - - sphw_aeq_unregister_hw_cb(hwdev, SPHW_MSG_FROM_MGMT_CPU); - - sphw_pf_to_mgmt_free(hwdev); -} - -static int sphw_comm_cmdqs_init(struct sphw_hwdev *hwdev) -{ - int err; - - err = sphw_cmdqs_init(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init cmd queues\n"); - return err; - } - - sphw_ceq_register_cb(hwdev, SPHW_CMDQ, sphw_cmdq_ceq_handler); - - err = sphw_set_cmdq_depth(hwdev, SPHW_CMDQ_DEPTH); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to set cmdq depth\n"); - goto set_cmdq_depth_err; - } - - return 0; - -set_cmdq_depth_err: - sphw_cmdqs_free(hwdev); - - return err; -} - -static void sphw_comm_cmdqs_free(struct sphw_hwdev *hwdev) -{ - sphw_ceq_unregister_cb(hwdev, SPHW_CMDQ); - sphw_cmdqs_free(hwdev); -} - -static void sphw_sync_mgmt_func_state(struct sphw_hwdev *hwdev) -{ - sphw_set_pf_status(hwdev->hwif, SPHW_PF_STATUS_ACTIVE_FLAG); -} - -static void sphw_unsync_mgmt_func_state(struct sphw_hwdev *hwdev) -{ - sphw_set_pf_status(hwdev->hwif, SPHW_PF_STATUS_INIT); -} - -static int init_basic_attributes(struct sphw_hwdev *hwdev) -{ - u64 drv_feature[COMM_MAX_FEATURE_QWORD] = {SPHW_DRV_FEATURE_QW0}; - int err, i; - - err = sphw_get_board_info(hwdev, &hwdev->board_info, SPHW_CHANNEL_COMM); - if (err) - return err; - - err = sphw_get_comm_features(hwdev, hwdev->features, COMM_MAX_FEATURE_QWORD); - if (err) { - sdk_err(hwdev->dev_hdl, "Get comm features failed\n"); - return err; - } - - sdk_info(hwdev->dev_hdl, "Comm hw features: 0x%llx, drv features: 0x%llx\n", - hwdev->features[0], drv_feature[0]); - - for (i = 0; i < COMM_MAX_FEATURE_QWORD; i++) - hwdev->features[i] &= drv_feature[i]; - - err = sphw_get_global_attr(hwdev, &hwdev->glb_attr); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to get global attribute\n"); - return err; - } - - sdk_info(hwdev->dev_hdl, "global attribute: max_host: 0x%x, max_pf: 0x%x, vf_id_start: 0x%x, mgmt cpu node id: 0x%x\n", - hwdev->glb_attr.max_host_num, hwdev->glb_attr.max_pf_num, - hwdev->glb_attr.vf_id_start, - hwdev->glb_attr.mgmt_host_node_id); - - sphw_init_profile_adapter(hwdev); - - return 0; -} - -static int init_basic_mgmt_channel(struct sphw_hwdev *hwdev) -{ 
- int err; - - err = sphw_comm_aeqs_init(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init async event queues\n"); - return err; - } - - err = sphw_comm_func_to_func_init(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init mailbox\n"); - goto func_to_func_init_err; - } - - err = init_aeqs_msix_attr(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init aeqs msix attr\n"); - goto aeqs_msix_attr_init_err; - } - - return 0; - -aeqs_msix_attr_init_err: - sphw_comm_func_to_func_free(hwdev); - -func_to_func_init_err: - sphw_comm_aeqs_free(hwdev); - - return err; -} - -static void free_base_mgmt_channel(struct sphw_hwdev *hwdev) -{ - sphw_comm_func_to_func_free(hwdev); - sphw_comm_aeqs_free(hwdev); -} - -static int init_pf_mgmt_channel(struct sphw_hwdev *hwdev) -{ - int err; - - err = sphw_comm_pf_to_mgmt_init(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init pf to mgmt\n"); - return err; - } - - return 0; -} - -static void free_pf_mgmt_channel(struct sphw_hwdev *hwdev) -{ - sphw_comm_pf_to_mgmt_free(hwdev); -} - -static int init_mgmt_channel_post(struct sphw_hwdev *hwdev) -{ - int err; - - /* mbox host channel resources will be freed in - * sphw_func_to_func_free - */ - err = sphw_mbox_init_host_msg_channel(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init mbox host channel\n"); - return err; - } - - err = init_pf_mgmt_channel(hwdev); - if (err) - return err; - - return 0; -} - -static void free_mgmt_msg_channel_post(struct sphw_hwdev *hwdev) -{ - free_pf_mgmt_channel(hwdev); -} - -static int init_cmdqs_channel(struct sphw_hwdev *hwdev) -{ - int err; - - err = dma_attr_table_init(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init dma attr table\n"); - goto dma_attr_init_err; - } - - err = sphw_comm_ceqs_init(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init completion event queues\n"); - goto ceqs_init_err; - } - - err = init_ceqs_msix_attr(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init ceqs msix attr\n"); - goto init_ceq_msix_err; - } - - /* set default wq page_size */ - if (wq_page_order > SPHW_MAX_WQ_PAGE_SIZE_ORDER) { - sdk_info(hwdev->dev_hdl, "wq_page_order exceed limit[0, %d], reset to %d\n", - SPHW_MAX_WQ_PAGE_SIZE_ORDER, - SPHW_MAX_WQ_PAGE_SIZE_ORDER); - wq_page_order = SPHW_MAX_WQ_PAGE_SIZE_ORDER; - } - hwdev->wq_page_size = SPHW_HW_WQ_PAGE_SIZE * (1U << wq_page_order); - sdk_info(hwdev->dev_hdl, "WQ page size: 0x%x\n", hwdev->wq_page_size); - err = sphw_set_wq_page_size(hwdev, sphw_global_func_id(hwdev), hwdev->wq_page_size, - SPHW_CHANNEL_COMM); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to set wq page size\n"); - goto init_wq_pg_size_err; - } - - err = sphw_comm_cmdqs_init(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init cmd queues\n"); - goto cmdq_init_err; - } - - return 0; - -cmdq_init_err: - if (SPHW_FUNC_TYPE(hwdev) != TYPE_VF) - sphw_set_wq_page_size(hwdev, sphw_global_func_id(hwdev), SPHW_HW_WQ_PAGE_SIZE, - SPHW_CHANNEL_COMM); -init_wq_pg_size_err: -init_ceq_msix_err: - sphw_comm_ceqs_free(hwdev); - -ceqs_init_err: -dma_attr_init_err: - - return err; -} - -int sphw_init_comm_ch(struct sphw_hwdev *hwdev) -{ - int err; - - err = init_basic_mgmt_channel(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init mgmt channel\n"); - return err; - } - - err = sphw_func_reset(hwdev, sphw_global_func_id(hwdev), SPHW_COMM_RES, SPHW_CHANNEL_COMM); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to reset function\n"); - goto func_reset_err; - } - - err = 
init_basic_attributes(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init basic attributes\n"); - goto init_basic_attr_err; - } - - err = init_mgmt_channel_post(hwdev); - if (err) - goto init_mgmt_channel_post_err; - - err = init_cmdqs_channel(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init cmdq channel\n"); - goto init_cmdqs_channel_err; - } - - sphw_sync_mgmt_func_state(hwdev); - - if (SPHW_F_CHANNEL_LOCK_EN(hwdev)) { - sphw_mbox_enable_channel_lock(hwdev, true); - sphw_cmdq_enable_channel_lock(hwdev, true); - } - - return 0; - -init_cmdqs_channel_err: - free_mgmt_msg_channel_post(hwdev); -init_mgmt_channel_post_err: -init_basic_attr_err: -func_reset_err: - free_base_mgmt_channel(hwdev); - - return err; -} - -void sphw_uninit_comm_ch(struct sphw_hwdev *hwdev) -{ - sphw_unsync_mgmt_func_state(hwdev); - - sphw_comm_cmdqs_free(hwdev); - - if (SPHW_FUNC_TYPE(hwdev) != TYPE_VF) - sphw_set_wq_page_size(hwdev, sphw_global_func_id(hwdev), SPHW_HW_WQ_PAGE_SIZE, - SPHW_CHANNEL_COMM); - - sphw_comm_ceqs_free(hwdev); - - sphw_deinit_profile_adapter(hwdev); - - free_mgmt_msg_channel_post(hwdev); - - free_base_mgmt_channel(hwdev); -} - -int sphw_init_hwdev(struct sphw_init_para *para) -{ - struct sphw_hwdev *hwdev; - int err; - - hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL); - if (!hwdev) - return -ENOMEM; - - *para->hwdev = hwdev; - hwdev->adapter_hdl = para->adapter_hdl; - hwdev->pcidev_hdl = para->pcidev_hdl; - hwdev->dev_hdl = para->dev_hdl; - hwdev->chip_node = para->chip_node; - - hwdev->chip_fault_stats = vzalloc(SPHW_CHIP_FAULT_SIZE); - if (!hwdev->chip_fault_stats) - goto alloc_chip_fault_stats_err; - - err = sphw_init_hwif(hwdev, para->cfg_reg_base, para->intr_reg_base, - para->mgmt_reg_base, para->db_base_phy, - para->db_base, para->db_dwqe_len); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init hwif\n"); - goto init_hwif_err; - } - - sphw_set_chip_present(hwdev); - - if (disable_cfg_comm) - return 0; - - hwdev->workq = alloc_workqueue(SPHW_HWDEV_WQ_NAME, WQ_MEM_RECLAIM, - SPHW_WQ_MAX_REQ); - if (!hwdev->workq) { - sdk_err(hwdev->dev_hdl, "Failed to alloc hardware workq\n"); - goto alloc_workq_err; - } - - sphw_init_heartbeat_detect(hwdev); - - err = init_cfg_mgmt(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init config mgmt\n"); - goto init_cfg_mgmt_err; - } - - err = sphw_init_comm_ch(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init communication channel\n"); - goto init_comm_ch_err; - } - - err = init_capability(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init capability\n"); - goto init_cap_err; - } - - err = sphw_set_comm_features(hwdev, hwdev->features, COMM_MAX_FEATURE_QWORD); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to set comm features\n"); - goto set_feature_err; - } - - return 0; - -set_feature_err: - free_capability(hwdev); - -init_cap_err: - sphw_uninit_comm_ch(hwdev); - -init_comm_ch_err: - free_cfg_mgmt(hwdev); - -init_cfg_mgmt_err: - sphw_destroy_heartbeat_detect(hwdev); - destroy_workqueue(hwdev->workq); - -alloc_workq_err: - sphw_free_hwif(hwdev); - -init_hwif_err: - vfree(hwdev->chip_fault_stats); - -alloc_chip_fault_stats_err: - kfree(hwdev); - *para->hwdev = NULL; - - return -EFAULT; -} - -void sphw_free_hwdev(void *hwdev) -{ - struct sphw_hwdev *dev = hwdev; - - sphw_func_rx_tx_flush(hwdev, SPHW_CHANNEL_COMM); - - free_capability(dev); - - sphw_uninit_comm_ch(dev); - - free_cfg_mgmt(dev); - sphw_destroy_heartbeat_detect(hwdev); - destroy_workqueue(dev->workq); - sphw_free_hwif(dev); - 
vfree(dev->chip_fault_stats); - - kfree(dev); -} - -void *sphw_get_pcidev_hdl(void *hwdev) -{ - struct sphw_hwdev *dev = (struct sphw_hwdev *)hwdev; - - if (!hwdev) - return NULL; - - return dev->pcidev_hdl; -} - -int sphw_register_service_adapter(void *hwdev, void *service_adapter, enum sphw_service_type type) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev || !service_adapter || type >= SERVICE_T_MAX) - return -EINVAL; - - if (dev->service_adapter[type]) - return -EINVAL; - - dev->service_adapter[type] = service_adapter; - - return 0; -} - -void sphw_unregister_service_adapter(void *hwdev, enum sphw_service_type type) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev || type >= SERVICE_T_MAX) - return; - - dev->service_adapter[type] = NULL; -} - -void *sphw_get_service_adapter(void *hwdev, enum sphw_service_type type) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev || type >= SERVICE_T_MAX) - return NULL; - - return dev->service_adapter[type]; -} - -int sphw_dbg_get_hw_stats(const void *hwdev, u8 *hw_stats, u16 *out_size) -{ - if (*out_size != sizeof(struct sphw_hw_stats)) { - pr_err("Unexpect out buf size from user :%u, expect: %lu\n", - *out_size, sizeof(struct sphw_hw_stats)); - return -EFAULT; - } - - memcpy(hw_stats, &((struct sphw_hwdev *)hwdev)->hw_stats, - sizeof(struct sphw_hw_stats)); - return 0; -} - -u16 sphw_dbg_clear_hw_stats(void *hwdev) -{ - memset((void *)&((struct sphw_hwdev *)hwdev)->hw_stats, 0, - sizeof(struct sphw_hw_stats)); - memset((void *)((struct sphw_hwdev *)hwdev)->chip_fault_stats, 0, - SPHW_CHIP_FAULT_SIZE); - return sizeof(struct sphw_hw_stats); -} - -void sphw_get_chip_fault_stats(const void *hwdev, u8 *chip_fault_stats, u32 offset) -{ - u32 copy_len = offset + MAX_DRV_BUF_SIZE - SPHW_CHIP_FAULT_SIZE; - - if (offset + MAX_DRV_BUF_SIZE <= SPHW_CHIP_FAULT_SIZE) - memcpy(chip_fault_stats, - ((struct sphw_hwdev *)hwdev)->chip_fault_stats - + offset, MAX_DRV_BUF_SIZE); - else - memcpy(chip_fault_stats, - ((struct sphw_hwdev *)hwdev)->chip_fault_stats - + offset, copy_len); -} - -void sphw_event_register(void *dev, void *pri_handle, sphw_event_handler callback) -{ - struct sphw_hwdev *hwdev = dev; - - if (!dev) { - pr_err("Hwdev pointer is NULL for register event\n"); - return; - } - - hwdev->event_callback = callback; - hwdev->event_pri_handle = pri_handle; -} - -void sphw_event_unregister(void *dev) -{ - struct sphw_hwdev *hwdev = dev; - - if (!dev) { - pr_err("Hwdev pointer is NULL for register event\n"); - return; - } - - hwdev->event_callback = NULL; - hwdev->event_pri_handle = NULL; -} - -void sphw_event_callback(void *hwdev, struct sphw_event_info *event) -{ - struct sphw_hwdev *dev = hwdev; - - if (!hwdev) { - pr_err("Hwdev pointer is NULL for event callback\n"); - return; - } - - if (!dev->event_callback) { - sdk_info(dev->dev_hdl, "Event callback function not register\n"); - return; - } - - dev->event_callback(dev->event_pri_handle, event); -} - -void sphw_set_pcie_order_cfg(void *handle) -{ -} - -void sphw_disable_mgmt_msg_report(void *hwdev) -{ - struct sphw_hwdev *hw_dev = (struct sphw_hwdev *)hwdev; - - sphw_set_pf_status(hw_dev->hwif, SPHW_PF_STATUS_INIT); -} - -void sphw_record_pcie_error(void *hwdev) -{ - struct sphw_hwdev *dev = (struct sphw_hwdev *)hwdev; - - if (!hwdev) - return; - - atomic_inc(&dev->hw_stats.fault_event_stats.pcie_fault_stats); -} - -int sphw_get_card_present_state(void *hwdev, bool *card_present_state) -{ - struct sphw_hwdev *dev = hwdev; - u32 addr, attr1; - - if (!hwdev || !card_present_state) - return -EINVAL; 
- - addr = SPHW_CSR_FUNC_ATTR1_ADDR; - attr1 = sphw_hwif_read_reg(dev->hwif, addr); - if (attr1 == SPHW_PCIE_LINK_DOWN) { - sdk_warn(dev->dev_hdl, "Card is not present\n"); - *card_present_state = (bool)0; - } else { - *card_present_state = (bool)1; - } - - return 0; -} - -void sphw_link_event_stats(void *dev, u8 link) -{ - struct sphw_hwdev *hwdev = dev; - - if (link) - atomic_inc(&hwdev->hw_stats.link_event_stats.link_up_stats); - else - atomic_inc(&hwdev->hw_stats.link_event_stats.link_down_stats); -} - -u8 sphw_max_pf_num(void *hwdev) -{ - if (!hwdev) - return 0; - - return SPHW_MAX_PF_NUM((struct sphw_hwdev *)hwdev); -} - -void sphw_fault_event_report(void *hwdev, u16 src, u16 level) -{ - if (!hwdev) - return; - - sdk_info(((struct sphw_hwdev *)hwdev)->dev_hdl, "Fault event report, src: %u, level: %u\n", - src, level); - - sphw_fault_post_process(hwdev, src, level); -} - -void sphw_heartbeat_lost_handler(struct work_struct *work) -{ - struct sphw_event_info event_info = { 0 }; - struct sphw_hwdev *hwdev = container_of(work, struct sphw_hwdev, - heartbeat_lost_work); - u16 src, level; - - atomic_inc(&hwdev->hw_stats.heart_lost_stats); - - if (hwdev->event_callback) { - event_info.type = - hwdev->pcie_link_down ? SPHW_EVENT_PCIE_LINK_DOWN : - SPHW_EVENT_HEART_LOST; - hwdev->event_callback(hwdev->event_pri_handle, &event_info); - } - - if (hwdev->pcie_link_down) { - src = SPHW_FAULT_SRC_PCIE_LINK_DOWN; - level = FAULT_LEVEL_HOST; - sdk_err(hwdev->dev_hdl, "Detect pcie is link down\n"); - } else { - src = SPHW_FAULT_SRC_HOST_HEARTBEAT_LOST; - level = FAULT_LEVEL_FATAL; - sdk_err(hwdev->dev_hdl, "Heart lost report received, func_id: %d\n", - sphw_global_func_id(hwdev)); - } - - sphw_fault_post_process(hwdev, src, level); -} - -#define DETECT_PCIE_LINK_DOWN_RETRY 2 -#define SPHW_HEARTBEAT_START_EXPIRE 5000 -#define SPHW_HEARTBEAT_PERIOD 1000 - -static bool sphw_is_hw_abnormal(struct sphw_hwdev *hwdev) -{ - u32 status; - - if (!sphw_get_chip_present_flag(hwdev)) - return false; - - status = sphw_get_heartbeat_status(hwdev); - if (status == SPHW_PCIE_LINK_DOWN) { - sdk_warn(hwdev->dev_hdl, "Detect BAR register read failed\n"); - hwdev->rd_bar_err_cnt++; - if (hwdev->rd_bar_err_cnt >= DETECT_PCIE_LINK_DOWN_RETRY) { - sphw_set_chip_absent(hwdev); - sphw_force_complete_all(hwdev); - hwdev->pcie_link_down = true; - return true; - } - - return false; - } - - if (status) { - hwdev->heartbeat_lost = true; - return true; - } - - hwdev->rd_bar_err_cnt = 0; - - return false; -} - -static void sphw_heartbeat_timer_handler(struct timer_list *t) -{ - struct sphw_hwdev *hwdev = from_timer(hwdev, t, heartbeat_timer); - - if (sphw_is_hw_abnormal(hwdev)) - queue_work(hwdev->workq, &hwdev->heartbeat_lost_work); - else - mod_timer(&hwdev->heartbeat_timer, - jiffies + msecs_to_jiffies(SPHW_HEARTBEAT_PERIOD)); -} - -static void sphw_init_heartbeat_detect(struct sphw_hwdev *hwdev) -{ - timer_setup(&hwdev->heartbeat_timer, sphw_heartbeat_timer_handler, 0); - - hwdev->heartbeat_timer.expires = - jiffies + msecs_to_jiffies(SPHW_HEARTBEAT_START_EXPIRE); - - add_timer(&hwdev->heartbeat_timer); - - INIT_WORK(&hwdev->heartbeat_lost_work, sphw_heartbeat_lost_handler); -} - -static void sphw_destroy_heartbeat_detect(struct sphw_hwdev *hwdev) -{ - del_timer_sync(&hwdev->heartbeat_timer); -} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwdev.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwdev.h deleted file mode 100644 index 10da31bda3d2..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwdev.h +++ 
/dev/null @@ -1,93 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_HWDEV_H -#define SPHW_HWDEV_H - -#include "sphw_mt.h" -#include "sphw_crm.h" -#include "sphw_hw.h" - -struct cfg_mgmt_info; - -struct sphw_hwif; -struct sphw_aeqs; -struct sphw_ceqs; -struct sphw_mbox; -struct sphw_msg_pf_to_mgmt; - -struct sphw_page_addr { - void *virt_addr; - u64 phys_addr; -}; - -struct mqm_addr_trans_tbl_info { - u32 chunk_num; - u32 search_gpa_num; - u32 page_size; - u32 page_num; - struct sphw_page_addr *brm_srch_page_addr; -}; - -struct sphw_hwdev { - void *adapter_hdl; /* pointer to spnic_pcidev or NDIS_Adapter */ - void *pcidev_hdl; /* pointer to pcidev or Handler */ - void *dev_hdl; /* pointer to pcidev->dev or Handler, for - * sdk_err() or dma_alloc() - */ - - void *service_adapter[SERVICE_T_MAX]; - void *chip_node; - void *ppf_hwdev; - - u32 wq_page_size; - int chip_present_flag; - - struct sphw_hwif *hwif; /* include void __iomem *bar */ - struct comm_global_attr glb_attr; - u64 features[COMM_MAX_FEATURE_QWORD]; - - struct cfg_mgmt_info *cfg_mgmt; - - struct sphw_cmdqs *cmdqs; - struct sphw_aeqs *aeqs; - struct sphw_ceqs *ceqs; - struct sphw_mbox *func_to_func; - struct sphw_msg_pf_to_mgmt *pf_to_mgmt; - - void *cqm_hdl; - struct mqm_addr_trans_tbl_info mqm_att; - struct sphw_page_addr page_pa0; - struct sphw_page_addr page_pa1; - u32 statufull_ref_cnt; - - struct sphw_hw_stats hw_stats; - u8 *chip_fault_stats; - - sphw_event_handler event_callback; - void *event_pri_handle; - - struct sphw_board_info board_info; - - int prof_adap_type; - struct sphw_prof_attr *prof_attr; - - struct workqueue_struct *workq; - - u32 rd_bar_err_cnt; - bool pcie_link_down; - bool heartbeat_lost; - struct timer_list heartbeat_timer; - struct work_struct heartbeat_lost_work; -}; - -#define SPHW_MAX_HOST_NUM(hwdev) ((hwdev)->glb_attr.max_host_num) -#define SPHW_MAX_PF_NUM(hwdev) ((hwdev)->glb_attr.max_pf_num) -#define SPHW_MGMT_CPU_NODE_ID(hwdev) ((hwdev)->glb_attr.mgmt_host_node_id) - -#define COMM_FEATURE_QW0(hwdev, feature) ((hwdev)->features[0] & COMM_F_##feature) -#define COMM_SUPPORT_API_CHAIN(hwdev) COMM_FEATURE_QW0(hwdev, API_CHAIN) - -#define SPHW_DRV_FEATURE_QW0 COMM_F_API_CHAIN - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwif.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwif.c deleted file mode 100644 index fbb1128957f0..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwif.c +++ /dev/null @@ -1,886 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt - -#include <linux/types.h> -#include <linux/pci.h> -#include <linux/delay.h> -#include <linux/module.h> - -#include "sphw_csr.h" -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "sphw_common.h" -#include "sphw_hwdev.h" -#include "sphw_hwif.h" - -#define WAIT_HWIF_READY_TIMEOUT 10000 -#define SPHW_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT 60000 - -#define DB_IDX(db, db_base) \ - ((u32)(((ulong)(db) - (ulong)(db_base)) / \ - SPHW_DB_PAGE_SIZE)) - -#define SPHW_AF0_FUNC_GLOBAL_IDX_SHIFT 0 -#define SPHW_AF0_P2P_IDX_SHIFT 12 -#define SPHW_AF0_PCI_INTF_IDX_SHIFT 17 -#define SPHW_AF0_VF_IN_PF_SHIFT 20 -#define SPHW_AF0_FUNC_TYPE_SHIFT 28 - -#define SPHW_AF0_FUNC_GLOBAL_IDX_MASK 0xFFF -#define SPHW_AF0_P2P_IDX_MASK 0x1F -#define SPHW_AF0_PCI_INTF_IDX_MASK 0x7 -#define SPHW_AF0_VF_IN_PF_MASK 0xFF -#define SPHW_AF0_FUNC_TYPE_MASK 0x1 - -#define SPHW_AF0_GET(val, 
member) \ - (((val) >> SPHW_AF0_##member##_SHIFT) & SPHW_AF0_##member##_MASK) - -#define SPHW_AF1_PPF_IDX_SHIFT 0 -#define SPHW_AF1_AEQS_PER_FUNC_SHIFT 8 -#define SPHW_AF1_MGMT_INIT_STATUS_SHIFT 30 -#define SPHW_AF1_PF_INIT_STATUS_SHIFT 31 - -#define SPHW_AF1_PPF_IDX_MASK 0x3F -#define SPHW_AF1_AEQS_PER_FUNC_MASK 0x3 -#define SPHW_AF1_MGMT_INIT_STATUS_MASK 0x1 -#define SPHW_AF1_PF_INIT_STATUS_MASK 0x1 - -#define SPHW_AF1_GET(val, member) \ - (((val) >> SPHW_AF1_##member##_SHIFT) & SPHW_AF1_##member##_MASK) - -#define SPHW_AF2_CEQS_PER_FUNC_SHIFT 0 -#define SPHW_AF2_DMA_ATTR_PER_FUNC_SHIFT 9 -#define SPHW_AF2_IRQS_PER_FUNC_SHIFT 16 - -#define SPHW_AF2_CEQS_PER_FUNC_MASK 0x1FF -#define SPHW_AF2_DMA_ATTR_PER_FUNC_MASK 0x7 -#define SPHW_AF2_IRQS_PER_FUNC_MASK 0x7FF - -#define SPHW_AF2_GET(val, member) \ - (((val) >> SPHW_AF2_##member##_SHIFT) & SPHW_AF2_##member##_MASK) - -#define SPHW_AF3_GLOBAL_VF_ID_OF_NXT_PF_SHIFT 0 -#define SPHW_AF3_GLOBAL_VF_ID_OF_PF_SHIFT 16 - -#define SPHW_AF3_GLOBAL_VF_ID_OF_NXT_PF_MASK 0xFFF -#define SPHW_AF3_GLOBAL_VF_ID_OF_PF_MASK 0xFFF - -#define SPHW_AF3_GET(val, member) \ - (((val) >> SPHW_AF3_##member##_SHIFT) & SPHW_AF3_##member##_MASK) - -#define SPHW_AF4_DOORBELL_CTRL_SHIFT 0 -#define SPHW_AF4_DOORBELL_CTRL_MASK 0x1 - -#define SPHW_AF4_GET(val, member) \ - (((val) >> SPHW_AF4_##member##_SHIFT) & SPHW_AF4_##member##_MASK) - -#define SPHW_AF4_SET(val, member) \ - (((val) & SPHW_AF4_##member##_MASK) << SPHW_AF4_##member##_SHIFT) - -#define SPHW_AF4_CLEAR(val, member) \ - ((val) & (~(SPHW_AF4_##member##_MASK << SPHW_AF4_##member##_SHIFT))) - -#define SPHW_AF5_OUTBOUND_CTRL_SHIFT 0 -#define SPHW_AF5_OUTBOUND_CTRL_MASK 0x1 - -#define SPHW_AF5_GET(val, member) \ - (((val) >> SPHW_AF5_##member##_SHIFT) & SPHW_AF5_##member##_MASK) - -#define SPHW_AF5_SET(val, member) \ - (((val) & SPHW_AF5_##member##_MASK) << SPHW_AF5_##member##_SHIFT) - -#define SPHW_AF5_CLEAR(val, member) \ - ((val) & (~(SPHW_AF5_##member##_MASK << SPHW_AF5_##member##_SHIFT))) - -#define SPHW_AF6_PF_STATUS_SHIFT 0 -#define SPHW_AF6_PF_STATUS_MASK 0xFFFF - -#define SPHW_AF6_SET(val, member) \ - ((((u32)(val)) & SPHW_AF6_##member##_MASK) << \ - SPHW_AF6_##member##_SHIFT) - -#define SPHW_AF6_GET(val, member) \ - (((val) >> SPHW_AF6_##member##_SHIFT) & SPHW_AF6_##member##_MASK) - -#define SPHW_AF6_CLEAR(val, member) \ - ((val) & (~(SPHW_AF6_##member##_MASK << \ - SPHW_AF6_##member##_SHIFT))) - -#define sphw_PPF_ELECT_PORT_IDX_SHIFT 0 - -#define sphw_PPF_ELECT_PORT_IDX_MASK 0x3F - -#define sphw_PPF_ELECT_PORT_GET(val, member) \ - (((val) >> sphw_PPF_ELECT_PORT_##member##_SHIFT) & \ - sphw_PPF_ELECT_PORT_##member##_MASK) - -#define SPHW_PPF_ELECTION_IDX_SHIFT 0 - -#define SPHW_PPF_ELECTION_IDX_MASK 0x3F - -#define SPHW_PPF_ELECTION_SET(val, member) \ - (((val) & SPHW_PPF_ELECTION_##member##_MASK) << \ - SPHW_PPF_ELECTION_##member##_SHIFT) - -#define SPHW_PPF_ELECTION_GET(val, member) \ - (((val) >> SPHW_PPF_ELECTION_##member##_SHIFT) & \ - SPHW_PPF_ELECTION_##member##_MASK) - -#define SPHW_PPF_ELECTION_CLEAR(val, member) \ - ((val) & (~(SPHW_PPF_ELECTION_##member##_MASK << \ - SPHW_PPF_ELECTION_##member##_SHIFT))) - -#define SPHW_MPF_ELECTION_IDX_SHIFT 0 - -#define SPHW_MPF_ELECTION_IDX_MASK 0x1F - -#define SPHW_MPF_ELECTION_SET(val, member) \ - (((val) & SPHW_MPF_ELECTION_##member##_MASK) << \ - SPHW_MPF_ELECTION_##member##_SHIFT) - -#define SPHW_MPF_ELECTION_GET(val, member) \ - (((val) >> SPHW_MPF_ELECTION_##member##_SHIFT) & \ - SPHW_MPF_ELECTION_##member##_MASK) - -#define 
SPHW_MPF_ELECTION_CLEAR(val, member) \ - ((val) & (~(SPHW_MPF_ELECTION_##member##_MASK << \ - SPHW_MPF_ELECTION_##member##_SHIFT))) - -#define SPHW_GET_REG_FLAG(reg) ((reg) & (~(SPHW_REGS_FLAG_MAKS))) - -#define SPHW_GET_REG_ADDR(reg) ((reg) & (SPHW_REGS_FLAG_MAKS)) - -u32 sphw_hwif_read_reg(struct sphw_hwif *hwif, u32 reg) -{ - if (SPHW_GET_REG_FLAG(reg) == SPHW_MGMT_REGS_FLAG) - return be32_to_cpu(readl(hwif->mgmt_regs_base + - SPHW_GET_REG_ADDR(reg))); - else - return be32_to_cpu(readl(hwif->cfg_regs_base + - SPHW_GET_REG_ADDR(reg))); -} - -void sphw_hwif_write_reg(struct sphw_hwif *hwif, u32 reg, u32 val) -{ - if (SPHW_GET_REG_FLAG(reg) == SPHW_MGMT_REGS_FLAG) - writel(cpu_to_be32(val), - hwif->mgmt_regs_base + SPHW_GET_REG_ADDR(reg)); - else - writel(cpu_to_be32(val), - hwif->cfg_regs_base + SPHW_GET_REG_ADDR(reg)); -} - -/** - * sphw_get_heartbeat_status - get heart beat status - * @hwdev: the pointer to hw device - * Return: 0 - normal, 1 - heart lost, 0xFFFFFFFF - Pcie link down - **/ -u32 sphw_get_heartbeat_status(struct sphw_hwdev *hwdev) -{ - u32 attr1; - - attr1 = sphw_hwif_read_reg(hwdev->hwif, SPHW_CSR_FUNC_ATTR1_ADDR); - if (attr1 == SPHW_PCIE_LINK_DOWN) - return attr1; - - return !SPHW_AF1_GET(attr1, MGMT_INIT_STATUS); -} - -/** - * hwif_ready - test if the HW initialization passed - * @hwdev: the pointer to hw device - * Return: 0 - success, negative - failure - **/ -static int hwif_ready(struct sphw_hwdev *hwdev) -{ - if (sphw_get_heartbeat_status(hwdev)) - return -EBUSY; - - return 0; -} - -static enum sphw_wait_return check_hwif_ready_handler(void *priv_data) -{ - if (!hwif_ready(priv_data)) - return WAIT_PROCESS_CPL; - - return WAIT_PROCESS_WAITING; -} - -static int wait_hwif_ready(struct sphw_hwdev *hwdev) -{ - if (!sphw_wait_for_timeout(hwdev, check_hwif_ready_handler, - WAIT_HWIF_READY_TIMEOUT, USEC_PER_MSEC)) - return 0; - - sdk_err(hwdev->dev_hdl, "Wait for hwif timeout\n"); - return -EBUSY; -} - -/** - * set_hwif_attr - set the attributes as members in hwif - * @hwif: the hardware interface of a pci function device - * @attr0: the first attribute that was read from the hw - * @attr1: the second attribute that was read from the hw - * @attr2: the third attribute that was read from the hw - * @attr3: the fourth attribute that was read from the hw - **/ -static void set_hwif_attr(struct sphw_hwif *hwif, u32 attr0, u32 attr1, - u32 attr2, u32 attr3) -{ - hwif->attr.func_global_idx = SPHW_AF0_GET(attr0, FUNC_GLOBAL_IDX); - hwif->attr.port_to_port_idx = SPHW_AF0_GET(attr0, P2P_IDX); - hwif->attr.pci_intf_idx = SPHW_AF0_GET(attr0, PCI_INTF_IDX); - hwif->attr.vf_in_pf = SPHW_AF0_GET(attr0, VF_IN_PF); - hwif->attr.func_type = SPHW_AF0_GET(attr0, FUNC_TYPE); - - hwif->attr.ppf_idx = SPHW_AF1_GET(attr1, PPF_IDX); - hwif->attr.num_aeqs = BIT(SPHW_AF1_GET(attr1, AEQS_PER_FUNC)); - hwif->attr.num_ceqs = (u8)SPHW_AF2_GET(attr2, CEQS_PER_FUNC); - hwif->attr.num_irqs = SPHW_AF2_GET(attr2, IRQS_PER_FUNC); - hwif->attr.num_dma_attr = BIT(SPHW_AF2_GET(attr2, DMA_ATTR_PER_FUNC)); - - hwif->attr.global_vf_id_of_pf = SPHW_AF3_GET(attr3, GLOBAL_VF_ID_OF_PF); - - pr_info("func_global_idx: 0x%x, port_to_port_idx: 0x%x, pci_intf_idx: 0x%x, vf_in_pf: 0x%x, func_type: %d\n", - hwif->attr.func_global_idx, hwif->attr.port_to_port_idx, - hwif->attr.pci_intf_idx, hwif->attr.vf_in_pf, - hwif->attr.func_type); - - pr_info("ppf_idx: 0x%x, num_aeqs: 0x%x, num_ceqs: 0x%x, num_irqs: 0x%x, num_dma_attr: 0x%x, global_vf_id_of_pf: %u\n", - hwif->attr.ppf_idx, hwif->attr.num_aeqs, - hwif->attr.num_ceqs, 
hwif->attr.num_irqs, - hwif->attr.num_dma_attr, hwif->attr.global_vf_id_of_pf); -} - -/** - * get_hwif_attr - read and set the attributes as members in hwif - * @hwif: the hardware interface of a pci function device - **/ -static void get_hwif_attr(struct sphw_hwif *hwif) -{ - u32 addr, attr0, attr1, attr2, attr3; - - addr = SPHW_CSR_FUNC_ATTR0_ADDR; - attr0 = sphw_hwif_read_reg(hwif, addr); - - addr = SPHW_CSR_FUNC_ATTR1_ADDR; - attr1 = sphw_hwif_read_reg(hwif, addr); - - addr = SPHW_CSR_FUNC_ATTR2_ADDR; - attr2 = sphw_hwif_read_reg(hwif, addr); - - addr = SPHW_CSR_FUNC_ATTR3_ADDR; - attr3 = sphw_hwif_read_reg(hwif, addr); - - pr_info("attr0: 0x%08x, attr1: 0x%08x, attr2: 0x%08x, attr3: 0x%08x\n", - attr0, attr1, attr2, attr3); - set_hwif_attr(hwif, attr0, attr1, attr2, attr3); -} - -void sphw_set_pf_status(struct sphw_hwif *hwif, enum sphw_pf_status status) -{ - u32 attr6 = SPHW_AF6_SET(status, PF_STATUS); - u32 addr = SPHW_CSR_FUNC_ATTR6_ADDR; - - if (hwif->attr.func_type == TYPE_VF) - return; - - sphw_hwif_write_reg(hwif, addr, attr6); -} - -enum sphw_pf_status sphw_get_pf_status(struct sphw_hwif *hwif) -{ - u32 attr6 = sphw_hwif_read_reg(hwif, SPHW_CSR_FUNC_ATTR6_ADDR); - - return SPHW_AF6_GET(attr6, PF_STATUS); -} - -enum sphw_doorbell_ctrl sphw_get_doorbell_ctrl_status(struct sphw_hwif *hwif) -{ - u32 attr4 = sphw_hwif_read_reg(hwif, SPHW_CSR_FUNC_ATTR4_ADDR); - - return SPHW_AF4_GET(attr4, DOORBELL_CTRL); -} - -enum sphw_outbound_ctrl sphw_get_outbound_ctrl_status(struct sphw_hwif *hwif) -{ - u32 attr5 = sphw_hwif_read_reg(hwif, SPHW_CSR_FUNC_ATTR5_ADDR); - - return SPHW_AF5_GET(attr5, OUTBOUND_CTRL); -} - -void sphw_enable_doorbell(struct sphw_hwif *hwif) -{ - u32 addr, attr4; - - addr = SPHW_CSR_FUNC_ATTR4_ADDR; - attr4 = sphw_hwif_read_reg(hwif, addr); - - attr4 = SPHW_AF4_CLEAR(attr4, DOORBELL_CTRL); - attr4 |= SPHW_AF4_SET(ENABLE_DOORBELL, DOORBELL_CTRL); - - sphw_hwif_write_reg(hwif, addr, attr4); -} - -void sphw_disable_doorbell(struct sphw_hwif *hwif) -{ - u32 addr, attr4; - - addr = SPHW_CSR_FUNC_ATTR4_ADDR; - attr4 = sphw_hwif_read_reg(hwif, addr); - - attr4 = SPHW_AF4_CLEAR(attr4, DOORBELL_CTRL); - attr4 |= SPHW_AF4_SET(DISABLE_DOORBELL, DOORBELL_CTRL); - - sphw_hwif_write_reg(hwif, addr, attr4); -} - -/** - * set_ppf - try to set hwif as ppf and set the type of hwif in this case - * @hwif: the hardware interface of a pci function device - **/ -static void set_ppf(struct sphw_hwif *hwif) -{ - struct sphw_func_attr *attr = &hwif->attr; - u32 addr, val, ppf_election; - - /* Read Modify Write */ - addr = SPHW_CSR_PPF_ELECTION_ADDR; - - val = sphw_hwif_read_reg(hwif, addr); - val = SPHW_PPF_ELECTION_CLEAR(val, IDX); - - ppf_election = SPHW_PPF_ELECTION_SET(attr->func_global_idx, IDX); - val |= ppf_election; - - sphw_hwif_write_reg(hwif, addr, val); - - /* Check PPF */ - val = sphw_hwif_read_reg(hwif, addr); - - attr->ppf_idx = SPHW_PPF_ELECTION_GET(val, IDX); - if (attr->ppf_idx == attr->func_global_idx) - attr->func_type = TYPE_PPF; -} - -/** - * get_mpf - get the mpf index into the hwif - * @hwif: the hardware interface of a pci function device - **/ -static void get_mpf(struct sphw_hwif *hwif) -{ - struct sphw_func_attr *attr = &hwif->attr; - u32 mpf_election, addr; - - addr = SPHW_CSR_GLOBAL_MPF_ELECTION_ADDR; - - mpf_election = sphw_hwif_read_reg(hwif, addr); - attr->mpf_idx = SPHW_MPF_ELECTION_GET(mpf_election, IDX); -} - -/** - * set_mpf - try to set hwif as mpf and set the mpf idx in hwif - * @hwif: the hardware interface of a pci function device - **/ -static void 
set_mpf(struct sphw_hwif *hwif) -{ - struct sphw_func_attr *attr = &hwif->attr; - u32 addr, val, mpf_election; - - /* Read Modify Write */ - addr = SPHW_CSR_GLOBAL_MPF_ELECTION_ADDR; - - val = sphw_hwif_read_reg(hwif, addr); - - val = SPHW_MPF_ELECTION_CLEAR(val, IDX); - mpf_election = SPHW_MPF_ELECTION_SET(attr->func_global_idx, IDX); - - val |= mpf_election; - sphw_hwif_write_reg(hwif, addr, val); -} - -static int init_db_area_idx(struct sphw_free_db_area *free_db_area, u64 db_dwqe_len) -{ - u32 db_max_areas; - - db_max_areas = (db_dwqe_len > SPHW_DB_DWQE_SIZE) ? SPHW_DB_MAX_AREAS : - (u32)(db_dwqe_len / SPHW_DB_PAGE_SIZE); - free_db_area->db_bitmap_array = bitmap_zalloc(db_max_areas, GFP_KERNEL); - if (!free_db_area->db_bitmap_array) { - pr_err("Failed to allocate db area.\n"); - return -ENOMEM; - } - free_db_area->db_max_areas = db_max_areas; - spin_lock_init(&free_db_area->idx_lock); - - return 0; -} - -static void free_db_area(struct sphw_free_db_area *free_db_area) -{ - kfree(free_db_area->db_bitmap_array); -} - -static int get_db_idx(struct sphw_hwif *hwif, u32 *idx) -{ - struct sphw_free_db_area *free_db_area = &hwif->free_db_area; - u32 pg_idx; - - spin_lock(&free_db_area->idx_lock); - pg_idx = (u32)find_first_zero_bit(free_db_area->db_bitmap_array, - free_db_area->db_max_areas); - if (pg_idx == free_db_area->db_max_areas) { - spin_unlock(&free_db_area->idx_lock); - return -ENOMEM; - } - set_bit(pg_idx, free_db_area->db_bitmap_array); - spin_unlock(&free_db_area->idx_lock); - - *idx = pg_idx; - - return 0; -} - -static void free_db_idx(struct sphw_hwif *hwif, u32 idx) -{ - struct sphw_free_db_area *free_db_area = &hwif->free_db_area; - - if (idx >= free_db_area->db_max_areas) - return; - - spin_lock(&free_db_area->idx_lock); - clear_bit((int)idx, free_db_area->db_bitmap_array); - - spin_unlock(&free_db_area->idx_lock); -} - -void sphw_free_db_addr(void *hwdev, const void __iomem *db_base, void __iomem *dwqe_base) -{ - struct sphw_hwif *hwif = NULL; - u32 idx; - - if (!hwdev || !db_base) - return; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - idx = DB_IDX(db_base, hwif->db_base); - - free_db_idx(hwif, idx); -} - -int sphw_alloc_db_addr(void *hwdev, void __iomem **db_base, void __iomem **dwqe_base) -{ - struct sphw_hwif *hwif = NULL; - u32 idx = 0; - int err; - - if (!hwdev || !db_base) - return -EINVAL; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - err = get_db_idx(hwif, &idx); - if (err) - return -EFAULT; - - *db_base = hwif->db_base + idx * SPHW_DB_PAGE_SIZE; - - if (!dwqe_base) - return 0; - - *dwqe_base = (u8 *)*db_base + SPHW_DWQE_OFFSET; - - return 0; -} - -void sphw_free_db_phy_addr(void *hwdev, u64 db_base, u64 dwqe_base) -{ - struct sphw_hwif *hwif = NULL; - u32 idx; - - if (!hwdev) - return; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - idx = DB_IDX(db_base, hwif->db_base_phy); - - free_db_idx(hwif, idx); -} - -int sphw_alloc_db_phy_addr(void *hwdev, u64 *db_base, u64 *dwqe_base) -{ - struct sphw_hwif *hwif = NULL; - u32 idx; - int err; - - if (!hwdev || !db_base || !dwqe_base) - return -EINVAL; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - err = get_db_idx(hwif, &idx); - if (err) - return -EFAULT; - - *db_base = hwif->db_base_phy + idx * SPHW_DB_PAGE_SIZE; - *dwqe_base = *db_base + SPHW_DWQE_OFFSET; - - return 0; -} - -void sphw_set_msix_auto_mask_state(void *hwdev, u16 msix_idx, enum sphw_msix_auto_mask flag) -{ - struct sphw_hwif *hwif = NULL; - u32 mask_bits; - u32 addr; - - if (!hwdev) - return; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - if 
(flag) - mask_bits = SPHW_MSI_CLR_INDIR_SET(1, AUTO_MSK_SET); - else - mask_bits = SPHW_MSI_CLR_INDIR_SET(1, AUTO_MSK_CLR); - - mask_bits = mask_bits | SPHW_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX); - - addr = SPHW_CSR_FUNC_MSI_CLR_WR_ADDR; - sphw_hwif_write_reg(hwif, addr, mask_bits); -} - -void sphw_set_msix_state(void *hwdev, u16 msix_idx, enum sphw_msix_state flag) -{ - struct sphw_hwif *hwif = NULL; - u32 mask_bits; - u32 addr; - u8 int_msk = 1; - - if (!hwdev) - return; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - if (flag) - mask_bits = SPHW_MSI_CLR_INDIR_SET(int_msk, INT_MSK_SET); - else - mask_bits = SPHW_MSI_CLR_INDIR_SET(int_msk, INT_MSK_CLR); - mask_bits = mask_bits | SPHW_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX); - - addr = SPHW_CSR_FUNC_MSI_CLR_WR_ADDR; - sphw_hwif_write_reg(hwif, addr, mask_bits); -} - -static void disable_all_msix(struct sphw_hwdev *hwdev) -{ - u16 num_irqs = hwdev->hwif->attr.num_irqs; - u16 i; - - for (i = 0; i < num_irqs; i++) - sphw_set_msix_state(hwdev, i, SPHW_MSIX_DISABLE); -} - -static enum sphw_wait_return check_db_flush_enable_handler(void *priv_data) -{ - struct sphw_hwif *hwif = priv_data; - enum sphw_doorbell_ctrl db_ctrl; - - db_ctrl = sphw_get_doorbell_ctrl_status(hwif); - if (db_ctrl == ENABLE_DOORBELL) - return WAIT_PROCESS_CPL; - - return WAIT_PROCESS_WAITING; -} - -static enum sphw_wait_return check_db_flush_disable_handler(void *priv_data) -{ - struct sphw_hwif *hwif = priv_data; - enum sphw_doorbell_ctrl db_ctrl; - - db_ctrl = sphw_get_doorbell_ctrl_status(hwif); - if (db_ctrl == DISABLE_DOORBELL) - return WAIT_PROCESS_CPL; - - return WAIT_PROCESS_WAITING; -} - -int wait_until_doorbell_flush_states(struct sphw_hwif *hwif, - enum sphw_doorbell_ctrl states) -{ - if (!hwif) - return -EFAULT; - - return sphw_wait_for_timeout(hwif, states == ENABLE_DOORBELL ? 
- check_db_flush_enable_handler : check_db_flush_disable_handler, - SPHW_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT, USEC_PER_MSEC); -} - -static enum sphw_wait_return check_db_outbound_enable_handler(void *priv_data) -{ - struct sphw_hwif *hwif = priv_data; - enum sphw_doorbell_ctrl db_ctrl; - enum sphw_outbound_ctrl outbound_ctrl; - - db_ctrl = sphw_get_doorbell_ctrl_status(hwif); - outbound_ctrl = sphw_get_outbound_ctrl_status(hwif); - - if (outbound_ctrl == ENABLE_OUTBOUND && db_ctrl == ENABLE_DOORBELL) - return WAIT_PROCESS_CPL; - - return WAIT_PROCESS_WAITING; -} - -static int wait_until_doorbell_and_outbound_enabled(struct sphw_hwif *hwif) -{ - return sphw_wait_for_timeout(hwif, check_db_outbound_enable_handler, - SPHW_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT, USEC_PER_MSEC); -} - -/** - * sphw_init_hwif - initialize the hw interface - * @hwif: the hardware interface of a pci function device - * @pdev: the pci device that will be part of the hwif struct - * Return: 0 - success, negative - failure - **/ -int sphw_init_hwif(struct sphw_hwdev *hwdev, void *cfg_reg_base, void *intr_reg_base, - void *mgmt_regs_base, u64 db_base_phy, void *db_base, u64 db_dwqe_len) -{ - struct sphw_hwif *hwif = NULL; - u32 attr4, attr5; - int err; - - hwif = kzalloc(sizeof(*hwif), GFP_KERNEL); - if (!hwif) - return -ENOMEM; - - hwdev->hwif = hwif; - hwif->pdev = hwdev->pcidev_hdl; - - /* if function is VF, mgmt_regs_base will be NULL */ - if (!mgmt_regs_base) - hwif->cfg_regs_base = (u8 *)cfg_reg_base + - SPHW_VF_CFG_REG_OFFSET; - else - hwif->cfg_regs_base = cfg_reg_base; - - hwif->intr_regs_base = intr_reg_base; - hwif->mgmt_regs_base = mgmt_regs_base; - sdk_info(hwdev->dev_hdl, "init intr_regs_base: %p, mgmt_regs_base: %p, db_base: %p, db_dwqe_len: 0x%llx\n", - hwif->intr_regs_base, hwif->mgmt_regs_base, - db_base, db_dwqe_len); - - hwif->db_base_phy = db_base_phy; - hwif->db_base = db_base; - hwif->db_dwqe_len = db_dwqe_len; - err = init_db_area_idx(&hwif->free_db_area, hwif->db_dwqe_len); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to init db area.\n"); - goto init_db_area_err; - } - - err = wait_hwif_ready(hwdev); - if (err) { - sdk_err(hwdev->dev_hdl, "Chip status is not ready\n"); - goto hwif_ready_err; - } - - get_hwif_attr(hwif); - - err = wait_until_doorbell_and_outbound_enabled(hwif); - if (err) { - attr4 = sphw_hwif_read_reg(hwif, SPHW_CSR_FUNC_ATTR4_ADDR); - attr5 = sphw_hwif_read_reg(hwif, SPHW_CSR_FUNC_ATTR5_ADDR); - sdk_err(hwdev->dev_hdl, "Hw doorbell/outbound is disabled, attr4 0x%x attr5 0x%x\n", - attr4, attr5); - goto hwif_ready_err; - } - - if (!SPHW_IS_VF(hwdev)) { - set_ppf(hwif); - - if (SPHW_IS_PPF(hwdev)) - set_mpf(hwif); - - get_mpf(hwif); - } - - disable_all_msix(hwdev); - /* disable mgmt cpu report any event */ - sphw_set_pf_status(hwdev->hwif, SPHW_PF_STATUS_INIT); - - sdk_info(hwdev->dev_hdl, "global_func_idx: %u, func_type: %d, host_id: %u, ppf: %u, mpf: %u\n", - hwif->attr.func_global_idx, hwif->attr.func_type, - hwif->attr.pci_intf_idx, hwif->attr.ppf_idx, - hwif->attr.mpf_idx); - - return 0; - -hwif_ready_err: - free_db_area(&hwif->free_db_area); -init_db_area_err: - kfree(hwif); - - return err; -} - -/** - * sphw_free_hwif - free the hw interface - * @hwif: the hardware interface of a pci function device - * @pdev: the pci device that will be part of the hwif struct - **/ -void sphw_free_hwif(struct sphw_hwdev *hwdev) -{ - free_db_area(&hwdev->hwif->free_db_area); - kfree(hwdev->hwif); -} - -u16 sphw_global_func_id(void *hwdev) -{ - struct sphw_hwif *hwif = NULL; - - if (!hwdev) 
- return 0; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - return hwif->attr.func_global_idx; -} - -u16 sphw_intr_num(void *hwdev) -{ - struct sphw_hwif *hwif = NULL; - - if (!hwdev) - return 0; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - return hwif->attr.num_irqs; -} - -u8 sphw_pf_id_of_vf(void *hwdev) -{ - struct sphw_hwif *hwif = NULL; - - if (!hwdev) - return 0; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - return hwif->attr.port_to_port_idx; -} - -u8 sphw_pcie_itf_id(void *hwdev) -{ - struct sphw_hwif *hwif = NULL; - - if (!hwdev) - return 0; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - return hwif->attr.pci_intf_idx; -} - -u8 sphw_vf_in_pf(void *hwdev) -{ - struct sphw_hwif *hwif = NULL; - - if (!hwdev) - return 0; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - return hwif->attr.vf_in_pf; -} - -enum func_type sphw_func_type(void *hwdev) -{ - struct sphw_hwif *hwif = NULL; - - if (!hwdev) - return 0; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - return hwif->attr.func_type; -} - -u8 sphw_ceq_num(void *hwdev) -{ - struct sphw_hwif *hwif = NULL; - - if (!hwdev) - return 0; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - return hwif->attr.num_ceqs; -} - -u8 sphw_dma_attr_entry_num(void *hwdev) -{ - struct sphw_hwif *hwif = NULL; - - if (!hwdev) - return 0; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - return hwif->attr.num_dma_attr; -} - -u16 sphw_glb_pf_vf_offset(void *hwdev) -{ - struct sphw_hwif *hwif = NULL; - - if (!hwdev) - return 0; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - return hwif->attr.global_vf_id_of_pf; -} - -u8 sphw_mpf_idx(void *hwdev) -{ - struct sphw_hwif *hwif = NULL; - - if (!hwdev) - return 0; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - return hwif->attr.mpf_idx; -} - -u8 sphw_ppf_idx(void *hwdev) -{ - struct sphw_hwif *hwif = NULL; - - if (!hwdev) - return 0; - - hwif = ((struct sphw_hwdev *)hwdev)->hwif; - - return hwif->attr.ppf_idx; -} - -u8 sphw_host_ppf_idx(void *hwdev, u8 host_id) -{ - struct sphw_hwdev *dev = hwdev; - u32 ppf_elect_port_addr; - u32 val; - - if (!hwdev) - return 0; - - ppf_elect_port_addr = SPHW_CSR_FUNC_PPF_ELECT(host_id); - val = sphw_hwif_read_reg(dev->hwif, ppf_elect_port_addr); - - return sphw_PPF_ELECT_PORT_GET(val, IDX); -} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwif.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwif.h deleted file mode 100644 index 9035baf8a66e..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_hwif.h +++ /dev/null @@ -1,102 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_HWIF_H -#define SPHW_HWIF_H - -#define SPHW_PCIE_LINK_DOWN 0xFFFFFFFF - -struct sphw_free_db_area { - unsigned long *db_bitmap_array; - u32 db_max_areas; - /* spinlock for allocating doorbell area */ - spinlock_t idx_lock; -}; - -struct sphw_func_attr { - u16 func_global_idx; - u8 port_to_port_idx; - u8 pci_intf_idx; - u8 vf_in_pf; - enum func_type func_type; - - u8 mpf_idx; - - u8 ppf_idx; - - u16 num_irqs; - u8 num_aeqs; - u8 num_ceqs; - - u8 num_dma_attr; - - u16 global_vf_id_of_pf; -}; - -struct sphw_hwif { - u8 __iomem *cfg_regs_base; - u8 __iomem *intr_regs_base; - u8 __iomem *mgmt_regs_base; - u64 db_base_phy; - u64 db_dwqe_len; - u8 __iomem *db_base; - - struct sphw_free_db_area free_db_area; - - struct sphw_func_attr attr; - - void *pdev; -}; - -enum sphw_outbound_ctrl { - ENABLE_OUTBOUND = 0x0, - DISABLE_OUTBOUND = 0x1, -}; - -enum sphw_doorbell_ctrl { - ENABLE_DOORBELL = 0x0, - 
DISABLE_DOORBELL = 0x1, -}; - -enum sphw_pf_status { - SPHW_PF_STATUS_INIT = 0X0, - SPHW_PF_STATUS_ACTIVE_FLAG = 0x11, - SPHW_PF_STATUS_FLR_START_FLAG = 0x12, - SPHW_PF_STATUS_FLR_FINISH_FLAG = 0x13, -}; - -#define SPHW_HWIF_NUM_AEQS(hwif) ((hwif)->attr.num_aeqs) -#define SPHW_HWIF_NUM_CEQS(hwif) ((hwif)->attr.num_ceqs) -#define SPHW_HWIF_NUM_IRQS(hwif) ((hwif)->attr.num_irqs) -#define SPHW_HWIF_GLOBAL_IDX(hwif) ((hwif)->attr.func_global_idx) -#define SPHW_HWIF_GLOBAL_VF_OFFSET(hwif) ((hwif)->attr.global_vf_id_of_pf) -#define SPHW_HWIF_PPF_IDX(hwif) ((hwif)->attr.ppf_idx) -#define SPHW_PCI_INTF_IDX(hwif) ((hwif)->attr.pci_intf_idx) - -#define SPHW_FUNC_TYPE(dev) ((dev)->hwif->attr.func_type) -#define SPHW_IS_PF(dev) (SPHW_FUNC_TYPE(dev) == TYPE_PF) -#define SPHW_IS_VF(dev) (SPHW_FUNC_TYPE(dev) == TYPE_VF) -#define SPHW_IS_PPF(dev) (SPHW_FUNC_TYPE(dev) == TYPE_PPF) - -u32 sphw_hwif_read_reg(struct sphw_hwif *hwif, u32 reg); - -void sphw_hwif_write_reg(struct sphw_hwif *hwif, u32 reg, u32 val); - -void sphw_set_pf_status(struct sphw_hwif *hwif, enum sphw_pf_status status); - -enum sphw_pf_status sphw_get_pf_status(struct sphw_hwif *hwif); - -void sphw_disable_doorbell(struct sphw_hwif *hwif); - -void sphw_enable_doorbell(struct sphw_hwif *hwif); - -int sphw_init_hwif(struct sphw_hwdev *hwdev, void *cfg_reg_base, void *intr_reg_base, - void *mgmt_regs_base, u64 db_base_phy, void *db_base, u64 db_dwqe_len); - -void sphw_free_hwif(struct sphw_hwdev *hwdev); - -u8 sphw_host_ppf_idx(void *hwdev, u8 host_id); - -u32 sphw_get_heartbeat_status(struct sphw_hwdev *hwdev); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mbox.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mbox.c deleted file mode 100644 index 694463ca0a93..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mbox.c +++ /dev/null @@ -1,1792 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt - -#include <linux/pci.h> -#include <linux/delay.h> -#include <linux/types.h> -#include <linux/semaphore.h> -#include <linux/spinlock.h> -#include <linux/workqueue.h> - -#include "sphw_hw.h" -#include "sphw_hwdev.h" -#include "sphw_csr.h" -#include "sphw_hwif.h" -#include "sphw_eqs.h" -#include "sphw_prof_adap.h" -#include "sphw_mbox.h" -#include "sphw_common.h" - -#define SPHW_MBOX_INT_DST_AEQN_SHIFT 10 -#define SPHW_MBOX_INT_SRC_RESP_AEQN_SHIFT 12 -#define SPHW_MBOX_INT_STAT_DMA_SHIFT 14 -/* The size of data to be send (unit of 4 bytes) */ -#define SPHW_MBOX_INT_TX_SIZE_SHIFT 20 -/* SO_RO(strong order, relax order) */ -#define SPHW_MBOX_INT_STAT_DMA_SO_RO_SHIFT 25 -#define SPHW_MBOX_INT_WB_EN_SHIFT 28 - -#define SPHW_MBOX_INT_DST_AEQN_MASK 0x3 -#define SPHW_MBOX_INT_SRC_RESP_AEQN_MASK 0x3 -#define SPHW_MBOX_INT_STAT_DMA_MASK 0x3F -#define SPHW_MBOX_INT_TX_SIZE_MASK 0x1F -#define SPHW_MBOX_INT_STAT_DMA_SO_RO_MASK 0x3 -#define SPHW_MBOX_INT_WB_EN_MASK 0x1 - -#define SPHW_MBOX_INT_SET(val, field) \ - (((val) & SPHW_MBOX_INT_##field##_MASK) << \ - SPHW_MBOX_INT_##field##_SHIFT) - -enum sphw_mbox_tx_status { - TX_NOT_DONE = 1, -}; - -#define SPHW_MBOX_CTRL_TRIGGER_AEQE_SHIFT 0 -/* specifies the issue request for the message data. - * 0 - Tx request is done; - * 1 - Tx request is in process. 
- */ -#define SPHW_MBOX_CTRL_TX_STATUS_SHIFT 1 -#define SPHW_MBOX_CTRL_DST_FUNC_SHIFT 16 - -#define SPHW_MBOX_CTRL_TRIGGER_AEQE_MASK 0x1 -#define SPHW_MBOX_CTRL_TX_STATUS_MASK 0x1 -#define SPHW_MBOX_CTRL_DST_FUNC_MASK 0x1FFF - -#define SPHW_MBOX_CTRL_SET(val, field) \ - (((val) & SPHW_MBOX_CTRL_##field##_MASK) << \ - SPHW_MBOX_CTRL_##field##_SHIFT) - -#define MBOX_SEGLEN_MASK SPHW_MSG_HEADER_SET(SPHW_MSG_HEADER_SEG_LEN_MASK, SEG_LEN) - -#define MBOX_MSG_POLLING_TIMEOUT 8000 -#define SPHW_MBOX_COMP_TIME 25000U - -#define MBOX_MAX_BUF_SZ 2048U -#define MBOX_HEADER_SZ 8 -#define SPHW_MBOX_DATA_SIZE (MBOX_MAX_BUF_SZ - MBOX_HEADER_SZ) - -/* MBOX size is 64B, 8B for mbox_header, 8B reserved */ -#define MBOX_SEG_LEN 48 -#define MBOX_SEG_LEN_ALIGN 4 -#define MBOX_WB_STATUS_LEN 16UL - -#define SEQ_ID_START_VAL 0 -#define SEQ_ID_MAX_VAL 42 -#define MBOX_LAST_SEG_MAX_LEN (MBOX_MAX_BUF_SZ - \ - SEQ_ID_MAX_VAL * MBOX_SEG_LEN) - -/* mbox write back status is 16B, only first 4B is used */ -#define MBOX_WB_STATUS_ERRCODE_MASK 0xFFFF -#define MBOX_WB_STATUS_MASK 0xFF -#define MBOX_WB_ERROR_CODE_MASK 0xFF00 -#define MBOX_WB_STATUS_FINISHED_SUCCESS 0xFF -#define MBOX_WB_STATUS_FINISHED_WITH_ERR 0xFE -#define MBOX_WB_STATUS_NOT_FINISHED 0x00 - -#define MBOX_STATUS_FINISHED(wb) \ - (((wb) & MBOX_WB_STATUS_MASK) != MBOX_WB_STATUS_NOT_FINISHED) -#define MBOX_STATUS_SUCCESS(wb) \ - (((wb) & MBOX_WB_STATUS_MASK) == MBOX_WB_STATUS_FINISHED_SUCCESS) -#define MBOX_STATUS_ERRCODE(wb) \ - ((wb) & MBOX_WB_ERROR_CODE_MASK) - -#define DST_AEQ_IDX_DEFAULT_VAL 0 -#define SRC_AEQ_IDX_DEFAULT_VAL 0 -#define NO_DMA_ATTRIBUTE_VAL 0 - -#define MBOX_MSG_NO_DATA_LEN 1 - -#define MBOX_BODY_FROM_HDR(header) ((u8 *)(header) + MBOX_HEADER_SZ) -#define MBOX_AREA(hwif) \ - ((hwif)->cfg_regs_base + SPHW_FUNC_CSR_MAILBOX_DATA_OFF) - -#define MBOX_DMA_MSG_QUEUE_DEPTH 32 - -#define MBOX_MQ_CI_OFFSET (SPHW_CFG_REGS_FLAG + SPHW_FUNC_CSR_MAILBOX_DATA_OFF + \ - MBOX_HEADER_SZ + MBOX_SEG_LEN) - -#define MBOX_MQ_SYNC_CI_SHIFT 0 -#define MBOX_MQ_ASYNC_CI_SHIFT 8 - -#define MBOX_MQ_SYNC_CI_MASK 0xFF -#define MBOX_MQ_ASYNC_CI_MASK 0xFF - -#define MBOX_MQ_CI_SET(val, field) \ - (((val) & MBOX_MQ_##field##_CI_MASK) << MBOX_MQ_##field##_CI_SHIFT) -#define MBOX_MQ_CI_GET(val, field) \ - (((val) >> MBOX_MQ_##field##_CI_SHIFT) & MBOX_MQ_##field##_CI_MASK) -#define MBOX_MQ_CI_CLEAR(val, field) \ - ((val) & (~(MBOX_MQ_##field##_CI_MASK << MBOX_MQ_##field##_CI_SHIFT))) - -#define IS_PF_OR_PPF_SRC(hwdev, src_func_idx) \ - ((src_func_idx) < SPHW_MAX_PF_NUM(hwdev)) - -#define MBOX_RESPONSE_ERROR 0x1 -#define MBOX_MSG_ID_MASK 0xF -#define MBOX_MSG_ID(func_to_func) ((func_to_func)->send_msg_id) -#define MBOX_MSG_ID_INC(func_to_func) \ - (MBOX_MSG_ID(func_to_func) = \ - (MBOX_MSG_ID(func_to_func) + 1) & MBOX_MSG_ID_MASK) - -/* max message counter wait to process for one function */ -#define SPHW_MAX_MSG_CNT_TO_PROCESS 10 - -#define MBOX_MSG_CHANNEL_STOP(func_to_func) \ - ((((func_to_func)->lock_channel_en) && \ - test_bit((func_to_func)->cur_msg_channel, \ - &(func_to_func)->channel_stop)) ? 
true : false) - -enum mbox_ordering_type { - STRONG_ORDER, -}; - -enum mbox_write_back_type { - WRITE_BACK = 1, -}; - -enum mbox_aeq_trig_type { - NOT_TRIGGER, - TRIGGER, -}; - -static int send_mbox_msg(struct sphw_mbox *func_to_func, u8 mod, u16 cmd, - void *msg, u16 msg_len, u16 dst_func, - enum sphw_msg_direction_type direction, - enum sphw_msg_ack_type ack_type, - struct mbox_msg_info *msg_info); - -struct sphw_msg_desc *get_mbox_msg_desc(struct sphw_mbox *func_to_func, - u64 dir, u64 src_func_id); - -/** - * sphw_register_ppf_mbox_cb - register mbox callback for ppf - * @hwdev: the pointer to hw device - * @mod: specific mod that the callback will handle - * @pri_handle specific mod's private data that will be used in callback - * @callback: callback function - * Return: 0 - success, negative - failure - */ -int sphw_register_ppf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, sphw_ppf_mbox_cb callback) -{ - struct sphw_mbox *func_to_func = NULL; - - if (mod >= SPHW_MOD_MAX || !hwdev) - return -EFAULT; - - func_to_func = ((struct sphw_hwdev *)hwdev)->func_to_func; - - func_to_func->ppf_mbox_cb[mod] = callback; - func_to_func->ppf_mbox_data[mod] = pri_handle; - - set_bit(SPHW_PPF_MBOX_CB_REG, &func_to_func->ppf_mbox_cb_state[mod]); - - return 0; -} - -/** - * sphw_register_pf_mbox_cb - register mbox callback for pf - * @hwdev: the pointer to hw device - * @mod: specific mod that the callback will handle - * @pri_handle specific mod's private data that will be used in callback - * @callback: callback function - * Return: 0 - success, negative - failure - */ -int sphw_register_pf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, sphw_pf_mbox_cb callback) -{ - struct sphw_mbox *func_to_func = NULL; - - if (mod >= SPHW_MOD_MAX || !hwdev) - return -EFAULT; - - func_to_func = ((struct sphw_hwdev *)hwdev)->func_to_func; - - func_to_func->pf_mbox_cb[mod] = callback; - func_to_func->pf_mbox_data[mod] = pri_handle; - - set_bit(SPHW_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]); - - return 0; -} - -/** - * sphw_register_vf_mbox_cb - register mbox callback for vf - * @hwdev: the pointer to hw device - * @mod: specific mod that the callback will handle - * @pri_handle specific mod's private data that will be used in callback - * @callback: callback function - * Return: 0 - success, negative - failure - */ -int sphw_register_vf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, sphw_vf_mbox_cb callback) -{ - struct sphw_mbox *func_to_func = NULL; - - if (mod >= SPHW_MOD_MAX || !hwdev) - return -EFAULT; - - func_to_func = ((struct sphw_hwdev *)hwdev)->func_to_func; - - func_to_func->vf_mbox_cb[mod] = callback; - func_to_func->vf_mbox_data[mod] = pri_handle; - - set_bit(SPHW_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]); - - return 0; -} - -/** - * sphw_register_ppf_to_pf_mbox_cb - register mbox callback for pf from ppf - * @hwdev: the pointer to hw device - * @mod: specific mod that the callback will handle - * @pri_handle specific mod's private data that will be used in callback - * @callback: callback function - * Return: 0 - success, negative - failure - */ -int sphw_register_ppf_to_pf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, - sphw_pf_recv_from_ppf_mbox_cb callback) -{ - struct sphw_mbox *func_to_func = NULL; - - if (mod >= SPHW_MOD_MAX || !hwdev) - return -EFAULT; - - func_to_func = ((struct sphw_hwdev *)hwdev)->func_to_func; - - func_to_func->pf_recv_ppf_mbox_cb[mod] = callback; - func_to_func->pf_recv_ppf_mbox_data[mod] = pri_handle; - - set_bit(SPHW_PPF_TO_PF_MBOX_CB_REG, - 
&func_to_func->ppf_to_pf_mbox_cb_state[mod]); - - return 0; -} - -/** - * sphw_unregister_ppf_mbox_cb - unregister the mbox callback for ppf - * @hwdev: the pointer to hw device - * @mod: specific mod that the callback will handle - * Return: - */ -void sphw_unregister_ppf_mbox_cb(void *hwdev, u8 mod) -{ - struct sphw_mbox *func_to_func = NULL; - - if (mod >= SPHW_MOD_MAX || !hwdev) - return; - - func_to_func = ((struct sphw_hwdev *)hwdev)->func_to_func; - - clear_bit(SPHW_PPF_MBOX_CB_REG, - &func_to_func->ppf_mbox_cb_state[mod]); - - while (test_bit(SPHW_PPF_MBOX_CB_RUNNING, - &func_to_func->ppf_mbox_cb_state[mod])) - usleep_range(900, 1000); - - func_to_func->ppf_mbox_data[mod] = NULL; - func_to_func->ppf_mbox_cb[mod] = NULL; -} - -/** - * sphw_unregister_ppf_mbox_cb - unregister the mbox callback for pf - * @hwdev: the pointer to hw device - * @mod: specific mod that the callback will handle - * Return: - */ -void sphw_unregister_pf_mbox_cb(void *hwdev, u8 mod) -{ - struct sphw_mbox *func_to_func = NULL; - - if (mod >= SPHW_MOD_MAX || !hwdev) - return; - - func_to_func = ((struct sphw_hwdev *)hwdev)->func_to_func; - - clear_bit(SPHW_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]); - - while (test_bit(SPHW_PF_MBOX_CB_RUNNING, - &func_to_func->pf_mbox_cb_state[mod])) - usleep_range(900, 1000); - - func_to_func->pf_mbox_data[mod] = NULL; - func_to_func->pf_mbox_cb[mod] = NULL; -} - -/** - * sphw_unregister_vf_mbox_cb - unregister the mbox callback for vf - * @hwdev: the pointer to hw device - * @mod: specific mod that the callback will handle - * Return: - */ -void sphw_unregister_vf_mbox_cb(void *hwdev, u8 mod) -{ - struct sphw_mbox *func_to_func = NULL; - - if (mod >= SPHW_MOD_MAX || !hwdev) - return; - - func_to_func = ((struct sphw_hwdev *)hwdev)->func_to_func; - - clear_bit(SPHW_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]); - - while (test_bit(SPHW_VF_MBOX_CB_RUNNING, - &func_to_func->vf_mbox_cb_state[mod])) - usleep_range(900, 1000); - - func_to_func->vf_mbox_data[mod] = NULL; - func_to_func->vf_mbox_cb[mod] = NULL; -} - -/** - * sphw_unregister_ppf_mbox_cb - unregister the mbox callback for pf from ppf - * @hwdev: the pointer to hw device - * @mod: specific mod that the callback will handle - * Return: - */ -void sphw_unregister_ppf_to_pf_mbox_cb(void *hwdev, u8 mod) -{ - struct sphw_mbox *func_to_func = NULL; - - if (mod >= SPHW_MOD_MAX || !hwdev) - return; - - func_to_func = ((struct sphw_hwdev *)hwdev)->func_to_func; - - clear_bit(SPHW_PPF_TO_PF_MBOX_CB_REG, - &func_to_func->ppf_to_pf_mbox_cb_state[mod]); - - while (test_bit(SPHW_PPF_TO_PF_MBOX_CB_RUNNIG, - &func_to_func->ppf_to_pf_mbox_cb_state[mod])) - usleep_range(900, 1000); - - func_to_func->pf_recv_ppf_mbox_data[mod] = NULL; - func_to_func->pf_recv_ppf_mbox_cb[mod] = NULL; -} - -static int recv_vf_mbox_handler(struct sphw_mbox *func_to_func, - struct sphw_recv_mbox *recv_mbox, - void *buf_out, u16 *out_size) -{ - sphw_vf_mbox_cb cb; - int ret; - - if (recv_mbox->mod >= SPHW_MOD_MAX) { - sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod = %hhu\n", - recv_mbox->mod); - return -EINVAL; - } - - set_bit(SPHW_VF_MBOX_CB_RUNNING, &func_to_func->vf_mbox_cb_state[recv_mbox->mod]); - - cb = func_to_func->vf_mbox_cb[recv_mbox->mod]; - if (cb && test_bit(SPHW_VF_MBOX_CB_REG, - &func_to_func->vf_mbox_cb_state[recv_mbox->mod])) { - ret = cb(func_to_func->hwdev, - func_to_func->vf_mbox_data[recv_mbox->mod], - recv_mbox->cmd, recv_mbox->msg, - recv_mbox->msg_len, buf_out, out_size); - } else { - 
sdk_warn(func_to_func->hwdev->dev_hdl, "VF mbox cb is not registered\n"); - ret = -EINVAL; - } - - clear_bit(SPHW_VF_MBOX_CB_RUNNING, - &func_to_func->vf_mbox_cb_state[recv_mbox->mod]); - - return ret; -} - -static int recv_pf_from_ppf_handler(struct sphw_mbox *func_to_func, - struct sphw_recv_mbox *recv_mbox, - void *buf_out, u16 *out_size) -{ - sphw_pf_recv_from_ppf_mbox_cb cb; - enum sphw_mod_type mod = recv_mbox->mod; - int ret; - - if (mod >= SPHW_MOD_MAX) { - sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod = %d\n", - mod); - return -EINVAL; - } - - set_bit(SPHW_PPF_TO_PF_MBOX_CB_RUNNIG, - &func_to_func->ppf_to_pf_mbox_cb_state[mod]); - - cb = func_to_func->pf_recv_ppf_mbox_cb[mod]; - if (cb && test_bit(SPHW_PPF_TO_PF_MBOX_CB_REG, - &func_to_func->ppf_to_pf_mbox_cb_state[mod])) { - ret = cb(func_to_func->hwdev, - func_to_func->pf_recv_ppf_mbox_data[mod], - recv_mbox->cmd, recv_mbox->msg, recv_mbox->msg_len, - buf_out, out_size); - } else { - sdk_warn(func_to_func->hwdev->dev_hdl, "PF receive ppf mailbox callback is not registered\n"); - ret = -EINVAL; - } - - clear_bit(SPHW_PPF_TO_PF_MBOX_CB_RUNNIG, - &func_to_func->ppf_to_pf_mbox_cb_state[mod]); - - return ret; -} - -static int recv_ppf_mbox_handler(struct sphw_mbox *func_to_func, - struct sphw_recv_mbox *recv_mbox, - u8 pf_id, void *buf_out, u16 *out_size) -{ - sphw_ppf_mbox_cb cb; - u16 vf_id = 0; - int ret; - - if (recv_mbox->mod >= SPHW_MOD_MAX) { - sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod = %hhu\n", - recv_mbox->mod); - return -EINVAL; - } - - set_bit(SPHW_PPF_MBOX_CB_RUNNING, - &func_to_func->ppf_mbox_cb_state[recv_mbox->mod]); - - cb = func_to_func->ppf_mbox_cb[recv_mbox->mod]; - if (cb && test_bit(SPHW_PPF_MBOX_CB_REG, - &func_to_func->ppf_mbox_cb_state[recv_mbox->mod])) { - ret = cb(func_to_func->hwdev, - func_to_func->ppf_mbox_data[recv_mbox->mod], - pf_id, vf_id, recv_mbox->cmd, recv_mbox->msg, - recv_mbox->msg_len, buf_out, out_size); - } else { - sdk_warn(func_to_func->hwdev->dev_hdl, "PPF mbox cb is not registered, mod = %hhu\n", - recv_mbox->mod); - ret = -EINVAL; - } - - clear_bit(SPHW_PPF_MBOX_CB_RUNNING, - &func_to_func->ppf_mbox_cb_state[recv_mbox->mod]); - - return ret; -} - -static int recv_pf_from_vf_mbox_handler(struct sphw_mbox *func_to_func, - struct sphw_recv_mbox *recv_mbox, - u16 src_func_idx, void *buf_out, - u16 *out_size) -{ - sphw_pf_mbox_cb cb; - u16 vf_id = 0; - int ret; - - if (recv_mbox->mod >= SPHW_MOD_MAX) { - sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod = %hhu\n", - recv_mbox->mod); - return -EINVAL; - } - - set_bit(SPHW_PF_MBOX_CB_RUNNING, - &func_to_func->pf_mbox_cb_state[recv_mbox->mod]); - - cb = func_to_func->pf_mbox_cb[recv_mbox->mod]; - if (cb && test_bit(SPHW_PF_MBOX_CB_REG, - &func_to_func->pf_mbox_cb_state[recv_mbox->mod])) { - vf_id = src_func_idx - - sphw_glb_pf_vf_offset(func_to_func->hwdev); - ret = cb(func_to_func->hwdev, - func_to_func->pf_mbox_data[recv_mbox->mod], - vf_id, recv_mbox->cmd, recv_mbox->msg, - recv_mbox->msg_len, buf_out, out_size); - } else { - sdk_warn(func_to_func->hwdev->dev_hdl, "PF mbox mod(0x%x) cb is not registered\n", - recv_mbox->mod); - ret = -EINVAL; - } - - clear_bit(SPHW_PF_MBOX_CB_RUNNING, - &func_to_func->pf_mbox_cb_state[recv_mbox->mod]); - - return ret; -} - -static void response_for_recv_func_mbox(struct sphw_mbox *func_to_func, - struct sphw_recv_mbox *recv_mbox, - int err, u16 out_size, u16 src_func_idx) -{ - struct mbox_msg_info msg_info = {0}; - - 
msg_info.msg_id = recv_mbox->msg_id; - if (err) - msg_info.status = SPHW_MBOX_PF_SEND_ERR; - - /* if not data need to response, set out_size to 1 */ - if (!out_size || err) - out_size = MBOX_MSG_NO_DATA_LEN; - - send_mbox_msg(func_to_func, recv_mbox->mod, recv_mbox->cmd, - recv_mbox->resp_buff, out_size, src_func_idx, - SPHW_MSG_RESPONSE, SPHW_MSG_NO_ACK, &msg_info); -} - -static void recv_func_mbox_handler(struct sphw_mbox *func_to_func, - struct sphw_recv_mbox *recv_mbox) -{ - struct sphw_hwdev *dev = func_to_func->hwdev; - void *buf_out = recv_mbox->resp_buff; - u16 src_func_idx = recv_mbox->src_func_idx; - u16 out_size = MBOX_MAX_BUF_SZ; - int err = 0; - - if (SPHW_IS_VF(dev)) { - err = recv_vf_mbox_handler(func_to_func, recv_mbox, buf_out, - &out_size); - } else { /* pf/ppf process */ - if (IS_PF_OR_PPF_SRC(dev, src_func_idx)) { - if (SPHW_IS_PPF(dev)) { - err = recv_ppf_mbox_handler(func_to_func, - recv_mbox, - (u8)src_func_idx, - buf_out, &out_size); - if (err) - goto out; - } else { - err = recv_pf_from_ppf_handler(func_to_func, - recv_mbox, - buf_out, - &out_size); - if (err) - goto out; - } - /* The source is neither PF nor PPF, so it is from VF */ - } else { - err = recv_pf_from_vf_mbox_handler(func_to_func, - recv_mbox, - src_func_idx, - buf_out, &out_size); - } - } - -out: - if (recv_mbox->ack_type == SPHW_MSG_ACK) - response_for_recv_func_mbox(func_to_func, recv_mbox, err, - out_size, src_func_idx); -} - -static struct sphw_recv_mbox *alloc_recv_mbox(void) -{ - struct sphw_recv_mbox *recv_msg = NULL; - - recv_msg = kzalloc(sizeof(*recv_msg), GFP_KERNEL); - if (!recv_msg) - return NULL; - - recv_msg->msg = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); - if (!recv_msg->msg) - goto alloc_msg_err; - - recv_msg->resp_buff = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); - if (!recv_msg->resp_buff) - goto alloc_resp_bff_err; - - return recv_msg; - -alloc_resp_bff_err: - kfree(recv_msg->msg); - -alloc_msg_err: - kfree(recv_msg); - - return NULL; -} - -static void free_recv_mbox(struct sphw_recv_mbox *recv_msg) -{ - kfree(recv_msg->resp_buff); - kfree(recv_msg->msg); - kfree(recv_msg); -} - -static void recv_func_mbox_work_handler(struct work_struct *work) -{ - struct sphw_mbox_work *mbox_work = - container_of(work, struct sphw_mbox_work, work); - - recv_func_mbox_handler(mbox_work->func_to_func, mbox_work->recv_mbox); - - atomic_dec(&mbox_work->msg_ch->recv_msg_cnt); - - free_recv_mbox(mbox_work->recv_mbox); - kfree(mbox_work); -} - -static void resp_mbox_handler(struct sphw_mbox *func_to_func, - struct sphw_msg_desc *msg_desc) -{ - spin_lock(&func_to_func->mbox_lock); - if (msg_desc->msg_info.msg_id == func_to_func->send_msg_id && - func_to_func->event_flag == EVENT_START) - func_to_func->event_flag = EVENT_SUCCESS; - else - sdk_err(func_to_func->hwdev->dev_hdl, - "Mbox response timeout, current send msg id(0x%x), recv msg id(0x%x), status(0x%x)\n", - func_to_func->send_msg_id, msg_desc->msg_info.msg_id, - msg_desc->msg_info.status); - spin_unlock(&func_to_func->mbox_lock); -} - -static void recv_mbox_msg_handler(struct sphw_mbox *func_to_func, - struct sphw_msg_desc *msg_desc, - u64 mbox_header) -{ - struct sphw_hwdev *hwdev = func_to_func->hwdev; - struct sphw_recv_mbox *recv_msg = NULL; - struct sphw_mbox_work *mbox_work = NULL; - struct sphw_msg_channel *msg_ch = - container_of(msg_desc, struct sphw_msg_channel, recv_msg); - u16 src_func_idx = SPHW_MSG_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); - - if (atomic_read(&msg_ch->recv_msg_cnt) > - SPHW_MAX_MSG_CNT_TO_PROCESS) { - 
sdk_warn(hwdev->dev_hdl, "This function(%u) have %d message wait to process, can't add to work queue\n", - src_func_idx, atomic_read(&msg_ch->recv_msg_cnt)); - return; - } - - recv_msg = alloc_recv_mbox(); - if (!recv_msg) { - sdk_err(hwdev->dev_hdl, "Failed to alloc receive mbox message buffer\n"); - return; - } - recv_msg->msg_len = msg_desc->msg_len; - memcpy(recv_msg->msg, msg_desc->msg, recv_msg->msg_len); - recv_msg->msg_id = msg_desc->msg_info.msg_id; - recv_msg->mod = SPHW_MSG_HEADER_GET(mbox_header, MODULE); - recv_msg->cmd = SPHW_MSG_HEADER_GET(mbox_header, CMD); - recv_msg->ack_type = SPHW_MSG_HEADER_GET(mbox_header, NO_ACK); - recv_msg->src_func_idx = src_func_idx; - - mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL); - if (!mbox_work) { - sdk_err(hwdev->dev_hdl, "Allocate mbox work memory failed.\n"); - free_recv_mbox(recv_msg); - return; - } - - atomic_inc(&msg_ch->recv_msg_cnt); - - mbox_work->func_to_func = func_to_func; - mbox_work->recv_mbox = recv_msg; - mbox_work->msg_ch = msg_ch; - - INIT_WORK(&mbox_work->work, recv_func_mbox_work_handler); - queue_work_on(sphw_get_work_cpu_affinity(hwdev, WORK_TYPE_MBOX), - func_to_func->workq, &mbox_work->work); -} - -static bool check_mbox_segment(struct sphw_mbox *func_to_func, - struct sphw_msg_desc *msg_desc, - u64 mbox_header) -{ - u8 seq_id, seg_len, msg_id, mod; - u16 src_func_idx, cmd; - - seq_id = SPHW_MSG_HEADER_GET(mbox_header, SEQID); - seg_len = SPHW_MSG_HEADER_GET(mbox_header, SEG_LEN); - msg_id = SPHW_MSG_HEADER_GET(mbox_header, MSG_ID); - mod = SPHW_MSG_HEADER_GET(mbox_header, MODULE); - cmd = SPHW_MSG_HEADER_GET(mbox_header, CMD); - src_func_idx = SPHW_MSG_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); - - if (seq_id > SEQ_ID_MAX_VAL || seg_len > MBOX_SEG_LEN || - (seq_id == SEQ_ID_MAX_VAL && seg_len > MBOX_LAST_SEG_MAX_LEN)) - goto seg_err; - - if (seq_id == 0) { - msg_desc->seq_id = seq_id; - msg_desc->msg_info.msg_id = msg_id; - msg_desc->mod = mod; - msg_desc->cmd = cmd; - } else { - if (seq_id != msg_desc->seq_id + 1 || - msg_id != msg_desc->msg_info.msg_id || - mod != msg_desc->mod || cmd != msg_desc->cmd) - goto seg_err; - - msg_desc->seq_id = seq_id; - } - - return true; - -seg_err: - sdk_err(func_to_func->hwdev->dev_hdl, - "Mailbox segment check failed, src func id: 0x%x, front seg info: seq id: 0x%x, msg id: 0x%x, mod: 0x%x, cmd: 0x%x\n", - src_func_idx, msg_desc->seq_id, msg_desc->msg_info.msg_id, - msg_desc->mod, msg_desc->cmd); - sdk_err(func_to_func->hwdev->dev_hdl, - "Current seg info: seg len: 0x%x, seq id: 0x%x, msg id: 0x%x, mod: 0x%x, cmd: 0x%x\n", - seg_len, seq_id, msg_id, mod, cmd); - - return false; -} - -static void recv_mbox_handler(struct sphw_mbox *func_to_func, - void *header, struct sphw_msg_desc *msg_desc) -{ - u64 mbox_header = *((u64 *)header); - void *mbox_body = MBOX_BODY_FROM_HDR(header); - u8 seq_id, seg_len; - int pos; - - if (!check_mbox_segment(func_to_func, msg_desc, mbox_header)) { - msg_desc->seq_id = SEQ_ID_MAX_VAL; - return; - } - - seq_id = SPHW_MSG_HEADER_GET(mbox_header, SEQID); - seg_len = SPHW_MSG_HEADER_GET(mbox_header, SEG_LEN); - - pos = seq_id * MBOX_SEG_LEN; - memcpy((u8 *)msg_desc->msg + pos, mbox_body, seg_len); - - if (!SPHW_MSG_HEADER_GET(mbox_header, LAST)) - return; - - msg_desc->msg_len = SPHW_MSG_HEADER_GET(mbox_header, MSG_LEN); - msg_desc->msg_info.status = SPHW_MSG_HEADER_GET(mbox_header, STATUS); - - if (SPHW_MSG_HEADER_GET(mbox_header, DIRECTION) == - SPHW_MSG_RESPONSE) { - resp_mbox_handler(func_to_func, msg_desc); - return; - } - - 
recv_mbox_msg_handler(func_to_func, msg_desc, mbox_header); -} - -void sphw_mbox_func_aeqe_handler(void *handle, u8 *header, u8 size) -{ - struct sphw_mbox *func_to_func = NULL; - struct sphw_msg_desc *msg_desc = NULL; - u64 mbox_header = *((u64 *)header); - u64 src, dir; - - func_to_func = ((struct sphw_hwdev *)handle)->func_to_func; - - dir = SPHW_MSG_HEADER_GET(mbox_header, DIRECTION); - src = SPHW_MSG_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); - - msg_desc = get_mbox_msg_desc(func_to_func, dir, src); - if (!msg_desc) { - sdk_err(func_to_func->hwdev->dev_hdl, - "Mailbox source function id: %u is invalid for current function\n", - (u32)src); - return; - } - - recv_mbox_handler(func_to_func, (u64 *)header, msg_desc); -} - -static int init_mbox_dma_queue(struct sphw_hwdev *hwdev, struct mbox_dma_queue *mq) -{ - u32 size; - - mq->depth = MBOX_DMA_MSG_QUEUE_DEPTH; - mq->prod_idx = 0; - mq->cons_idx = 0; - - size = mq->depth * MBOX_MAX_BUF_SZ; - mq->dma_buff_vaddr = dma_alloc_coherent(hwdev->dev_hdl, size, &mq->dma_buff_paddr, - GFP_KERNEL); - if (!mq->dma_buff_vaddr) { - sdk_err(hwdev->dev_hdl, "Failed to alloc dma_buffer\n"); - return -ENOMEM; - } - - return 0; -} - -static void deinit_mbox_dma_queue(struct sphw_hwdev *hwdev, struct mbox_dma_queue *mq) -{ - dma_free_coherent(hwdev->dev_hdl, mq->depth * MBOX_MAX_BUF_SZ, - mq->dma_buff_vaddr, mq->dma_buff_paddr); -} - -static int sphw_init_mbox_dma_queue(struct sphw_mbox *func_to_func) -{ - u32 val; - int err; - - err = init_mbox_dma_queue(func_to_func->hwdev, &func_to_func->sync_msg_queue); - if (err) - return err; - - err = init_mbox_dma_queue(func_to_func->hwdev, &func_to_func->async_msg_queue); - if (err) { - deinit_mbox_dma_queue(func_to_func->hwdev, &func_to_func->sync_msg_queue); - return err; - } - - val = sphw_hwif_read_reg(func_to_func->hwdev->hwif, MBOX_MQ_CI_OFFSET); - val = MBOX_MQ_CI_CLEAR(val, SYNC); - val = MBOX_MQ_CI_CLEAR(val, ASYNC); - sphw_hwif_write_reg(func_to_func->hwdev->hwif, MBOX_MQ_CI_OFFSET, val); - - return 0; -} - -static void sphw_deinit_mbox_dma_queue(struct sphw_mbox *func_to_func) -{ - deinit_mbox_dma_queue(func_to_func->hwdev, &func_to_func->sync_msg_queue); - deinit_mbox_dma_queue(func_to_func->hwdev, &func_to_func->async_msg_queue); -} - -#define MBOX_DMA_MSG_INIT_XOR_VAL 0x5a5a5a5a -static u32 mbox_dma_msg_xor(u32 *data, u16 msg_len) -{ - u32 xor = MBOX_DMA_MSG_INIT_XOR_VAL; - u16 dw_len = msg_len / sizeof(u32); - u16 i; - - for (i = 0; i < dw_len; i++) - xor ^= data[i]; - - return xor; -} - -#define MQ_ID_MASK(mq, idx) ((idx) & ((mq)->depth - 1)) -#define IS_MSG_QUEUE_FULL(mq) (MQ_ID_MASK(mq, (mq)->prod_idx + 1) == \ - MQ_ID_MASK(mq, (mq)->cons_idx)) - -static int mbox_prepare_dma_entry(struct sphw_mbox *func_to_func, struct mbox_dma_queue *mq, - struct mbox_dma_msg *dma_msg, void *msg, u16 msg_len) -{ - u64 dma_addr, offset; - void *dma_vaddr; - - if (IS_MSG_QUEUE_FULL(mq)) { - sdk_err(func_to_func->hwdev->dev_hdl, "Mbox sync message queue is busy, pi: %u, ci: %u\n", - mq->prod_idx, MQ_ID_MASK(mq, mq->cons_idx)); - return -EBUSY; - } - - /* copy data to DMA buffer */ - offset = mq->prod_idx * MBOX_MAX_BUF_SZ; - dma_vaddr = (u8 *)mq->dma_buff_vaddr + offset; - memcpy(dma_vaddr, msg, msg_len); - dma_addr = mq->dma_buff_paddr + offset; - dma_msg->dma_addr_high = upper_32_bits(dma_addr); - dma_msg->dma_addr_low = lower_32_bits(dma_addr); - dma_msg->msg_len = msg_len; - dma_msg->xor = mbox_dma_msg_xor(dma_vaddr, ALIGN(msg_len, sizeof(u32))); - - mq->prod_idx++; - mq->prod_idx = MQ_ID_MASK(mq, mq->prod_idx); 
- - return 0; -} - -static int mbox_prepare_dma_msg(struct sphw_mbox *func_to_func, enum sphw_msg_ack_type ack_type, - struct mbox_dma_msg *dma_msg, void *msg, u16 msg_len) -{ - struct mbox_dma_queue *mq = NULL; - u32 val; - - val = sphw_hwif_read_reg(func_to_func->hwdev->hwif, MBOX_MQ_CI_OFFSET); - if (ack_type == SPHW_MSG_ACK) { - mq = &func_to_func->sync_msg_queue; - mq->cons_idx = MBOX_MQ_CI_GET(val, SYNC); - } else { - mq = &func_to_func->async_msg_queue; - mq->cons_idx = MBOX_MQ_CI_GET(val, ASYNC); - } - - return mbox_prepare_dma_entry(func_to_func, mq, dma_msg, msg, msg_len); -} - -static void clear_mbox_status(struct sphw_send_mbox *mbox) -{ - *mbox->wb_status = 0; - - /* clear mailbox write back status */ - wmb(); -} - -static void mbox_copy_header(struct sphw_hwdev *hwdev, - struct sphw_send_mbox *mbox, u64 *header) -{ - u32 *data = (u32 *)header; - u32 i, idx_max = MBOX_HEADER_SZ / sizeof(u32); - - for (i = 0; i < idx_max; i++) - __raw_writel(cpu_to_be32(*(data + i)), mbox->data + i * sizeof(u32)); -} - -static void mbox_copy_send_data(struct sphw_hwdev *hwdev, struct sphw_send_mbox *mbox, void *seg, - u16 seg_len) -{ - u32 *data = seg; - u32 data_len, chk_sz = sizeof(u32); - u32 i, idx_max; - - data_len = seg_len; - idx_max = ALIGN(data_len, chk_sz) / chk_sz; - - for (i = 0; i < idx_max; i++) - __raw_writel(cpu_to_be32(*(data + i)), - mbox->data + MBOX_HEADER_SZ + i * sizeof(u32)); -} - -static void write_mbox_msg_attr(struct sphw_mbox *func_to_func, - u16 dst_func, u16 dst_aeqn, u16 seg_len) -{ - u32 mbox_int, mbox_ctrl; - - /* for VF to PF's message, dest func id will self-learning by HW */ - if (SPHW_IS_VF(func_to_func->hwdev) && - dst_func != SPHW_MGMT_SRC_ID) - dst_func = 0; /* the destination is the VF's PF */ - - mbox_int = SPHW_MBOX_INT_SET(dst_aeqn, DST_AEQN) | - SPHW_MBOX_INT_SET(0, SRC_RESP_AEQN) | - SPHW_MBOX_INT_SET(NO_DMA_ATTRIBUTE_VAL, STAT_DMA) | - SPHW_MBOX_INT_SET(ALIGN(seg_len + MBOX_HEADER_SZ, - MBOX_SEG_LEN_ALIGN) >> 2, TX_SIZE) | - SPHW_MBOX_INT_SET(STRONG_ORDER, STAT_DMA_SO_RO) | - SPHW_MBOX_INT_SET(WRITE_BACK, WB_EN); - - sphw_hwif_write_reg(func_to_func->hwdev->hwif, SPHW_FUNC_CSR_MAILBOX_INT_OFFSET_OFF, - mbox_int); - - wmb(); /* writing the mbox int attributes */ - mbox_ctrl = SPHW_MBOX_CTRL_SET(TX_NOT_DONE, TX_STATUS); - - mbox_ctrl |= SPHW_MBOX_CTRL_SET(NOT_TRIGGER, TRIGGER_AEQE); - - mbox_ctrl |= SPHW_MBOX_CTRL_SET(dst_func, DST_FUNC); - - sphw_hwif_write_reg(func_to_func->hwdev->hwif, - SPHW_FUNC_CSR_MAILBOX_CONTROL_OFF, mbox_ctrl); -} - -void dump_mbox_reg(struct sphw_hwdev *hwdev) -{ - u32 val; - - val = sphw_hwif_read_reg(hwdev->hwif, SPHW_FUNC_CSR_MAILBOX_CONTROL_OFF); - sdk_err(hwdev->dev_hdl, "Mailbox control reg: 0x%x\n", val); - val = sphw_hwif_read_reg(hwdev->hwif, SPHW_FUNC_CSR_MAILBOX_INT_OFFSET_OFF); - sdk_err(hwdev->dev_hdl, "Mailbox interrupt offset: 0x%x\n", val); -} - -static u16 get_mbox_status(struct sphw_send_mbox *mbox) -{ - /* write back is 16B, but only use first 4B */ - u64 wb_val = be64_to_cpu(*mbox->wb_status); - - rmb(); /* verify reading before check */ - - return (u16)(wb_val & MBOX_WB_STATUS_ERRCODE_MASK); -} - -enum sphw_wait_return check_mbox_wb_status(void *priv_data) -{ - struct sphw_mbox *func_to_func = priv_data; - u16 wb_status; - - if (MBOX_MSG_CHANNEL_STOP(func_to_func)) - return WAIT_PROCESS_ERR; - - wb_status = get_mbox_status(&func_to_func->send_mbox); - - return MBOX_STATUS_FINISHED(wb_status) ? 
- WAIT_PROCESS_CPL : WAIT_PROCESS_WAITING; -} - -static int send_mbox_seg(struct sphw_mbox *func_to_func, u64 header, - u16 dst_func, void *seg, u16 seg_len, void *msg_info) -{ - struct sphw_send_mbox *send_mbox = &func_to_func->send_mbox; - struct sphw_hwdev *hwdev = func_to_func->hwdev; - u8 num_aeqs = hwdev->hwif->attr.num_aeqs; - u16 dst_aeqn, wb_status = 0, errcode; - u16 seq_dir = SPHW_MSG_HEADER_GET(header, DIRECTION); - int err; - - /* mbox to mgmt cpu, hardware don't care dst aeq id*/ - if (num_aeqs > SPHW_MBOX_RSP_MSG_AEQ) - dst_aeqn = (seq_dir == SPHW_MSG_DIRECT_SEND) ? - SPHW_ASYNC_MSG_AEQ : SPHW_MBOX_RSP_MSG_AEQ; - else - dst_aeqn = 0; - - clear_mbox_status(send_mbox); - - mbox_copy_header(hwdev, send_mbox, &header); - - mbox_copy_send_data(hwdev, send_mbox, seg, seg_len); - - write_mbox_msg_attr(func_to_func, dst_func, dst_aeqn, seg_len); - - wmb(); /* writing the mbox msg attributes */ - - err = sphw_wait_for_timeout(func_to_func, check_mbox_wb_status, - MBOX_MSG_POLLING_TIMEOUT, USEC_PER_MSEC); - wb_status = get_mbox_status(send_mbox); - if (err) { - sdk_err(hwdev->dev_hdl, "Send mailbox segment timeout, wb status: 0x%x\n", - wb_status); - dump_mbox_reg(hwdev); - return -ETIMEDOUT; - } - - if (!MBOX_STATUS_SUCCESS(wb_status)) { - sdk_err(hwdev->dev_hdl, "Send mailbox segment to function %u error, wb status: 0x%x\n", - dst_func, wb_status); - errcode = MBOX_STATUS_ERRCODE(wb_status); - return errcode ? errcode : -EFAULT; - } - - return 0; -} - -static int send_mbox_msg(struct sphw_mbox *func_to_func, u8 mod, u16 cmd, - void *msg, u16 msg_len, u16 dst_func, - enum sphw_msg_direction_type direction, - enum sphw_msg_ack_type ack_type, - struct mbox_msg_info *msg_info) -{ - struct sphw_hwdev *hwdev = func_to_func->hwdev; - struct mbox_dma_msg dma_msg = {0}; - enum sphw_data_type data_type = SPHW_DATA_INLINE; - int err = 0; - u32 seq_id = 0; - u16 seg_len = MBOX_SEG_LEN; - u16 rsp_aeq_id, left; - u8 *msg_seg = NULL; - u64 header = 0; - - if (hwdev->hwif->attr.num_aeqs >= 2) - rsp_aeq_id = SPHW_MBOX_RSP_MSG_AEQ; - else - rsp_aeq_id = 0; - - mutex_lock(&func_to_func->msg_send_lock); - - if (IS_DMA_MBX_MSG(dst_func)) { - err = mbox_prepare_dma_msg(func_to_func, ack_type, &dma_msg, msg, msg_len); - if (err) - goto send_err; - - msg = &dma_msg; - msg_len = sizeof(dma_msg); - data_type = SPHW_DATA_DMA; - } - - msg_seg = (u8 *)msg; - left = msg_len; - - header = SPHW_MSG_HEADER_SET(msg_len, MSG_LEN) | - SPHW_MSG_HEADER_SET(mod, MODULE) | - SPHW_MSG_HEADER_SET(seg_len, SEG_LEN) | - SPHW_MSG_HEADER_SET(ack_type, NO_ACK) | - SPHW_MSG_HEADER_SET(data_type, DATA_TYPE) | - SPHW_MSG_HEADER_SET(SEQ_ID_START_VAL, SEQID) | - SPHW_MSG_HEADER_SET(NOT_LAST_SEGMENT, LAST) | - SPHW_MSG_HEADER_SET(direction, DIRECTION) | - SPHW_MSG_HEADER_SET(cmd, CMD) | - /* The vf's offset to it's associated pf */ - SPHW_MSG_HEADER_SET(msg_info->msg_id, MSG_ID) | - SPHW_MSG_HEADER_SET(rsp_aeq_id, AEQ_ID) | - SPHW_MSG_HEADER_SET(SPHW_MSG_FROM_MBOX, SOURCE) | - SPHW_MSG_HEADER_SET(!!msg_info->status, STATUS) | - SPHW_MSG_HEADER_SET(sphw_global_func_id(hwdev), SRC_GLB_FUNC_IDX); - - while (!(SPHW_MSG_HEADER_GET(header, LAST))) { - if (left <= MBOX_SEG_LEN) { - header &= ~MBOX_SEGLEN_MASK; - header |= SPHW_MSG_HEADER_SET(left, SEG_LEN); - header |= SPHW_MSG_HEADER_SET(LAST_SEGMENT, LAST); - - seg_len = left; - } - - err = send_mbox_seg(func_to_func, header, dst_func, msg_seg, - seg_len, msg_info); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to send mbox seg, seq_id=0x%llx\n", - SPHW_MSG_HEADER_GET(header, SEQID)); - 
goto send_err; - } - - left -= MBOX_SEG_LEN; - msg_seg += MBOX_SEG_LEN; - - seq_id++; - header &= ~(SPHW_MSG_HEADER_SET(SPHW_MSG_HEADER_SEQID_MASK, SEQID)); - header |= SPHW_MSG_HEADER_SET(seq_id, SEQID); - } - -send_err: - mutex_unlock(&func_to_func->msg_send_lock); - - return err; -} - -static void set_mbox_to_func_event(struct sphw_mbox *func_to_func, - enum mbox_event_state event_flag) -{ - spin_lock(&func_to_func->mbox_lock); - func_to_func->event_flag = event_flag; - spin_unlock(&func_to_func->mbox_lock); -} - -static enum sphw_wait_return check_mbox_msg_finish(void *priv_data) -{ - struct sphw_mbox *func_to_func = priv_data; - - if (MBOX_MSG_CHANNEL_STOP(func_to_func)) - return WAIT_PROCESS_ERR; - - return (func_to_func->event_flag == EVENT_SUCCESS) ? - WAIT_PROCESS_CPL : WAIT_PROCESS_WAITING; -} - -static int wait_mbox_msg_completion(struct sphw_mbox *func_to_func, - u32 timeout) -{ - int err; - - timeout = timeout ? timeout : SPHW_MBOX_COMP_TIME; - err = sphw_wait_for_timeout(func_to_func, check_mbox_msg_finish, - timeout, USEC_PER_MSEC); - if (err) { - set_mbox_to_func_event(func_to_func, EVENT_TIMEOUT); - return -ETIMEDOUT; - } - - set_mbox_to_func_event(func_to_func, EVENT_END); - - return 0; -} - -#define TRY_MBOX_LOCK_SLEPP 1000 -static int send_mbox_msg_lock(struct sphw_mbox *func_to_func, u16 channel) -{ - if (!func_to_func->lock_channel_en) { - mutex_lock(&func_to_func->mbox_send_lock); - return 0; - } - - while (!test_bit(channel, &func_to_func->channel_stop)) { - if (mutex_trylock(&func_to_func->mbox_send_lock)) - return 0; - - usleep_range(TRY_MBOX_LOCK_SLEPP - 1, TRY_MBOX_LOCK_SLEPP); - } - - return -EAGAIN; -} - -static void send_mbox_msg_unlock(struct sphw_mbox *func_to_func) -{ - mutex_unlock(&func_to_func->mbox_send_lock); -} - -int sphw_mbox_to_func(struct sphw_mbox *func_to_func, u8 mod, u16 cmd, u16 dst_func, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel) -{ - /* use mbox_resp to hole data which responsed from other function */ - struct sphw_msg_desc *msg_desc = NULL; - struct mbox_msg_info msg_info = {0}; - int err; - - if (!func_to_func->hwdev->chip_present_flag) - return -EPERM; - - /* expect response message */ - msg_desc = get_mbox_msg_desc(func_to_func, SPHW_MSG_RESPONSE, - dst_func); - if (!msg_desc) - return -EFAULT; - - err = send_mbox_msg_lock(func_to_func, channel); - if (err) - return err; - - func_to_func->cur_msg_channel = channel; - msg_info.msg_id = MBOX_MSG_ID_INC(func_to_func); - - set_mbox_to_func_event(func_to_func, EVENT_START); - - err = send_mbox_msg(func_to_func, mod, cmd, buf_in, in_size, dst_func, - SPHW_MSG_DIRECT_SEND, SPHW_MSG_ACK, &msg_info); - if (err) { - sdk_err(func_to_func->hwdev->dev_hdl, "Send mailbox mod %u, cmd %u failed, msg_id: %u, err: %d\n", - mod, cmd, msg_info.msg_id, err); - set_mbox_to_func_event(func_to_func, EVENT_FAIL); - goto send_err; - } - - if (wait_mbox_msg_completion(func_to_func, timeout)) { - sdk_err(func_to_func->hwdev->dev_hdl, - "Send mbox msg timeout, msg_id: %u\n", msg_info.msg_id); - sphw_dump_aeq_info(func_to_func->hwdev); - err = -ETIMEDOUT; - goto send_err; - } - - if (mod != msg_desc->mod || cmd != msg_desc->cmd) { - sdk_err(func_to_func->hwdev->dev_hdl, - "Invalid response mbox message, mod: 0x%x, cmd: 0x%x, expect mod: 0x%x, cmd: 0x%x\n", - msg_desc->mod, msg_desc->cmd, mod, cmd); - err = -EFAULT; - goto send_err; - } - - if (msg_desc->msg_info.status) { - err = msg_desc->msg_info.status; - goto send_err; - } - - if (buf_out && out_size) { - if (*out_size 
< msg_desc->msg_len) { - sdk_err(func_to_func->hwdev->dev_hdl, - "Invalid response mbox message length: %u for mod %d cmd %u, should less than: %u\n", - msg_desc->msg_len, mod, cmd, *out_size); - err = -EFAULT; - goto send_err; - } - - if (msg_desc->msg_len) - memcpy(buf_out, msg_desc->msg, msg_desc->msg_len); - - *out_size = msg_desc->msg_len; - } - -send_err: - send_mbox_msg_unlock(func_to_func); - - return err; -} - -static int mbox_func_params_valid(struct sphw_mbox *func_to_func, - void *buf_in, u16 in_size, u16 channel) -{ - if (!buf_in || !in_size) - return -EINVAL; - - if (in_size > SPHW_MBOX_DATA_SIZE) { - sdk_err(func_to_func->hwdev->dev_hdl, - "Mbox msg len %u exceed limit: [1, %u]\n", - in_size, SPHW_MBOX_DATA_SIZE); - return -EINVAL; - } - - if (channel >= SPHW_CHANNEL_MAX) { - sdk_err(func_to_func->hwdev->dev_hdl, - "Invalid channel id: 0x%x\n", channel); - return -EINVAL; - } - - return 0; -} - -int sphw_mbox_to_func_no_ack(struct sphw_hwdev *hwdev, u16 func_idx, u8 mod, u16 cmd, - void *buf_in, u16 in_size, u16 channel) -{ - struct mbox_msg_info msg_info = {0}; - int err = mbox_func_params_valid(hwdev->func_to_func, buf_in, in_size, - channel); - - if (err) - return err; - - err = send_mbox_msg_lock(hwdev->func_to_func, channel); - if (err) - return err; - - err = send_mbox_msg(hwdev->func_to_func, mod, cmd, buf_in, in_size, - func_idx, SPHW_MSG_DIRECT_SEND, - SPHW_MSG_NO_ACK, &msg_info); - if (err) - sdk_err(hwdev->dev_hdl, "Send mailbox no ack failed\n"); - - send_mbox_msg_unlock(hwdev->func_to_func); - - return err; -} - -int sphw_send_mbox_to_mgmt(struct sphw_hwdev *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size, u32 timeout, u16 channel) -{ - struct sphw_mbox *func_to_func = hwdev->func_to_func; - int err = mbox_func_params_valid(func_to_func, buf_in, in_size, channel); - - if (err) - return err; - - return sphw_mbox_to_func(func_to_func, mod, cmd, SPHW_MGMT_SRC_ID, - buf_in, in_size, buf_out, out_size, timeout, channel); -} - -void sphw_response_mbox_to_mgmt(struct sphw_hwdev *hwdev, u8 mod, u16 cmd, - void *buf_in, u16 in_size, u16 msg_id) -{ - struct mbox_msg_info msg_info; - - msg_info.msg_id = (u8)msg_id; - msg_info.status = 0; - - send_mbox_msg(hwdev->func_to_func, mod, cmd, buf_in, in_size, - SPHW_MGMT_SRC_ID, SPHW_MSG_RESPONSE, - SPHW_MSG_NO_ACK, &msg_info); -} - -int sphw_send_mbox_to_mgmt_no_ack(struct sphw_hwdev *hwdev, u8 mod, u16 cmd, - void *buf_in, u16 in_size, u16 channel) -{ - struct sphw_mbox *func_to_func = hwdev->func_to_func; - int err = mbox_func_params_valid(func_to_func, buf_in, in_size, - channel); - - if (err) - return err; - - return sphw_mbox_to_func_no_ack(hwdev, SPHW_MGMT_SRC_ID, mod, cmd, - buf_in, in_size, channel); -} - -int sphw_mbox_ppf_to_host(void *hwdev, u8 mod, u16 cmd, u8 host_id, - void *buf_in, u16 in_size, void *buf_out, - u16 *out_size, u32 timeout, u16 channel) -{ - struct sphw_hwdev *dev = hwdev; - u16 dst_ppf_func; - int err; - - if (!hwdev) - return -EINVAL; - - if (!(dev->chip_present_flag)) - return -EPERM; - - err = mbox_func_params_valid(dev->func_to_func, buf_in, in_size, - channel); - if (err) - return err; - - if (!SPHW_IS_PPF(dev)) { - sdk_err(dev->dev_hdl, "Params error, only ppf support send mbox to ppf. 
func_type: %d\n", - sphw_func_type(dev)); - return -EINVAL; - } - - if (host_id >= SPHW_MAX_HOST_NUM(dev) || - host_id == SPHW_PCI_INTF_IDX(dev->hwif)) { - sdk_err(dev->dev_hdl, "Params error, host id: %u\n", host_id); - return -EINVAL; - } - - dst_ppf_func = sphw_host_ppf_idx(dev, host_id); - if (dst_ppf_func >= SPHW_MAX_PF_NUM(dev)) { - sdk_err(dev->dev_hdl, "Dest host(%u) have not elect ppf(0x%x).\n", - host_id, dst_ppf_func); - return -EINVAL; - } - - return sphw_mbox_to_func(dev->func_to_func, mod, cmd, dst_ppf_func, buf_in, in_size, - buf_out, out_size, timeout, channel); -} - -int sphw_mbox_to_pf(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, void *buf_out, - u16 *out_size, u32 timeout, u16 channel) -{ - struct sphw_hwdev *dev = hwdev; - int err; - - if (!hwdev) - return -EINVAL; - - if (!(dev->chip_present_flag)) - return -EPERM; - - err = mbox_func_params_valid(dev->func_to_func, buf_in, in_size, - channel); - if (err) - return err; - - if (!SPHW_IS_VF(dev)) { - sdk_err(dev->dev_hdl, "Params error, func_type: %d\n", - sphw_func_type(dev)); - return -EINVAL; - } - - return sphw_mbox_to_func(dev->func_to_func, mod, cmd, sphw_pf_id_of_vf(dev), buf_in, - in_size, buf_out, out_size, timeout, channel); -} - -int sphw_mbox_to_vf(void *hwdev, u16 vf_id, u8 mod, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size, u32 timeout, u16 channel) -{ - struct sphw_mbox *func_to_func = NULL; - int err = 0; - u16 dst_func_idx; - - if (!hwdev) - return -EINVAL; - - func_to_func = ((struct sphw_hwdev *)hwdev)->func_to_func; - err = mbox_func_params_valid(func_to_func, buf_in, in_size, channel); - if (err) - return err; - - if (SPHW_IS_VF((struct sphw_hwdev *)hwdev)) { - sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, "Params error, func_type: %d\n", - sphw_func_type(hwdev)); - return -EINVAL; - } - - if (!vf_id) { - sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, - "VF id(%u) error!\n", vf_id); - return -EINVAL; - } - - /* vf_offset_to_pf + vf_id is the vf's global function id of vf in - * this pf - */ - dst_func_idx = sphw_glb_pf_vf_offset(hwdev) + vf_id; - - return sphw_mbox_to_func(func_to_func, mod, cmd, dst_func_idx, buf_in, - in_size, buf_out, out_size, timeout, channel); -} - -void sphw_mbox_enable_channel_lock(struct sphw_hwdev *hwdev, bool enable) -{ - hwdev->func_to_func->lock_channel_en = enable; - - sdk_info(hwdev->dev_hdl, "%s mbox channel lock\n", - enable ? 
"Enable" : "Disable"); -} - -static int alloc_mbox_msg_channel(struct sphw_msg_channel *msg_ch) -{ - msg_ch->resp_msg.msg = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); - if (!msg_ch->resp_msg.msg) - return -ENOMEM; - - msg_ch->recv_msg.msg = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); - if (!msg_ch->recv_msg.msg) { - kfree(msg_ch->resp_msg.msg); - return -ENOMEM; - } - - msg_ch->resp_msg.seq_id = SEQ_ID_MAX_VAL; - msg_ch->recv_msg.seq_id = SEQ_ID_MAX_VAL; - atomic_set(&msg_ch->recv_msg_cnt, 0); - - return 0; -} - -static void free_mbox_msg_channel(struct sphw_msg_channel *msg_ch) -{ - kfree(msg_ch->recv_msg.msg); - kfree(msg_ch->resp_msg.msg); -} - -static int init_mgmt_msg_channel(struct sphw_mbox *func_to_func) -{ - int err; - - err = alloc_mbox_msg_channel(&func_to_func->mgmt_msg); - if (err) { - sdk_err(func_to_func->hwdev->dev_hdl, "Failed to alloc mgmt message channel\n"); - return err; - } - - err = sphw_init_mbox_dma_queue(func_to_func); - if (err) { - sdk_err(func_to_func->hwdev->dev_hdl, "Failed to init mbox dma queue\n"); - free_mbox_msg_channel(&func_to_func->mgmt_msg); - } - - return err; -} - -static void deinit_mgmt_msg_channel(struct sphw_mbox *func_to_func) -{ - sphw_deinit_mbox_dma_queue(func_to_func); - free_mbox_msg_channel(&func_to_func->mgmt_msg); -} - -int sphw_mbox_init_host_msg_channel(struct sphw_hwdev *hwdev) -{ - struct sphw_mbox *func_to_func = hwdev->func_to_func; - u8 host_num = SPHW_MAX_HOST_NUM(hwdev); - int i, host_id, err; - - if (host_num == 0) - return 0; - - func_to_func->host_msg = kcalloc(host_num, - sizeof(*func_to_func->host_msg), - GFP_KERNEL); - if (!func_to_func->host_msg) { - sdk_err(func_to_func->hwdev->dev_hdl, "Failed to alloc host message array\n"); - return -ENOMEM; - } - - for (host_id = 0; host_id < host_num; host_id++) { - err = alloc_mbox_msg_channel(&func_to_func->host_msg[host_id]); - if (err) { - sdk_err(func_to_func->hwdev->dev_hdl, - "Failed to alloc host %d message channel\n", - host_id); - goto alloc_msg_ch_err; - } - } - - func_to_func->support_h2h_msg = true; - - return 0; - -alloc_msg_ch_err: - for (i = 0; i < host_id; i++) - free_mbox_msg_channel(&func_to_func->host_msg[i]); - - kfree(func_to_func->host_msg); - func_to_func->host_msg = NULL; - - return -ENOMEM; -} - -static void deinit_host_msg_channel(struct sphw_mbox *func_to_func) -{ - int i; - - if (!func_to_func->host_msg) - return; - - for (i = 0; i < SPHW_MAX_HOST_NUM(func_to_func->hwdev); i++) - free_mbox_msg_channel(&func_to_func->host_msg[i]); - - kfree(func_to_func->host_msg); - func_to_func->host_msg = NULL; -} - -int sphw_init_func_mbox_msg_channel(void *hwdev, u16 num_func) -{ - struct sphw_hwdev *dev = hwdev; - struct sphw_mbox *func_to_func = NULL; - u16 func_id, i; - int err; - - if (!hwdev || !num_func || num_func > SPHW_MAX_FUNCTIONS) - return -EINVAL; - - func_to_func = dev->func_to_func; - if (func_to_func->func_msg) - return (func_to_func->num_func_msg == num_func) ? 
0 : -EFAULT; - - func_to_func->func_msg = - kcalloc(num_func, sizeof(*func_to_func->func_msg), GFP_KERNEL); - if (!func_to_func->func_msg) { - sdk_err(func_to_func->hwdev->dev_hdl, "Failed to alloc func message array\n"); - return -ENOMEM; - } - - for (func_id = 0; func_id < num_func; func_id++) { - err = alloc_mbox_msg_channel(&func_to_func->func_msg[func_id]); - if (err) { - sdk_err(func_to_func->hwdev->dev_hdl, - "Failed to alloc func %hu message channel\n", - func_id); - goto alloc_msg_ch_err; - } - } - - func_to_func->num_func_msg = num_func; - - return 0; - -alloc_msg_ch_err: - for (i = 0; i < func_id; i++) - free_mbox_msg_channel(&func_to_func->func_msg[i]); - - kfree(func_to_func->func_msg); - func_to_func->func_msg = NULL; - - return -ENOMEM; -} - -void sphw_deinit_func_mbox_msg_channel(struct sphw_hwdev *hwdev) -{ - struct sphw_mbox *func_to_func = hwdev->func_to_func; - u16 i; - - if (!func_to_func->func_msg) - return; - - for (i = 0; i < func_to_func->num_func_msg; i++) - free_mbox_msg_channel(&func_to_func->func_msg[i]); - - kfree(func_to_func->func_msg); - func_to_func->func_msg = NULL; -} - -struct sphw_msg_desc *get_mbox_msg_desc(struct sphw_mbox *func_to_func, u64 dir, u64 src_func_id) -{ - struct sphw_hwdev *hwdev = func_to_func->hwdev; - struct sphw_msg_channel *msg_ch = NULL; - u16 id; - - if (src_func_id == SPHW_MGMT_SRC_ID) { - msg_ch = &func_to_func->mgmt_msg; - } else if (SPHW_IS_VF(hwdev)) { - /* message from pf */ - msg_ch = func_to_func->func_msg; - if (src_func_id != sphw_pf_id_of_vf(hwdev) || !msg_ch) - return NULL; - } else if (src_func_id > sphw_glb_pf_vf_offset(hwdev)) { - /* message from vf */ - id = (u16)(src_func_id - 1U) - sphw_glb_pf_vf_offset(hwdev); - if (id >= func_to_func->num_func_msg) - return NULL; - - msg_ch = &func_to_func->func_msg[id]; - } else { - /* message from other host's ppf */ - if (!func_to_func->support_h2h_msg) - return NULL; - - for (id = 0; id < SPHW_MAX_HOST_NUM(hwdev); id++) { - if (src_func_id == sphw_host_ppf_idx(hwdev, (u8)id)) - break; - } - - if (id == SPHW_MAX_HOST_NUM(hwdev) || !func_to_func->host_msg) - return NULL; - - msg_ch = &func_to_func->host_msg[id]; - } - - return (dir == SPHW_MSG_DIRECT_SEND) ? 
- &msg_ch->recv_msg : &msg_ch->resp_msg; -} - -static void prepare_send_mbox(struct sphw_mbox *func_to_func) -{ - struct sphw_send_mbox *send_mbox = &func_to_func->send_mbox; - - send_mbox->data = MBOX_AREA(func_to_func->hwdev->hwif); -} - -static int alloc_mbox_wb_status(struct sphw_mbox *func_to_func) -{ - struct sphw_send_mbox *send_mbox = &func_to_func->send_mbox; - struct sphw_hwdev *hwdev = func_to_func->hwdev; - u32 addr_h, addr_l; - - send_mbox->wb_vaddr = dma_alloc_coherent(hwdev->dev_hdl, MBOX_WB_STATUS_LEN, - &send_mbox->wb_paddr, GFP_KERNEL); - if (!send_mbox->wb_vaddr) - return -ENOMEM; - - send_mbox->wb_status = send_mbox->wb_vaddr; - - addr_h = upper_32_bits(send_mbox->wb_paddr); - addr_l = lower_32_bits(send_mbox->wb_paddr); - - sphw_hwif_write_reg(hwdev->hwif, SPHW_FUNC_CSR_MAILBOX_RESULT_H_OFF, addr_h); - sphw_hwif_write_reg(hwdev->hwif, SPHW_FUNC_CSR_MAILBOX_RESULT_L_OFF, addr_l); - - return 0; -} - -static void free_mbox_wb_status(struct sphw_mbox *func_to_func) -{ - struct sphw_send_mbox *send_mbox = &func_to_func->send_mbox; - struct sphw_hwdev *hwdev = func_to_func->hwdev; - - sphw_hwif_write_reg(hwdev->hwif, SPHW_FUNC_CSR_MAILBOX_RESULT_H_OFF, 0); - sphw_hwif_write_reg(hwdev->hwif, SPHW_FUNC_CSR_MAILBOX_RESULT_L_OFF, 0); - - dma_free_coherent(hwdev->dev_hdl, MBOX_WB_STATUS_LEN, - send_mbox->wb_vaddr, send_mbox->wb_paddr); -} - -int sphw_func_to_func_init(struct sphw_hwdev *hwdev) -{ - struct sphw_mbox *func_to_func; - int err; - - func_to_func = kzalloc(sizeof(*func_to_func), GFP_KERNEL); - if (!func_to_func) - return -ENOMEM; - - hwdev->func_to_func = func_to_func; - func_to_func->hwdev = hwdev; - mutex_init(&func_to_func->mbox_send_lock); - mutex_init(&func_to_func->msg_send_lock); - spin_lock_init(&func_to_func->mbox_lock); - func_to_func->workq = - create_singlethread_workqueue(SPHW_MBOX_WQ_NAME); - if (!func_to_func->workq) { - sdk_err(hwdev->dev_hdl, "Failed to initialize MBOX workqueue\n"); - err = -ENOMEM; - goto create_mbox_workq_err; - } - - err = init_mgmt_msg_channel(func_to_func); - if (err) - goto init_mgmt_msg_ch_err; - - if (SPHW_IS_VF(hwdev)) { - /* VF to PF mbox message channel */ - err = sphw_init_func_mbox_msg_channel(hwdev, 1); - if (err) - goto init_func_msg_ch_err; - } - - err = alloc_mbox_wb_status(func_to_func); - if (err) { - sdk_err(hwdev->dev_hdl, "Failed to alloc mbox write back status\n"); - goto alloc_wb_status_err; - } - - prepare_send_mbox(func_to_func); - - return 0; - -alloc_wb_status_err: - if (SPHW_IS_VF(hwdev)) - sphw_deinit_func_mbox_msg_channel(hwdev); - -init_func_msg_ch_err: - deinit_mgmt_msg_channel(func_to_func); - -init_mgmt_msg_ch_err: - destroy_workqueue(func_to_func->workq); - -create_mbox_workq_err: - kfree(func_to_func); - - return err; -} - -void sphw_func_to_func_free(struct sphw_hwdev *hwdev) -{ - struct sphw_mbox *func_to_func = hwdev->func_to_func; - - /* destroy workqueue before free related mbox resources in case of - * illegal resource access - */ - destroy_workqueue(func_to_func->workq); - - free_mbox_wb_status(func_to_func); - if (SPHW_IS_PPF(hwdev)) - deinit_host_msg_channel(func_to_func); - sphw_deinit_func_mbox_msg_channel(hwdev); - deinit_mgmt_msg_channel(func_to_func); - - kfree(func_to_func); -} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mbox.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mbox.h deleted file mode 100644 index 9aebee1c088a..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mbox.h +++ /dev/null @@ -1,271 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* 
Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_MBOX_H -#define SPHW_MBOX_H - -#include "sphw_crm.h" - -#define SPHW_MBOX_PF_SEND_ERR 0x1 - -#define SPHW_MGMT_SRC_ID 0x1FFF -#define SPHW_MAX_FUNCTIONS 4096 - -/* message header define */ -#define SPHW_MSG_HEADER_SRC_GLB_FUNC_IDX_SHIFT 0 -#define SPHW_MSG_HEADER_STATUS_SHIFT 13 -#define SPHW_MSG_HEADER_SOURCE_SHIFT 15 -#define SPHW_MSG_HEADER_AEQ_ID_SHIFT 16 -#define SPHW_MSG_HEADER_MSG_ID_SHIFT 18 -#define SPHW_MSG_HEADER_CMD_SHIFT 22 - -#define SPHW_MSG_HEADER_MSG_LEN_SHIFT 32 -#define SPHW_MSG_HEADER_MODULE_SHIFT 43 -#define SPHW_MSG_HEADER_SEG_LEN_SHIFT 48 -#define SPHW_MSG_HEADER_NO_ACK_SHIFT 54 -#define SPHW_MSG_HEADER_DATA_TYPE_SHIFT 55 -#define SPHW_MSG_HEADER_SEQID_SHIFT 56 -#define SPHW_MSG_HEADER_LAST_SHIFT 62 -#define SPHW_MSG_HEADER_DIRECTION_SHIFT 63 - -#define SPHW_MSG_HEADER_SRC_GLB_FUNC_IDX_MASK 0x1FFF -#define SPHW_MSG_HEADER_STATUS_MASK 0x1 -#define SPHW_MSG_HEADER_SOURCE_MASK 0x1 -#define SPHW_MSG_HEADER_AEQ_ID_MASK 0x3 -#define SPHW_MSG_HEADER_MSG_ID_MASK 0xF -#define SPHW_MSG_HEADER_CMD_MASK 0x3FF - -#define SPHW_MSG_HEADER_MSG_LEN_MASK 0x7FF -#define SPHW_MSG_HEADER_MODULE_MASK 0x1F -#define SPHW_MSG_HEADER_SEG_LEN_MASK 0x3F -#define SPHW_MSG_HEADER_NO_ACK_MASK 0x1 -#define SPHW_MSG_HEADER_DATA_TYPE_MASK 0x1 -#define SPHW_MSG_HEADER_SEQID_MASK 0x3F -#define SPHW_MSG_HEADER_LAST_MASK 0x1 -#define SPHW_MSG_HEADER_DIRECTION_MASK 0x1 - -#define SPHW_MSG_HEADER_GET(val, field) \ - (((val) >> SPHW_MSG_HEADER_##field##_SHIFT) & \ - SPHW_MSG_HEADER_##field##_MASK) -#define SPHW_MSG_HEADER_SET(val, field) \ - ((u64)(((u64)(val)) & SPHW_MSG_HEADER_##field##_MASK) << \ - SPHW_MSG_HEADER_##field##_SHIFT) - -#define IS_DMA_MBX_MSG(dst_func) ((dst_func) == SPHW_MGMT_SRC_ID) - -enum sphw_msg_direction_type { - SPHW_MSG_DIRECT_SEND = 0, - SPHW_MSG_RESPONSE = 1, -}; - -enum sphw_msg_segment_type { - NOT_LAST_SEGMENT = 0, - LAST_SEGMENT = 1, -}; - -enum sphw_msg_ack_type { - SPHW_MSG_ACK, - SPHW_MSG_NO_ACK, -}; - -enum sphw_data_type { - SPHW_DATA_INLINE = 0, - SPHW_DATA_DMA = 1, -}; - -enum sphw_msg_src_type { - SPHW_MSG_FROM_MGMT = 0, - SPHW_MSG_FROM_MBOX = 1, -}; - -enum sphw_msg_aeq_type { - SPHW_ASYNC_MSG_AEQ = 0, - /* indicate dest func or mgmt cpu which aeq to response mbox message */ - SPHW_MBOX_RSP_MSG_AEQ = 1, - /* indicate mgmt cpu which aeq to response api cmd message */ - SPHW_MGMT_RSP_MSG_AEQ = 2, -}; - -#define SPHW_MBOX_WQ_NAME "sphw_mbox" - -enum sphw_mbox_seg_errcode { - MBOX_ERRCODE_NO_ERRORS = 0, - /* VF send the mailbox data to the wrong destination functions */ - MBOX_ERRCODE_VF_TO_WRONG_FUNC = 0x100, - /* PPF send the mailbox data to the wrong destination functions */ - MBOX_ERRCODE_PPF_TO_WRONG_FUNC = 0x200, - /* PF send the mailbox data to the wrong destination functions */ - MBOX_ERRCODE_PF_TO_WRONG_FUNC = 0x300, - /* The mailbox data size is set to all zero */ - MBOX_ERRCODE_ZERO_DATA_SIZE = 0x400, - /* The sender function attribute has not been learned by hardware */ - MBOX_ERRCODE_UNKNOWN_SRC_FUNC = 0x500, - /* The receiver function attr has not been learned by hardware */ - MBOX_ERRCODE_UNKNOWN_DES_FUNC = 0x600, -}; - -struct mbox_msg_info { - u8 msg_id; - u8 status; /* can only use 1 bit */ -}; - -struct sphw_msg_desc { - void *msg; - u16 msg_len; - u8 seq_id; - u8 mod; - u16 cmd; - struct mbox_msg_info msg_info; -}; - -struct sphw_msg_channel { - struct sphw_msg_desc resp_msg; - struct sphw_msg_desc recv_msg; - - atomic_t recv_msg_cnt; -}; - -/* Receive other functions mbox message */ 
-struct sphw_recv_mbox { - void *msg; - u16 msg_len; - u8 msg_id; - u8 mod; - u16 cmd; - u16 src_func_idx; - enum sphw_msg_ack_type ack_type; - void *resp_buff; -}; - -struct sphw_send_mbox { - u8 *data; - - u64 *wb_status; /* write back status */ - void *wb_vaddr; - dma_addr_t wb_paddr; -}; - -enum mbox_event_state { - EVENT_START = 0, - EVENT_FAIL, - EVENT_SUCCESS, - EVENT_TIMEOUT, - EVENT_END, -}; - -enum sphw_mbox_cb_state { - SPHW_VF_MBOX_CB_REG = 0, - SPHW_VF_MBOX_CB_RUNNING, - SPHW_PF_MBOX_CB_REG, - SPHW_PF_MBOX_CB_RUNNING, - SPHW_PPF_MBOX_CB_REG, - SPHW_PPF_MBOX_CB_RUNNING, - SPHW_PPF_TO_PF_MBOX_CB_REG, - SPHW_PPF_TO_PF_MBOX_CB_RUNNIG, -}; - -struct mbox_dma_msg { - u32 xor; - u32 dma_addr_high; - u32 dma_addr_low; - u32 msg_len; - u64 rsvd; -}; - -struct mbox_dma_queue { - void *dma_buff_vaddr; - dma_addr_t dma_buff_paddr; - - u16 depth; - u16 prod_idx; - u16 cons_idx; -}; - -struct sphw_mbox { - struct sphw_hwdev *hwdev; - - bool lock_channel_en; - unsigned long channel_stop; - u16 cur_msg_channel; - - /* lock for send mbox message and ack message */ - struct mutex mbox_send_lock; - /* lock for send mbox message */ - struct mutex msg_send_lock; - struct sphw_send_mbox send_mbox; - - struct mbox_dma_queue sync_msg_queue; - struct mbox_dma_queue async_msg_queue; - - struct workqueue_struct *workq; - - struct sphw_msg_channel mgmt_msg; /* driver and MGMT CPU */ - struct sphw_msg_channel *host_msg; /* PPF message between hosts */ - struct sphw_msg_channel *func_msg; /* PF to VF or VF to PF */ - u16 num_func_msg; - bool support_h2h_msg; /* host to host */ - - /* vf receive pf/ppf callback */ - sphw_vf_mbox_cb vf_mbox_cb[SPHW_MOD_MAX]; - void *vf_mbox_data[SPHW_MOD_MAX]; - /* pf/ppf receive vf callback */ - sphw_pf_mbox_cb pf_mbox_cb[SPHW_MOD_MAX]; - void *pf_mbox_data[SPHW_MOD_MAX]; - /* ppf receive pf/ppf callback */ - sphw_ppf_mbox_cb ppf_mbox_cb[SPHW_MOD_MAX]; - void *ppf_mbox_data[SPHW_MOD_MAX]; - /* pf receive ppf callback */ - sphw_pf_recv_from_ppf_mbox_cb pf_recv_ppf_mbox_cb[SPHW_MOD_MAX]; - void *pf_recv_ppf_mbox_data[SPHW_MOD_MAX]; - unsigned long ppf_to_pf_mbox_cb_state[SPHW_MOD_MAX]; - unsigned long ppf_mbox_cb_state[SPHW_MOD_MAX]; - unsigned long pf_mbox_cb_state[SPHW_MOD_MAX]; - unsigned long vf_mbox_cb_state[SPHW_MOD_MAX]; - - u8 send_msg_id; - enum mbox_event_state event_flag; - /* lock for mbox event flag */ - spinlock_t mbox_lock; -}; - -struct sphw_mbox_work { - struct work_struct work; - struct sphw_mbox *func_to_func; - struct sphw_recv_mbox *recv_mbox; - struct sphw_msg_channel *msg_ch; -}; - -struct vf_cmd_check_handle { - u16 cmd; - bool (*check_cmd)(struct sphw_hwdev *hwdev, u16 src_func_idx, - void *buf_in, u16 in_size); -}; - -void sphw_mbox_func_aeqe_handler(void *handle, u8 *header, u8 size); - -int sphw_func_to_func_init(struct sphw_hwdev *hwdev); - -void sphw_func_to_func_free(struct sphw_hwdev *hwdev); - -int sphw_send_mbox_to_mgmt(struct sphw_hwdev *hwdev, u8 mod, u16 cmd, - void *buf_in, u16 in_size, void *buf_out, - u16 *out_size, u32 timeout, u16 channel); - -void sphw_response_mbox_to_mgmt(struct sphw_hwdev *hwdev, u8 mod, u16 cmd, - void *buf_in, u16 in_size, u16 msg_id); - -int sphw_send_mbox_to_mgmt_no_ack(struct sphw_hwdev *hwdev, u8 mod, u16 cmd, - void *buf_in, u16 in_size, u16 channel); -int sphw_mbox_to_func(struct sphw_mbox *func_to_func, u8 mod, u16 cmd, - u16 dst_func, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size, u32 timeout, u16 channel); - -int sphw_mbox_ppf_to_host(void *hwdev, u8 mod, u16 cmd, u8 host_id, - void *buf_in, 
u16 in_size, void *buf_out, - u16 *out_size, u32 timeout, u16 channel); - -int sphw_mbox_init_host_msg_channel(struct sphw_hwdev *hwdev); - -void sphw_mbox_enable_channel_lock(struct sphw_hwdev *hwdev, bool enable); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt.c deleted file mode 100644 index a66f40635963..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt.c +++ /dev/null @@ -1,895 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt - -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/pci.h> -#include <linux/device.h> -#include <linux/spinlock.h> -#include <linux/completion.h> -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/interrupt.h> -#include <linux/semaphore.h> -#include <linux/delay.h> - -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "sphw_common.h" -#include "sphw_comm_cmd.h" -#include "sphw_hwdev.h" -#include "sphw_eqs.h" -#include "sphw_mbox.h" -#include "sphw_api_cmd.h" -#include "sphw_prof_adap.h" -#include "sphw_mgmt.h" -#include "sphw_csr.h" - -#define SPHW_MSG_TO_MGMT_MAX_LEN 2016 - -#define SPHW_API_CHAIN_AEQ_ID 2 -#define MAX_PF_MGMT_BUF_SIZE 2048UL -#define SEGMENT_LEN 48 -#define ASYNC_MSG_FLAG 0x8 -#define MGMT_MSG_MAX_SEQ_ID (ALIGN(SPHW_MSG_TO_MGMT_MAX_LEN, \ - SEGMENT_LEN) / SEGMENT_LEN) - -#define BUF_OUT_DEFAULT_SIZE 1 - -#define MGMT_MSG_SIZE_MIN 20 -#define MGMT_MSG_SIZE_STEP 16 -#define MGMT_MSG_RSVD_FOR_DEV 8 - -#define SYNC_MSG_ID_MASK 0x7 -#define ASYNC_MSG_ID_MASK 0x7 - -#define SYNC_FLAG 0 -#define ASYNC_FLAG 1 - -#define MSG_NO_RESP 0xFFFF - -#define MGMT_MSG_TIMEOUT 20000 /* millisecond */ - -#define SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id) - -#define SYNC_MSG_ID_INC(pf_to_mgmt) (SYNC_MSG_ID(pf_to_mgmt) = \ - (SYNC_MSG_ID(pf_to_mgmt) + 1) & SYNC_MSG_ID_MASK) -#define ASYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->async_msg_id) - -#define ASYNC_MSG_ID_INC(pf_to_mgmt) (ASYNC_MSG_ID(pf_to_mgmt) = \ - ((ASYNC_MSG_ID(pf_to_mgmt) + 1) & ASYNC_MSG_ID_MASK) | ASYNC_MSG_FLAG) - -static void pf_to_mgmt_send_event_set(struct sphw_msg_pf_to_mgmt *pf_to_mgmt, - int event_flag) -{ - spin_lock(&pf_to_mgmt->sync_event_lock); - pf_to_mgmt->event_flag = event_flag; - spin_unlock(&pf_to_mgmt->sync_event_lock); -} - -/** - * sphw_register_mgmt_msg_cb - register sync msg handler for a module - * @hwdev: the pointer to hw device - * @mod: module in the chip that this handler will handle its sync messages - * @pri_handle: specific mod's private data that will be used in callback - * @callback: the handler for a sync message that will handle messages - **/ -int sphw_register_mgmt_msg_cb(void *hwdev, u8 mod, void *pri_handle, sphw_mgmt_msg_cb callback) -{ - struct sphw_msg_pf_to_mgmt *pf_to_mgmt = NULL; - - if (mod >= SPHW_MOD_HW_MAX || !hwdev) - return -EFAULT; - - pf_to_mgmt = ((struct sphw_hwdev *)hwdev)->pf_to_mgmt; - if (!pf_to_mgmt) - return -EINVAL; - - pf_to_mgmt->recv_mgmt_msg_cb[mod] = callback; - pf_to_mgmt->recv_mgmt_msg_data[mod] = pri_handle; - - set_bit(SPHW_MGMT_MSG_CB_REG, &pf_to_mgmt->mgmt_msg_cb_state[mod]); - - return 0; -} - -/** - * sphw_unregister_mgmt_msg_cb - unregister sync msg handler for a module - * @hwdev: the pointer to hw device - * @mod: module in the chip that this handler will handle its sync messages - **/ -void sphw_unregister_mgmt_msg_cb(void *hwdev, u8 mod) -{ - struct sphw_msg_pf_to_mgmt *pf_to_mgmt 
= NULL; - - if (!hwdev || mod >= SPHW_MOD_HW_MAX) - return; - - pf_to_mgmt = ((struct sphw_hwdev *)hwdev)->pf_to_mgmt; - if (!pf_to_mgmt) - return; - - clear_bit(SPHW_MGMT_MSG_CB_REG, &pf_to_mgmt->mgmt_msg_cb_state[mod]); - - while (test_bit(SPHW_MGMT_MSG_CB_RUNNING, &pf_to_mgmt->mgmt_msg_cb_state[mod])) - usleep_range(900, 1000); - - pf_to_mgmt->recv_mgmt_msg_cb[mod] = NULL; - pf_to_mgmt->recv_mgmt_msg_data[mod] = NULL; -} - -/** - * mgmt_msg_len - calculate the total message length - * @msg_data_len: the length of the message data - * Return: the total message length - **/ -static u16 mgmt_msg_len(u16 msg_data_len) -{ - /* u64 - the size of the header */ - u16 msg_size; - - msg_size = (u16)(MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + msg_data_len); - - if (msg_size > MGMT_MSG_SIZE_MIN) - msg_size = MGMT_MSG_SIZE_MIN + - ALIGN((msg_size - MGMT_MSG_SIZE_MIN), - MGMT_MSG_SIZE_STEP); - else - msg_size = MGMT_MSG_SIZE_MIN; - - return msg_size; -} - -/** - * prepare_header - prepare the header of the message - * @pf_to_mgmt: PF to MGMT channel - * @header: pointer of the header to prepare - * @msg_len: the length of the message - * @mod: module in the chip that will get the message - * @direction: the direction of the original message - * @msg_id: message id - **/ -static void prepare_header(struct sphw_msg_pf_to_mgmt *pf_to_mgmt, - u64 *header, u16 msg_len, u8 mod, - enum sphw_msg_ack_type ack_type, - enum sphw_msg_direction_type direction, - enum sphw_mgmt_cmd cmd, u32 msg_id) -{ - struct sphw_hwif *hwif = pf_to_mgmt->hwdev->hwif; - - *header = SPHW_MSG_HEADER_SET(msg_len, MSG_LEN) | - SPHW_MSG_HEADER_SET(mod, MODULE) | - SPHW_MSG_HEADER_SET(msg_len, SEG_LEN) | - SPHW_MSG_HEADER_SET(ack_type, NO_ACK) | - SPHW_MSG_HEADER_SET(SPHW_DATA_INLINE, DATA_TYPE) | - SPHW_MSG_HEADER_SET(0, SEQID) | - SPHW_MSG_HEADER_SET(SPHW_API_CHAIN_AEQ_ID, AEQ_ID) | - SPHW_MSG_HEADER_SET(LAST_SEGMENT, LAST) | - SPHW_MSG_HEADER_SET(direction, DIRECTION) | - SPHW_MSG_HEADER_SET(cmd, CMD) | - SPHW_MSG_HEADER_SET(SPHW_MSG_FROM_MGMT, SOURCE) | - SPHW_MSG_HEADER_SET(hwif->attr.func_global_idx, SRC_GLB_FUNC_IDX) | - SPHW_MSG_HEADER_SET(msg_id, MSG_ID); -} - -/** - * prepare_mgmt_cmd - prepare the mgmt command - * @mgmt_cmd: pointer to the command to prepare - * @header: pointer of the header to prepare - * @msg: the data of the message - * @msg_len: the length of the message - **/ -static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, const void *msg, - int msg_len) -{ - memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV); - - mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV; - memcpy(mgmt_cmd, header, sizeof(*header)); - - mgmt_cmd += sizeof(*header); - memcpy(mgmt_cmd, msg, msg_len); -} - -/** - * send_msg_to_mgmt_sync - send async message - * @pf_to_mgmt: PF to MGMT channel - * @mod: module in the chip that will get the message - * @cmd: command of the message - * @msg: the msg data - * @msg_len: the msg data length - * @direction: the direction of the original message - * @resp_msg_id: msg id to response for - * Return: 0 - success, negative - failure - **/ -static int send_msg_to_mgmt_sync(struct sphw_msg_pf_to_mgmt *pf_to_mgmt, - u8 mod, u16 cmd, const void *msg, u16 msg_len, - enum sphw_msg_ack_type ack_type, - enum sphw_msg_direction_type direction, - u16 resp_msg_id) -{ - void *mgmt_cmd = pf_to_mgmt->sync_msg_buf; - struct sphw_api_cmd_chain *chain = NULL; - u8 node_id = SPHW_MGMT_CPU_NODE_ID(pf_to_mgmt->hwdev); - u64 header; - u16 cmd_size = mgmt_msg_len(msg_len); - - if (!sphw_get_chip_present_flag(pf_to_mgmt->hwdev)) - return -EFAULT; - - 
if (direction == SPHW_MSG_RESPONSE) - prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type, - direction, cmd, resp_msg_id); - else - prepare_header(pf_to_mgmt, &header, msg_len, mod, ack_type, - direction, cmd, SYNC_MSG_ID_INC(pf_to_mgmt)); - chain = pf_to_mgmt->cmd_chain[SPHW_API_CMD_WRITE_TO_MGMT_CPU]; - - if (ack_type == SPHW_MSG_ACK) - pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_START); - - prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len); - - return sphw_api_cmd_write(chain, node_id, mgmt_cmd, cmd_size); -} - -/** - * send_msg_to_mgmt_async - send async message - * @pf_to_mgmt: PF to MGMT channel - * @mod: module in the chip that will get the message - * @cmd: command of the message - * @msg: the data of the message - * @msg_len: the length of the message - * @direction: the direction of the original message - * Return: 0 - success, negative - failure - **/ -static int send_msg_to_mgmt_async(struct sphw_msg_pf_to_mgmt *pf_to_mgmt, - u8 mod, u16 cmd, const void *msg, u16 msg_len, - enum sphw_msg_direction_type direction) -{ - void *mgmt_cmd = pf_to_mgmt->async_msg_buf; - struct sphw_api_cmd_chain *chain = NULL; - u8 node_id = SPHW_MGMT_CPU_NODE_ID(pf_to_mgmt->hwdev); - u64 header; - u16 cmd_size = mgmt_msg_len(msg_len); - - if (!sphw_get_chip_present_flag(pf_to_mgmt->hwdev)) - return -EFAULT; - - prepare_header(pf_to_mgmt, &header, msg_len, mod, SPHW_MSG_NO_ACK, - direction, cmd, ASYNC_MSG_ID(pf_to_mgmt)); - - prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len); - - chain = pf_to_mgmt->cmd_chain[SPHW_API_CMD_WRITE_ASYNC_TO_MGMT_CPU]; - - return sphw_api_cmd_write(chain, node_id, mgmt_cmd, cmd_size); -} - -static inline void msg_to_mgmt_pre(u8 mod, void *buf_in) -{ - struct sphw_msg_head *msg_head = NULL; - - /* set aeq fix num to 3, need to ensure response aeq id < 3*/ - if (mod == SPHW_MOD_COMM || mod == SPHW_MOD_L2NIC) { - msg_head = buf_in; - - if (msg_head->resp_aeq_num >= SPHW_MAX_AEQS) - msg_head->resp_aeq_num = 0; - } -} - -int sphw_pf_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size, u32 timeout) -{ - struct sphw_msg_pf_to_mgmt *pf_to_mgmt = NULL; - void *dev = ((struct sphw_hwdev *)hwdev)->dev_hdl; - struct sphw_recv_msg *recv_msg = NULL; - struct completion *recv_done = NULL; - ulong timeo; - int err; - ulong ret; - - msg_to_mgmt_pre(mod, buf_in); - - pf_to_mgmt = ((struct sphw_hwdev *)hwdev)->pf_to_mgmt; - - /* Lock the sync_msg_buf */ - down(&pf_to_mgmt->sync_msg_lock); - recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt; - recv_done = &recv_msg->recv_done; - - init_completion(recv_done); - - err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size, - SPHW_MSG_ACK, SPHW_MSG_DIRECT_SEND, - MSG_NO_RESP); - if (err) { - sdk_err(dev, "Failed to send sync msg to mgmt, sync_msg_id: %u\n", - pf_to_mgmt->sync_msg_id); - pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_FAIL); - goto unlock_sync_msg; - } - - timeo = msecs_to_jiffies(timeout ? 
timeout : MGMT_MSG_TIMEOUT); - - ret = wait_for_completion_timeout(recv_done, timeo); - if (!ret) { - sdk_err(dev, "Mgmt response sync cmd timeout, sync_msg_id: %u\n", - pf_to_mgmt->sync_msg_id); - sphw_dump_aeq_info((struct sphw_hwdev *)hwdev); - err = -ETIMEDOUT; - pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_TIMEOUT); - goto unlock_sync_msg; - } - pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_END); - - if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag)) { - up(&pf_to_mgmt->sync_msg_lock); - return -ETIMEDOUT; - } - - if (buf_out && out_size) { - if (*out_size < recv_msg->msg_len) { - sdk_err(dev, "Invalid response message length: %u for mod %d cmd %u from mgmt, should less than: %u\n", - recv_msg->msg_len, mod, cmd, *out_size); - err = -EFAULT; - goto unlock_sync_msg; - } - - if (recv_msg->msg_len) - memcpy(buf_out, recv_msg->msg, recv_msg->msg_len); - - *out_size = recv_msg->msg_len; - } - -unlock_sync_msg: - up(&pf_to_mgmt->sync_msg_lock); - - return err; -} - -int sphw_pf_to_mgmt_async(void *hwdev, u8 mod, u16 cmd, const void *buf_in, u16 in_size) -{ - struct sphw_msg_pf_to_mgmt *pf_to_mgmt; - void *dev = ((struct sphw_hwdev *)hwdev)->dev_hdl; - int err; - - pf_to_mgmt = ((struct sphw_hwdev *)hwdev)->pf_to_mgmt; - - /* Lock the async_msg_buf */ - spin_lock_bh(&pf_to_mgmt->async_msg_lock); - ASYNC_MSG_ID_INC(pf_to_mgmt); - - err = send_msg_to_mgmt_async(pf_to_mgmt, mod, cmd, buf_in, in_size, - SPHW_MSG_DIRECT_SEND); - spin_unlock_bh(&pf_to_mgmt->async_msg_lock); - - if (err) { - sdk_err(dev, "Failed to send async mgmt msg\n"); - return err; - } - - return 0; -} - -int sphw_pf_msg_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size, u32 timeout) -{ - if (!hwdev) - return -EINVAL; - - if (!sphw_get_chip_present_flag(hwdev)) - return -EPERM; - - if (in_size > SPHW_MSG_TO_MGMT_MAX_LEN) - return -EINVAL; - - return sphw_pf_to_mgmt_sync(hwdev, mod, cmd, buf_in, in_size, buf_out, out_size, timeout); -} - -int sphw_msg_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, void *buf_out, - u16 *out_size, u32 timeout, u16 channel) -{ - if (!hwdev) - return -EINVAL; - - if (!sphw_get_chip_present_flag(hwdev)) - return -EPERM; - - return sphw_send_mbox_to_mgmt(hwdev, mod, cmd, buf_in, in_size, - buf_out, out_size, timeout, channel); -} - -int sphw_msg_to_mgmt_no_ack(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, u16 channel) -{ - if (!hwdev) - return -EINVAL; - - if (!sphw_get_chip_present_flag(hwdev)) - return -EPERM; - - return sphw_send_mbox_to_mgmt_no_ack(hwdev, mod, cmd, buf_in, in_size, channel); -} - -int sphw_msg_to_mgmt_async(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, u16 channel) -{ - return sphw_msg_to_mgmt_api_chain_async(hwdev, mod, cmd, buf_in, in_size); -} - -int sphw_msg_to_mgmt_api_chain_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size, u32 timeout) -{ - if (!hwdev) - return -EINVAL; - - if (!sphw_get_chip_present_flag(hwdev)) - return -EPERM; - - return sphw_pf_msg_to_mgmt_sync(hwdev, mod, cmd, buf_in, in_size, - buf_out, out_size, timeout); -} - -int sphw_msg_to_mgmt_api_chain_async(void *hwdev, u8 mod, u16 cmd, const void *buf_in, u16 in_size) -{ - int err; - - if (!hwdev) - return -EINVAL; - - if (sphw_func_type(hwdev) == TYPE_VF) { - err = -EFAULT; - sdk_err(((struct sphw_hwdev *)hwdev)->dev_hdl, - "VF don't support async cmd\n"); - } else { - err = sphw_pf_to_mgmt_async(hwdev, mod, cmd, buf_in, in_size); - } - - return err; -} - 
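The functions removed just above were the generic entry points the rest of the driver used to reach the management CPU: sphw_msg_to_mgmt_sync() and sphw_msg_to_mgmt_no_ack() go over the mailbox, while the *_api_chain_* variants use the API command chain and, as the error path shows, refuse asynchronous sends from a VF. Purely as a minimal sketch of that calling convention, assuming the removed driver's own headers (sphw_hw.h, sphw_mgmt_msg_base.h, sphw_crm.h) and with the command ID, response struct and channel value below being hypothetical placeholders rather than anything defined in this patch:

/* Illustrative sketch only: shows how a caller inside the removed driver
 * would have issued a synchronous management command through
 * sphw_msg_to_mgmt_sync(). EXAMPLE_GET_INFO_CMD and struct
 * example_info_resp are hypothetical; real callers used command IDs and
 * message structs defined elsewhere in the driver.
 */
#define EXAMPLE_GET_INFO_CMD	0x1	/* hypothetical command ID */

static int example_query_mgmt(void *hwdev)
{
	struct mgmt_msg_head req = {0};		/* header-only request */
	struct example_info_resp {
		struct mgmt_msg_head head;	/* status/version returned here */
		u32 value;
	} resp = {{0}, 0};
	u16 out_size = sizeof(resp);
	int err;

	/* Mailbox path; timeout 0 falls back to the default, channel 0 is
	 * only illustrative.
	 */
	err = sphw_msg_to_mgmt_sync(hwdev, SPHW_MOD_COMM, EXAMPLE_GET_INFO_CMD,
				    &req, sizeof(req), &resp, &out_size,
				    0, 0);
	if (err)
		return err;	/* e.g. -ETIMEDOUT, -EPERM, -EINVAL */

	/* The first byte of every management reply is the firmware status. */
	return resp.head.status ? -EIO : 0;
}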
-static void send_mgmt_ack(struct sphw_msg_pf_to_mgmt *pf_to_mgmt, - u8 mod, u16 cmd, void *buf_in, u16 in_size, - u16 msg_id) -{ - u16 buf_size; - - if (!in_size) - buf_size = BUF_OUT_DEFAULT_SIZE; - else - buf_size = in_size; - - sphw_response_mbox_to_mgmt(pf_to_mgmt->hwdev, mod, cmd, buf_in, buf_size, msg_id); -} - -static void mgmt_recv_msg_handler(struct sphw_msg_pf_to_mgmt *pf_to_mgmt, - u8 mod, u16 cmd, void *buf_in, u16 in_size, - u16 msg_id, int need_resp) -{ - void *dev = pf_to_mgmt->hwdev->dev_hdl; - void *buf_out = pf_to_mgmt->mgmt_ack_buf; - enum sphw_mod_type tmp_mod = mod; - bool ack_first = false; - u16 out_size = 0; - - memset(buf_out, 0, MAX_PF_MGMT_BUF_SIZE); - - if (mod >= SPHW_MOD_HW_MAX) { - sdk_warn(dev, "Receive illegal message from mgmt cpu, mod = %d\n", - mod); - goto resp; - } - - set_bit(SPHW_MGMT_MSG_CB_RUNNING, &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]); - - if (!pf_to_mgmt->recv_mgmt_msg_cb[mod] || - !test_bit(SPHW_MGMT_MSG_CB_REG, - &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod])) { - sdk_warn(dev, "Receive mgmt callback is null, mod = %d\n", - mod); - clear_bit(SPHW_MGMT_MSG_CB_RUNNING, &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]); - goto resp; - } - - pf_to_mgmt->recv_mgmt_msg_cb[tmp_mod](pf_to_mgmt->hwdev, - pf_to_mgmt->recv_mgmt_msg_data[tmp_mod], - cmd, buf_in, in_size, buf_out, &out_size); - - clear_bit(SPHW_MGMT_MSG_CB_RUNNING, &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]); - -resp: - if (!ack_first && need_resp) - send_mgmt_ack(pf_to_mgmt, mod, cmd, buf_out, out_size, msg_id); -} - -/** - * mgmt_resp_msg_handler - handler for response message from mgmt cpu - * @pf_to_mgmt: PF to MGMT channel - * @recv_msg: received message details - **/ -static void mgmt_resp_msg_handler(struct sphw_msg_pf_to_mgmt *pf_to_mgmt, - struct sphw_recv_msg *recv_msg) -{ - void *dev = pf_to_mgmt->hwdev->dev_hdl; - - /* delete async msg */ - if (recv_msg->msg_id & ASYNC_MSG_FLAG) - return; - - spin_lock(&pf_to_mgmt->sync_event_lock); - if (recv_msg->msg_id == pf_to_mgmt->sync_msg_id && - pf_to_mgmt->event_flag == SEND_EVENT_START) { - pf_to_mgmt->event_flag = SEND_EVENT_SUCCESS; - complete(&recv_msg->recv_done); - } else if (recv_msg->msg_id != pf_to_mgmt->sync_msg_id) { - sdk_err(dev, "Send msg id(0x%x) recv msg id(0x%x) dismatch, event state=%d\n", - pf_to_mgmt->sync_msg_id, recv_msg->msg_id, - pf_to_mgmt->event_flag); - } else { - sdk_err(dev, "Wait timeout, send msg id(0x%x) recv msg id(0x%x), event state=%d!\n", - pf_to_mgmt->sync_msg_id, recv_msg->msg_id, - pf_to_mgmt->event_flag); - } - spin_unlock(&pf_to_mgmt->sync_event_lock); -} - -static void recv_mgmt_msg_work_handler(struct work_struct *work) -{ - struct sphw_mgmt_msg_handle_work *mgmt_work = - container_of(work, struct sphw_mgmt_msg_handle_work, work); - - mgmt_recv_msg_handler(mgmt_work->pf_to_mgmt, mgmt_work->mod, - mgmt_work->cmd, mgmt_work->msg, - mgmt_work->msg_len, mgmt_work->msg_id, - !mgmt_work->async_mgmt_to_pf); - - kfree(mgmt_work->msg); - kfree(mgmt_work); -} - -static bool check_mgmt_seq_id_and_seg_len(struct sphw_recv_msg *recv_msg, - u8 seq_id, u8 seg_len) -{ - if (seq_id > MGMT_MSG_MAX_SEQ_ID || seg_len > SEGMENT_LEN) - return false; - - if (seq_id == 0) { - recv_msg->seq_id = seq_id; - } else { - if (seq_id != recv_msg->seq_id + 1) - return false; - - recv_msg->seq_id = seq_id; - } - - return true; -} - -/** - * recv_mgmt_msg_handler - handler a message from mgmt cpu - * @pf_to_mgmt: PF to MGMT channel - * @header: the header of the message - * @recv_msg: received message details - **/ -static void 
recv_mgmt_msg_handler(struct sphw_msg_pf_to_mgmt *pf_to_mgmt, - u8 *header, struct sphw_recv_msg *recv_msg) -{ - struct sphw_hwdev *hwdev = pf_to_mgmt->hwdev; - struct sphw_mgmt_msg_handle_work *mgmt_work = NULL; - u64 mbox_header = *((u64 *)header); - void *msg_body = header + sizeof(mbox_header); - u8 seq_id, seq_len; - u32 offset; - u64 dir; - - /* Don't need to get anything from hw when cmd is async */ - dir = SPHW_MSG_HEADER_GET(mbox_header, DIRECTION); - if (dir == SPHW_MSG_RESPONSE && SPHW_MSG_HEADER_GET(mbox_header, MSG_ID) & ASYNC_MSG_FLAG) - return; - - seq_len = SPHW_MSG_HEADER_GET(mbox_header, SEG_LEN); - seq_id = SPHW_MSG_HEADER_GET(mbox_header, SEQID); - - if (!check_mgmt_seq_id_and_seg_len(recv_msg, seq_id, seq_len)) { - sdk_err(hwdev->dev_hdl, - "Mgmt msg sequence id and segment length check fail, front seq_id: 0x%x,current seq_id: 0x%x, seg len: 0x%x\n", - recv_msg->seq_id, seq_id, seq_len); - /* set seq_id to invalid seq_id */ - recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID; - return; - } - - offset = seq_id * SEGMENT_LEN; - memcpy((u8 *)recv_msg->msg + offset, msg_body, seq_len); - - if (!SPHW_MSG_HEADER_GET(mbox_header, LAST)) - return; - - recv_msg->cmd = SPHW_MSG_HEADER_GET(mbox_header, CMD); - recv_msg->mod = SPHW_MSG_HEADER_GET(mbox_header, MODULE); - recv_msg->async_mgmt_to_pf = SPHW_MSG_HEADER_GET(mbox_header, NO_ACK); - recv_msg->msg_len = SPHW_MSG_HEADER_GET(mbox_header, MSG_LEN); - recv_msg->msg_id = SPHW_MSG_HEADER_GET(mbox_header, MSG_ID); - recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID; - - if (SPHW_MSG_HEADER_GET(mbox_header, DIRECTION) == SPHW_MSG_RESPONSE) { - mgmt_resp_msg_handler(pf_to_mgmt, recv_msg); - return; - } - - mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL); - if (!mgmt_work) { - sdk_err(hwdev->dev_hdl, "Allocate mgmt work memory failed\n"); - return; - } - - if (recv_msg->msg_len) { - mgmt_work->msg = kzalloc(recv_msg->msg_len, GFP_KERNEL); - if (!mgmt_work->msg) { - sdk_err(hwdev->dev_hdl, "Allocate mgmt msg memory failed\n"); - kfree(mgmt_work); - return; - } - } - - mgmt_work->pf_to_mgmt = pf_to_mgmt; - mgmt_work->msg_len = recv_msg->msg_len; - memcpy(mgmt_work->msg, recv_msg->msg, recv_msg->msg_len); - mgmt_work->msg_id = recv_msg->msg_id; - mgmt_work->mod = recv_msg->mod; - mgmt_work->cmd = recv_msg->cmd; - mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf; - - INIT_WORK(&mgmt_work->work, recv_mgmt_msg_work_handler); - queue_work_on(sphw_get_work_cpu_affinity(hwdev, WORK_TYPE_MGMT_MSG), - pf_to_mgmt->workq, &mgmt_work->work); -} - -/** - * sphw_mgmt_msg_aeqe_handler - handler for a mgmt message event - * @handle: PF to MGMT channel - * @header: the header of the message - * @size: unused - **/ -void sphw_mgmt_msg_aeqe_handler(void *hwdev, u8 *header, u8 size) -{ - struct sphw_hwdev *dev = (struct sphw_hwdev *)hwdev; - struct sphw_msg_pf_to_mgmt *pf_to_mgmt = NULL; - struct sphw_recv_msg *recv_msg = NULL; - bool is_send_dir = false; - - if ((SPHW_MSG_HEADER_GET(*(u64 *)header, SOURCE) == - SPHW_MSG_FROM_MBOX)) { - sphw_mbox_func_aeqe_handler(hwdev, header, size); - return; - } - - pf_to_mgmt = dev->pf_to_mgmt; - if (!pf_to_mgmt) - return; - - is_send_dir = (SPHW_MSG_HEADER_GET(*(u64 *)header, DIRECTION) == - SPHW_MSG_DIRECT_SEND) ? true : false; - - recv_msg = is_send_dir ? 
&pf_to_mgmt->recv_msg_from_mgmt : - &pf_to_mgmt->recv_resp_msg_from_mgmt; - - recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg); -} - -/** - * alloc_recv_msg - allocate received message memory - * @recv_msg: pointer that will hold the allocated data - * Return: 0 - success, negative - failure - **/ -static int alloc_recv_msg(struct sphw_recv_msg *recv_msg) -{ - recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID; - - recv_msg->msg = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); - if (!recv_msg->msg) - return -ENOMEM; - - return 0; -} - -/** - * free_recv_msg - free received message memory - * @recv_msg: pointer that holds the allocated data - **/ -static void free_recv_msg(struct sphw_recv_msg *recv_msg) -{ - kfree(recv_msg->msg); -} - -/** - * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel - * @pf_to_mgmt: PF to MGMT channel - * Return: 0 - success, negative - failure - **/ -static int alloc_msg_buf(struct sphw_msg_pf_to_mgmt *pf_to_mgmt) -{ - int err; - void *dev = pf_to_mgmt->hwdev->dev_hdl; - - err = alloc_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); - if (err) { - sdk_err(dev, "Failed to allocate recv msg\n"); - return err; - } - - err = alloc_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); - if (err) { - sdk_err(dev, "Failed to allocate resp recv msg\n"); - goto alloc_msg_for_resp_err; - } - - pf_to_mgmt->async_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); - if (!pf_to_mgmt->async_msg_buf) { - err = -ENOMEM; - goto async_msg_buf_err; - } - - pf_to_mgmt->sync_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); - if (!pf_to_mgmt->sync_msg_buf) { - err = -ENOMEM; - goto sync_msg_buf_err; - } - - pf_to_mgmt->mgmt_ack_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); - if (!pf_to_mgmt->mgmt_ack_buf) { - err = -ENOMEM; - goto ack_msg_buf_err; - } - - return 0; - -ack_msg_buf_err: - kfree(pf_to_mgmt->sync_msg_buf); - -sync_msg_buf_err: - kfree(pf_to_mgmt->async_msg_buf); - -async_msg_buf_err: - free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); - -alloc_msg_for_resp_err: - free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); - return err; -} - -/** - * free_msg_buf - free all the message buffers of PF to MGMT channel - * @pf_to_mgmt: PF to MGMT channel - * Return: 0 - success, negative - failure - **/ -static void free_msg_buf(struct sphw_msg_pf_to_mgmt *pf_to_mgmt) -{ - kfree(pf_to_mgmt->mgmt_ack_buf); - kfree(pf_to_mgmt->sync_msg_buf); - kfree(pf_to_mgmt->async_msg_buf); - - free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); - free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); -} - -/** - * sphw_pf_to_mgmt_init - initialize PF to MGMT channel - * @hwdev: the pointer to hw device - * Return: 0 - success, negative - failure - **/ -int sphw_pf_to_mgmt_init(struct sphw_hwdev *hwdev) -{ - struct sphw_msg_pf_to_mgmt *pf_to_mgmt; - void *dev = hwdev->dev_hdl; - int err; - - pf_to_mgmt = kzalloc(sizeof(*pf_to_mgmt), GFP_KERNEL); - if (!pf_to_mgmt) - return -ENOMEM; - - hwdev->pf_to_mgmt = pf_to_mgmt; - pf_to_mgmt->hwdev = hwdev; - spin_lock_init(&pf_to_mgmt->async_msg_lock); - spin_lock_init(&pf_to_mgmt->sync_event_lock); - sema_init(&pf_to_mgmt->sync_msg_lock, 1); - pf_to_mgmt->workq = create_singlethread_workqueue(SPHW_MGMT_WQ_NAME); - if (!pf_to_mgmt->workq) { - sdk_err(dev, "Failed to initialize MGMT workqueue\n"); - err = -ENOMEM; - goto create_mgmt_workq_err; - } - - err = alloc_msg_buf(pf_to_mgmt); - if (err) { - sdk_err(dev, "Failed to allocate msg buffers\n"); - goto alloc_msg_buf_err; - } - - err = sphw_api_cmd_init(hwdev, pf_to_mgmt->cmd_chain); - if (err) { - sdk_err(dev, 
"Failed to init the api cmd chains\n"); - goto api_cmd_init_err; - } - - return 0; - -api_cmd_init_err: - free_msg_buf(pf_to_mgmt); - -alloc_msg_buf_err: - destroy_workqueue(pf_to_mgmt->workq); - -create_mgmt_workq_err: - kfree(pf_to_mgmt); - - return err; -} - -/** - * sphw_pf_to_mgmt_free - free PF to MGMT channel - * @hwdev: the pointer to hw device - **/ -void sphw_pf_to_mgmt_free(struct sphw_hwdev *hwdev) -{ - struct sphw_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt; - - /* destroy workqueue before free related pf_to_mgmt resources in case of - * illegal resource access - */ - destroy_workqueue(pf_to_mgmt->workq); - sphw_api_cmd_free(pf_to_mgmt->cmd_chain); - - free_msg_buf(pf_to_mgmt); - kfree(pf_to_mgmt); -} - -void sphw_flush_mgmt_workq(void *hwdev) -{ - struct sphw_hwdev *dev = (struct sphw_hwdev *)hwdev; - - flush_workqueue(dev->aeqs->workq); - - if (sphw_func_type(dev) != TYPE_VF) - flush_workqueue(dev->pf_to_mgmt->workq); -} - -int sphw_api_cmd_read_ack(void *hwdev, u8 dest, const void *cmd, u16 size, void *ack, u16 ack_size) -{ - struct sphw_msg_pf_to_mgmt *pf_to_mgmt = NULL; - struct sphw_api_cmd_chain *chain = NULL; - - if (!hwdev || !cmd || (ack_size && !ack)) - return -EINVAL; - - pf_to_mgmt = ((struct sphw_hwdev *)hwdev)->pf_to_mgmt; - chain = pf_to_mgmt->cmd_chain[SPHW_API_CMD_POLL_READ]; - - if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag)) - return -EPERM; - - return sphw_api_cmd_read(chain, dest, cmd, size, ack, ack_size); -} - -/** - * api cmd write or read bypass default use poll, if want to use aeq interrupt, - * please set wb_trigger_aeqe to 1 - **/ -int sphw_api_cmd_write_nack(void *hwdev, u8 dest, const void *cmd, u16 size) -{ - struct sphw_msg_pf_to_mgmt *pf_to_mgmt = NULL; - struct sphw_api_cmd_chain *chain = NULL; - - if (!hwdev || !size || !cmd) - return -EINVAL; - - pf_to_mgmt = ((struct sphw_hwdev *)hwdev)->pf_to_mgmt; - chain = pf_to_mgmt->cmd_chain[SPHW_API_CMD_POLL_WRITE]; - - if (!(((struct sphw_hwdev *)hwdev)->chip_present_flag)) - return -EPERM; - - return sphw_api_cmd_write(chain, dest, cmd, size); -} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt.h deleted file mode 100644 index 802336bd5cb1..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt.h +++ /dev/null @@ -1,106 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_MGMT_H -#define SPHW_MGMT_H - -#define SPHW_MGMT_WQ_NAME "sphw_mgmt" - -struct sphw_recv_msg { - void *msg; - - u16 msg_len; - enum sphw_mod_type mod; - u16 cmd; - u8 seq_id; - u16 msg_id; - int async_mgmt_to_pf; - - struct completion recv_done; -}; - -struct sphw_msg_head { - u8 status; - u8 version; - u8 resp_aeq_num; - u8 rsvd0[5]; -}; - -enum comm_pf_to_mgmt_event_state { - SEND_EVENT_UNINIT = 0, - SEND_EVENT_START, - SEND_EVENT_SUCCESS, - SEND_EVENT_FAIL, - SEND_EVENT_TIMEOUT, - SEND_EVENT_END, -}; - -enum sphw_mgmt_msg_cb_state { - SPHW_MGMT_MSG_CB_REG = 0, - SPHW_MGMT_MSG_CB_RUNNING, -}; - -struct sphw_msg_pf_to_mgmt { - struct sphw_hwdev *hwdev; - - /* Async cmd can not be scheduling */ - spinlock_t async_msg_lock; - struct semaphore sync_msg_lock; - - struct workqueue_struct *workq; - - void *async_msg_buf; - void *sync_msg_buf; - void *mgmt_ack_buf; - - struct sphw_recv_msg recv_msg_from_mgmt; - struct sphw_recv_msg recv_resp_msg_from_mgmt; - - u16 async_msg_id; - u16 sync_msg_id; - struct sphw_api_cmd_chain *cmd_chain[SPHW_API_CMD_MAX]; - - sphw_mgmt_msg_cb 
recv_mgmt_msg_cb[SPHW_MOD_HW_MAX]; - void *recv_mgmt_msg_data[SPHW_MOD_HW_MAX]; - unsigned long mgmt_msg_cb_state[SPHW_MOD_HW_MAX]; - - void *async_msg_cb_data[SPHW_MOD_HW_MAX]; - - /* lock when sending msg */ - spinlock_t sync_event_lock; - enum comm_pf_to_mgmt_event_state event_flag; -}; - -struct sphw_mgmt_msg_handle_work { - struct work_struct work; - struct sphw_msg_pf_to_mgmt *pf_to_mgmt; - - void *msg; - u16 msg_len; - - enum sphw_mod_type mod; - u16 cmd; - u16 msg_id; - - int async_mgmt_to_pf; -}; - -void sphw_mgmt_msg_aeqe_handler(void *handle, u8 *header, u8 size); - -int sphw_pf_to_mgmt_init(struct sphw_hwdev *hwdev); - -void sphw_pf_to_mgmt_free(struct sphw_hwdev *hwdev); - -int sphw_pf_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size, u32 timeout); -int sphw_pf_to_mgmt_async(void *hwdev, u8 mod, u16 cmd, const void *buf_in, u16 in_size); - -int sphw_pf_msg_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size, u32 timeout); - -int sphw_api_cmd_read_ack(void *hwdev, u8 dest, const void *cmd, u16 size, - void *ack, u16 ack_size); - -int sphw_api_cmd_write_nack(void *hwdev, u8 dest, const void *cmd, u16 size); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt_msg_base.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt_msg_base.h deleted file mode 100644 index 13f726895f58..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mgmt_msg_base.h +++ /dev/null @@ -1,19 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_MGMT_MSG_BASE_H -#define SPHW_MGMT_MSG_BASE_H - -#define MGMT_MSG_CMD_OP_SET 1 -#define MGMT_MSG_CMD_OP_GET 0 - -#define MGMT_MSG_CMD_OP_START 1 -#define MGMT_MSG_CMD_OP_STOP 0 - -struct mgmt_msg_head { - u8 status; - u8 version; - u8 rsvd0[6]; -}; - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mt.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mt.h deleted file mode 100644 index d7fb58054202..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_mt.h +++ /dev/null @@ -1,533 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_MT_H -#define SPHW_MT_H - -#define NICTOOL_CMD_TYPE 0x18 - -struct api_cmd_rd { - u32 pf_id; - u8 dest; - u8 *cmd; - u16 size; - void *ack; - u16 ack_size; -}; - -struct api_cmd_wr { - u32 pf_id; - u8 dest; - u8 *cmd; - u16 size; -}; - -struct pf_dev_info { - u64 bar0_size; - u8 bus; - u8 slot; - u8 func; - u64 phy_addr; -}; - -/* Indicates the maximum number of interrupts that can be recorded. - * Subsequent interrupts are not recorded in FFM. 
- */ -#define FFM_RECORD_NUM_MAX 64 - -struct ffm_intr_info { - u8 node_id; - /* error level of the interrupt source */ - u8 err_level; - /* Classification by interrupt source properties */ - u16 err_type; - u32 err_csr_addr; - u32 err_csr_value; -}; - -struct ffm_intr_tm_info { - struct ffm_intr_info intr_info; - u8 times; - u8 sec; - u8 min; - u8 hour; - u8 mday; - u8 mon; - u16 year; -}; - -struct ffm_record_info { - u32 ffm_num; - u32 last_err_csr_addr; - u32 last_err_csr_value; - struct ffm_intr_tm_info ffm[FFM_RECORD_NUM_MAX]; -}; - -struct dbgtool_k_glb_info { - struct semaphore dbgtool_sem; - struct ffm_record_info *ffm; -}; - -struct msg_2_up { - u8 pf_id; - u8 mod; - u8 cmd; - void *buf_in; - u16 in_size; - void *buf_out; - u16 *out_size; -}; - -struct dbgtool_param { - union { - struct api_cmd_rd api_rd; - struct api_cmd_wr api_wr; - struct pf_dev_info *dev_info; - struct ffm_record_info *ffm_rd; - struct msg_2_up msg2up; - } param; - char chip_name[16]; -}; - -/* dbgtool command type */ -/* You can add commands as required. The dbgtool command can be - * used to invoke all interfaces of the kernel-mode x86 driver. - */ -enum dbgtool_cmd { - DBGTOOL_CMD_API_RD = 0, - DBGTOOL_CMD_API_WR, - DBGTOOL_CMD_FFM_RD, - DBGTOOL_CMD_FFM_CLR, - DBGTOOL_CMD_PF_DEV_INFO_GET, - DBGTOOL_CMD_MSG_2_UP, - DBGTOOL_CMD_FREE_MEM, - DBGTOOL_CMD_NUM -}; - -#define PF_MAX_SIZE 16 -#define BUSINFO_LEN 32 -#define SELF_TEST_BAR_ADDR_OFFSET 0x883c - -enum module_name { - SEND_TO_NPU = 1, - SEND_TO_MPU, - SEND_TO_SM, - - SEND_TO_HW_DRIVER, - SEND_TO_NIC_DRIVER, - SEND_TO_OVS_DRIVER, - SEND_TO_ROCE_DRIVER, - SEND_TO_TOE_DRIVER, - SEND_TO_IWAP_DRIVER, - SEND_TO_FC_DRIVER, - SEND_FCOE_DRIVER, -}; - -enum driver_cmd_type { - TX_INFO = 1, - Q_NUM, - TX_WQE_INFO, - TX_MAPPING, - RX_INFO, - RX_WQE_INFO, - RX_CQE_INFO, - UPRINT_FUNC_EN, - UPRINT_FUNC_RESET, - UPRINT_SET_PATH, - UPRINT_GET_STATISTICS, - FUNC_TYPE, - GET_FUNC_IDX, - GET_INTER_NUM, - CLOSE_TX_STREAM, - GET_DRV_VERSION, - CLEAR_FUNC_STASTIC, - GET_HW_STATS, - CLEAR_HW_STATS, - GET_SELF_TEST_RES, - GET_CHIP_FAULT_STATS, - NIC_RSVD1, - NIC_RSVD2, - NIC_RSVD3, - GET_CHIP_ID, - GET_SINGLE_CARD_INFO, - GET_FIRMWARE_ACTIVE_STATUS, - ROCE_DFX_FUNC, - GET_DEVICE_ID, - GET_PF_DEV_INFO, - CMD_FREE_MEM, - GET_LOOPBACK_MODE = 32, - SET_LOOPBACK_MODE, - SET_LINK_MODE, - SET_PF_BW_LIMIT, - GET_PF_BW_LIMIT, - ROCE_CMD, - GET_POLL_WEIGHT, - SET_POLL_WEIGHT, - GET_HOMOLOGUE, - SET_HOMOLOGUE, - GET_SSET_COUNT, - GET_SSET_ITEMS, - IS_DRV_IN_VM, - LRO_ADPT_MGMT, - SET_INTER_COAL_PARAM, - GET_INTER_COAL_PARAM, - GET_CHIP_INFO, - GET_NIC_STATS_LEN, - GET_NIC_STATS_STRING, - GET_NIC_STATS_INFO, - GET_PF_ID, - NIC_RSVD4, - NIC_RSVD5, - DCB_QOS_INFO, - DCB_PFC_STATE, - DCB_ETS_STATE, - DCB_STATE, - NIC_RSVD6, - NIC_RSVD7, - GET_ULD_DEV_NAME, - - RSS_CFG = 0x40, - RSS_INDIR, - PORT_ID, - - GET_FUNC_CAP = 0x50, - - GET_WIN_STAT = 0x60, - WIN_CSR_READ = 0x61, - WIN_CSR_WRITE = 0x62, - WIN_API_CMD_RD = 0x63, - - VM_COMPAT_TEST = 0xFF -}; - -enum api_chain_cmd_type { - API_CSR_READ, - API_CSR_WRITE -}; - -enum sm_cmd_type { - SM_CTR_RD32 = 1, - SM_CTR_RD64_PAIR, - SM_CTR_RD64, - SM_CTR_RD32_CLEAR, - SM_CTR_RD64_PAIR_CLEAR, - SM_CTR_RD64_CLEAR -}; - -struct cqm_stats { - atomic_t cqm_cmd_alloc_cnt; - atomic_t cqm_cmd_free_cnt; - atomic_t cqm_send_cmd_box_cnt; - atomic_t cqm_send_cmd_imm_cnt; - atomic_t cqm_db_addr_alloc_cnt; - atomic_t cqm_db_addr_free_cnt; - atomic_t cqm_fc_srq_create_cnt; - atomic_t cqm_srq_create_cnt; - atomic_t cqm_rq_create_cnt; - atomic_t 
cqm_qpc_mpt_create_cnt; - atomic_t cqm_nonrdma_queue_create_cnt; - atomic_t cqm_rdma_queue_create_cnt; - atomic_t cqm_rdma_table_create_cnt; - atomic_t cqm_qpc_mpt_delete_cnt; - atomic_t cqm_nonrdma_queue_delete_cnt; - atomic_t cqm_rdma_queue_delete_cnt; - atomic_t cqm_rdma_table_delete_cnt; - atomic_t cqm_func_timer_clear_cnt; - atomic_t cqm_func_hash_buf_clear_cnt; - atomic_t cqm_scq_callback_cnt; - atomic_t cqm_ecq_callback_cnt; - atomic_t cqm_nocq_callback_cnt; - atomic_t cqm_aeq_callback_cnt[112]; -}; - -enum sphw_fault_err_level { - FAULT_LEVEL_FATAL, - FAULT_LEVEL_SERIOUS_RESET, - FAULT_LEVEL_HOST, - FAULT_LEVEL_SERIOUS_FLR, - FAULT_LEVEL_GENERAL, - FAULT_LEVEL_SUGGESTION, - FAULT_LEVEL_MAX, -}; - -struct link_event_stats { - atomic_t link_down_stats; - atomic_t link_up_stats; -}; - -enum sphw_fault_type { - FAULT_TYPE_CHIP, - FAULT_TYPE_UCODE, - FAULT_TYPE_MEM_RD_TIMEOUT, - FAULT_TYPE_MEM_WR_TIMEOUT, - FAULT_TYPE_REG_RD_TIMEOUT, - FAULT_TYPE_REG_WR_TIMEOUT, - FAULT_TYPE_PHY_FAULT, - FAULT_TYPE_MAX, -}; - -struct fault_event_stats { - atomic_t chip_fault_stats[22][FAULT_LEVEL_MAX]; - atomic_t fault_type_stat[FAULT_TYPE_MAX]; - atomic_t pcie_fault_stats; -}; - -struct sphw_hw_stats { - atomic_t heart_lost_stats; - struct cqm_stats cqm_stats; - struct link_event_stats link_event_stats; - struct fault_event_stats fault_event_stats; -}; - -#ifndef IFNAMSIZ -#define IFNAMSIZ 16 -#endif - -struct pf_info { - char name[IFNAMSIZ]; - char bus_info[BUSINFO_LEN]; - u32 pf_type; -}; - -struct card_info { - struct pf_info pf[PF_MAX_SIZE]; - u32 pf_num; -}; - -struct spnic_nic_loop_mode { - u32 loop_mode; - u32 loop_ctrl; -}; - -enum spnic_show_set { - SHOW_SSET_IO_STATS = 1, -}; - -#define SPNIC_SHOW_ITEM_LEN 32 -struct spnic_show_item { - char name[SPNIC_SHOW_ITEM_LEN]; - u8 hexadecimal; /* 0: decimal , 1: Hexadecimal */ - u8 rsvd[7]; - u64 value; -}; - -#define SPHW_CHIP_FAULT_SIZE (110 * 1024) -#define MAX_DRV_BUF_SIZE 4096 - -struct nic_cmd_chip_fault_stats { - u32 offset; - u8 chip_fault_stats[MAX_DRV_BUF_SIZE]; -}; - -#define NIC_TOOL_MAGIC 'x' - -#define CARD_MAX_SIZE 16 -struct nic_card_id { - u32 id[CARD_MAX_SIZE]; - u32 num; -}; - -struct func_pdev_info { - u64 bar0_phy_addr; - u64 bar0_size; - u64 bar1_phy_addr; - u64 bar1_size; - u64 bar3_phy_addr; - u64 bar3_size; - u64 rsvd1[4]; -}; - -struct sphw_card_func_info { - u32 num_pf; - u32 rsvd0; - u64 usr_api_phy_addr; - struct func_pdev_info pdev_info[CARD_MAX_SIZE]; -}; - -struct wqe_info { - int q_id; - void *slq_handle; - unsigned int wqe_id; -}; - -#define MAX_VER_INFO_LEN 128 -struct drv_version_info { - char ver[MAX_VER_INFO_LEN]; -}; - -struct spnic_tx_hw_page { - u64 phy_addr; - u64 *map_addr; -}; - -struct nic_sq_info { - u16 q_id; - u16 pi; - u16 ci; /* sw_ci */ - u16 fi; /* hw_ci */ - u32 q_depth; - u16 pi_reverse; - u16 wqebb_size; - u8 priority; - u16 *ci_addr; - u64 cla_addr; - void *slq_handle; - struct spnic_tx_hw_page direct_wqe; - struct spnic_tx_hw_page doorbell; - u32 page_idx; - u32 glb_sq_id; -}; - -struct nic_rq_info { - u16 q_id; - u16 glb_rq_id; - u16 hw_pi; - u16 ci; /* sw_ci */ - u16 sw_pi; - u16 wqebb_size; - u16 q_depth; - u16 buf_len; - - void *slq_handle; - u64 ci_wqe_page_addr; - u64 ci_cla_tbl_addr; - - u8 coalesc_timer_cfg; - u8 pending_limt; - u16 msix_idx; - u32 msix_vector; -}; - -#define MT_EPERM 1 /* Operation not permitted */ -#define MT_EIO 2 /* I/O error */ -#define MT_EINVAL 3 /* Invalid argument */ -#define MT_EBUSY 4 /* Device or resource busy */ -#define MT_EOPNOTSUPP 0xFF /* Operation not 
supported */ - -struct mt_msg_head { - u8 status; - u8 rsvd1[3]; -}; - -#define MT_DCB_OPCODE_WR BIT(0) /* 1 - write, 0 - read */ -struct spnic_mt_qos_info { - struct mt_msg_head head; - - u16 op_code; - u8 valid_cos_bitmap; - u8 valid_up_bitmap; - u32 rsvd1; -}; - -struct spnic_mt_dcb_state { - struct mt_msg_head head; - - u16 op_code; - u8 state; - u8 rsvd; -}; - -#define MT_DCB_ETS_UP_TC BIT(1) -#define MT_DCB_ETS_UP_BW BIT(2) -#define MT_DCB_ETS_UP_PRIO BIT(3) -#define MT_DCB_ETS_TC_BW BIT(4) -#define MT_DCB_ETS_TC_PRIO BIT(5) - -#define DCB_UP_TC_NUM 0x8 -struct spnic_mt_ets_state { - struct mt_msg_head head; - - u16 op_code; - u8 up_tc[DCB_UP_TC_NUM]; - u8 up_bw[DCB_UP_TC_NUM]; - u8 tc_bw[DCB_UP_TC_NUM]; - u8 up_prio_bitmap; - u8 tc_prio_bitmap; - u32 rsvd; -}; - -#define MT_DCB_PFC_PFC_STATE BIT(1) -#define MT_DCB_PFC_PFC_PRI_EN BIT(2) -struct spnic_mt_pfc_state { - struct mt_msg_head head; - - u16 op_code; - u8 state; - u8 pfc_en_bitpamp; - u32 rsvd; -}; - -enum mt_api_type { - API_TYPE_MBOX = 1, - API_TYPE_API_CHAIN_BYPASS, - API_TYPE_API_CHAIN_TO_MPU, -}; - -struct npu_cmd_st { - u32 mod : 8; - u32 cmd : 8; - u32 ack_type : 3; - u32 direct_resp : 1; - u32 len : 12; -}; - -struct mpu_cmd_st { - u32 api_type : 8; - u32 mod : 8; - u32 cmd : 16; -}; - -struct msg_module { - char device_name[IFNAMSIZ]; - u32 module; - union { - u32 msg_formate; /* for driver */ - struct npu_cmd_st npu_cmd; - struct mpu_cmd_st mpu_cmd; - }; - u32 timeout; /* for mpu/npu cmd */ - u32 func_idx; - u32 buf_in_size; - u32 buf_out_size; - void *in_buf; - void *out_buf; - int bus_num; - u32 rsvd2[5]; -}; - -int alloc_buff_in(void *hwdev, struct msg_module *nt_msg, u32 in_size, void **buf_in); - -int alloc_buff_out(void *hwdev, struct msg_module *nt_msg, u32 out_size, void **buf_out); - -void free_buff_in(void *hwdev, struct msg_module *nt_msg, void *buf_in); - -void free_buff_out(void *hwdev, struct msg_module *nt_msg, void *buf_out); - -int copy_buf_out_to_user(struct msg_module *nt_msg, u32 out_size, void *buf_out); - -int get_func_type(void *hwdev, const void *buf_in, u32 in_size, void *buf_out, u32 *out_size); - -int get_func_id(void *hwdev, const void *buf_in, u32 in_size, void *buf_out, u32 *out_size); - -int get_drv_version(void *hwdev, const void *buf_in, u32 in_size, void *buf_out, u32 *out_size); - -int get_hw_driver_stats(const void *hwdev, const void *buf_in, u32 in_size, void *buf_out, - u32 *out_size); - -int clear_hw_driver_stats(void *hwdev, const void *buf_in, u32 in_size, void *buf_out, - u32 *out_size); - -int get_chip_faults_stats(const void *hwdev, const void *buf_in, u32 in_size, void *buf_out, - u32 *out_size); - -int get_chip_id_test(void *hwdev, const void *buf_in, u32 in_size, void *buf_out, u32 *out_size); - -int send_to_mpu(void *hwdev, struct msg_module *nt_msg, void *buf_in, u32 in_size, void *buf_out, - u32 *out_size); -int send_to_npu(void *hwdev, struct msg_module *nt_msg, void *buf_in, u32 in_size, void *buf_out, - u32 *out_size); - -int send_to_sm(void *hwdev, struct msg_module *nt_msg, void *buf_in, u32 in_size, void *buf_out, - u32 *out_size); - -#endif /* SPHW_MT_H_ */ diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_prof_adap.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_prof_adap.c deleted file mode 100644 index 20ebda15cda2..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_prof_adap.c +++ /dev/null @@ -1,94 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": 
[COMM]" fmt - -#include <linux/kernel.h> -#include <linux/semaphore.h> -#include <linux/workqueue.h> -#include <linux/dev_printk.h> - -#include "sphw_common.h" -#include "sphw_hwdev.h" -#include "sphw_profile.h" -#include "sphw_prof_adap.h" - -typedef bool (*sphw_is_match_prof)(struct sphw_hwdev *hwdev); - -static bool is_match_prof_default_adapter(struct sphw_hwdev *hwdev) -{ - /* always match default profile adapter in standard scene */ - return true; -} - -enum prof_adapter_type { - PROF_ADAP_TYPE_PANGEA = 1, - - /* Add prof adapter type before default */ - PROF_ADAP_TYPE_DEFAULT, -}; - -/** - * struct sphw_prof_adapter - custom scene's profile adapter - * @type: adapter type - * @match: Check whether the current function is used in the custom scene. - * Implemented in the current source file - * @init: When @match return true, the initialization function called in probe. - * Implemented in the source file of the custom scene - * @deinit: When @match return true, the deinitialization function called when - * remove. Implemented in the source file of the custom scene - */ -struct sphw_prof_adapter { - enum prof_adapter_type type; - sphw_is_match_prof match; - sphw_init_prof_attr init; - sphw_deinit_prof_attr deinit; -}; - -struct sphw_prof_adapter prof_adap_objs[] = { - /* Add prof adapter before default profile */ - { - .type = PROF_ADAP_TYPE_DEFAULT, - .match = is_match_prof_default_adapter, - .init = NULL, - .deinit = NULL, - }, -}; - -void sphw_init_profile_adapter(struct sphw_hwdev *hwdev) -{ - struct sphw_prof_adapter *prof_obj = NULL; - u16 num_adap = ARRAY_SIZE(prof_adap_objs); - u16 i; - - for (i = 0; i < num_adap; i++) { - prof_obj = &prof_adap_objs[i]; - if (!(prof_obj->match && prof_obj->match(hwdev))) - continue; - - hwdev->prof_adap_type = prof_obj->type; - hwdev->prof_attr = prof_obj->init ? - prof_obj->init(hwdev) : NULL; - sdk_info(hwdev->dev_hdl, "Find profile adapter, type: %d\n", - hwdev->prof_adap_type); - - break; - } -} - -void sphw_deinit_profile_adapter(struct sphw_hwdev *hwdev) -{ - struct sphw_prof_adapter *prof_obj = NULL; - u16 num_adap = ARRAY_SIZE(prof_adap_objs); - u16 i; - - for (i = 0; i < num_adap; i++) { - prof_obj = &prof_adap_objs[i]; - if (hwdev->prof_adap_type != prof_obj->type) - continue; - - if (prof_obj->deinit) - prof_obj->deinit(hwdev->prof_attr); - break; - } -} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_prof_adap.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_prof_adap.h deleted file mode 100644 index f83d3a28c834..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_prof_adap.h +++ /dev/null @@ -1,49 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_PROF_ADAP_H -#define SPHW_PROF_ADAP_H - -#include <linux/workqueue.h> - -#include "sphw_profile.h" - -#define GET_PROF_ATTR_OPS(hwdev) \ - ((hwdev)->prof_attr ? 
(hwdev)->prof_attr->ops : NULL) - -static inline int sphw_get_work_cpu_affinity(struct sphw_hwdev *hwdev, - enum cpu_affinity_work_type type) -{ - struct sphw_prof_ops *ops = GET_PROF_ATTR_OPS(hwdev); - - if (ops && ops->get_work_cpu_affinity) - return ops->get_work_cpu_affinity(hwdev->prof_attr->priv_data, type); - - return WORK_CPU_UNBOUND; -} - -static inline void sphw_fault_post_process(struct sphw_hwdev *hwdev, u16 src, u16 level) -{ - struct sphw_prof_ops *ops = GET_PROF_ATTR_OPS(hwdev); - - if (ops && ops->fault_recover) - ops->fault_recover(hwdev->prof_attr->priv_data, src, level); -} - -static inline bool sphw_sw_feature_en(struct sphw_hwdev *hwdev, u64 feature_bit) -{ - if (!hwdev->prof_attr) - return false; - - return (hwdev->prof_attr->sw_feature_cap & feature_bit) && - (hwdev->prof_attr->dft_sw_feature & feature_bit); -} - -#define SW_FEATURE_EN(hwdev, f_bit) \ - sphw_sw_feature_en(hwdev, SPHW_SW_F_##f_bit) -#define SPHW_F_CHANNEL_LOCK_EN(hwdev) SW_FEATURE_EN(hwdev, CHANNEL_LOCK) - -void sphw_init_profile_adapter(struct sphw_hwdev *hwdev); -void sphw_deinit_profile_adapter(struct sphw_hwdev *hwdev); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_profile.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_profile.h deleted file mode 100644 index 0e1c6c91ba31..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_profile.h +++ /dev/null @@ -1,36 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_PROFILE_H -#define SPHW_PROFILE_H - -enum cpu_affinity_work_type { - WORK_TYPE_AEQ, - WORK_TYPE_MBOX, - WORK_TYPE_MGMT_MSG, - WORK_TYPE_COMM, -}; - -enum sphw_sw_features { - SPHW_SW_F_CHANNEL_LOCK = BIT(0), -}; - -struct sphw_prof_ops { - void (*fault_recover)(void *data, u16 src, u16 level); - int (*get_work_cpu_affinity)(void *data, u32 work_type); -}; - -struct sphw_prof_attr { - void *priv_data; - u64 hw_feature_cap; - u64 sw_feature_cap; - u64 dft_hw_feature; - u64 dft_sw_feature; - - struct sphw_prof_ops *ops; -}; - -typedef struct sphw_prof_attr *(*sphw_init_prof_attr)(void *hwdev); -typedef void (*sphw_deinit_prof_attr)(struct sphw_prof_attr *porf_attr); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_wq.c b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_wq.c deleted file mode 100644 index 0ec202dfc4d7..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_wq.c +++ /dev/null @@ -1,152 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt - -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/dma-mapping.h> -#include <linux/device.h> -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/slab.h> -#include <linux/spinlock.h> - -#include "sphw_common.h" -#include "sphw_wq.h" -#include "sphw_hwdev.h" - -#define WQ_MIN_DEPTH 64 -#define WQ_MAX_DEPTH 65536 -#define WQ_MAX_NUM_PAGES (PAGE_SIZE / sizeof(u64)) - -static int wq_init_wq_block(struct sphw_wq *wq) -{ - int i; - - if (WQ_IS_0_LEVEL_CLA(wq)) { - wq->wq_block_paddr = wq->wq_pages[0].align_paddr; - wq->wq_block_vaddr = wq->wq_pages[0].align_vaddr; - - return 0; - } - - if (wq->num_wq_pages > WQ_MAX_NUM_PAGES) { - sdk_err(wq->dev_hdl, "num_wq_pages exceed limit: %lu\n", - WQ_MAX_NUM_PAGES); - return -EFAULT; - } - - wq->wq_block_vaddr = dma_alloc_coherent(wq->dev_hdl, PAGE_SIZE, &wq->wq_block_paddr, - GFP_KERNEL); - if (!wq->wq_block_vaddr) { - sdk_err(wq->dev_hdl, "Failed to alloc wq 
block\n"); - return -ENOMEM; - } - - for (i = 0; i < wq->num_wq_pages; i++) - wq->wq_block_vaddr[i] = - cpu_to_be64(wq->wq_pages[i].align_paddr); - - return 0; -} - -static int wq_alloc_pages(struct sphw_wq *wq) -{ - int i, page_idx, err; - - wq->wq_pages = kcalloc(wq->num_wq_pages, sizeof(*wq->wq_pages), - GFP_KERNEL); - if (!wq->wq_pages) { - sdk_err(wq->dev_hdl, "Failed to alloc wq pages handle\n"); - return -ENOMEM; - } - - for (page_idx = 0; page_idx < wq->num_wq_pages; page_idx++) { - err = sphw_dma_alloc_coherent_align(wq->dev_hdl, wq->wq_page_size, - wq->wq_page_size, GFP_KERNEL, - &wq->wq_pages[page_idx]); - if (err) { - sdk_err(wq->dev_hdl, "Failed to alloc wq page\n"); - goto free_wq_pages; - } - } - - err = wq_init_wq_block(wq); - if (err) - goto free_wq_pages; - - return 0; - -free_wq_pages: - for (i = 0; i < page_idx; i++) - sphw_dma_free_coherent_align(wq->dev_hdl, &wq->wq_pages[i]); - - kfree(wq->wq_pages); - wq->wq_pages = NULL; - - return -ENOMEM; -} - -static void wq_free_pages(struct sphw_wq *wq) -{ - int i; - - if (!WQ_IS_0_LEVEL_CLA(wq)) - dma_free_coherent(wq->dev_hdl, PAGE_SIZE, wq->wq_block_vaddr, - wq->wq_block_paddr); - - for (i = 0; i < wq->num_wq_pages; i++) - sphw_dma_free_coherent_align(wq->dev_hdl, &wq->wq_pages[i]); - - kfree(wq->wq_pages); - wq->wq_pages = NULL; -} - -int sphw_wq_create(void *hwdev, struct sphw_wq *wq, u32 q_depth, u16 wqebb_size) -{ - struct sphw_hwdev *dev = hwdev; - u32 wq_page_size; - - if (!wq || !dev) { - pr_err("Invalid wq or dev_hdl\n"); - return -EINVAL; - } - - if (q_depth < WQ_MIN_DEPTH || q_depth > WQ_MAX_DEPTH || - (q_depth & (q_depth - 1)) || !wqebb_size || - (wqebb_size & (wqebb_size - 1))) { - sdk_err(dev->dev_hdl, "Wq q_depth(%u) or wqebb_size(%u) is invalid\n", - q_depth, wqebb_size); - return -EINVAL; - } - - wq_page_size = ALIGN(dev->wq_page_size, PAGE_SIZE); - - memset(wq, 0, sizeof(*wq)); - wq->dev_hdl = dev->dev_hdl; - wq->q_depth = q_depth; - wq->idx_mask = (u16)(q_depth - 1); - wq->wqebb_size = wqebb_size; - wq->wqebb_size_shift = (u16)ilog2(wq->wqebb_size); - wq->wq_page_size = wq_page_size; - - wq->wqebbs_per_page = wq_page_size / wqebb_size; - /* In case of wq_page_size is larger than q_depth * wqebb_size */ - if (wq->wqebbs_per_page > q_depth) - wq->wqebbs_per_page = q_depth; - wq->wqebbs_per_page_shift = (u16)ilog2(wq->wqebbs_per_page); - wq->wqebbs_per_page_mask = (u16)(wq->wqebbs_per_page - 1); - wq->num_wq_pages = (u16)(ALIGN(((u32)q_depth * wqebb_size), - wq_page_size) / wq_page_size); - - return wq_alloc_pages(wq); -} - -void sphw_wq_destroy(struct sphw_wq *wq) -{ - if (!wq) - return; - - wq_free_pages(wq); -} diff --git a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_wq.h b/drivers/net/ethernet/ramaxel/spnic/hw/sphw_wq.h deleted file mode 100644 index 01d564ca527a..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/hw/sphw_wq.h +++ /dev/null @@ -1,119 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPHW_WQ_H -#define SPHW_WQ_H - -struct sphw_wq { - u16 cons_idx; - u16 prod_idx; - - u32 q_depth; - u16 idx_mask; - u16 wqebb_size_shift; - u16 num_wq_pages; - u32 wqebbs_per_page; - u16 wqebbs_per_page_shift; - u16 wqebbs_per_page_mask; - - struct sphw_dma_addr_align *wq_pages; - - dma_addr_t wq_block_paddr; - u64 *wq_block_vaddr; - - void *dev_hdl; - u32 wq_page_size; - u16 wqebb_size; -} ____cacheline_aligned; - -int sphw_wq_create(void *hwdev, struct sphw_wq *wq, u32 q_depth, u16 wqebb_size); -void sphw_wq_destroy(struct sphw_wq *wq); - 
-#define WQ_MASK_IDX(wq, idx) ((idx) & (wq)->idx_mask) -#define WQ_MASK_PAGE(wq, pg_idx) ((pg_idx) < (wq)->num_wq_pages ? (pg_idx) : 0) -#define WQ_PAGE_IDX(wq, idx) ((idx) >> (wq)->wqebbs_per_page_shift) -#define WQ_OFFSET_IN_PAGE(wq, idx) ((idx) & (wq)->wqebbs_per_page_mask) -#define WQ_GET_WQEBB_ADDR(wq, pg_idx, idx_in_pg) \ - ((u8 *)(wq)->wq_pages[pg_idx].align_vaddr + ((idx_in_pg) << (wq)->wqebb_size_shift)) -#define WQ_IS_0_LEVEL_CLA(wq) ((wq)->num_wq_pages == 1) - -static inline u16 sphw_wq_free_wqebbs(struct sphw_wq *wq) -{ - return wq->q_depth - ((wq->q_depth + wq->prod_idx - wq->cons_idx) & - wq->idx_mask) - 1; -} - -static inline bool sphw_wq_is_empty(struct sphw_wq *wq) -{ - return WQ_MASK_IDX(wq, wq->prod_idx) == WQ_MASK_IDX(wq, wq->cons_idx); -} - -static inline void *sphw_wq_get_one_wqebb(struct sphw_wq *wq, u16 *pi) -{ - *pi = WQ_MASK_IDX(wq, wq->prod_idx); - wq->prod_idx++; - - return WQ_GET_WQEBB_ADDR(wq, WQ_PAGE_IDX(wq, *pi), - WQ_OFFSET_IN_PAGE(wq, *pi)); -} - -static inline void *sphw_wq_get_multi_wqebbs(struct sphw_wq *wq, u16 num_wqebbs, u16 *prod_idx, - void **second_part_wqebbs_addr, - u16 *first_part_wqebbs_num) -{ - u32 pg_idx, off_in_page; - - *prod_idx = WQ_MASK_IDX(wq, wq->prod_idx); - wq->prod_idx += num_wqebbs; - - pg_idx = WQ_PAGE_IDX(wq, *prod_idx); - off_in_page = WQ_OFFSET_IN_PAGE(wq, *prod_idx); - - if (off_in_page + num_wqebbs > wq->wqebbs_per_page) { - /* wqe across wq page boundary */ - *second_part_wqebbs_addr = - WQ_GET_WQEBB_ADDR(wq, WQ_MASK_PAGE(wq, pg_idx + 1), 0); - *first_part_wqebbs_num = wq->wqebbs_per_page - off_in_page; - } else { - *second_part_wqebbs_addr = NULL; - *first_part_wqebbs_num = num_wqebbs; - } - - return WQ_GET_WQEBB_ADDR(wq, pg_idx, off_in_page); -} - -static inline void sphw_wq_put_wqebbs(struct sphw_wq *wq, u16 num_wqebbs) -{ - wq->cons_idx += num_wqebbs; -} - -static inline void *sphw_wq_wqebb_addr(struct sphw_wq *wq, u16 idx) -{ - return WQ_GET_WQEBB_ADDR(wq, WQ_PAGE_IDX(wq, idx), - WQ_OFFSET_IN_PAGE(wq, idx)); -} - -static inline void *sphw_wq_read_one_wqebb(struct sphw_wq *wq, u16 *cons_idx) -{ - *cons_idx = WQ_MASK_IDX(wq, wq->cons_idx); - - return sphw_wq_wqebb_addr(wq, *cons_idx); -} - -static inline u64 sphw_wq_get_first_wqe_page_addr(struct sphw_wq *wq) -{ - return wq->wq_pages[0].align_paddr; -} - -static inline void sphw_wq_reset(struct sphw_wq *wq) -{ - u16 pg_idx; - - wq->cons_idx = 0; - wq->prod_idx = 0; - - for (pg_idx = 0; pg_idx < wq->num_wq_pages; pg_idx++) - memset(wq->wq_pages[pg_idx].align_vaddr, 0, wq->wq_page_size); -} - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_dbg.c b/drivers/net/ethernet/ramaxel/spnic/spnic_dbg.c deleted file mode 100644 index 910baed023a5..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_dbg.c +++ /dev/null @@ -1,752 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt - -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/types.h> -#include <linux/semaphore.h> - -#include "sphw_mt.h" -#include "sphw_crm.h" -#include "spnic_nic_dev.h" -#include "spnic_nic_dbg.h" -#include "spnic_nic_qp.h" -#include "spnic_rx.h" -#include "spnic_tx.h" -#include "spnic_dcb.h" - -typedef int (*nic_driv_module)(struct spnic_nic_dev *nic_dev, const void *buf_in, u32 in_size, - void *buf_out, u32 *out_size); - -struct nic_drv_module_handle { - enum driver_cmd_type driv_cmd_name; - nic_driv_module driv_func; -}; - -int get_nic_drv_version(void *buf_out, u32 
*out_size) -{ - struct drv_version_info *ver_info = buf_out; - - if (!buf_out) { - pr_err("Buf_out is NULL.\n"); - return -EINVAL; - } - - if (*out_size != sizeof(*ver_info)) { - pr_err("Unexpect out buf size from user :%u, expect: %lu\n", - *out_size, sizeof(*ver_info)); - return -EINVAL; - } - - snprintf(ver_info->ver, sizeof(ver_info->ver), "%s [compiled with the kernel]", - SPNIC_DRV_VERSION); - - return 0; -} - -static int get_tx_info(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - u16 q_id; - - if (!SPHW_CHANNEL_RES_VALID(nic_dev)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Netdev is down, can't get tx info\n"); - return -EFAULT; - } - - if (!buf_in || !buf_out) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Buf_in or buf_out is NULL.\n"); - return -EINVAL; - } - - q_id = *((u16 *)buf_in); - - return spnic_dbg_get_sq_info(nic_dev->hwdev, q_id, buf_out, *out_size); -} - -static int get_q_num(struct spnic_nic_dev *nic_dev, - const void *buf_in, u32 in_size, - void *buf_out, u32 *out_size) -{ - if (!SPHW_CHANNEL_RES_VALID(nic_dev)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Netdev is down, can't get queue number\n"); - return -EFAULT; - } - - if (!buf_out) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Get queue number para buf_out is NULL.\n"); - return -EINVAL; - } - - if (*out_size != sizeof(u16)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Unexpect out buf size from user: %u, expect: %lu\n", - *out_size, sizeof(u16)); - return -EINVAL; - } - - *((u16 *)buf_out) = nic_dev->q_params.num_qps; - - return 0; -} - -static int get_tx_wqe_info(struct spnic_nic_dev *nic_dev, - const void *buf_in, u32 in_size, - void *buf_out, u32 *out_size) -{ - const struct wqe_info *info = buf_in; - u16 wqebb_cnt = 1; - - if (!SPHW_CHANNEL_RES_VALID(nic_dev)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Netdev is down, can't get tx wqe info\n"); - return -EFAULT; - } - - if (!info || !buf_out) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Buf_in or buf_out is NULL.\n"); - return -EINVAL; - } - - return spnic_dbg_get_wqe_info(nic_dev->hwdev, (u16)info->q_id, - (u16)info->wqe_id, wqebb_cnt, - buf_out, (u16 *)out_size, SPNIC_SQ); -} - -static int get_rx_info(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - struct nic_rq_info *rq_info = buf_out; - u16 q_id; - int err; - - if (!SPHW_CHANNEL_RES_VALID(nic_dev)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Netdev is down, can't get rx info\n"); - return -EFAULT; - } - - if (!buf_in || !buf_out) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Buf_in or buf_out is NULL.\n"); - return -EINVAL; - } - - q_id = *((u16 *)buf_in); - - err = spnic_dbg_get_rq_info(nic_dev->hwdev, q_id, buf_out, *out_size); - - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Get rq info failed, ret is %d.\n", err); - return err; - } - - rq_info->ci = (u16)nic_dev->rxqs[q_id].cons_idx & - nic_dev->rxqs[q_id].q_mask; - - rq_info->sw_pi = nic_dev->rxqs[q_id].next_to_update; - rq_info->msix_vector = nic_dev->rxqs[q_id].irq_id; - - rq_info->coalesc_timer_cfg = nic_dev->rxqs[q_id].last_coalesc_timer_cfg; - rq_info->pending_limt = nic_dev->rxqs[q_id].last_pending_limt; - - return 0; -} - -static int get_rx_wqe_info(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - const struct wqe_info *info = buf_in; - u16 wqebb_cnt = 1; - - if (!SPHW_CHANNEL_RES_VALID(nic_dev)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - 
"Netdev is down, can't get rx wqe info\n"); - return -EFAULT; - } - - if (!info || !buf_out) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Buf_in or buf_out is NULL.\n"); - return -EINVAL; - } - - return spnic_dbg_get_wqe_info(nic_dev->hwdev, (u16)info->q_id, - (u16)info->wqe_id, wqebb_cnt, - buf_out, (u16 *)out_size, SPNIC_RQ); -} - -static int get_rx_cqe_info(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - const struct wqe_info *info = buf_in; - u16 q_id = 0; - u16 idx = 0; - - if (!SPHW_CHANNEL_RES_VALID(nic_dev)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Netdev is down, can't get rx cqe info\n"); - return -EFAULT; - } - - if (!info || !buf_out) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Buf_in or buf_out is NULL.\n"); - return -EINVAL; - } - - if (*out_size != sizeof(struct spnic_rq_cqe)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Unexpect out buf size from user :%u, expect: %lu\n", - *out_size, sizeof(struct spnic_rq_cqe)); - return -EINVAL; - } - q_id = (u16)info->q_id; - idx = (u16)info->wqe_id; - - if (q_id >= nic_dev->q_params.num_qps || - idx >= nic_dev->rxqs[q_id].q_depth) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Invalid q_id[%u] >= %u, or wqe idx[%u] >= %u.\n", - q_id, nic_dev->q_params.num_qps, idx, - nic_dev->rxqs[q_id].q_depth); - return -EFAULT; - } - - memcpy(buf_out, nic_dev->rxqs[q_id].rx_info[idx].cqe, - sizeof(struct spnic_rq_cqe)); - - return 0; -} - -static void clean_nicdev_stats(struct spnic_nic_dev *nic_dev) -{ - u64_stats_update_begin(&nic_dev->stats.syncp); - nic_dev->stats.netdev_tx_timeout = 0; - nic_dev->stats.tx_carrier_off_drop = 0; - nic_dev->stats.tx_invalid_qid = 0; - u64_stats_update_end(&nic_dev->stats.syncp); -} - -static int clear_func_static(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - int i; - - *out_size = 0; - clean_nicdev_stats(nic_dev); - for (i = 0; i < nic_dev->max_qps; i++) { - spnic_rxq_clean_stats(&nic_dev->rxqs[i].rxq_stats); - spnic_txq_clean_stats(&nic_dev->txqs[i].txq_stats); - } - - return 0; -} - -static int get_loopback_mode(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - struct spnic_nic_loop_mode *mode = buf_out; - - if (!out_size || !mode) - return -EINVAL; - - if (*out_size != sizeof(*mode)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Unexpect out buf size from user: %u, expect: %lu\n", - *out_size, sizeof(*mode)); - return -EINVAL; - } - - return spnic_get_loopback_mode(nic_dev->hwdev, (u8 *)&mode->loop_mode, - (u8 *)&mode->loop_ctrl); -} - -static int set_loopback_mode(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - const struct spnic_nic_loop_mode *mode = buf_in; - int err; - - if (!test_bit(SPNIC_INTF_UP, &nic_dev->flags)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Netdev is down, can't set loopback mode\n"); - return -EFAULT; - } - - if (!mode || !out_size || in_size != sizeof(*mode)) - return -EINVAL; - - if (*out_size != sizeof(*mode)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Unexpect out buf size from user: %u, expect: %lu\n", - *out_size, sizeof(*mode)); - return -EINVAL; - } - - err = spnic_set_loopback_mode(nic_dev->hwdev, (u8)mode->loop_mode, (u8)mode->loop_ctrl); - if (err == 0) - nicif_info(nic_dev, drv, nic_dev->netdev, "Set loopback mode %u en %u succeed\n", - mode->loop_mode, mode->loop_ctrl); - - return err; -} - -enum spnic_nic_link_mode { - 
SPNIC_LINK_MODE_AUTO = 0, - SPNIC_LINK_MODE_UP, - SPNIC_LINK_MODE_DOWN, - SPNIC_LINK_MODE_MAX, -}; - -static int set_link_mode_param_valid(struct spnic_nic_dev *nic_dev, - const void *buf_in, u32 in_size, - u32 *out_size) -{ - if (!test_bit(SPNIC_INTF_UP, &nic_dev->flags)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Netdev is down, can't set link mode\n"); - return -EFAULT; - } - - if (!buf_in || !out_size || - in_size != sizeof(enum spnic_nic_link_mode)) - return -EINVAL; - - if (*out_size != sizeof(enum spnic_nic_link_mode)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Unexpect out buf size from user: %u, expect: %lu\n", - *out_size, sizeof(enum spnic_nic_link_mode)); - return -EINVAL; - } - - return 0; -} - -static int set_link_mode(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - const enum spnic_nic_link_mode *link = buf_in; - u8 link_status; - - if (set_link_mode_param_valid(nic_dev, buf_in, in_size, out_size)) - return -EFAULT; - - switch (*link) { - case SPNIC_LINK_MODE_AUTO: - if (spnic_get_link_state(nic_dev->hwdev, &link_status)) - link_status = false; - spnic_link_status_change(nic_dev, (bool)link_status); - nicif_info(nic_dev, drv, nic_dev->netdev, - "Set link mode: auto succeed, now is link %s\n", - (link_status ? "up" : "down")); - break; - case SPNIC_LINK_MODE_UP: - spnic_link_status_change(nic_dev, true); - nicif_info(nic_dev, drv, nic_dev->netdev, - "Set link mode: up succeed\n"); - break; - case SPNIC_LINK_MODE_DOWN: - spnic_link_status_change(nic_dev, false); - nicif_info(nic_dev, drv, nic_dev->netdev, - "Set link mode: down succeed\n"); - break; - default: - nicif_err(nic_dev, drv, nic_dev->netdev, - "Invalid link mode %d to set\n", *link); - return -EINVAL; - } - - return 0; -} - -static int get_sset_count(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - u32 count; - - if (!buf_in || in_size != sizeof(u32) || !out_size || - *out_size != sizeof(u32) || !buf_out) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid parameters, in_size: %u\n", - in_size); - return -EINVAL; - } - - switch (*((u32 *)buf_in)) { - case SHOW_SSET_IO_STATS: - count = spnic_get_io_stats_size(nic_dev); - break; - default: - count = 0; - break; - } - - *((u32 *)buf_out) = count; - - return 0; -} - -static int get_sset_stats(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - struct spnic_show_item *items = buf_out; - u32 sset, count, size; - int err; - - if (!buf_in || in_size != sizeof(u32) || !out_size || !buf_out) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid parameters, in_size: %u\n", - in_size); - return -EINVAL; - } - - size = sizeof(u32); - err = get_sset_count(nic_dev, buf_in, in_size, &count, &size); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Get sset count failed, ret=%d\n", - err); - return -EINVAL; - } - if (count * sizeof(*items) != *out_size) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Unexpect out buf size from user :%u, expect: %lu\n", - *out_size, count * sizeof(*items)); - return -EINVAL; - } - - sset = *((u32 *)buf_in); - - switch (sset) { - case SHOW_SSET_IO_STATS: - spnic_get_io_stats(nic_dev, items); - break; - - default: - nicif_err(nic_dev, drv, nic_dev->netdev, "Unknown %u to get stats\n", - sset); - err = -EINVAL; - break; - } - - return err; -} - -static int dcb_mt_qos_map(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - const struct 
spnic_mt_qos_info *qos = buf_in; - struct spnic_mt_qos_info *qos_out = buf_out; - u8 up_cnt, up; - int err; - - if (!buf_out || !out_size || !buf_in) - return -EINVAL; - - if (*out_size != sizeof(*qos_out) || in_size != sizeof(*qos)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Unexpect buf size from user, in_size: %u, out_size: %u, expect: %lu\n", - in_size, *out_size, sizeof(*qos)); - return -EINVAL; - } - - memcpy(qos_out, qos, sizeof(*qos)); - qos_out->head.status = 0; - if (qos->op_code & MT_DCB_OPCODE_WR) { - up_cnt = 0; - for (up = 0; up < SPNIC_DCB_UP_MAX; up++) { - if (qos->valid_up_bitmap & BIT(up)) - up_cnt++; - } - - if (up_cnt != nic_dev->wanted_dcb_cfg.max_cos) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Invalid up bitmap: 0x%x", - qos->valid_up_bitmap); - qos_out->head.status = MT_EINVAL; - return 0; - } - - err = spnic_dcbcfg_set_up_bitmap(nic_dev, qos->valid_up_bitmap); - if (err) - qos_out->head.status = MT_EIO; - } else { - qos_out->valid_up_bitmap = - spnic_get_valid_up_bitmap(&nic_dev->wanted_dcb_cfg); - qos_out->valid_cos_bitmap = - nic_dev->wanted_dcb_cfg.valid_cos_bitmap; - } - - return 0; -} - -static int dcb_mt_dcb_state(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - const struct spnic_mt_dcb_state *dcb = buf_in; - struct spnic_mt_dcb_state *dcb_out = buf_out; - int err; - - if (!buf_in || !buf_out || !out_size) - return -EINVAL; - - if (*out_size != sizeof(*dcb_out) || in_size != sizeof(*dcb)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Unexpect buf size from user, in_size: %u, out_size: %u, expect: %lu\n", - in_size, *out_size, sizeof(*dcb)); - return -EINVAL; - } - - memcpy(dcb_out, dcb, sizeof(*dcb)); - dcb_out->head.status = 0; - if (dcb->op_code & MT_DCB_OPCODE_WR) { - if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags) == dcb->state) - return 0; - - /* nic_mutex has been acquired by send_to_nic_driver and will - * also be acquired inside spnic_setup_tc - */ - mutex_unlock(&nic_dev->nic_mutex); - rtnl_lock(); - err = spnic_setup_tc(nic_dev->netdev, - dcb->state ? 
nic_dev->wanted_dcb_cfg.max_cos : 0); - rtnl_unlock(); - mutex_lock(&nic_dev->nic_mutex); - if (err) - dcb_out->head.status = MT_EIO; - } else { - dcb_out->state = !!test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags); - } - - return 0; -} - -static int dcb_mt_pfc_state(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - const struct spnic_mt_pfc_state *pfc = buf_in; - struct spnic_mt_pfc_state *pfc_out = buf_out; - u8 cur_pfc_state, cur_pfc_en_bitmap; - int err; - - if (!buf_in || !buf_out || !out_size) - return -EINVAL; - - if (*out_size != sizeof(*pfc_out) || in_size != sizeof(*pfc)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Unexpect buf size from user, in_size: %u, out_size: %u, expect: %lu\n", - in_size, *out_size, sizeof(*pfc)); - return -EINVAL; - } - - cur_pfc_state = spnic_dcbcfg_get_pfc_state(nic_dev); - cur_pfc_en_bitmap = spnic_dcbcfg_get_pfc_pri_en(nic_dev); - - memcpy(pfc_out, pfc, sizeof(*pfc)); - pfc_out->head.status = 0; - if (pfc->op_code & MT_DCB_OPCODE_WR) { - if (pfc->op_code & MT_DCB_PFC_PFC_STATE) - spnic_dcbcfg_set_pfc_state(nic_dev, pfc->state); - - if (pfc->op_code & MT_DCB_PFC_PFC_PRI_EN) - spnic_dcbcfg_set_pfc_pri_en(nic_dev, pfc->pfc_en_bitpamp); - - if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) { - err = spnic_configure_dcb(nic_dev->netdev); - if (err) { - pfc_out->head.status = MT_EIO; - goto set_err; - } - } - } else { - pfc_out->state = cur_pfc_state; - pfc_out->pfc_en_bitpamp = cur_pfc_en_bitmap; - } - - return 0; - -set_err: - spnic_dcbcfg_set_pfc_state(nic_dev, cur_pfc_state); - spnic_dcbcfg_set_pfc_pri_en(nic_dev, cur_pfc_en_bitmap); - if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) { - err = spnic_configure_dcb(nic_dev->netdev); - if (err) - nicif_warn(nic_dev, drv, nic_dev->netdev, - "Failed to rollback pfc config\n"); - } - return 0; -} - -static int dcb_mt_ets_state(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - const struct spnic_mt_ets_state *ets = buf_in; - struct spnic_mt_ets_state *ets_out = buf_out; - struct spnic_dcb_config dcb_cfg_backup; - int err; - - if (!buf_in || !buf_out || !out_size) - return -EINVAL; - - if (*out_size != sizeof(*ets_out) || in_size != sizeof(*ets)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Unexpect buf size from user, in_size: %u, out_size: %u, expect: %lu\n", - in_size, *out_size, sizeof(*ets)); - return -EINVAL; - } - - memcpy(ets_out, ets, sizeof(*ets)); - ets_out->head.status = 0; - if (ets->op_code & MT_DCB_OPCODE_WR) { - if (ets->op_code & (MT_DCB_ETS_UP_BW | MT_DCB_ETS_UP_PRIO)) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Not support to set up bw and up prio\n"); - ets_out->head.status = MT_EOPNOTSUPP; - return 0; - } - - dcb_cfg_backup = nic_dev->wanted_dcb_cfg; - - if (ets->op_code & MT_DCB_ETS_UP_TC) { - err = spnic_dcbcfg_set_ets_up_tc_map(nic_dev, ets->up_tc); - if (err) { - ets_out->head.status = MT_EIO; - return 0; - } - } - if (ets->op_code & MT_DCB_ETS_TC_BW) { - err = spnic_dcbcfg_set_ets_tc_bw(nic_dev, ets->tc_bw); - if (err) { - ets_out->head.status = MT_EIO; - goto set_err; - } - } - if (ets->op_code & MT_DCB_ETS_TC_PRIO) - spnic_dcbcfg_set_ets_tc_prio_type(nic_dev, ets->tc_prio_bitmap); - - if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) { - err = spnic_configure_dcb(nic_dev->netdev); - if (err) { - ets_out->head.status = MT_EIO; - goto set_err; - } - } - } else { - spnic_dcbcfg_get_ets_up_tc_map(nic_dev, ets_out->up_tc); - spnic_dcbcfg_get_ets_tc_bw(nic_dev, ets_out->tc_bw); - 
spnic_dcbcfg_get_ets_tc_prio_type(nic_dev, &ets_out->tc_prio_bitmap); - } - - return 0; - -set_err: - nic_dev->wanted_dcb_cfg = dcb_cfg_backup; - if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) { - err = spnic_configure_dcb(nic_dev->netdev); - if (err) - nicif_warn(nic_dev, drv, nic_dev->netdev, - "Failed to rollback ets config\n"); - } - - return 0; -} - -static int get_inter_num(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - u16 intr_num; - - intr_num = sphw_intr_num(nic_dev->hwdev); - - if (*out_size != sizeof(u16)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Unexpect out buf size from user :%u, expect: %lu\n", - *out_size, sizeof(u16)); - return -EFAULT; - } - *(u16 *)buf_out = intr_num; - - *out_size = sizeof(u16); - - return 0; -} - -static int get_netdev_name(struct spnic_nic_dev *nic_dev, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - if (*out_size != IFNAMSIZ) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Unexpect out buf size from user :%u, expect: %u\n", - *out_size, IFNAMSIZ); - return -EFAULT; - } - - strlcpy(buf_out, nic_dev->netdev->name, IFNAMSIZ); - - return 0; -} - -struct nic_drv_module_handle nic_driv_module_cmd_handle[] = { - {TX_INFO, get_tx_info}, - {Q_NUM, get_q_num}, - {TX_WQE_INFO, get_tx_wqe_info}, - {RX_INFO, get_rx_info}, - {RX_WQE_INFO, get_rx_wqe_info}, - {RX_CQE_INFO, get_rx_cqe_info}, - {GET_INTER_NUM, get_inter_num}, - {CLEAR_FUNC_STASTIC, clear_func_static}, - {GET_LOOPBACK_MODE, get_loopback_mode}, - {SET_LOOPBACK_MODE, set_loopback_mode}, - {SET_LINK_MODE, set_link_mode}, - {GET_SSET_COUNT, get_sset_count}, - {GET_SSET_ITEMS, get_sset_stats}, - {DCB_QOS_INFO, dcb_mt_qos_map}, - {DCB_STATE, dcb_mt_dcb_state}, - {DCB_PFC_STATE, dcb_mt_pfc_state}, - {DCB_ETS_STATE, dcb_mt_ets_state}, - {GET_ULD_DEV_NAME, get_netdev_name}, -}; - -static int send_to_nic_driver(struct spnic_nic_dev *nic_dev, - u32 cmd, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - int index, num_cmds = sizeof(nic_driv_module_cmd_handle) / - sizeof(nic_driv_module_cmd_handle[0]); - enum driver_cmd_type cmd_type = (enum driver_cmd_type)cmd; - int err = 0; - - mutex_lock(&nic_dev->nic_mutex); - for (index = 0; index < num_cmds; index++) { - if (cmd_type == - nic_driv_module_cmd_handle[index].driv_cmd_name) { - err = nic_driv_module_cmd_handle[index].driv_func - (nic_dev, buf_in, - in_size, buf_out, out_size); - break; - } - } - mutex_unlock(&nic_dev->nic_mutex); - - if (index == num_cmds) - pr_err("Can't find callback for %d\n", cmd_type); - - return err; -} - -int nic_ioctl(void *uld_dev, u32 cmd, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size) -{ - if (cmd == GET_DRV_VERSION) - return get_nic_drv_version(buf_out, out_size); - else if (!uld_dev) - return -EINVAL; - - return send_to_nic_driver(uld_dev, cmd, buf_in, - in_size, buf_out, out_size); -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_dcb.c b/drivers/net/ethernet/ramaxel/spnic/spnic_dcb.c deleted file mode 100644 index 7108430e0618..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_dcb.c +++ /dev/null @@ -1,965 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt - -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/device.h> -#include <linux/module.h> -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/interrupt.h> -#include <linux/etherdevice.h> -#include 
<linux/netdevice.h> - -#include "sphw_crm.h" -#include "spnic_lld.h" -#include "spnic_nic_cfg.h" -#include "spnic_nic_dev.h" -#include "spnic_dcb.h" - -#define DCB_CFG_CHG_ETS BIT(0) -#define DCB_CFG_CHG_PFC BIT(1) -#define DCB_CFG_CHG_UP_COS BIT(2) - -#define MAX_BW_PERCENT 100 - -void spnic_set_prio_tc_map(struct spnic_nic_dev *nic_dev) -{ - struct spnic_dcb_config *dcb_cfg = &nic_dev->hw_dcb_cfg; - u8 valid_up_bitmap = spnic_get_valid_up_bitmap(dcb_cfg); - u8 default_tc = dcb_cfg->max_cos - 1; - u8 i, tc_id; - - /* use 0~max_cos-1 as tc for netdev */ - for (tc_id = 0, i = 0; i < SPNIC_DCB_COS_MAX; i++) { - if (dcb_cfg->valid_cos_bitmap & BIT(i)) { - netdev_set_prio_tc_map(nic_dev->netdev, - dcb_cfg->cos_cfg[i].up, tc_id); - tc_id++; - } - } - - /* set invalid up mapping to the default tc */ - for (i = 0; i < SPNIC_DCB_UP_MAX; i++) { - if (!(valid_up_bitmap & BIT(i))) - netdev_set_prio_tc_map(nic_dev->netdev, i, default_tc); - } -} - -void spnic_update_tx_db_cos(struct spnic_nic_dev *nic_dev) -{ - u8 i, valid_cos_bitmap, cos; - u16 num_rss; - - if (!test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) { - spnic_set_txq_cos(nic_dev, 0, nic_dev->q_params.num_qps, - nic_dev->hw_dcb_cfg.default_cos); - return; - } - - num_rss = nic_dev->q_params.num_rss; - valid_cos_bitmap = nic_dev->hw_dcb_cfg.valid_cos_bitmap; - for (i = 0; i < nic_dev->q_params.num_tc; i++) { - cos = (u8)(ffs(valid_cos_bitmap) - 1); - spnic_set_txq_cos(nic_dev, (u16)(i * num_rss), num_rss, cos); - valid_cos_bitmap &= (~BIT(cos)); - } -} - -int spnic_set_tx_cos_state(struct spnic_nic_dev *nic_dev) -{ - struct spnic_dcb_config *dcb_cfg = &nic_dev->hw_dcb_cfg; - struct spnic_dcb_state dcb_state = {0}; - u8 default_cos, i; - int err; - - if (SPNIC_FUNC_IS_VF(nic_dev->hwdev)) { - err = spnic_get_pf_dcb_state(nic_dev->hwdev, &dcb_state); - if (err) { - spnic_err(nic_dev, drv, "Failed to get vf default cos\n"); - return err; - } - /* VF does not support DCB, use the default cos */ - dcb_cfg->default_cos = dcb_state.default_cos; - - return 0; - } - - default_cos = dcb_cfg->default_cos; - dcb_state.dcb_on = !!test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags); - dcb_state.default_cos = default_cos; - memset(dcb_state.up_cos, default_cos, sizeof(dcb_state.up_cos)); - if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) { - for (i = 0; i < SPNIC_DCB_COS_MAX; i++) { - if (dcb_cfg->valid_cos_bitmap & BIT(i)) - dcb_state.up_cos[dcb_cfg->cos_cfg[i].up] = i; - } - } - - err = spnic_set_dcb_state(nic_dev->hwdev, &dcb_state); - if (err) - spnic_err(nic_dev, drv, "Failed to set dcb state\n"); - - return err; -} - -static void setup_tc_reopen_handler(struct spnic_nic_dev *nic_dev, - const void *priv_data) -{ - u8 tc = *((u8 *)priv_data); - - if (tc) { - netdev_set_num_tc(nic_dev->netdev, tc); - spnic_set_prio_tc_map(nic_dev); - - set_bit(SPNIC_DCB_ENABLE, &nic_dev->flags); - } else { - netdev_reset_tc(nic_dev->netdev); - - clear_bit(SPNIC_DCB_ENABLE, &nic_dev->flags); - } - - spnic_set_tx_cos_state(nic_dev); -} - -int spnic_setup_tc(struct net_device *netdev, u8 tc) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct spnic_dyna_txrxq_params q_params = {0}; - u8 cur_tc; - int err; - - if (tc && test_bit(SPNIC_SAME_RXTX, &nic_dev->flags)) { - nicif_err(nic_dev, drv, netdev, "Failed to enable DCB while Symmetric RSS is enabled\n"); - return -EOPNOTSUPP; - } - - if (tc > nic_dev->hw_dcb_cfg.max_cos) { - nicif_err(nic_dev, drv, netdev, "Invalid num_tc: %u, max tc: %u\n", - tc, nic_dev->hw_dcb_cfg.max_cos); - return -EINVAL; - } - - if (tc & (tc - 1)) 
{ - nicif_err(nic_dev, drv, netdev, - "Invalid num_tc: %u, must be power of 2\n", tc); - return -EINVAL; - } - - if (netif_running(netdev)) { - cur_tc = nic_dev->q_params.num_tc; - q_params = nic_dev->q_params; - q_params.num_tc = tc; - q_params.txqs_res = NULL; - q_params.rxqs_res = NULL; - q_params.irq_cfg = NULL; - - nicif_info(nic_dev, drv, netdev, "Change num_tc to %u, restarting channel\n", - tc); - err = spnic_change_channel_settings(nic_dev, &q_params, setup_tc_reopen_handler, - &tc); - if (err) { - if (cur_tc != nic_dev->q_params.num_tc) { - nicif_err(nic_dev, drv, netdev, - "Restore num_tc to %u\n", cur_tc); - /* In this case, the channel resource is - * invalid, so we can safely modify the number - * of tc in netdev. - */ - nic_dev->q_params.num_tc = cur_tc; - setup_tc_reopen_handler(nic_dev, &cur_tc); - } - nicif_err(nic_dev, drv, netdev, "Failed to change channel settings\n"); - return err; - } - } else { - setup_tc_reopen_handler(nic_dev, &tc); - spnic_update_num_qps(netdev); - } - - spnic_configure_dcb(netdev); - - return 0; -} - -/* Ucode thread timeout is 210ms, must be lagger then 210ms */ -#define SPNIC_WAIT_PORT_IO_STOP 250 - -static int spnic_stop_port_traffic_flow(struct spnic_nic_dev *nic_dev, bool wait) -{ - int err = 0; - - down(&nic_dev->dcb_sem); - - if (nic_dev->disable_port_cnt++ != 0) - goto out; - - err = spnic_force_port_disable(nic_dev); - if (err) { - spnic_err(nic_dev, drv, "Failed to disable port\n"); - goto set_port_err; - } - - err = spnic_set_port_funcs_state(nic_dev->hwdev, false); - if (err) { - spnic_err(nic_dev, drv, "Failed to disable all functions in port\n"); - goto set_port_funcs_err; - } - - spnic_info(nic_dev, drv, "Stop port traffic flow\n"); - - goto out; - -set_port_funcs_err: - spnic_force_set_port_state(nic_dev, !!netif_running(nic_dev->netdev)); - -set_port_err: -out: - if (err) - nic_dev->disable_port_cnt--; - - up(&nic_dev->dcb_sem); - if (!err && wait && nic_dev->netdev->reg_state == NETREG_REGISTERED) - msleep(SPNIC_WAIT_PORT_IO_STOP); - - return err; -} - -static int spnic_start_port_traffic_flow(struct spnic_nic_dev *nic_dev) -{ - int err; - - down(&nic_dev->dcb_sem); - - nic_dev->disable_port_cnt--; - if (nic_dev->disable_port_cnt > 0) { - up(&nic_dev->dcb_sem); - return 0; - } - - nic_dev->disable_port_cnt = 0; - up(&nic_dev->dcb_sem); - - err = spnic_force_set_port_state(nic_dev, !!netif_running(nic_dev->netdev)); - if (err) - spnic_err(nic_dev, drv, "Failed to disable port\n"); - - err = spnic_set_port_funcs_state(nic_dev->hwdev, true); - if (err) - spnic_err(nic_dev, drv, "Failed to disable all functions in port\n"); - - spnic_info(nic_dev, drv, "Start port traffic flow\n"); - - return err; -} - -static u8 get_cos_settings(u8 hw_valid_cos_bitmap, u8 *dst_valid_cos_bitmap) -{ - u8 support_cos = 0; - u8 num_cos, overflow; - u8 i; - - for (i = 0; i < SPNIC_DCB_COS_MAX; i++) { - if (hw_valid_cos_bitmap & BIT(i)) - support_cos++; - } - - num_cos = (u8)(1U << (u8)ilog2(support_cos)); - if (num_cos != support_cos) { - /* Remove unused cos id */ - overflow = support_cos - num_cos; - i = SPNIC_DCB_COS_MAX - 1; - while (overflow) { - if (hw_valid_cos_bitmap & BIT(i)) { - hw_valid_cos_bitmap &= (~BIT(i)); - overflow--; - } - - i--; - } - } - - *dst_valid_cos_bitmap = hw_valid_cos_bitmap; - - return num_cos; -} - -static int get_dft_valid_up_bitmap(struct spnic_nic_dev *nic_dev, u8 num_pri, - u8 *valid_up_bitmap) -{ - bool setted = false; - u8 up_bitmap = 0; - u8 up; - int err; - - err = spnic_get_chip_up_bitmap(nic_dev->pdev, &setted, 
&up_bitmap); - if (err) { - spnic_err(nic_dev, drv, "Get chip cos_up map failed\n"); - return -EFAULT; - } - - if (!setted) { - /* Use (num_cos-1)~0 as default user priority */ - for (up = 0; up < num_pri; up++) - up_bitmap |= (u8)BIT(up); - } - - err = spnic_set_chip_up_bitmap(nic_dev->pdev, up_bitmap); - if (err) { - spnic_err(nic_dev, drv, "Set chip cos_up map failed\n"); - return -EFAULT; - } - - *valid_up_bitmap = up_bitmap; - - return 0; -} - -u8 spnic_get_valid_up_bitmap(struct spnic_dcb_config *dcb_cfg) -{ - u8 valid_up_bitmap = 0; - u8 i; - - for (i = 0; i < SPNIC_DCB_COS_MAX; i++) { - if (dcb_cfg->valid_cos_bitmap & BIT(i)) - valid_up_bitmap |= (u8)BIT(dcb_cfg->cos_cfg[i].up); - } - - return valid_up_bitmap; -} - -static void update_valid_up_bitmap(struct spnic_dcb_config *dcb_cfg, - u8 valid_up_bitmap) -{ - u8 i, up; - - for (i = 0; i < SPNIC_DCB_COS_MAX; i++) { - if (!(dcb_cfg->valid_cos_bitmap & BIT(i))) { - dcb_cfg->cos_cfg[i].up = 0; - continue; - } - - /* get the highest priority */ - up = (u8)fls(valid_up_bitmap) - 1; - valid_up_bitmap &= (~BIT(up)); - - dcb_cfg->cos_cfg[i].up = up; - } -} - -static int init_default_dcb_cfg(struct spnic_nic_dev *nic_dev, - struct spnic_dcb_config *dcb_cfg) -{ - struct spnic_cos_cfg *cos_cfg = dcb_cfg->cos_cfg; - struct spnic_tc_cfg *tc_cfg = dcb_cfg->tc_cfg; - u8 valid_cos_bitmap, i; - u8 valid_up_bitmap = 0; - int err; - - valid_cos_bitmap = sphw_cos_valid_bitmap(nic_dev->hwdev); - if (!valid_cos_bitmap) { - spnic_err(nic_dev, drv, "None cos supported\n"); - return -EFAULT; - } - - dcb_cfg->max_cos = get_cos_settings(valid_cos_bitmap, - &dcb_cfg->valid_cos_bitmap); - dcb_cfg->default_cos = (u8)fls(dcb_cfg->valid_cos_bitmap) - 1; - - err = get_dft_valid_up_bitmap(nic_dev, dcb_cfg->max_cos, - &valid_up_bitmap); - if (err) - return err; - - for (i = 0; i < SPNIC_DCB_COS_MAX; i++) { - /* set all cos with 100 percent bw in default */ - cos_cfg[i].bw_pct = MAX_BW_PERCENT; - cos_cfg[i].prio_sp = 0; /* DWRR */ - cos_cfg[i].tc_id = 0; /* all cos mapping to tc0 */ - cos_cfg[i].up = 0; - } - - update_valid_up_bitmap(dcb_cfg, valid_up_bitmap); - - for (i = 0; i < SPNIC_DCB_TC_MAX; i++) { - /* tc0 with 100 percent bw in default */ - tc_cfg[i].bw_pct = (i == 0) ? 
MAX_BW_PERCENT : 0; - tc_cfg[i].prio_sp = 0; /* DWRR */ - } - - /* disable pfc */ - dcb_cfg->pfc_state = 0; - dcb_cfg->pfc_en_bitmap = 0; - - return 0; -} - -int spnic_dcb_init(struct spnic_nic_dev *nic_dev) -{ - struct spnic_dcb_config *dcb_cfg = &nic_dev->wanted_dcb_cfg; - int err; - - if (SPNIC_FUNC_IS_VF(nic_dev->hwdev)) - return spnic_set_tx_cos_state(nic_dev); - - err = init_default_dcb_cfg(nic_dev, dcb_cfg); - if (err) { - spnic_err(nic_dev, drv, "Initialize dcb configuration failed\n"); - return err; - } - - spnic_info(nic_dev, drv, "Support num cos %u, default cos %u\n", - dcb_cfg->max_cos, dcb_cfg->default_cos); - - nic_dev->dcb_changes = DCB_CFG_CHG_ETS | DCB_CFG_CHG_PFC | - DCB_CFG_CHG_UP_COS; - - memcpy(&nic_dev->hw_dcb_cfg, &nic_dev->wanted_dcb_cfg, - sizeof(nic_dev->hw_dcb_cfg)); - - err = spnic_set_tx_cos_state(nic_dev); - if (err) { - spnic_err(nic_dev, drv, "Set tx cos state failed\n"); - return err; - } - - sema_init(&nic_dev->dcb_sem, 1); - - return 0; -} - -u32 spnic_sync_dcb_cfg(struct spnic_nic_dev *nic_dev, struct spnic_dcb_config *src_dcb_cfg) -{ - struct spnic_dcb_config *wanted_cfg = src_dcb_cfg; - struct spnic_dcb_config *hw_cfg = &nic_dev->hw_dcb_cfg; - u32 changes = 0; - - if (memcmp(hw_cfg->cos_cfg, wanted_cfg->cos_cfg, - sizeof(hw_cfg->cos_cfg))) { - memcpy(hw_cfg->cos_cfg, wanted_cfg->cos_cfg, - sizeof(hw_cfg->cos_cfg)); - changes |= DCB_CFG_CHG_ETS; - } - - if (memcmp(hw_cfg->tc_cfg, wanted_cfg->tc_cfg, - sizeof(hw_cfg->tc_cfg))) { - memcpy(hw_cfg->tc_cfg, wanted_cfg->tc_cfg, - sizeof(hw_cfg->tc_cfg)); - changes |= DCB_CFG_CHG_ETS; - } - - if (hw_cfg->pfc_state != wanted_cfg->pfc_state || - (wanted_cfg->pfc_state && - hw_cfg->pfc_en_bitmap != wanted_cfg->pfc_en_bitmap)) { - hw_cfg->pfc_state = wanted_cfg->pfc_state; - hw_cfg->pfc_en_bitmap = wanted_cfg->pfc_en_bitmap; - changes |= DCB_CFG_CHG_PFC; - } - - return changes; -} - -static int dcbcfg_set_hw_cos_up_map(struct spnic_nic_dev *nic_dev, - struct spnic_dcb_config *dcb_cfg) -{ - u8 cos_up_map[SPNIC_DCB_COS_MAX] = {0}; - int err; - u8 i; - - for (i = 0; i < SPNIC_DCB_COS_MAX; i++) { - if (!(dcb_cfg->valid_cos_bitmap & BIT(i))) - continue; - - cos_up_map[i] = dcb_cfg->cos_cfg[i].up; - } - - err = spnic_dcb_set_cos_up_map(nic_dev->hwdev, dcb_cfg->valid_cos_bitmap, - cos_up_map, SPNIC_DCB_COS_MAX); - if (err) - spnic_err(nic_dev, drv, "Set cos_up map failed\n"); - - return err; -} - -/* The sum of the cos bandwidth mapped to the same TC is 100 */ -static void adjust_cos_bw(u8 valid_cos_bitmap, u8 *cos_tc, u8 *cos_bw) -{ - u8 tc, cos, cos_num; - u16 bw_all, bw_remain; - - for (tc = 0; tc < SPNIC_DCB_TC_MAX; tc++) { - bw_all = 0; - cos_num = 0; - for (cos = 0; cos < SPNIC_DCB_COS_MAX; cos++) { - if (!(valid_cos_bitmap & BIT(cos)) || cos_tc[cos] != tc) - continue; - bw_all += cos_bw[cos]; - cos_num++; - } - - if (!bw_all || !cos_num) - continue; - - bw_remain = MAX_BW_PERCENT % cos_num; - for (cos = 0; cos < SPNIC_DCB_COS_MAX; cos++) { - if (!(valid_cos_bitmap & BIT(cos)) || cos_tc[cos] != tc) - continue; - - cos_bw[cos] = - (u8)(MAX_BW_PERCENT * cos_bw[cos] / bw_all); - - if (bw_remain) { - cos_bw[cos]++; - bw_remain--; - } - } - } -} - -static void dcbcfg_dump_configuration(struct spnic_nic_dev *nic_dev, - u8 *cos_tc, u8 *cos_bw, u8 *cos_prio, - u8 *tc_bw, u8 *tc_prio) -{ - u8 i; - - for (i = 0; i < SPNIC_DCB_COS_MAX; i++) { - if (!(nic_dev->hw_dcb_cfg.valid_cos_bitmap & BIT(i))) - continue; - - spnic_info(nic_dev, drv, "cos: %u, up: %u, tc: %u, bw: %u, prio: %u\n", - i, nic_dev->hw_dcb_cfg.cos_cfg[i].up, 
cos_tc[i], - cos_bw[i], cos_prio[i]); - } - - for (i = 0; i < nic_dev->hw_dcb_cfg.max_cos; i++) - spnic_info(nic_dev, drv, "tc: %u, bw: %u, prio: %u\n", - i, tc_bw[i], tc_prio[i]); -} - -static int dcbcfg_set_hw_ets(struct spnic_nic_dev *nic_dev, - struct spnic_dcb_config *dcb_cfg) -{ - u8 cos_tc[SPNIC_DCB_COS_MAX] = {0}; - u8 cos_bw[SPNIC_DCB_COS_MAX] = {0}; - u8 cos_prio[SPNIC_DCB_COS_MAX] = {0}; - u8 tc_bw[SPNIC_DCB_TC_MAX] = {0}; - u8 tc_prio[SPNIC_DCB_TC_MAX] = {0}; - int err; - u8 i; - - for (i = 0; i < SPNIC_DCB_COS_MAX; i++) { - if (!(dcb_cfg->valid_cos_bitmap & BIT(i))) - continue; - - cos_tc[i] = dcb_cfg->cos_cfg[i].tc_id; - cos_bw[i] = dcb_cfg->cos_cfg[i].bw_pct; - cos_prio[i] = dcb_cfg->cos_cfg[i].prio_sp; - } - - for (i = 0; i < SPNIC_DCB_TC_MAX; i++) { - tc_bw[i] = dcb_cfg->tc_cfg[i].bw_pct; - tc_prio[i] = dcb_cfg->tc_cfg[i].prio_sp; - } - - adjust_cos_bw(dcb_cfg->valid_cos_bitmap, cos_tc, cos_bw); - - if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) - dcbcfg_dump_configuration(nic_dev, cos_tc, cos_bw, cos_prio, tc_bw, tc_prio); - - err = spnic_dcb_set_ets(nic_dev->hwdev, cos_tc, cos_bw, cos_prio, tc_bw, tc_prio); - if (err) { - spnic_err(nic_dev, drv, "Failed to set ets\n"); - return err; - } - - return 0; -} - -static int dcbcfg_set_hw_pfc(struct spnic_nic_dev *nic_dev, - struct spnic_dcb_config *dcb_cfg) -{ - u8 valid_up_bitmap = spnic_get_valid_up_bitmap(dcb_cfg); - u8 outof_range_pfc = (~valid_up_bitmap) & dcb_cfg->pfc_en_bitmap; - int err; - - if (dcb_cfg->pfc_state && outof_range_pfc) - spnic_info(nic_dev, drv, "PFC setting out of range, 0x%x will be ignored\n", - outof_range_pfc); - - err = spnic_dcb_set_pfc(nic_dev->hwdev, dcb_cfg->pfc_state, dcb_cfg->pfc_en_bitmap); - if (err) { - spnic_err(nic_dev, drv, "Failed to %s PFC\n", - dcb_cfg->pfc_state ? 
"enable" : "disable"); - return err; - } - - if (dcb_cfg->pfc_state) - spnic_info(nic_dev, drv, "Set PFC: 0x%x to hw done\n", - dcb_cfg->pfc_en_bitmap & valid_up_bitmap); - else - spnic_info(nic_dev, drv, "Disable PFC, enable tx/rx pause\n"); - - return 0; -} - -int spnic_dcbcfg_setall_to_hw(struct spnic_nic_dev *nic_dev, struct spnic_dcb_config *src_dcb_cfg) -{ - bool stop_traffic = false; - int err = 0; - - nic_dev->dcb_changes |= spnic_sync_dcb_cfg(nic_dev, src_dcb_cfg); - if (!nic_dev->dcb_changes) - return 0; - - /* hw does not support to change up cos mapping and cos tc mapping with - * traffic flow - */ - stop_traffic = !!(nic_dev->dcb_changes & - (DCB_CFG_CHG_ETS | DCB_CFG_CHG_UP_COS)); - if (stop_traffic) { - err = spnic_stop_port_traffic_flow(nic_dev, true); - if (err) - return err; - } - - if (nic_dev->dcb_changes & DCB_CFG_CHG_UP_COS) { - err = dcbcfg_set_hw_cos_up_map(nic_dev, &nic_dev->hw_dcb_cfg); - if (err) - goto out; - - nic_dev->dcb_changes &= (~DCB_CFG_CHG_UP_COS); - } - - if (nic_dev->dcb_changes & DCB_CFG_CHG_ETS) { - err = dcbcfg_set_hw_ets(nic_dev, &nic_dev->hw_dcb_cfg); - if (err) - goto out; - - nic_dev->dcb_changes &= (~DCB_CFG_CHG_ETS); - } - - if (nic_dev->dcb_changes & DCB_CFG_CHG_PFC) { - err = dcbcfg_set_hw_pfc(nic_dev, &nic_dev->hw_dcb_cfg); - if (err) - goto out; - - nic_dev->dcb_changes &= (~DCB_CFG_CHG_PFC); - } - -out: - if (stop_traffic) - spnic_start_port_traffic_flow(nic_dev); - - return err; -} - -int spnic_dcb_reset_hw_config(struct spnic_nic_dev *nic_dev) -{ - struct spnic_dcb_config dft_cfg = {0}; - int err; - - init_default_dcb_cfg(nic_dev, &dft_cfg); - err = spnic_dcbcfg_setall_to_hw(nic_dev, &dft_cfg); - if (err) { - spnic_err(nic_dev, drv, "Failed to reset hw dcb configuration\n"); - return err; - } - - spnic_info(nic_dev, drv, "Reset hardware DCB configuration done\n"); - - return 0; -} - -int spnic_configure_dcb(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) - return spnic_dcbcfg_setall_to_hw(nic_dev, &nic_dev->wanted_dcb_cfg); - else - return spnic_dcb_reset_hw_config(nic_dev); -} - -void spnic_dcbcfg_set_pfc_state(struct spnic_nic_dev *nic_dev, u8 pfc_state) -{ - nic_dev->wanted_dcb_cfg.pfc_state = pfc_state; -} - -u8 spnic_dcbcfg_get_pfc_state(struct spnic_nic_dev *nic_dev) -{ - return nic_dev->wanted_dcb_cfg.pfc_state; -} - -void spnic_dcbcfg_set_pfc_pri_en(struct spnic_nic_dev *nic_dev, u8 pfc_en_bitmap) -{ - nic_dev->wanted_dcb_cfg.pfc_en_bitmap = pfc_en_bitmap; -} - -u8 spnic_dcbcfg_get_pfc_pri_en(struct spnic_nic_dev *nic_dev) -{ - return nic_dev->wanted_dcb_cfg.pfc_en_bitmap; -} - -int spnic_dcbcfg_set_ets_up_tc_map(struct spnic_nic_dev *nic_dev, const u8 *up_tc_map) -{ - struct spnic_dcb_config *dcb_cfg = &nic_dev->wanted_dcb_cfg; - u8 i; - - for (i = 0; i < SPNIC_DCB_UP_MAX; i++) { - if (!(dcb_cfg->valid_cos_bitmap & BIT(i))) - continue; - - /* TC id can't exceed max cos */ - if (up_tc_map[dcb_cfg->cos_cfg[i].up] >= dcb_cfg->max_cos) - return -EINVAL; - } - - for (i = 0; i < SPNIC_DCB_UP_MAX; i++) { - if (!(dcb_cfg->valid_cos_bitmap & BIT(i))) - continue; - - dcb_cfg->cos_cfg[i].tc_id = up_tc_map[dcb_cfg->cos_cfg[i].up]; - } - - return 0; -} - -void spnic_dcbcfg_get_ets_up_tc_map(struct spnic_nic_dev *nic_dev, u8 *up_tc_map) -{ - struct spnic_dcb_config *dcb_cfg = &nic_dev->wanted_dcb_cfg; - struct spnic_cos_cfg *cos_cfg = dcb_cfg->cos_cfg; - u8 i; - - /* set unused up mapping to default tc */ - memset(up_tc_map, 
cos_cfg[dcb_cfg->default_cos].tc_id, - SPNIC_DCB_UP_MAX); - - for (i = 0; i < SPNIC_DCB_COS_MAX; i++) { - if (!(dcb_cfg->valid_cos_bitmap & BIT(i))) - continue; - - up_tc_map[cos_cfg[i].up] = cos_cfg[i].tc_id; - } -} - -int spnic_dcbcfg_set_ets_tc_bw(struct spnic_nic_dev *nic_dev, const u8 *tc_bw) -{ - struct spnic_dcb_config *dcb_cfg = &nic_dev->wanted_dcb_cfg; - u8 bw_sum = 0; - u8 i; - - for (i = 0; i < SPNIC_DCB_TC_MAX; i++) { - /* cannot set bandwidth for unused tc */ - if (i >= dcb_cfg->max_cos && tc_bw[i] > 0) - return -EINVAL; - - bw_sum += tc_bw[i]; - } - - if (bw_sum != MAX_BW_PERCENT && bw_sum != 0) { - spnic_err(nic_dev, drv, "Invalid total bw %u\n", bw_sum); - return -EINVAL; - } - - for (i = 0; i < dcb_cfg->max_cos; i++) - dcb_cfg->tc_cfg[i].bw_pct = tc_bw[i]; - - return 0; -} - -void spnic_dcbcfg_get_ets_tc_bw(struct spnic_nic_dev *nic_dev, u8 *tc_bw) -{ - struct spnic_dcb_config *dcb_cfg = &nic_dev->wanted_dcb_cfg; - u8 i; - - for (i = 0; i < dcb_cfg->max_cos; i++) - tc_bw[i] = dcb_cfg->tc_cfg[i].bw_pct; -} - -void spnic_dcbcfg_set_ets_tc_prio_type(struct spnic_nic_dev *nic_dev, u8 tc_prio_bitmap) -{ - struct spnic_dcb_config *dcb_cfg = &nic_dev->wanted_dcb_cfg; - u8 i; - - for (i = 0; i < dcb_cfg->max_cos; i++) - dcb_cfg->tc_cfg[i].prio_sp = !!(tc_prio_bitmap & BIT(i)); -} - -void spnic_dcbcfg_get_ets_tc_prio_type(struct spnic_nic_dev *nic_dev, u8 *tc_prio_bitmap) -{ - struct spnic_dcb_config *dcb_cfg = &nic_dev->wanted_dcb_cfg; - u8 i; - - *tc_prio_bitmap = 0; - for (i = 0; i < dcb_cfg->max_cos; i++) { - if (dcb_cfg->tc_cfg[i].prio_sp) - *tc_prio_bitmap |= (u8)BIT(i); - } -} - -/* TODO: send a command to MPU, and MPU close all port traffic */ -static int stop_all_ports_flow(void *uld_array[], u32 num_dev) -{ - struct spnic_nic_dev *tmp_dev = NULL; - u32 i, idx; - int err; - - for (idx = 0; idx < num_dev; idx++) { - tmp_dev = (struct spnic_nic_dev *)uld_array[idx]; - err = spnic_stop_port_traffic_flow(tmp_dev, false); - if (err) { - nicif_err(tmp_dev, drv, tmp_dev->netdev, "Stop port traffic flow failed\n"); - goto stop_port_err; - } - } - - /* wait all traffic flow stopped */ - msleep(SPNIC_WAIT_PORT_IO_STOP); - - return 0; - -stop_port_err: - for (i = 0; i < idx; i++) { - tmp_dev = (struct spnic_nic_dev *)uld_array[i]; - spnic_start_port_traffic_flow(tmp_dev); - } - - return err; -} - -static void start_all_ports_flow(void *uld_array[], u32 num_dev) -{ - struct spnic_nic_dev *tmp_dev = NULL; - u32 idx; - - for (idx = 0; idx < num_dev; idx++) { - tmp_dev = (struct spnic_nic_dev *)uld_array[idx]; - spnic_start_port_traffic_flow(tmp_dev); - } -} - -int change_dev_cos_up_map(struct spnic_nic_dev *nic_dev, u8 valid_up_bitmap) -{ - struct net_device *netdev = nic_dev->netdev; - int err = 0; - - if (test_and_set_bit(SPNIC_DCB_UP_COS_SETTING, &nic_dev->dcb_flags)) { - nicif_warn(nic_dev, drv, netdev, - "Cos_up map setting in inprocess, please try again later\n"); - return -EFAULT; - } - - if (spnic_get_valid_up_bitmap(&nic_dev->wanted_dcb_cfg) == - valid_up_bitmap) { - nicif_err(nic_dev, drv, netdev, "Same up bitmap, don't need to change anything\n"); - err = 0; - goto out; - } - - nicif_info(nic_dev, drv, netdev, "Set valid_up_bitmap: 0x%x\n", - valid_up_bitmap); - - update_valid_up_bitmap(&nic_dev->wanted_dcb_cfg, valid_up_bitmap); - - nic_dev->dcb_changes = DCB_CFG_CHG_ETS | DCB_CFG_CHG_PFC | DCB_CFG_CHG_UP_COS; - - if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) { - err = spnic_dcbcfg_setall_to_hw(nic_dev, &nic_dev->wanted_dcb_cfg); - if (err) { - nicif_err(nic_dev, drv, 
netdev, "Reconfig dcb to hw failed\n"); - goto out; - } - - /* Change up/tc map for netdev */ - spnic_set_prio_tc_map(nic_dev); - spnic_update_tx_db_cos(nic_dev); - } - - err = spnic_set_tx_cos_state(nic_dev); - -out: - clear_bit(SPNIC_DCB_UP_COS_SETTING, &nic_dev->dcb_flags); - - return err; -} - -int spnic_dcbcfg_set_up_bitmap(struct spnic_nic_dev *nic_dev, u8 valid_up_bitmap) -{ - struct spnic_nic_dev *tmp_dev = NULL; - void **uld_array = NULL; - u32 i, idx, num_dev = 0; - int err, rollback_err; - bool up_setted = false; - u8 old_valid_up_bitmap = 0; - u8 max_pf; - - /* Save old valid up bitmap, in case of set failed */ - err = spnic_get_chip_up_bitmap(nic_dev->pdev, &up_setted, &old_valid_up_bitmap); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Get old chip valid up bitmap failed\n"); - return -EFAULT; - } - - if (valid_up_bitmap == old_valid_up_bitmap) { - nicif_info(nic_dev, drv, nic_dev->netdev, "Same valid up bitmap, don't need to change anything\n"); - return 0; - } - - max_pf = sphw_max_pf_num(nic_dev->hwdev); - if (!max_pf) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid max pf number\n"); - return -EFAULT; - } - - uld_array = kcalloc(max_pf, sizeof(void *), GFP_KERNEL); - if (!uld_array) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc uld_array\n"); - return -ENOMEM; - } - - /* Get all pf of this chip */ - err = spnic_get_pf_nic_uld_array(nic_dev->pdev, &num_dev, uld_array); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Get all pf private handle failed\n"); - err = -EFAULT; - goto out; - } - - err = stop_all_ports_flow(uld_array, num_dev); - if (err) - goto out; - - for (idx = 0; idx < num_dev; idx++) { - tmp_dev = (struct spnic_nic_dev *)uld_array[idx]; - err = change_dev_cos_up_map(tmp_dev, valid_up_bitmap); - if (err) { - nicif_err(tmp_dev, drv, tmp_dev->netdev, "Set cos_up map to hw failed\n"); - goto set_err; - } - } - - start_all_ports_flow(uld_array, num_dev); - - spnic_set_chip_up_bitmap(nic_dev->pdev, valid_up_bitmap); - kfree(uld_array); - - return 0; - -set_err: - /* undo all settings */ - for (i = 0; i <= idx; i++) { - tmp_dev = (struct spnic_nic_dev *)uld_array[i]; - rollback_err = change_dev_cos_up_map(tmp_dev, old_valid_up_bitmap); - if (rollback_err) - nicif_err(tmp_dev, drv, tmp_dev->netdev, "Failed to rollback cos_up map to hw\n"); - } - - start_all_ports_flow(uld_array, num_dev); - -out: - kfree(uld_array); - - return err; -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_dcb.h b/drivers/net/ethernet/ramaxel/spnic/spnic_dcb.h deleted file mode 100644 index 48ef471237e2..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_dcb.h +++ /dev/null @@ -1,56 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_DCB_H -#define SPNIC_DCB_H - -enum SPNIC_DCB_FLAGS { - SPNIC_DCB_UP_COS_SETTING, - SPNIC_DCB_TRAFFIC_STOPPED, -}; - -struct spnic_cos_cfg { - u8 up; - u8 bw_pct; - u8 tc_id; - u8 prio_sp; /* 0 - DWRR, 1 - SP */ -}; - -struct spnic_tc_cfg { - u8 bw_pct; - u8 prio_sp; /* 0 - DWRR, 1 - SP */ - u16 rsvd; -}; - -struct spnic_dcb_config { - /* The num_tc of the protocol stack is also the same */ - u8 max_cos; - u8 default_cos; - u8 valid_cos_bitmap; - u8 rsvd1; - struct spnic_cos_cfg cos_cfg[SPNIC_DCB_COS_MAX]; - struct spnic_tc_cfg tc_cfg[SPNIC_DCB_TC_MAX]; - - u8 pfc_state; - u8 pfc_en_bitmap; - u16 rsvd2; -}; - -int spnic_dcb_init(struct spnic_nic_dev *nic_dev); -int spnic_dcb_reset_hw_config(struct spnic_nic_dev *nic_dev); -int 
spnic_configure_dcb(struct net_device *netdev); -int spnic_setup_tc(struct net_device *netdev, u8 tc); -u8 spnic_get_valid_up_bitmap(struct spnic_dcb_config *dcb_cfg); -void spnic_dcbcfg_set_pfc_state(struct spnic_nic_dev *nic_dev, u8 pfc_state); -u8 spnic_dcbcfg_get_pfc_state(struct spnic_nic_dev *nic_dev); -void spnic_dcbcfg_set_pfc_pri_en(struct spnic_nic_dev *nic_dev, u8 pfc_en_bitmap); -u8 spnic_dcbcfg_get_pfc_pri_en(struct spnic_nic_dev *nic_dev); -int spnic_dcbcfg_set_ets_up_tc_map(struct spnic_nic_dev *nic_dev, const u8 *up_tc_map); -void spnic_dcbcfg_get_ets_up_tc_map(struct spnic_nic_dev *nic_dev, u8 *up_tc_map); -int spnic_dcbcfg_set_ets_tc_bw(struct spnic_nic_dev *nic_dev, const u8 *tc_bw); -void spnic_dcbcfg_get_ets_tc_bw(struct spnic_nic_dev *nic_dev, u8 *tc_bw); -void spnic_dcbcfg_set_ets_tc_prio_type(struct spnic_nic_dev *nic_dev, u8 tc_prio_bitmap); -void spnic_dcbcfg_get_ets_tc_prio_type(struct spnic_nic_dev *nic_dev, u8 *tc_prio_bitmap); -int spnic_dcbcfg_set_up_bitmap(struct spnic_nic_dev *nic_dev, u8 valid_up_bitmap); -void spnic_update_tx_db_cos(struct spnic_nic_dev *nic_dev); -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_dev_mgmt.c b/drivers/net/ethernet/ramaxel/spnic/spnic_dev_mgmt.c deleted file mode 100644 index 6037645c0e8b..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_dev_mgmt.c +++ /dev/null @@ -1,811 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt - -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/device.h> -#include <linux/module.h> -#include <linux/io-mapping.h> -#include <linux/interrupt.h> -#include <net/addrconf.h> -#include <linux/time.h> -#include <linux/timex.h> -#include <linux/rtc.h> -#include <linux/debugfs.h> - -#include "sphw_common.h" -#include "sphw_mt.h" -#include "sphw_crm.h" -#include "spnic_lld.h" -#include "spnic_sriov.h" -#include "spnic_pci_id_tbl.h" -#include "spnic_dev_mgmt.h" - -#define SPNIC_WAIT_TOOL_CNT_TIMEOUT 10000 -#define SPNIC_WAIT_TOOL_MIN_USLEEP_TIME 9900 -#define SPNIC_WAIT_TOOL_MAX_USLEEP_TIME 10000 - -#define MAX_CARD_ID 64 -static unsigned long card_bit_map; - -LIST_HEAD(g_spnic_chip_list); - -void lld_dev_cnt_init(struct spnic_pcidev *pci_adapter) -{ - atomic_set(&pci_adapter->ref_cnt, 0); -} - -void lld_dev_hold(struct spnic_lld_dev *dev) -{ - struct spnic_pcidev *pci_adapter = pci_get_drvdata(dev->pdev); - - atomic_inc(&pci_adapter->ref_cnt); -} - -void lld_dev_put(struct spnic_lld_dev *dev) -{ - struct spnic_pcidev *pci_adapter = pci_get_drvdata(dev->pdev); - - atomic_dec(&pci_adapter->ref_cnt); -} - -void wait_lld_dev_unused(struct spnic_pcidev *pci_adapter) -{ - unsigned long end; - - end = jiffies + msecs_to_jiffies(SPNIC_WAIT_TOOL_CNT_TIMEOUT); - do { - if (!atomic_read(&pci_adapter->ref_cnt)) - return; - - /* if sleep 10ms, use usleep_range to be more precise */ - usleep_range(SPNIC_WAIT_TOOL_MIN_USLEEP_TIME, - SPNIC_WAIT_TOOL_MAX_USLEEP_TIME); - } while (time_before(jiffies, end)); -} - -enum spnic_lld_status { - SPNIC_NODE_CHANGE = BIT(0), -}; - -struct spnic_lld_lock { - /* lock for chip list */ - struct mutex lld_mutex; - unsigned long status; - atomic_t dev_ref_cnt; -}; - -struct spnic_lld_lock g_lld_lock; - -#define WAIT_LLD_DEV_HOLD_TIMEOUT (10 * 60 * 1000) /* 10minutes */ -#define WAIT_LLD_DEV_NODE_CHANGED (10 * 60 * 1000) /* 10minutes */ -#define WAIT_LLD_DEV_REF_CNT_EMPTY (2 * 60 * 1000) /* 2minutes */ -#define PRINT_TIMEOUT_INTERVAL 10000 -#define 
MS_PER_SEC 1000 -#define LLD_LOCK_MIN_USLEEP_TIME 900 -#define LLD_LOCK_MAX_USLEEP_TIME 1000 - -/* node in chip_node will changed, tools or driver can't get node - * during this situation - */ -void lld_lock_chip_node(void) -{ - unsigned long end; - bool timeout = true; - u32 loop_cnt; - - mutex_lock(&g_lld_lock.lld_mutex); - - loop_cnt = 0; - end = jiffies + msecs_to_jiffies(WAIT_LLD_DEV_NODE_CHANGED); - do { - if (!test_and_set_bit(SPNIC_NODE_CHANGE, &g_lld_lock.status)) { - timeout = false; - break; - } - - loop_cnt++; - if (loop_cnt % PRINT_TIMEOUT_INTERVAL == 0) - pr_warn("Wait for lld node change complete for %us\n", - loop_cnt / MS_PER_SEC); - - /* if sleep 1ms, use usleep_range to be more precise */ - usleep_range(LLD_LOCK_MIN_USLEEP_TIME, - LLD_LOCK_MAX_USLEEP_TIME); - } while (time_before(jiffies, end)); - - if (timeout && test_and_set_bit(SPNIC_NODE_CHANGE, &g_lld_lock.status)) - pr_warn("Wait for lld node change complete timeout when trying to get lld lock\n"); - - loop_cnt = 0; - timeout = true; - end = jiffies + msecs_to_jiffies(WAIT_LLD_DEV_NODE_CHANGED); - do { - if (!atomic_read(&g_lld_lock.dev_ref_cnt)) { - timeout = false; - break; - } - - loop_cnt++; - if (loop_cnt % PRINT_TIMEOUT_INTERVAL == 0) - pr_warn("Wait for lld dev unused for %us, reference count: %d\n", - loop_cnt / MS_PER_SEC, - atomic_read(&g_lld_lock.dev_ref_cnt)); - - /* if sleep 1ms, use usleep_range to be more precise */ - usleep_range(LLD_LOCK_MIN_USLEEP_TIME, - LLD_LOCK_MAX_USLEEP_TIME); - } while (time_before(jiffies, end)); - - if (timeout && atomic_read(&g_lld_lock.dev_ref_cnt)) - pr_warn("Wait for lld dev unused timeout\n"); - - mutex_unlock(&g_lld_lock.lld_mutex); -} - -void lld_unlock_chip_node(void) -{ - clear_bit(SPNIC_NODE_CHANGE, &g_lld_lock.status); -} - -/* When tools or other drivers want to get node of chip_node, use this function - * to prevent node be freed - */ -void lld_hold(void) -{ - unsigned long end; - u32 loop_cnt = 0; - - /* ensure there have not any chip node in changing */ - mutex_lock(&g_lld_lock.lld_mutex); - - end = jiffies + msecs_to_jiffies(WAIT_LLD_DEV_HOLD_TIMEOUT); - do { - if (!test_bit(SPNIC_NODE_CHANGE, &g_lld_lock.status)) - break; - - loop_cnt++; - - if (loop_cnt % PRINT_TIMEOUT_INTERVAL == 0) - pr_warn("Wait lld node change complete for %us\n", - loop_cnt / MS_PER_SEC); - /* if sleep 1ms, use usleep_range to be more precise */ - usleep_range(LLD_LOCK_MIN_USLEEP_TIME, - LLD_LOCK_MAX_USLEEP_TIME); - } while (time_before(jiffies, end)); - - if (test_bit(SPNIC_NODE_CHANGE, &g_lld_lock.status)) - pr_warn("Wait lld node change complete timeout when trying to hode lld dev\n"); - - atomic_inc(&g_lld_lock.dev_ref_cnt); - mutex_unlock(&g_lld_lock.lld_mutex); -} - -void lld_put(void) -{ - atomic_dec(&g_lld_lock.dev_ref_cnt); -} - -void spnic_lld_lock_init(void) -{ - mutex_init(&g_lld_lock.lld_mutex); - atomic_set(&g_lld_lock.dev_ref_cnt, 0); -} - -void spnic_get_all_chip_id(void *id_info) -{ - struct nic_card_id *card_id = (struct nic_card_id *)id_info; - struct card_node *chip_node = NULL; - int i = 0; - int id, err; - - lld_hold(); - list_for_each_entry(chip_node, &g_spnic_chip_list, node) { - err = sscanf(chip_node->chip_name, SPHW_CHIP_NAME "%d", &id); - if (err < 0) - pr_err("Failed to get spnic id\n"); - card_id->id[i] = id; - i++; - } - lld_put(); - card_id->num = i; -} - -void spnic_get_card_func_info_by_card_name(const char *chip_name, - struct sphw_card_func_info *card_func) -{ - struct card_node *chip_node = NULL; - struct spnic_pcidev *dev = NULL; - struct 
func_pdev_info *pdev_info = NULL; - - card_func->num_pf = 0; - - lld_hold(); - - list_for_each_entry(chip_node, &g_spnic_chip_list, node) { - if (strncmp(chip_node->chip_name, chip_name, IFNAMSIZ)) - continue; - - list_for_each_entry(dev, &chip_node->func_list, node) { - if (sphw_func_type(dev->hwdev) == TYPE_VF) - continue; - - pdev_info = &card_func->pdev_info[card_func->num_pf]; - pdev_info->bar1_size = - pci_resource_len(dev->pcidev, SPNIC_PF_PCI_CFG_REG_BAR); - pdev_info->bar1_phy_addr = - pci_resource_start(dev->pcidev, SPNIC_PF_PCI_CFG_REG_BAR); - - pdev_info->bar3_size = - pci_resource_len(dev->pcidev, SPNIC_PCI_MGMT_REG_BAR); - pdev_info->bar3_phy_addr = - pci_resource_start(dev->pcidev, SPNIC_PCI_MGMT_REG_BAR); - - card_func->num_pf++; - if (card_func->num_pf >= CARD_MAX_SIZE) { - lld_put(); - return; - } - } - } - - lld_put(); -} - -static bool is_pcidev_match_chip_name(const char *ifname, struct spnic_pcidev *dev, - struct card_node *chip_node, enum func_type type) -{ - if (!strncmp(chip_node->chip_name, ifname, IFNAMSIZ)) { - if (sphw_func_type(dev->hwdev) != type) - return false; - return true; - } - - return false; -} - -static struct spnic_lld_dev *_get_lld_dev_by_chip_name(const char *ifname, enum func_type type) -{ - struct card_node *chip_node = NULL; - struct spnic_pcidev *dev = NULL; - - lld_hold(); - - list_for_each_entry(chip_node, &g_spnic_chip_list, node) { - list_for_each_entry(dev, &chip_node->func_list, node) { - if (is_pcidev_match_chip_name(ifname, dev, chip_node, type)) { - lld_put(); - return &dev->lld_dev; - } - } - } - - lld_put(); - return NULL; -} - -static struct spnic_lld_dev *spnic_get_lld_dev_by_chip_name(const char *ifname) -{ - struct spnic_lld_dev *dev_hw_init = NULL; - struct spnic_lld_dev *dev = NULL; - - /*find hw init device first*/ - dev_hw_init = _get_lld_dev_by_chip_name(ifname, TYPE_UNKNOWN); - if (dev_hw_init) { - if (sphw_func_type(dev_hw_init->hwdev) == TYPE_PPF) - return dev_hw_init; - } - - dev = _get_lld_dev_by_chip_name(ifname, TYPE_PPF); - if (dev) { - if (dev_hw_init) - return dev_hw_init; - - return dev; - } - - dev = _get_lld_dev_by_chip_name(ifname, TYPE_PF); - if (dev) { - if (dev_hw_init) - return dev_hw_init; - - return dev; - } - - dev = _get_lld_dev_by_chip_name(ifname, TYPE_VF); - if (dev) - return dev; - - return NULL; -} - -static bool is_pcidev_match_dev_name(const char *ifname, struct spnic_pcidev *dev, - enum sphw_service_type type) -{ - enum sphw_service_type i; - char nic_uld_name[IFNAMSIZ] = {0}; - int err; - - if (type == SERVICE_T_MAX) { - for (i = SERVICE_T_OVS; i < SERVICE_T_MAX; i++) { - if (!strncmp(dev->uld_dev_name[i], ifname, IFNAMSIZ)) - return true; - } - } else { - if (!strncmp(dev->uld_dev_name[type], ifname, IFNAMSIZ)) - return true; - } - - err = spnic_get_uld_dev_name(dev, SERVICE_T_NIC, (char *)nic_uld_name); - if (!err) { - if (!strncmp(nic_uld_name, ifname, IFNAMSIZ)) - return true; - } - - return false; -} - -static struct spnic_lld_dev *spnic_get_lld_dev_by_dev_name(const char *ifname, - enum sphw_service_type type) -{ - struct card_node *chip_node = NULL; - struct spnic_pcidev *dev = NULL; - - lld_hold(); - - list_for_each_entry(chip_node, &g_spnic_chip_list, node) { - list_for_each_entry(dev, &chip_node->func_list, node) { - if (is_pcidev_match_dev_name(ifname, dev, type)) { - lld_put(); - return &dev->lld_dev; - } - } - } - - lld_put(); - - return NULL; -} - -struct spnic_lld_dev *spnic_get_lld_dev_by_ifname(const char *ifname) -{ - struct spnic_lld_dev *dev = NULL; - - lld_hold(); - /* 
support search hwdev by chip name, net device name, - * or fc device name - */ - /* Find pcidev by chip_name first */ - dev = spnic_get_lld_dev_by_chip_name(ifname); - if (dev) - goto find_dev; - - /* If ifname not a chip name, - * find pcidev by FC name or netdevice name - */ - dev = spnic_get_lld_dev_by_dev_name(ifname, SERVICE_T_MAX); - if (!dev) { - lld_put(); - return NULL; - } - -find_dev: - lld_dev_hold(dev); - lld_put(); - return dev; -} - -void *spnic_get_hwdev_by_ifname(const char *ifname) -{ - struct spnic_lld_dev *dev = NULL; - - dev = spnic_get_lld_dev_by_ifname(ifname); - if (dev) - return dev->hwdev; - - return NULL; -} - -void *spnic_get_uld_dev_by_ifname(const char *ifname, enum sphw_service_type type) -{ - struct spnic_pcidev *dev = NULL; - struct spnic_lld_dev *lld_dev = NULL; - - if (type >= SERVICE_T_MAX) { - pr_err("Service type :%d is error\n", type); - return NULL; - } - - lld_dev = spnic_get_lld_dev_by_dev_name(ifname, type); - if (!lld_dev) - return NULL; - - dev = pci_get_drvdata(lld_dev->pdev); - if (dev) - return dev->uld_dev[type]; - - return NULL; -} - -static struct card_node *spnic_get_chip_node_by_hwdev(const void *hwdev) -{ - struct card_node *chip_node = NULL; - struct card_node *node_tmp = NULL; - struct spnic_pcidev *dev = NULL; - - if (!hwdev) - return NULL; - - lld_hold(); - - list_for_each_entry(node_tmp, &g_spnic_chip_list, node) { - if (!chip_node) { - list_for_each_entry(dev, &node_tmp->func_list, node) { - if (dev->hwdev == hwdev) { - chip_node = node_tmp; - break; - } - } - } - } - - lld_put(); - - return chip_node; -} - -int spnic_get_chip_name_by_hwdev(const void *hwdev, char *ifname) -{ - struct card_node *chip_node = NULL; - struct spnic_pcidev *dev = NULL; - - if (!hwdev || !ifname) - return -EINVAL; - - lld_hold(); - - list_for_each_entry(chip_node, &g_spnic_chip_list, node) { - list_for_each_entry(dev, &chip_node->func_list, node) { - if (dev->hwdev == hwdev) { - strncpy(ifname, chip_node->chip_name, IFNAMSIZ - 1); - ifname[IFNAMSIZ - 1] = 0; - lld_put(); - return 0; - } - } - } - - lld_put(); - - return -ENXIO; -} - -void *spnic_get_uld_dev_by_pdev(struct pci_dev *pdev, enum sphw_service_type type) -{ - struct spnic_pcidev *pci_adapter = NULL; - - if (type >= SERVICE_T_MAX) { - pr_err("Service type :%d is error\n", type); - return NULL; - } - - pci_adapter = pci_get_drvdata(pdev); - if (pci_adapter) - return pci_adapter->uld_dev[type]; - - return NULL; -} - -void *spnic_get_ppf_hwdev_by_pdev(struct pci_dev *pdev) -{ - struct spnic_pcidev *pci_adapter = NULL; - struct card_node *chip_node = NULL; - struct spnic_pcidev *dev = NULL; - - if (!pdev) - return NULL; - - pci_adapter = pci_get_drvdata(pdev); - if (!pci_adapter) - return NULL; - - chip_node = pci_adapter->chip_node; - lld_hold(); - list_for_each_entry(dev, &chip_node->func_list, node) { - if (dev->hwdev && sphw_func_type(dev->hwdev) == TYPE_PPF) { - lld_put(); - return dev->hwdev; - } - } - lld_put(); - - return NULL; -} - -/* NOTICE: nictool can't use this function, because this function can't keep - * tool context mutual exclusive with remove context - */ -void *spnic_get_ppf_uld_by_pdev(struct pci_dev *pdev, enum sphw_service_type type) -{ - struct spnic_pcidev *pci_adapter = NULL; - struct card_node *chip_node = NULL; - struct spnic_pcidev *dev = NULL; - - if (!pdev) - return NULL; - - pci_adapter = pci_get_drvdata(pdev); - if (!pci_adapter) - return NULL; - - chip_node = pci_adapter->chip_node; - lld_hold(); - list_for_each_entry(dev, &chip_node->func_list, node) { - if 
(sphw_func_type(dev->hwdev) == TYPE_PPF) { - lld_put(); - return dev->uld_dev[type]; - } - } - lld_put(); - - return NULL; -} - -int spnic_get_pf_nic_uld_array(struct pci_dev *pdev, u32 *dev_cnt, void *array[]) -{ - struct spnic_pcidev *dev = pci_get_drvdata(pdev); - struct card_node *chip_node = NULL; - u32 cnt; - - if (!dev || !sphw_support_nic(dev->hwdev, NULL)) - return -EINVAL; - - lld_hold(); - - cnt = 0; - chip_node = dev->chip_node; - list_for_each_entry(dev, &chip_node->func_list, node) { - if (sphw_func_type(dev->hwdev) == TYPE_VF) - continue; - - array[cnt] = dev->uld_dev[SERVICE_T_NIC]; - cnt++; - } - lld_put(); - - *dev_cnt = cnt; - - return 0; -} - -static bool is_func_valid(struct spnic_pcidev *dev) -{ - if (sphw_func_type(dev->hwdev) == TYPE_VF) - return false; - - return true; -} - -int spnic_get_uld_dev_name(struct spnic_pcidev *dev, enum sphw_service_type type, char *ifname) -{ - u32 out_size = IFNAMSIZ; - - if (!g_uld_info[type].ioctl) - return -EFAULT; - - return g_uld_info[type].ioctl(dev->uld_dev[type], GET_ULD_DEV_NAME, - NULL, 0, ifname, &out_size); -} - -void spnic_get_card_info(const void *hwdev, void *bufin) -{ - struct card_node *chip_node = NULL; - struct card_info *info = (struct card_info *)bufin; - struct spnic_pcidev *dev = NULL; - void *fun_hwdev = NULL; - u32 i = 0; - - info->pf_num = 0; - - chip_node = spnic_get_chip_node_by_hwdev(hwdev); - if (!chip_node) - return; - - lld_hold(); - - list_for_each_entry(dev, &chip_node->func_list, node) { - if (!is_func_valid(dev)) - continue; - - fun_hwdev = dev->hwdev; - - if (sphw_support_nic(fun_hwdev, NULL)) { - if (dev->uld_dev[SERVICE_T_NIC]) { - info->pf[i].pf_type |= (u32)BIT(SERVICE_T_NIC); - spnic_get_uld_dev_name(dev, SERVICE_T_NIC, info->pf[i].name); - } - } - - /* to do : get other service info*/ - - if (sphw_func_for_mgmt(fun_hwdev)) - strlcpy(info->pf[i].name, "FOR_MGMT", IFNAMSIZ); - - strlcpy(info->pf[i].bus_info, pci_name(dev->pcidev), - sizeof(info->pf[i].bus_info)); - info->pf_num++; - i = info->pf_num; - } - - lld_put(); -} - -struct spnic_sriov_info *spnic_get_sriov_info_by_pcidev(struct pci_dev *pdev) -{ - struct spnic_pcidev *pci_adapter = NULL; - - if (!pdev) - return NULL; - - pci_adapter = pci_get_drvdata(pdev); - if (!pci_adapter) - return NULL; - - return &pci_adapter->sriov_info; -} - -void *spnic_get_hwdev_by_pcidev(struct pci_dev *pdev) -{ - struct spnic_pcidev *pci_adapter = NULL; - - if (!pdev) - return NULL; - - pci_adapter = pci_get_drvdata(pdev); - if (!pci_adapter) - return NULL; - - return pci_adapter->hwdev; -} - -bool spnic_is_in_host(void) -{ - struct card_node *chip_node = NULL; - struct spnic_pcidev *dev = NULL; - - lld_hold(); - list_for_each_entry(chip_node, &g_spnic_chip_list, node) { - list_for_each_entry(dev, &chip_node->func_list, node) { - if (sphw_func_type(dev->hwdev) != TYPE_VF) { - lld_put(); - return true; - } - } - } - - lld_put(); - - return false; -} - -int spnic_get_chip_up_bitmap(struct pci_dev *pdev, bool *is_setted, u8 *valid_up_bitmap) -{ - struct spnic_pcidev *dev = pci_get_drvdata(pdev); - struct card_node *chip_node = NULL; - - if (!dev || !is_setted || !valid_up_bitmap) - return -EINVAL; - - chip_node = dev->chip_node; - *is_setted = chip_node->up_bitmap_setted; - if (chip_node->up_bitmap_setted) - *valid_up_bitmap = chip_node->valid_up_bitmap; - - return 0; -} - -int spnic_set_chip_up_bitmap(struct pci_dev *pdev, u8 valid_up_bitmap) -{ - struct spnic_pcidev *dev = pci_get_drvdata(pdev); - struct card_node *chip_node = NULL; - - if (!dev) - return 
-EINVAL; - - chip_node = dev->chip_node; - chip_node->up_bitmap_setted = true; - chip_node->valid_up_bitmap = valid_up_bitmap; - - return 0; -} - -static bool chip_node_is_exist(struct spnic_pcidev *pci_adapter, unsigned char *bus_number) -{ - struct card_node *chip_node = NULL; - - if (!pci_is_root_bus(pci_adapter->pcidev->bus)) - *bus_number = pci_adapter->pcidev->bus->number; - - if (*bus_number != 0) { - list_for_each_entry(chip_node, &g_spnic_chip_list, node) { - if (chip_node->bus_num == *bus_number) { - pci_adapter->chip_node = chip_node; - return true; - } - } - } else if (pci_adapter->pcidev->device == SPNIC_DEV_ID_VF || - pci_adapter->pcidev->device == SPNIC_DEV_ID_VF_HV) { - list_for_each_entry(chip_node, &g_spnic_chip_list, node) { - if (chip_node) { - pci_adapter->chip_node = chip_node; - return true; - } - } - } - - return false; -} - -int alloc_chip_node(struct spnic_pcidev *pci_adapter) -{ - struct card_node *chip_node = NULL; - unsigned char i; - unsigned char bus_number = 0; - - if (chip_node_is_exist(pci_adapter, &bus_number)) - return 0; - - for (i = 0; i < MAX_CARD_ID; i++) { - if (!test_and_set_bit(i, &card_bit_map)) - break; - } - - if (i == MAX_CARD_ID) { - sdk_err(&pci_adapter->pcidev->dev, "Failed to alloc card id\n"); - return -EFAULT; - } - - chip_node = kzalloc(sizeof(*chip_node), GFP_KERNEL); - if (!chip_node) { - clear_bit(i, &card_bit_map); - sdk_err(&pci_adapter->pcidev->dev, - "Failed to alloc chip node\n"); - return -ENOMEM; - } - - /* bus number */ - chip_node->bus_num = bus_number; - - snprintf(chip_node->chip_name, IFNAMSIZ, "%s%u", SPHW_CHIP_NAME, i); - - sdk_info(&pci_adapter->pcidev->dev, "Add new chip %s to global list succeed\n", - chip_node->chip_name); - - list_add_tail(&chip_node->node, &g_spnic_chip_list); - - INIT_LIST_HEAD(&chip_node->func_list); - pci_adapter->chip_node = chip_node; - - return 0; -} - -void free_chip_node(struct spnic_pcidev *pci_adapter) -{ - struct card_node *chip_node = pci_adapter->chip_node; - int id, err; - - if (list_empty(&chip_node->func_list)) { - list_del(&chip_node->node); - sdk_info(&pci_adapter->pcidev->dev, "Delete chip %s from global list succeed\n", - chip_node->chip_name); - err = sscanf(chip_node->chip_name, SPHW_CHIP_NAME "%d", &id); - if (err < 0) - sdk_err(&pci_adapter->pcidev->dev, "Failed to get spnic id\n"); - - clear_bit(id, &card_bit_map); - - kfree(chip_node); - } -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_dev_mgmt.h b/drivers/net/ethernet/ramaxel/spnic/spnic_dev_mgmt.h deleted file mode 100644 index 8f345769bec5..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_dev_mgmt.h +++ /dev/null @@ -1,78 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_DEV_MGMT_H -#define SPNIC_DEV_MGMT_H -#include <linux/types.h> -#include <linux/bitops.h> - -#define SPHW_CHIP_NAME "spnic" - -#define SPNIC_VF_PCI_CFG_REG_BAR 0 -#define SPNIC_PF_PCI_CFG_REG_BAR 1 - -#define SPNIC_PCI_INTR_REG_BAR 2 -#define SPNIC_PCI_MGMT_REG_BAR 3 /* Only PF have mgmt bar */ -#define SPNIC_PCI_DB_BAR 4 - -/* Structure pcidev private*/ -struct spnic_pcidev { - struct pci_dev *pcidev; - void *hwdev; - struct card_node *chip_node; - struct spnic_lld_dev lld_dev; - /* Record the service object address, - * such as spnic_dev and toe_dev, fc_dev - */ - void *uld_dev[SERVICE_T_MAX]; - /* Record the service object name */ - char uld_dev_name[SERVICE_T_MAX][IFNAMSIZ]; - /* It is a the global variable for driver to manage - * all function device linked 
list - */ - struct list_head node; - - bool disable_vf_load; - bool disable_srv_load[SERVICE_T_MAX]; - - void __iomem *cfg_reg_base; - void __iomem *intr_reg_base; - void __iomem *mgmt_reg_base; - u64 db_dwqe_len; - u64 db_base_phy; - void __iomem *db_base; - - /* lock for attach/detach uld */ - struct mutex pdev_mutex; - - struct spnic_sriov_info sriov_info; - - /* setted when uld driver processing event */ - unsigned long state; - struct pci_device_id id; - - atomic_t ref_cnt; -}; - -extern struct list_head g_spnic_chip_list; - -extern struct spnic_uld_info g_uld_info[SERVICE_T_MAX]; - -int alloc_chip_node(struct spnic_pcidev *pci_adapter); - -void free_chip_node(struct spnic_pcidev *pci_adapter); - -void lld_lock_chip_node(void); - -void lld_unlock_chip_node(void); - -void spnic_lld_lock_init(void); - -void lld_dev_cnt_init(struct spnic_pcidev *pci_adapter); -void wait_lld_dev_unused(struct spnic_pcidev *pci_adapter); - -int spnic_get_uld_dev_name(struct spnic_pcidev *dev, enum sphw_service_type type, char *ifname); - -void *spnic_get_hwdev_by_pcidev(struct pci_dev *pdev); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_ethtool.c b/drivers/net/ethernet/ramaxel/spnic/spnic_ethtool.c deleted file mode 100644 index e3a7e81a601b..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_ethtool.c +++ /dev/null @@ -1,994 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt - -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/device.h> -#include <linux/module.h> -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/interrupt.h> -#include <linux/etherdevice.h> -#include <linux/netdevice.h> -#include <linux/if_vlan.h> -#include <linux/ethtool.h> - -#include "sphw_hw.h" -#include "sphw_crm.h" -#include "spnic_nic_dev.h" -#include "spnic_tx.h" -#include "spnic_rx.h" -#include "spnic_rss.h" - -#define COALESCE_ALL_QUEUE 0xFFFF -#define COALESCE_PENDING_LIMIT_UNIT 8 -#define COALESCE_TIMER_CFG_UNIT 5 -#define COALESCE_MAX_PENDING_LIMIT (255 * COALESCE_PENDING_LIMIT_UNIT) -#define COALESCE_MAX_TIMER_CFG (255 * COALESCE_TIMER_CFG_UNIT) - -static void spnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct pci_dev *pdev = nic_dev->pdev; - u8 mgmt_ver[SPHW_MGMT_VERSION_MAX_LEN] = {0}; - int err; - - strlcpy(info->driver, SPNIC_NIC_DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, SPNIC_DRV_VERSION, sizeof(info->version)); - strlcpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info)); - - err = sphw_get_mgmt_version(nic_dev->hwdev, mgmt_ver, SPHW_MGMT_VERSION_MAX_LEN, - SPHW_CHANNEL_NIC); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to get fw version\n"); - return; - } - - snprintf(info->fw_version, sizeof(info->fw_version), "%s", mgmt_ver); -} - -static u32 spnic_get_msglevel(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - return nic_dev->msg_enable; -} - -static void spnic_set_msglevel(struct net_device *netdev, u32 data) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - nic_dev->msg_enable = data; - - nicif_info(nic_dev, drv, netdev, "Set message level: 0x%x\n", data); -} - -int spnic_nway_reset(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct nic_port_info port_info = {0}; - int err; - - err = spnic_get_port_info(nic_dev->hwdev, &port_info, 
SPHW_CHANNEL_NIC); - if (err) { - nicif_err(nic_dev, drv, netdev, - "Get port info failed\n"); - return -EFAULT; - } - - if (!port_info.autoneg_state) { - nicif_err(nic_dev, drv, netdev, "Autonegotiation is off, don't support to restart it\n"); - return -EINVAL; - } - - err = spnic_set_autoneg(nic_dev->hwdev, true); - if (err) { - nicif_err(nic_dev, drv, netdev, "Restart autonegotiation failed\n"); - return -EFAULT; - } - - nicif_info(nic_dev, drv, netdev, "Restart autonegotiation successfully\n"); - - return 0; -} - -static void spnic_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, - struct kernel_ethtool_ringparam *kernel_ering, - struct netlink_ext_ack *extack) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - ring->rx_max_pending = SPNIC_MAX_RX_QUEUE_DEPTH; - ring->tx_max_pending = SPNIC_MAX_TX_QUEUE_DEPTH; - ring->rx_pending = nic_dev->rxqs[0].q_depth; - ring->tx_pending = nic_dev->txqs[0].q_depth; -} - -static void spnic_update_qp_depth(struct spnic_nic_dev *nic_dev, u32 sq_depth, u32 rq_depth) -{ - u16 i; - - nic_dev->q_params.sq_depth = sq_depth; - nic_dev->q_params.rq_depth = rq_depth; - for (i = 0; i < nic_dev->max_qps; i++) { - nic_dev->txqs[i].q_depth = sq_depth; - nic_dev->txqs[i].q_mask = sq_depth - 1; - nic_dev->rxqs[i].q_depth = rq_depth; - nic_dev->rxqs[i].q_mask = rq_depth - 1; - } -} - -static int check_ringparam_valid(struct net_device *netdev, struct ethtool_ringparam *ring) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - if (ring->rx_jumbo_pending || ring->rx_mini_pending) { - nicif_err(nic_dev, drv, netdev, - "Unsupported rx_jumbo_pending/rx_mini_pending\n"); - return -EINVAL; - } - - if (ring->tx_pending > SPNIC_MAX_TX_QUEUE_DEPTH || - ring->tx_pending < SPNIC_MIN_QUEUE_DEPTH || - ring->rx_pending > SPNIC_MAX_RX_QUEUE_DEPTH || - ring->rx_pending < SPNIC_MIN_QUEUE_DEPTH) { - nicif_err(nic_dev, drv, netdev, "Queue depth out of rang tx[%d-%d] rx[%d-%d]\n", - SPNIC_MIN_QUEUE_DEPTH, SPNIC_MAX_TX_QUEUE_DEPTH, - SPNIC_MIN_QUEUE_DEPTH, SPNIC_MAX_RX_QUEUE_DEPTH); - return -EINVAL; - } - - return 0; -} - -static int spnic_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, - struct kernel_ethtool_ringparam *kernel_ering, - struct netlink_ext_ack *extack) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct spnic_dyna_txrxq_params q_params = {0}; - u32 new_sq_depth, new_rq_depth; - int err; - - err = check_ringparam_valid(netdev, ring); - if (err) - return err; - - new_sq_depth = (u32)(1U << (u16)ilog2(ring->tx_pending)); - new_rq_depth = (u32)(1U << (u16)ilog2(ring->rx_pending)); - - if (new_sq_depth == nic_dev->q_params.sq_depth && - new_rq_depth == nic_dev->q_params.rq_depth) - return 0; /* nothing to do */ - - nicif_info(nic_dev, drv, netdev, "Change Tx/Rx ring depth from %u/%u to %u/%u\n", - nic_dev->q_params.sq_depth, nic_dev->q_params.rq_depth, - new_sq_depth, new_rq_depth); - - if (!netif_running(netdev)) { - spnic_update_qp_depth(nic_dev, new_sq_depth, new_rq_depth); - } else { - q_params = nic_dev->q_params; - q_params.sq_depth = new_sq_depth; - q_params.rq_depth = new_rq_depth; - q_params.txqs_res = NULL; - q_params.rxqs_res = NULL; - q_params.irq_cfg = NULL; - - nicif_info(nic_dev, drv, netdev, "Restarting channel\n"); - err = spnic_change_channel_settings(nic_dev, &q_params, NULL, NULL); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to change channel settings\n"); - return -EFAULT; - } - } - - return 0; -} - -static int get_coalesce(struct net_device *netdev, struct 
ethtool_coalesce *coal, u16 queue) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct spnic_intr_coal_info *interrupt_info = NULL; - - if (queue == COALESCE_ALL_QUEUE) { - /* get tx/rx irq0 as default parameters */ - interrupt_info = &nic_dev->intr_coalesce[0]; - } else { - if (queue >= nic_dev->q_params.num_qps) { - nicif_err(nic_dev, drv, netdev, "Invalid queue_id: %u\n", queue); - return -EINVAL; - } - interrupt_info = &nic_dev->intr_coalesce[queue]; - } - - /* coalescs_timer is in unit of 5us */ - coal->rx_coalesce_usecs = interrupt_info->coalesce_timer_cfg * COALESCE_TIMER_CFG_UNIT; - /* coalescs_frams is in unit of 8 */ - coal->rx_max_coalesced_frames = interrupt_info->pending_limt * COALESCE_PENDING_LIMIT_UNIT; - - /* tx/rx use the same interrupt */ - coal->tx_coalesce_usecs = coal->rx_coalesce_usecs; - coal->tx_max_coalesced_frames = coal->rx_max_coalesced_frames; - coal->use_adaptive_rx_coalesce = nic_dev->adaptive_rx_coal; - - coal->pkt_rate_high = (u32)interrupt_info->pkt_rate_high; - coal->rx_coalesce_usecs_high = interrupt_info->rx_usecs_high * COALESCE_TIMER_CFG_UNIT; - coal->rx_max_coalesced_frames_high = interrupt_info->rx_pending_limt_high * - COALESCE_PENDING_LIMIT_UNIT; - - coal->pkt_rate_low = (u32)interrupt_info->pkt_rate_low; - coal->rx_coalesce_usecs_low = interrupt_info->rx_usecs_low * - COALESCE_TIMER_CFG_UNIT; - coal->rx_max_coalesced_frames_low = interrupt_info->rx_pending_limt_low * - COALESCE_PENDING_LIMIT_UNIT; - - return 0; -} - -static int set_queue_coalesce(struct spnic_nic_dev *nic_dev, u16 q_id, - struct spnic_intr_coal_info *coal) -{ - struct spnic_intr_coal_info *intr_coal; - struct interrupt_info info = {0}; - struct net_device *netdev = nic_dev->netdev; - int err; - - intr_coal = &nic_dev->intr_coalesce[q_id]; - if (intr_coal->coalesce_timer_cfg != coal->coalesce_timer_cfg || - intr_coal->pending_limt != coal->pending_limt) - intr_coal->user_set_intr_coal_flag = 1; - - intr_coal->coalesce_timer_cfg = coal->coalesce_timer_cfg; - intr_coal->pending_limt = coal->pending_limt; - intr_coal->pkt_rate_low = coal->pkt_rate_low; - intr_coal->rx_usecs_low = coal->rx_usecs_low; - intr_coal->rx_pending_limt_low = coal->rx_pending_limt_low; - intr_coal->pkt_rate_high = coal->pkt_rate_high; - intr_coal->rx_usecs_high = coal->rx_usecs_high; - intr_coal->rx_pending_limt_high = coal->rx_pending_limt_high; - - /* netdev not running or qp not in using, - * don't need to set coalesce to hw - */ - if (!test_bit(SPNIC_INTF_UP, &nic_dev->flags) || - q_id >= nic_dev->q_params.num_qps || nic_dev->adaptive_rx_coal) - return 0; - - info.msix_index = nic_dev->q_params.irq_cfg[q_id].msix_entry_idx; - info.lli_set = 0; - info.interrupt_coalesc_set = 1; - info.coalesc_timer_cfg = intr_coal->coalesce_timer_cfg; - info.pending_limt = intr_coal->pending_limt; - info.resend_timer_cfg = intr_coal->resend_timer_cfg; - nic_dev->rxqs[q_id].last_coalesc_timer_cfg = intr_coal->coalesce_timer_cfg; - nic_dev->rxqs[q_id].last_pending_limt = intr_coal->pending_limt; - err = sphw_set_interrupt_cfg(nic_dev->hwdev, info, SPHW_CHANNEL_NIC); - if (err) - nicif_warn(nic_dev, drv, netdev, "Failed to set queue%u coalesce", q_id); - - return err; -} - -static int is_coalesce_exceed_limit(struct net_device *netdev, - const struct ethtool_coalesce *coal) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - if (coal->rx_coalesce_usecs > COALESCE_MAX_TIMER_CFG) { - nicif_err(nic_dev, drv, netdev, "rx_coalesce_usecs out of range[%d-%d]\n", 0, - COALESCE_MAX_TIMER_CFG); - return 
-EOPNOTSUPP; - } - - if (coal->rx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT) { - nicif_err(nic_dev, drv, netdev, "rx_max_coalesced_frames out of range[%d-%d]\n", 0, - COALESCE_MAX_PENDING_LIMIT); - return -EOPNOTSUPP; - } - - if (coal->rx_coalesce_usecs_low > COALESCE_MAX_TIMER_CFG) { - nicif_err(nic_dev, drv, netdev, "rx_coalesce_usecs_low out of range[%d-%d]\n", 0, - COALESCE_MAX_TIMER_CFG); - return -EOPNOTSUPP; - } - - if (coal->rx_max_coalesced_frames_low > COALESCE_MAX_PENDING_LIMIT) { - nicif_err(nic_dev, drv, netdev, "rx_max_coalesced_frames_low out of range[%d-%d]\n", - 0, COALESCE_MAX_PENDING_LIMIT); - return -EOPNOTSUPP; - } - - if (coal->rx_coalesce_usecs_high > COALESCE_MAX_TIMER_CFG) { - nicif_err(nic_dev, drv, netdev, "rx_coalesce_usecs_high out of range[%d-%d]\n", 0, - COALESCE_MAX_TIMER_CFG); - return -EOPNOTSUPP; - } - - if (coal->rx_max_coalesced_frames_high > COALESCE_MAX_PENDING_LIMIT) { - nicif_err(nic_dev, drv, netdev, "rx_max_coalesced_frames_high out of range[%d-%d]\n", - 0, COALESCE_MAX_PENDING_LIMIT); - return -EOPNOTSUPP; - } - - return 0; -} - -static int is_coalesce_legal(struct net_device *netdev, - const struct ethtool_coalesce *coal) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct ethtool_coalesce tmp_coal = {0}; - int err; - - if (coal->rx_coalesce_usecs != coal->tx_coalesce_usecs) { - nicif_err(nic_dev, drv, netdev, "tx-usecs must be equal to rx-usecs\n"); - return -EINVAL; - } - - if (coal->rx_max_coalesced_frames != coal->tx_max_coalesced_frames) { - nicif_err(nic_dev, drv, netdev, "tx-frames must be equal to rx-frames\n"); - return -EINVAL; - } - - tmp_coal.cmd = coal->cmd; - tmp_coal.rx_coalesce_usecs = coal->rx_coalesce_usecs; - tmp_coal.rx_max_coalesced_frames = coal->rx_max_coalesced_frames; - tmp_coal.tx_coalesce_usecs = coal->tx_coalesce_usecs; - tmp_coal.tx_max_coalesced_frames = coal->tx_max_coalesced_frames; - tmp_coal.use_adaptive_rx_coalesce = coal->use_adaptive_rx_coalesce; - - tmp_coal.pkt_rate_low = coal->pkt_rate_low; - tmp_coal.rx_coalesce_usecs_low = coal->rx_coalesce_usecs_low; - tmp_coal.rx_max_coalesced_frames_low = coal->rx_max_coalesced_frames_low; - - tmp_coal.pkt_rate_high = coal->pkt_rate_high; - tmp_coal.rx_coalesce_usecs_high = coal->rx_coalesce_usecs_high; - tmp_coal.rx_max_coalesced_frames_high = coal->rx_max_coalesced_frames_high; - - if (memcmp(coal, &tmp_coal, sizeof(struct ethtool_coalesce))) { - nicif_err(nic_dev, drv, netdev, "Only support to change rx/tx-usecs and rx/tx-frames\n"); - return -EOPNOTSUPP; - } - - err = is_coalesce_exceed_limit(netdev, coal); - if (err) - return err; - - if (coal->rx_coalesce_usecs_low / COALESCE_TIMER_CFG_UNIT >= - coal->rx_coalesce_usecs_high / COALESCE_TIMER_CFG_UNIT) { - nicif_err(nic_dev, drv, netdev, - "coalesce_usecs_high(%u) must more than coalesce_usecs_low(%u), after dividing %d usecs unit\n", - coal->rx_coalesce_usecs_high, - coal->rx_coalesce_usecs_low, - COALESCE_TIMER_CFG_UNIT); - return -EOPNOTSUPP; - } - - if (coal->rx_max_coalesced_frames_low / COALESCE_PENDING_LIMIT_UNIT >= - coal->rx_max_coalesced_frames_high / COALESCE_PENDING_LIMIT_UNIT) { - nicif_err(nic_dev, drv, netdev, - "coalesced_frames_high(%u) must more than coalesced_frames_low(%u),after dividing %d frames unit\n", - coal->rx_max_coalesced_frames_high, - coal->rx_max_coalesced_frames_low, - COALESCE_PENDING_LIMIT_UNIT); - return -EOPNOTSUPP; - } - - if (coal->pkt_rate_low >= coal->pkt_rate_high) { - nicif_err(nic_dev, drv, netdev, "pkt_rate_high(%u) must more than 
pkt_rate_low(%u)\n", - coal->pkt_rate_high, - coal->pkt_rate_low); - return -EOPNOTSUPP; - } - - return 0; -} - -#define CHECK_COALESCE_ALIGN(coal, item, unit) \ -do { \ - if ((coal)->item % (unit)) \ - nicif_warn(nic_dev, drv, netdev, \ - "%s in %d units, change to %u\n", \ - #item, (unit), ((coal)->item - \ - (coal)->item % (unit))); \ -} while (0) - -#define CHECK_COALESCE_CHANGED(coal, item, unit, ori_val, obj_str) \ -do { \ - if (((coal)->item / (unit)) != (ori_val)) \ - nicif_info(nic_dev, drv, netdev, \ - "Change %s from %d to %u %s\n", \ - #item, (ori_val) * (unit), \ - ((coal)->item - (coal)->item % (unit)), \ - (obj_str)); \ -} while (0) - -#define CHECK_PKT_RATE_CHANGED(coal, item, ori_val, obj_str) \ -do { \ - if ((coal)->item != (ori_val)) \ - nicif_info(nic_dev, drv, netdev, \ - "Change %s from %llu to %u %s\n", \ - #item, (ori_val), (coal)->item, (obj_str)); \ -} while (0) - -static int set_hw_coal_param(struct spnic_nic_dev *nic_dev, struct spnic_intr_coal_info *intr_coal, - u16 queue) -{ - u16 i; - - if (queue == COALESCE_ALL_QUEUE) { - for (i = 0; i < nic_dev->max_qps; i++) - set_queue_coalesce(nic_dev, i, intr_coal); - } else { - if (queue >= nic_dev->q_params.num_qps) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid queue_id: %u\n", queue); - return -EINVAL; - } - set_queue_coalesce(nic_dev, queue, intr_coal); - } - - return 0; -} - -static int set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal, u16 queue) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct spnic_intr_coal_info intr_coal = {0}; - struct spnic_intr_coal_info *ori_intr_coal = NULL; - u32 last_adaptive_rx; - char obj_str[32] = {0}; - int err = 0; - - err = is_coalesce_legal(netdev, coal); - if (err) - return err; - - CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs, COALESCE_TIMER_CFG_UNIT); - CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames, COALESCE_PENDING_LIMIT_UNIT); - CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs_high, COALESCE_TIMER_CFG_UNIT); - CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames_high, COALESCE_PENDING_LIMIT_UNIT); - CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs_low, COALESCE_TIMER_CFG_UNIT); - CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames_low, COALESCE_PENDING_LIMIT_UNIT); - - if (queue == COALESCE_ALL_QUEUE) { - ori_intr_coal = &nic_dev->intr_coalesce[0]; - snprintf(obj_str, sizeof(obj_str), "for netdev"); - } else { - ori_intr_coal = &nic_dev->intr_coalesce[queue]; - snprintf(obj_str, sizeof(obj_str), "for queue %u", queue); - } - CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs, COALESCE_TIMER_CFG_UNIT, - ori_intr_coal->coalesce_timer_cfg, obj_str); - CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames, COALESCE_PENDING_LIMIT_UNIT, - ori_intr_coal->pending_limt, obj_str); - CHECK_PKT_RATE_CHANGED(coal, pkt_rate_high, ori_intr_coal->pkt_rate_high, obj_str); - CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs_high, COALESCE_TIMER_CFG_UNIT, - ori_intr_coal->rx_usecs_high, obj_str); - CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames_high, COALESCE_PENDING_LIMIT_UNIT, - ori_intr_coal->rx_pending_limt_high, obj_str); - CHECK_PKT_RATE_CHANGED(coal, pkt_rate_low, ori_intr_coal->pkt_rate_low, obj_str); - CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs_low, COALESCE_TIMER_CFG_UNIT, - ori_intr_coal->rx_usecs_low, obj_str); - CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames_low, COALESCE_PENDING_LIMIT_UNIT, - ori_intr_coal->rx_pending_limt_low, obj_str); - - intr_coal.coalesce_timer_cfg = (u8)(coal->rx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT); - 
intr_coal.pending_limt = (u8)(coal->rx_max_coalesced_frames / COALESCE_PENDING_LIMIT_UNIT); - - last_adaptive_rx = nic_dev->adaptive_rx_coal; - nic_dev->adaptive_rx_coal = coal->use_adaptive_rx_coalesce; - - intr_coal.pkt_rate_high = coal->pkt_rate_high; - intr_coal.rx_usecs_high = (u8)(coal->rx_coalesce_usecs_high / COALESCE_TIMER_CFG_UNIT); - intr_coal.rx_pending_limt_high = (u8)(coal->rx_max_coalesced_frames_high / - COALESCE_PENDING_LIMIT_UNIT); - - intr_coal.pkt_rate_low = coal->pkt_rate_low; - intr_coal.rx_usecs_low = (u8)(coal->rx_coalesce_usecs_low / COALESCE_TIMER_CFG_UNIT); - intr_coal.rx_pending_limt_low = (u8)(coal->rx_max_coalesced_frames_low / - COALESCE_PENDING_LIMIT_UNIT); - - /* coalesce timer or pending set to zero will disable coalesce */ - if (!nic_dev->adaptive_rx_coal && - (!intr_coal.coalesce_timer_cfg || !intr_coal.pending_limt)) - nicif_warn(nic_dev, drv, netdev, "Coalesce will be disabled\n"); - - /* ensure coalesce paramester will not be changed in auto - * moderation work - */ - if (SPHW_CHANNEL_RES_VALID(nic_dev)) { - if (!nic_dev->adaptive_rx_coal) - cancel_delayed_work_sync(&nic_dev->moderation_task); - else if (!last_adaptive_rx) - queue_delayed_work(nic_dev->workq, &nic_dev->moderation_task, - SPNIC_MODERATONE_DELAY); - } - - return set_hw_coal_param(nic_dev, &intr_coal, queue); -} - -static int spnic_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *coal, - struct kernel_ethtool_coalesce *kernel_coal, - struct netlink_ext_ack *ext_ack) -{ - return get_coalesce(netdev, coal, COALESCE_ALL_QUEUE); -} - -static int spnic_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *coal, - struct kernel_ethtool_coalesce *kernel_coal, - struct netlink_ext_ack *ext_ack) -{ - return set_coalesce(netdev, coal, COALESCE_ALL_QUEUE); -} - -static int spnic_get_per_queue_coalesce(struct net_device *netdev, u32 queue, - struct ethtool_coalesce *coal) -{ - return get_coalesce(netdev, coal, queue); -} - -static int spnic_set_per_queue_coalesce(struct net_device *netdev, u32 queue, - struct ethtool_coalesce *coal) -{ - return set_coalesce(netdev, coal, queue); -} - -static int spnic_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - int err; - - switch (state) { - case ETHTOOL_ID_ACTIVE: - err = spnic_set_led_status(nic_dev->hwdev, MAG_CMD_LED_TYPE_ALARM, - MAG_CMD_LED_MODE_FORCE_BLINK_2HZ); - if (err) - nicif_err(nic_dev, drv, netdev, "Set LED blinking in 2HZ failed\n"); - else - nicif_info(nic_dev, drv, netdev, "Set LED blinking in 2HZ success\n"); - break; - - case ETHTOOL_ID_INACTIVE: - err = spnic_set_led_status(nic_dev->hwdev, MAG_CMD_LED_TYPE_ALARM, - MAG_CMD_LED_MODE_DEFAULT); - if (err) - nicif_err(nic_dev, drv, netdev, "Reset LED to original status failed\n"); - else - nicif_info(nic_dev, drv, netdev, "Reset LED to original status success\n"); - break; - - default: - return -EOPNOTSUPP; - } - - return err; -} - -static void spnic_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct nic_pause_config nic_pause = {0}; - int err; - - err = spnic_get_pause_info(nic_dev->hwdev, &nic_pause); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to get pauseparam from hw\n"); - } else { - pause->autoneg = nic_pause.auto_neg; - pause->rx_pause = nic_pause.rx_pause; - pause->tx_pause = nic_pause.tx_pause; - } -} - -static int spnic_set_pauseparam(struct net_device *netdev, struct 
ethtool_pauseparam *pause) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct nic_pause_config nic_pause = {0}; - struct nic_port_info port_info = {0}; - int err; - - err = spnic_get_port_info(nic_dev->hwdev, &port_info, SPHW_CHANNEL_NIC); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to get auto-negotiation state\n"); - return -EFAULT; - } - - if (pause->autoneg != port_info.autoneg_state) { - nicif_err(nic_dev, drv, netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); - return -EOPNOTSUPP; - } - - if (nic_dev->hw_dcb_cfg.pfc_state) { - nicif_err(nic_dev, drv, netdev, "Can not set pause when pfc is enable\n"); - return -EPERM; - } - - nic_pause.auto_neg = (u8)pause->autoneg; - nic_pause.rx_pause = (u8)pause->rx_pause; - nic_pause.tx_pause = (u8)pause->tx_pause; - - err = spnic_set_pause_info(nic_dev->hwdev, nic_pause); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to set pauseparam\n"); - return -EFAULT; - } - - nicif_info(nic_dev, drv, netdev, "Set pause options, tx: %s, rx: %s\n", - pause->tx_pause ? "on" : "off", - pause->rx_pause ? "on" : "off"); - - return 0; -} - -static int spnic_get_module_info(struct net_device *netdev, struct ethtool_modinfo *modinfo) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - u8 sfp_type = 0; - u8 sfp_type_ext = 0; - int err; - - err = spnic_get_sfp_type(nic_dev->hwdev, &sfp_type, &sfp_type_ext); - if (err) - return err; - - switch (sfp_type) { - case MODULE_TYPE_SFP: - modinfo->type = ETH_MODULE_SFF_8472; - modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; - break; - case MODULE_TYPE_QSFP: - modinfo->type = ETH_MODULE_SFF_8436; - modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; - break; - case MODULE_TYPE_QSFP_PLUS: - if (sfp_type_ext >= 0x3) { - modinfo->type = ETH_MODULE_SFF_8636; - modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; - - } else { - modinfo->type = ETH_MODULE_SFF_8436; - modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; - } - break; - case MODULE_TYPE_QSFP28: - modinfo->type = ETH_MODULE_SFF_8636; - modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; - break; - default: - nicif_warn(nic_dev, drv, netdev, "Optical module unknown: 0x%x\n", sfp_type); - return -EINVAL; - } - - return 0; -} - -static int spnic_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - u8 sfp_data[STD_SFP_INFO_MAX_SIZE]; - int err; - - if (!ee->len || ((ee->len + ee->offset) > STD_SFP_INFO_MAX_SIZE)) - return -EINVAL; - - memset(data, 0, ee->len); - - err = spnic_get_sfp_eeprom(nic_dev->hwdev, (u8 *)sfp_data, ee->len); - if (err) - return err; - - memcpy(data, sfp_data + ee->offset, ee->len); - - return 0; -} - -#define SPNIC_PRIV_FLAGS_SYMM_RSS BIT(0) - -static u32 spnic_get_priv_flags(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - u32 priv_flags = 0; - - if (test_bit(SPNIC_SAME_RXTX, &nic_dev->flags)) - priv_flags |= SPNIC_PRIV_FLAGS_SYMM_RSS; - - return priv_flags; -} - -static int spnic_set_priv_flags(struct net_device *netdev, u32 priv_flags) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - if (priv_flags & SPNIC_PRIV_FLAGS_SYMM_RSS) { - if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) { - nicif_err(nic_dev, drv, netdev, "Failed to open Symmetric RSS while DCB is enabled\n"); - return -EOPNOTSUPP; - } - set_bit(SPNIC_SAME_RXTX, &nic_dev->flags); - } else { - clear_bit(SPNIC_SAME_RXTX, &nic_dev->flags); - } - - return 0; -} - -#define 
BROADCAST_PACKET_SIM 0xFF -#define UNICAST_PACKET_SIM 0xFE -#define IP_PROTOCOL_TYPE 0x08 -#define IP_PROTOCOL_COMPLEMENT 0x00 - -#define PORT_DOWN_ERR_IDX 0 -#define LP_DEFAULT_TIME 5 /* seconds */ -#define LP_PKT_LEN 1514 - -#define TEST_TIME_MULTIPLE 5 -static int spnic_run_lp_test(struct spnic_nic_dev *nic_dev, u32 test_time) -{ - u32 cnt = test_time * TEST_TIME_MULTIPLE; - struct sk_buff *skb = NULL; - struct sk_buff *skb_tmp = NULL; - u8 *test_data = NULL; - u8 *lb_test_rx_buf = nic_dev->lb_test_rx_buf; - struct net_device *netdev = nic_dev->netdev; - u32 i; - u8 j; - - skb_tmp = alloc_skb(LP_PKT_LEN, GFP_ATOMIC); - if (!skb_tmp) { - nicif_err(nic_dev, drv, netdev, "Alloc xmit skb template failed for loopback test\n"); - return -ENOMEM; - } - - test_data = __skb_put(skb_tmp, LP_PKT_LEN); - - memset(test_data, BROADCAST_PACKET_SIM, ETH_ALEN + ETH_ALEN); - - test_data[ETH_ALEN] = UNICAST_PACKET_SIM; - test_data[ETH_ALEN + ETH_ALEN] = IP_PROTOCOL_TYPE; - test_data[ETH_ALEN + ETH_ALEN + 1] = IP_PROTOCOL_COMPLEMENT; - - for (i = ETH_HLEN; i < LP_PKT_LEN; i++) - test_data[i] = i & 0xFF; - - skb_tmp->queue_mapping = 0; - skb_tmp->ip_summed = CHECKSUM_COMPLETE; - skb_tmp->dev = netdev; - - for (i = 0; i < cnt; i++) { - nic_dev->lb_test_rx_idx = 0; - memset(lb_test_rx_buf, 0, LP_PKT_CNT * LP_PKT_LEN); - - for (j = 0; j < LP_PKT_CNT; j++) { - skb = pskb_copy(skb_tmp, GFP_ATOMIC); - if (!skb) { - dev_kfree_skb_any(skb_tmp); - nicif_err(nic_dev, drv, netdev, "Copy skb failed for loopback test\n"); - return -ENOMEM; - } - - /* mark index for every pkt */ - skb->data[LP_PKT_LEN - 1] = j; - - if (spnic_lb_xmit_frame(skb, netdev)) { - dev_kfree_skb_any(skb); - dev_kfree_skb_any(skb_tmp); - nicif_err(nic_dev, drv, netdev, "Xmit pkt failed for loopback test\n"); - return -EBUSY; - } - } - - /* wait till all pkts received to RX buffer */ - msleep(200); - - for (j = 0; j < LP_PKT_CNT; j++) { - if (memcmp((lb_test_rx_buf + (j * LP_PKT_LEN)), - skb_tmp->data, (LP_PKT_LEN - 1)) || - (*(lb_test_rx_buf + ((j * LP_PKT_LEN) + - (LP_PKT_LEN - 1))) != j)) { - dev_kfree_skb_any(skb_tmp); - nicif_err(nic_dev, drv, netdev, - "Compare pkt failed in loopback test(index=0x%02x, data[%d]=0x%02x)\n", - (j + (i * LP_PKT_CNT)), (LP_PKT_LEN - 1), - *(lb_test_rx_buf + - ((j * LP_PKT_LEN) + (LP_PKT_LEN - 1)))); - return -EIO; - } - } - } - - dev_kfree_skb_any(skb_tmp); - nicif_info(nic_dev, drv, netdev, "Loopback test succeed.\n"); - return 0; -} - -enum diag_test_index { - INTERNAL_LP_TEST = 0, - EXTERNAL_LP_TEST = 1, - DIAG_TEST_MAX = 2, -}; - -#define SPNIC_INTERNAL_LP_MODE 5 -static int do_lp_test(struct spnic_nic_dev *nic_dev, u32 *flags, u32 test_time, - enum diag_test_index *test_index) -{ - struct net_device *netdev = nic_dev->netdev; - u8 *lb_test_rx_buf = NULL; - int err = 0; - - if (!(*flags & ETH_TEST_FL_EXTERNAL_LB)) { - *test_index = INTERNAL_LP_TEST; - if (spnic_set_loopback_mode(nic_dev->hwdev, SPNIC_INTERNAL_LP_MODE, true)) { - nicif_err(nic_dev, drv, netdev, - "Failed to set port loopback mode before loopback test\n"); - return -EFAULT; - } - } else { - *test_index = EXTERNAL_LP_TEST; - } - - lb_test_rx_buf = vmalloc(LP_PKT_CNT * LP_PKT_LEN); - if (!lb_test_rx_buf) { - nicif_err(nic_dev, drv, netdev, "Failed to alloc RX buffer for loopback test\n"); - err = -ENOMEM; - } else { - nic_dev->lb_test_rx_buf = lb_test_rx_buf; - nic_dev->lb_pkt_len = LP_PKT_LEN; - set_bit(SPNIC_LP_TEST, &nic_dev->flags); - - if (spnic_run_lp_test(nic_dev, test_time)) - err = -EFAULT; - - clear_bit(SPNIC_LP_TEST, &nic_dev->flags); 
- msleep(100); - vfree(lb_test_rx_buf); - nic_dev->lb_test_rx_buf = NULL; - } - - if (!(*flags & ETH_TEST_FL_EXTERNAL_LB)) { - if (spnic_set_loopback_mode(nic_dev->hwdev, SPNIC_INTERNAL_LP_MODE, false)) { - nicif_err(nic_dev, drv, netdev, - "Failed to cancel port loopback mode after loopback test\n"); - err = -EFAULT; - } - } else { - *flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; - } - - return err; -} - -void spnic_lp_test(struct net_device *netdev, struct ethtool_test *eth_test, - u64 *data, u32 test_time) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - enum diag_test_index test_index = 0; - u8 link_status = 0; - int err; - - /* don't support loopback test when netdev is closed. */ - if (!test_bit(SPNIC_INTF_UP, &nic_dev->flags)) { - nicif_err(nic_dev, drv, netdev, - "Do not support loopback test when netdev is closed\n"); - eth_test->flags |= ETH_TEST_FL_FAILED; - data[PORT_DOWN_ERR_IDX] = 1; - return; - } - - if (test_time == 0) - test_time = LP_DEFAULT_TIME; - - netif_carrier_off(netdev); - netif_tx_disable(netdev); - - err = do_lp_test(nic_dev, ð_test->flags, test_time, &test_index); - if (err) { - eth_test->flags |= ETH_TEST_FL_FAILED; - data[test_index] = 1; - } - - netif_tx_wake_all_queues(netdev); - - err = spnic_get_link_state(nic_dev->hwdev, &link_status); - if (!err && link_status) - netif_carrier_on(netdev); -} - -static void spnic_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) -{ - memset(data, 0, DIAG_TEST_MAX * sizeof(u64)); - - spnic_lp_test(netdev, eth_test, data, 0); -} - -static const struct ethtool_ops spnic_ethtool_ops = { - .supported_coalesce_params = ETHTOOL_COALESCE_USECS | - ETHTOOL_COALESCE_PKT_RATE_RX_USECS, - .get_link_ksettings = spnic_get_link_ksettings, - .set_link_ksettings = spnic_set_link_ksettings, - - .get_drvinfo = spnic_get_drvinfo, - .get_msglevel = spnic_get_msglevel, - .set_msglevel = spnic_set_msglevel, - .nway_reset = spnic_nway_reset, - .get_link = ethtool_op_get_link, - .get_ringparam = spnic_get_ringparam, - .set_ringparam = spnic_set_ringparam, - .get_pauseparam = spnic_get_pauseparam, - .set_pauseparam = spnic_set_pauseparam, - .get_sset_count = spnic_get_sset_count, - .get_ethtool_stats = spnic_get_ethtool_stats, - .get_strings = spnic_get_strings, - - .self_test = spnic_diag_test, - .set_phys_id = spnic_set_phys_id, - - .get_coalesce = spnic_get_coalesce, - .set_coalesce = spnic_set_coalesce, - .get_per_queue_coalesce = spnic_get_per_queue_coalesce, - .set_per_queue_coalesce = spnic_set_per_queue_coalesce, - - .get_rxnfc = spnic_get_rxnfc, - .set_rxnfc = spnic_set_rxnfc, - .get_priv_flags = spnic_get_priv_flags, - .set_priv_flags = spnic_set_priv_flags, - - .get_channels = spnic_get_channels, - .set_channels = spnic_set_channels, - - .get_module_info = spnic_get_module_info, - .get_module_eeprom = spnic_get_module_eeprom, - - .get_rxfh_indir_size = spnic_get_rxfh_indir_size, - .get_rxfh_key_size = spnic_get_rxfh_key_size, - .get_rxfh = spnic_get_rxfh, - .set_rxfh = spnic_set_rxfh, -}; - -static const struct ethtool_ops spnicvf_ethtool_ops = { - .supported_coalesce_params = ETHTOOL_COALESCE_USECS | - ETHTOOL_COALESCE_PKT_RATE_RX_USECS, - .get_link_ksettings = spnic_get_link_ksettings, - .get_drvinfo = spnic_get_drvinfo, - .get_msglevel = spnic_get_msglevel, - .set_msglevel = spnic_set_msglevel, - .get_link = ethtool_op_get_link, - .get_ringparam = spnic_get_ringparam, - - .set_ringparam = spnic_set_ringparam, - .get_sset_count = spnic_get_sset_count, - .get_ethtool_stats = spnic_get_ethtool_stats, - 
.get_strings = spnic_get_strings, - - .get_coalesce = spnic_get_coalesce, - .set_coalesce = spnic_set_coalesce, - .get_per_queue_coalesce = spnic_get_per_queue_coalesce, - .set_per_queue_coalesce = spnic_set_per_queue_coalesce, - - .get_rxnfc = spnic_get_rxnfc, - .set_rxnfc = spnic_set_rxnfc, - .get_priv_flags = spnic_get_priv_flags, - .set_priv_flags = spnic_set_priv_flags, - - .get_channels = spnic_get_channels, - .set_channels = spnic_set_channels, - - .get_rxfh_indir_size = spnic_get_rxfh_indir_size, - .get_rxfh_key_size = spnic_get_rxfh_key_size, - .get_rxfh = spnic_get_rxfh, - .set_rxfh = spnic_set_rxfh, -}; - -void spnic_set_ethtool_ops(struct net_device *netdev) -{ - netdev->ethtool_ops = &spnic_ethtool_ops; -} - -void spnicvf_set_ethtool_ops(struct net_device *netdev) -{ - netdev->ethtool_ops = &spnicvf_ethtool_ops; -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_ethtool_stats.c b/drivers/net/ethernet/ramaxel/spnic/spnic_ethtool_stats.c deleted file mode 100644 index 5a830e3454d4..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_ethtool_stats.c +++ /dev/null @@ -1,1035 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt - -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/device.h> -#include <linux/module.h> -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/interrupt.h> -#include <linux/etherdevice.h> -#include <linux/netdevice.h> -#include <linux/if_vlan.h> -#include <linux/ethtool.h> - -#include "sphw_hw.h" -#include "sphw_crm.h" -#include "sphw_mt.h" -#include "spnic_nic_cfg.h" -#include "spnic_nic_dev.h" -#include "spnic_tx.h" -#include "spnic_rx.h" - -struct spnic_stats { - char name[ETH_GSTRING_LEN]; - u32 size; - int offset; -}; - -#define SPNIC_NETDEV_STAT(_stat_item) { \ - .name = #_stat_item, \ - .size = sizeof_field(struct rtnl_link_stats64, _stat_item), \ - .offset = offsetof(struct rtnl_link_stats64, _stat_item) \ -} - -static struct spnic_stats spnic_netdev_stats[] = { - SPNIC_NETDEV_STAT(rx_packets), - SPNIC_NETDEV_STAT(tx_packets), - SPNIC_NETDEV_STAT(rx_bytes), - SPNIC_NETDEV_STAT(tx_bytes), - SPNIC_NETDEV_STAT(rx_errors), - SPNIC_NETDEV_STAT(tx_errors), - SPNIC_NETDEV_STAT(rx_dropped), - SPNIC_NETDEV_STAT(tx_dropped), - SPNIC_NETDEV_STAT(multicast), - SPNIC_NETDEV_STAT(collisions), - SPNIC_NETDEV_STAT(rx_length_errors), - SPNIC_NETDEV_STAT(rx_over_errors), - SPNIC_NETDEV_STAT(rx_crc_errors), - SPNIC_NETDEV_STAT(rx_frame_errors), - SPNIC_NETDEV_STAT(rx_fifo_errors), - SPNIC_NETDEV_STAT(rx_missed_errors), - SPNIC_NETDEV_STAT(tx_aborted_errors), - SPNIC_NETDEV_STAT(tx_carrier_errors), - SPNIC_NETDEV_STAT(tx_fifo_errors), - SPNIC_NETDEV_STAT(tx_heartbeat_errors), -}; - -#define SPNIC_NIC_STAT(_stat_item) { \ - .name = #_stat_item, \ - .size = sizeof_field(struct spnic_nic_stats, _stat_item), \ - .offset = offsetof(struct spnic_nic_stats, _stat_item) \ -} - -static struct spnic_stats spnic_nic_dev_stats[] = { - SPNIC_NIC_STAT(netdev_tx_timeout), -}; - -static struct spnic_stats spnic_nic_dev_stats_extern[] = { - SPNIC_NIC_STAT(tx_carrier_off_drop), - SPNIC_NIC_STAT(tx_invalid_qid), -}; - -#define SPNIC_RXQ_STAT(_stat_item) { \ - .name = "rxq%d_"#_stat_item, \ - .size = sizeof_field(struct spnic_rxq_stats, _stat_item), \ - .offset = offsetof(struct spnic_rxq_stats, _stat_item) \ -} - -#define SPNIC_TXQ_STAT(_stat_item) { \ - .name = "txq%d_"#_stat_item, \ - .size = sizeof_field(struct spnic_txq_stats, _stat_item), 
\ - .offset = offsetof(struct spnic_txq_stats, _stat_item) \ -} - -static struct spnic_stats spnic_rx_queue_stats[] = { - SPNIC_RXQ_STAT(packets), - SPNIC_RXQ_STAT(bytes), - SPNIC_RXQ_STAT(errors), - SPNIC_RXQ_STAT(csum_errors), - SPNIC_RXQ_STAT(other_errors), - SPNIC_RXQ_STAT(dropped), - SPNIC_RXQ_STAT(xdp_dropped), - SPNIC_RXQ_STAT(rx_buf_empty), -}; - -static struct spnic_stats spnic_rx_queue_stats_extern[] = { - SPNIC_RXQ_STAT(alloc_skb_err), - SPNIC_RXQ_STAT(alloc_rx_buf_err), - SPNIC_RXQ_STAT(xdp_large_pkt), -}; - -static struct spnic_stats spnic_tx_queue_stats[] = { - SPNIC_TXQ_STAT(packets), - SPNIC_TXQ_STAT(bytes), - SPNIC_TXQ_STAT(busy), - SPNIC_TXQ_STAT(wake), - SPNIC_TXQ_STAT(dropped), -}; - -static struct spnic_stats spnic_tx_queue_stats_extern[] = { - SPNIC_TXQ_STAT(skb_pad_err), - SPNIC_TXQ_STAT(frag_len_overflow), - SPNIC_TXQ_STAT(offload_cow_skb_err), - SPNIC_TXQ_STAT(map_frag_err), - SPNIC_TXQ_STAT(unknown_tunnel_pkt), - SPNIC_TXQ_STAT(frag_size_err), -}; - -#define SPNIC_FUNC_STAT(_stat_item) { \ - .name = #_stat_item, \ - .size = sizeof_field(struct spnic_vport_stats, _stat_item), \ - .offset = offsetof(struct spnic_vport_stats, _stat_item) \ -} - -static struct spnic_stats spnic_function_stats[] = { - SPNIC_FUNC_STAT(tx_unicast_pkts_vport), - SPNIC_FUNC_STAT(tx_unicast_bytes_vport), - SPNIC_FUNC_STAT(tx_multicast_pkts_vport), - SPNIC_FUNC_STAT(tx_multicast_bytes_vport), - SPNIC_FUNC_STAT(tx_broadcast_pkts_vport), - SPNIC_FUNC_STAT(tx_broadcast_bytes_vport), - - SPNIC_FUNC_STAT(rx_unicast_pkts_vport), - SPNIC_FUNC_STAT(rx_unicast_bytes_vport), - SPNIC_FUNC_STAT(rx_multicast_pkts_vport), - SPNIC_FUNC_STAT(rx_multicast_bytes_vport), - SPNIC_FUNC_STAT(rx_broadcast_pkts_vport), - SPNIC_FUNC_STAT(rx_broadcast_bytes_vport), - - SPNIC_FUNC_STAT(tx_discard_vport), - SPNIC_FUNC_STAT(rx_discard_vport), - SPNIC_FUNC_STAT(tx_err_vport), - SPNIC_FUNC_STAT(rx_err_vport), -}; - -#define SPNIC_PORT_STAT(_stat_item) { \ - .name = #_stat_item, \ - .size = sizeof_field(struct mag_cmd_port_stats, _stat_item), \ - .offset = offsetof(struct mag_cmd_port_stats, _stat_item) \ -} - -static struct spnic_stats spnic_port_stats[] = { - SPNIC_PORT_STAT(mac_rx_total_pkt_num), - SPNIC_PORT_STAT(mac_rx_total_oct_num), - SPNIC_PORT_STAT(mac_rx_bad_pkt_num), - SPNIC_PORT_STAT(mac_rx_bad_oct_num), - SPNIC_PORT_STAT(mac_rx_good_pkt_num), - SPNIC_PORT_STAT(mac_rx_good_oct_num), - SPNIC_PORT_STAT(mac_rx_uni_pkt_num), - SPNIC_PORT_STAT(mac_rx_multi_pkt_num), - SPNIC_PORT_STAT(mac_rx_broad_pkt_num), - SPNIC_PORT_STAT(mac_tx_total_pkt_num), - SPNIC_PORT_STAT(mac_tx_total_oct_num), - SPNIC_PORT_STAT(mac_tx_bad_pkt_num), - SPNIC_PORT_STAT(mac_tx_bad_oct_num), - SPNIC_PORT_STAT(mac_tx_good_pkt_num), - SPNIC_PORT_STAT(mac_tx_good_oct_num), - SPNIC_PORT_STAT(mac_tx_uni_pkt_num), - SPNIC_PORT_STAT(mac_tx_multi_pkt_num), - SPNIC_PORT_STAT(mac_tx_broad_pkt_num), - SPNIC_PORT_STAT(mac_rx_fragment_pkt_num), - SPNIC_PORT_STAT(mac_rx_undersize_pkt_num), - SPNIC_PORT_STAT(mac_rx_undermin_pkt_num), - SPNIC_PORT_STAT(mac_rx_64_oct_pkt_num), - SPNIC_PORT_STAT(mac_rx_65_127_oct_pkt_num), - SPNIC_PORT_STAT(mac_rx_128_255_oct_pkt_num), - SPNIC_PORT_STAT(mac_rx_256_511_oct_pkt_num), - SPNIC_PORT_STAT(mac_rx_512_1023_oct_pkt_num), - SPNIC_PORT_STAT(mac_rx_1024_1518_oct_pkt_num), - SPNIC_PORT_STAT(mac_rx_1519_2047_oct_pkt_num), - SPNIC_PORT_STAT(mac_rx_2048_4095_oct_pkt_num), - SPNIC_PORT_STAT(mac_rx_4096_8191_oct_pkt_num), - SPNIC_PORT_STAT(mac_rx_8192_9216_oct_pkt_num), - SPNIC_PORT_STAT(mac_rx_9217_12287_oct_pkt_num), - 
SPNIC_PORT_STAT(mac_rx_12288_16383_oct_pkt_num), - SPNIC_PORT_STAT(mac_rx_1519_max_good_pkt_num), - SPNIC_PORT_STAT(mac_rx_1519_max_bad_pkt_num), - SPNIC_PORT_STAT(mac_rx_oversize_pkt_num), - SPNIC_PORT_STAT(mac_rx_jabber_pkt_num), - SPNIC_PORT_STAT(mac_rx_pause_num), - SPNIC_PORT_STAT(mac_rx_pfc_pkt_num), - SPNIC_PORT_STAT(mac_rx_pfc_pri0_pkt_num), - SPNIC_PORT_STAT(mac_rx_pfc_pri1_pkt_num), - SPNIC_PORT_STAT(mac_rx_pfc_pri2_pkt_num), - SPNIC_PORT_STAT(mac_rx_pfc_pri3_pkt_num), - SPNIC_PORT_STAT(mac_rx_pfc_pri4_pkt_num), - SPNIC_PORT_STAT(mac_rx_pfc_pri5_pkt_num), - SPNIC_PORT_STAT(mac_rx_pfc_pri6_pkt_num), - SPNIC_PORT_STAT(mac_rx_pfc_pri7_pkt_num), - SPNIC_PORT_STAT(mac_rx_control_pkt_num), - SPNIC_PORT_STAT(mac_rx_sym_err_pkt_num), - SPNIC_PORT_STAT(mac_rx_fcs_err_pkt_num), - SPNIC_PORT_STAT(mac_rx_send_app_good_pkt_num), - SPNIC_PORT_STAT(mac_rx_send_app_bad_pkt_num), - SPNIC_PORT_STAT(mac_tx_fragment_pkt_num), - SPNIC_PORT_STAT(mac_tx_undersize_pkt_num), - SPNIC_PORT_STAT(mac_tx_undermin_pkt_num), - SPNIC_PORT_STAT(mac_tx_64_oct_pkt_num), - SPNIC_PORT_STAT(mac_tx_65_127_oct_pkt_num), - SPNIC_PORT_STAT(mac_tx_128_255_oct_pkt_num), - SPNIC_PORT_STAT(mac_tx_256_511_oct_pkt_num), - SPNIC_PORT_STAT(mac_tx_512_1023_oct_pkt_num), - SPNIC_PORT_STAT(mac_tx_1024_1518_oct_pkt_num), - SPNIC_PORT_STAT(mac_tx_1519_2047_oct_pkt_num), - SPNIC_PORT_STAT(mac_tx_2048_4095_oct_pkt_num), - SPNIC_PORT_STAT(mac_tx_4096_8191_oct_pkt_num), - SPNIC_PORT_STAT(mac_tx_8192_9216_oct_pkt_num), - SPNIC_PORT_STAT(mac_tx_9217_12287_oct_pkt_num), - SPNIC_PORT_STAT(mac_tx_12288_16383_oct_pkt_num), - SPNIC_PORT_STAT(mac_tx_1519_max_good_pkt_num), - SPNIC_PORT_STAT(mac_tx_1519_max_bad_pkt_num), - SPNIC_PORT_STAT(mac_tx_oversize_pkt_num), - SPNIC_PORT_STAT(mac_tx_jabber_pkt_num), - SPNIC_PORT_STAT(mac_tx_pause_num), - SPNIC_PORT_STAT(mac_tx_pfc_pkt_num), - SPNIC_PORT_STAT(mac_tx_pfc_pri0_pkt_num), - SPNIC_PORT_STAT(mac_tx_pfc_pri1_pkt_num), - SPNIC_PORT_STAT(mac_tx_pfc_pri2_pkt_num), - SPNIC_PORT_STAT(mac_tx_pfc_pri3_pkt_num), - SPNIC_PORT_STAT(mac_tx_pfc_pri4_pkt_num), - SPNIC_PORT_STAT(mac_tx_pfc_pri5_pkt_num), - SPNIC_PORT_STAT(mac_tx_pfc_pri6_pkt_num), - SPNIC_PORT_STAT(mac_tx_pfc_pri7_pkt_num), - SPNIC_PORT_STAT(mac_tx_control_pkt_num), - SPNIC_PORT_STAT(mac_tx_err_all_pkt_num), - SPNIC_PORT_STAT(mac_tx_from_app_good_pkt_num), - SPNIC_PORT_STAT(mac_tx_from_app_bad_pkt_num), -}; - -static char g_spnic_priv_flags_strings[][ETH_GSTRING_LEN] = { - "Symmetric-RSS", -}; - -u32 spnic_get_io_stats_size(struct spnic_nic_dev *nic_dev) -{ - u32 count; - - count = ARRAY_LEN(spnic_nic_dev_stats) + - ARRAY_LEN(spnic_nic_dev_stats_extern) + - (ARRAY_LEN(spnic_tx_queue_stats) + - ARRAY_LEN(spnic_tx_queue_stats_extern) + - ARRAY_LEN(spnic_rx_queue_stats) + - ARRAY_LEN(spnic_rx_queue_stats_extern)) * nic_dev->max_qps; - - return count; -} - -#define GET_VALUE_OF_PTR(size, ptr) ( \ - (size) == sizeof(u64) ? *(u64 *)(ptr) : \ - (size) == sizeof(u32) ? *(u32 *)(ptr) : \ - (size) == sizeof(u16) ? 
*(u16 *)(ptr) : *(u8 *)(ptr) \ -) - -#define DEV_STATS_PACK(items, item_idx, array, stats_ptr) do { \ - int j; \ - for (j = 0; j < ARRAY_LEN(array); j++) { \ - memcpy((items)[item_idx].name, (array)[j].name, \ - SPNIC_SHOW_ITEM_LEN); \ - (items)[item_idx].hexadecimal = 0; \ - (items)[item_idx].value = \ - GET_VALUE_OF_PTR((array)[j].size, \ - (char *)(stats_ptr) + (array)[j].offset); \ - (item_idx)++; \ - } \ -} while (0) - -#define QUEUE_STATS_PACK(items, item_idx, array, stats_ptr, qid) do { \ - int j; \ - for (j = 0; j < ARRAY_LEN(array); j++) { \ - memcpy((items)[item_idx].name, (array)[j].name, \ - SPNIC_SHOW_ITEM_LEN); \ - snprintf((items)[item_idx].name, SPNIC_SHOW_ITEM_LEN, \ - (array)[j].name, (qid)); \ - (items)[item_idx].hexadecimal = 0; \ - (items)[item_idx].value = \ - GET_VALUE_OF_PTR((array)[j].size, \ - (char *)(stats_ptr) + (array)[j].offset); \ - (item_idx)++; \ - } \ -} while (0) - -void spnic_get_io_stats(struct spnic_nic_dev *nic_dev, void *stats) -{ - struct spnic_show_item *items = stats; - int item_idx = 0; - u16 qid; - - DEV_STATS_PACK(items, item_idx, spnic_nic_dev_stats, &nic_dev->stats); - DEV_STATS_PACK(items, item_idx, spnic_nic_dev_stats_extern, - &nic_dev->stats); - - for (qid = 0; qid < nic_dev->max_qps; qid++) { - QUEUE_STATS_PACK(items, item_idx, spnic_tx_queue_stats, - &nic_dev->txqs[qid].txq_stats, qid); - QUEUE_STATS_PACK(items, item_idx, spnic_tx_queue_stats_extern, - &nic_dev->txqs[qid].txq_stats, qid); - } - - for (qid = 0; qid < nic_dev->max_qps; qid++) { - QUEUE_STATS_PACK(items, item_idx, spnic_rx_queue_stats, - &nic_dev->rxqs[qid].rxq_stats, qid); - QUEUE_STATS_PACK(items, item_idx, spnic_rx_queue_stats_extern, - &nic_dev->rxqs[qid].rxq_stats, qid); - } -} - -static char spnic_test_strings[][ETH_GSTRING_LEN] = { - "Internal lb test (on/offline)", - "External lb test (external_lb)", -}; - -int spnic_get_sset_count(struct net_device *netdev, int sset) -{ - int count = 0, q_num = 0; - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - switch (sset) { - case ETH_SS_TEST: - return ARRAY_LEN(spnic_test_strings); - case ETH_SS_STATS: - q_num = nic_dev->q_params.num_qps; - count = ARRAY_LEN(spnic_netdev_stats) + - ARRAY_LEN(spnic_nic_dev_stats) + - ARRAY_LEN(spnic_function_stats) + - (ARRAY_LEN(spnic_tx_queue_stats) + - ARRAY_LEN(spnic_rx_queue_stats)) * q_num; - - if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev)) - count += ARRAY_LEN(spnic_port_stats); - - return count; - case ETH_SS_PRIV_FLAGS: - return ARRAY_LEN(g_spnic_priv_flags_strings); - default: - return -EOPNOTSUPP; - } -} - -static void get_drv_queue_stats(struct spnic_nic_dev *nic_dev, u64 *data) -{ - struct spnic_txq_stats txq_stats; - struct spnic_rxq_stats rxq_stats; - u16 i = 0, j = 0, qid = 0; - char *p = NULL; - - for (qid = 0; qid < nic_dev->q_params.num_qps; qid++) { - if (!nic_dev->txqs) - break; - - spnic_txq_get_stats(&nic_dev->txqs[qid], &txq_stats); - for (j = 0; j < ARRAY_LEN(spnic_tx_queue_stats); j++, i++) { - p = (char *)(&txq_stats) + - spnic_tx_queue_stats[j].offset; - data[i] = (spnic_tx_queue_stats[j].size == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; - } - } - - for (qid = 0; qid < nic_dev->q_params.num_qps; qid++) { - if (!nic_dev->rxqs) - break; - - spnic_rxq_get_stats(&nic_dev->rxqs[qid], &rxq_stats); - for (j = 0; j < ARRAY_LEN(spnic_rx_queue_stats); j++, i++) { - p = (char *)(&rxq_stats) + - spnic_rx_queue_stats[j].offset; - data[i] = (spnic_rx_queue_stats[j].size == - sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; - } - } -} - -static u16 get_ethtool_port_stats(struct spnic_nic_dev *nic_dev, u64 *data) -{ - struct mag_cmd_port_stats *port_stats; - char *p = NULL; - u16 i = 0, j = 0; - int err; - - port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL); - if (!port_stats) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to malloc port stats\n"); - memset(&data[i], 0, - ARRAY_LEN(spnic_port_stats) * sizeof(*data)); - i += ARRAY_LEN(spnic_port_stats); - return i; - } - - err = spnic_get_phy_port_stats(nic_dev->hwdev, port_stats); - if (err) - nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to get port stats from fw\n"); - - for (j = 0; j < ARRAY_LEN(spnic_port_stats); j++, i++) { - p = (char *)(port_stats) + spnic_port_stats[j].offset; - data[i] = (spnic_port_stats[j].size == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; - } - - kfree(port_stats); - - return i; -} - -void spnic_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct rtnl_link_stats64 temp; - const struct rtnl_link_stats64 *net_stats = NULL; - struct spnic_nic_stats *nic_stats = NULL; - - struct spnic_vport_stats vport_stats = {0}; - u16 i = 0, j = 0; - char *p = NULL; - int err; - - net_stats = dev_get_stats(netdev, &temp); - for (j = 0; j < ARRAY_LEN(spnic_netdev_stats); j++, i++) { - p = (char *)(net_stats) + spnic_netdev_stats[j].offset; - data[i] = GET_VALUE_OF_PTR(spnic_netdev_stats[j].size, p); - } - - nic_stats = &nic_dev->stats; - for (j = 0; j < ARRAY_LEN(spnic_nic_dev_stats); j++, i++) { - p = (char *)(nic_stats) + spnic_nic_dev_stats[j].offset; - data[i] = GET_VALUE_OF_PTR(spnic_nic_dev_stats[j].size, p); - } - - err = spnic_get_vport_stats(nic_dev->hwdev, &vport_stats); - if (err) - nicif_err(nic_dev, drv, netdev, - "Failed to get function stats from fw\n"); - - for (j = 0; j < ARRAY_LEN(spnic_function_stats); j++, i++) { - p = (char *)(&vport_stats) + spnic_function_stats[j].offset; - data[i] = GET_VALUE_OF_PTR(spnic_function_stats[j].size, p); - } - - if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev)) - i += get_ethtool_port_stats(nic_dev, data + i); - - get_drv_queue_stats(nic_dev, data + i); -} - -static u16 get_drv_dev_strings(struct spnic_nic_dev *nic_dev, char *p) -{ - u16 i, cnt = 0; - - for (i = 0; i < ARRAY_LEN(spnic_netdev_stats); i++) { - memcpy(p, spnic_netdev_stats[i].name, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - cnt++; - } - - for (i = 0; i < ARRAY_LEN(spnic_nic_dev_stats); i++) { - memcpy(p, spnic_nic_dev_stats[i].name, ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - cnt++; - } - - return cnt; -} - -static u16 get_hw_stats_strings(struct spnic_nic_dev *nic_dev, char *p) -{ - u16 i, cnt = 0; - - for (i = 0; i < ARRAY_LEN(spnic_function_stats); i++) { - memcpy(p, spnic_function_stats[i].name, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - cnt++; - } - - if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev)) { - for (i = 0; i < ARRAY_LEN(spnic_port_stats); i++) { - memcpy(p, spnic_port_stats[i].name, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - cnt++; - } - } - - return cnt; -} - -static u16 get_qp_stats_strings(struct spnic_nic_dev *nic_dev, char *p) -{ - u16 i = 0, j = 0, cnt = 0; - - for (i = 0; i < nic_dev->q_params.num_qps; i++) { - for (j = 0; j < ARRAY_LEN(spnic_tx_queue_stats); j++) { - sprintf(p, spnic_tx_queue_stats[j].name, i); - p += ETH_GSTRING_LEN; - cnt++; - } - } - - for (i = 0; i < nic_dev->q_params.num_qps; i++) { - for (j = 0; j < ARRAY_LEN(spnic_rx_queue_stats); j++) { - sprintf(p, 
spnic_rx_queue_stats[j].name, i); - p += ETH_GSTRING_LEN; - cnt++; - } - } - - return cnt; -} - -void spnic_get_strings(struct net_device *netdev, u32 stringset, u8 *data) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - char *p = (char *)data; - u16 offset = 0; - - switch (stringset) { - case ETH_SS_TEST: - memcpy(data, *spnic_test_strings, sizeof(spnic_test_strings)); - return; - case ETH_SS_STATS: - offset = get_drv_dev_strings(nic_dev, p); - offset += get_hw_stats_strings(nic_dev, - p + offset * ETH_GSTRING_LEN); - get_qp_stats_strings(nic_dev, p + offset * ETH_GSTRING_LEN); - - return; - case ETH_SS_PRIV_FLAGS: - memcpy(data, g_spnic_priv_flags_strings, - sizeof(g_spnic_priv_flags_strings)); - return; - default: - nicif_err(nic_dev, drv, netdev, - "Invalid string set %u.", stringset); - return; - } -} - -static const u32 spnic_mag_link_mode_ge[] = { - ETHTOOL_LINK_MODE_1000baseT_Full_BIT, - ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, - ETHTOOL_LINK_MODE_1000baseX_Full_BIT, -}; - -static const u32 spnic_mag_link_mode_10ge_base_r[] = { - ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, - ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, - ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, - ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, - ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, - ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, -}; - -static const u32 spnic_mag_link_mode_25ge_base_r[] = { - ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, - ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, - ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, -}; - -static const u32 spnic_mag_link_mode_40ge_base_r4[] = { - ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, - ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, - ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, - ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, -}; - -static const u32 spnic_mag_link_mode_50ge_base_r[] = { - ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, - ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, - ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, -}; - -static const u32 spnic_mag_link_mode_50ge_base_r2[] = { - ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, - ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, - ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, -}; - -static const u32 spnic_mag_link_mode_100ge_base_r[] = { - ETHTOOL_LINK_MODE_100000baseKR_Full_BIT, - ETHTOOL_LINK_MODE_100000baseSR_Full_BIT, - ETHTOOL_LINK_MODE_100000baseCR_Full_BIT, -}; - -static const u32 spnic_mag_link_mode_100ge_base_r2[] = { - ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, - ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, - ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, -}; - -static const u32 spnic_mag_link_mode_100ge_base_r4[] = { - ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, - ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, - ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, -}; - -static const u32 spnic_mag_link_mode_200ge_base_r2[] = { - ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT, - ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT, - ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT, -}; - -static const u32 spnic_mag_link_mode_200ge_base_r4[] = { - ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, - ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, - ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, -}; - -struct hw2ethtool_link_mode { - const u32 *link_mode_bit_arr; - u32 arr_size; - u32 speed; -}; - -static const struct hw2ethtool_link_mode - hw2ethtool_link_mode_table[LINK_MODE_MAX_NUMBERS] = { - [LINK_MODE_GE] = { - .link_mode_bit_arr = spnic_mag_link_mode_ge, - .arr_size = ARRAY_LEN(spnic_mag_link_mode_ge), - .speed = SPEED_1000, - }, - [LINK_MODE_10GE_BASE_R] = { - .link_mode_bit_arr = spnic_mag_link_mode_10ge_base_r, - .arr_size = 
ARRAY_LEN(spnic_mag_link_mode_10ge_base_r), - .speed = SPEED_10000, - }, - [LINK_MODE_25GE_BASE_R] = { - .link_mode_bit_arr = spnic_mag_link_mode_25ge_base_r, - .arr_size = ARRAY_LEN(spnic_mag_link_mode_25ge_base_r), - .speed = SPEED_25000, - }, - [LINK_MODE_40GE_BASE_R4] = { - .link_mode_bit_arr = spnic_mag_link_mode_40ge_base_r4, - .arr_size = ARRAY_LEN(spnic_mag_link_mode_40ge_base_r4), - .speed = SPEED_40000, - }, - [LINK_MODE_50GE_BASE_R] = { - .link_mode_bit_arr = spnic_mag_link_mode_50ge_base_r, - .arr_size = ARRAY_LEN(spnic_mag_link_mode_50ge_base_r), - .speed = SPEED_50000, - }, - [LINK_MODE_50GE_BASE_R2] = { - .link_mode_bit_arr = spnic_mag_link_mode_50ge_base_r2, - .arr_size = ARRAY_LEN(spnic_mag_link_mode_50ge_base_r2), - .speed = SPEED_50000, - }, - [LINK_MODE_100GE_BASE_R] = { - .link_mode_bit_arr = spnic_mag_link_mode_100ge_base_r, - .arr_size = ARRAY_LEN(spnic_mag_link_mode_100ge_base_r), - .speed = SPEED_100000, - }, - [LINK_MODE_100GE_BASE_R2] = { - .link_mode_bit_arr = spnic_mag_link_mode_100ge_base_r2, - .arr_size = ARRAY_LEN(spnic_mag_link_mode_100ge_base_r2), - .speed = SPEED_100000, - }, - [LINK_MODE_100GE_BASE_R4] = { - .link_mode_bit_arr = spnic_mag_link_mode_100ge_base_r4, - .arr_size = ARRAY_LEN(spnic_mag_link_mode_100ge_base_r4), - .speed = SPEED_100000, - }, - [LINK_MODE_200GE_BASE_R2] = { - .link_mode_bit_arr = spnic_mag_link_mode_200ge_base_r2, - .arr_size = ARRAY_LEN(spnic_mag_link_mode_200ge_base_r2), - .speed = SPEED_200000, - }, - [LINK_MODE_200GE_BASE_R4] = { - .link_mode_bit_arr = spnic_mag_link_mode_200ge_base_r4, - .arr_size = ARRAY_LEN(spnic_mag_link_mode_200ge_base_r4), - .speed = SPEED_200000, - }, -}; - -#define GET_SUPPORTED_MODE 0 -#define GET_ADVERTISED_MODE 1 - -struct cmd_link_settings { - __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); - __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); - - u32 speed; - u8 duplex; - u8 port; - u8 autoneg; -}; - -#define ETHTOOL_ADD_SUPPORTED_LINK_MODE(ecmd, mode) \ - set_bit(ETHTOOL_LINK_MODE_##mode##_BIT, (ecmd)->supported) -#define ETHTOOL_ADD_ADVERTISED_LINK_MODE(ecmd, mode) \ - set_bit(ETHTOOL_LINK_MODE_##mode##_BIT, (ecmd)->advertising) - -#define ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE(ecmd, mode) \ -do { \ - u32 i; \ - for (i = 0; i < hw2ethtool_link_mode_table[mode].arr_size; i++) \ - set_bit(hw2ethtool_link_mode_table[mode].link_mode_bit_arr[i], \ - (ecmd)->supported); \ -} while (0) - -#define ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE(ecmd, mode) \ -do { \ - u32 i; \ - for (i = 0; i < hw2ethtool_link_mode_table[mode].arr_size; i++) \ - set_bit(hw2ethtool_link_mode_table[mode].link_mode_bit_arr[i], \ - (ecmd)->advertising); \ -} while (0) - -/* Related to enum mag_cmd_port_speed */ -static u32 hw_to_ethtool_speed[] = { - (u32)SPEED_UNKNOWN, SPEED_10, SPEED_100, SPEED_1000, SPEED_10000, - SPEED_25000, SPEED_40000, SPEED_50000, SPEED_100000, SPEED_200000 -}; - -static int spnic_ethtool_to_hw_speed_level(u32 speed) -{ - int i; - - for (i = 0; i < ARRAY_LEN(hw_to_ethtool_speed); i++) { - if (hw_to_ethtool_speed[i] == speed) - break; - } - - return i; -} - -static void -spnic_add_ethtool_link_mode(struct cmd_link_settings *link_settings, u32 hw_link_mode, u32 name) -{ - u32 link_mode; - - for (link_mode = 0; link_mode < LINK_MODE_MAX_NUMBERS; link_mode++) { - if (hw_link_mode & BIT(link_mode)) { - if (name == GET_SUPPORTED_MODE) - ETHTOOL_ADD_SUPPORTED_SPEED_LINK_MODE(link_settings, link_mode); - else - ETHTOOL_ADD_ADVERTISED_SPEED_LINK_MODE(link_settings, link_mode); - } - } -} - -static int 
spnic_link_speed_set(struct spnic_nic_dev *nic_dev, - struct cmd_link_settings *link_settings, - struct nic_port_info *port_info) -{ - u8 link_state = 0; - int err; - - if (port_info->supported_mode != LINK_MODE_UNKNOWN) - spnic_add_ethtool_link_mode(link_settings, port_info->supported_mode, - GET_SUPPORTED_MODE); - if (port_info->advertised_mode != LINK_MODE_UNKNOWN) - spnic_add_ethtool_link_mode(link_settings, port_info->advertised_mode, - GET_ADVERTISED_MODE); - - err = spnic_get_link_state(nic_dev->hwdev, &link_state); - if (!err && link_state) { - link_settings->speed = - port_info->speed < ARRAY_LEN(hw_to_ethtool_speed) ? - hw_to_ethtool_speed[port_info->speed] : - (u32)SPEED_UNKNOWN; - - link_settings->duplex = port_info->duplex; - } else { - link_settings->speed = (u32)SPEED_UNKNOWN; - link_settings->duplex = DUPLEX_UNKNOWN; - } - - return 0; -} - -static void spnic_link_port_type(struct cmd_link_settings *link_settings, u8 port_type) -{ - switch (port_type) { - case MAG_CMD_WIRE_TYPE_ELECTRIC: - ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, TP); - ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, TP); - link_settings->port = PORT_TP; - break; - - case MAG_CMD_WIRE_TYPE_AOC: - case MAG_CMD_WIRE_TYPE_MM: - case MAG_CMD_WIRE_TYPE_SM: - ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE); - ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE); - link_settings->port = PORT_FIBRE; - break; - - case MAG_CMD_WIRE_TYPE_COPPER: - ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE); - ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE); - link_settings->port = PORT_DA; - break; - - case MAG_CMD_WIRE_TYPE_BACKPLANE: - ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Backplane); - ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Backplane); - link_settings->port = PORT_NONE; - break; - - default: - link_settings->port = PORT_OTHER; - break; - } -} - -static int get_link_pause_settings(struct spnic_nic_dev *nic_dev, - struct cmd_link_settings *link_settings) -{ - struct nic_pause_config nic_pause = {0}; - int err; - - err = spnic_get_pause_info(nic_dev->hwdev, &nic_pause); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to get pauseparam from hw\n"); - return err; - } - - ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Pause); - if (nic_pause.rx_pause && nic_pause.tx_pause) { - ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Pause); - } else if (nic_pause.tx_pause) { - ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Asym_Pause); - } else if (nic_pause.rx_pause) { - ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Pause); - ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Asym_Pause); - } - - return 0; -} - -int get_link_settings(struct net_device *netdev, struct cmd_link_settings *link_settings) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct nic_port_info port_info = {0}; - int err; - - err = spnic_get_port_info(nic_dev->hwdev, &port_info, SPHW_CHANNEL_NIC); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to get port info\n"); - return err; - } - - err = spnic_link_speed_set(nic_dev, link_settings, &port_info); - if (err) - return err; - - spnic_link_port_type(link_settings, port_info.port_type); - - link_settings->autoneg = port_info.autoneg_state == PORT_CFG_AN_ON ? 
- AUTONEG_ENABLE : AUTONEG_DISABLE; - if (port_info.autoneg_cap) - ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Autoneg); - if (port_info.autoneg_state == PORT_CFG_AN_ON) - ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Autoneg); - - if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev)) - err = get_link_pause_settings(nic_dev, link_settings); - - return err; -} - -int spnic_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *link_settings) -{ - struct cmd_link_settings settings = { { 0 } }; - struct ethtool_link_settings *base = &link_settings->base; - int err; - - ethtool_link_ksettings_zero_link_mode(link_settings, supported); - ethtool_link_ksettings_zero_link_mode(link_settings, advertising); - - err = get_link_settings(netdev, &settings); - if (err) - return err; - - bitmap_copy(link_settings->link_modes.supported, settings.supported, - __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_copy(link_settings->link_modes.advertising, settings.advertising, - __ETHTOOL_LINK_MODE_MASK_NBITS); - - base->autoneg = settings.autoneg; - base->speed = settings.speed; - base->duplex = settings.duplex; - base->port = settings.port; - - return 0; -} - -static bool spnic_is_support_speed(u32 supported_link, u32 speed) -{ - u32 link_mode; - - for (link_mode = 0; link_mode < LINK_MODE_MAX_NUMBERS; link_mode++) { - if (!(supported_link & BIT(link_mode))) - continue; - - if (hw2ethtool_link_mode_table[link_mode].speed == speed) - return true; - } - - return false; -} - -static int spnic_is_speed_legal(struct spnic_nic_dev *nic_dev, - struct nic_port_info *port_info, u32 speed) -{ - struct net_device *netdev = nic_dev->netdev; - int speed_level = 0; - - if (port_info->supported_mode == LINK_MODE_UNKNOWN || - port_info->advertised_mode == LINK_MODE_UNKNOWN) { - nicif_err(nic_dev, drv, netdev, "Unknown supported link modes\n"); - return -EAGAIN; - } - - speed_level = spnic_ethtool_to_hw_speed_level(speed); - if (speed_level >= PORT_SPEED_UNKNOWN || - !spnic_is_support_speed(port_info->supported_mode, speed)) { - nicif_err(nic_dev, drv, netdev, - "Not supported speed: %u\n", speed); - return -EINVAL; - } - - return 0; -} - -static int get_link_settings_type(struct spnic_nic_dev *nic_dev, - u8 autoneg, u32 speed, u32 *set_settings) -{ - struct nic_port_info port_info = {0}; - int err; - - err = spnic_get_port_info(nic_dev->hwdev, &port_info, SPHW_CHANNEL_NIC); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to get current settings\n"); - return -EAGAIN; - } - - /* Alwayse set autonegation */ - if (port_info.autoneg_cap) - *set_settings |= HILINK_LINK_SET_AUTONEG; - - if (autoneg == AUTONEG_ENABLE) { - if (!port_info.autoneg_cap) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Not support autoneg\n"); - return -EOPNOTSUPP; - } - } else if (speed != (u32)SPEED_UNKNOWN) { - /* Set speed only when autoneg is disable */ - err = spnic_is_speed_legal(nic_dev, &port_info, speed); - if (err) - return err; - - *set_settings |= HILINK_LINK_SET_SPEED; - } else { - nicif_err(nic_dev, drv, nic_dev->netdev, "Need to set speed when autoneg is off\n"); - return -EOPNOTSUPP; - } - - return 0; -} - -static int spnic_set_settings_to_hw(struct spnic_nic_dev *nic_dev, - u32 set_settings, u8 autoneg, u32 speed) -{ - struct net_device *netdev = nic_dev->netdev; - struct spnic_link_ksettings settings = {0}; - int speed_level = 0; - char set_link_str[128] = {0}; - int err = 0; - - snprintf(set_link_str, sizeof(set_link_str), "%s", - (set_settings & HILINK_LINK_SET_AUTONEG) ? - (autoneg ? 
"autong enable " : "autong disable ") : ""); - if (set_settings & HILINK_LINK_SET_SPEED) { - speed_level = spnic_ethtool_to_hw_speed_level(speed); - snprintf(set_link_str, sizeof(set_link_str), - "%sspeed %u ", set_link_str, speed); - } - - settings.valid_bitmap = set_settings; - settings.autoneg = autoneg ? PORT_CFG_AN_ON : PORT_CFG_AN_OFF; - settings.speed = (u8)speed_level; - - err = spnic_set_link_settings(nic_dev->hwdev, &settings); - if (err) - nicif_err(nic_dev, drv, netdev, "Set %sfailed\n", - set_link_str); - else - nicif_info(nic_dev, drv, netdev, "Set %ssuccess\n", - set_link_str); - - return err; -} - -int set_link_settings(struct net_device *netdev, u8 autoneg, u32 speed) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - u32 set_settings = 0; - int err = 0; - - err = get_link_settings_type(nic_dev, autoneg, speed, &set_settings); - if (err) - return err; - - if (set_settings) - err = spnic_set_settings_to_hw(nic_dev, set_settings, autoneg, speed); - else - nicif_info(nic_dev, drv, netdev, "Nothing changed, exiting without setting anything\n"); - - return err; -} - -int spnic_set_link_ksettings(struct net_device *netdev, - const struct ethtool_link_ksettings *link_settings) -{ - /* Only support to set autoneg and speed */ - return set_link_settings(netdev, link_settings->base.autoneg, - link_settings->base.speed); -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_filter.c b/drivers/net/ethernet/ramaxel/spnic/spnic_filter.c deleted file mode 100644 index d7ca2bed454b..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_filter.c +++ /dev/null @@ -1,411 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/device.h> -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/etherdevice.h> -#include <linux/netdevice.h> -#include <linux/debugfs.h> -#include <linux/module.h> -#include <linux/moduleparam.h> - -#include "sphw_hw.h" -#include "sphw_crm.h" -#include "spnic_nic_dev.h" - -enum spnic_rx_mod { - SPNIC_RX_MODE_UC = 1 << 0, - SPNIC_RX_MODE_MC = 1 << 1, - SPNIC_RX_MODE_BC = 1 << 2, - SPNIC_RX_MODE_MC_ALL = 1 << 3, - SPNIC_RX_MODE_PROMISC = 1 << 4, -}; - -static int spnic_uc_sync(struct net_device *netdev, u8 *addr) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - return spnic_set_mac(nic_dev->hwdev, addr, 0, sphw_global_func_id(nic_dev->hwdev), - SPHW_CHANNEL_NIC); -} - -static int spnic_uc_unsync(struct net_device *netdev, u8 *addr) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - /* The addr is in use */ - if (ether_addr_equal(addr, netdev->dev_addr)) - return 0; - - return spnic_del_mac(nic_dev->hwdev, addr, 0, sphw_global_func_id(nic_dev->hwdev), - SPHW_CHANNEL_NIC); -} - -void spnic_clean_mac_list_filter(struct spnic_nic_dev *nic_dev) -{ - struct net_device *netdev = nic_dev->netdev; - struct spnic_mac_filter *ftmp = NULL; - struct spnic_mac_filter *f = NULL; - - list_for_each_entry_safe(f, ftmp, &nic_dev->uc_filter_list, list) { - if (f->state == SPNIC_MAC_HW_SYNCED) - spnic_uc_unsync(netdev, f->addr); - list_del(&f->list); - kfree(f); - } - - list_for_each_entry_safe(f, ftmp, &nic_dev->mc_filter_list, list) { - if (f->state == SPNIC_MAC_HW_SYNCED) - spnic_uc_unsync(netdev, f->addr); - list_del(&f->list); - kfree(f); - } -} - -static struct spnic_mac_filter *spnic_find_mac(struct list_head *filter_list, u8 *addr) -{ - struct spnic_mac_filter *f = 
NULL; - - list_for_each_entry(f, filter_list, list) { - if (ether_addr_equal(addr, f->addr)) - return f; - } - return NULL; -} - -static struct spnic_mac_filter *spnic_add_filter(struct spnic_nic_dev *nic_dev, - struct list_head *mac_filter_list, u8 *addr) -{ - struct spnic_mac_filter *f; - - f = kzalloc(sizeof(*f), GFP_ATOMIC); - if (!f) - goto out; - - ether_addr_copy(f->addr, addr); - - INIT_LIST_HEAD(&f->list); - list_add_tail(&f->list, mac_filter_list); - - f->state = SPNIC_MAC_WAIT_HW_SYNC; - set_bit(SPNIC_MAC_FILTER_CHANGED, &nic_dev->flags); - -out: - return f; -} - -static void spnic_del_filter(struct spnic_nic_dev *nic_dev, struct spnic_mac_filter *f) -{ - set_bit(SPNIC_MAC_FILTER_CHANGED, &nic_dev->flags); - - if (f->state == SPNIC_MAC_WAIT_HW_SYNC) { - /* have not added to hw, delete it directly */ - list_del(&f->list); - kfree(f); - return; - } - - f->state = SPNIC_MAC_WAIT_HW_UNSYNC; -} - -static struct spnic_mac_filter *spnic_mac_filter_entry_clone(struct spnic_mac_filter *src) -{ - struct spnic_mac_filter *f; - - f = kzalloc(sizeof(*f), GFP_ATOMIC); - if (!f) - return NULL; - - *f = *src; - INIT_LIST_HEAD(&f->list); - - return f; -} - -static void spnic_undo_del_filter_entries(struct list_head *filter_list, struct list_head *from) -{ - struct spnic_mac_filter *ftmp = NULL; - struct spnic_mac_filter *f = NULL; - - list_for_each_entry_safe(f, ftmp, from, list) { - if (spnic_find_mac(filter_list, f->addr)) - continue; - - if (f->state == SPNIC_MAC_HW_SYNCED) - f->state = SPNIC_MAC_WAIT_HW_UNSYNC; - - list_move_tail(&f->list, filter_list); - } -} - -static void spnic_undo_add_filter_entries(struct list_head *filter_list, struct list_head *from) -{ - struct spnic_mac_filter *ftmp = NULL; - struct spnic_mac_filter *tmp = NULL; - struct spnic_mac_filter *f = NULL; - - list_for_each_entry_safe(f, ftmp, from, list) { - tmp = spnic_find_mac(filter_list, f->addr); - if (tmp && tmp->state == SPNIC_MAC_HW_SYNCED) - tmp->state = SPNIC_MAC_WAIT_HW_SYNC; - } -} - -static void spnic_cleanup_filter_list(struct list_head *head) -{ - struct spnic_mac_filter *ftmp = NULL; - struct spnic_mac_filter *f = NULL; - - list_for_each_entry_safe(f, ftmp, head, list) { - list_del(&f->list); - kfree(f); - } -} - -static int spnic_mac_filter_sync_hw(struct spnic_nic_dev *nic_dev, struct list_head *del_list, - struct list_head *add_list) -{ - struct net_device *netdev = nic_dev->netdev; - struct spnic_mac_filter *ftmp = NULL; - struct spnic_mac_filter *f = NULL; - int err = 0, add_count = 0; - - if (!list_empty(del_list)) { - list_for_each_entry_safe(f, ftmp, del_list, list) { - err = spnic_uc_unsync(netdev, f->addr); - if (err) { /* ignore errors when delete mac */ - nic_err(&nic_dev->pdev->dev, "Failed to delete mac\n"); - } - - list_del(&f->list); - kfree(f); - } - } - - if (!list_empty(add_list)) { - list_for_each_entry_safe(f, ftmp, add_list, list) { - err = spnic_uc_sync(netdev, f->addr); - if (err) { - nic_err(&nic_dev->pdev->dev, "Failed to add mac\n"); - return err; - } - - add_count++; - list_del(&f->list); - kfree(f); - } - } - - return add_count; -} - -static int spnic_mac_filter_sync(struct spnic_nic_dev *nic_dev, - struct list_head *mac_filter_list, bool uc) -{ - struct net_device *netdev = nic_dev->netdev; - struct list_head tmp_del_list, tmp_add_list; - struct spnic_mac_filter *fclone = NULL; - struct spnic_mac_filter *ftmp = NULL; - struct spnic_mac_filter *f = NULL; - int err = 0, add_count = 0; - - INIT_LIST_HEAD(&tmp_del_list); - INIT_LIST_HEAD(&tmp_add_list); - - 
list_for_each_entry_safe(f, ftmp, mac_filter_list, list) { - if (f->state != SPNIC_MAC_WAIT_HW_UNSYNC) - continue; - - f->state = SPNIC_MAC_HW_UNSYNCED; - list_move_tail(&f->list, &tmp_del_list); - } - - list_for_each_entry_safe(f, ftmp, mac_filter_list, list) { - if (f->state != SPNIC_MAC_WAIT_HW_SYNC) - continue; - - fclone = spnic_mac_filter_entry_clone(f); - if (!fclone) { - err = -ENOMEM; - break; - } - - f->state = SPNIC_MAC_HW_SYNCED; - list_add_tail(&fclone->list, &tmp_add_list); - } - - if (err) { - spnic_undo_del_filter_entries(mac_filter_list, &tmp_del_list); - spnic_undo_add_filter_entries(mac_filter_list, &tmp_add_list); - nicif_err(nic_dev, drv, netdev, "Failed to clone mac_filter_entry\n"); - - spnic_cleanup_filter_list(&tmp_del_list); - spnic_cleanup_filter_list(&tmp_add_list); - return -ENOMEM; - } - - add_count = spnic_mac_filter_sync_hw(nic_dev, &tmp_del_list, &tmp_add_list); - if (list_empty(&tmp_add_list)) - return add_count; - - /* there are errors when add mac to hw, delete all mac in hw */ - spnic_undo_add_filter_entries(mac_filter_list, &tmp_add_list); - /* VF don't support to enter promisc mode, - * so we can't delete any other uc mac - */ - if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev) || !uc) { - list_for_each_entry_safe(f, ftmp, mac_filter_list, list) { - if (f->state != SPNIC_MAC_HW_SYNCED) - continue; - - fclone = spnic_mac_filter_entry_clone(f); - if (!fclone) - break; - - f->state = SPNIC_MAC_WAIT_HW_SYNC; - list_add_tail(&fclone->list, &tmp_del_list); - } - } - - spnic_cleanup_filter_list(&tmp_add_list); - spnic_mac_filter_sync_hw(nic_dev, &tmp_del_list, &tmp_add_list); - - /* need to enter promisc/allmulti mode */ - return -ENOMEM; -} - -static void spnic_mac_filter_sync_all(struct spnic_nic_dev *nic_dev) -{ - struct net_device *netdev = nic_dev->netdev; - int add_count; - - if (test_bit(SPNIC_MAC_FILTER_CHANGED, &nic_dev->flags)) { - clear_bit(SPNIC_MAC_FILTER_CHANGED, &nic_dev->flags); - add_count = spnic_mac_filter_sync(nic_dev, &nic_dev->uc_filter_list, true); - if (add_count < 0 && SPNIC_SUPPORT_PROMISC(nic_dev->hwdev)) { - set_bit(SPNIC_PROMISC_FORCE_ON, &nic_dev->rx_mod_state); - nicif_info(nic_dev, drv, netdev, "Promisc mode forced on\n"); - } else if (add_count) { - clear_bit(SPNIC_PROMISC_FORCE_ON, &nic_dev->rx_mod_state); - } - - add_count = spnic_mac_filter_sync(nic_dev, &nic_dev->mc_filter_list, false); - if (add_count < 0 && SPNIC_SUPPORT_ALLMULTI(nic_dev->hwdev)) { - set_bit(SPNIC_ALLMULTI_FORCE_ON, &nic_dev->rx_mod_state); - nicif_info(nic_dev, drv, netdev, "All multicast mode forced on\n"); - } else if (add_count) { - clear_bit(SPNIC_ALLMULTI_FORCE_ON, &nic_dev->rx_mod_state); - } - } -} - -#define SPNIC_DEFAULT_RX_MODE (SPNIC_RX_MODE_UC | SPNIC_RX_MODE_MC | \ - SPNIC_RX_MODE_BC) - -static void spnic_update_mac_filter(struct spnic_nic_dev *nic_dev, - struct netdev_hw_addr_list *src_list, - struct list_head *filter_list) -{ - struct spnic_mac_filter *filter = NULL; - struct spnic_mac_filter *ftmp = NULL; - struct spnic_mac_filter *f = NULL; - struct netdev_hw_addr *ha = NULL; - - /* add addr if not already in the filter list */ - netif_addr_lock_bh(nic_dev->netdev); - netdev_hw_addr_list_for_each(ha, src_list) { - filter = spnic_find_mac(filter_list, ha->addr); - if (!filter) - spnic_add_filter(nic_dev, filter_list, ha->addr); - else if (filter->state == SPNIC_MAC_WAIT_HW_UNSYNC) - filter->state = SPNIC_MAC_HW_SYNCED; - } - netif_addr_unlock_bh(nic_dev->netdev); - - /* delete addr if not in netdev list */ - list_for_each_entry_safe(f, ftmp, 
filter_list, list) { - bool found = false; - - netif_addr_lock_bh(nic_dev->netdev); - netdev_hw_addr_list_for_each(ha, src_list) - if (ether_addr_equal(ha->addr, f->addr)) { - found = true; - break; - } - netif_addr_unlock_bh(nic_dev->netdev); - - if (found) - continue; - - spnic_del_filter(nic_dev, f); - } -} - -static void update_mac_filter(struct spnic_nic_dev *nic_dev) -{ - struct net_device *netdev = nic_dev->netdev; - - if (test_and_clear_bit(SPNIC_UPDATE_MAC_FILTER, &nic_dev->flags)) { - spnic_update_mac_filter(nic_dev, &netdev->uc, &nic_dev->uc_filter_list); - spnic_update_mac_filter(nic_dev, &netdev->mc, &nic_dev->mc_filter_list); - } -} - -static void sync_rx_mode_to_hw(struct spnic_nic_dev *nic_dev, int promisc_en, - int allmulti_en) -{ - struct net_device *netdev = nic_dev->netdev; - u32 rx_mod = SPNIC_DEFAULT_RX_MODE; - int err; - - rx_mod |= (promisc_en ? SPNIC_RX_MODE_PROMISC : 0); - rx_mod |= (allmulti_en ? SPNIC_RX_MODE_MC_ALL : 0); - - if (promisc_en != test_bit(SPNIC_HW_PROMISC_ON, &nic_dev->rx_mod_state)) - nicif_info(nic_dev, drv, netdev, "%s promisc mode\n", - promisc_en ? "Enter" : "Left"); - if (allmulti_en != - test_bit(SPNIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state)) - nicif_info(nic_dev, drv, netdev, "%s all_multi mode\n", - allmulti_en ? "Enter" : "Left"); - - err = spnic_set_rx_mode(nic_dev->hwdev, rx_mod); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to set rx_mode\n"); - return; - } - - promisc_en ? set_bit(SPNIC_HW_PROMISC_ON, &nic_dev->rx_mod_state) : - clear_bit(SPNIC_HW_PROMISC_ON, &nic_dev->rx_mod_state); - - allmulti_en ? set_bit(SPNIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state) : - clear_bit(SPNIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state); -} - -void spnic_set_rx_mode_work(struct work_struct *work) -{ - struct spnic_nic_dev *nic_dev = - container_of(work, struct spnic_nic_dev, rx_mode_work); - struct net_device *netdev = nic_dev->netdev; - int promisc_en = 0, allmulti_en = 0; - - update_mac_filter(nic_dev); - - spnic_mac_filter_sync_all(nic_dev); - - if (SPNIC_SUPPORT_PROMISC(nic_dev->hwdev)) - promisc_en = !!(netdev->flags & IFF_PROMISC) || - test_bit(SPNIC_PROMISC_FORCE_ON, &nic_dev->rx_mod_state); - - if (SPNIC_SUPPORT_ALLMULTI(nic_dev->hwdev)) - allmulti_en = !!(netdev->flags & IFF_ALLMULTI) || - test_bit(SPNIC_ALLMULTI_FORCE_ON, &nic_dev->rx_mod_state); - - if (promisc_en != test_bit(SPNIC_HW_PROMISC_ON, &nic_dev->rx_mod_state) || - allmulti_en != test_bit(SPNIC_HW_ALLMULTI_ON, &nic_dev->rx_mod_state)) - sync_rx_mode_to_hw(nic_dev, promisc_en, allmulti_en); -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_irq.c b/drivers/net/ethernet/ramaxel/spnic/spnic_irq.c deleted file mode 100644 index 872a94a73590..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_irq.c +++ /dev/null @@ -1,178 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/device.h> -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/interrupt.h> -#include <linux/etherdevice.h> -#include <linux/netdevice.h> -#include <linux/debugfs.h> - -#include "sphw_hw.h" -#include "sphw_crm.h" -#include "spnic_nic_io.h" -#include "spnic_nic_dev.h" -#include "spnic_tx.h" -#include "spnic_rx.h" - -int spnic_poll(struct napi_struct *napi, int budget) -{ - struct spnic_irq *irq_cfg = container_of(napi, struct spnic_irq, napi); - struct spnic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev); - int 
tx_pkts, rx_pkts; - - rx_pkts = spnic_rx_poll(irq_cfg->rxq, budget); - - tx_pkts = spnic_tx_poll(irq_cfg->txq, budget); - - if (tx_pkts >= budget || rx_pkts >= budget) - return budget; - - napi_complete(napi); - - sphw_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, SPHW_MSIX_ENABLE); - - return max(tx_pkts, rx_pkts); -} - -static void qp_add_napi(struct spnic_irq *irq_cfg) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev); - - netif_napi_add(nic_dev->netdev, &irq_cfg->napi, spnic_poll, nic_dev->poll_weight); - napi_enable(&irq_cfg->napi); -} - -static void qp_del_napi(struct spnic_irq *irq_cfg) -{ - napi_disable(&irq_cfg->napi); - netif_napi_del(&irq_cfg->napi); -} - -static irqreturn_t qp_irq(int irq, void *data) -{ - struct spnic_irq *irq_cfg = (struct spnic_irq *)data; - struct spnic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev); - - /* 1 is resend_timer */ - sphw_misx_intr_clear_resend_bit(nic_dev->hwdev, irq_cfg->msix_entry_idx, 1); - - napi_schedule(&irq_cfg->napi); - return IRQ_HANDLED; -} - -static int spnic_request_irq(struct spnic_irq *irq_cfg, u16 q_id) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev); - struct interrupt_info info = {0}; - int err; - - qp_add_napi(irq_cfg); - - info.msix_index = irq_cfg->msix_entry_idx; - info.lli_set = 0; - info.interrupt_coalesc_set = 1; - info.pending_limt = nic_dev->intr_coalesce[q_id].pending_limt; - info.coalesc_timer_cfg = nic_dev->intr_coalesce[q_id].coalesce_timer_cfg; - info.resend_timer_cfg = nic_dev->intr_coalesce[q_id].resend_timer_cfg; - nic_dev->rxqs[q_id].last_coalesc_timer_cfg = - nic_dev->intr_coalesce[q_id].coalesce_timer_cfg; - nic_dev->rxqs[q_id].last_pending_limt = nic_dev->intr_coalesce[q_id].pending_limt; - err = sphw_set_interrupt_cfg(nic_dev->hwdev, info, SPHW_CHANNEL_NIC); - if (err) { - nicif_err(nic_dev, drv, irq_cfg->netdev, - "Failed to set RX interrupt coalescing attribute.\n"); - qp_del_napi(irq_cfg); - return err; - } - - err = request_irq(irq_cfg->irq_id, &qp_irq, 0, irq_cfg->irq_name, irq_cfg); - if (err) { - nicif_err(nic_dev, drv, irq_cfg->netdev, "Failed to request Rx irq\n"); - qp_del_napi(irq_cfg); - return err; - } - - irq_set_affinity_hint(irq_cfg->irq_id, &irq_cfg->affinity_mask); - - return 0; -} - -static void spnic_release_irq(struct spnic_irq *irq_cfg) -{ - irq_set_affinity_hint(irq_cfg->irq_id, NULL); - synchronize_irq(irq_cfg->irq_id); - free_irq(irq_cfg->irq_id, irq_cfg); - qp_del_napi(irq_cfg); -} - -int spnic_qps_irq_init(struct spnic_nic_dev *nic_dev) -{ - struct pci_dev *pdev = nic_dev->pdev; - struct irq_info *qp_irq_info = NULL; - struct spnic_irq *irq_cfg = NULL; - u16 q_id, i; - u32 local_cpu; - int err; - - for (q_id = 0; q_id < nic_dev->q_params.num_qps; q_id++) { - qp_irq_info = &nic_dev->qps_irq_info[q_id]; - irq_cfg = &nic_dev->q_params.irq_cfg[q_id]; - - irq_cfg->irq_id = qp_irq_info->irq_id; - irq_cfg->msix_entry_idx = qp_irq_info->msix_entry_idx; - irq_cfg->netdev = nic_dev->netdev; - irq_cfg->txq = &nic_dev->txqs[q_id]; - irq_cfg->rxq = &nic_dev->rxqs[q_id]; - nic_dev->rxqs[q_id].irq_cfg = irq_cfg; - - local_cpu = cpumask_local_spread(q_id, dev_to_node(&pdev->dev)); - cpumask_set_cpu(local_cpu, &irq_cfg->affinity_mask); - - snprintf(irq_cfg->irq_name, sizeof(irq_cfg->irq_name), - "%s_qp%u", nic_dev->netdev->name, q_id); - - err = spnic_request_irq(irq_cfg, q_id); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to request Rx irq\n"); - goto req_tx_irq_err; - } - - sphw_set_msix_auto_mask_state(nic_dev->hwdev, 
irq_cfg->msix_entry_idx, - SPHW_SET_MSIX_AUTO_MASK); - sphw_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, SPHW_MSIX_ENABLE); - } - - INIT_DELAYED_WORK(&nic_dev->moderation_task, spnic_auto_moderation_work); - - return 0; - -req_tx_irq_err: - for (i = 0; i < q_id; i++) { - irq_cfg = &nic_dev->q_params.irq_cfg[i]; - sphw_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, SPHW_MSIX_DISABLE); - sphw_set_msix_auto_mask_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, - SPHW_CLR_MSIX_AUTO_MASK); - spnic_release_irq(irq_cfg); - } - - return err; -} - -void spnic_qps_irq_deinit(struct spnic_nic_dev *nic_dev) -{ - struct spnic_irq *irq_cfg = NULL; - u16 q_id; - - for (q_id = 0; q_id < nic_dev->q_params.num_qps; q_id++) { - irq_cfg = &nic_dev->q_params.irq_cfg[q_id]; - sphw_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, SPHW_MSIX_DISABLE); - sphw_set_msix_auto_mask_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, - SPHW_CLR_MSIX_AUTO_MASK); - spnic_release_irq(irq_cfg); - } -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_lld.c b/drivers/net/ethernet/ramaxel/spnic/spnic_lld.c deleted file mode 100644 index f09a4c186aae..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_lld.c +++ /dev/null @@ -1,937 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt - -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/device.h> -#include <linux/module.h> -#include <linux/io-mapping.h> -#include <linux/interrupt.h> -#include <linux/inetdevice.h> -#include <net/addrconf.h> -#include <linux/time.h> -#include <linux/timex.h> -#include <linux/rtc.h> -#include <linux/aer.h> -#include <linux/debugfs.h> - -#include "sphw_common.h" -#include "sphw_mt.h" -#include "sphw_crm.h" -#include "spnic_lld.h" -#include "spnic_pci_id_tbl.h" -#include "spnic_sriov.h" -#include "spnic_dev_mgmt.h" -#include "sphw_hw.h" -#include "spnic_nic_dev.h" - -static bool disable_vf_load; -module_param(disable_vf_load, bool, 0444); -MODULE_PARM_DESC(disable_vf_load, "Disable virtual functions probe or not - default is false"); - -static bool disable_attach; -module_param(disable_attach, bool, 0444); -MODULE_PARM_DESC(disable_attach, "disable_attach or not - default is false"); - -#define SPNIC_WAIT_SRIOV_CFG_TIMEOUT 15000 -#define SPNIC_SYNC_YEAR_OFFSET 1900 - -MODULE_AUTHOR("Ramaxel Technologies CO., Ltd"); -MODULE_DESCRIPTION(SPNIC_DRV_DESC); -MODULE_VERSION(SPNIC_DRV_VERSION); -MODULE_LICENSE("GPL"); - -struct spnic_uld_info g_uld_info[SERVICE_T_MAX] = { {0} }; - -#define SPHW_EVENT_PROCESS_TIMEOUT 10000 - -static const char *s_uld_name[SERVICE_T_MAX] = { - "nic", "ovs", "roce", "toe", "ioe", - "fc", "vbs", "ipsec", "virtio", "migrate"}; - -static int attach_uld(struct spnic_pcidev *dev, enum sphw_service_type type, - struct spnic_uld_info *uld_info) -{ - void *uld_dev = NULL; - int err; - - mutex_lock(&dev->pdev_mutex); - - if (dev->uld_dev[type]) { - sdk_err(&dev->pcidev->dev, "%s driver has attached to pcie device\n", - s_uld_name[type]); - err = 0; - goto out_unlock; - } - - err = uld_info->probe(&dev->lld_dev, &uld_dev, dev->uld_dev_name[type]); - if (err || !uld_dev) { - sdk_err(&dev->pcidev->dev, "Failed to add object for %s driver to pcie device\n", - s_uld_name[type]); - goto probe_failed; - } - - dev->uld_dev[type] = uld_dev; - mutex_unlock(&dev->pdev_mutex); - - sdk_info(&dev->pcidev->dev, "Attach %s driver to pcie device succeed\n", s_uld_name[type]); - return 0; - -probe_failed: 
-out_unlock: - mutex_unlock(&dev->pdev_mutex); - - return err; -} - -static void detach_uld(struct spnic_pcidev *dev, enum sphw_service_type type) -{ - struct spnic_uld_info *uld_info = &g_uld_info[type]; - unsigned long end; - bool timeout = true; - - mutex_lock(&dev->pdev_mutex); - if (!dev->uld_dev[type]) { - mutex_unlock(&dev->pdev_mutex); - return; - } - - end = jiffies + msecs_to_jiffies(SPHW_EVENT_PROCESS_TIMEOUT); - do { - if (!test_and_set_bit(type, &dev->state)) { - timeout = false; - break; - } - usleep_range(900, 1000); - } while (time_before(jiffies, end)); - - if (timeout && !test_and_set_bit(type, &dev->state)) - timeout = false; - - uld_info->remove(&dev->lld_dev, dev->uld_dev[type]); - dev->uld_dev[type] = NULL; - if (!timeout) - clear_bit(type, &dev->state); - - sdk_info(&dev->pcidev->dev, "Detach %s driver from pcie device succeed\n", - s_uld_name[type]); - mutex_unlock(&dev->pdev_mutex); -} - -static void attach_ulds(struct spnic_pcidev *dev) -{ - enum sphw_service_type type; - struct pci_dev *pdev = dev->pcidev; - - for (type = SERVICE_T_NIC; type < SERVICE_T_MAX; type++) { - if (g_uld_info[type].probe) { - if (pdev->is_virtfn && (!spnic_get_vf_service_load(pdev, (u16)type))) { - sdk_info(&pdev->dev, "VF device disable service_type = %d load in host\n", - type); - continue; - } - attach_uld(dev, type, &g_uld_info[type]); - } - } -} - -static void detach_ulds(struct spnic_pcidev *dev) -{ - enum sphw_service_type type; - - for (type = SERVICE_T_MAX - 1; type > SERVICE_T_NIC; type--) { - if (g_uld_info[type].probe) - detach_uld(dev, type); - } - - if (g_uld_info[SERVICE_T_NIC].probe) - detach_uld(dev, SERVICE_T_NIC); -} - -int spnic_register_uld(enum sphw_service_type type, struct spnic_uld_info *uld_info) -{ - struct card_node *chip_node = NULL; - struct spnic_pcidev *dev = NULL; - - if (type >= SERVICE_T_MAX) { - pr_err("Unknown type %d of up layer driver to register\n", type); - return -EINVAL; - } - - if (!uld_info || !uld_info->probe || !uld_info->remove) { - pr_err("Invalid information of %s driver to register\n", s_uld_name[type]); - return -EINVAL; - } - - lld_hold(); - - if (g_uld_info[type].probe) { - pr_err("%s driver has registered\n", s_uld_name[type]); - lld_put(); - return -EINVAL; - } - - memcpy(&g_uld_info[type], uld_info, sizeof(*uld_info)); - list_for_each_entry(chip_node, &g_spnic_chip_list, node) { - list_for_each_entry(dev, &chip_node->func_list, node) { - if (attach_uld(dev, type, uld_info)) { - sdk_err(&dev->pcidev->dev, "Attach %s driver to pcie device failed\n", - s_uld_name[type]); - continue; - } - } - } - - lld_put(); - - pr_info("Register %s driver succeed\n", s_uld_name[type]); - return 0; -} - -void spnic_unregister_uld(enum sphw_service_type type) -{ - struct card_node *chip_node = NULL; - struct spnic_pcidev *dev = NULL; - struct spnic_uld_info *uld_info = NULL; - - if (type >= SERVICE_T_MAX) { - pr_err("Unknown type %d of up layer driver to unregister\n", type); - return; - } - - lld_hold(); - list_for_each_entry(chip_node, &g_spnic_chip_list, node) { - /* detach vf first */ - list_for_each_entry(dev, &chip_node->func_list, node) { - if (sphw_func_type(dev->hwdev) != TYPE_VF) - continue; - - detach_uld(dev, type); - } - - list_for_each_entry(dev, &chip_node->func_list, node) { - if (sphw_func_type(dev->hwdev) == TYPE_VF) - continue; - - detach_uld(dev, type); - } - } - - uld_info = &g_uld_info[type]; - memset(uld_info, 0, sizeof(*uld_info)); - lld_put(); -} - -int spnic_attach_nic(struct spnic_lld_dev *lld_dev) -{ - struct spnic_pcidev 
*dev = NULL; - - if (!lld_dev) - return -EINVAL; - - dev = container_of(lld_dev, struct spnic_pcidev, lld_dev); - return attach_uld(dev, SERVICE_T_NIC, &g_uld_info[SERVICE_T_NIC]); -} - -void spnic_detach_nic(struct spnic_lld_dev *lld_dev) -{ - struct spnic_pcidev *dev = NULL; - - if (!lld_dev) - return; - - dev = container_of(lld_dev, struct spnic_pcidev, lld_dev); - detach_uld(dev, SERVICE_T_NIC); -} - -static void sphw_sync_time_to_fmw(struct spnic_pcidev *pdev_pri) -{ - struct tm tm = {0}; - u64 tv_msec; - int err; - - tv_msec = ktime_to_ms(ktime_get_real()); - err = sphw_sync_time(pdev_pri->hwdev, tv_msec); - if (err) { - sdk_err(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware failed, errno:%d.\n", - err); - } else { - time64_to_tm(tv_msec / MSEC_PER_SEC, 0, &tm); - sdk_info(&pdev_pri->pcidev->dev, "Synchronize UTC time to firmware succeed. UTC time %ld-%02d-%02d %02d:%02d:%02d.\n", - tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, - tm.tm_min, tm.tm_sec); - } -} - -static void send_uld_dev_event(struct spnic_pcidev *dev, - struct sphw_event_info *event) -{ - enum sphw_service_type type; - - for (type = SERVICE_T_NIC; type < SERVICE_T_MAX; type++) { - if (test_and_set_bit(type, &dev->state)) { - sdk_warn(&dev->pcidev->dev, "Event: 0x%x can't handler, %s is in detach\n", - event->type, s_uld_name[type]); - continue; - } - - if (g_uld_info[type].event) - g_uld_info[type].event(&dev->lld_dev, dev->uld_dev[type], event); - clear_bit(type, &dev->state); - } -} - -static void send_event_to_dst_pf(struct spnic_pcidev *dev, u16 func_id, - struct sphw_event_info *event) -{ - struct spnic_pcidev *des_dev = NULL; - - lld_hold(); - list_for_each_entry(des_dev, &dev->chip_node->func_list, node) { - if (sphw_func_type(des_dev->hwdev) == TYPE_VF) - continue; - - if (sphw_global_func_id(des_dev->hwdev) == func_id) { - send_uld_dev_event(des_dev, event); - break; - } - } - lld_put(); -} - -void spnic_event_process(void *adapter, struct sphw_event_info *event) -{ - struct spnic_pcidev *dev = adapter; - u16 func_id; - - if (event->type == SPHW_EVENT_FAULT && - event->info.fault_level == FAULT_LEVEL_SERIOUS_FLR && - event->info.event.chip.func_id < sphw_max_pf_num(dev->hwdev)) { - func_id = event->info.event.chip.func_id; - return send_event_to_dst_pf(adapter, func_id, event); - } - - send_uld_dev_event(adapter, event); -} - -#define SPNIC_IS_VF_DEV(pdev) ((pdev)->device == SPNIC_DEV_ID_VF) - -static int mapping_bar(struct pci_dev *pdev, struct spnic_pcidev *pci_adapter) -{ - int cfg_bar; - - cfg_bar = SPNIC_IS_VF_DEV(pdev) ? 
SPNIC_VF_PCI_CFG_REG_BAR : SPNIC_PF_PCI_CFG_REG_BAR; - - pci_adapter->cfg_reg_base = pci_ioremap_bar(pdev, cfg_bar); - if (!pci_adapter->cfg_reg_base) { - sdk_err(&pdev->dev, "Failed to map configuration regs\n"); - return -ENOMEM; - } - - pci_adapter->intr_reg_base = pci_ioremap_bar(pdev, SPNIC_PCI_INTR_REG_BAR); - if (!pci_adapter->intr_reg_base) { - sdk_err(&pdev->dev, - "Failed to map interrupt regs\n"); - goto map_intr_bar_err; - } - - if (!SPNIC_IS_VF_DEV(pdev)) { - pci_adapter->mgmt_reg_base = pci_ioremap_bar(pdev, SPNIC_PCI_MGMT_REG_BAR); - if (!pci_adapter->mgmt_reg_base) { - sdk_err(&pdev->dev, "Failed to map mgmt regs\n"); - goto map_mgmt_bar_err; - } - } - - pci_adapter->db_base_phy = pci_resource_start(pdev, SPNIC_PCI_DB_BAR); - pci_adapter->db_dwqe_len = pci_resource_len(pdev, SPNIC_PCI_DB_BAR); - pci_adapter->db_base = pci_ioremap_bar(pdev, SPNIC_PCI_DB_BAR); - if (!pci_adapter->db_base) { - sdk_err(&pdev->dev, "Failed to map doorbell regs\n"); - goto map_db_err; - } - - return 0; - -map_db_err: - if (!SPNIC_IS_VF_DEV(pdev)) - iounmap(pci_adapter->mgmt_reg_base); - -map_mgmt_bar_err: - iounmap(pci_adapter->intr_reg_base); - -map_intr_bar_err: - iounmap(pci_adapter->cfg_reg_base); - - return -ENOMEM; -} - -static void unmapping_bar(struct spnic_pcidev *pci_adapter) -{ - iounmap(pci_adapter->db_base); - - if (!SPNIC_IS_VF_DEV(pci_adapter->pcidev)) - iounmap(pci_adapter->mgmt_reg_base); - - iounmap(pci_adapter->intr_reg_base); - iounmap(pci_adapter->cfg_reg_base); -} - -static int spnic_pci_init(struct pci_dev *pdev) -{ - struct spnic_pcidev *pci_adapter = NULL; - int err; - - pci_adapter = kzalloc(sizeof(*pci_adapter), GFP_KERNEL); - if (!pci_adapter) { - sdk_err(&pdev->dev, "Failed to alloc pci device adapter\n"); - return -ENOMEM; - } - pci_adapter->pcidev = pdev; - mutex_init(&pci_adapter->pdev_mutex); - - pci_set_drvdata(pdev, pci_adapter); - - /* to do CONFIG_PCI_IOV */ - - err = pci_enable_device(pdev); - if (err) { - sdk_err(&pdev->dev, "Failed to enable PCI device\n"); - goto pci_enable_err; - } - - err = pci_request_regions(pdev, SPNIC_NIC_DRV_NAME); - if (err) { - sdk_err(&pdev->dev, "Failed to request regions\n"); - goto pci_regions_err; - } - - pci_enable_pcie_error_reporting(pdev); - - pci_set_master(pdev); - - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - sdk_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n"); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - sdk_err(&pdev->dev, "Failed to set DMA mask\n"); - goto dma_mask_err; - } - } - - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - sdk_warn(&pdev->dev, "Couldn't set 64-bit coherent DMA mask\n"); - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - sdk_err(&pdev->dev, "Failed to set coherent DMA mask\n"); - goto dma_consistnet_mask_err; - } - } - - return 0; - -dma_consistnet_mask_err: -dma_mask_err: - pci_clear_master(pdev); - pci_release_regions(pdev); - -pci_regions_err: - pci_disable_device(pdev); - -pci_enable_err: - pci_set_drvdata(pdev, NULL); - kfree(pci_adapter); - - return err; -} - -static void spnic_pci_deinit(struct pci_dev *pdev) -{ - struct spnic_pcidev *pci_adapter = pci_get_drvdata(pdev); - - pci_clear_master(pdev); - pci_release_regions(pdev); - pci_disable_pcie_error_reporting(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - kfree(pci_adapter); -} - -#ifdef CONFIG_X86 -/** - * cfg_order_reg - when cpu model is haswell or broadwell, should configure dma - * order register to zero - * 
@pci_adapter: pci_adapter - **/ -static void cfg_order_reg(struct spnic_pcidev *pci_adapter) -{ - u8 cpu_model[] = {0x3c, 0x3f, 0x45, 0x46, 0x3d, 0x47, 0x4f, 0x56}; - struct cpuinfo_x86 *cpuinfo = NULL; - u32 i; - - if (sphw_func_type(pci_adapter->hwdev) == TYPE_VF) - return; - - cpuinfo = &cpu_data(0); - for (i = 0; i < sizeof(cpu_model); i++) { - if (cpu_model[i] == cpuinfo->x86_model) - sphw_set_pcie_order_cfg(pci_adapter->hwdev); - } -} -#endif - -static int spnic_func_init(struct pci_dev *pdev, struct spnic_pcidev *pci_adapter) -{ - struct sphw_init_para init_para = {0}; - int err; - - init_para.adapter_hdl = pci_adapter; - init_para.pcidev_hdl = pdev; - init_para.dev_hdl = &pdev->dev; - init_para.cfg_reg_base = pci_adapter->cfg_reg_base; - init_para.intr_reg_base = pci_adapter->intr_reg_base; - init_para.mgmt_reg_base = pci_adapter->mgmt_reg_base; - init_para.db_base = pci_adapter->db_base; - init_para.db_base_phy = pci_adapter->db_base_phy; - init_para.db_dwqe_len = pci_adapter->db_dwqe_len; - init_para.hwdev = &pci_adapter->hwdev; - init_para.chip_node = pci_adapter->chip_node; - err = sphw_init_hwdev(&init_para); - if (err) { - pci_adapter->hwdev = NULL; - sdk_err(&pdev->dev, "Failed to initialize hardware device\n"); - return -EFAULT; - } - - pci_adapter->lld_dev.pdev = pdev; - pci_adapter->lld_dev.hwdev = pci_adapter->hwdev; - if (sphw_func_type(pci_adapter->hwdev) != TYPE_VF) - set_bit(SPNIC_FUNC_PERSENT, &pci_adapter->sriov_info.state); - - sphw_event_register(pci_adapter->hwdev, pci_adapter, spnic_event_process); - - if (sphw_func_type(pci_adapter->hwdev) != TYPE_VF) - sphw_sync_time_to_fmw(pci_adapter); - - lld_lock_chip_node(); - list_add_tail(&pci_adapter->node, &pci_adapter->chip_node->func_list); - lld_unlock_chip_node(); - - if (!disable_attach) { - attach_ulds(pci_adapter); -#ifdef CONFIG_X86 - cfg_order_reg(pci_adapter); -#endif - } - - sdk_info(&pdev->dev, "Pcie device probed\n"); - - return 0; -} - -static void spnic_func_deinit(struct pci_dev *pdev) -{ - struct spnic_pcidev *pci_adapter = pci_get_drvdata(pdev); - - /* When function deinit, disable mgmt initiative report events firstly, - * then flush mgmt work-queue. 
- */ - sphw_disable_mgmt_msg_report(pci_adapter->hwdev); - - sphw_flush_mgmt_workq(pci_adapter->hwdev); - - lld_lock_chip_node(); - list_del(&pci_adapter->node); - lld_unlock_chip_node(); - - wait_lld_dev_unused(pci_adapter); - - detach_ulds(pci_adapter); - - sphw_event_unregister(pci_adapter->hwdev); - - sphw_free_hwdev(pci_adapter->hwdev); -} - -static inline void wait_sriov_cfg_complete(struct spnic_pcidev *pci_adapter) -{ - struct spnic_sriov_info *sriov_info; - unsigned long end; - - sriov_info = &pci_adapter->sriov_info; - clear_bit(SPNIC_FUNC_PERSENT, &sriov_info->state); - usleep_range(9900, 10000); - - end = jiffies + msecs_to_jiffies(SPNIC_WAIT_SRIOV_CFG_TIMEOUT); - do { - if (!test_bit(SPNIC_SRIOV_ENABLE, &sriov_info->state) && - !test_bit(SPNIC_SRIOV_DISABLE, &sriov_info->state)) - return; - - usleep_range(9900, 10000); - } while (time_before(jiffies, end)); -} - -bool spnic_get_vf_load_state(struct pci_dev *pdev) -{ - struct spnic_pcidev *pci_adapter = NULL; - struct pci_dev *pf_pdev = NULL; - - if (!pdev) { - pr_err("pdev is null.\n"); - return false; - } - - /* vf used in vm */ - if (pci_is_root_bus(pdev->bus)) - return false; - - if (pdev->is_virtfn) - pf_pdev = pdev->physfn; - else - pf_pdev = pdev; - - pci_adapter = pci_get_drvdata(pf_pdev); - if (!pci_adapter) { - sdk_err(&pdev->dev, "pci_adapter is null.\n"); - return false; - } - - return !pci_adapter->disable_vf_load; -} - -int spnic_set_vf_load_state(struct pci_dev *pdev, bool vf_load_state) -{ - struct spnic_pcidev *pci_adapter = NULL; - - if (!pdev) { - pr_err("pdev is null.\n"); - return -EINVAL; - } - - pci_adapter = pci_get_drvdata(pdev); - if (!pci_adapter) { - sdk_err(&pdev->dev, "pci_adapter is null.\n"); - return -EINVAL; - } - - if (sphw_func_type(pci_adapter->hwdev) == TYPE_VF) - return 0; - - pci_adapter->disable_vf_load = !vf_load_state; - sdk_info(&pci_adapter->pcidev->dev, "Current function %s vf load in host\n", - vf_load_state ? "enable" : "disable"); - - return 0; -} - -bool spnic_get_vf_service_load(struct pci_dev *pdev, u16 service) -{ - struct spnic_pcidev *pci_adapter = NULL; - struct pci_dev *pf_pdev = NULL; - - if (!pdev) { - pr_err("pdev is null.\n"); - return false; - } - - if (pdev->is_virtfn) - pf_pdev = pdev->physfn; - else - pf_pdev = pdev; - - pci_adapter = pci_get_drvdata(pf_pdev); - if (!pci_adapter) { - sdk_err(&pdev->dev, "pci_adapter is null.\n"); - return false; - } - - if (service >= SERVICE_T_MAX) { - sdk_err(&pdev->dev, "service_type = %u state is error\n", - service); - return false; - } - - return !pci_adapter->disable_srv_load[service]; -} - -int spnic_set_vf_service_load(struct pci_dev *pdev, u16 service, bool vf_srv_load) -{ - struct spnic_pcidev *pci_adapter = NULL; - - if (!pdev) { - pr_err("pdev is null.\n"); - return -EINVAL; - } - - if (service >= SERVICE_T_MAX) { - sdk_err(&pdev->dev, "service_type = %u state is error\n", - service); - return -EFAULT; - } - - pci_adapter = pci_get_drvdata(pdev); - if (!pci_adapter) { - sdk_err(&pdev->dev, "pci_adapter is null.\n"); - return -EINVAL; - } - - if (sphw_func_type(pci_adapter->hwdev) == TYPE_VF) - return 0; - - pci_adapter->disable_srv_load[service] = !vf_srv_load; - sdk_info(&pci_adapter->pcidev->dev, "Current function %s vf load in host\n", - vf_srv_load ? 
"enable" : "disable"); - - return 0; -} - -static int enable_vf_service_state(struct spnic_pcidev *dst_dev, u16 service) -{ - int err; - - err = sphw_get_dev_cap(dst_dev->hwdev); - if (err) { - sdk_err(&dst_dev->pcidev->dev, "Failed to get current device capabilities\n"); - return -EFAULT; - } - return attach_uld(dst_dev, service, &g_uld_info[service]); -} - -int spnic_set_vf_service_state(struct pci_dev *pdev, u16 vf_func_id, u16 service, bool en) -{ - struct spnic_pcidev *dev = NULL; - struct spnic_pcidev *dst_dev = NULL; - int err = -EFAULT; - - if (!pdev) - return -EINVAL; - - dev = pci_get_drvdata(pdev); - if (!dev) - return -EFAULT; - - if (service >= SERVICE_T_MAX) { - sdk_err(&pdev->dev, "Current vf do not supports set service_type = %u state in host\n", - service); - return -EFAULT; - } - - /* find func_idx pci_adapter and disable or enable service */ - lld_hold(); - list_for_each_entry(dst_dev, &dev->chip_node->func_list, node) { - if (sphw_global_func_id(dst_dev->hwdev) != vf_func_id) - continue; - if (en) { - err = enable_vf_service_state(dst_dev, service); - if (err) - sdk_err(&dev->pcidev->dev, "Failed to set functio_id = %u service_type = %u\n", - vf_func_id, service); - } else { - detach_uld(dst_dev, service); - err = 0; - } - break; - } - lld_put(); - - return err; -} - -static void spnic_remove(struct pci_dev *pdev) -{ - struct spnic_pcidev *pci_adapter = pci_get_drvdata(pdev); - - if (!pci_adapter) - return; - - sdk_info(&pdev->dev, "Pcie device remove begin\n"); - - sphw_detect_hw_present(pci_adapter->hwdev); - - if (sphw_func_type(pci_adapter->hwdev) != TYPE_VF) { - wait_sriov_cfg_complete(pci_adapter); - spnic_pci_sriov_disable(pdev); - } - - spnic_func_deinit(pdev); - - lld_lock_chip_node(); - free_chip_node(pci_adapter); - lld_unlock_chip_node(); - - unmapping_bar(pci_adapter); - spnic_pci_deinit(pdev); - - sdk_info(&pdev->dev, "Pcie device removed\n"); -} - -static int spnic_probe(struct pci_dev *pdev, const struct pci_device_id *id) -{ - struct spnic_pcidev *pci_adapter = NULL; - int err; - - sdk_info(&pdev->dev, "Pcie device probe begin\n"); - - if (pdev->is_virtfn && (!spnic_get_vf_load_state(pdev))) { - sdk_info(&pdev->dev, "VF device disable load in host\n"); - return 0; - } - - err = spnic_pci_init(pdev); - if (err) - return err; - - pci_adapter = pci_get_drvdata(pdev); - err = mapping_bar(pdev, pci_adapter); - if (err) { - sdk_err(&pdev->dev, "Failed to map bar\n"); - goto map_bar_failed; - } - - pci_adapter->disable_vf_load = disable_vf_load; - pci_adapter->id = *id; - lld_dev_cnt_init(pci_adapter); - - /* if chip information of pcie function exist, add the function into chip */ - lld_lock_chip_node(); - err = alloc_chip_node(pci_adapter); - if (err) { - lld_unlock_chip_node(); - sdk_err(&pdev->dev, - "Failed to add new chip node to global list\n"); - goto alloc_chip_node_fail; - } - - lld_unlock_chip_node(); - - err = spnic_func_init(pdev, pci_adapter); - if (err) - goto func_init_err; - - if (sphw_func_type(pci_adapter->hwdev) != TYPE_VF) { - err = sphw_set_bdf_ctxt(pci_adapter->hwdev, pdev->bus->number, - PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); - if (err) { - sdk_err(&pdev->dev, "Failed to set BDF info to MPU\n"); - goto set_bdf_err; - } - } - - return 0; - -set_bdf_err: - spnic_func_deinit(pdev); - -func_init_err: - lld_lock_chip_node(); - free_chip_node(pci_adapter); - lld_unlock_chip_node(); - -alloc_chip_node_fail: - unmapping_bar(pci_adapter); - -map_bar_failed: - spnic_pci_deinit(pdev); - - sdk_err(&pdev->dev, "Pcie device probe failed\n"); 
- return err; -} - -static const struct pci_device_id spnic_pci_table[] = { - {PCI_VDEVICE(RAMAXEL, SPNIC_DEV_ID_PF_STD), 0}, - {PCI_VDEVICE(RAMAXEL, SPNIC_DEV_ID_VF), 0}, - {PCI_VDEVICE(RAMAXEL, SPNIC_DEV_ID_VF_HV), 0}, - {0, 0} -}; - -MODULE_DEVICE_TABLE(pci, spnic_pci_table); - -/** - * spnic_io_error_detected - called when PCI error is detected - * @pdev: Pointer to PCI device - * @state: The current pci connection state - * - * This function is called after a PCI bus error affecting - * this device has been detected. - * - * Since we only need error detecting not error handling, so we - * always return PCI_ERS_RESULT_CAN_RECOVER to tell the AER - * driver that we don't need reset(error handling). - */ -static pci_ers_result_t spnic_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) -{ - struct spnic_pcidev *pci_adapter = NULL; - - sdk_err(&pdev->dev, - "Uncorrectable error detected, log and cleanup error status: 0x%08x\n", - state); - - pci_aer_clear_nonfatal_status(pdev); - pci_adapter = pci_get_drvdata(pdev); - - if (pci_adapter) - sphw_record_pcie_error(pci_adapter->hwdev); - - return PCI_ERS_RESULT_CAN_RECOVER; -} - -static void spnic_shutdown(struct pci_dev *pdev) -{ - struct spnic_pcidev *pci_adapter = pci_get_drvdata(pdev); - - sdk_err(&pdev->dev, "Shutdown device\n"); - - if (pci_adapter) - sphw_shutdown_hwdev(pci_adapter->hwdev); - - pci_disable_device(pdev); -} - -/* Cause we only need error detecting not error handling, so only error_detected - * callback is enough. - */ -static struct pci_error_handlers spnic_err_handler = { - .error_detected = spnic_io_error_detected, -}; - -static struct pci_driver spnic_driver = { - .name = SPNIC_NIC_DRV_NAME, - .id_table = spnic_pci_table, - .probe = spnic_probe, - .remove = spnic_remove, - .shutdown = spnic_shutdown, - .sriov_configure = spnic_pci_sriov_configure, - .err_handler = &spnic_err_handler -}; - -static __init int spnic_lld_init(void) -{ - int err; - - pr_info("%s - version %s\n", SPNIC_DRV_DESC, SPNIC_DRV_VERSION); - memset(g_uld_info, 0, sizeof(g_uld_info)); - - spnic_lld_lock_init(); - - err = spnic_register_uld(SERVICE_T_NIC, &nic_uld_info); - if (err) { - pr_err("Register spnic uld failed\n"); - return err; - } - - return pci_register_driver(&spnic_driver); -} - -static __exit void spnic_lld_exit(void) -{ - pci_unregister_driver(&spnic_driver); - spnic_unregister_uld(SERVICE_T_NIC); -} - -module_init(spnic_lld_init); -module_exit(spnic_lld_exit); diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_lld.h b/drivers/net/ethernet/ramaxel/spnic/spnic_lld.h deleted file mode 100644 index e1864f1b6c5b..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_lld.h +++ /dev/null @@ -1,75 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_LLD_H -#define SPNIC_LLD_H - -#include "sphw_crm.h" - -struct spnic_lld_dev { - struct pci_dev *pdev; - void *hwdev; -}; - -struct spnic_uld_info { - /* uld_dev: should not return null even the function capability - * is not support the up layer driver - * uld_dev_name: NIC driver should copy net device name. - * FC driver could copy fc device name. 
- * other up layer driver don`t need copy anything - */ - int (*probe)(struct spnic_lld_dev *lld_dev, void **uld_dev, - char *uld_dev_name); - void (*remove)(struct spnic_lld_dev *lld_dev, void *uld_dev); - int (*suspend)(struct spnic_lld_dev *lld_dev, void *uld_dev, - pm_message_t state); - int (*resume)(struct spnic_lld_dev *lld_dev, void *uld_dev); - void (*event)(struct spnic_lld_dev *lld_dev, void *uld_dev, - struct sphw_event_info *event); - int (*ioctl)(void *uld_dev, u32 cmd, const void *buf_in, u32 in_size, - void *buf_out, u32 *out_size); -}; - -int spnic_register_uld(enum sphw_service_type type, struct spnic_uld_info *uld_info); - -void spnic_unregister_uld(enum sphw_service_type type); - -void *spnic_get_uld_dev_by_pdev(struct pci_dev *pdev, enum sphw_service_type type); - -void *spnic_get_ppf_uld_by_pdev(struct pci_dev *pdev, enum sphw_service_type type); - -int spnic_get_chip_name_by_hwdev(const void *hwdev, char *ifname); - -void *spnic_get_uld_dev_by_ifname(const char *ifname, enum sphw_service_type type); - -int spnic_get_pf_nic_uld_array(struct pci_dev *pdev, u32 *dev_cnt, void *array[]); - -int spnic_get_chip_up_bitmap(struct pci_dev *pdev, bool *is_setted, u8 *valid_up_bitmap); - -int spnic_set_chip_up_bitmap(struct pci_dev *pdev, u8 valid_up_bitmap); - -bool spnic_get_vf_service_load(struct pci_dev *pdev, u16 service); - -int spnic_set_vf_service_load(struct pci_dev *pdev, u16 service, bool vf_srv_load); - -int spnic_set_vf_service_state(struct pci_dev *pdev, u16 vf_func_id, u16 service, bool en); - -bool spnic_get_vf_load_state(struct pci_dev *pdev); - -int spnic_set_vf_load_state(struct pci_dev *pdev, bool vf_load_state); - -int spnic_attach_nic(struct spnic_lld_dev *lld_dev); - -void spnic_detach_nic(struct spnic_lld_dev *lld_dev); - -void lld_hold(void); -void lld_put(void); -void lld_dev_hold(struct spnic_lld_dev *dev); -void lld_dev_put(struct spnic_lld_dev *dev); -struct spnic_lld_dev *spnic_get_lld_dev_by_ifname(const char *ifname); - -void *spnic_get_ppf_hwdev_by_pdev(struct pci_dev *pdev); - -void spnic_send_event_to_uld(struct pci_dev *pdev, enum sphw_service_type type, - struct sphw_event_info *event); -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_mag_cfg.c b/drivers/net/ethernet/ramaxel/spnic/spnic_mag_cfg.c deleted file mode 100644 index d0448d1a6bd3..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_mag_cfg.c +++ /dev/null @@ -1,778 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt - -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/etherdevice.h> -#include <linux/if_vlan.h> -#include <linux/ethtool.h> -#include <linux/kernel.h> -#include <linux/device.h> -#include <linux/pci.h> -#include <linux/netdevice.h> -#include <linux/module.h> - -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "spnic_mag_cmd.h" -#include "spnic_nic_io.h" -#include "spnic_nic_cfg.h" -#include "spnic_nic.h" -#include "sphw_common.h" - -static int mag_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size); -static int mag_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size, - u16 channel); - -int spnic_set_port_enable(void *hwdev, bool enable, u16 channel) -{ - struct mag_cmd_set_port_enable en_state; - u16 out_size = sizeof(en_state); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev) - return -EINVAL; - - if 
(sphw_func_type(hwdev) == TYPE_VF) - return 0; - - memset(&en_state, 0, sizeof(en_state)); - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - en_state.function_id = sphw_global_func_id(hwdev); - en_state.state = enable ? MAG_CMD_TX_ENABLE | MAG_CMD_RX_ENABLE : - MAG_CMD_PORT_DISABLE; - - err = mag_msg_to_mgmt_sync_ch(hwdev, MAG_CMD_SET_PORT_ENABLE, &en_state, - sizeof(en_state), &en_state, &out_size, channel); - if (err || !out_size || en_state.head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to set port state, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", - err, en_state.head.status, out_size, channel); - return -EIO; - } - - return 0; -} - -int spnic_get_phy_port_stats(void *hwdev, struct mag_cmd_port_stats *stats) -{ - struct mag_cmd_get_port_stat *port_stats = NULL; - struct mag_cmd_port_stats_info stats_info; - u16 out_size = sizeof(*port_stats); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL); - if (!port_stats) - return -ENOMEM; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - memset(&stats_info, 0, sizeof(stats_info)); - stats_info.port_id = sphw_physical_port_id(hwdev); - - err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_GET_PORT_STAT, - &stats_info, sizeof(stats_info), - port_stats, &out_size); - if (err || !out_size || port_stats->head.status) { - nic_err(nic_cfg->dev_hdl, - "Failed to get port statistics, err: %d, status: 0x%x, out size: 0x%x\n", - err, port_stats->head.status, out_size); - err = -EIO; - goto out; - } - - memcpy(stats, &port_stats->counter, sizeof(*stats)); - -out: - kfree(port_stats); - - return err; -} - -int spnic_set_port_funcs_state(void *hwdev, bool enable) -{ - return 0; -} - -int spnic_reset_port_link_cfg(void *hwdev) -{ - return 0; -} - -int spnic_force_port_relink(void *hwdev) -{ - return 0; -} - -int spnic_set_autoneg(void *hwdev, bool enable) -{ - /* TODO */ - - return 0; -} - -static int spnic_cfg_loopback_mode(struct spnic_nic_cfg *nic_cfg, u8 opcode, u8 *mode, u8 *enable) -{ - struct mag_cmd_cfg_loopback_mode lp; - u16 out_size = sizeof(lp); - int err; - - memset(&lp, 0, sizeof(lp)); - lp.port_id = sphw_physical_port_id(nic_cfg->hwdev); - lp.opcode = opcode; - if (opcode == MGMT_MSG_CMD_OP_SET) { - lp.lp_mode = *mode; - lp.lp_en = *enable; - } - - err = mag_msg_to_mgmt_sync(nic_cfg->hwdev, MAG_CMD_CFG_LOOPBACK_MODE, - &lp, sizeof(lp), &lp, &out_size); - if (err || !out_size || lp.head.status) { - nic_err(nic_cfg->dev_hdl, - "Failed to %s loopback mode, err: %d, status: 0x%x, out size: 0x%x\n", - opcode == MGMT_MSG_CMD_OP_SET ? 
"set" : "get", - err, lp.head.status, out_size); - return -EIO; - } - - if (opcode == MGMT_MSG_CMD_OP_GET) { - *mode = lp.lp_mode; - *enable = lp.lp_en; - } - - return 0; -} - -int spnic_get_loopback_mode(void *hwdev, u8 *mode, u8 *enable) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev || !mode || !enable) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - return spnic_cfg_loopback_mode(nic_cfg, MGMT_MSG_CMD_OP_GET, mode, enable); -} - -#define LOOP_MODE_MIN 1 -#define LOOP_MODE_MAX 6 -int spnic_set_loopback_mode(void *hwdev, u8 mode, u8 enable) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - if (mode < LOOP_MODE_MIN || mode > LOOP_MODE_MAX) { - nic_err(nic_cfg->dev_hdl, "Invalid loopback mode %u to set\n", - mode); - return -EINVAL; - } - - return spnic_cfg_loopback_mode(nic_cfg, MGMT_MSG_CMD_OP_SET, &mode, &enable); -} - -int spnic_set_led_status(void *hwdev, enum mag_led_type type, enum mag_led_mode mode) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - struct mag_cmd_set_led_cfg led_info; - u16 out_size = sizeof(led_info); - int err; - - if (!hwdev) - return -EFAULT; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - memset(&led_info, 0, sizeof(led_info)); - - led_info.function_id = sphw_global_func_id(hwdev); - led_info.type = type; - led_info.mode = mode; - - err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_SET_LED_CFG, &led_info, - sizeof(led_info), &led_info, &out_size); - if (err || led_info.head.status || !out_size) { - nic_err(nic_cfg->dev_hdl, "Failed to set led status, err: %d, status: 0x%x, out size: 0x%x\n", - err, led_info.head.status, out_size); - return -EIO; - } - - return 0; -} - -int spnic_get_port_info(void *hwdev, struct nic_port_info *port_info, u16 channel) -{ - struct mag_cmd_get_port_info port_msg; - u16 out_size = sizeof(port_msg); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev || !port_info) - return -EINVAL; - - memset(&port_msg, 0, sizeof(port_msg)); - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - port_msg.port_id = sphw_physical_port_id(hwdev); - - err = mag_msg_to_mgmt_sync_ch(hwdev, MAG_CMD_GET_PORT_INFO, &port_msg, - sizeof(port_msg), &port_msg, &out_size, - channel); - if (err || !out_size || port_msg.head.status) { - nic_err(nic_cfg->dev_hdl, - "Failed to get port info, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", - err, port_msg.head.status, out_size, channel); - return -EIO; - } - - port_info->autoneg_cap = port_msg.an_support; - port_info->autoneg_state = port_msg.an_en; - port_info->duplex = port_msg.duplex; - port_info->port_type = port_msg.wire_type; - port_info->speed = port_msg.speed; - port_info->fec = port_msg.fec; - port_info->supported_mode = port_msg.supported_mode; - port_info->advertised_mode = port_msg.advertised_mode; - - return 0; -} - -int spnic_get_speed(void *hwdev, enum mag_cmd_port_speed *speed, u16 channel) -{ - struct nic_port_info port_info = {0}; - int err; - - if (!hwdev || !speed) - return -EINVAL; - - err = spnic_get_port_info(hwdev, &port_info, channel); - if (err) - return err; - - *speed = port_info.speed; - - return 0; -} - -int spnic_set_link_settings(void *hwdev, struct spnic_link_ksettings *settings) -{ - struct mag_cmd_set_port_cfg info; - u16 out_size = sizeof(info); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev || !settings) - return -EINVAL; - - memset(&info, 0, sizeof(info)); - - nic_cfg = 
sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - info.port_id = sphw_physical_port_id(hwdev); - info.config_bitmap = settings->valid_bitmap; - info.autoneg = settings->autoneg; - info.speed = settings->speed; - info.fec = settings->fec; - - err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_SET_PORT_CFG, &info, - sizeof(info), &info, &out_size); - if (err || !out_size || info.head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to set link settings, err: %d, status: 0x%x, out size: 0x%x\n", - err, info.head.status, out_size); - return -EIO; - } - - return info.head.status; -} - -int spnic_get_link_state(void *hwdev, u8 *link_state) -{ - struct mag_cmd_get_link_status get_link; - u16 out_size = sizeof(get_link); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev || !link_state) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - memset(&get_link, 0, sizeof(get_link)); - get_link.port_id = sphw_physical_port_id(hwdev); - - err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_GET_LINK_STATUS, &get_link, - sizeof(get_link), &get_link, &out_size); - if (err || !out_size || get_link.head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to get link state, err: %d, status: 0x%x, out size: 0x%x\n", - err, get_link.head.status, out_size); - return -EIO; - } - - *link_state = get_link.status; - - return 0; -} - -void spnic_notify_vf_link_status(struct spnic_nic_cfg *nic_cfg, u16 vf_id, u8 link_status) -{ - struct mag_cmd_get_link_status link; - struct vf_data_storage *vf_infos = nic_cfg->vf_infos; - u16 out_size = sizeof(link); - int err; - - memset(&link, 0, sizeof(link)); - if (vf_infos[HW_VF_ID_TO_OS(vf_id)].registered) { - link.status = link_status; - link.port_id = sphw_physical_port_id(nic_cfg->hwdev); - err = sphw_mbox_to_vf(nic_cfg->hwdev, vf_id, SPHW_MOD_HILINK, - MAG_CMD_GET_LINK_STATUS, &link, sizeof(link), &link, - &out_size, 0, SPHW_CHANNEL_NIC); - if (err || !out_size || link.head.status) - nic_err(nic_cfg->dev_hdl, - "Send link change event to VF %d failed, err: %d, status: 0x%x, out_size: 0x%x\n", - HW_VF_ID_TO_OS(vf_id), err, - link.head.status, out_size); - } -} - -void spnic_notify_all_vfs_link_changed(void *hwdev, u8 link_status) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - u16 i; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - nic_cfg->link_status = link_status; - for (i = 1; i <= nic_cfg->max_vfs; i++) { - if (!nic_cfg->vf_infos[HW_VF_ID_TO_OS(i)].link_forced) - spnic_notify_vf_link_status(nic_cfg, i, link_status); - } -} - -static int spnic_get_vf_link_status_msg_handler(struct spnic_nic_cfg *nic_cfg, u16 vf_id, - void *buf_in, u16 in_size, void *buf_out, - u16 *out_size) -{ - struct vf_data_storage *vf_infos = nic_cfg->vf_infos; - struct mag_cmd_get_link_status *get_link = buf_out; - bool link_forced, link_up; - - link_forced = vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced; - link_up = vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up; - - if (link_forced) - get_link->status = link_up ? 
SPNIC_LINK_UP : SPNIC_LINK_DOWN; - else - get_link->status = nic_cfg->link_status; - - get_link->head.status = 0; - *out_size = sizeof(*get_link); - - return 0; -} - -int spnic_refresh_nic_cfg(void *hwdev, struct nic_port_info *port_info) -{ - /*TO DO */ - return 0; -} - -static void get_port_info(void *hwdev, struct mag_cmd_get_link_status *link_status, - struct sphw_event_link_info *link_info) -{ - struct nic_port_info port_info = {0}; - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (sphw_func_type(hwdev) != TYPE_VF && - link_status->status == SPHW_EVENT_LINK_UP) { - err = spnic_get_port_info(hwdev, &port_info, SPHW_CHANNEL_NIC); - if (err) { - nic_warn(nic_cfg->dev_hdl, "Failed to get port info\n"); - } else { - link_info->valid = 1; - link_info->port_type = port_info.port_type; - link_info->autoneg_cap = port_info.autoneg_cap; - link_info->autoneg_state = port_info.autoneg_state; - link_info->duplex = port_info.duplex; - link_info->speed = port_info.speed; - spnic_refresh_nic_cfg(hwdev, &port_info); - } - } -} - -static void link_status_event_handler(void *hwdev, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size) -{ - struct mag_cmd_get_link_status *link_status = NULL; - struct mag_cmd_get_link_status *ret_link_status = NULL; - struct sphw_event_info event_info = {0}; - struct sphw_event_link_info *link_info = &event_info.link_info; - struct spnic_nic_cfg *nic_cfg = NULL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - link_status = buf_in; - sdk_info(nic_cfg->dev_hdl, "Link status report received, func_id: %u, status: %u\n", - sphw_global_func_id(hwdev), link_status->status); - - sphw_link_event_stats(hwdev, link_status->status); - - /* link event reported only after set vport enable */ - get_port_info(hwdev, link_status, link_info); - - event_info.type = link_status->status ? SPHW_EVENT_LINK_UP : SPHW_EVENT_LINK_DOWN; - - sphw_event_callback(hwdev, &event_info); - - if (sphw_func_type(hwdev) != TYPE_VF) { - spnic_notify_all_vfs_link_changed(hwdev, link_status->status); - ret_link_status = buf_out; - ret_link_status->head.status = 0; - *out_size = sizeof(*ret_link_status); - } -} - -static void cable_plug_event(void *hwdev, void *buf_in, u16 in_size, void *buf_out, u16 *out_size) -{ - struct mag_cmd_wire_event *plug_event = buf_in; - struct spnic_port_routine_cmd *rt_cmd = NULL; - struct spnic_nic_cfg *nic_cfg = NULL; - struct sphw_event_info event_info; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - rt_cmd = &nic_cfg->rt_cmd; - - mutex_lock(&nic_cfg->sfp_mutex); - rt_cmd->mpu_send_sfp_abs = false; - rt_cmd->mpu_send_sfp_info = false; - mutex_unlock(&nic_cfg->sfp_mutex); - - memset(&event_info, 0, sizeof(event_info)); - event_info.type = SPHW_EVENT_PORT_MODULE_EVENT; - event_info.module_event.type = plug_event->status ? 
- SPHW_PORT_MODULE_CABLE_PLUGGED : - SPHW_PORT_MODULE_CABLE_UNPLUGGED; - - *out_size = sizeof(*plug_event); - plug_event = buf_out; - plug_event->head.status = 0; - - sphw_event_callback(hwdev, &event_info); -} - -static void port_sfp_info_event(void *hwdev, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - struct mag_cmd_get_xsfp_info *sfp_info = buf_in; - struct spnic_port_routine_cmd *rt_cmd = NULL; - struct spnic_nic_cfg *nic_cfg = NULL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (in_size != sizeof(*sfp_info)) { - sdk_err(nic_cfg->dev_hdl, "Invalid sfp info cmd, length: %u, should be %ld\n", - in_size, sizeof(*sfp_info)); - return; - } - - rt_cmd = &nic_cfg->rt_cmd; - mutex_lock(&nic_cfg->sfp_mutex); - memcpy(&rt_cmd->std_sfp_info, sfp_info, - sizeof(struct mag_cmd_get_xsfp_info)); - rt_cmd->mpu_send_sfp_info = true; - mutex_unlock(&nic_cfg->sfp_mutex); -} - -static void port_sfp_abs_event(void *hwdev, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - struct mag_cmd_get_xsfp_present *sfp_abs = buf_in; - struct spnic_port_routine_cmd *rt_cmd = NULL; - struct spnic_nic_cfg *nic_cfg = NULL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (in_size != sizeof(*sfp_abs)) { - sdk_err(nic_cfg->dev_hdl, "Invalid sfp absent cmd, length: %u, should be %ld\n", - in_size, sizeof(*sfp_abs)); - return; - } - - rt_cmd = &nic_cfg->rt_cmd; - mutex_lock(&nic_cfg->sfp_mutex); - memcpy(&rt_cmd->abs, sfp_abs, sizeof(struct mag_cmd_get_xsfp_present)); - rt_cmd->mpu_send_sfp_abs = true; - mutex_unlock(&nic_cfg->sfp_mutex); -} - -static bool spnic_if_sfp_absent(void *hwdev) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - struct spnic_port_routine_cmd *rt_cmd = NULL; - struct mag_cmd_get_xsfp_present sfp_abs; - u8 port_id = sphw_physical_port_id(hwdev); - u16 out_size = sizeof(sfp_abs); - int err; - bool sfp_abs_status; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - memset(&sfp_abs, 0, sizeof(sfp_abs)); - - rt_cmd = &nic_cfg->rt_cmd; - mutex_lock(&nic_cfg->sfp_mutex); - if (rt_cmd->mpu_send_sfp_abs) { - if (rt_cmd->abs.head.status) { - mutex_unlock(&nic_cfg->sfp_mutex); - return true; - } - - sfp_abs_status = (bool)rt_cmd->abs.abs_status; - mutex_unlock(&nic_cfg->sfp_mutex); - return sfp_abs_status; - } - mutex_unlock(&nic_cfg->sfp_mutex); - - sfp_abs.port_id = port_id; - err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_GET_XSFP_PRESENT, - &sfp_abs, sizeof(sfp_abs), &sfp_abs, - &out_size); - if (sfp_abs.head.status || err || !out_size) { - nic_err(nic_cfg->dev_hdl, - "Failed to get port%u sfp absent status, err: %d, status: 0x%x, out size: 0x%x\n", - port_id, err, sfp_abs.head.status, out_size); - return true; - } - - return (sfp_abs.abs_status == 0 ? 
false : true); -} - -int spnic_get_sfp_eeprom(void *hwdev, u8 *data, u32 len) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - struct spnic_port_routine_cmd *rt_cmd = NULL; - struct mag_cmd_get_xsfp_info sfp_info; - u16 out_size = sizeof(sfp_info); - int err; - - if (!hwdev || !data) - return -EINVAL; - - if (spnic_if_sfp_absent(hwdev)) - return -ENXIO; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - memset(&sfp_info, 0, sizeof(sfp_info)); - - rt_cmd = &nic_cfg->rt_cmd; - mutex_lock(&nic_cfg->sfp_mutex); - if (rt_cmd->mpu_send_sfp_info) { - if (rt_cmd->std_sfp_info.head.status) { - mutex_unlock(&nic_cfg->sfp_mutex); - return -EIO; - } - - memcpy(data, rt_cmd->std_sfp_info.sfp_info, len); - mutex_unlock(&nic_cfg->sfp_mutex); - return 0; - } - mutex_unlock(&nic_cfg->sfp_mutex); - - sfp_info.port_id = sphw_physical_port_id(hwdev); - err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_GET_XSFP_INFO, &sfp_info, - sizeof(sfp_info), &sfp_info, &out_size); - if (sfp_info.head.status || err || !out_size) { - nic_err(nic_cfg->dev_hdl, - "Failed to get port%u sfp eeprom information, err: %d, status: 0x%x, out size: 0x%x\n", - sphw_physical_port_id(hwdev), err, - sfp_info.head.status, out_size); - return -EIO; - } - - memcpy(data, sfp_info.sfp_info, len); - - return 0; -} - -int spnic_get_sfp_type(void *hwdev, u8 *sfp_type, u8 *sfp_type_ext) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - struct spnic_port_routine_cmd *rt_cmd = NULL; - u8 sfp_data[STD_SFP_INFO_MAX_SIZE]; - int err; - - if (!hwdev || !sfp_type || !sfp_type_ext) - return -EINVAL; - - if (spnic_if_sfp_absent(hwdev)) - return -ENXIO; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - rt_cmd = &nic_cfg->rt_cmd; - - mutex_lock(&nic_cfg->sfp_mutex); - if (rt_cmd->mpu_send_sfp_info) { - if (rt_cmd->std_sfp_info.head.status) { - mutex_unlock(&nic_cfg->sfp_mutex); - return -EIO; - } - - *sfp_type = rt_cmd->std_sfp_info.sfp_info[0]; - *sfp_type_ext = rt_cmd->std_sfp_info.sfp_info[1]; - mutex_unlock(&nic_cfg->sfp_mutex); - return 0; - } - mutex_unlock(&nic_cfg->sfp_mutex); - - err = spnic_get_sfp_eeprom(hwdev, (u8 *)sfp_data, STD_SFP_INFO_MAX_SIZE); - if (err) - return err; - - *sfp_type = sfp_data[0]; - *sfp_type_ext = sfp_data[1]; - - return 0; -} - -static const struct vf_msg_handler vf_mag_cmd_handler[] = { - { - .cmd = MAG_CMD_GET_LINK_STATUS, - .handler = spnic_get_vf_link_status_msg_handler, - }, -}; - -/* pf/ppf handler mbox msg from vf */ -int spnic_pf_mag_mbox_handler(void *hwdev, void *pri_handle, u16 vf_id, - u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - u32 index, cmd_size = ARRAY_LEN(vf_mag_cmd_handler); - struct spnic_nic_cfg *nic_cfg = NULL; - const struct vf_msg_handler *handler = NULL; - - if (!hwdev) - return -EFAULT; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - for (index = 0; index < cmd_size; index++) { - handler = &vf_mag_cmd_handler[index]; - if (cmd == handler->cmd) - return handler->handler(nic_cfg, vf_id, buf_in, in_size, - buf_out, out_size); - } - - nic_warn(nic_cfg->dev_hdl, "NO handler for mag cmd: %u received from vf id: %u\n", - cmd, vf_id); - - return -EINVAL; -} - -static struct nic_event_handler mag_cmd_handler[] = { - { - .cmd = MAG_CMD_GET_LINK_STATUS, - .handler = link_status_event_handler, - }, - - { - .cmd = MAG_CMD_WIRE_EVENT, - .handler = cable_plug_event, - }, - - { - .cmd = MAG_CMD_GET_XSFP_INFO, - .handler = port_sfp_info_event, - }, - - { - .cmd = MAG_CMD_GET_XSFP_PRESENT, - .handler = port_sfp_abs_event, - }, -}; - -int 
spnic_mag_event_handler(void *hwdev, void *pri_handle, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - u32 size = ARRAY_LEN(mag_cmd_handler); - u32 i; - - if (!hwdev) - return -EINVAL; - - *out_size = 0; - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - for (i = 0; i < size; i++) { - if (cmd == mag_cmd_handler[i].cmd) { - mag_cmd_handler[i].handler(hwdev, buf_in, in_size, - buf_out, out_size); - break; - } - } - - /* can't find this event cmd */ - if (i == size) - sdk_warn(nic_cfg->dev_hdl, "Unsupported mag event, cmd: %u\n", - cmd); - - return 0; -} - -int spnic_vf_mag_event_handler(void *hwdev, void *pri_handle, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - return spnic_mag_event_handler(hwdev, pri_handle, cmd, buf_in, in_size, buf_out, out_size); -} - -/* pf/ppf handler mgmt cpu report hilink event*/ -void spnic_pf_mag_event_handler(void *hwdev, void *pri_handle, u16 cmd, - void *buf_in, u16 in_size, void *buf_out, u16 *out_size) -{ - spnic_mag_event_handler(hwdev, pri_handle, cmd, buf_in, in_size, buf_out, out_size); -} - -static int _mag_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size, u16 channel) -{ - u32 i, cmd_cnt = ARRAY_LEN(vf_mag_cmd_handler); - bool cmd_to_pf = false; - - if (sphw_func_type(hwdev) == TYPE_VF) { - for (i = 0; i < cmd_cnt; i++) { - if (cmd == vf_mag_cmd_handler[i].cmd) { - cmd_to_pf = true; - break; - } - } - } - - if (cmd_to_pf) - return sphw_mbox_to_pf(hwdev, SPHW_MOD_HILINK, cmd, buf_in, in_size, buf_out, - out_size, 0, channel); - - return sphw_msg_to_mgmt_sync(hwdev, SPHW_MOD_HILINK, cmd, buf_in, - in_size, buf_out, out_size, 0, channel); -} - -static int mag_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - return _mag_msg_to_mgmt_sync(hwdev, cmd, buf_in, in_size, buf_out, - out_size, SPHW_CHANNEL_NIC); -} - -static int mag_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size, - u16 channel) -{ - return _mag_msg_to_mgmt_sync(hwdev, cmd, buf_in, in_size, buf_out, - out_size, channel); -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_mag_cmd.h b/drivers/net/ethernet/ramaxel/spnic/spnic_mag_cmd.h deleted file mode 100644 index 4e65b7af115b..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_mag_cmd.h +++ /dev/null @@ -1,643 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_MAG_CMD_H -#define SPNIC_MAG_CMD_H - -#include "sphw_mgmt_msg_base.h" - -enum mag_cmd { - SERDES_CMD_PROCESS = 0, - - MAG_CMD_SET_PORT_CFG = 1, - MAG_CMD_SET_PORT_ADAPT = 2, - MAG_CMD_CFG_LOOPBACK_MODE = 3, - - MAG_CMD_GET_PORT_ENABLE = 5, - MAG_CMD_SET_PORT_ENABLE = 6, - MAG_CMD_GET_LINK_STATUS = 7, - MAG_CMD_SET_LINK_FOLLOW = 8, - MAG_CMD_SET_PMA_ENABLE = 9, - MAG_CMD_CFG_FEC_MODE = 10, - - MAG_CMD_CFG_AN_TYPE = 12, /* reserved for furture use */ - - MAG_CMD_SET_PANGEA_ADAPT = 15, - - MAG_CMD_CFG_BIOS_LINK_CFG = 31, - MAG_CMD_RESTORE_LINK_CFG = 32, - MAG_CMD_ACTIVATE_BIOS_LINK_CFG = 33, - - /* LED */ - MAG_CMD_SET_LED_CFG = 50, - - /* PHY */ - MAG_CMD_GET_PHY_INIT_STATUS = 55, /* reserved for furture use */ - - MAG_CMD_GET_XSFP_INFO = 60, - MAG_CMD_SET_XSFP_ENABLE = 61, - MAG_CMD_GET_XSFP_PRESENT = 62, - MAG_CMD_SET_XSFP_RW = 63, - MAG_CMD_CFG_XSFP_TEMPERATURE = 64, - - MAG_CMD_WIRE_EVENT = 100, - MAG_CMD_LINK_ERR_EVENT = 101, - - MAG_CMD_EVENT_PORT_INFO 
= 150, - MAG_CMD_GET_PORT_STAT = 151, - MAG_CMD_CLR_PORT_STAT = 152, - MAG_CMD_GET_PORT_INFO = 153, - MAG_CMD_GET_PCS_ERR_CNT = 154, - MAG_CMD_GET_MAG_CNT = 155, - MAG_CMD_DUMP_ANTRAIN_INFO = 156, - - MAG_CMD_MAX = 0xFF, -}; - -enum mag_cmd_port_speed { - PORT_SPEED_NOT_SET = 0, - PORT_SPEED_10MB = 1, - PORT_SPEED_100MB = 2, - PORT_SPEED_1GB = 3, - PORT_SPEED_10GB = 4, - PORT_SPEED_25GB = 5, - PORT_SPEED_40GB = 6, - PORT_SPEED_50GB = 7, - PORT_SPEED_100GB = 8, - PORT_SPEED_200GB = 9, - PORT_SPEED_UNKNOWN -}; - -enum mag_cmd_port_an { - PORT_AN_NOT_SET = 0, - PORT_CFG_AN_ON = 1, - PORT_CFG_AN_OFF = 2 -}; - -enum mag_cmd_port_fec { - PORT_FEC_NOT_SET = 0, - PORT_FEC_RSFEC = 1, - PORT_FEC_BASEFEC = 2, - PORT_FEC_NOFEC = 3, - PORT_FEC_LLRSFEC = 4 -}; - -enum mag_cmd_port_lanes { - PORT_LANES_NOT_SET = 0, - PORT_LANES_X1 = 1, - PORT_LANES_X2 = 2, - PORT_LANES_X4 = 4, - PORT_LANES_X8 = 8, /* reserved for future use */ -}; - -enum mag_cmd_port_duplex { - PORT_DUPLEX_HALF = 0, - PORT_DUPLEX_FULL = 1 -}; - -/* mag_cmd_set_port_cfg config bitmap */ -#define MAG_CMD_SET_SPEED 0x1 -#define MAG_CMD_SET_AUTONEG 0x2 -#define MAG_CMD_SET_FEC 0x4 -#define MAG_CMD_SET_LANES 0x8 -struct mag_cmd_set_port_cfg { - struct mgmt_msg_head head; - - u8 port_id; - u8 rsvd0[3]; - - u32 config_bitmap; - u8 speed; - u8 autoneg; - u8 fec; - u8 lanes; - u8 rsvd1[20]; -}; - -/* mag supported/advertised link mode bitmap */ -enum mag_cmd_link_mode { - LINK_MODE_GE = 0, - LINK_MODE_10GE_BASE_R = 1, - LINK_MODE_25GE_BASE_R = 2, - LINK_MODE_40GE_BASE_R4 = 3, - LINK_MODE_50GE_BASE_R = 4, - LINK_MODE_50GE_BASE_R2 = 5, - LINK_MODE_100GE_BASE_R = 6, - LINK_MODE_100GE_BASE_R2 = 7, - LINK_MODE_100GE_BASE_R4 = 8, - LINK_MODE_200GE_BASE_R2 = 9, - LINK_MODE_200GE_BASE_R4 = 10, - LINK_MODE_MAX_NUMBERS, - - LINK_MODE_UNKNOWN = 0xFFFF -}; - -struct mag_cmd_get_port_info { - struct mgmt_msg_head head; - - u8 port_id; - u8 rsvd0[3]; - - u8 wire_type; - u8 an_support; - u8 an_en; - u8 duplex; - - u8 speed; - u8 fec; - u8 lanes; - u8 rsvd1; - - u32 supported_mode; - u32 advertised_mode; - u8 rsvd2[8]; -}; - -#define MAG_CMD_OPCODE_GET 0 -#define MAG_CMD_OPCODE_SET 1 -struct mag_cmd_set_port_adapt { - struct mgmt_msg_head head; - - u8 port_id; - u8 opcode; /* 0:get adapt info 1:set adapt */ - u8 enable; - u8 rsvd0; - u32 speed_mode; - u32 rsvd1[3]; -}; - -#define MAG_CMD_LP_MODE_SDS_S_TX2RX 1 -#define MAG_CMD_LP_MODE_SDS_P_RX2TX 2 -#define MAG_CMD_LP_MODE_SDS_P_TX2RX 3 -#define MAG_CMD_LP_MODE_MAC_RX2TX 4 -#define MAG_CMD_LP_MODE_MAC_TX2RX 5 -#define MAG_CMD_LP_MODE_TXDP2RXDP 6 -struct mag_cmd_cfg_loopback_mode { - struct mgmt_msg_head head; - - u8 port_id; - u8 opcode; /* 0:get loopback mode 1:set loopback mode */ - u8 lp_mode; - u8 lp_en; /* 0:disable 1:enable */ - - u32 rsvd0[2]; -}; - -#define MAG_CMD_PORT_DISABLE 0x0 -#define MAG_CMD_TX_ENABLE 0x1 -#define MAG_CMD_RX_ENABLE 0x2 - -struct mag_cmd_set_port_enable { - struct mgmt_msg_head head; - - u16 function_id; /* function_id should not more than the max support pf_id(32) */ - u16 rsvd0; - - u8 state; /* bitmap bit0:tx_en bit1:rx_en */ - u8 rsvd1[3]; -}; - -struct mag_cmd_get_port_enable { - struct mgmt_msg_head head; - - u8 port; - u8 state; /* bitmap bit0:tx_en bit1:rx_en */ - u8 rsvd0[2]; -}; - -#define PMA_FOLLOW_DEFAULT 0x0 -#define PMA_FOLLOW_ENABLE 0x1 -#define PMA_FOLLOW_DISABLE 0x2 -/* the physical port disable link follow only when all pf of the port are set to follow disable */ -struct mag_cmd_set_link_follow { - struct mgmt_msg_head head; - - u16 function_id; /* function_id 
should not more than the max support pf_id(32) */ - u16 rsvd0; - - u8 follow; - u8 rsvd1[3]; -}; - -/* firmware also use this cmd report link event to driver */ -struct mag_cmd_get_link_status { - struct mgmt_msg_head head; - - u8 port_id; - u8 status; /* 0:link down 1:link up */ - u8 rsvd0[2]; -}; - -struct mag_cmd_set_pma_enable { - struct mgmt_msg_head head; - - u16 function_id; /* function_id should not more than the max support pf_id(32) */ - u16 enable; -}; - -struct mag_cmd_cfg_an_type { - struct mgmt_msg_head head; - - u8 port_id; - u8 opcode; /* 0:get an type 1:set an type */ - u8 rsvd0[2]; - - u32 an_type; /* 0:ieee 1:25G/50 eth consortium */ -}; - -struct mag_cmd_cfg_fec_mode { - struct mgmt_msg_head head; - - u8 port_id; - u8 opcode; /* 0:get fec mode 1:set fec mode */ - u8 fec; - u8 rsvd0; -}; - -struct mag_cmd_cfg_bios_link_cfg { - struct mgmt_msg_head head; - - u8 port_id; - u8 opcode; /* 0:get bios link info 1:set bios link cfg */ - u8 clear; - u8 rsvd0; - - u32 wire_type; - u8 an_en; - u8 speed; - u8 fec; - u8 rsvd1; - u32 speed_mode; - u32 rsvd2[3]; -}; - -struct mag_cmd_restore_link_cfg { - struct mgmt_msg_head head; - - u8 port_id; - u8 rsvd[7]; -}; - -struct mag_cmd_activate_bios_link_cfg { - struct mgmt_msg_head head; - - u32 rsvd[8]; -}; - -/* led type */ -enum mag_led_type { - MAG_CMD_LED_TYPE_ALARM = 0x0, - MAG_CMD_LED_TYPE_LOW_SPEED = 0x1, - MAG_CMD_LED_TYPE_HIGH_SPEED = 0x2 -}; - -/* led mode */ -enum mag_led_mode { - MAG_CMD_LED_MODE_DEFAULT = 0x0, - MAG_CMD_LED_MODE_FORCE_ON = 0x1, - MAG_CMD_LED_MODE_FORCE_OFF = 0x2, - MAG_CMD_LED_MODE_FORCE_BLINK_1HZ = 0x3, - MAG_CMD_LED_MODE_FORCE_BLINK_2HZ = 0x4, - MAG_CMD_LED_MODE_FORCE_BLINK_4HZ = 0x5, - MAG_CMD_LED_MODE_1HZ = 0x6, - MAG_CMD_LED_MODE_2HZ = 0x7, - MAG_CMD_LED_MODE_4HZ = 0x8, -}; - -/* the led is report alarm when any pf of the port is alram */ -struct mag_cmd_set_led_cfg { - struct mgmt_msg_head head; - - u16 function_id; - u8 type; - u8 mode; -}; - -#define XSFP_INFO_MAX_SIZE 640 -/* xsfp wire type, refer to cmis protocol definition */ -enum mag_wire_type { - MAG_CMD_WIRE_TYPE_UNKNOWN = 0x0, - MAG_CMD_WIRE_TYPE_MM = 0x1, - MAG_CMD_WIRE_TYPE_SM = 0x2, - MAG_CMD_WIRE_TYPE_COPPER = 0x3, - MAG_CMD_WIRE_TYPE_ACC = 0x4, - MAG_CMD_WIRE_TYPE_BASET = 0x5, - MAG_CMD_WIRE_TYPE_AOC = 0x40, - MAG_CMD_WIRE_TYPE_ELECTRIC = 0x41, - MAG_CMD_WIRE_TYPE_BACKPLANE = 0x42 -}; - -struct mag_cmd_get_xsfp_info { - struct mgmt_msg_head head; - - u8 port_id; - u8 wire_type; - u16 out_len; - u32 rsvd; - u8 sfp_info[XSFP_INFO_MAX_SIZE]; -}; - -#define MAG_CMD_XSFP_DISABLE 0x0 -#define MAG_CMD_XSFP_ENABLE 0x1 - -struct mag_cmd_set_xsfp_enable { - struct mgmt_msg_head head; - - u16 function_id; /* function_id should not more than the max support pf_id(32) */ - u16 rsvd0; - - u8 status; - u8 rsvd1[3]; -}; - -#define MAG_CMD_XSFP_PRESENT 0x0 -#define MAG_CMD_XSFP_ABSENT 0x1 -struct mag_cmd_get_xsfp_present { - struct mgmt_msg_head head; - - u8 port_id; - u8 abs_status; /* 0:present, 1:absent */ - u8 rsvd[2]; -}; - -#define MAG_CMD_XSFP_READ 0x0 -#define MAG_CMD_XSFP_WRITE 0x1 -struct mag_cmd_set_xsfp_rw { - struct mgmt_msg_head head; - - u8 port_id; - u8 operation; /* 0: read; 1: write */ - u8 value; - u8 rsvd0; - u32 devaddr; - u32 offset; - u32 rsvd1; -}; - -struct mag_cmd_cfg_xsfp_temperature { - struct mgmt_msg_head head; - - u8 opcode; /* 0:read 1:write */ - u8 rsvd0[3]; - s32 max_temp; - s32 min_temp; -}; - -struct mag_cmd_get_xsfp_temperature { - struct mgmt_msg_head head; - - s16 sfp_temp[8]; - u8 rsvd[32]; - s32 max_temp; - s32 
min_temp; -}; - -/* xsfp plug event */ -struct mag_cmd_wire_event { - struct mgmt_msg_head head; - - u8 port_id; - u8 status; /* 0:present, 1:absent */ - u8 rsvd[2]; -}; - -/* link err type definition */ -#define MAG_CMD_ERR_XSFP_UNKNOWN 0x0 -struct mag_cmd_link_err_event { - struct mgmt_msg_head head; - - u8 port_id; - u8 link_err_type; - u8 rsvd[2]; -}; - -#define MAG_PARAM_TYPE_DEFAULT_CFG 0x0 -#define MAG_PARAM_TYPE_BIOS_CFG 0x1 -#define MAG_PARAM_TYPE_TOOL_CFG 0x2 -#define MAG_PARAM_TYPE_FINAL_CFG 0x3 -#define MAG_PARAM_TYPE_WIRE_INFO 0x4 -#define MAG_PARAM_TYPE_ADAPT_INFO 0x5 -#define MAG_PARAM_TYPE_MAX_CNT 0x6 -struct param_head { - u8 valid_len; - u8 info_type; - u8 rsvd[2]; -}; - -struct mag_port_link_param { - struct param_head head; - - u8 an; - u8 fec; - u8 speed; - u8 rsvd0; - - u32 used; - u32 an_fec_ability; - u32 an_speed_ability; - u32 an_pause_ability; -}; - -struct mag_port_wire_info { - struct param_head head; - - u8 status; - u8 rsvd0[3]; - - u8 wire_type; - u8 default_fec; - u8 speed; - u8 rsvd1; - u32 speed_ability; -}; - -struct mag_port_adapt_info { - struct param_head head; - - u32 adapt_en; - u32 flash_adapt; - u32 rsvd0[2]; - - u32 wire_node; - u32 an_en; - u32 speed; - u32 fec; -}; - -struct mag_port_param_info { - u8 parameter_cnt; - u8 lane_id; - u8 lane_num; - u8 rsvd0; - - struct mag_port_link_param default_cfg; - struct mag_port_link_param bios_cfg; - struct mag_port_link_param tool_cfg; - struct mag_port_link_param final_cfg; - - struct mag_port_wire_info wire_info; - struct mag_port_adapt_info adapt_info; -}; - -struct mag_cmd_event_port_info { - struct mgmt_msg_head head; - - u8 port_id; - u8 event_type; - u8 rsvd0[2]; - - u8 vendor_name[16]; - u32 port_type; - u32 port_sub_type; - u32 cable_length; - u8 cable_temp; - u8 max_speed; - u8 sfp_type; - u8 rsvd1; - u32 power[4]; - - u8 an_state; - u8 fec; - u16 speed; - - u8 gpio_insert; /* 0:present 1:absent */ - u8 alos; - u8 rx_los; - u8 pma_ctrl; - - u32 pma_fifo_reg; - u32 pma_signal_ok_reg; - u32 pcs_64_66b_reg; - u32 rf_lf; - u8 pcs_link; - u8 pcs_mac_link; - u8 tx_enable; - u8 rx_enable; - u32 pcs_err_cnt; - - u8 eq_data[38]; - u8 rsvd2[2]; - - u32 his_link_machine_state; - u32 cur_link_machine_state; - u8 his_machine_state_data[128]; - u8 cur_machine_state_data[128]; - u8 his_machine_state_length; - u8 cur_machine_state_length; - - struct mag_port_param_info param_info; - u8 rsvd3[360]; -}; - -struct mag_cmd_port_stats { - u64 mac_tx_fragment_pkt_num; - u64 mac_tx_undersize_pkt_num; - u64 mac_tx_undermin_pkt_num; - u64 mac_tx_64_oct_pkt_num; - u64 mac_tx_65_127_oct_pkt_num; - u64 mac_tx_128_255_oct_pkt_num; - u64 mac_tx_256_511_oct_pkt_num; - u64 mac_tx_512_1023_oct_pkt_num; - u64 mac_tx_1024_1518_oct_pkt_num; - u64 mac_tx_1519_2047_oct_pkt_num; - u64 mac_tx_2048_4095_oct_pkt_num; - u64 mac_tx_4096_8191_oct_pkt_num; - u64 mac_tx_8192_9216_oct_pkt_num; - u64 mac_tx_9217_12287_oct_pkt_num; - u64 mac_tx_12288_16383_oct_pkt_num; - u64 mac_tx_1519_max_bad_pkt_num; - u64 mac_tx_1519_max_good_pkt_num; - u64 mac_tx_oversize_pkt_num; - u64 mac_tx_jabber_pkt_num; - u64 mac_tx_bad_pkt_num; - u64 mac_tx_bad_oct_num; - u64 mac_tx_good_pkt_num; - u64 mac_tx_good_oct_num; - u64 mac_tx_total_pkt_num; - u64 mac_tx_total_oct_num; - u64 mac_tx_uni_pkt_num; - u64 mac_tx_multi_pkt_num; - u64 mac_tx_broad_pkt_num; - u64 mac_tx_pause_num; - u64 mac_tx_pfc_pkt_num; - u64 mac_tx_pfc_pri0_pkt_num; - u64 mac_tx_pfc_pri1_pkt_num; - u64 mac_tx_pfc_pri2_pkt_num; - u64 mac_tx_pfc_pri3_pkt_num; - u64 mac_tx_pfc_pri4_pkt_num; - u64 
mac_tx_pfc_pri5_pkt_num; - u64 mac_tx_pfc_pri6_pkt_num; - u64 mac_tx_pfc_pri7_pkt_num; - u64 mac_tx_control_pkt_num; - u64 mac_tx_err_all_pkt_num; - u64 mac_tx_from_app_good_pkt_num; - u64 mac_tx_from_app_bad_pkt_num; - - u64 mac_rx_fragment_pkt_num; - u64 mac_rx_undersize_pkt_num; - u64 mac_rx_undermin_pkt_num; - u64 mac_rx_64_oct_pkt_num; - u64 mac_rx_65_127_oct_pkt_num; - u64 mac_rx_128_255_oct_pkt_num; - u64 mac_rx_256_511_oct_pkt_num; - u64 mac_rx_512_1023_oct_pkt_num; - u64 mac_rx_1024_1518_oct_pkt_num; - u64 mac_rx_1519_2047_oct_pkt_num; - u64 mac_rx_2048_4095_oct_pkt_num; - u64 mac_rx_4096_8191_oct_pkt_num; - u64 mac_rx_8192_9216_oct_pkt_num; - u64 mac_rx_9217_12287_oct_pkt_num; - u64 mac_rx_12288_16383_oct_pkt_num; - u64 mac_rx_1519_max_bad_pkt_num; - u64 mac_rx_1519_max_good_pkt_num; - u64 mac_rx_oversize_pkt_num; - u64 mac_rx_jabber_pkt_num; - u64 mac_rx_bad_pkt_num; - u64 mac_rx_bad_oct_num; - u64 mac_rx_good_pkt_num; - u64 mac_rx_good_oct_num; - u64 mac_rx_total_pkt_num; - u64 mac_rx_total_oct_num; - u64 mac_rx_uni_pkt_num; - u64 mac_rx_multi_pkt_num; - u64 mac_rx_broad_pkt_num; - u64 mac_rx_pause_num; - u64 mac_rx_pfc_pkt_num; - u64 mac_rx_pfc_pri0_pkt_num; - u64 mac_rx_pfc_pri1_pkt_num; - u64 mac_rx_pfc_pri2_pkt_num; - u64 mac_rx_pfc_pri3_pkt_num; - u64 mac_rx_pfc_pri4_pkt_num; - u64 mac_rx_pfc_pri5_pkt_num; - u64 mac_rx_pfc_pri6_pkt_num; - u64 mac_rx_pfc_pri7_pkt_num; - u64 mac_rx_control_pkt_num; - u64 mac_rx_sym_err_pkt_num; - u64 mac_rx_fcs_err_pkt_num; - u64 mac_rx_send_app_good_pkt_num; - u64 mac_rx_send_app_bad_pkt_num; - u64 mac_rx_unfilter_pkt_num; -}; - -struct mag_cmd_port_stats_info { - struct mgmt_msg_head head; - - u8 port_id; - u8 rsvd0[3]; -}; - -struct mag_cmd_get_port_stat { - struct mgmt_msg_head head; - - struct mag_cmd_port_stats counter; - u64 rsvd1[15]; -}; - -struct mag_cmd_clr_port_stat { - struct mgmt_msg_head head; - - u8 port_id; - u8 rsvd0[3]; -}; - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_main.c b/drivers/net/ethernet/ramaxel/spnic/spnic_main.c deleted file mode 100644 index 5b9822ebd806..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_main.c +++ /dev/null @@ -1,924 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/device.h> -#include <linux/module.h> -#include <linux/moduleparam.h> -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/interrupt.h> -#include <linux/etherdevice.h> -#include <linux/netdevice.h> -#include <linux/if_vlan.h> -#include <linux/ethtool.h> -#include <linux/dcbnl.h> -#include <linux/tcp.h> -#include <linux/ip.h> -#include <linux/debugfs.h> - -#include "sphw_common.h" -#include "sphw_hw.h" -#include "sphw_crm.h" -#include "sphw_mt.h" -#include "spnic_nic_cfg.h" -#include "spnic_nic_io.h" -#include "spnic_nic_dev.h" -#include "spnic_tx.h" -#include "spnic_rx.h" -#include "spnic_lld.h" -#include "spnic_rss.h" -#include "spnic_dcb.h" - -#define DEFAULT_POLL_WEIGHT 64 -static unsigned int poll_weight = DEFAULT_POLL_WEIGHT; -module_param(poll_weight, uint, 0444); -MODULE_PARM_DESC(poll_weight, "Number packets for NAPI budget (default=64)"); - -#define SPNIC_DEAULT_TXRX_MSIX_PENDING_LIMIT 0 -#define SPNIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG 0 -#define SPNIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG 7 - -static unsigned char qp_pending_limit = SPNIC_DEAULT_TXRX_MSIX_PENDING_LIMIT; -module_param(qp_pending_limit, byte, 
0444); -MODULE_PARM_DESC(qp_pending_limit, "QP MSI-X Interrupt coalescing parameter pending_limit (default=2)"); - -static unsigned char qp_coalesc_timer_cfg = - SPNIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG; -module_param(qp_coalesc_timer_cfg, byte, 0444); -MODULE_PARM_DESC(qp_coalesc_timer_cfg, "QP MSI-X Interrupt coalescing parameter coalesc_timer_cfg (default=32)"); - -#define DEFAULT_RX_BUFF_LEN 2 -u16 rx_buff = DEFAULT_RX_BUFF_LEN; -module_param(rx_buff, ushort, 0444); -MODULE_PARM_DESC(rx_buff, "Set rx_buff size, buffer len must be 2^n. 2 - 16, default is 2KB"); - -static unsigned int lro_replenish_thld = 256; -module_param(lro_replenish_thld, uint, 0444); -MODULE_PARM_DESC(lro_replenish_thld, "Number wqe for lro replenish buffer (default=256)"); - -#define SPNIC_NIC_DEV_WQ_NAME "spnic_nic_dev_wq" - -#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_LINK) - -#define QID_MASKED(q_id, nic_dev) ((q_id) & ((nic_dev)->num_qps - 1)) -#define WATCHDOG_TIMEOUT 5 - -#define SPNIC_SQ_DEPTH 1024 -#define SPNIC_RQ_DEPTH 1024 - -enum spnic_rx_buff_len { - RX_BUFF_VALID_2KB = 2, - RX_BUFF_VALID_4KB = 4, - RX_BUFF_VALID_8KB = 8, - RX_BUFF_VALID_16KB = 16, -}; - -#define CONVERT_UNIT 1024 - -static int spnic_netdev_event(struct notifier_block *notifier, unsigned long event, void *ptr); - -/* used for netdev notifier register/unregister */ -DEFINE_MUTEX(spnic_netdev_notifiers_mutex); -static int spnic_netdev_notifiers_ref_cnt; -static struct notifier_block spnic_netdev_notifier = { - .notifier_call = spnic_netdev_event, -}; - -static void spnic_register_notifier(struct spnic_nic_dev *nic_dev) -{ - int err; - - mutex_lock(&spnic_netdev_notifiers_mutex); - spnic_netdev_notifiers_ref_cnt++; - if (spnic_netdev_notifiers_ref_cnt == 1) { - err = register_netdevice_notifier(&spnic_netdev_notifier); - if (err) { - nic_info(&nic_dev->pdev->dev, "Register netdevice notifier failed, err: %d\n", - err); - spnic_netdev_notifiers_ref_cnt--; - } - } - mutex_unlock(&spnic_netdev_notifiers_mutex); -} - -static void spnic_unregister_notifier(struct spnic_nic_dev *nic_dev) -{ - mutex_lock(&spnic_netdev_notifiers_mutex); - if (spnic_netdev_notifiers_ref_cnt == 1) - unregister_netdevice_notifier(&spnic_netdev_notifier); - - if (spnic_netdev_notifiers_ref_cnt) - spnic_netdev_notifiers_ref_cnt--; - mutex_unlock(&spnic_netdev_notifiers_mutex); -} - -#define SPNIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT 1 -#define SPNIC_VLAN_CLEAR_OFFLOAD (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \ - NETIF_F_SCTP_CRC | NETIF_F_RXCSUM | \ - NETIF_F_ALL_TSO) - -int spnic_netdev_event(struct notifier_block *notifier, unsigned long event, void *ptr) -{ - struct net_device *ndev = netdev_notifier_info_to_dev(ptr); - struct net_device *real_dev = NULL; - struct net_device *ret = NULL; - struct spnic_nic_dev *nic_dev = NULL; - u16 vlan_depth; - - if (!is_vlan_dev(ndev)) - return NOTIFY_DONE; - - dev_hold(ndev); - - switch (event) { - case NETDEV_REGISTER: - real_dev = vlan_dev_real_dev(ndev); - nic_dev = spnic_get_uld_dev_by_ifname(real_dev->name, SERVICE_T_NIC); - if (!nic_dev) - goto out; - - vlan_depth = 1; - ret = vlan_dev_priv(ndev)->real_dev; - while (is_vlan_dev(ret)) { - ret = vlan_dev_priv(ret)->real_dev; - vlan_depth++; - } - - if (vlan_depth == SPNIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT) { - ndev->vlan_features &= (~SPNIC_VLAN_CLEAR_OFFLOAD); - } else if (vlan_depth > SPNIC_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT) { - ndev->hw_features &= (~SPNIC_VLAN_CLEAR_OFFLOAD); - ndev->features &= (~SPNIC_VLAN_CLEAR_OFFLOAD); - } - - break; - - default: - break; - }; - -out: 
- dev_put(ndev); - - return NOTIFY_DONE; -} - -void spnic_link_status_change(struct spnic_nic_dev *nic_dev, bool status) -{ - struct net_device *netdev = nic_dev->netdev; - - if (!SPHW_CHANNEL_RES_VALID(nic_dev) || test_bit(SPNIC_LP_TEST, &nic_dev->flags)) - return; - - if (status) { - if (netif_carrier_ok(netdev)) - return; - - nic_dev->link_status = status; - netif_carrier_on(netdev); - nicif_info(nic_dev, link, netdev, "Link is up\n"); - } else { - if (!netif_carrier_ok(netdev)) - return; - - nic_dev->link_status = status; - netif_carrier_off(netdev); - nicif_info(nic_dev, link, netdev, "Link is down\n"); - } -} - -static void netdev_feature_init(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - netdev_features_t dft_fts = 0; - netdev_features_t cso_fts = 0; - netdev_features_t vlan_fts = 0; - netdev_features_t tso_fts = 0; - netdev_features_t hw_features = 0; - - dft_fts |= NETIF_F_SG | NETIF_F_HIGHDMA; - - if (SPNIC_SUPPORT_CSUM(nic_dev->hwdev)) - cso_fts |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; - if (SPNIC_SUPPORT_SCTP_CRC(nic_dev->hwdev)) - cso_fts |= NETIF_F_SCTP_CRC; - - if (SPNIC_SUPPORT_TSO(nic_dev->hwdev)) - tso_fts |= NETIF_F_TSO | NETIF_F_TSO6; - - if (SPNIC_SUPPORT_VLAN_OFFLOAD(nic_dev->hwdev)) { - vlan_fts |= NETIF_F_HW_VLAN_CTAG_TX; - vlan_fts |= NETIF_F_HW_VLAN_CTAG_RX; - } - - if (SPNIC_SUPPORT_RXVLAN_FILTER(nic_dev->hwdev)) - vlan_fts |= NETIF_F_HW_VLAN_CTAG_FILTER; - - if (SPNIC_SUPPORT_VXLAN_OFFLOAD(nic_dev->hwdev)) - tso_fts |= NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM; - - /* LRO is disable in default, only set hw features */ - if (SPNIC_SUPPORT_LRO(nic_dev->hwdev)) - hw_features |= NETIF_F_LRO; - - netdev->features |= dft_fts | cso_fts | tso_fts | vlan_fts; - netdev->vlan_features |= dft_fts | cso_fts | tso_fts; - - hw_features |= netdev->hw_features; - - hw_features |= netdev->features; - - netdev->hw_features = hw_features; - - netdev->priv_flags |= IFF_UNICAST_FLT; - - netdev->hw_enc_features |= dft_fts; - if (SPNIC_SUPPORT_VXLAN_OFFLOAD(nic_dev->hwdev)) { - netdev->hw_enc_features |= cso_fts; - netdev->hw_enc_features |= tso_fts | NETIF_F_TSO_ECN; - } -} - -static void init_intr_coal_param(struct spnic_nic_dev *nic_dev) -{ - struct spnic_intr_coal_info *info = NULL; - u16 i; - - for (i = 0; i < nic_dev->max_qps; i++) { - info = &nic_dev->intr_coalesce[i]; - - info->pending_limt = qp_pending_limit; - info->coalesce_timer_cfg = qp_coalesc_timer_cfg; - - info->resend_timer_cfg = SPNIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG; - - info->pkt_rate_high = SPNIC_RX_RATE_HIGH; - info->rx_usecs_high = SPNIC_RX_COAL_TIME_HIGH; - info->rx_pending_limt_high = SPNIC_RX_PENDING_LIMIT_HIGH; - info->pkt_rate_low = SPNIC_RX_RATE_LOW; - info->rx_usecs_low = SPNIC_RX_COAL_TIME_LOW; - info->rx_pending_limt_low = SPNIC_RX_PENDING_LIMIT_LOW; - } -} - -static int spnic_init_intr_coalesce(struct spnic_nic_dev *nic_dev) -{ - u64 size; - - if (qp_pending_limit != SPNIC_DEAULT_TXRX_MSIX_PENDING_LIMIT || - qp_coalesc_timer_cfg != SPNIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG) - nic_dev->intr_coal_set_flag = 1; - else - nic_dev->intr_coal_set_flag = 0; - - size = sizeof(*nic_dev->intr_coalesce) * nic_dev->max_qps; - if (!size) { - nic_err(&nic_dev->pdev->dev, "Cannot allocate zero size intr coalesce\n"); - return -EINVAL; - } - nic_dev->intr_coalesce = kzalloc(size, GFP_KERNEL); - if (!nic_dev->intr_coalesce) { - nic_err(&nic_dev->pdev->dev, "Failed to alloc intr coalesce\n"); - return -ENOMEM; - } - - init_intr_coal_param(nic_dev); - - if 
(test_bit(SPNIC_INTR_ADAPT, &nic_dev->flags)) - nic_dev->adaptive_rx_coal = 1; - else - nic_dev->adaptive_rx_coal = 0; - - return 0; -} - -static void spnic_free_intr_coalesce(struct spnic_nic_dev *nic_dev) -{ - kfree(nic_dev->intr_coalesce); -} - -static int spnic_alloc_txrxqs(struct spnic_nic_dev *nic_dev) -{ - struct net_device *netdev = nic_dev->netdev; - int err; - - err = spnic_alloc_txqs(netdev); - if (err) { - nic_err(&nic_dev->pdev->dev, "Failed to alloc txqs\n"); - return err; - } - - err = spnic_alloc_rxqs(netdev); - if (err) { - nic_err(&nic_dev->pdev->dev, "Failed to alloc rxqs\n"); - goto alloc_rxqs_err; - } - - err = spnic_init_intr_coalesce(nic_dev); - if (err) { - nic_err(&nic_dev->pdev->dev, "Failed to init_intr_coalesce\n"); - goto init_intr_err; - } - - return 0; - -init_intr_err: - spnic_free_rxqs(netdev); - -alloc_rxqs_err: - spnic_free_txqs(netdev); - - return err; -} - -static void spnic_free_txrxqs(struct spnic_nic_dev *nic_dev) -{ - spnic_free_intr_coalesce(nic_dev); - spnic_free_rxqs(nic_dev->netdev); - spnic_free_txqs(nic_dev->netdev); -} - -static void spnic_sw_deinit(struct spnic_nic_dev *nic_dev) -{ - spnic_free_txrxqs(nic_dev); - - spnic_clean_mac_list_filter(nic_dev); - - spnic_del_mac(nic_dev->hwdev, nic_dev->netdev->dev_addr, 0, - sphw_global_func_id(nic_dev->hwdev), SPHW_CHANNEL_NIC); - - spnic_clear_rss_config(nic_dev); -} - -static int spnic_sw_init(struct spnic_nic_dev *nic_dev) -{ - struct net_device *netdev = nic_dev->netdev; - u64 nic_feature; - int err = 0; - - nic_feature = spnic_get_feature_cap(nic_dev->hwdev); - nic_feature &= SPNIC_DRV_FEATURE; - spnic_update_nic_feature(nic_dev->hwdev, nic_feature); - - sema_init(&nic_dev->port_state_sem, 1); - - err = spnic_dcb_init(nic_dev); - if (err) { - nic_err(&nic_dev->pdev->dev, "Failed to init dcb\n"); - return -EFAULT; - } - - nic_dev->q_params.sq_depth = SPNIC_SQ_DEPTH; - nic_dev->q_params.rq_depth = SPNIC_RQ_DEPTH; - - spnic_try_to_enable_rss(nic_dev); - - err = spnic_get_default_mac(nic_dev->hwdev, netdev->dev_addr); - if (err) { - nic_err(&nic_dev->pdev->dev, "Failed to get MAC address\n"); - goto get_mac_err; - } - - if (!is_valid_ether_addr(netdev->dev_addr)) { - if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev)) { - nic_err(&nic_dev->pdev->dev, "Invalid MAC address %pM\n", netdev->dev_addr); - err = -EIO; - goto err_mac; - } - - nic_info(&nic_dev->pdev->dev, "Invalid MAC address %pM, using random\n", - netdev->dev_addr); - eth_hw_addr_random(netdev); - } - - err = spnic_set_mac(nic_dev->hwdev, netdev->dev_addr, 0, - sphw_global_func_id(nic_dev->hwdev), SPHW_CHANNEL_NIC); - /* When this is VF driver, we must consider that PF has already set VF - * MAC, and we can't consider this condition is error status during - * driver probe procedure. 
- */ - if (err && err != SPNIC_PF_SET_VF_ALREADY) { - nic_err(&nic_dev->pdev->dev, "Failed to set default MAC\n"); - goto set_mac_err; - } - - /* MTU range: 384 - 9600 */ - netdev->min_mtu = SPNIC_MIN_MTU_SIZE; - netdev->max_mtu = SPNIC_MAX_JUMBO_FRAME_SIZE; - - err = spnic_alloc_txrxqs(nic_dev); - if (err) { - nic_err(&nic_dev->pdev->dev, "Failed to alloc qps\n"); - goto alloc_qps_err; - } - - return 0; - -alloc_qps_err: - spnic_del_mac(nic_dev->hwdev, netdev->dev_addr, 0, sphw_global_func_id(nic_dev->hwdev), - SPHW_CHANNEL_NIC); - -set_mac_err: -err_mac: -get_mac_err: - spnic_clear_rss_config(nic_dev); - - return err; -} - -static void spnic_assign_netdev_ops(struct spnic_nic_dev *adapter) -{ - spnic_set_netdev_ops(adapter); - if (!SPNIC_FUNC_IS_VF(adapter->hwdev)) - spnic_set_ethtool_ops(adapter->netdev); - else - spnicvf_set_ethtool_ops(adapter->netdev); - - adapter->netdev->watchdog_timeo = WATCHDOG_TIMEOUT * HZ; -} - -static int spnic_validate_parameters(struct spnic_lld_dev *lld_dev) -{ - struct pci_dev *pdev = lld_dev->pdev; - - /* If weight exceeds the queue depth, the queue resources will be - * exhausted, and increasing it has no effect. - */ - if (!poll_weight || poll_weight > SPNIC_MAX_RX_QUEUE_DEPTH) { - nic_warn(&pdev->dev, "Module Parameter poll_weight is out of range: [1, %d], resetting to %d\n", - SPNIC_MAX_RX_QUEUE_DEPTH, DEFAULT_POLL_WEIGHT); - poll_weight = DEFAULT_POLL_WEIGHT; - } - - /* check rx_buff value, default rx_buff is 2KB. - * Valid rx_buff include 2KB/4KB/8KB/16KB. - */ - if (rx_buff != RX_BUFF_VALID_2KB && rx_buff != RX_BUFF_VALID_4KB && - rx_buff != RX_BUFF_VALID_8KB && rx_buff != RX_BUFF_VALID_16KB) { - nic_warn(&pdev->dev, "Module Parameter rx_buff value %u is out of range, must be 2^n. Valid range is 2 - 16, resetting to %dKB", - rx_buff, DEFAULT_RX_BUFF_LEN); - rx_buff = DEFAULT_RX_BUFF_LEN; - } - - return 0; -} - -static void adaptive_configuration_init(struct spnic_nic_dev *nic_dev) -{ - /* TODO: */ -} - -static int set_interrupt_moder(struct spnic_nic_dev *nic_dev, u16 q_id, - u8 coalesc_timer_cfg, u8 pending_limt) -{ - struct interrupt_info info; - int err; - - memset(&info, 0, sizeof(info)); - - if (coalesc_timer_cfg == nic_dev->rxqs[q_id].last_coalesc_timer_cfg && - pending_limt == nic_dev->rxqs[q_id].last_pending_limt) - return 0; - - /* netdev not running or qp not in using, - * don't need to set coalesce to hw - */ - if (!SPHW_CHANNEL_RES_VALID(nic_dev) || q_id >= nic_dev->q_params.num_qps) - return 0; - - info.lli_set = 0; - info.interrupt_coalesc_set = 1; - info.coalesc_timer_cfg = coalesc_timer_cfg; - info.pending_limt = pending_limt; - info.msix_index = nic_dev->q_params.irq_cfg[q_id].msix_entry_idx; - info.resend_timer_cfg = nic_dev->intr_coalesce[q_id].resend_timer_cfg; - - err = sphw_set_interrupt_cfg(nic_dev->hwdev, info, SPHW_CHANNEL_NIC); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to modify moderation for Queue: %u\n", q_id); - } else { - nic_dev->rxqs[q_id].last_coalesc_timer_cfg = coalesc_timer_cfg; - nic_dev->rxqs[q_id].last_pending_limt = pending_limt; - } - - return err; -} - -static void calc_coal_para(struct spnic_nic_dev *nic_dev, struct spnic_intr_coal_info *q_coal, - u64 rx_rate, u8 *coalesc_timer_cfg, u8 *pending_limt) -{ - if (rx_rate < q_coal->pkt_rate_low) { - *coalesc_timer_cfg = q_coal->rx_usecs_low; - *pending_limt = q_coal->rx_pending_limt_low; - } else if (rx_rate > q_coal->pkt_rate_high) { - *coalesc_timer_cfg = q_coal->rx_usecs_high; - *pending_limt = q_coal->rx_pending_limt_high; - } else 
{ - *coalesc_timer_cfg = - (u8)((rx_rate - q_coal->pkt_rate_low) * - (q_coal->rx_usecs_high - q_coal->rx_usecs_low) / - (q_coal->pkt_rate_high - q_coal->pkt_rate_low) + q_coal->rx_usecs_low); - - *pending_limt = q_coal->rx_pending_limt_low; - } -} - -static void update_queue_coal(struct spnic_nic_dev *nic_dev, u16 qid, - u64 rx_rate, u64 avg_pkt_size, u64 tx_rate) -{ - struct spnic_intr_coal_info *q_coal = NULL; - u8 coalesc_timer_cfg, pending_limt; - - q_coal = &nic_dev->intr_coalesce[qid]; - - if (rx_rate > SPNIC_RX_RATE_THRESH && - avg_pkt_size > SPNIC_AVG_PKT_SMALL) { - calc_coal_para(nic_dev, q_coal, rx_rate, &coalesc_timer_cfg, &pending_limt); - } else { - coalesc_timer_cfg = SPNIC_LOWEST_LATENCY; - pending_limt = q_coal->rx_pending_limt_low; - } - - set_interrupt_moder(nic_dev, qid, coalesc_timer_cfg, pending_limt); -} - -void spnic_auto_moderation_work(struct work_struct *work) -{ - struct delayed_work *delay = to_delayed_work(work); - struct spnic_nic_dev *nic_dev = container_of(delay, struct spnic_nic_dev, moderation_task); - unsigned long period = (unsigned long)(jiffies - nic_dev->last_moder_jiffies); - u64 rx_packets, rx_bytes, rx_pkt_diff, rx_rate, avg_pkt_size; - u64 tx_packets, tx_bytes, tx_pkt_diff, tx_rate; - u16 qid; - - if (!test_bit(SPNIC_INTF_UP, &nic_dev->flags)) - return; - - queue_delayed_work(nic_dev->workq, &nic_dev->moderation_task, SPNIC_MODERATONE_DELAY); - - if (!nic_dev->adaptive_rx_coal || !period) - return; - - for (qid = 0; qid < nic_dev->q_params.num_qps; qid++) { - rx_packets = nic_dev->rxqs[qid].rxq_stats.packets; - rx_bytes = nic_dev->rxqs[qid].rxq_stats.bytes; - tx_packets = nic_dev->txqs[qid].txq_stats.packets; - tx_bytes = nic_dev->txqs[qid].txq_stats.bytes; - - rx_pkt_diff = rx_packets - nic_dev->rxqs[qid].last_moder_packets; - avg_pkt_size = rx_pkt_diff ? 
- ((unsigned long)(rx_bytes - nic_dev->rxqs[qid].last_moder_bytes)) / - rx_pkt_diff : 0; - - rx_rate = rx_pkt_diff * HZ / period; - tx_pkt_diff = tx_packets - nic_dev->txqs[qid].last_moder_packets; - tx_rate = tx_pkt_diff * HZ / period; - - update_queue_coal(nic_dev, qid, rx_rate, avg_pkt_size, tx_rate); - - nic_dev->rxqs[qid].last_moder_packets = rx_packets; - nic_dev->rxqs[qid].last_moder_bytes = rx_bytes; - nic_dev->txqs[qid].last_moder_packets = tx_packets; - nic_dev->txqs[qid].last_moder_bytes = tx_bytes; - } - - nic_dev->last_moder_jiffies = jiffies; -} - -void spnic_periodic_work_handler(struct work_struct *work) -{ - struct delayed_work *delay = to_delayed_work(work); - struct spnic_nic_dev *nic_dev = container_of(delay, struct spnic_nic_dev, periodic_work); - - if (test_and_clear_bit(EVENT_WORK_TX_TIMEOUT, &nic_dev->event_flag)) - sphw_fault_event_report(nic_dev->hwdev, SPHW_FAULT_SRC_TX_TIMEOUT, - FAULT_LEVEL_SERIOUS_FLR); - - queue_delayed_work(nic_dev->workq, &nic_dev->periodic_work, HZ); -} - -static void free_nic_dev(struct spnic_nic_dev *nic_dev) -{ - destroy_workqueue(nic_dev->workq); - kfree(nic_dev->vlan_bitmap); -} - -static int setup_nic_dev(struct net_device *netdev, struct spnic_lld_dev *lld_dev) -{ - struct pci_dev *pdev = lld_dev->pdev; - struct spnic_nic_dev *nic_dev; - u32 page_num; - - nic_dev = (struct spnic_nic_dev *)netdev_priv(netdev); - nic_dev->netdev = netdev; - SET_NETDEV_DEV(netdev, &pdev->dev); - nic_dev->hwdev = lld_dev->hwdev; - nic_dev->pdev = pdev; - nic_dev->poll_weight = (int)poll_weight; - nic_dev->msg_enable = DEFAULT_MSG_ENABLE; - nic_dev->lro_replenish_thld = lro_replenish_thld; - nic_dev->rx_buff_len = (u16)(rx_buff * CONVERT_UNIT); - nic_dev->dma_rx_buff_size = RX_BUFF_NUM_PER_PAGE * nic_dev->rx_buff_len; - page_num = nic_dev->dma_rx_buff_size / PAGE_SIZE; - nic_dev->page_order = page_num > 0 ? 
ilog2(page_num) : 0; - - mutex_init(&nic_dev->nic_mutex); - - nic_dev->vlan_bitmap = kzalloc(VLAN_BITMAP_SIZE(nic_dev), GFP_KERNEL); - if (!nic_dev->vlan_bitmap) { - nic_err(&pdev->dev, "Failed to allocate vlan bitmap\n"); - return -ENOMEM; - } - - nic_dev->workq = create_singlethread_workqueue(SPNIC_NIC_DEV_WQ_NAME); - if (!nic_dev->workq) { - nic_err(&pdev->dev, "Failed to initialize nic workqueue\n"); - kfree(nic_dev->vlan_bitmap); - return -ENOMEM; - } - - INIT_DELAYED_WORK(&nic_dev->periodic_work, spnic_periodic_work_handler); - - INIT_LIST_HEAD(&nic_dev->uc_filter_list); - INIT_LIST_HEAD(&nic_dev->mc_filter_list); - INIT_WORK(&nic_dev->rx_mode_work, spnic_set_rx_mode_work); - - INIT_LIST_HEAD(&nic_dev->rx_flow_rule.rules); - INIT_LIST_HEAD(&nic_dev->tcam.tcam_list); - INIT_LIST_HEAD(&nic_dev->tcam.tcam_dynamic_info.tcam_dynamic_list); - - return 0; -} - -static int spnic_set_default_hw_feature(struct spnic_nic_dev *nic_dev) -{ - int err; - - if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev)) { - err = spnic_dcb_reset_hw_config(nic_dev); - if (err) { - nic_err(&nic_dev->pdev->dev, "Failed to reset hw dcb configuration\n"); - return err; - } - } - - err = spnic_set_nic_feature_to_hw(nic_dev->hwdev); - if (err) { - nic_err(&nic_dev->pdev->dev, "Failed to set nic features\n"); - return err; - } - - /* enable all hw features in netdev->features */ - return spnic_set_hw_features(nic_dev); -} - -static int nic_probe(struct spnic_lld_dev *lld_dev, void **uld_dev, char *uld_dev_name) -{ - struct pci_dev *pdev = lld_dev->pdev; - struct spnic_nic_dev *nic_dev = NULL; - struct net_device *netdev = NULL; - u16 max_qps, glb_func_id; - int err; - - /* *uld_dev should always no be NULL */ - *uld_dev = lld_dev; - - if (!sphw_support_nic(lld_dev->hwdev, NULL)) { - nic_info(&pdev->dev, "Hw don't support nic\n"); - return 0; - } - - nic_info(&pdev->dev, "NIC service probe begin\n"); - - err = spnic_validate_parameters(lld_dev); - if (err) { - err = -EINVAL; - goto err_out; - } - - glb_func_id = sphw_global_func_id(lld_dev->hwdev); - err = sphw_func_reset(lld_dev->hwdev, glb_func_id, SPHW_NIC_RES, SPHW_CHANNEL_NIC); - if (err) { - nic_err(&pdev->dev, "Failed to reset function\n"); - goto err_out; - } - - max_qps = sphw_func_max_nic_qnum(lld_dev->hwdev); - netdev = alloc_etherdev_mq(sizeof(*nic_dev), max_qps); - if (!netdev) { - nic_err(&pdev->dev, "Failed to allocate ETH device\n"); - err = -ENOMEM; - goto err_out; - } - - nic_dev = (struct spnic_nic_dev *)netdev_priv(netdev); - err = setup_nic_dev(netdev, lld_dev); - if (err) - goto setup_dev_err; - - adaptive_configuration_init(nic_dev); - - /* get nic cap from hw */ - sphw_support_nic(lld_dev->hwdev, &nic_dev->nic_cap); - - err = spnic_init_nic_hwdev(nic_dev->hwdev, pdev, &pdev->dev, nic_dev->rx_buff_len); - if (err) { - nic_err(&pdev->dev, "Failed to init nic hwdev\n"); - goto init_nic_hwdev_err; - } - - err = spnic_sw_init(nic_dev); - if (err) - goto sw_init_err; - - spnic_assign_netdev_ops(nic_dev); - netdev_feature_init(netdev); - - err = spnic_set_default_hw_feature(nic_dev); - if (err) - goto set_features_err; - - spnic_register_notifier(nic_dev); - - err = register_netdev(netdev); - if (err) { - nic_err(&pdev->dev, "Failed to register netdev\n"); - err = -ENOMEM; - goto netdev_err; - } - - queue_delayed_work(nic_dev->workq, &nic_dev->periodic_work, HZ); - netif_carrier_off(netdev); - - *uld_dev = nic_dev; - nicif_info(nic_dev, probe, netdev, "Register netdev succeed\n"); - nic_info(&pdev->dev, "NIC service probed\n"); - - return 0; - -netdev_err: - 
spnic_unregister_notifier(nic_dev); - -set_features_err: - spnic_sw_deinit(nic_dev); - -sw_init_err: - spnic_free_nic_hwdev(nic_dev->hwdev); - -init_nic_hwdev_err: - free_nic_dev(nic_dev); -setup_dev_err: - free_netdev(netdev); - -err_out: - nic_err(&pdev->dev, "NIC service probe failed\n"); - - return err; -} - -static void nic_remove(struct spnic_lld_dev *lld_dev, void *adapter) -{ - struct spnic_nic_dev *nic_dev = adapter; - struct net_device *netdev = NULL; - - if (!nic_dev || !sphw_support_nic(lld_dev->hwdev, NULL)) - return; - - nic_info(&lld_dev->pdev->dev, "NIC service remove begin\n"); - - netdev = nic_dev->netdev; - - unregister_netdev(netdev); - spnic_unregister_notifier(nic_dev); - - cancel_delayed_work_sync(&nic_dev->periodic_work); - cancel_work_sync(&nic_dev->rx_mode_work); - destroy_workqueue(nic_dev->workq); - - spnic_sw_deinit(nic_dev); - - spnic_flush_rx_flow_rule(nic_dev); - spnic_free_nic_hwdev(nic_dev->hwdev); - - kfree(nic_dev->vlan_bitmap); - - free_netdev(netdev); - - nic_info(&lld_dev->pdev->dev, "NIC service removed\n"); -} - -static void sriov_state_change(struct spnic_nic_dev *nic_dev, - const struct sphw_sriov_state_info *info) -{ - if (!info->enable) - spnic_clear_vfs_info(nic_dev->hwdev); -} - -const char *g_spnic_module_link_err[LINK_ERR_NUM] = { - "Unrecognized module", -}; - -void sphw_port_module_event_handler(struct spnic_nic_dev *nic_dev, struct sphw_event_info *event) -{ - enum port_module_event_type type = event->module_event.type; - enum link_err_type err_type = event->module_event.err_type; - - switch (type) { - case SPHW_PORT_MODULE_CABLE_PLUGGED: - case SPHW_PORT_MODULE_CABLE_UNPLUGGED: - nicif_info(nic_dev, link, nic_dev->netdev, - "Port module event: Cable %s\n", - type == SPHW_PORT_MODULE_CABLE_PLUGGED ? 
- "plugged" : "unplugged"); - break; - case SPHW_PORT_MODULE_LINK_ERR: - if (err_type >= LINK_ERR_NUM) { - nicif_info(nic_dev, link, nic_dev->netdev, - "Link failed, Unknown error type: 0x%x\n", err_type); - } else { - nicif_info(nic_dev, link, nic_dev->netdev, "Link failed, error type: 0x%x: %s\n", - err_type, g_spnic_module_link_err[err_type]); - } - break; - default: - nicif_err(nic_dev, link, nic_dev->netdev, "Unknown port module type %d\n", type); - break; - } -} - -void nic_event(struct spnic_lld_dev *lld_dev, void *adapter, struct sphw_event_info *event) -{ - struct spnic_nic_dev *nic_dev = adapter; - enum sphw_event_type type; - - if (!nic_dev || !event || !sphw_support_nic(lld_dev->hwdev, NULL)) - return; - - type = event->type; - - switch (type) { - case SPHW_EVENT_LINK_DOWN: - spnic_link_status_change(nic_dev, false); - break; - case SPHW_EVENT_LINK_UP: - spnic_link_status_change(nic_dev, true); - break; - case SPHW_EVENT_SRIOV_STATE_CHANGE: - sriov_state_change(nic_dev, &event->sriov_state); - break; - case SPHW_EVENT_PORT_MODULE_EVENT: - sphw_port_module_event_handler(nic_dev, event); - break; - case SPHW_EVENT_FAULT: - if (event->info.fault_level == FAULT_LEVEL_SERIOUS_FLR && - event->info.event.chip.func_id == sphw_global_func_id(lld_dev->hwdev)) - spnic_link_status_change(nic_dev, false); - break; - case SPHW_EVENT_PCIE_LINK_DOWN: - case SPHW_EVENT_HEART_LOST: - spnic_link_status_change(nic_dev, false); - break; - default: - break; - } -} - -struct net_device *spnic_get_netdev_by_lld(struct spnic_lld_dev *lld_dev) -{ - struct spnic_nic_dev *nic_dev = NULL; - - if (!lld_dev || !sphw_support_nic(lld_dev->hwdev, NULL)) - return NULL; - - nic_dev = spnic_get_uld_dev_by_pdev(lld_dev->pdev, SERVICE_T_NIC); - if (!nic_dev) { - sdk_err(&lld_dev->pdev->dev, - "There's no net device attached on the pci device"); - return NULL; - } - - return nic_dev->netdev; -} - -void *spnic_get_hwdev_by_netdev(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - if (!nic_dev || !netdev) - return NULL; - - return nic_dev->hwdev; -} - -struct spnic_uld_info nic_uld_info = { - .probe = nic_probe, - .remove = nic_remove, - .suspend = NULL, - .resume = NULL, - .event = nic_event, - .ioctl = nic_ioctl, -}; diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_mgmt_interface.h b/drivers/net/ethernet/ramaxel/spnic/spnic_mgmt_interface.h deleted file mode 100644 index 720f6fcf5f0a..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_mgmt_interface.h +++ /dev/null @@ -1,605 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_MGMT_INTERFACE_H -#define SPNIC_MGMT_INTERFACE_H - -#include <linux/if_ether.h> - -#include "sphw_mgmt_msg_base.h" - -#define SPNIC_CMD_OP_SET MGMT_MSG_CMD_OP_SET -#define SPNIC_CMD_OP_GET MGMT_MSG_CMD_OP_GET - -#define SPNIC_CMD_OP_ADD 1 -#define SPNIC_CMD_OP_DEL 0 - -enum nic_feature_cap { - NIC_F_CSUM = BIT(0), - NIC_F_SCTP_CRC = BIT(1), - NIC_F_TSO = BIT(2), - NIC_F_LRO = BIT(3), - NIC_F_UFO = BIT(4), - NIC_F_RSS = BIT(5), - NIC_F_RX_VLAN_FILTER = BIT(6), - NIC_F_RX_VLAN_STRIP = BIT(7), - NIC_F_TX_VLAN_INSERT = BIT(8), - NIC_F_VXLAN_OFFLOAD = BIT(9), - NIC_F_IPSEC_OFFLOAD = BIT(10), - NIC_F_FDIR = BIT(11), - NIC_F_PROMISC = BIT(12), - NIC_F_ALLMULTI = BIT(13), -}; - -#define NIC_F_ALL_MASK 0x3FFF - -#define NIC_MAX_FEATURE_QWORD 4 -struct spnic_cmd_feature_nego { - struct mgmt_msg_head msg_head; - - u16 func_id; - u8 opcode; /* 1: set, 0: get */ - u8 rsvd; - u64 
s_feature[NIC_MAX_FEATURE_QWORD]; -}; - -struct spnic_port_mac_set { - struct mgmt_msg_head msg_head; - - u16 func_id; - u16 vlan_id; - u16 rsvd1; - u8 mac[ETH_ALEN]; -}; - -struct spnic_port_mac_update { - struct mgmt_msg_head msg_head; - - u16 func_id; - u16 vlan_id; - u16 rsvd1; - u8 old_mac[ETH_ALEN]; - u16 rsvd2; - u8 new_mac[ETH_ALEN]; -}; - -struct spnic_vport_state { - struct mgmt_msg_head msg_head; - - u16 func_id; - u16 rsvd1; - u8 state; /* 0--disable, 1--enable */ - u8 rsvd2[3]; -}; - -struct spnic_port_state { - struct mgmt_msg_head msg_head; - - u16 func_id; - u16 rsvd1; - u8 state; /* 0--disable, 1--enable */ - u8 rsvd2[3]; -}; - -struct spnic_cmd_clear_qp_resource { - struct mgmt_msg_head msg_head; - - u16 func_id; - u16 rsvd1; -}; - -struct spnic_port_stats_info { - struct mgmt_msg_head msg_head; - - u16 func_id; - u16 rsvd1; -}; - -struct spnic_vport_stats { - u64 tx_unicast_pkts_vport; - u64 tx_unicast_bytes_vport; - u64 tx_multicast_pkts_vport; - u64 tx_multicast_bytes_vport; - u64 tx_broadcast_pkts_vport; - u64 tx_broadcast_bytes_vport; - - u64 rx_unicast_pkts_vport; - u64 rx_unicast_bytes_vport; - u64 rx_multicast_pkts_vport; - u64 rx_multicast_bytes_vport; - u64 rx_broadcast_pkts_vport; - u64 rx_broadcast_bytes_vport; - - u64 tx_discard_vport; - u64 rx_discard_vport; - u64 tx_err_vport; - u64 rx_err_vport; -}; - -struct spnic_cmd_vport_stats { - struct mgmt_msg_head msg_head; - - u32 stats_size; - u32 rsvd1; - struct spnic_vport_stats stats; - u64 rsvd2[6]; -}; - -struct spnic_cmd_qpn { - struct mgmt_msg_head msg_head; - - u16 func_id; - u16 base_qpn; -}; - -enum spnic_func_tbl_cfg_bitmap { - FUNC_CFG_INIT, - FUNC_CFG_RX_BUF_SIZE, - FUNC_CFG_MTU, -}; - -struct spnic_func_tbl_cfg { - u16 rx_wqe_buf_size; - u16 mtu; - u32 rsvd[9]; -}; - -struct spnic_cmd_set_func_tbl { - struct mgmt_msg_head msg_head; - - u16 func_id; - u16 rsvd; - - u32 cfg_bitmap; - struct spnic_func_tbl_cfg tbl_cfg; -}; - -struct spnic_cmd_cons_idx_attr { - struct mgmt_msg_head msg_head; - - u16 func_idx; - u8 dma_attr_off; - u8 pending_limit; - u8 coalescing_time; - u8 intr_en; - u16 intr_idx; - u32 l2nic_sqn; - u32 rsvd; - u64 ci_addr; -}; - -struct spnic_cmd_vlan_offload { - struct mgmt_msg_head msg_head; - - u16 func_id; - u8 vlan_offload; - u8 rsvd1[5]; -}; - -struct spnic_cmd_lro_config { - struct mgmt_msg_head msg_head; - - u16 func_id; - u8 opcode; - u8 rsvd1; - u8 lro_ipv4_en; - u8 lro_ipv6_en; - u8 lro_max_pkt_len; /* unit is 1K */ - u8 resv2[13]; -}; - -struct spnic_cmd_lro_timer { - struct mgmt_msg_head msg_head; - - u8 opcode; /* 1: set timer value, 0: get timer value */ - u8 rsvd1; - u16 rsvd2; - u32 timer; -}; - -struct spnic_cmd_vf_vlan_config { - struct mgmt_msg_head msg_head; - - u16 func_id; - u8 opcode; - u8 rsvd1; - u16 vlan_id; - u8 qos; - u8 rsvd2[5]; -}; - -struct spnic_cmd_spoofchk_set { - struct mgmt_msg_head msg_head; - - u16 func_id; - u8 state; - u8 rsvd1; -}; - -struct spnic_cmd_tx_rate_cfg { - struct mgmt_msg_head msg_head; - - u16 func_id; - u16 rsvd1; - u32 min_rate; - u32 max_rate; - u8 rsvd2[8]; -}; - -struct spnic_cmd_port_info { - struct mgmt_msg_head msg_head; - - u8 port_id; - u8 rsvd1[3]; - u8 port_type; - u8 autoneg_cap; - u8 autoneg_state; - u8 duplex; - u8 speed; - u8 fec; - u16 rsvd2; - u32 rsvd3[4]; -}; - -struct spnic_cmd_register_vf { - struct mgmt_msg_head msg_head; - - u8 op_register; /* 0 - unregister, 1 - register */ - u8 rsvd[39]; -}; - -struct spnic_cmd_link_state { - struct mgmt_msg_head msg_head; - - u8 port_id; - u8 state; - u16 rsvd1; -}; - 
-struct spnic_cmd_vlan_config { - struct mgmt_msg_head msg_head; - - u16 func_id; - u8 opcode; - u8 rsvd1; - u16 vlan_id; - u16 rsvd2; -}; - -/* set vlan filter */ -struct spnic_cmd_set_vlan_filter { - struct mgmt_msg_head msg_head; - - u16 func_id; - u8 resvd[2]; - u32 vlan_filter_ctrl; /* bit0:vlan filter en; bit1:broadcast_filter_en */ -}; - -struct spnic_cmd_link_ksettings_info { - struct mgmt_msg_head msg_head; - - u8 port_id; - u8 rsvd1[3]; - - u32 valid_bitmap; - u8 speed; /* enum nic_speed_level */ - u8 autoneg; /* 0 - off, 1 - on */ - u8 fec; /* 0 - RSFEC, 1 - BASEFEC, 2 - NOFEC */ - u8 rsvd2[21]; /* reserved for duplex, port, etc. */ -}; - -struct mpu_lt_info { - u8 node; - u8 inst; - u8 entry_size; - u8 rsvd; - u32 lt_index; - u32 offset; - u32 len; -}; - -struct nic_mpu_lt_opera { - struct mgmt_msg_head msg_head; - struct mpu_lt_info net_lt_cmd; - u8 data[100]; -}; - -struct spnic_rx_mode_config { - struct mgmt_msg_head msg_head; - - u16 func_id; - u16 rsvd1; - u32 rx_mode; -}; - -/* rss */ -struct spnic_rss_context_table { - struct mgmt_msg_head msg_head; - - u16 func_id; - u16 rsvd1; - u32 context; -}; - -struct spnic_cmd_rss_engine_type { - struct mgmt_msg_head msg_head; - - u16 func_id; - u8 opcode; - u8 hash_engine; - u8 rsvd1[4]; -}; - -#define SPNIC_RSS_INDIR_SIZE 256 -#define SPNIC_RSS_KEY_SIZE 40 - -struct spnic_cmd_rss_hash_key { - struct mgmt_msg_head msg_head; - - u16 func_id; - u8 opcode; - u8 rsvd1; - u8 key[SPNIC_RSS_KEY_SIZE]; -}; - -struct spnic_rss_indir_table { - struct mgmt_msg_head msg_head; - - u16 func_id; - u16 rsvd1; - u8 indir[SPNIC_RSS_INDIR_SIZE]; -}; - -#define SPNIC_DCB_UP_MAX 0x8 -#define SPNIC_DCB_COS_MAX 0x8 -#define SPNIC_DCB_TC_MAX 0x8 - -struct spnic_cmd_rss_config { - struct mgmt_msg_head msg_head; - - u16 func_id; - u8 rss_en; - u8 rq_priority_number; - u8 prio_tc[SPNIC_DCB_COS_MAX]; - u16 num_qps; - u16 rsvd1; -}; - -struct spnic_dcb_state { - u8 dcb_on; - u8 default_cos; - u16 rsvd1; - u8 up_cos[SPNIC_DCB_UP_MAX]; - u32 rsvd2[7]; -}; - -struct spnic_cmd_vf_dcb_state { - struct mgmt_msg_head msg_head; - - struct spnic_dcb_state state; -}; - -struct spnic_up_ets_cfg { - struct mgmt_msg_head msg_head; - - u8 port_id; - u8 rsvd1[3]; - - u8 cos_tc[SPNIC_DCB_COS_MAX]; - u8 tc_bw[SPNIC_DCB_TC_MAX]; - u8 cos_prio[SPNIC_DCB_COS_MAX]; - u8 cos_bw[SPNIC_DCB_COS_MAX]; - u8 tc_prio[SPNIC_DCB_TC_MAX]; -}; - -struct spnic_cmd_set_pfc { - struct mgmt_msg_head msg_head; - - u8 port_id; - u8 rsvd1; - u8 pfc_en; - u8 pfc_bitmap; - u8 rsvd2[4]; -}; - -struct spnic_cos_up_map { - struct mgmt_msg_head msg_head; - - u8 port_id; - /* every bit indicate index of map is valid 1 or not 0*/ - u8 cos_valid_mask; - u16 rsvd1; - - /* user priority in cos(index:cos, value: up pri) */ - u8 map[SPNIC_DCB_UP_MAX]; -}; - -struct spnic_cmd_pause_config { - struct mgmt_msg_head msg_head; - - u8 port_id; - u8 opcode; - u16 rsvd1; - u8 auto_neg; - u8 rx_pause; - u8 tx_pause; - u8 rsvd2[5]; -}; - -struct nic_cmd_tx_pause_notice { - struct mgmt_msg_head head; - - u32 tx_pause_except; - u32 except_level; - u32 rsvd; -}; - -#define SPNIC_CMD_OP_FREE 0 -#define SPNIC_CMD_OP_ALLOC 1 - -struct spnic_cmd_cfg_qps { - struct mgmt_msg_head msg_head; - - u16 func_id; - u8 opcode; /* 1: alloc qp, 0: free qp */ - u8 rsvd1; - u16 num_qps; - u16 rsvd2; -}; - -struct spnic_cmd_led_config { - struct mgmt_msg_head msg_head; - - u8 port; - u8 type; - u8 mode; - u8 rsvd1; -}; - -struct spnic_cmd_port_loopback { - struct mgmt_msg_head msg_head; - - u8 port_id; - u8 opcode; - u8 mode; - u8 en; - u32 
rsvd1[2]; -}; - -struct spnic_cmd_get_light_module_abs { - struct mgmt_msg_head msg_head; - - u8 port_id; - u8 abs_status; /* 0:present, 1:absent */ - u8 rsv[2]; -}; - -#define STD_SFP_INFO_MAX_SIZE 640 -struct spnic_cmd_get_std_sfp_info { - struct mgmt_msg_head msg_head; - - u8 port_id; - u8 wire_type; - u16 eeprom_len; - u32 rsvd; - u8 sfp_info[STD_SFP_INFO_MAX_SIZE]; -}; - -struct spnic_cable_plug_event { - struct mgmt_msg_head msg_head; - - u16 func_id; - u8 plugged; /* 0: unplugged, 1: plugged */ - u8 port_id; -}; - -struct nic_cmd_mac_info { - struct mgmt_msg_head head; - - u32 valid_bitmap; - u16 rsvd; - - u8 host_id[32]; - u8 port_id[32]; - u8 mac_addr[192]; -}; - -#define SPNIC_TCAM_BLOCK_ENABLE 1 -#define SPNIC_TCAM_BLOCK_DISABLE 0 -#define SPNIC_TCAM_BLOCK_NORMAL_TYPE 0 -#define SPNIC_MAX_TCAM_RULES_NUM 4096 - -struct nic_cmd_set_tcam_enable { - struct mgmt_msg_head head; - - u16 func_id; - u8 tcam_enable; - u8 rsvd1; - u32 rsvd2; -}; - -/* alloc tcam block input struct */ -struct nic_cmd_ctrl_tcam_block_in { - struct mgmt_msg_head head; - - u16 func_id; /* func_id */ - u8 alloc_en; /* 0: free tcam block, 1: alloc tcam block */ - u8 tcam_type; /* 0: alloc 16 size tcam block, 1: alloc 0 size tcam block */ - u16 tcam_block_index; - u16 alloc_block_num; -}; - -/* alloc tcam block output struct */ -struct nic_cmd_ctrl_tcam_block_out { - struct mgmt_msg_head head; - - u16 func_id; - u8 alloc_en; - u8 tcam_type; - u16 tcam_block_index; - u16 mpu_alloc_block_size; -}; - -struct nic_cmd_flush_tcam_rules { - struct mgmt_msg_head head; - - u16 func_id; /* func_id */ - u16 rsvd; -}; - -struct nic_cmd_dfx_fdir_tcam_block_table { - struct mgmt_msg_head head; - u8 tcam_type; - u8 valid; - u16 tcam_block_index; - u16 use_function_id; - u16 rsvd; -}; - -struct tcam_result { - u32 qid; - u32 rsvd; -}; - -#define TCAM_FLOW_KEY_SIZE 44 - -struct tcam_key_x_y { - u8 x[TCAM_FLOW_KEY_SIZE]; - u8 y[TCAM_FLOW_KEY_SIZE]; -}; - -struct nic_tcam_cfg_rule { - u32 index; - struct tcam_result data; - struct tcam_key_x_y key; -}; - -struct nic_cmd_fdir_add_rule { - struct mgmt_msg_head head; - - u16 func_id; - u16 rsvd; - struct nic_tcam_cfg_rule rule; -}; - -struct nic_cmd_fdir_del_rules { - struct mgmt_msg_head head; - - u16 func_id; - u16 rsvd; - u32 index_start; - u32 index_num; -}; - -struct nic_cmd_fdir_get_rule { - struct mgmt_msg_head head; - - u32 index; - u32 valid; - struct tcam_key_x_y key; - struct tcam_result data; - u64 packet_count; - u64 byte_count; -}; - -#endif /* SPNIC_MGMT_INTERFACE_H */ diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_netdev_ops.c b/drivers/net/ethernet/ramaxel/spnic/spnic_netdev_ops.c deleted file mode 100644 index a4f668682f37..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_netdev_ops.c +++ /dev/null @@ -1,1526 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/device.h> -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/etherdevice.h> -#include <linux/netdevice.h> -#include <linux/netlink.h> -#include <linux/debugfs.h> -#include <linux/ip.h> -#include <linux/bpf.h> - -#include "sphw_hw.h" -#include "sphw_crm.h" -#include "spnic_nic_io.h" -#include "spnic_nic_dev.h" -#include "spnic_tx.h" -#include "spnic_rx.h" -#include "spnic_dcb.h" - -#define SPNIC_DEFAULT_RX_CSUM_OFFLOAD 0xFFF - -#define SPNIC_LRO_DEFAULT_COAL_PKT_SIZE 32 -#define SPNIC_LRO_DEFAULT_TIME_LIMIT 16 
-#define SPNIC_WAIT_FLUSH_QP_RESOURCE_TIMEOUT 2000 -static void spnic_nic_set_rx_mode(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - if (netdev_uc_count(netdev) != nic_dev->netdev_uc_cnt || - netdev_mc_count(netdev) != nic_dev->netdev_mc_cnt) { - set_bit(SPNIC_UPDATE_MAC_FILTER, &nic_dev->flags); - nic_dev->netdev_uc_cnt = netdev_uc_count(netdev); - nic_dev->netdev_mc_cnt = netdev_mc_count(netdev); - } - - queue_work(nic_dev->workq, &nic_dev->rx_mode_work); -} - -int spnic_alloc_txrxq_resources(struct spnic_nic_dev *nic_dev, - struct spnic_dyna_txrxq_params *q_params) -{ - u32 size; - int err; - - size = sizeof(*q_params->txqs_res) * q_params->num_qps; - q_params->txqs_res = kzalloc(size, GFP_KERNEL); - if (!q_params->txqs_res) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc txqs resources array\n"); - return -ENOMEM; - } - - size = sizeof(*q_params->rxqs_res) * q_params->num_qps; - q_params->rxqs_res = kzalloc(size, GFP_KERNEL); - if (!q_params->rxqs_res) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc rxqs resource array\n"); - err = -ENOMEM; - goto alloc_rxqs_res_arr_err; - } - - size = sizeof(*q_params->irq_cfg) * q_params->num_qps; - q_params->irq_cfg = kzalloc(size, GFP_KERNEL); - if (!q_params->irq_cfg) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc irq resource array\n"); - err = -ENOMEM; - goto alloc_irq_cfg_err; - } - - err = spnic_alloc_txqs_res(nic_dev, q_params->num_qps, - q_params->sq_depth, q_params->txqs_res); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to alloc txqs resource\n"); - goto alloc_txqs_res_err; - } - - err = spnic_alloc_rxqs_res(nic_dev, q_params->num_qps, - q_params->rq_depth, q_params->rxqs_res); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc rxqs resource\n"); - goto alloc_rxqs_res_err; - } - - return 0; - -alloc_rxqs_res_err: - spnic_free_txqs_res(nic_dev, q_params->num_qps, q_params->sq_depth, - q_params->txqs_res); - -alloc_txqs_res_err: - kfree(q_params->irq_cfg); - q_params->irq_cfg = NULL; - -alloc_irq_cfg_err: - kfree(q_params->rxqs_res); - q_params->rxqs_res = NULL; - -alloc_rxqs_res_arr_err: - kfree(q_params->txqs_res); - q_params->txqs_res = NULL; - - return err; -} - -void spnic_free_txrxq_resources(struct spnic_nic_dev *nic_dev, - struct spnic_dyna_txrxq_params *q_params) -{ - spnic_free_rxqs_res(nic_dev, q_params->num_qps, q_params->rq_depth, q_params->rxqs_res); - spnic_free_txqs_res(nic_dev, q_params->num_qps, q_params->sq_depth, q_params->txqs_res); - - kfree(q_params->irq_cfg); - q_params->irq_cfg = NULL; - - kfree(q_params->rxqs_res); - q_params->rxqs_res = NULL; - - kfree(q_params->txqs_res); - q_params->txqs_res = NULL; -} - -int spnic_configure_txrxqs(struct spnic_nic_dev *nic_dev, - struct spnic_dyna_txrxq_params *q_params) -{ - int err; - - err = spnic_configure_txqs(nic_dev, q_params->num_qps, - q_params->sq_depth, q_params->txqs_res); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to configure txqs\n"); - return err; - } - - err = spnic_configure_rxqs(nic_dev, q_params->num_qps, - q_params->rq_depth, q_params->rxqs_res); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to configure rxqs\n"); - return err; - } - - return 0; -} - -static void config_dcb_qps_map(struct spnic_nic_dev *nic_dev) -{ - struct net_device *netdev = nic_dev->netdev; - u8 i, num_tcs; - u16 num_rss; - - if (!test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) { - spnic_update_tx_db_cos(nic_dev); - return; 
- } - - num_tcs = (u8)netdev_get_num_tc(netdev); - /* For now, we don't support to change num_tcs */ - if (num_tcs != nic_dev->hw_dcb_cfg.max_cos || - nic_dev->q_params.num_qps < num_tcs || - !test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) { - nicif_err(nic_dev, drv, netdev, "Invalid num_tcs: %u or num_qps: %u, disable DCB\n", - num_tcs, nic_dev->q_params.num_qps); - netdev_reset_tc(netdev); - nic_dev->q_params.num_tc = 0; - clear_bit(SPNIC_DCB_ENABLE, &nic_dev->flags); - /* if we can't enable rss or get enough num_qps, - * need to sync default configure to hw - */ - spnic_configure_dcb(netdev); - } else { - /* use 0~max_cos-1 as tc for netdev */ - num_rss = nic_dev->q_params.num_rss; - for (i = 0; i < num_tcs; i++) - netdev_set_tc_queue(netdev, i, num_rss, (u16)(num_rss * i)); - } - - spnic_update_tx_db_cos(nic_dev); -} - -static int spnic_configure(struct spnic_nic_dev *nic_dev) -{ - struct net_device *netdev = nic_dev->netdev; - int err; - - err = spnic_set_port_mtu(nic_dev->hwdev, (u16)netdev->mtu); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to set mtu\n"); - return err; - } - - config_dcb_qps_map(nic_dev); - - /* rx rss init */ - err = spnic_rx_configure(netdev); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to configure rx\n"); - return err; - } - - return 0; -} - -static void spnic_remove_configure(struct spnic_nic_dev *nic_dev) -{ - spnic_rx_remove_configure(nic_dev->netdev); -} - -/* try to modify the number of irq to the target number, - * and return the actual number of irq. - */ -static u16 spnic_qp_irq_change(struct spnic_nic_dev *nic_dev, u16 dst_num_qp_irq) -{ - struct irq_info *qps_irq_info = nic_dev->qps_irq_info; - u16 resp_irq_num, irq_num_gap, i; - u16 idx; - int err; - - if (dst_num_qp_irq > nic_dev->num_qp_irq) { - irq_num_gap = dst_num_qp_irq - nic_dev->num_qp_irq; - err = sphw_alloc_irqs(nic_dev->hwdev, SERVICE_T_NIC, irq_num_gap, - &qps_irq_info[nic_dev->num_qp_irq], &resp_irq_num); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc irqs\n"); - return nic_dev->num_qp_irq; - } - - nic_dev->num_qp_irq += resp_irq_num; - } else if (dst_num_qp_irq < nic_dev->num_qp_irq) { - irq_num_gap = nic_dev->num_qp_irq - dst_num_qp_irq; - for (i = 0; i < irq_num_gap; i++) { - idx = (nic_dev->num_qp_irq - i) - 1; - sphw_free_irq(nic_dev->hwdev, SERVICE_T_NIC, qps_irq_info[idx].irq_id); - qps_irq_info[idx].irq_id = 0; - qps_irq_info[idx].msix_entry_idx = 0; - } - nic_dev->num_qp_irq = dst_num_qp_irq; - } - - return nic_dev->num_qp_irq; -} - -static void config_dcb_num_qps(struct spnic_nic_dev *nic_dev, - struct spnic_dyna_txrxq_params *q_params, - u16 max_qps) -{ - u8 num_tcs = q_params->num_tc; - u16 num_rss; - - if (!num_tcs || !test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) - return; - - if (num_tcs == nic_dev->hw_dcb_cfg.max_cos && max_qps >= num_tcs) { - num_rss = max_qps / num_tcs; - num_rss = min_t(u16, num_rss, q_params->rss_limit); - q_params->num_rss = num_rss; - q_params->num_qps = (u16)(num_tcs * num_rss); - } /* else will disable DCB in config_dcb_qps_map() */ -} - -static void spnic_config_num_qps(struct spnic_nic_dev *nic_dev, - struct spnic_dyna_txrxq_params *q_params) -{ - u16 alloc_num_irq, cur_num_irq; - u16 dst_num_irq; - - if (test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) { - q_params->num_rss = q_params->rss_limit; - q_params->num_qps = q_params->rss_limit; - } else { - q_params->num_rss = 0; - q_params->num_qps = 1; - } - - config_dcb_num_qps(nic_dev, q_params, nic_dev->max_qps); - - if (nic_dev->num_qp_irq >= 
q_params->num_qps) - goto out; - - cur_num_irq = nic_dev->num_qp_irq; - - alloc_num_irq = spnic_qp_irq_change(nic_dev, q_params->num_qps); - if (alloc_num_irq < q_params->num_qps) { - q_params->num_qps = alloc_num_irq; - q_params->num_rss = q_params->num_qps; - config_dcb_num_qps(nic_dev, q_params, q_params->num_qps); - nicif_warn(nic_dev, drv, nic_dev->netdev, - "Can not get enough irqs, adjust num_qps to %u\n", - q_params->num_qps); - - /* The current irq may be in use, we must keep it */ - dst_num_irq = max_t(u16, cur_num_irq, q_params->num_qps); - spnic_qp_irq_change(nic_dev, dst_num_irq); - } - -out: - nicif_info(nic_dev, drv, nic_dev->netdev, "Finally num_qps: %u, num_rss: %u\n", - q_params->num_qps, q_params->num_rss); -} - -/* determin num_qps from rss_tmpl_id/irq_num/dcb_en */ -int spnic_setup_num_qps(struct spnic_nic_dev *nic_dev) -{ - struct net_device *netdev = nic_dev->netdev; - u32 irq_size; - - nic_dev->num_qp_irq = 0; - - irq_size = sizeof(*nic_dev->qps_irq_info) * nic_dev->max_qps; - if (!irq_size) { - nicif_err(nic_dev, drv, netdev, "Cannot allocate zero size entries\n"); - return -EINVAL; - } - nic_dev->qps_irq_info = kzalloc(irq_size, GFP_KERNEL); - if (!nic_dev->qps_irq_info) { - nicif_err(nic_dev, drv, netdev, "Failed to alloc qps_irq_info\n"); - return -ENOMEM; - } - - spnic_config_num_qps(nic_dev, &nic_dev->q_params); - - return 0; -} - -static void spnic_destroy_num_qps(struct spnic_nic_dev *nic_dev) -{ - u16 i; - - for (i = 0; i < nic_dev->num_qp_irq; i++) - sphw_free_irq(nic_dev->hwdev, SERVICE_T_NIC, nic_dev->qps_irq_info[i].irq_id); - - kfree(nic_dev->qps_irq_info); -} - -int spnic_force_port_disable(struct spnic_nic_dev *nic_dev) -{ - int err; - - down(&nic_dev->port_state_sem); - - err = spnic_set_port_enable(nic_dev->hwdev, false, SPHW_CHANNEL_NIC); - if (!err) - nic_dev->force_port_disable = true; - - up(&nic_dev->port_state_sem); - - return err; -} - -int spnic_force_set_port_state(struct spnic_nic_dev *nic_dev, bool enable) -{ - int err = 0; - - down(&nic_dev->port_state_sem); - - nic_dev->force_port_disable = false; - err = spnic_set_port_enable(nic_dev->hwdev, enable, SPHW_CHANNEL_NIC); - - up(&nic_dev->port_state_sem); - - return err; -} - -int spnic_maybe_set_port_state(struct spnic_nic_dev *nic_dev, bool enable) -{ - int err; - - down(&nic_dev->port_state_sem); - - /* Do nothing when force disable - * Port will disable when call force port disable - * and should not enable port when in force mode - */ - if (nic_dev->force_port_disable) { - up(&nic_dev->port_state_sem); - return 0; - } - - err = spnic_set_port_enable(nic_dev->hwdev, enable, SPHW_CHANNEL_NIC); - - up(&nic_dev->port_state_sem); - - return err; -} - -static void spnic_print_link_message(struct spnic_nic_dev *nic_dev, u8 link_status) -{ - if (nic_dev->link_status == link_status) - return; - - nic_dev->link_status = link_status; - - nicif_info(nic_dev, link, nic_dev->netdev, "Link is %s\n", - (link_status ? 
"up" : "down")); -} - -int spnic_alloc_channel_resources(struct spnic_nic_dev *nic_dev, - struct spnic_dyna_qp_params *qp_params, - struct spnic_dyna_txrxq_params *trxq_params) -{ - int err; - - qp_params->num_qps = trxq_params->num_qps; - qp_params->sq_depth = trxq_params->sq_depth; - qp_params->rq_depth = trxq_params->rq_depth; - - err = spnic_alloc_qps(nic_dev->hwdev, nic_dev->qps_irq_info, qp_params); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc qps\n"); - return err; - } - - err = spnic_alloc_txrxq_resources(nic_dev, trxq_params); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc txrxq resources\n"); - spnic_free_qps(nic_dev->hwdev, qp_params); - return err; - } - - return 0; -} - -void spnic_free_channel_resources(struct spnic_nic_dev *nic_dev, - struct spnic_dyna_qp_params *qp_params, - struct spnic_dyna_txrxq_params *trxq_params) -{ - mutex_lock(&nic_dev->nic_mutex); - spnic_free_txrxq_resources(nic_dev, trxq_params); - spnic_free_qps(nic_dev->hwdev, qp_params); - mutex_unlock(&nic_dev->nic_mutex); -} - -int spnic_open_channel(struct spnic_nic_dev *nic_dev, struct spnic_dyna_qp_params *qp_params, - struct spnic_dyna_txrxq_params *trxq_params) -{ - int err; - - err = spnic_init_qps(nic_dev->hwdev, qp_params); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to init qps\n"); - return err; - } - - err = spnic_configure_txrxqs(nic_dev, trxq_params); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to configure txrxqs\n"); - goto cfg_txrxqs_err; - } - - err = spnic_qps_irq_init(nic_dev); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to init txrxq irq\n"); - goto init_qp_irq_err; - } - - err = spnic_configure(nic_dev); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to init txrxq irq\n"); - goto configure_err; - } - - return 0; - -configure_err: - spnic_qps_irq_deinit(nic_dev); - -init_qp_irq_err: -cfg_txrxqs_err: - spnic_deinit_qps(nic_dev->hwdev, qp_params); - - return err; -} - -void spnic_close_channel(struct spnic_nic_dev *nic_dev, - struct spnic_dyna_qp_params *qp_params) -{ - spnic_remove_configure(nic_dev); - spnic_qps_irq_deinit(nic_dev); - spnic_deinit_qps(nic_dev->hwdev, qp_params); -} - -int spnic_vport_up(struct spnic_nic_dev *nic_dev) -{ - struct net_device *netdev = nic_dev->netdev; - u8 link_status = 0; - u16 glb_func_id; - int err; - - glb_func_id = sphw_global_func_id(nic_dev->hwdev); - err = spnic_set_vport_enable(nic_dev->hwdev, glb_func_id, true, - SPHW_CHANNEL_NIC); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to enable vport\n"); - goto vport_enable_err; - } - - err = spnic_maybe_set_port_state(nic_dev, true); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to enable port\n"); - goto port_enable_err; - } - - netif_set_real_num_tx_queues(netdev, nic_dev->q_params.num_qps); - netif_set_real_num_rx_queues(netdev, nic_dev->q_params.num_qps); - netif_tx_wake_all_queues(netdev); - - err = spnic_get_link_state(nic_dev->hwdev, &link_status); - if (!err && link_status) - netif_carrier_on(netdev); - - queue_delayed_work(nic_dev->workq, &nic_dev->moderation_task, SPNIC_MODERATONE_DELAY); - - spnic_print_link_message(nic_dev, link_status); - - if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev)) - spnic_notify_all_vfs_link_changed(nic_dev->hwdev, link_status); - - return 0; - -port_enable_err: - spnic_set_vport_enable(nic_dev->hwdev, glb_func_id, false, SPHW_CHANNEL_NIC); - -vport_enable_err: - spnic_flush_qps_res(nic_dev->hwdev); - /* After set vport disable 
100ms, no packets will be send to host */ - msleep(100); - - return err; -} - -void spnic_vport_down(struct spnic_nic_dev *nic_dev) -{ - u16 glb_func_id; - - netif_carrier_off(nic_dev->netdev); - netif_tx_disable(nic_dev->netdev); - - cancel_delayed_work_sync(&nic_dev->moderation_task); - - if (sphw_get_chip_present_flag(nic_dev->hwdev)) { - if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev)) - spnic_notify_all_vfs_link_changed(nic_dev->hwdev, 0); - - spnic_maybe_set_port_state(nic_dev, false); - - glb_func_id = sphw_global_func_id(nic_dev->hwdev); - spnic_set_vport_enable(nic_dev->hwdev, glb_func_id, false, SPHW_CHANNEL_NIC); - - spnic_flush_txqs(nic_dev->netdev); - spnic_flush_qps_res(nic_dev->hwdev); - /* After set vport disable 100ms, - * no packets will be send to host - * FPGA set 2000ms - */ - msleep(SPNIC_WAIT_FLUSH_QP_RESOURCE_TIMEOUT); - } -} - -int spnic_change_channel_settings(struct spnic_nic_dev *nic_dev, - struct spnic_dyna_txrxq_params *trxq_params, - spnic_reopen_handler reopen_handler, const void *priv_data) -{ - struct spnic_dyna_qp_params new_qp_params = {0}; - struct spnic_dyna_qp_params cur_qp_params = {0}; - int err; - - spnic_config_num_qps(nic_dev, trxq_params); - - err = spnic_alloc_channel_resources(nic_dev, &new_qp_params, trxq_params); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to alloc channel resources\n"); - return err; - } - - if (!test_and_set_bit(SPNIC_CHANGE_RES_INVALID, &nic_dev->flags)) { - spnic_vport_down(nic_dev); - spnic_close_channel(nic_dev, &cur_qp_params); - spnic_free_channel_resources(nic_dev, &cur_qp_params, &nic_dev->q_params); - } - - if (nic_dev->num_qp_irq > trxq_params->num_qps) - spnic_qp_irq_change(nic_dev, trxq_params->num_qps); - nic_dev->q_params = *trxq_params; - - if (reopen_handler) - reopen_handler(nic_dev, priv_data); - - err = spnic_open_channel(nic_dev, &new_qp_params, trxq_params); - if (err) - goto open_channel_err; - - err = spnic_vport_up(nic_dev); - if (err) - goto vport_up_err; - - clear_bit(SPNIC_CHANGE_RES_INVALID, &nic_dev->flags); - nicif_info(nic_dev, drv, nic_dev->netdev, "Change channel settings success\n"); - - return 0; - -vport_up_err: - spnic_close_channel(nic_dev, &new_qp_params); - -open_channel_err: - spnic_free_channel_resources(nic_dev, &new_qp_params, trxq_params); - - return err; -} - -int spnic_open(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct spnic_dyna_qp_params qp_params = {0}; - int err; - - if (test_bit(SPNIC_INTF_UP, &nic_dev->flags)) { - nicif_info(nic_dev, drv, netdev, "Netdev already open, do nothing\n"); - return 0; - } - - err = spnic_init_nicio_res(nic_dev->hwdev); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to init nicio resources\n"); - return err; - } - - err = spnic_setup_num_qps(nic_dev); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to setup num_qps\n"); - goto setup_qps_err; - } - - err = spnic_alloc_channel_resources(nic_dev, &qp_params, &nic_dev->q_params); - if (err) - goto alloc_channel_res_err; - - err = spnic_open_channel(nic_dev, &qp_params, &nic_dev->q_params); - if (err) - goto open_channel_err; - - err = spnic_vport_up(nic_dev); - if (err) - goto vport_up_err; - - set_bit(SPNIC_INTF_UP, &nic_dev->flags); - nicif_info(nic_dev, drv, nic_dev->netdev, "Netdev is up\n"); - - return 0; - -vport_up_err: - spnic_close_channel(nic_dev, &qp_params); - -open_channel_err: - spnic_free_channel_resources(nic_dev, &qp_params, &nic_dev->q_params); - -alloc_channel_res_err: - spnic_destroy_num_qps(nic_dev); - 
-setup_qps_err: - spnic_deinit_nicio_res(nic_dev->hwdev); - - return err; -} - -int spnic_close(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct spnic_dyna_qp_params qp_params = {0}; - - if (!test_and_clear_bit(SPNIC_INTF_UP, &nic_dev->flags)) { - nicif_info(nic_dev, drv, netdev, "Netdev already close, do nothing\n"); - return 0; - } - - if (test_and_clear_bit(SPNIC_CHANGE_RES_INVALID, &nic_dev->flags)) - goto out; - - spnic_vport_down(nic_dev); - spnic_close_channel(nic_dev, &qp_params); - spnic_free_channel_resources(nic_dev, &qp_params, &nic_dev->q_params); - -out: - spnic_deinit_nicio_res(nic_dev->hwdev); - spnic_destroy_num_qps(nic_dev); - - nicif_info(nic_dev, drv, nic_dev->netdev, "Netdev is down\n"); - - return 0; -} - -#define IPV6_ADDR_LEN 4 -#define PKT_INFO_LEN 9 -#define BITS_PER_TUPLE 32 -static u32 calc_xor_rss(u8 *rss_tunple, u32 len) -{ - u32 hash_value; - u32 i; - - hash_value = rss_tunple[0]; - for (i = 1; i < len; i++) - hash_value = hash_value ^ rss_tunple[i]; - - return hash_value; -} - -static u32 calc_toep_rss(u32 *rss_tunple, u32 len, const u32 *rss_key) -{ - u32 rss = 0; - u32 i, j; - - for (i = 1; i <= len; i++) { - for (j = 0; j < BITS_PER_TUPLE; j++) - if (rss_tunple[i - 1] & ((u32)1 << - (u32)((BITS_PER_TUPLE - 1) - j))) - rss ^= (rss_key[i - 1] << j) | - (u32)((u64)rss_key[i] >> (BITS_PER_TUPLE - j)); - } - - return rss; -} - -#define RSS_VAL(val, type) \ - (((type) == SPNIC_RSS_HASH_ENGINE_TYPE_TOEP) ? ntohl(val) : (val)) - -static u8 parse_ipv6_info(struct sk_buff *skb, u32 *rss_tunple, u8 hash_engine, u32 *len) -{ - struct ipv6hdr *ipv6hdr = ipv6_hdr(skb); - u32 *saddr = (u32 *)&ipv6hdr->saddr; - u32 *daddr = (u32 *)&ipv6hdr->daddr; - u8 i; - - for (i = 0; i < IPV6_ADDR_LEN; i++) { - rss_tunple[i] = RSS_VAL(daddr[i], hash_engine); - /* The offset of the sport relative to the dport is 4 */ - rss_tunple[(u32)(i + IPV6_ADDR_LEN)] = RSS_VAL(saddr[i], hash_engine); - } - *len = IPV6_ADDR_LEN + IPV6_ADDR_LEN; - - if (skb_network_header(skb) + sizeof(*ipv6hdr) == skb_transport_header(skb)) - return ipv6hdr->nexthdr; - return 0; -} - -u16 select_queue_by_hash_func(struct net_device *dev, struct sk_buff *skb, - unsigned int num_tx_queues) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(dev); - struct nic_rss_type rss_type = nic_dev->rss_type; - struct iphdr *iphdr = NULL; - u32 rss_tunple[PKT_INFO_LEN] = {0}; - u32 len = 0; - u32 hash = 0; - u8 hash_engine = nic_dev->rss_hash_engine; - u8 l4_proto; - unsigned char *l4_hdr = NULL; - - if (skb_rx_queue_recorded(skb)) { - hash = skb_get_rx_queue(skb); - - if (unlikely(hash >= num_tx_queues)) - hash %= num_tx_queues; - return (u16)hash; - } - - iphdr = ip_hdr(skb); - if (iphdr->version == IPV4_VERSION) { - rss_tunple[len++] = RSS_VAL(iphdr->daddr, hash_engine); - rss_tunple[len++] = RSS_VAL(iphdr->saddr, hash_engine); - l4_proto = iphdr->protocol; - } else if (iphdr->version == IPV6_VERSION) { - l4_proto = parse_ipv6_info(skb, (u32 *)rss_tunple, hash_engine, &len); - } else { - return (u16)hash; - } - - if ((iphdr->version == IPV4_VERSION && - ((l4_proto == IPPROTO_UDP && rss_type.udp_ipv4) || - (l4_proto == IPPROTO_TCP && rss_type.tcp_ipv4))) || - (iphdr->version == IPV6_VERSION && - ((l4_proto == IPPROTO_UDP && rss_type.udp_ipv6) || - (l4_proto == IPPROTO_TCP && rss_type.tcp_ipv6)))) { - l4_hdr = skb_transport_header(skb); - /* High 16 bits are dport, low 16 bits are sport. 
*/ - rss_tunple[len++] = ((u32)ntohs(*((u16 *)l4_hdr + 1U)) << 16) | - ntohs(*(u16 *)l4_hdr); - } /* rss_type.ipv4 and rss_type.ipv6 default on. */ - - if (hash_engine == SPNIC_RSS_HASH_ENGINE_TYPE_TOEP) - hash = calc_toep_rss((u32 *)rss_tunple, len, nic_dev->rss_hkey_be); - else - hash = calc_xor_rss((u8 *)rss_tunple, len * (u32)sizeof(u32)); - - return (u16)nic_dev->rss_indir[hash & 0xFF]; -} - -static u16 spnic_select_queue(struct net_device *netdev, struct sk_buff *skb, - struct net_device *sb_dev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - if (skb->vlan_tci) - skb->priority = skb->vlan_tci >> VLAN_PRIO_SHIFT; - - if (netdev_get_num_tc(netdev)) - goto fall_back; - - if (test_bit(SPNIC_SAME_RXTX, &nic_dev->flags)) - return select_queue_by_hash_func(netdev, skb, netdev->real_num_tx_queues); - -fall_back: - return netdev_pick_tx(netdev, skb, NULL); -} - -static void spnic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) - -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct spnic_txq_stats *txq_stats = NULL; - struct spnic_rxq_stats *rxq_stats = NULL; - struct spnic_txq *txq = NULL; - struct spnic_rxq *rxq = NULL; - u64 bytes, packets, dropped, errors; - unsigned int start; - int i; - - bytes = 0; - packets = 0; - dropped = 0; - for (i = 0; i < nic_dev->max_qps; i++) { - if (!nic_dev->txqs) - break; - - txq = &nic_dev->txqs[i]; - txq_stats = &txq->txq_stats; - do { - start = u64_stats_fetch_begin(&txq_stats->syncp); - bytes += txq_stats->bytes; - packets += txq_stats->packets; - dropped += txq_stats->dropped; - } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); - } - stats->tx_packets = packets; - stats->tx_bytes = bytes; - stats->tx_dropped = dropped; - - bytes = 0; - packets = 0; - errors = 0; - dropped = 0; - for (i = 0; i < nic_dev->max_qps; i++) { - if (!nic_dev->rxqs) - break; - - rxq = &nic_dev->rxqs[i]; - rxq_stats = &rxq->rxq_stats; - do { - start = u64_stats_fetch_begin(&rxq_stats->syncp); - bytes += rxq_stats->bytes; - packets += rxq_stats->packets; - errors += rxq_stats->csum_errors + rxq_stats->other_errors; - dropped += rxq_stats->dropped; - } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); - } - stats->rx_packets = packets; - stats->rx_bytes = bytes; - stats->rx_errors = errors; - stats->rx_dropped = dropped; -} - -static void spnic_tx_timeout(struct net_device *netdev, unsigned int txqueue) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct spnic_io_queue *sq = NULL; - bool hw_err = false; - u32 sw_pi, hw_ci; - u8 q_id; - - SPNIC_NIC_STATS_INC(nic_dev, netdev_tx_timeout); - nicif_err(nic_dev, drv, netdev, "Tx timeout\n"); - - for (q_id = 0; q_id < nic_dev->q_params.num_qps; q_id++) { - if (!netif_xmit_stopped(netdev_get_tx_queue(netdev, q_id))) - continue; - - sq = nic_dev->txqs[q_id].sq; - sw_pi = spnic_get_sq_local_pi(sq); - hw_ci = spnic_get_sq_hw_ci(sq); - nicif_info(nic_dev, drv, netdev, "txq%u: sw_pi: %hu, hw_ci: %u, sw_ci: %u, napi->state: 0x%lx\n", - q_id, sw_pi, hw_ci, spnic_get_sq_local_ci(sq), - nic_dev->q_params.irq_cfg[q_id].napi.state); - - if (sw_pi != hw_ci) - hw_err = true; - } - - if (hw_err) - set_bit(EVENT_WORK_TX_TIMEOUT, &nic_dev->event_flag); -} - -static int spnic_change_mtu(struct net_device *netdev, int new_mtu) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - u32 mtu = (u32)new_mtu; - int err = 0; - - u32 xdp_max_mtu; - - if (spnic_is_xdp_enable(nic_dev)) { - xdp_max_mtu = spnic_xdp_max_mtu(nic_dev); - if (mtu > xdp_max_mtu) { - nicif_err(nic_dev, 
drv, netdev, "Max MTU for xdp usage is %d\n", - xdp_max_mtu); - return -EINVAL; - } - } - - err = spnic_set_port_mtu(nic_dev->hwdev, (u16)mtu); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to change port mtu to %d\n", - new_mtu); - } else { - nicif_info(nic_dev, drv, nic_dev->netdev, "Change mtu from %u to %d\n", - netdev->mtu, new_mtu); - netdev->mtu = mtu; - } - - return err; -} - -static int spnic_set_mac_addr(struct net_device *netdev, void *addr) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct sockaddr *saddr = addr; - int err; - - if (!is_valid_ether_addr(saddr->sa_data)) - return -EADDRNOTAVAIL; - - if (ether_addr_equal(netdev->dev_addr, saddr->sa_data)) { - nicif_info(nic_dev, drv, netdev, "Already using mac address %pM\n", - saddr->sa_data); - return 0; - } - - err = spnic_update_mac(nic_dev->hwdev, netdev->dev_addr, saddr->sa_data, 0, - sphw_global_func_id(nic_dev->hwdev)); - if (err) - return err; - - ether_addr_copy(netdev->dev_addr, saddr->sa_data); - - nicif_info(nic_dev, drv, netdev, "Set new mac address %pM\n", saddr->sa_data); - - return 0; -} - -static int spnic_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - unsigned long *vlan_bitmap = nic_dev->vlan_bitmap; - u16 func_id; - u32 col, line; - int err = 0; - - /* VLAN 0 donot be added, which is the same as VLAN 0 deleted. */ - if (vid == 0) - goto end; - - col = VID_COL(nic_dev, vid); - line = VID_LINE(nic_dev, vid); - - func_id = sphw_global_func_id(nic_dev->hwdev); - - err = spnic_add_vlan(nic_dev->hwdev, vid, func_id); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to add vlan %u\n", vid); - goto end; - } - - set_bit(col, &vlan_bitmap[line]); - - nicif_info(nic_dev, drv, netdev, "Add vlan %u\n", vid); - -end: - return err; -} - -static int spnic_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - unsigned long *vlan_bitmap = nic_dev->vlan_bitmap; - u16 func_id; - int col, line; - int err = 0; - - col = VID_COL(nic_dev, vid); - line = VID_LINE(nic_dev, vid); - - /* In the broadcast scenario, ucode finds the corresponding function - * based on VLAN 0 of vlan table. If we delete VLAN 0, the VLAN function - * is affected. - */ - if (vid == 0) - goto end; - - func_id = sphw_global_func_id(nic_dev->hwdev); - err = spnic_del_vlan(nic_dev->hwdev, vid, func_id); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to delete vlan\n"); - goto end; - } - - clear_bit(col, &vlan_bitmap[line]); - - nicif_info(nic_dev, drv, netdev, "Remove vlan %u\n", vid); - -end: - return err; -} - -#define SET_FEATURES_OP_STR(op) ((op) ? 
"Enable" : "Disable") - -static int set_feature_rx_csum(struct spnic_nic_dev *nic_dev, netdev_features_t wanted_features, - netdev_features_t features, netdev_features_t *failed_features) -{ - netdev_features_t changed = wanted_features ^ features; - - if (changed & NETIF_F_RXCSUM) - spnic_info(nic_dev, drv, "%s rx csum success\n", - SET_FEATURES_OP_STR(wanted_features & NETIF_F_RXCSUM)); - - return 0; -} - -static int set_feature_tso(struct spnic_nic_dev *nic_dev, netdev_features_t wanted_features, - netdev_features_t features, netdev_features_t *failed_features) -{ - netdev_features_t changed = wanted_features ^ features; - - if (changed & NETIF_F_TSO) - spnic_info(nic_dev, drv, "%s tso success\n", - SET_FEATURES_OP_STR(wanted_features & NETIF_F_TSO)); - - return 0; -} - -static int set_feature_lro(struct spnic_nic_dev *nic_dev, netdev_features_t wanted_features, - netdev_features_t features, netdev_features_t *failed_features) -{ - netdev_features_t changed = wanted_features ^ features; - bool en = !!(wanted_features & NETIF_F_LRO); - int err; - - if (!(changed & NETIF_F_LRO)) - return 0; - - if (en && spnic_is_xdp_enable(nic_dev)) { - spnic_err(nic_dev, drv, "Can not enable LRO when xdp is enable\n"); - *failed_features |= NETIF_F_LRO; - return -EINVAL; - } - - err = spnic_set_rx_lro_state(nic_dev->hwdev, en, SPNIC_LRO_DEFAULT_TIME_LIMIT, - SPNIC_LRO_DEFAULT_COAL_PKT_SIZE); - if (err) { - spnic_err(nic_dev, drv, "%s lro failed\n", SET_FEATURES_OP_STR(en)); - *failed_features |= NETIF_F_LRO; - } else { - spnic_info(nic_dev, drv, "%s lro success\n", SET_FEATURES_OP_STR(en)); - } - - return err; -} - -static int set_feature_rx_cvlan(struct spnic_nic_dev *nic_dev, netdev_features_t wanted_features, - netdev_features_t features, netdev_features_t *failed_features) -{ - netdev_features_t changed = wanted_features ^ features; - netdev_features_t vlan_feature = NETIF_F_HW_VLAN_CTAG_RX; - bool en = !!(wanted_features & vlan_feature); - int err; - - if (!(changed & vlan_feature)) - return 0; - - err = spnic_set_rx_vlan_offload(nic_dev->hwdev, en); - if (err) { - spnic_err(nic_dev, drv, "%s rxvlan failed\n", SET_FEATURES_OP_STR(en)); - *failed_features |= vlan_feature; - } else { - spnic_info(nic_dev, drv, "%s rxvlan success\n", SET_FEATURES_OP_STR(en)); - } - - return err; -} - -static int set_feature_vlan_filter(struct spnic_nic_dev *nic_dev, netdev_features_t wanted_features, - netdev_features_t features, netdev_features_t *failed_features) -{ - netdev_features_t changed = wanted_features ^ features; - netdev_features_t vlan_filter_feature = NETIF_F_HW_VLAN_CTAG_FILTER; - bool en = !!(wanted_features & vlan_filter_feature); - int err = 0; - - if (!(changed & vlan_filter_feature)) - return 0; - - if (err == 0) - err = spnic_set_vlan_fliter(nic_dev->hwdev, en); - if (err) { - spnic_err(nic_dev, drv, "%s rx vlan filter failed\n", SET_FEATURES_OP_STR(en)); - *failed_features |= vlan_filter_feature; - } else { - spnic_info(nic_dev, drv, "%s rx vlan filter success\n", SET_FEATURES_OP_STR(en)); - } - - return err; -} - -static int set_features(struct spnic_nic_dev *nic_dev, netdev_features_t pre_features, - netdev_features_t features) -{ - netdev_features_t failed_features = 0; - u32 err = 0; - - err |= (u32)set_feature_rx_csum(nic_dev, features, pre_features, &failed_features); - err |= (u32)set_feature_tso(nic_dev, features, pre_features, &failed_features); - err |= (u32)set_feature_lro(nic_dev, features, pre_features, &failed_features); - err |= (u32)set_feature_rx_cvlan(nic_dev, features, 
pre_features, &failed_features); - err |= (u32)set_feature_vlan_filter(nic_dev, features, pre_features, &failed_features); - if (err) { - nic_dev->netdev->features = features ^ failed_features; - return -EIO; - } - - return 0; -} - -static int spnic_set_features(struct net_device *netdev, netdev_features_t features) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - return set_features(nic_dev, nic_dev->netdev->features, features); -} - -int spnic_set_hw_features(struct spnic_nic_dev *nic_dev) -{ - /* enable all hw features in netdev->features */ - return set_features(nic_dev, ~nic_dev->netdev->features, nic_dev->netdev->features); -} - -static netdev_features_t spnic_fix_features(struct net_device *netdev, netdev_features_t features) -{ - /* If Rx checksum is disabled, then LRO should also be disabled */ - if (!(features & NETIF_F_RXCSUM)) - features &= ~NETIF_F_LRO; - - return features; -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void spnic_netpoll(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - u16 i; - - for (i = 0; i < nic_dev->q_params.num_qps; i++) - napi_schedule(&nic_dev->q_params.irq_cfg[i].napi); -} -#endif /* CONFIG_NET_POLL_CONTROLLER */ - -static int spnic_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) -{ - struct spnic_nic_dev *adapter = netdev_priv(netdev); - int err; - - if (is_multicast_ether_addr(mac) || vf >= pci_num_vf(adapter->pdev)) - return -EINVAL; - - err = spnic_set_vf_mac(adapter->hwdev, OS_VF_ID_TO_HW(vf), mac); - if (err) - return err; - - if (!is_zero_ether_addr(mac)) - nic_info(&adapter->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf); - else - nic_info(&adapter->pdev->dev, "Deleting MAC on VF %d\n", vf); - - nic_info(&adapter->pdev->dev, "Please reload the VF driver to make this change effective."); - - return 0; -} - -static int set_hw_vf_vlan(void *hwdev, u16 cur_vlanprio, int vf, u16 vlan, u8 qos) -{ - int err = 0; - u16 old_vlan = cur_vlanprio & VLAN_VID_MASK; - - if (vlan || qos) { - if (cur_vlanprio) { - err = spnic_kill_vf_vlan(hwdev, OS_VF_ID_TO_HW(vf)); - if (err) - return err; - } - err = spnic_add_vf_vlan(hwdev, OS_VF_ID_TO_HW(vf), vlan, qos); - } else { - err = spnic_kill_vf_vlan(hwdev, OS_VF_ID_TO_HW(vf)); - } - - if (err) - return err; - - return spnic_update_mac_vlan(hwdev, old_vlan, vlan, OS_VF_ID_TO_HW(vf)); -} - -#define SPNIC_MAX_VLAN_ID 4094 -#define SPNIC_MAX_QOS_NUM 7 - -static int spnic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, - u8 qos, __be16 vlan_proto) -{ - struct spnic_nic_dev *adapter = netdev_priv(netdev); - u16 vlanprio, cur_vlanprio; - - if (vf >= pci_num_vf(adapter->pdev) || vlan > SPNIC_MAX_VLAN_ID || qos > SPNIC_MAX_QOS_NUM) - return -EINVAL; - if (vlan_proto != htons(ETH_P_8021Q)) - return -EPROTONOSUPPORT; - vlanprio = vlan | qos << SPNIC_VLAN_PRIORITY_SHIFT; - cur_vlanprio = spnic_vf_info_vlanprio(adapter->hwdev, OS_VF_ID_TO_HW(vf)); - /* duplicate request, so just return success */ - if (vlanprio == cur_vlanprio) - return 0; - - return set_hw_vf_vlan(adapter->hwdev, cur_vlanprio, vf, vlan, qos); -} - -static int spnic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) -{ - struct spnic_nic_dev *adapter = netdev_priv(netdev); - int err = 0; - bool cur_spoofchk = false; - - if (vf >= pci_num_vf(adapter->pdev)) - return -EINVAL; - - cur_spoofchk = spnic_vf_info_spoofchk(adapter->hwdev, OS_VF_ID_TO_HW(vf)); - /* same request, so just return success */ - if ((setting && cur_spoofchk) || (!setting && !cur_spoofchk)) - 
return 0; - - err = spnic_set_vf_spoofchk(adapter->hwdev, OS_VF_ID_TO_HW(vf), setting); - if (!err) - nicif_info(adapter, drv, netdev, "Set VF %d spoofchk %s\n", - vf, setting ? "on" : "off"); - - return err; -} - -int spnic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) -{ - struct spnic_nic_dev *adapter = netdev_priv(netdev); - int err; - bool cur_trust; - - if (vf >= pci_num_vf(adapter->pdev)) - return -EINVAL; - - cur_trust = spnic_get_vf_trust(adapter->hwdev, OS_VF_ID_TO_HW(vf)); - /* same request, so just return success */ - if ((setting && cur_trust) || (!setting && !cur_trust)) - return 0; - - err = spnic_set_vf_trust(adapter->hwdev, OS_VF_ID_TO_HW(vf), setting); - if (!err) - nicif_info(adapter, drv, netdev, "Set VF %d trusted %s successfully\n", - vf, setting ? "on" : "off"); - else - nicif_err(adapter, drv, netdev, "Failed set VF %d trusted %s\n", - vf, setting ? "on" : "off"); - - return err; -} - -static int spnic_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi) -{ - struct spnic_nic_dev *adapter = netdev_priv(netdev); - - if (vf >= pci_num_vf(adapter->pdev)) - return -EINVAL; - - spnic_get_vf_config(adapter->hwdev, OS_VF_ID_TO_HW(vf), ivi); - - return 0; -} - -/** - * spnic_ndo_set_vf_link_state - * @netdev: network interface device structure - * @vf_id: VF identifier - * @link: required link state - * - * Set the link state of a specified VF, regardless of physical link state - **/ -int spnic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) -{ - static const char * const vf_link[] = {"auto", "enable", "disable"}; - struct spnic_nic_dev *adapter = netdev_priv(netdev); - int err; - - /* validate the request */ - if (vf_id >= pci_num_vf(adapter->pdev)) { - nicif_err(adapter, drv, netdev, "Invalid VF Identifier %d\n", vf_id); - return -EINVAL; - } - - err = spnic_set_vf_link_state(adapter->hwdev, OS_VF_ID_TO_HW(vf_id), link); - if (!err) - nicif_info(adapter, drv, netdev, "Set VF %d link state: %s\n", - vf_id, vf_link[link]); - - return err; -} - -static int is_set_vf_bw_param_valid(const struct spnic_nic_dev *adapter, - int vf, int min_tx_rate, int max_tx_rate) -{ - /* verify VF is active */ - if (vf >= pci_num_vf(adapter->pdev)) { - nicif_err(adapter, drv, adapter->netdev, "VF number must be less than %d\n", - pci_num_vf(adapter->pdev)); - return -EINVAL; - } - - if (max_tx_rate < min_tx_rate) { - nicif_err(adapter, drv, adapter->netdev, "Invalid rate, max rate %d must greater than min rate %d\n", - max_tx_rate, min_tx_rate); - return -EINVAL; - } - - return 0; -} - -#define SPNIC_TX_RATE_TABLE_FULL 12 - -static int spnic_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, int max_tx_rate) -{ - struct spnic_nic_dev *adapter = netdev_priv(netdev); - struct nic_port_info port_info = {0}; - u8 link_status = 0; - u32 speeds[] = {0, SPEED_10, SPEED_100, SPEED_1000, SPEED_10000, - SPEED_25000, SPEED_40000, SPEED_50000, SPEED_100000, - SPEED_200000}; - int err = 0; - - err = is_set_vf_bw_param_valid(adapter, vf, min_tx_rate, max_tx_rate); - if (err) - return err; - - err = spnic_get_link_state(adapter->hwdev, &link_status); - if (err) { - nicif_err(adapter, drv, netdev, "Get link status failed when set vf tx rate\n"); - return -EIO; - } - - if (!link_status) { - nicif_err(adapter, drv, netdev, "Link status must be up when set vf tx rate\n"); - return -EINVAL; - } - - err = spnic_get_port_info(adapter->hwdev, &port_info, SPHW_CHANNEL_NIC); - if (err || port_info.speed >= PORT_SPEED_UNKNOWN) - return 
-EIO; - - /* rate limit cannot be less than 0 and greater than link speed */ - if (max_tx_rate < 0 || max_tx_rate > speeds[port_info.speed]) { - nicif_err(adapter, drv, netdev, "Set vf max tx rate must be in [0 - %u]\n", - speeds[port_info.speed]); - return -EINVAL; - } - - err = spnic_set_vf_tx_rate(adapter->hwdev, OS_VF_ID_TO_HW(vf), max_tx_rate, min_tx_rate); - if (err) { - nicif_err(adapter, drv, netdev, "Unable to set VF %d max rate %d min rate %d%s\n", - vf, max_tx_rate, min_tx_rate, - err == SPNIC_TX_RATE_TABLE_FULL ? ", tx rate profile is full" : ""); - return -EIO; - } - - nicif_info(adapter, drv, netdev, "Set VF %d max tx rate %d min tx rate %d successfully\n", - vf, max_tx_rate, min_tx_rate); - - return 0; -} - -bool spnic_is_xdp_enable(struct spnic_nic_dev *nic_dev) -{ - return !!nic_dev->xdp_prog; -} - -int spnic_xdp_max_mtu(struct spnic_nic_dev *nic_dev) -{ - return nic_dev->rx_buff_len - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); -} - -static int spnic_xdp_setup(struct spnic_nic_dev *nic_dev, struct bpf_prog *prog, - struct netlink_ext_ack *extack) -{ - struct bpf_prog *old_prog = NULL; - int max_mtu = spnic_xdp_max_mtu(nic_dev); - int q_id; - - if (nic_dev->netdev->mtu > max_mtu) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to setup xdp program, the current MTU %d is larger than max allowed MTU %d\n", - nic_dev->netdev->mtu, max_mtu); - NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading xdp program"); - return -EINVAL; - } - - if (nic_dev->netdev->features & NETIF_F_LRO) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to setup xdp program while LRO is on\n"); - NL_SET_ERR_MSG_MOD(extack, "Failed to setup xdp program while LRO is on\n"); - return -EINVAL; - } - - old_prog = xchg(&nic_dev->xdp_prog, prog); - for (q_id = 0; q_id < nic_dev->max_qps; q_id++) - xchg(&nic_dev->rxqs[q_id].xdp_prog, nic_dev->xdp_prog); - - if (old_prog) - bpf_prog_put(old_prog); - - return 0; -} - -static int spnic_xdp(struct net_device *netdev, struct netdev_bpf *xdp) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - switch (xdp->command) { - case XDP_SETUP_PROG: - return spnic_xdp_setup(nic_dev, xdp->prog, xdp->extack); - default: - return -EINVAL; - } -} - -static const struct net_device_ops spnic_netdev_ops = { - .ndo_open = spnic_open, - .ndo_stop = spnic_close, - .ndo_start_xmit = spnic_xmit_frame, - - .ndo_get_stats64 = spnic_get_stats64, - - .ndo_tx_timeout = spnic_tx_timeout, - .ndo_select_queue = spnic_select_queue, - .ndo_change_mtu = spnic_change_mtu, - .ndo_set_mac_address = spnic_set_mac_addr, - .ndo_validate_addr = eth_validate_addr, - - .ndo_vlan_rx_add_vid = spnic_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = spnic_vlan_rx_kill_vid, - - .ndo_set_vf_mac = spnic_ndo_set_vf_mac, - .ndo_set_vf_vlan = spnic_ndo_set_vf_vlan, - .ndo_set_vf_rate = spnic_ndo_set_vf_bw, - .ndo_set_vf_spoofchk = spnic_ndo_set_vf_spoofchk, - - .ndo_set_vf_trust = spnic_ndo_set_vf_trust, - - .ndo_get_vf_config = spnic_ndo_get_vf_config, - -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = spnic_netpoll, -#endif /* CONFIG_NET_POLL_CONTROLLER */ - - .ndo_set_rx_mode = spnic_nic_set_rx_mode, - - .ndo_bpf = spnic_xdp, - - .ndo_set_vf_link_state = spnic_ndo_set_vf_link_state, - - .ndo_fix_features = spnic_fix_features, - .ndo_set_features = spnic_set_features, -}; - -static const struct net_device_ops spnicvf_netdev_ops = { - .ndo_open = spnic_open, - .ndo_stop = spnic_close, - .ndo_start_xmit = spnic_xmit_frame, - - .ndo_get_stats64 = spnic_get_stats64, - - .ndo_tx_timeout = 
spnic_tx_timeout, - .ndo_select_queue = spnic_select_queue, - - .ndo_change_mtu = spnic_change_mtu, - .ndo_set_mac_address = spnic_set_mac_addr, - .ndo_validate_addr = eth_validate_addr, - - .ndo_vlan_rx_add_vid = spnic_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = spnic_vlan_rx_kill_vid, - -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = spnic_netpoll, -#endif /* CONFIG_NET_POLL_CONTROLLER */ - - .ndo_set_rx_mode = spnic_nic_set_rx_mode, - - .ndo_bpf = spnic_xdp, - - .ndo_fix_features = spnic_fix_features, - .ndo_set_features = spnic_set_features, -}; - -void spnic_set_netdev_ops(struct spnic_nic_dev *nic_dev) -{ - if (!SPNIC_FUNC_IS_VF(nic_dev->hwdev)) - nic_dev->netdev->netdev_ops = &spnic_netdev_ops; - else - nic_dev->netdev->netdev_ops = &spnicvf_netdev_ops; -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic.h b/drivers/net/ethernet/ramaxel/spnic/spnic_nic.h deleted file mode 100644 index 83c904bc0f72..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_nic.h +++ /dev/null @@ -1,148 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_NIC_H -#define SPNIC_NIC_H -#include <linux/types.h> -#include "sphw_common.h" -#include "spnic_nic_io.h" -#include "spnic_nic_cfg.h" -#include "spnic_mag_cmd.h" - -#define MSG_TO_MGMT_SYNC_RETURN_ERR(err, status, out_size) \ - ((err) || (status) || !(out_size)) - -struct spnic_sq_attr { - u8 dma_attr_off; - u8 pending_limit; - u8 coalescing_time; - u8 intr_en; - u16 intr_idx; - u32 l2nic_sqn; - u64 ci_dma_base; -}; - -struct vf_data_storage { - u8 drv_mac_addr[ETH_ALEN]; - u8 user_mac_addr[ETH_ALEN]; - bool registered; - bool use_specified_mac; - u16 pf_vlan; - u8 pf_qos; - u32 max_rate; - u32 min_rate; - - bool link_forced; - bool link_up; /* only valid if VF link is forced */ - bool spoofchk; - bool trust; - u16 num_qps; -}; - -struct spnic_port_routine_cmd { - bool mpu_send_sfp_info; - bool mpu_send_sfp_abs; - - struct mag_cmd_get_xsfp_info std_sfp_info; - struct mag_cmd_get_xsfp_present abs; -}; - -struct spnic_nic_cfg { - void *hwdev; - void *pcidev_hdl; - void *dev_hdl; - - struct spnic_io_queue *sq; - struct spnic_io_queue *rq; - - u16 rx_buff_len; - - u16 num_qps; - u16 max_qps; - - void *ci_vaddr_base; - dma_addr_t ci_dma_base; - - /* including rq and rx doorbell */ - u16 allocated_num_db; - u8 __iomem **db_addr; - - u8 link_status; - - u16 max_vfs; - struct vf_data_storage *vf_infos; - struct spnic_dcb_state dcb_state; - - u64 feature_cap; - - struct semaphore cfg_lock; - - /* Valid when pfc is disable */ - bool pause_set; - struct nic_pause_config nic_pause; - - u8 pfc_en; - u8 pfc_bitmap; - - struct nic_port_info port_info; - - /* percentage of pf link bandwidth */ - u32 pf_bw_limit; - - struct spnic_port_routine_cmd rt_cmd; - /* mutex used for copy sfp info */ - struct mutex sfp_mutex; -}; - -struct vf_msg_handler { - u16 cmd; - int (*handler)(struct spnic_nic_cfg *nic_cfg, u16 vf, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size); -}; - -struct nic_event_handler { - u16 cmd; - void (*handler)(void *hwdev, void *buf_in, u16 in_size, void *buf_out, u16 *out_size); -}; - -int spnic_set_ci_table(void *hwdev, struct spnic_sq_attr *attr); - -int l2nic_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size); - -int l2nic_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size, u16 channel); - -int spnic_cfg_vf_vlan(struct spnic_nic_cfg *nic_cfg, u8 opcode, u16 
vid, u8 qos, int vf_id); - -int spnic_vf_event_handler(void *hwdev, void *pri_handle, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size); - -void spnic_pf_event_handler(void *hwdev, void *pri_handle, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size); - -int spnic_pf_mbox_handler(void *hwdev, void *pri_handle, u16 vf_id, u16 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size); - -u8 spnic_nic_sw_aeqe_handler(void *hwdev, u8 event, u8 *data); - -int spnic_vf_func_init(struct spnic_nic_cfg *nic_cfg); - -void spnic_vf_func_free(struct spnic_nic_cfg *nic_cfg); - -void spnic_notify_dcb_state_event(struct spnic_nic_cfg *nic_cfg, struct spnic_dcb_state *dcb_state); - -int spnic_save_dcb_state(struct spnic_nic_cfg *nic_cfg, struct spnic_dcb_state *dcb_state); - -void spnic_notify_vf_link_status(struct spnic_nic_cfg *nic_cfg, u16 vf_id, u8 link_status); - -int spnic_vf_mag_event_handler(void *hwdev, void *pri_handle, u16 cmd, - void *buf_in, u16 in_size, void *buf_out, u16 *out_size); - -void spnic_pf_mag_event_handler(void *hwdev, void *pri_handle, u16 cmd, - void *buf_in, u16 in_size, void *buf_out, u16 *out_size); - -int spnic_pf_mag_mbox_handler(void *hwdev, void *pri_handle, u16 vf_id, - u16 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg.c b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg.c deleted file mode 100644 index d241f6a7947d..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg.c +++ /dev/null @@ -1,1334 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt - -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/etherdevice.h> -#include <linux/if_vlan.h> -#include <linux/ethtool.h> -#include <linux/kernel.h> -#include <linux/device.h> -#include <linux/pci.h> -#include <linux/netdevice.h> -#include <linux/module.h> - -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "spnic_nic_io.h" -#include "spnic_nic_cfg.h" -#include "spnic_nic.h" -#include "spnic_nic_cmd.h" -#include "sphw_common.h" - -int spnic_set_ci_table(void *hwdev, struct spnic_sq_attr *attr) -{ - struct spnic_cmd_cons_idx_attr cons_idx_attr; - u16 out_size = sizeof(cons_idx_attr); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev || !attr) - return -EINVAL; - - memset(&cons_idx_attr, 0, sizeof(cons_idx_attr)); - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - cons_idx_attr.func_idx = sphw_global_func_id(hwdev); - - cons_idx_attr.dma_attr_off = attr->dma_attr_off; - cons_idx_attr.pending_limit = attr->pending_limit; - cons_idx_attr.coalescing_time = attr->coalescing_time; - - if (attr->intr_en) { - cons_idx_attr.intr_en = attr->intr_en; - cons_idx_attr.intr_idx = attr->intr_idx; - } - - cons_idx_attr.l2nic_sqn = attr->l2nic_sqn; - cons_idx_attr.ci_addr = attr->ci_dma_base; - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_SQ_CI_ATTR_SET, - &cons_idx_attr, sizeof(cons_idx_attr), - &cons_idx_attr, &out_size); - if (err || !out_size || cons_idx_attr.msg_head.status) { - sdk_err(nic_cfg->dev_hdl, - "Failed to set ci attribute table, err: %d, status: 0x%x, out_size: 0x%x\n", - err, cons_idx_attr.msg_head.status, out_size); - return -EFAULT; - } - - return 0; -} - -static int spnic_check_mac_info(u8 status, u16 vlan_id) -{ - if (status && status != SPNIC_MGMT_STATUS_EXIST && status != SPNIC_PF_SET_VF_ALREADY) - return -EINVAL; - - 
return 0; -} - -#define SPNIC_VLAN_ID_MASK 0x7FFF - -int spnic_set_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id, u16 channel) -{ - struct spnic_port_mac_set mac_info; - u16 out_size = sizeof(mac_info); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev || !mac_addr) - return -EINVAL; - - memset(&mac_info, 0, sizeof(mac_info)); - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - if ((vlan_id & SPNIC_VLAN_ID_MASK) >= VLAN_N_VID) { - nic_err(nic_cfg->dev_hdl, "Invalid VLAN number: %d\n", - vlan_id & SPNIC_VLAN_ID_MASK); - return -EINVAL; - } - - mac_info.func_id = func_id; - mac_info.vlan_id = vlan_id; - ether_addr_copy(mac_info.mac, mac_addr); - - err = l2nic_msg_to_mgmt_sync_ch(hwdev, SPNIC_NIC_CMD_SET_MAC, - &mac_info, sizeof(mac_info), - &mac_info, &out_size, channel); - if (err || !out_size || spnic_check_mac_info(mac_info.msg_head.status, mac_info.vlan_id)) { - nic_err(nic_cfg->dev_hdl, - "Failed to update MAC, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", - err, mac_info.msg_head.status, out_size, channel); - return -EINVAL; - } - - if (mac_info.msg_head.status == SPNIC_PF_SET_VF_ALREADY) { - nic_warn(nic_cfg->dev_hdl, "PF has already set VF mac, Ignore set operation\n"); - return SPNIC_PF_SET_VF_ALREADY; - } - - if (mac_info.msg_head.status == SPNIC_MGMT_STATUS_EXIST) { - nic_warn(nic_cfg->dev_hdl, "MAC is repeated. Ignore update operation\n"); - return 0; - } - - return 0; -} - -int spnic_del_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id, u16 channel) -{ - struct spnic_port_mac_set mac_info; - u16 out_size = sizeof(mac_info); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev || !mac_addr) - return -EINVAL; - - memset(&mac_info, 0, sizeof(mac_info)); - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - if ((vlan_id & SPNIC_VLAN_ID_MASK) >= VLAN_N_VID) { - nic_err(nic_cfg->dev_hdl, "Invalid VLAN number: %d\n", - (vlan_id & SPNIC_VLAN_ID_MASK)); - return -EINVAL; - } - - mac_info.func_id = func_id; - mac_info.vlan_id = vlan_id; - ether_addr_copy(mac_info.mac, mac_addr); - - err = l2nic_msg_to_mgmt_sync_ch(hwdev, SPNIC_NIC_CMD_DEL_MAC, - &mac_info, sizeof(mac_info), &mac_info, - &out_size, channel); - if (err || !out_size || - (mac_info.msg_head.status && mac_info.msg_head.status != - SPNIC_PF_SET_VF_ALREADY)) { - nic_err(nic_cfg->dev_hdl, - "Failed to delete MAC, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", - err, mac_info.msg_head.status, out_size, channel); - return -EINVAL; - } - - if (mac_info.msg_head.status == SPNIC_PF_SET_VF_ALREADY) { - nic_warn(nic_cfg->dev_hdl, "PF has already set VF mac, Ignore delete operation.\n"); - return SPNIC_PF_SET_VF_ALREADY; - } - - return 0; -} - -int spnic_update_mac(void *hwdev, u8 *old_mac, u8 *new_mac, u16 vlan_id, u16 func_id) -{ - struct spnic_port_mac_update mac_info; - u16 out_size = sizeof(mac_info); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev || !old_mac || !new_mac) - return -EINVAL; - - memset(&mac_info, 0, sizeof(mac_info)); - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - if ((vlan_id & SPNIC_VLAN_ID_MASK) >= VLAN_N_VID) { - nic_err(nic_cfg->dev_hdl, "Invalid VLAN number: %d\n", - vlan_id & SPNIC_VLAN_ID_MASK); - return -EINVAL; - } - - mac_info.func_id = func_id; - mac_info.vlan_id = vlan_id; - ether_addr_copy(mac_info.old_mac, old_mac); - ether_addr_copy(mac_info.new_mac, new_mac); - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_UPDATE_MAC, - &mac_info, sizeof(mac_info), - 
&mac_info, &out_size); - if (err || !out_size || spnic_check_mac_info(mac_info.msg_head.status, mac_info.vlan_id)) { - nic_err(nic_cfg->dev_hdl, - "Failed to update MAC, err: %d, status: 0x%x, out size: 0x%x\n", - err, mac_info.msg_head.status, out_size); - return -EINVAL; - } - - if (mac_info.msg_head.status == SPNIC_PF_SET_VF_ALREADY) { - nic_warn(nic_cfg->dev_hdl, "PF has already set VF MAC. Ignore update operation\n"); - return SPNIC_PF_SET_VF_ALREADY; - } - - if (mac_info.msg_head.status == SPNIC_MGMT_STATUS_EXIST) { - nic_warn(nic_cfg->dev_hdl, "MAC is repeated. Ignore update operation\n"); - return 0; - } - - return 0; -} - -int spnic_get_default_mac(void *hwdev, u8 *mac_addr) -{ - struct spnic_port_mac_set mac_info; - u16 out_size = sizeof(mac_info); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev || !mac_addr) - return -EINVAL; - - memset(&mac_info, 0, sizeof(mac_info)); - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - mac_info.func_id = sphw_global_func_id(hwdev); - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_GET_MAC, &mac_info, sizeof(mac_info), - &mac_info, &out_size); - if (err || !out_size || mac_info.msg_head.status) { - nic_err(nic_cfg->dev_hdl, - "Failed to get mac, err: %d, status: 0x%x, out size: 0x%x\n", - err, mac_info.msg_head.status, out_size); - return -EINVAL; - } - - ether_addr_copy(mac_addr, mac_info.mac); - - return 0; -} - -static int spnic_config_vlan(struct spnic_nic_cfg *nic_cfg, u8 opcode, u16 vlan_id, u16 func_id) -{ - struct spnic_cmd_vlan_config vlan_info; - u16 out_size = sizeof(vlan_info); - int err; - - memset(&vlan_info, 0, sizeof(vlan_info)); - vlan_info.opcode = opcode; - vlan_info.func_id = func_id; - vlan_info.vlan_id = vlan_id; - - err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_CFG_FUNC_VLAN, - &vlan_info, sizeof(vlan_info), - &vlan_info, &out_size); - if (err || !out_size || vlan_info.msg_head.status) { - nic_err(nic_cfg->dev_hdl, - "Failed to %s vlan, err: %d, status: 0x%x, out size: 0x%x\n", - opcode == SPNIC_CMD_OP_ADD ? "add" : "delete", - err, vlan_info.msg_head.status, out_size); - return -EINVAL; - } - - return 0; -} - -int spnic_add_vlan(void *hwdev, u16 vlan_id, u16 func_id) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - return spnic_config_vlan(nic_cfg, SPNIC_CMD_OP_ADD, vlan_id, func_id); -} - -int spnic_del_vlan(void *hwdev, u16 vlan_id, u16 func_id) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - return spnic_config_vlan(nic_cfg, SPNIC_CMD_OP_DEL, vlan_id, func_id); -} - -int spnic_set_vport_enable(void *hwdev, u16 func_id, bool enable, u16 channel) -{ - struct spnic_vport_state en_state; - u16 out_size = sizeof(en_state); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev) - return -EINVAL; - - memset(&en_state, 0, sizeof(en_state)); - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - en_state.func_id = func_id; - en_state.state = enable ? 
1 : 0; - - err = l2nic_msg_to_mgmt_sync_ch(hwdev, SPNIC_NIC_CMD_SET_VPORT_ENABLE, - &en_state, sizeof(en_state), - &en_state, &out_size, channel); - if (err || !out_size || en_state.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to set vport state, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", - err, en_state.msg_head.status, out_size, channel); - return -EINVAL; - } - - return 0; -} - -int spnic_set_dcb_state(void *hwdev, struct spnic_dcb_state *dcb_state) -{ - struct vf_data_storage *vf_infos = NULL; - struct spnic_cmd_vf_dcb_state vf_dcb; - struct spnic_nic_cfg *nic_cfg = NULL; - u16 vf_id, out_size = 0; - int err; - - if (!hwdev || !dcb_state) - return -EINVAL; - - memset(&vf_dcb, 0, sizeof(vf_dcb)); - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - if (!memcmp(&nic_cfg->dcb_state, dcb_state, sizeof(nic_cfg->dcb_state))) - return 0; - - memcpy(&vf_dcb.state, dcb_state, sizeof(vf_dcb.state)); - /* save in sdk, vf will get dcb state when probing */ - spnic_save_dcb_state(nic_cfg, dcb_state); - - /* notify statefull in pf, than notify all vf */ - spnic_notify_dcb_state_event(nic_cfg, dcb_state); - - /* not vf supported, don't need to notify vf */ - if (!nic_cfg->vf_infos) - return 0; - - vf_infos = nic_cfg->vf_infos; - for (vf_id = 0; vf_id < nic_cfg->max_vfs; vf_id++) { - if (vf_infos[vf_id].registered) { - vf_dcb.msg_head.status = 0; - out_size = sizeof(vf_dcb); - err = sphw_mbox_to_vf(hwdev, OS_VF_ID_TO_HW(vf_id), SPHW_MOD_L2NIC, - SPNIC_NIC_CMD_VF_COS, &vf_dcb, sizeof(vf_dcb), - &vf_dcb, &out_size, 0, SPHW_CHANNEL_NIC); - if (MSG_TO_MGMT_SYNC_RETURN_ERR(err, vf_dcb.msg_head.status, out_size)) - nic_err(nic_cfg->dev_hdl, - "Failed to notify dcb state to VF %u, err: %d, status: 0x%x, out size: 0x%x\n", - vf_id, err, vf_dcb.msg_head.status, out_size); - } - } - - return 0; -} - -int spnic_get_dcb_state(void *hwdev, struct spnic_dcb_state *dcb_state) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev || !dcb_state) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - memcpy(dcb_state, &nic_cfg->dcb_state, sizeof(*dcb_state)); - - return 0; -} - -int spnic_save_dcb_state(struct spnic_nic_cfg *nic_cfg, struct spnic_dcb_state *dcb_state) -{ - memcpy(&nic_cfg->dcb_state, dcb_state, sizeof(*dcb_state)); - - return 0; -} - -int spnic_get_pf_dcb_state(void *hwdev, struct spnic_dcb_state *dcb_state) -{ - struct spnic_cmd_vf_dcb_state vf_dcb; - struct spnic_nic_cfg *nic_cfg = NULL; - u16 out_size = sizeof(vf_dcb); - int err; - - if (!hwdev || !dcb_state) - return -EINVAL; - - memset(&vf_dcb, 0, sizeof(vf_dcb)); - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - if (sphw_func_type(hwdev) != TYPE_VF) { - nic_err(nic_cfg->dev_hdl, "Only vf need to get pf dcb state\n"); - return -EINVAL; - } - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_VF_COS, &vf_dcb, - sizeof(vf_dcb), &vf_dcb, &out_size); - if (err || !out_size || vf_dcb.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to get vf default cos, err: %d, status: 0x%x, out size: 0x%x\n", - err, vf_dcb.msg_head.status, out_size); - return -EFAULT; - } - - memcpy(dcb_state, &vf_dcb.state, sizeof(*dcb_state)); - /* Save dcb_state in hw for statefull module */ - spnic_save_dcb_state(nic_cfg, dcb_state); - - return 0; -} - -static int spnic_cfg_hw_pause(struct spnic_nic_cfg *nic_cfg, u8 opcode, - struct nic_pause_config *nic_pause) -{ - struct spnic_cmd_pause_config pause_info; - u16 out_size = sizeof(pause_info); - int err; - - memset(&pause_info, 0, 
sizeof(pause_info)); - - pause_info.port_id = sphw_physical_port_id(nic_cfg->hwdev); - pause_info.opcode = opcode; - if (opcode == SPNIC_CMD_OP_SET) { - pause_info.auto_neg = nic_pause->auto_neg; - pause_info.rx_pause = nic_pause->rx_pause; - pause_info.tx_pause = nic_pause->tx_pause; - } - - err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_CFG_PAUSE_INFO, - &pause_info, sizeof(pause_info), - &pause_info, &out_size); - if (err || !out_size || pause_info.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to %s pause info, err: %d, status: 0x%x, out size: 0x%x\n", - opcode == SPNIC_CMD_OP_SET ? "set" : "get", - err, pause_info.msg_head.status, out_size); - return -EINVAL; - } - - if (opcode == SPNIC_CMD_OP_GET) { - nic_pause->auto_neg = pause_info.auto_neg; - nic_pause->rx_pause = pause_info.rx_pause; - nic_pause->tx_pause = pause_info.tx_pause; - } - - return 0; -} - -int spnic_set_pause_info(void *hwdev, struct nic_pause_config nic_pause) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - down(&nic_cfg->cfg_lock); - - err = spnic_cfg_hw_pause(nic_cfg, SPNIC_CMD_OP_SET, &nic_pause); - if (err) { - up(&nic_cfg->cfg_lock); - return err; - } - - nic_cfg->pfc_en = 0; - nic_cfg->pfc_bitmap = 0; - nic_cfg->pause_set = true; - nic_cfg->nic_pause.auto_neg = nic_pause.auto_neg; - nic_cfg->nic_pause.rx_pause = nic_pause.rx_pause; - nic_cfg->nic_pause.tx_pause = nic_pause.tx_pause; - - up(&nic_cfg->cfg_lock); - - return 0; -} - -int spnic_get_pause_info(void *hwdev, struct nic_pause_config *nic_pause) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - int err = 0; - - if (!hwdev || !nic_pause) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - err = spnic_cfg_hw_pause(nic_cfg, SPNIC_CMD_OP_GET, nic_pause); - if (err) - return err; - - if (nic_cfg->pause_set || !nic_pause->auto_neg) { - nic_pause->rx_pause = nic_cfg->nic_pause.rx_pause; - nic_pause->tx_pause = nic_cfg->nic_pause.tx_pause; - } - - return 0; -} - -static int spnic_dcb_set_hw_pfc(struct spnic_nic_cfg *nic_cfg, u8 pfc_en, u8 pfc_bitmap) -{ - struct spnic_cmd_set_pfc pfc; - u16 out_size = sizeof(pfc); - int err; - - memset(&pfc, 0, sizeof(pfc)); - - pfc.port_id = sphw_physical_port_id(nic_cfg->hwdev); - pfc.pfc_bitmap = pfc_bitmap; - pfc.pfc_en = pfc_en; - - err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_SET_PFC, - &pfc, sizeof(pfc), &pfc, &out_size); - if (err || pfc.msg_head.status || !out_size) { - nic_err(nic_cfg->dev_hdl, "Failed to set pfc, err: %d, status: 0x%x, out size: 0x%x\n", - err, pfc.msg_head.status, out_size); - return -EINVAL; - } - - return 0; -} - -int spnic_dcb_set_pfc(void *hwdev, u8 pfc_en, u8 pfc_bitmap) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - down(&nic_cfg->cfg_lock); - - err = spnic_dcb_set_hw_pfc(nic_cfg, pfc_en, pfc_bitmap); - if (err) { - up(&nic_cfg->cfg_lock); - return err; - } - - nic_cfg->pfc_en = pfc_en; - nic_cfg->pfc_bitmap = pfc_bitmap; - - /* pause settings is opposite from pfc */ - nic_cfg->nic_pause.rx_pause = pfc_en ? 0 : 1; - nic_cfg->nic_pause.tx_pause = pfc_en ? 
0 : 1; - - up(&nic_cfg->cfg_lock); - - return 0; -} - -int spnic_dcb_set_ets(void *hwdev, u8 *cos_tc, u8 *cos_bw, u8 *cos_prio, - u8 *tc_bw, u8 *tc_prio) -{ - struct spnic_up_ets_cfg ets; - struct spnic_nic_cfg *nic_cfg = NULL; - u16 out_size = sizeof(ets); - u16 cos_bw_t = 0; - u8 tc_bw_t = 0; - int i, err; - - memset(&ets, 0, sizeof(ets)); - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - for (i = 0; i < SPNIC_DCB_COS_MAX; i++) { - cos_bw_t += *(cos_bw + i); - tc_bw_t += *(tc_bw + i); - - if (*(cos_tc + i) > SPNIC_DCB_TC_MAX) { - nic_err(nic_cfg->dev_hdl, "Invalid cos %d mapping tc: %u\n", - i, *(cos_tc + i)); - return -EINVAL; - } - } - - /* The sum of all TCs must be 100%, and the same for cos */ - if ((tc_bw_t != 100 && tc_bw_t != 0) || (cos_bw_t % 100) != 0) { - nic_err(nic_cfg->dev_hdl, - "Invalid pg_bw: %u or up_bw: %u\n", tc_bw_t, cos_bw_t); - return -EINVAL; - } - - ets.port_id = sphw_physical_port_id(hwdev); - memcpy(ets.cos_tc, cos_tc, SPNIC_DCB_COS_MAX); - memcpy(ets.cos_bw, cos_bw, SPNIC_DCB_COS_MAX); - memcpy(ets.cos_prio, cos_prio, SPNIC_DCB_COS_MAX); - memcpy(ets.tc_bw, tc_bw, SPNIC_DCB_TC_MAX); - memcpy(ets.tc_prio, tc_prio, SPNIC_DCB_TC_MAX); - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_SET_ETS, - &ets, sizeof(ets), &ets, &out_size); - if (err || ets.msg_head.status || !out_size) { - nic_err(nic_cfg->dev_hdl, - "Failed to set ets, err: %d, status: 0x%x, out size: 0x%x\n", - err, ets.msg_head.status, out_size); - return -EINVAL; - } - - return 0; -} - -int spnic_dcb_set_cos_up_map(void *hwdev, u8 cos_valid_bitmap, u8 *cos_up, u8 max_cos_num) -{ - struct spnic_cos_up_map map; - struct spnic_nic_cfg *nic_cfg = NULL; - u16 out_size = sizeof(map); - int err; - - if (!hwdev || !cos_up) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - memset(&map, 0, sizeof(map)); - - map.port_id = sphw_physical_port_id(hwdev); - map.cos_valid_mask = cos_valid_bitmap; - memcpy(map.map, cos_up, sizeof(map.map)); - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_SETUP_COS_MAPPING, - &map, sizeof(map), &map, &out_size); - if (err || map.msg_head.status || !out_size) { - nic_err(nic_cfg->dev_hdl, - "Failed to set cos2up map, err: %d, status: 0x%x, out size: 0x%x\n", - err, map.msg_head.status, out_size); - return -EFAULT; - } - - return 0; -} - -int spnic_flush_qps_res(void *hwdev) -{ - struct spnic_cmd_clear_qp_resource sq_res; - u16 out_size = sizeof(sq_res); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - memset(&sq_res, 0, sizeof(sq_res)); - - sq_res.func_id = sphw_global_func_id(hwdev); - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_CLEAR_QP_RESOURCE, - &sq_res, sizeof(sq_res), &sq_res, - &out_size); - if (err || !out_size || sq_res.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to clear sq resources, err: %d, status: 0x%x, out size: 0x%x\n", - err, sq_res.msg_head.status, out_size); - return -EINVAL; - } - - return 0; -} - -int spnic_get_vport_stats(void *hwdev, struct spnic_vport_stats *stats) -{ - struct spnic_port_stats_info stats_info; - struct spnic_cmd_vport_stats vport_stats; - u16 out_size = sizeof(vport_stats); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev || !stats) - return -EINVAL; - - memset(&stats_info, 0, sizeof(stats_info)); - memset(&vport_stats, 0, sizeof(vport_stats)); - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - stats_info.func_id = 
sphw_global_func_id(hwdev); - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_GET_VPORT_STAT, - &stats_info, sizeof(stats_info), - &vport_stats, &out_size); - if (err || !out_size || vport_stats.msg_head.status) { - nic_err(nic_cfg->dev_hdl, - "Failed to get function statistics, err: %d, status: 0x%x, out size: 0x%x\n", - err, vport_stats.msg_head.status, out_size); - return -EFAULT; - } - - memcpy(stats, &vport_stats.stats, sizeof(*stats)); - - return 0; -} - -int spnic_set_function_table(struct spnic_nic_cfg *nic_cfg, u32 cfg_bitmap, - struct spnic_func_tbl_cfg *cfg) -{ - struct spnic_cmd_set_func_tbl cmd_func_tbl; - u16 out_size = sizeof(cmd_func_tbl); - int err; - - memset(&cmd_func_tbl, 0, sizeof(cmd_func_tbl)); - cmd_func_tbl.func_id = sphw_global_func_id(nic_cfg->hwdev); - cmd_func_tbl.cfg_bitmap = cfg_bitmap; - cmd_func_tbl.tbl_cfg = *cfg; - - err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_SET_FUNC_TBL, - &cmd_func_tbl, sizeof(cmd_func_tbl), - &cmd_func_tbl, &out_size); - if (err || cmd_func_tbl.msg_head.status || !out_size) { - nic_err(nic_cfg->dev_hdl, - "Failed to set func table, bitmap: 0x%x, err: %d, status: 0x%x, out size: 0x%x\n", - cfg_bitmap, err, cmd_func_tbl.msg_head.status, out_size); - return -EFAULT; - } - - return 0; -} - -int spnic_init_function_table(struct spnic_nic_cfg *nic_cfg) -{ - struct spnic_func_tbl_cfg func_tbl_cfg = {0}; - u32 cfg_bitmap = BIT(FUNC_CFG_INIT) | BIT(FUNC_CFG_MTU) | - BIT(FUNC_CFG_RX_BUF_SIZE); - - func_tbl_cfg.mtu = 0x3FFF; /* default, max mtu */ - func_tbl_cfg.rx_wqe_buf_size = nic_cfg->rx_buff_len; - - return spnic_set_function_table(nic_cfg, cfg_bitmap, &func_tbl_cfg); -} - -int spnic_set_port_mtu(void *hwdev, u16 new_mtu) -{ - struct spnic_func_tbl_cfg func_tbl_cfg = {0}; - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - if (new_mtu < SPNIC_MIN_MTU_SIZE) { - nic_err(nic_cfg->dev_hdl, "Invalid mtu size: %ubytes, mtu size < %ubytes", - new_mtu, SPNIC_MIN_MTU_SIZE); - return -EINVAL; - } - - if (new_mtu > SPNIC_MAX_JUMBO_FRAME_SIZE) { - nic_err(nic_cfg->dev_hdl, "Invalid mtu size: %ubytes, mtu size > %ubytes", - new_mtu, SPNIC_MAX_JUMBO_FRAME_SIZE); - return -EINVAL; - } - - func_tbl_cfg.mtu = new_mtu; - return spnic_set_function_table(nic_cfg, BIT(FUNC_CFG_MTU), &func_tbl_cfg); -} - -static int nic_feature_nego(void *hwdev, u8 opcode, u64 *s_feature, u16 size) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - struct spnic_cmd_feature_nego feature_nego; - u16 out_size = sizeof(feature_nego); - int err; - - if (!hwdev || !s_feature || size > NIC_MAX_FEATURE_QWORD) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - memset(&feature_nego, 0, sizeof(feature_nego)); - feature_nego.func_id = sphw_global_func_id(hwdev); - feature_nego.opcode = opcode; - if (opcode == SPNIC_CMD_OP_SET) - memcpy(feature_nego.s_feature, s_feature, size * sizeof(u64)); - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_FEATURE_NEGO, - &feature_nego, sizeof(feature_nego), - &feature_nego, &out_size); - if (err || !out_size || feature_nego.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to negotiate nic feature, err:%d, status: 0x%x, out_size: 0x%x\n", - err, feature_nego.msg_head.status, out_size); - return -EIO; - } - - if (opcode == SPNIC_CMD_OP_GET) - memcpy(s_feature, feature_nego.s_feature, size * sizeof(u64)); - - return 0; -} - -static int spnic_get_nic_feature_from_hw(void *hwdev, u64 *s_feature, u16 size) -{ - return 
nic_feature_nego(hwdev, SPNIC_CMD_OP_GET, s_feature, size); -} - -int spnic_set_nic_feature_to_hw(void *hwdev) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - return nic_feature_nego(hwdev, SPNIC_CMD_OP_SET, &nic_cfg->feature_cap, 1); -} - -u64 spnic_get_feature_cap(void *hwdev) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - return nic_cfg->feature_cap; -} - -void spnic_update_nic_feature(void *hwdev, u64 feature) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - nic_cfg->feature_cap = feature; - - nic_info(nic_cfg->dev_hdl, "Update nic feature to 0x%llx\n", nic_cfg->feature_cap); -} - -static inline int init_nic_hwdev_param_valid(void *hwdev, void *pcidev_hdl, void *dev_hdl) -{ - if (!hwdev || !pcidev_hdl || !dev_hdl) - return -EINVAL; - - return 0; -} - -/* spnic_init_nic_hwdev - init nic hwdev - * @hwdev: pointer to hwdev - * @pcidev_hdl: pointer to pcidev or handler - * @dev_hdl: pointer to pcidev->dev or handler, for sdk_err() or dma_alloc() - * @rx_buff_len: rx_buff_len is receive buffer length - */ -int spnic_init_nic_hwdev(void *hwdev, void *pcidev_hdl, void *dev_hdl, u16 rx_buff_len) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (init_nic_hwdev_param_valid(hwdev, pcidev_hdl, dev_hdl)) - return -EINVAL; - - nic_cfg = kzalloc(sizeof(*nic_cfg), GFP_KERNEL); - if (!nic_cfg) - return -ENOMEM; - - nic_cfg->dev_hdl = dev_hdl; - nic_cfg->pcidev_hdl = pcidev_hdl; - nic_cfg->hwdev = hwdev; - - sema_init(&nic_cfg->cfg_lock, 1); - mutex_init(&nic_cfg->sfp_mutex); - - err = sphw_register_service_adapter(hwdev, nic_cfg, SERVICE_T_NIC); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to register service adapter\n"); - goto register_sa_err; - } - - err = spnic_init_function_table(nic_cfg); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to init function table\n"); - goto init_func_tbl_err; - } - - err = spnic_get_nic_feature_from_hw(hwdev, &nic_cfg->feature_cap, 1); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to get nic features\n"); - goto get_feature_err; - } - - sdk_info(dev_hdl, "nic features: 0x%llx\n", nic_cfg->feature_cap); - - err = sphw_aeq_register_swe_cb(hwdev, SPHW_STATELESS_EVENT, spnic_nic_sw_aeqe_handler); - if (err) { - nic_err(nic_cfg->dev_hdl, - "Failed to register sw aeqe handler\n"); - goto register_sw_aeqe_err; - } - - err = spnic_vf_func_init(nic_cfg); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to init vf info\n"); - goto vf_init_err; - } - - nic_cfg->rx_buff_len = rx_buff_len; - - return 0; - -vf_init_err: - sphw_aeq_unregister_swe_cb(hwdev, SPHW_STATELESS_EVENT); - -register_sw_aeqe_err: -get_feature_err: -init_func_tbl_err: - sphw_unregister_service_adapter(hwdev, SERVICE_T_NIC); - -register_sa_err: - kfree(nic_cfg); - - return err; -} - -void spnic_free_nic_hwdev(void *hwdev) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev) - return; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (!nic_cfg) - return; - - spnic_vf_func_free(nic_cfg); - - sphw_aeq_unregister_swe_cb(hwdev, SPHW_STATELESS_EVENT); - - sphw_unregister_service_adapter(hwdev, SERVICE_T_NIC); - - kfree(nic_cfg); -} - -/* to do : send cmd to MPU to drop nic tx pkt*/ -int spnic_force_drop_tx_pkt(void *hwdev) -{ - return 0; -} - -int spnic_set_rx_mode(void *hwdev, u32 enable) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - struct spnic_rx_mode_config rx_mode_cfg; - u16 out_size = 
sizeof(rx_mode_cfg); - int err; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - memset(&rx_mode_cfg, 0, sizeof(rx_mode_cfg)); - rx_mode_cfg.func_id = sphw_global_func_id(hwdev); - rx_mode_cfg.rx_mode = enable; - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_SET_RX_MODE, - &rx_mode_cfg, sizeof(rx_mode_cfg), - &rx_mode_cfg, &out_size); - if (err || !out_size || rx_mode_cfg.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to set rx mode, err: %d, status: 0x%x, out size: 0x%x\n", - err, rx_mode_cfg.msg_head.status, out_size); - return -EINVAL; - } - - return 0; -} - -int spnic_set_rx_vlan_offload(void *hwdev, u8 en) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - struct spnic_cmd_vlan_offload vlan_cfg; - u16 out_size = sizeof(vlan_cfg); - int err; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - memset(&vlan_cfg, 0, sizeof(vlan_cfg)); - vlan_cfg.func_id = sphw_global_func_id(hwdev); - vlan_cfg.vlan_offload = en; - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_SET_RX_VLAN_OFFLOAD, - &vlan_cfg, sizeof(vlan_cfg), - &vlan_cfg, &out_size); - if (err || !out_size || vlan_cfg.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to set rx vlan offload, err: %d, status: 0x%x, out size: 0x%x\n", - err, vlan_cfg.msg_head.status, out_size); - return -EINVAL; - } - - return 0; -} - -int spnic_update_mac_vlan(void *hwdev, u16 old_vlan, u16 new_vlan, int vf_id) -{ - struct vf_data_storage *vf_info = NULL; - struct spnic_nic_cfg *nic_cfg = NULL; - u16 func_id; - int err; - - if (!hwdev || old_vlan >= VLAN_N_VID || new_vlan >= VLAN_N_VID) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - vf_info = nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf_id); - - if (!nic_cfg->vf_infos || is_zero_ether_addr(vf_info->drv_mac_addr)) - return 0; - - func_id = sphw_glb_pf_vf_offset(nic_cfg->hwdev) + (u16)vf_id; - - err = spnic_del_mac(nic_cfg->hwdev, vf_info->drv_mac_addr, - old_vlan, func_id, SPHW_CHANNEL_NIC); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to delete VF %d MAC %pM vlan %u\n", - HW_VF_ID_TO_OS(vf_id), vf_info->drv_mac_addr, old_vlan); - return err; - } - - err = spnic_set_mac(nic_cfg->hwdev, vf_info->drv_mac_addr, - new_vlan, func_id, SPHW_CHANNEL_NIC); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to add VF %d MAC %pM vlan %u\n", - HW_VF_ID_TO_OS(vf_id), vf_info->drv_mac_addr, new_vlan); - spnic_set_mac(nic_cfg->hwdev, vf_info->drv_mac_addr, - old_vlan, func_id, SPHW_CHANNEL_NIC); - return err; - } - - return 0; -} - -static int spnic_set_rx_lro(void *hwdev, u8 ipv4_en, u8 ipv6_en, u8 lro_max_pkt_len) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - struct spnic_cmd_lro_config lro_cfg; - u16 out_size = sizeof(lro_cfg); - int err; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - memset(&lro_cfg, 0, sizeof(lro_cfg)); - lro_cfg.func_id = sphw_global_func_id(hwdev); - lro_cfg.opcode = SPNIC_CMD_OP_SET; - lro_cfg.lro_ipv4_en = ipv4_en; - lro_cfg.lro_ipv6_en = ipv6_en; - lro_cfg.lro_max_pkt_len = lro_max_pkt_len; - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_CFG_RX_LRO, - &lro_cfg, sizeof(lro_cfg), - &lro_cfg, &out_size); - if (err || !out_size || lro_cfg.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to set lro offload, err: %d, status: 0x%x, out size: 0x%x\n", - err, lro_cfg.msg_head.status, out_size); - return -EINVAL; - } - - return 0; -} - -static int spnic_set_rx_lro_timer(void *hwdev, u32 
timer_value) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - struct spnic_cmd_lro_timer lro_timer; - u16 out_size = sizeof(lro_timer); - int err; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - memset(&lro_timer, 0, sizeof(lro_timer)); - lro_timer.opcode = SPNIC_CMD_OP_SET; - lro_timer.timer = timer_value; - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_CFG_LRO_TIMER, - &lro_timer, sizeof(lro_timer), - &lro_timer, &out_size); - if (err || !out_size || lro_timer.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to set lro timer, err: %d, status: 0x%x, out size: 0x%x\n", - err, lro_timer.msg_head.status, out_size); - - return -EINVAL; - } - - return 0; -} - -int spnic_set_rx_lro_state(void *hwdev, u8 lro_en, u32 lro_timer, u32 lro_max_pkt_len) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - u8 ipv4_en = 0, ipv6_en = 0; - int err; - - if (!hwdev) - return -EINVAL; - - ipv4_en = lro_en ? 1 : 0; - ipv6_en = lro_en ? 1 : 0; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - nic_info(nic_cfg->dev_hdl, "Set LRO max coalesce packet size to %uK\n", - lro_max_pkt_len); - - err = spnic_set_rx_lro(hwdev, ipv4_en, ipv6_en, (u8)lro_max_pkt_len); - if (err) - return err; - - /* we don't set LRO timer for VF */ - if (sphw_func_type(hwdev) == TYPE_VF) - return 0; - - nic_info(nic_cfg->dev_hdl, "Set LRO timer to %u\n", lro_timer); - - return spnic_set_rx_lro_timer(hwdev, lro_timer); -} - -int spnic_set_vlan_fliter(void *hwdev, u32 vlan_filter_ctrl) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - struct spnic_cmd_set_vlan_filter vlan_filter; - u16 out_size = sizeof(vlan_filter); - int err; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - memset(&vlan_filter, 0, sizeof(vlan_filter)); - vlan_filter.func_id = sphw_global_func_id(hwdev); - vlan_filter.vlan_filter_ctrl = vlan_filter_ctrl; - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_SET_VLAN_FILTER_EN, - &vlan_filter, sizeof(vlan_filter), - &vlan_filter, &out_size); - if (err || !out_size || vlan_filter.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to set vlan filter, err: %d, status: 0x%x, out size: 0x%x\n", - err, vlan_filter.msg_head.status, out_size); - return -EINVAL; - } - - return 0; -} - -int spnic_add_tcam_rule(void *hwdev, struct nic_tcam_cfg_rule *tcam_rule) -{ - u16 out_size = sizeof(struct nic_cmd_fdir_add_rule); - struct nic_cmd_fdir_add_rule tcam_cmd; - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev || !tcam_rule) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (tcam_rule->index >= SPNIC_MAX_TCAM_RULES_NUM) { - nic_err(nic_cfg->dev_hdl, "Tcam rules num to add is invalid\n"); - return -EINVAL; - } - - memset(&tcam_cmd, 0, sizeof(struct nic_cmd_fdir_add_rule)); - memcpy((void *)&tcam_cmd.rule, (void *)tcam_rule, - sizeof(struct nic_tcam_cfg_rule)); - tcam_cmd.func_id = sphw_global_func_id(hwdev); - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_ADD_TC_FLOW, - &tcam_cmd, sizeof(tcam_cmd), - &tcam_cmd, &out_size); - if (err || tcam_cmd.head.status || !out_size) { - nic_err(nic_cfg->dev_hdl, - "Add tcam rule failed, err: %d, status: 0x%x, out size: 0x%x\n", - err, tcam_cmd.head.status, out_size); - return -EIO; - } - - return 0; -} - -int spnic_del_tcam_rule(void *hwdev, u32 index) -{ - u16 out_size = sizeof(struct nic_cmd_fdir_del_rules); - struct nic_cmd_fdir_del_rules tcam_cmd; - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev) - return 
-EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (index >= SPNIC_MAX_TCAM_RULES_NUM) { - nic_err(nic_cfg->dev_hdl, "Tcam rules num to del is invalid\n"); - return -EINVAL; - } - - memset(&tcam_cmd, 0, sizeof(struct nic_cmd_fdir_del_rules)); - tcam_cmd.index_start = index; - tcam_cmd.index_num = 1; - tcam_cmd.func_id = sphw_global_func_id(hwdev); - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_DEL_TC_FLOW, - &tcam_cmd, sizeof(tcam_cmd), - &tcam_cmd, &out_size); - if (err || tcam_cmd.head.status || !out_size) { - nic_err(nic_cfg->dev_hdl, "Del tcam rule failed, err: %d, status: 0x%x, out size: 0x%x\n", - err, tcam_cmd.head.status, out_size); - return -EIO; - } - - return 0; -} - -/** - * spnic_mgmt_tcam_block - alloc or free tcam block for IO packet. - * - * @param hwdev - * The hardware interface of a nic device. - * @param alloc_en - * 1 alloc block. - * 0 free block. - * @param index - * block index from firmware. - * @return - * 0 on success, - * negative error value otherwise. - */ -static int spnic_mgmt_tcam_block(void *hwdev, u8 alloc_en, u16 *index) -{ - struct nic_cmd_ctrl_tcam_block_out tcam_block_info; - u16 out_size = sizeof(struct nic_cmd_ctrl_tcam_block_out); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev || !index) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - memset(&tcam_block_info, 0, sizeof(struct nic_cmd_ctrl_tcam_block_out)); - - tcam_block_info.func_id = sphw_global_func_id(hwdev); - tcam_block_info.alloc_en = alloc_en; - tcam_block_info.tcam_type = SPNIC_TCAM_BLOCK_NORMAL_TYPE; - tcam_block_info.tcam_block_index = *index; - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_CFG_TCAM_BLOCK, - &tcam_block_info, sizeof(tcam_block_info), - &tcam_block_info, &out_size); - if (err || tcam_block_info.head.status || !out_size) { - nic_err(nic_cfg->dev_hdl, - "Set tcam block failed, err: %d, status: 0x%x, out size: 0x%x\n", - err, tcam_block_info.head.status, out_size); - return -EIO; - } - - if (alloc_en) - *index = tcam_block_info.tcam_block_index; - - return 0; -} - -int spnic_alloc_tcam_block(void *hwdev, u16 *index) -{ - return spnic_mgmt_tcam_block(hwdev, SPNIC_TCAM_BLOCK_ENABLE, index); -} - -int spnic_free_tcam_block(void *hwdev, u16 *index) -{ - return spnic_mgmt_tcam_block(hwdev, SPNIC_TCAM_BLOCK_DISABLE, index); -} - -int spnic_set_fdir_tcam_rule_filter(void *hwdev, bool enable) -{ - struct nic_cmd_set_tcam_enable port_tcam_cmd; - u16 out_size = sizeof(port_tcam_cmd); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - memset(&port_tcam_cmd, 0, sizeof(port_tcam_cmd)); - port_tcam_cmd.func_id = sphw_global_func_id(hwdev); - port_tcam_cmd.tcam_enable = (u8)enable; - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_ENABLE_TCAM, - &port_tcam_cmd, sizeof(port_tcam_cmd), - &port_tcam_cmd, &out_size); - if (err || port_tcam_cmd.head.status || !out_size) { - nic_err(nic_cfg->dev_hdl, "Set fdir tcam filter failed, err: %d, status: 0x%x, out size: 0x%x, enable: 0x%x\n", - err, port_tcam_cmd.head.status, out_size, - enable); - return -EIO; - } - - return 0; -} - -int spnic_flush_tcam_rule(void *hwdev) -{ - struct nic_cmd_flush_tcam_rules tcam_flush; - u16 out_size = sizeof(struct nic_cmd_flush_tcam_rules); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - memset(&tcam_flush, 0, sizeof(struct 
nic_cmd_flush_tcam_rules)); - tcam_flush.func_id = sphw_global_func_id(hwdev); - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_FLUSH_TCAM, - &tcam_flush, - sizeof(struct nic_cmd_flush_tcam_rules), - &tcam_flush, &out_size); - if (err || tcam_flush.head.status || !out_size) { - nic_err(nic_cfg->dev_hdl, - "Flush tcam fdir rules failed, err: %d, status: 0x%x, out size: 0x%x\n", - err, tcam_flush.head.status, out_size); - return -EIO; - } - - return 0; -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg.h b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg.h deleted file mode 100644 index f280b41fe362..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg.h +++ /dev/null @@ -1,709 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_NIC_CFG_H -#define SPNIC_NIC_CFG_H - -#include <linux/if_link.h> - -#include "spnic_nic_cmd.h" -#include "spnic_mgmt_interface.h" -#include "spnic_mag_cmd.h" - -#define OS_VF_ID_TO_HW(os_vf_id) ((os_vf_id) + 1) -#define HW_VF_ID_TO_OS(hw_vf_id) ((hw_vf_id) - 1) - -#define SPNIC_VLAN_PRIORITY_SHIFT 13 - -#define SPNIC_RSS_KEY_RSV_NUM 2 - -#define SPNIC_MIN_MTU_SIZE 256 -#define SPNIC_MAX_JUMBO_FRAME_SIZE 9600 - -#define SPNIC_PF_SET_VF_ALREADY 0x4 -#define SPNIC_MGMT_STATUS_EXIST 0x6 - -#define SPNIC_LOWEST_LATENCY 1 -#define SPNIC_MULTI_VM_LATENCY 32 -#define SPNIC_MULTI_VM_PENDING_LIMIT 4 -#define SPNIC_RX_RATE_LOW 400000 -#define SPNIC_RX_COAL_TIME_LOW 16 -#define SPNIC_RX_PENDING_LIMIT_LOW 2 -#define SPNIC_RX_RATE_HIGH 1000000 -#define SPNIC_RX_COAL_TIME_HIGH 225 -#define SPNIC_RX_PENDING_LIMIT_HIGH 8 -#define SPNIC_RX_RATE_THRESH 50000 -#define SPNIC_TX_RATE_THRESH 50000 -#define SPNIC_RX_RATE_LOW_VM 100000 -#define SPNIC_RX_PENDING_LIMIT_HIGH_VM 87 - -enum spnic_valid_link_settings { - HILINK_LINK_SET_SPEED = 0x1, - HILINK_LINK_SET_AUTONEG = 0x2, - HILINK_LINK_SET_FEC = 0x4, -}; - -struct spnic_link_ksettings { - u32 valid_bitmap; - u8 speed; /* enum nic_speed_level */ - u8 autoneg; /* 0 - off; 1 - on */ - u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */ -}; - -u64 spnic_get_feature_cap(void *hwdev); - -#define SPNIC_SUPPORT_FEATURE(hwdev, feature) (spnic_get_feature_cap(hwdev) & NIC_F_##feature) -#define SPNIC_SUPPORT_CSUM(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, CSUM) -#define SPNIC_SUPPORT_SCTP_CRC(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, SCTP_CRC) -#define SPNIC_SUPPORT_TSO(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, TSO) -#define SPNIC_SUPPORT_UFO(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, UFO) -#define SPNIC_SUPPORT_LRO(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, LRO) -#define SPNIC_SUPPORT_RSS(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, RSS) -#define SPNIC_SUPPORT_RXVLAN_FILTER(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, RX_VLAN_FILTER) -#define SPNIC_SUPPORT_VLAN_OFFLOAD(hwdev) (SPNIC_SUPPORT_FEATURE(hwdev, RX_VLAN_STRIP) && \ - SPNIC_SUPPORT_FEATURE(hwdev, TX_VLAN_INSERT)) -#define SPNIC_SUPPORT_VXLAN_OFFLOAD(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, VXLAN_OFFLOAD) -#define SPNIC_SUPPORT_IPSEC_OFFLOAD(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, IPSEC_OFFLOAD) -#define SPNIC_SUPPORT_FDIR(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, FDIR) -#define SPNIC_SUPPORT_PROMISC(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, PROMISC) -#define SPNIC_SUPPORT_ALLMULTI(hwdev) SPNIC_SUPPORT_FEATURE(hwdev, ALLMULTI) - -struct nic_rss_type { - u8 tcp_ipv6_ext; - u8 ipv6_ext; - u8 tcp_ipv6; - u8 ipv6; - u8 tcp_ipv4; - u8 ipv4; - u8 udp_ipv6; - u8 udp_ipv4; -}; - -enum spnic_rss_hash_type { - SPNIC_RSS_HASH_ENGINE_TYPE_XOR = 0, - 
SPNIC_RSS_HASH_ENGINE_TYPE_TOEP, - SPNIC_RSS_HASH_ENGINE_TYPE_MAX, -}; - -/* rss */ -struct nic_rss_indirect_tbl { - u32 rsvd[4]; /* Make sure that 16B beyond entry[] */ - u16 entry[SPNIC_RSS_INDIR_SIZE]; -}; - -struct nic_rss_context_tbl { - u32 rsvd[4]; - u32 ctx; -}; - -#define NIC_CONFIG_ALL_QUEUE_VLAN_CTX 0xFFFF -struct nic_vlan_ctx { - u32 func_id; - u32 qid; /* if qid = 0xFFFF, config current function all queue */ - u32 vlan_tag; - u32 vlan_mode; - u32 vlan_sel; -}; - -enum spnic_link_status { - SPNIC_LINK_DOWN = 0, - SPNIC_LINK_UP -}; - -struct nic_port_info { - u8 port_type; - u8 autoneg_cap; - u8 autoneg_state; - u8 duplex; - u8 speed; - u8 fec; - u32 supported_mode; - u32 advertised_mode; -}; - -struct nic_pause_config { - u8 auto_neg; - u8 rx_pause; - u8 tx_pause; -}; - -#define MODULE_TYPE_SFP 0x3 -#define MODULE_TYPE_QSFP28 0x11 -#define MODULE_TYPE_QSFP 0x0C -#define MODULE_TYPE_QSFP_PLUS 0x0D - -#define TCAM_IP_TYPE_MASK 0x1 -#define TCAM_TUNNEL_TYPE_MASK 0xF -#define TCAM_FUNC_ID_MASK 0x7FFF - -struct spnic_tcam_key_ipv4_mem { - u32 rsvd1:4; - u32 tunnel_type:4; - u32 ip_proto:8; - u32 rsvd0:16; - u32 sipv4_h:16; - u32 ip_type:1; - u32 function_id:15; - u32 dipv4_h:16; - u32 sipv4_l:16; - u32 rsvd2:16; - u32 dipv4_l:16; - u32 rsvd3; - u32 dport:16; - u32 rsvd4:16; - u32 rsvd5:16; - u32 sport:16; - u32 outer_sipv4_h:16; - u32 rsvd6:16; - u32 outer_dipv4_h:16; - u32 outer_sipv4_l:16; - u32 vni_h:16; - u32 outer_dipv4_l:16; - u32 rsvd7:16; - u32 vni_l:16; -}; - -struct spnic_tcam_key_ipv6_mem { - u32 rsvd1:4; - u32 tunnel_type:4; - u32 ip_proto:8; - u32 rsvd0:16; - u32 sipv6_key0:16; - u32 ip_type:1; - u32 function_id:15; - u32 sipv6_key2:16; - u32 sipv6_key1:16; - u32 sipv6_key4:16; - u32 sipv6_key3:16; - u32 sipv6_key6:16; - u32 sipv6_key5:16; - u32 dport:16; - u32 sipv6_key7:16; - u32 dipv6_key0:16; - u32 sport:16; - u32 dipv6_key2:16; - u32 dipv6_key1:16; - u32 dipv6_key4:16; - u32 dipv6_key3:16; - u32 dipv6_key6:16; - u32 dipv6_key5:16; - u32 rsvd2:16; - u32 dipv6_key7:16; -}; - -struct tag_tcam_key { - union { - struct spnic_tcam_key_ipv4_mem key_info; - struct spnic_tcam_key_ipv6_mem key_info_ipv6; - }; - - union { - struct spnic_tcam_key_ipv4_mem key_mask; - struct spnic_tcam_key_ipv6_mem key_mask_ipv6; - }; -}; - -int spnic_add_tcam_rule(void *hwdev, struct nic_tcam_cfg_rule *tcam_rule); -int spnic_del_tcam_rule(void *hwdev, u32 index); - -int spnic_alloc_tcam_block(void *hwdev, u16 *index); -int spnic_free_tcam_block(void *hwdev, u16 *index); - -int spnic_set_fdir_tcam_rule_filter(void *hwdev, bool enable); - -int spnic_flush_tcam_rule(void *hwdev); - -/* * - * @brief spnic_update_mac - update mac address to hardware - * @param hwdev: device pointer to hwdev - * @param old_mac: old mac to delete - * @param new_mac: new mac to update - * @param vlan_id: vlan id - * @param func_id: function index - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_update_mac(void *hwdev, u8 *old_mac, u8 *new_mac, u16 vlan_id, u16 func_id); - -/* * - * @brief spnic_get_default_mac - get default mac address - * @param hwdev: device pointer to hwdev - * @param mac_addr: mac address from hardware - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_get_default_mac(void *hwdev, u8 *mac_addr); - -/* * - * @brief spnic_set_port_mtu - set function mtu - * @param hwdev: device pointer to hwdev - * @param new_mtu: mtu - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_set_port_mtu(void *hwdev, u16 new_mtu); - -/* * - * @brief 
spnic_get_link_state - get link state - * @param hwdev: device pointer to hwdev - * @param link_state: link state, 0-link down, 1-link up - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_get_link_state(void *hwdev, u8 *link_state); - -/* * - * @brief spnic_get_vport_stats - get function stats - * @param hwdev: device pointer to hwdev - * @param stats: function stats - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_get_vport_stats(void *hwdev, struct spnic_vport_stats *stats); - -/* * - * @brief spnic_notify_all_vfs_link_changed - notify to all vfs link changed - * @param hwdev: device pointer to hwdev - * @param link_status: link state, 0-link down, 1-link up - */ -void spnic_notify_all_vfs_link_changed(void *hwdev, u8 link_status); - -/* * - * @brief spnic_force_drop_tx_pkt - force drop tx packet - * @param hwdev: device pointer to hwdev - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_force_drop_tx_pkt(void *hwdev); - -/* * - * @brief spnic_set_rx_mode - set function rx mode - * @param hwdev: device pointer to hwdev - * @param enable: rx mode state - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_set_rx_mode(void *hwdev, u32 enable); - -/* * - * @brief spnic_set_rx_vlan_offload - set function vlan offload valid state - * @param hwdev: device pointer to hwdev - * @param en: 0-disable, 1-enable - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_set_rx_vlan_offload(void *hwdev, u8 en); - -/* * - * @brief spnic_set_rx_lro_state - set rx LRO configuration - * @param hwdev: device pointer to hwdev - * @param lro_en: 0-disable, 1-enable - * @param lro_timer: LRO aggregation timeout - * @param lro_max_pkt_len: LRO coalesce packet size(unit is 1K) - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_set_rx_lro_state(void *hwdev, u8 lro_en, u32 lro_timer, u32 lro_max_pkt_len); - -/* * - * @brief spnic_set_vf_spoofchk - set vf spoofchk - * @param hwdev: device pointer to hwdev - * @param vf_id: vf id - * @param spoofchk: spoofchk - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_set_vf_spoofchk(void *hwdev, u16 vf_id, bool spoofchk); - -/* * - * @brief spnic_vf_info_spoofchk - get vf spoofchk info - * @param hwdev: device pointer to hwdev - * @param vf_id: vf id - * @retval spoofchk state - */ -bool spnic_vf_info_spoofchk(void *hwdev, int vf_id); - -/* * - * @brief spnic_add_vf_vlan - add vf vlan id - * @param hwdev: device pointer to hwdev - * @param vf_id: vf id - * @param vlan: vlan id - * @param qos: qos - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_add_vf_vlan(void *hwdev, int vf_id, u16 vlan, u8 qos); - -/* * - * @brief spnic_kill_vf_vlan - kill vf vlan - * @param hwdev: device pointer to hwdev - * @param vf_id: vf id - * @param vlan: vlan id - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_kill_vf_vlan(void *hwdev, int vf_id); - -/* * - * @brief spnic_set_vf_mac - set vf mac - * @param hwdev: device pointer to hwdev - * @param vf_id: vf id - * @param mac_addr: vf mac address - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_set_vf_mac(void *hwdev, int vf_id, unsigned char *mac_addr); - -/* * - * @brief spnic_vf_info_vlanprio - get vf vlan priority - * @param hwdev: device pointer to hwdev - * @param vf_id: vf id - * @retval zero: vlan priority - */ -u16 spnic_vf_info_vlanprio(void *hwdev, int vf_id); - -/* * - * @brief spnic_set_vf_tx_rate - set vf tx rate - * @param 
hwdev: device pointer to hwdev - * @param vf_id: vf id - * @param max_rate: max rate - * @param min_rate: min rate - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_set_vf_tx_rate(void *hwdev, u16 vf_id, u32 max_rate, u32 min_rate); - -/* * - * @brief spnic_set_vf_tx_rate - set vf tx rate - * @param hwdev: device pointer to hwdev - * @param vf_id: vf id - * @param ivi: vf info - * @retval zero: success - * @retval non-zero: failure - */ -void spnic_get_vf_config(void *hwdev, u16 vf_id, struct ifla_vf_info *ivi); - -/* * - * @brief spnic_set_vf_link_state - set vf link state - * @param hwdev: device pointer to hwdev - * @param vf_id: vf id - * @param link: link state - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_set_vf_link_state(void *hwdev, u16 vf_id, int link); - -/* * - * @brief spnic_get_port_info - set port info - * @param hwdev: device pointer to hwdev - * @param port_info: port info - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_get_port_info(void *hwdev, struct nic_port_info *port_info, u16 channel); - -/* * - * @brief spnic_set_rss_type - set rss type - * @param hwdev: device pointer to hwdev - * @param rss_type: rss type - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_set_rss_type(void *hwdev, struct nic_rss_type rss_type); - -/* * - * @brief spnic_get_rss_type - get rss type - * @param hwdev: device pointer to hwdev - * @param rss_type: rss type - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_get_rss_type(void *hwdev, struct nic_rss_type *rss_type); - -/* * - * @brief spnic_rss_get_hash_engine - get rss hash engine - * @param hwdev: device pointer to hwdev - * @param type: hash engine - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_rss_get_hash_engine(void *hwdev, u8 *type); - -/* * - * @brief spnic_rss_set_hash_engine - set rss hash engine - * @param hwdev: device pointer to hwdev - * @param type: hash engine - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_rss_set_hash_engine(void *hwdev, u8 type); - -/* * - * @brief spnic_rss_cfg - set rss configuration - * @param hwdev: device pointer to hwdev - * @param rss_en: enable rss flag - * @param type: number of TC - * @param prio_tc: priorityof TC - * @param num_qps: number of queue - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_rss_cfg(void *hwdev, u8 rss_en, u8 tc_num, u8 *prio_tc, u16 num_qps); - -/* * - * @brief spnic_rss_set_template_tbl - set template table - * @param hwdev: device pointer to hwdev - * @param key: rss key - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_rss_set_hash_key(void *hwdev, const u8 *key); - -/* * - * @brief spnic_rss_get_template_tbl - get template table - * @param hwdev: device pointer to hwdev - * @param key: rss key - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_rss_get_hash_key(void *hwdev, u8 *key); - -/* * - * @brief spnic_refresh_nic_cfg - refresh port cfg - * @param hwdev: device pointer to hwdev - * @param port_info: port information - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_refresh_nic_cfg(void *hwdev, struct nic_port_info *port_info); - -/* * - * @brief spnic_add_vlan - add vlan - * @param hwdev: device pointer to hwdev - * @param vlan_id: vlan id - * @param func_id: function id - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_add_vlan(void *hwdev, u16 vlan_id, u16 
func_id); - -/* * - * @brief spnic_del_vlan - delete vlan - * @param hwdev: device pointer to hwdev - * @param vlan_id: vlan id - * @param func_id: function id - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_del_vlan(void *hwdev, u16 vlan_id, u16 func_id); - -/* * - * @brief spnic_set_mac - set mac address - * @param hwdev: device pointer to hwdev - * @param mac_addr: mac address from hardware - * @param vlan_id: vlan id - * @param func_id: function index - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_set_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id, u16 channel); - -/* * - * @brief spnic_del_mac - delete mac address - * @param hwdev: device pointer to hwdev - * @param mac_addr: mac address from hardware - * @param vlan_id: vlan id - * @param func_id: function index - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_del_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id, u16 channel); - -/* * - * @brief spnic_set_vport_enable - set function valid status - * @param hwdev: device pointer to hwdev - * @param func_id: global function index - * @param enable: 0-disable, 1-enable - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_set_vport_enable(void *hwdev, u16 func_id, bool enable, u16 channel); - -/* * - * @brief spnic_set_port_enable - set port status - * @param hwdev: device pointer to hwdev - * @param enable: 0-disable, 1-enable - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_set_port_enable(void *hwdev, bool enable, u16 channel); - -/* * - * @brief spnic_flush_qps_res - flush queue pairs resource in hardware - * @param hwdev: device pointer to hwdev - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_flush_qps_res(void *hwdev); - -/* * - * @brief spnic_init_nic_hwdev - init nic hwdev - * @param hwdev: device pointer to hwdev - * @param pcidev_hdl: pointer to pcidev or handler - * @param dev_hdl: pointer to pcidev->dev or handler, for sdk_err() or - * dma_alloc() - * @param rx_buff_len: rx_buff_len is receive buffer length - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_init_nic_hwdev(void *hwdev, void *pcidev_hdl, void *dev_hdl, u16 rx_buff_len); - -/* * - * @brief spnic_free_nic_hwdev - free nic hwdev - * @param hwdev: device pointer to hwdev - * @retval zero: success - * @retval non-zero: failure - */ -void spnic_free_nic_hwdev(void *hwdev); - -/* * - * @brief spnic_get_speed - set link speed - * @param hwdev: device pointer to hwdev - * @param port_info: link speed - * @param channel: channel id - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_get_speed(void *hwdev, enum mag_cmd_port_speed *speed, u16 channel); - -int spnic_get_dcb_state(void *hwdev, struct spnic_dcb_state *dcb_state); - -int spnic_get_pf_dcb_state(void *hwdev, struct spnic_dcb_state *dcb_state); - -/* * - * @brief spnic_create_qps - create queue pairs - * @param hwdev: device pointer to hwdev - * @param num_qp: number of queue pairs - * @param sq_depth: sq depth - * @param rq_depth: rq depth - * @param qps_msix_arry: msix info - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_create_qps(void *hwdev, u16 num_qp, u32 sq_depth, u32 rq_depth, - struct irq_info *qps_msix_arry); - -/* * - * @brief spnic_destroy_qps - destroy queue pairs - * @param hwdev: device pointer to 
hwdev - */ -void spnic_destroy_qps(void *hwdev); - -enum spnic_queue_type { - SPNIC_SQ, - SPNIC_RQ, - SPNIC_MAX_QUEUE_TYPE -}; - -/* * - * @brief spnic_get_nic_queue - get nic queue - * @param hwdev: device pointer to hwdev - * @param q_id: queue index - * @param q_type: queue type - * @retval queue address - */ -void *spnic_get_nic_queue(void *hwdev, u16 q_id, enum spnic_queue_type q_type); - -/* * - * @brief spnic_init_qp_ctxts - init queue pair context - * @param hwdev: device pointer to hwdev - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_init_qp_ctxts(void *hwdev); - -/* * - * @brief spnic_free_qp_ctxts - free queue pairs - * @param hwdev: device pointer to hwdev - */ -void spnic_free_qp_ctxts(void *hwdev); - -/* * - * @brief spnic_rss_set_indir_tbl - set rss indirect table - * @param hwdev: device pointer to hwdev - * @param indir_table: rss indirect table - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_rss_set_indir_tbl(void *hwdev, const u32 *indir_table); - -/* * - * @brief spnic_rss_get_indir_tbl - get rss indirect table - * @param hwdev: device pointer to hwdev - * @param indir_table: rss indirect table - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_rss_get_indir_tbl(void *hwdev, u32 *indir_table); - -/* * - * @brief spnic_get_phy_port_stats - get port stats - * @param hwdev: device pointer to hwdev - * @param stats: port stats - * @retval zero: success - * @retval non-zero: failure - */ -int spnic_get_phy_port_stats(void *hwdev, struct mag_cmd_port_stats *stats); - -int spnic_set_port_funcs_state(void *hwdev, bool enable); - -int spnic_reset_port_link_cfg(void *hwdev); - -int spnic_force_port_relink(void *hwdev); - -int spnic_set_dcb_state(void *hwdev, struct spnic_dcb_state *dcb_state); - -int spnic_dcb_set_pfc(void *hwdev, u8 pfc_en, u8 pfc_bitmap); - -int spnic_dcb_get_pfc(void *hwdev, u8 *pfc_en_bitmap); - -int spnic_dcb_set_ets(void *hwdev, u8 *cos_tc, u8 *cos_bw, u8 *cos_prio, u8 *tc_bw, u8 *tc_prio); - -int spnic_dcb_set_cos_up_map(void *hwdev, u8 cos_valid_bitmap, u8 *cos_up, u8 max_cos_num); - -int spnic_get_pause_info(void *hwdev, struct nic_pause_config *nic_pause); - -int spnic_set_pause_info(void *hwdev, struct nic_pause_config nic_pause); - -int spnic_set_link_settings(void *hwdev, struct spnic_link_ksettings *settings); - -int spnic_set_vlan_fliter(void *hwdev, u32 vlan_filter_ctrl); - -void spnic_clear_vfs_info(void *hwdev); - -int spnic_update_mac_vlan(void *hwdev, u16 old_vlan, u16 new_vlan, int vf_id); - -int spnic_set_led_status(void *hwdev, enum mag_led_type type, enum mag_led_mode mode); - -int spnic_set_func_capture_en(void *hwdev, u16 func_id, bool cap_en); - -int spnic_set_loopback_mode(void *hwdev, u8 mode, u8 enable); -int spnic_get_loopback_mode(void *hwdev, u8 *mode, u8 *enable); - -bool spnic_get_vf_trust(void *hwdev, int vf_id); -int spnic_set_vf_trust(void *hwdev, u16 vf_id, bool trust); - -int spnic_set_autoneg(void *hwdev, bool enable); - -int spnic_get_sfp_type(void *hwdev, u8 *sfp_type, u8 *sfp_type_ext); -int spnic_get_sfp_eeprom(void *hwdev, u8 *data, u32 len); - -int spnic_set_nic_feature_to_hw(void *hwdeve); -void spnic_update_nic_feature(void *hwdev, u64 feature); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg_vf.c b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg_vf.c deleted file mode 100644 index 634f82153dff..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cfg_vf.c +++ /dev/null @@ -1,658 +0,0 @@ -// 
SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt - -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/etherdevice.h> -#include <linux/if_vlan.h> -#include <linux/ethtool.h> -#include <linux/kernel.h> -#include <linux/device.h> -#include <linux/pci.h> -#include <linux/netdevice.h> -#include <linux/module.h> - -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "spnic_nic_io.h" -#include "spnic_nic_cfg.h" -#include "spnic_nic.h" -#include "spnic_nic_cmd.h" - -static unsigned char set_vf_link_state; -module_param(set_vf_link_state, byte, 0444); -MODULE_PARM_DESC(set_vf_link_state, "Set vf link state, 0 represents link auto, 1 represents link always up, 2 represents link always down. - default is 0."); - -/* In order to adapt different linux version */ -enum { - SPNIC_IFLA_VF_LINK_STATE_AUTO, /* link state of the uplink */ - SPNIC_IFLA_VF_LINK_STATE_ENABLE, /* link always up */ - SPNIC_IFLA_VF_LINK_STATE_DISABLE, /* link always down */ -}; - -#define NIC_CVLAN_INSERT_ENABLE 0x1 -#define NIC_QINQ_INSERT_ENABLE 0X3 -static int spnic_set_vlan_ctx(struct spnic_nic_cfg *nic_cfg, u16 func_id, - u16 vlan_tag, u16 q_id, bool add) -{ - struct nic_vlan_ctx *vlan_ctx = NULL; - struct sphw_cmd_buf *cmd_buf = NULL; - u64 out_param = 0; - int err; - - cmd_buf = sphw_alloc_cmd_buf(nic_cfg->hwdev); - if (!cmd_buf) { - nic_err(nic_cfg->dev_hdl, "Failed to allocate cmd buf\n"); - return -ENOMEM; - } - - cmd_buf->size = sizeof(struct nic_vlan_ctx); - vlan_ctx = (struct nic_vlan_ctx *)cmd_buf->buf; - - vlan_ctx->func_id = func_id; - vlan_ctx->qid = q_id; - vlan_ctx->vlan_tag = vlan_tag; - vlan_ctx->vlan_sel = 0; /* TPID0 in IPSU */ - vlan_ctx->vlan_mode = add ? 
- NIC_QINQ_INSERT_ENABLE : NIC_CVLAN_INSERT_ENABLE; - - sphw_cpu_to_be32(vlan_ctx, sizeof(struct nic_vlan_ctx)); - - err = sphw_cmdq_direct_resp(nic_cfg->hwdev, SPHW_MOD_L2NIC, SPNIC_UCODE_CMD_MODIFY_VLAN_CTX, - cmd_buf, &out_param, 0, SPHW_CHANNEL_NIC); - - sphw_free_cmd_buf(nic_cfg->hwdev, cmd_buf); - - if (err || out_param != 0) { - nic_err(nic_cfg->dev_hdl, "Failed to set vlan context, err: %d, out_param: 0x%llx\n", - err, out_param); - return -EFAULT; - } - - return err; -} - -int spnic_cfg_vf_vlan(struct spnic_nic_cfg *nic_cfg, u8 opcode, u16 vid, u8 qos, int vf_id) -{ - struct spnic_cmd_vf_vlan_config vf_vlan; - u16 out_size = sizeof(vf_vlan); - u16 glb_func_id; - int err; - u16 vlan_tag; - - /* VLAN 0 is a special case, don't allow it to be removed */ - if (!vid && opcode == SPNIC_CMD_OP_DEL) - return 0; - - memset(&vf_vlan, 0, sizeof(vf_vlan)); - - vf_vlan.opcode = opcode; - vf_vlan.func_id = sphw_glb_pf_vf_offset(nic_cfg->hwdev) + (u16)vf_id; - vf_vlan.vlan_id = vid; - vf_vlan.qos = qos; - - err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_CFG_VF_VLAN, - &vf_vlan, sizeof(vf_vlan), &vf_vlan, &out_size); - if (err || !out_size || vf_vlan.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to set VF %d vlan, err: %d, status: 0x%x,out size: 0x%x\n", - HW_VF_ID_TO_OS(vf_id), err, vf_vlan.msg_head.status, out_size); - return -EFAULT; - } - - vlan_tag = vid + (u16)(qos << VLAN_PRIO_SHIFT); - - glb_func_id = sphw_glb_pf_vf_offset(nic_cfg->hwdev) + (u16)vf_id; - err = spnic_set_vlan_ctx(nic_cfg, glb_func_id, vlan_tag, NIC_CONFIG_ALL_QUEUE_VLAN_CTX, - opcode == SPNIC_CMD_OP_ADD); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to set VF %d vlan ctx, err: %d\n", - HW_VF_ID_TO_OS(vf_id), err); - - /* rollback vlan config */ - if (opcode == SPNIC_CMD_OP_DEL) - vf_vlan.opcode = SPNIC_CMD_OP_ADD; - else - vf_vlan.opcode = SPNIC_CMD_OP_DEL; - l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_CFG_VF_VLAN, &vf_vlan, - sizeof(vf_vlan), &vf_vlan, &out_size); - return err; - } - - return 0; -} - -/* this function just be called by spnic_ndo_set_vf_mac, - * others are not permitted. 
- */ -int spnic_set_vf_mac(void *hwdev, int vf, unsigned char *mac_addr) -{ - struct vf_data_storage *vf_info; - struct spnic_nic_cfg *nic_cfg; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - vf_info = nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf); - - /* duplicate request, so just return success */ - if (ether_addr_equal(vf_info->user_mac_addr, mac_addr)) - return 0; - - ether_addr_copy(vf_info->user_mac_addr, mac_addr); - - return 0; -} - -int spnic_add_vf_vlan(void *hwdev, int vf_id, u16 vlan, u8 qos) -{ - struct spnic_nic_cfg *nic_cfg; - int err; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - err = spnic_cfg_vf_vlan(nic_cfg, SPNIC_CMD_OP_ADD, vlan, qos, vf_id); - if (err) - return err; - - nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan = vlan; - nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos = qos; - - nic_info(nic_cfg->dev_hdl, "Setting VLAN %u, QOS 0x%x on VF %d\n", - vlan, qos, HW_VF_ID_TO_OS(vf_id)); - - return 0; -} - -int spnic_kill_vf_vlan(void *hwdev, int vf_id) -{ - struct vf_data_storage *vf_infos; - struct spnic_nic_cfg *nic_cfg; - int err; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - vf_infos = nic_cfg->vf_infos; - - err = spnic_cfg_vf_vlan(nic_cfg, SPNIC_CMD_OP_DEL, vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan, - vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos, vf_id); - if (err) - return err; - - nic_info(nic_cfg->dev_hdl, "Remove VLAN %u on VF %d\n", - vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan, HW_VF_ID_TO_OS(vf_id)); - - vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan = 0; - vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos = 0; - - return 0; -} - -u16 spnic_vf_info_vlanprio(void *hwdev, int vf_id) -{ - struct spnic_nic_cfg *nic_cfg; - u16 pf_vlan, vlanprio; - u8 pf_qos; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - pf_vlan = nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan; - pf_qos = nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos; - vlanprio = (u16)(pf_vlan | pf_qos << SPNIC_VLAN_PRIORITY_SHIFT); - - return vlanprio; -} - -int spnic_set_vf_link_state(void *hwdev, u16 vf_id, int link) -{ - struct spnic_nic_cfg *nic_cfg = - sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - struct vf_data_storage *vf_infos = nic_cfg->vf_infos; - u8 link_status = 0; - - switch (link) { - case SPNIC_IFLA_VF_LINK_STATE_AUTO: - vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = false; - vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = nic_cfg->link_status ? true : false; - link_status = nic_cfg->link_status; - break; - case SPNIC_IFLA_VF_LINK_STATE_ENABLE: - vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true; - vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = true; - link_status = SPNIC_LINK_UP; - break; - case SPNIC_IFLA_VF_LINK_STATE_DISABLE: - vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true; - vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = false; - link_status = SPNIC_LINK_DOWN; - break; - default: - return -EINVAL; - } - - /* Notify the VF of its new link state */ - spnic_notify_vf_link_status(nic_cfg, vf_id, link_status); - - return 0; -} - -int spnic_set_vf_spoofchk(void *hwdev, u16 vf_id, bool spoofchk) -{ - struct spnic_cmd_spoofchk_set spoofchk_cfg; - struct vf_data_storage *vf_infos = NULL; - u16 out_size = sizeof(spoofchk_cfg); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - vf_infos = nic_cfg->vf_infos; - - memset(&spoofchk_cfg, 0, sizeof(spoofchk_cfg)); - - spoofchk_cfg.func_id = sphw_glb_pf_vf_offset(hwdev) + vf_id; - spoofchk_cfg.state = spoofchk ? 
1 : 0; - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_SET_SPOOPCHK_STATE, - &spoofchk_cfg, sizeof(spoofchk_cfg), &spoofchk_cfg, &out_size); - if (err || !out_size || spoofchk_cfg.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to set VF(%d) spoofchk, err: %d, status: 0x%x, out size: 0x%x\n", - HW_VF_ID_TO_OS(vf_id), err, spoofchk_cfg.msg_head.status, out_size); - err = -EINVAL; - } - - vf_infos[HW_VF_ID_TO_OS(vf_id)].spoofchk = spoofchk; - - return err; -} - -bool spnic_vf_info_spoofchk(void *hwdev, int vf_id) -{ - struct spnic_nic_cfg *nic_cfg; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - return nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].spoofchk; -} - -int spnic_set_vf_trust(void *hwdev, u16 vf_id, bool trust) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (vf_id > nic_cfg->max_vfs) - return -EINVAL; - - nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].trust = trust; - - return 0; -} - -bool spnic_get_vf_trust(void *hwdev, int vf_id) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (vf_id > nic_cfg->max_vfs) - return -EINVAL; - - return nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].trust; -} - -static int spnic_cfg_vf_qps(struct spnic_nic_cfg *nic_cfg, u8 opcode, u16 vf_id, u16 num_qps) -{ - struct spnic_cmd_cfg_qps qps_info; - u16 out_size = sizeof(qps_info); - int err; - - memset(&qps_info, 0, sizeof(qps_info)); - - qps_info.func_id = sphw_glb_pf_vf_offset(nic_cfg->hwdev) + vf_id; - qps_info.opcode = opcode; - qps_info.num_qps = num_qps; - err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_CFG_FLEX_QUEUE, &qps_info, - sizeof(qps_info), &qps_info, &out_size); - if (err || !out_size || qps_info.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to %s VF(%d) qps, err: %d, status: 0x%x, out size: 0x%x\n", - opcode == SPNIC_CMD_OP_ALLOC ? 
"alloc" : "free", - HW_VF_ID_TO_OS(vf_id), err, qps_info.msg_head.status, out_size); - return -EFAULT; - } - - return 0; -} - -int spnic_alloc_vf_qps(void *hwdev, u16 vf_id, u16 num_qps) -{ - struct vf_data_storage *vf_infos = NULL; - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (vf_id > nic_cfg->max_vfs) - return -EINVAL; - - err = spnic_cfg_vf_qps(nic_cfg, SPNIC_CMD_OP_ALLOC, vf_id, num_qps); - if (err) - return err; - - vf_infos = nic_cfg->vf_infos; - vf_infos[HW_VF_ID_TO_OS(vf_id)].num_qps = num_qps; - - return 0; -} - -int spnic_free_vf_qps(void *hwdev, u16 vf_id) -{ - struct vf_data_storage *vf_infos = NULL; - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (vf_id > nic_cfg->max_vfs) - return -EINVAL; - - vf_infos = nic_cfg->vf_infos; - err = spnic_cfg_vf_qps(nic_cfg, SPNIC_CMD_OP_FREE, vf_id, - vf_infos[HW_VF_ID_TO_OS(vf_id)].num_qps); - if (err) - return err; - - vf_infos[HW_VF_ID_TO_OS(vf_id)].num_qps = 0; - - return 0; -} - -static int spnic_set_vf_tx_rate_max_min(struct spnic_nic_cfg *nic_cfg, u16 vf_id, - u32 max_rate, u32 min_rate) -{ - struct spnic_cmd_tx_rate_cfg rate_cfg; - u16 out_size = sizeof(rate_cfg); - int err; - - memset(&rate_cfg, 0, sizeof(rate_cfg)); - - rate_cfg.func_id = sphw_glb_pf_vf_offset(nic_cfg->hwdev) + vf_id; - rate_cfg.max_rate = max_rate; - rate_cfg.min_rate = min_rate; - err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_SET_MAX_MIN_RATE, - &rate_cfg, sizeof(rate_cfg), &rate_cfg, &out_size); - if (rate_cfg.msg_head.status || err || !out_size) { - nic_err(nic_cfg->dev_hdl, "Failed to set VF %d max rate %u, min rate %u, err: %d, status: 0x%x, out size: 0x%x\n", - HW_VF_ID_TO_OS(vf_id), max_rate, min_rate, err, - rate_cfg.msg_head.status, out_size); - return -EIO; - } - - return 0; -} - -int spnic_set_vf_tx_rate(void *hwdev, u16 vf_id, u32 max_rate, u32 min_rate) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - err = spnic_set_vf_tx_rate_max_min(nic_cfg, vf_id, max_rate, min_rate); - if (err) - return err; - - nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].max_rate = max_rate; - nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].min_rate = min_rate; - - return 0; -} - -void spnic_get_vf_config(void *hwdev, u16 vf_id, struct ifla_vf_info *ivi) -{ - struct vf_data_storage *vfinfo; - struct spnic_nic_cfg *nic_cfg; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - vfinfo = nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf_id); - - ivi->vf = HW_VF_ID_TO_OS(vf_id); - ether_addr_copy(ivi->mac, vfinfo->user_mac_addr); - ivi->vlan = vfinfo->pf_vlan; - ivi->qos = vfinfo->pf_qos; - - ivi->spoofchk = vfinfo->spoofchk; - - ivi->trusted = vfinfo->trust; - - ivi->max_tx_rate = vfinfo->max_rate; - ivi->min_tx_rate = vfinfo->min_rate; - - if (!vfinfo->link_forced) - ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; - else if (vfinfo->link_up) - ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; - else - ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; -} - -static int spnic_init_vf_infos(struct spnic_nic_cfg *nic_cfg, u16 vf_id) -{ - struct vf_data_storage *vf_infos = nic_cfg->vf_infos; - u8 vf_link_state; - - if (set_vf_link_state > SPNIC_IFLA_VF_LINK_STATE_DISABLE) { - nic_warn(nic_cfg->dev_hdl, "Module Parameter set_vf_link_state value %u is out of range, resetting to %d\n", - set_vf_link_state, 
SPNIC_IFLA_VF_LINK_STATE_AUTO); - set_vf_link_state = SPNIC_IFLA_VF_LINK_STATE_AUTO; - } - - vf_link_state = set_vf_link_state; - - switch (vf_link_state) { - case SPNIC_IFLA_VF_LINK_STATE_AUTO: - vf_infos[vf_id].link_forced = false; - break; - case SPNIC_IFLA_VF_LINK_STATE_ENABLE: - vf_infos[vf_id].link_forced = true; - vf_infos[vf_id].link_up = true; - break; - case SPNIC_IFLA_VF_LINK_STATE_DISABLE: - vf_infos[vf_id].link_forced = true; - vf_infos[vf_id].link_up = false; - break; - default: - nic_err(nic_cfg->dev_hdl, "Input parameter set_vf_link_state error: %u\n", - vf_link_state); - return -EINVAL; - } - - return 0; -} - -static int vf_func_register(struct spnic_nic_cfg *nic_cfg) -{ - struct spnic_cmd_register_vf register_info; - u16 out_size = sizeof(register_info); - int err; - - err = sphw_register_vf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_L2NIC, nic_cfg, - spnic_vf_event_handler); - if (err) - return err; - - err = sphw_register_vf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_HILINK, nic_cfg, - spnic_vf_mag_event_handler); - if (err) - goto reg_hilink_err; - - memset(®ister_info, 0, sizeof(register_info)); - register_info.op_register = 1; - err = sphw_mbox_to_pf(nic_cfg->hwdev, SPHW_MOD_L2NIC, SPNIC_NIC_CMD_VF_REGISTER, - ®ister_info, sizeof(register_info), ®ister_info, &out_size, 0, - SPHW_CHANNEL_NIC); - if (err || !out_size || register_info.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to register VF, err: %d, status: 0x%x, out size: 0x%x\n", - err, register_info.msg_head.status, out_size); - err = -EIO; - goto register_err; - } - - return 0; - -register_err: - sphw_unregister_vf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_HILINK); - -reg_hilink_err: - sphw_unregister_vf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_L2NIC); - - return err; -} - -static int pf_init_vf_infos(struct spnic_nic_cfg *nic_cfg) -{ - u32 size; - int err; - u16 i; - - err = sphw_register_mgmt_msg_cb(nic_cfg->hwdev, SPHW_MOD_L2NIC, nic_cfg, - spnic_pf_event_handler); - if (err) - return err; - - err = sphw_register_mgmt_msg_cb(nic_cfg->hwdev, SPHW_MOD_HILINK, nic_cfg, - spnic_pf_mag_event_handler); - if (err) - goto register_mag_mgmt_cb_err; - - nic_cfg->max_vfs = sphw_func_max_vf(nic_cfg->hwdev); - size = sizeof(*nic_cfg->vf_infos) * nic_cfg->max_vfs; - if (!size) - return 0; - - nic_cfg->vf_infos = kzalloc(size, GFP_KERNEL); - if (!nic_cfg->vf_infos) { - err = -ENOMEM; - goto alloc_vf_infos_err; - } - - for (i = 0; i < nic_cfg->max_vfs; i++) { - err = spnic_init_vf_infos(nic_cfg, i); - if (err) - goto init_vf_infos_err; - } - - err = sphw_register_pf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_L2NIC, nic_cfg, - spnic_pf_mbox_handler); - if (err) - goto register_nic_mbox_cb_err; - - err = sphw_register_pf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_HILINK, nic_cfg, - spnic_pf_mag_mbox_handler); - if (err) - goto register_mag_mbox_cb_err; - - return 0; - -register_mag_mbox_cb_err: - sphw_unregister_pf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_L2NIC); - -register_nic_mbox_cb_err: -init_vf_infos_err: - kfree(nic_cfg->vf_infos); - -alloc_vf_infos_err: - sphw_unregister_mgmt_msg_cb(nic_cfg->hwdev, SPHW_MOD_HILINK); - -register_mag_mgmt_cb_err: - sphw_unregister_mgmt_msg_cb(nic_cfg->hwdev, SPHW_MOD_L2NIC); - - return err; -} - -int spnic_vf_func_init(struct spnic_nic_cfg *nic_cfg) -{ - if (sphw_func_type(nic_cfg->hwdev) == TYPE_VF) - return vf_func_register(nic_cfg); - - return pf_init_vf_infos(nic_cfg); -} - -void spnic_vf_func_free(struct spnic_nic_cfg *nic_cfg) -{ - struct spnic_cmd_register_vf unregister; - u16 out_size = sizeof(unregister); - int err; - - 
memset(&unregister, 0, sizeof(unregister)); - unregister.op_register = 0; - if (sphw_func_type(nic_cfg->hwdev) == TYPE_VF) { - err = sphw_mbox_to_pf(nic_cfg->hwdev, SPHW_MOD_L2NIC, SPNIC_NIC_CMD_VF_REGISTER, - &unregister, sizeof(unregister), &unregister, &out_size, 0, - SPHW_CHANNEL_NIC); - if (err || !out_size || unregister.msg_head.status) - nic_err(nic_cfg->dev_hdl, "Failed to unregister VF, err: %d, status: 0x%x, out_size: 0x%x\n", - err, unregister.msg_head.status, out_size); - - sphw_unregister_vf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_L2NIC); - sphw_unregister_vf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_HILINK); - } else { - if (nic_cfg->vf_infos) { - sphw_unregister_pf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_L2NIC); - sphw_unregister_pf_mbox_cb(nic_cfg->hwdev, SPHW_MOD_HILINK); - spnic_clear_vfs_info(nic_cfg->hwdev); - kfree(nic_cfg->vf_infos); - } - - sphw_unregister_mgmt_msg_cb(nic_cfg->hwdev, SPHW_MOD_L2NIC); - sphw_unregister_mgmt_msg_cb(nic_cfg->hwdev, SPHW_MOD_HILINK); - } -} - -static void clear_vf_infos(void *hwdev, u16 vf_id) -{ - struct vf_data_storage *vf_infos; - struct spnic_nic_cfg *nic_cfg; - u16 func_id; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - func_id = sphw_glb_pf_vf_offset(hwdev) + vf_id; - vf_infos = nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf_id); - if (vf_infos->use_specified_mac) - spnic_del_mac(hwdev, vf_infos->drv_mac_addr, vf_infos->pf_vlan, - func_id, SPHW_CHANNEL_NIC); - - if (spnic_vf_info_vlanprio(hwdev, vf_id)) - spnic_kill_vf_vlan(hwdev, vf_id); - - if (vf_infos->max_rate) - spnic_set_vf_tx_rate(hwdev, vf_id, 0, 0); - - if (vf_infos->spoofchk) - spnic_set_vf_spoofchk(hwdev, vf_id, false); - - if (vf_infos->trust) - spnic_set_vf_trust(hwdev, vf_id, false); - - memset(vf_infos, 0, sizeof(*vf_infos)); - /* set vf_infos to default */ - spnic_init_vf_infos(nic_cfg, HW_VF_ID_TO_OS(vf_id)); -} - -void spnic_clear_vfs_info(void *hwdev) -{ - struct spnic_nic_cfg *nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - u16 i; - - for (i = 0; i < nic_cfg->max_vfs; i++) - clear_vf_infos(hwdev, OS_VF_ID_TO_HW(i)); -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cmd.h b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cmd.h deleted file mode 100644 index 689e84d90e97..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_cmd.h +++ /dev/null @@ -1,105 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_NIC_CMD_H -#define SPNIC_NIC_CMD_H - -/* Commands between NIC to MPU - */ -enum spnic_nic_cmd { - SPNIC_NIC_CMD_VF_REGISTER = 0, /* only for PFD and VFD */ - - /* FUNC CFG */ - SPNIC_NIC_CMD_SET_FUNC_TBL = 5, - SPNIC_NIC_CMD_SET_VPORT_ENABLE, - SPNIC_NIC_CMD_SET_RX_MODE, - SPNIC_NIC_CMD_SQ_CI_ATTR_SET, - SPNIC_NIC_CMD_GET_VPORT_STAT, - SPNIC_NIC_CMD_CLEAN_VPORT_STAT, - SPNIC_NIC_CMD_CLEAR_QP_RESOURCE, - SPNIC_NIC_CMD_CFG_FLEX_QUEUE, - /* LRO CFG */ - SPNIC_NIC_CMD_CFG_RX_LRO, - SPNIC_NIC_CMD_CFG_LRO_TIMER, - SPNIC_NIC_CMD_FEATURE_NEGO, - - /* MAC & VLAN CFG */ - SPNIC_NIC_CMD_GET_MAC = 20, - SPNIC_NIC_CMD_SET_MAC, - SPNIC_NIC_CMD_DEL_MAC, - SPNIC_NIC_CMD_UPDATE_MAC, - SPNIC_NIC_CMD_GET_ALL_DEFAULT_MAC, - - SPNIC_NIC_CMD_CFG_FUNC_VLAN, - SPNIC_NIC_CMD_SET_VLAN_FILTER_EN, - SPNIC_NIC_CMD_SET_RX_VLAN_OFFLOAD, - - /* SR-IOV */ - SPNIC_NIC_CMD_CFG_VF_VLAN = 40, - SPNIC_NIC_CMD_SET_SPOOPCHK_STATE, - /* RATE LIMIT */ - SPNIC_NIC_CMD_SET_MAX_MIN_RATE, - - /* RSS CFG */ - SPNIC_NIC_CMD_RSS_CFG = 60, - SPNIC_NIC_CMD_RSS_TEMP_MGR, - SPNIC_NIC_CMD_GET_RSS_CTX_TBL, - 
SPNIC_NIC_CMD_CFG_RSS_HASH_KEY,
-	SPNIC_NIC_CMD_CFG_RSS_HASH_ENGINE,
-	SPNIC_NIC_CMD_GET_INDIR_TBL,
-
-	/* DPI/FDIR */
-	SPNIC_NIC_CMD_ADD_TC_FLOW = 80,
-	SPNIC_NIC_CMD_DEL_TC_FLOW,
-	SPNIC_NIC_CMD_GET_TC_FLOW,
-	SPNIC_NIC_CMD_FLUSH_TCAM,
-	SPNIC_NIC_CMD_CFG_TCAM_BLOCK,
-	SPNIC_NIC_CMD_ENABLE_TCAM,
-	SPNIC_NIC_CMD_GET_TCAM_BLOCK,
-	SPNIC_NIC_CMD_CFG_DPI_TABLE_ID,
-
-	/* PORT CFG */
-	SPNIC_NIC_CMD_SET_PORT_ENABLE = 100,
-	SPNIC_NIC_CMD_CFG_PAUSE_INFO,
-
-	SPNIC_NIC_CMD_SET_PORT_CAR,
-	SPNIC_NIC_CMD_SET_ER_DROP_PKT,
-
-	SPNIC_NIC_CMD_VF_COS,
-	SPNIC_NIC_CMD_SETUP_COS_MAPPING,
-	SPNIC_NIC_CMD_SET_ETS,
-	SPNIC_NIC_CMD_SET_PFC,
-
-	SPNIC_NIC_CMD_TX_PAUSE_EXCP_NOTICE = 118,
-	SPNIC_NIC_CMD_INQUIRT_PAUSE_CFG = 119,
-
-	/* MISC */
-	SPNIC_NIC_CMD_BIOS_CFG = 120,
-	SPNIC_NIC_CMD_SET_FIRMWARE_CUSTOM_PACKETS_MSG,
-
-	/* DFX */
-	SPNIC_NIC_CMD_GET_SM_TABLE = 140,
-	SPNIC_NIC_CMD_RD_LINE_TBL,
-
-	SPNIC_NIC_CMD_SET_VHD_CFG = 161,
-
-	SPNIC_NIC_CMD_MAX = 256,
-};
-
-/* NIC CMDQ MODE */
-enum spnic_ucode_cmd {
-	SPNIC_UCODE_CMD_MODIFY_QUEUE_CTX = 0,
-	SPNIC_UCODE_CMD_CLEAN_QUEUE_CONTEXT,
-	SPNIC_UCODE_CMD_ARM_SQ,
-	SPNIC_UCODE_CMD_ARM_RQ,
-	SPNIC_UCODE_CMD_SET_RSS_INDIR_TABLE,
-	SPNIC_UCODE_CMD_SET_RSS_CONTEXT_TABLE,
-	SPNIC_UCODE_CMD_GET_RSS_INDIR_TABLE,
-	SPNIC_UCODE_CMD_GET_RSS_CONTEXT_TABLE,
-	SPNIC_UCODE_CMD_SET_IQ_ENABLE,
-	SPNIC_UCODE_CMD_SET_RQ_FLUSH = 10,
-	SPNIC_UCODE_CMD_MODIFY_VLAN_CTX,
-	SPNIC_UCODE_CMD_DPI_HASH_TABLE,
-};
-
-#endif /* SPNIC_NIC_CMD_H */
diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_dbg.c b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_dbg.c
deleted file mode 100644
index 08fe958a6d00..000000000000
--- a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_dbg.c
+++ /dev/null
@@ -1,151 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/types.h>
-
-#include "sphw_crm.h"
-#include "sphw_hw.h"
-#include "sphw_mt.h"
-#include "spnic_nic_qp.h"
-#include "spnic_nic_io.h"
-#include "spnic_nic_cfg.h"
-#include "spnic_nic.h"
-
-int spnic_dbg_get_wqe_info(void *hwdev, u16 q_id, u16 idx, u16 wqebb_cnt,
-			   u8 *wqe, u16 *wqe_size, enum spnic_queue_type q_type)
-{
-	struct spnic_io_queue *queue = NULL;
-	struct spnic_nic_cfg *nic_cfg = NULL;
-	void *src_wqebb = NULL;
-	u32 i, offset;
-
-	if (!hwdev) {
-		pr_err("hwdev is NULL.\n");
-		return -EINVAL;
-	}
-
-	nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC);
-
-	if (q_id >= nic_cfg->num_qps) {
-		pr_err("q_id[%u] > num_qps_cfg[%u].\n", q_id, nic_cfg->num_qps);
-		return -EINVAL;
-	}
-
-	queue = (q_type == SPNIC_SQ) ?
&nic_cfg->sq[q_id] : &nic_cfg->rq[q_id]; - - if ((idx + wqebb_cnt) > queue->wq.q_depth) { - pr_err("(idx[%u] + idx[%u]) > q_depth[%u].\n", - idx, wqebb_cnt, queue->wq.q_depth); - return -EINVAL; - } - - if (*wqe_size != (queue->wq.wqebb_size * wqebb_cnt)) { - pr_err("Unexpect out buf size from user :%u, expect: %d\n", - *wqe_size, (queue->wq.wqebb_size * wqebb_cnt)); - return -EINVAL; - } - - for (i = 0; i < wqebb_cnt; i++) { - src_wqebb = sphw_wq_wqebb_addr(&queue->wq, WQ_MASK_IDX(&queue->wq, idx + i)); - offset = queue->wq.wqebb_size * i; - memcpy(wqe + offset, src_wqebb, queue->wq.wqebb_size); - } - - return 0; -} - -int spnic_dbg_get_sq_info(void *hwdev, u16 q_id, struct nic_sq_info *sq_info, u32 msg_size) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - struct spnic_io_queue *sq = NULL; - - if (!hwdev || !sq_info) { - pr_err("hwdev or sq_info is NULL.\n"); - return -EINVAL; - } - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - if (q_id >= nic_cfg->num_qps) { - nic_err(nic_cfg->dev_hdl, "Input queue id(%u) is larger than the actual queue number\n", - q_id); - return -EINVAL; - } - - if (msg_size != sizeof(*sq_info)) { - nic_err(nic_cfg->dev_hdl, "Unexpect out buf size from user :%u, expect: %lu\n", - msg_size, sizeof(*sq_info)); - return -EINVAL; - } - - sq = &nic_cfg->sq[q_id]; - - sq_info->q_id = q_id; - sq_info->pi = spnic_get_sq_local_pi(sq); - sq_info->ci = spnic_get_sq_local_ci(sq); - sq_info->fi = spnic_get_sq_hw_ci(sq); - sq_info->q_depth = sq->wq.q_depth; - sq_info->wqebb_size = sq->wq.wqebb_size; - - sq_info->ci_addr = sq->tx.cons_idx_addr; - - sq_info->cla_addr = sq->wq.wq_block_paddr; - sq_info->slq_handle = sq; - - sq_info->doorbell.map_addr = (u64 *)sq->db_addr; - - return 0; -} - -int spnic_dbg_get_rq_info(void *hwdev, u16 q_id, struct nic_rq_info *rq_info, u32 msg_size) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - struct spnic_io_queue *rq = NULL; - - if (!hwdev || !rq_info) { - pr_err("hwdev or rq_info is NULL.\n"); - return -EINVAL; - } - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - if (q_id >= nic_cfg->num_qps) { - nic_err(nic_cfg->dev_hdl, "Input queue id(%u) is larger than the actual queue number\n", - q_id); - return -EINVAL; - } - - if (msg_size != sizeof(*rq_info)) { - nic_err(nic_cfg->dev_hdl, "Unexpect out buf size from user: %u, expect: %lu\n", - msg_size, sizeof(*rq_info)); - return -EINVAL; - } - - rq = &nic_cfg->rq[q_id]; - - rq_info->q_id = q_id; - - rq_info->hw_pi = cpu_to_be16(*rq->rx.pi_virt_addr); - rq_info->ci = spnic_get_rq_local_ci(rq); - - rq_info->sw_pi = 0; - - rq_info->wqebb_size = rq->wq.wqebb_size; - rq_info->q_depth = (u16)rq->wq.q_depth; - - rq_info->buf_len = nic_cfg->rx_buff_len; - - rq_info->slq_handle = rq; - - rq_info->ci_wqe_page_addr = sphw_wq_get_first_wqe_page_addr(&rq->wq); - rq_info->ci_cla_tbl_addr = rq->wq.wq_block_paddr; - - rq_info->msix_idx = rq->msix_entry_idx; - rq_info->msix_vector = 0; - - return 0; -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_dbg.h b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_dbg.h deleted file mode 100644 index d86c65ed5f4f..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_dbg.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_NIC_DBG_H -#define SPNIC_NIC_DBG_H - -#include "spnic_nic_io.h" - -int spnic_dbg_get_sq_info(void *hwdev, u16 q_id, struct nic_sq_info *sq_info, u32 msg_size); - -int spnic_dbg_get_rq_info(void *hwdev, u16 q_id, struct 
nic_rq_info *rq_info, u32 msg_size); - -int spnic_dbg_get_wqe_info(void *hwdev, u16 q_id, u16 idx, u16 wqebb_cnt, - u8 *wqe, u16 *wqe_size, enum spnic_queue_type q_type); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_dev.h b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_dev.h deleted file mode 100644 index 8a0708fda19a..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_dev.h +++ /dev/null @@ -1,354 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_NIC_DEV_H -#define SPNIC_NIC_DEV_H - -#include <linux/netdevice.h> -#include <linux/semaphore.h> -#include <linux/types.h> -#include <linux/bitops.h> - -#include "spnic_nic_io.h" -#include "spnic_nic_cfg.h" -#include "spnic_tx.h" -#include "spnic_rx.h" -#include "spnic_dcb.h" - -#define SPNIC_NIC_DRV_NAME "spnic" -#define SPNIC_DRV_VERSION "B090" -#define SPNIC_DRV_DESC "Ramaxel(R) Network Interface Card Driver" - -#define SPNIC_FUNC_IS_VF(hwdev) (sphw_func_type(hwdev) == TYPE_VF) - -#define SPNIC_AVG_PKT_SMALL 256U -#define SPNIC_MODERATONE_DELAY HZ - -#define LP_PKT_CNT 64 - -enum spnic_flags { - SPNIC_INTF_UP, - SPNIC_MAC_FILTER_CHANGED, - SPNIC_LP_TEST, - SPNIC_RSS_ENABLE, - SPNIC_DCB_ENABLE, - SPNIC_SAME_RXTX, - SPNIC_INTR_ADAPT, - SPNIC_UPDATE_MAC_FILTER, - SPNIC_CHANGE_RES_INVALID, - SPNIC_RSS_DEFAULT_INDIR, -}; - -#define SPHW_CHANNEL_RES_VALID(nic_dev) \ - (test_bit(SPNIC_INTF_UP, &(nic_dev)->flags) && \ - !test_bit(SPNIC_CHANGE_RES_INVALID, &(nic_dev)->flags)) - -#define RX_BUFF_NUM_PER_PAGE 2 - -#define VLAN_BITMAP_BYTE_SIZE(nic_dev) (sizeof(*(nic_dev)->vlan_bitmap)) -#define VLAN_BITMAP_BITS_SIZE(nic_dev) (VLAN_BITMAP_BYTE_SIZE(nic_dev) * 8) -#define VLAN_NUM_BITMAPS(nic_dev) (VLAN_N_VID / \ - VLAN_BITMAP_BITS_SIZE(nic_dev)) -#define VLAN_BITMAP_SIZE(nic_dev) (VLAN_N_VID / \ - VLAN_BITMAP_BYTE_SIZE(nic_dev)) -#define VID_LINE(nic_dev, vid) ((vid) / VLAN_BITMAP_BITS_SIZE(nic_dev)) -#define VID_COL(nic_dev, vid) ((vid) & (VLAN_BITMAP_BITS_SIZE(nic_dev) - 1)) - -#define SPNIC_DRV_FEATURE NIC_F_ALL_MASK - -enum spnic_event_work_flags { - EVENT_WORK_TX_TIMEOUT, -}; - -enum spnic_rx_mode_state { - SPNIC_HW_PROMISC_ON, - SPNIC_HW_ALLMULTI_ON, - SPNIC_PROMISC_FORCE_ON, - SPNIC_ALLMULTI_FORCE_ON, -}; - -enum mac_filter_state { - SPNIC_MAC_WAIT_HW_SYNC, - SPNIC_MAC_HW_SYNCED, - SPNIC_MAC_WAIT_HW_UNSYNC, - SPNIC_MAC_HW_UNSYNCED, -}; - -struct spnic_mac_filter { - struct list_head list; - u8 addr[ETH_ALEN]; - unsigned long state; -}; - -struct spnic_irq { - struct net_device *netdev; - /* IRQ corresponding index number */ - u16 msix_entry_idx; - u32 irq_id; /* The IRQ number from OS */ - char irq_name[IFNAMSIZ + 16]; - struct napi_struct napi; - cpumask_t affinity_mask; - struct spnic_txq *txq; - struct spnic_rxq *rxq; -}; - -struct spnic_intr_coal_info { - u8 pending_limt; - u8 coalesce_timer_cfg; - u8 resend_timer_cfg; - - u64 pkt_rate_low; - u8 rx_usecs_low; - u8 rx_pending_limt_low; - u64 pkt_rate_high; - u8 rx_usecs_high; - u8 rx_pending_limt_high; - - u8 user_set_intr_coal_flag; -}; - -struct spnic_dyna_txrxq_params { - u16 num_qps; - u16 num_rss; - u16 rss_limit; - u8 num_tc; - u8 rsvd1; - u32 sq_depth; - u32 rq_depth; - - struct spnic_dyna_txq_res *txqs_res; - struct spnic_dyna_rxq_res *rxqs_res; - struct spnic_irq *irq_cfg; -}; - -#define SPNIC_NIC_STATS_INC(nic_dev, field) \ -do { \ - u64_stats_update_begin(&(nic_dev)->stats.syncp);\ - (nic_dev)->stats.field++; \ - u64_stats_update_end(&(nic_dev)->stats.syncp); \ -} while (0) - 
-struct spnic_nic_stats { - u64 netdev_tx_timeout; - - /* Subdivision statistics show in private tool */ - u64 tx_carrier_off_drop; - u64 tx_invalid_qid; - - struct u64_stats_sync syncp; -}; - -#define SPNIC_TCAM_DYNAMIC_BLOCK_SIZE 16 -#define SPNIC_MAX_TCAM_FILTERS 512 - -#define SPNIC_PKT_TCAM_DYNAMIC_INDEX_START(block_index) \ - (SPNIC_TCAM_DYNAMIC_BLOCK_SIZE * (block_index)) - -struct spnic_rx_flow_rule { - struct list_head rules; - int tot_num_rules; -}; - -struct spnic_tcam_dynamic_block { - struct list_head block_list; - u16 dynamic_block_id; - u16 dynamic_index_cnt; - u8 dynamic_index_used[SPNIC_TCAM_DYNAMIC_BLOCK_SIZE]; -}; - -struct spnic_tcam_dynamic_block_info { - struct list_head tcam_dynamic_list; - u16 dynamic_block_cnt; -}; - -struct spnic_tcam_filter { - struct list_head tcam_filter_list; - u16 dynamic_block_id; - u16 index; - struct tag_tcam_key tcam_key; - u16 queue; -}; - -/* function level struct info */ -struct spnic_tcam_info { - u16 tcam_rule_nums; - struct list_head tcam_list; - struct spnic_tcam_dynamic_block_info tcam_dynamic_info; -}; - -struct spnic_nic_dev { - struct pci_dev *pdev; - struct net_device *netdev; - void *hwdev; - - int poll_weight; - - unsigned long *vlan_bitmap; - - u16 max_qps; - - u32 msg_enable; - unsigned long flags; - - u32 lro_replenish_thld; - u32 dma_rx_buff_size; - u16 rx_buff_len; - u32 page_order; - - /* Rss related varibles */ - u8 rss_hash_engine; - struct nic_rss_type rss_type; - u8 *rss_hkey; - /* hkey in big endian */ - u32 *rss_hkey_be; - u32 *rss_indir; - - u32 dcb_changes; - struct spnic_dcb_config hw_dcb_cfg; - struct spnic_dcb_config wanted_dcb_cfg; - unsigned long dcb_flags; - int disable_port_cnt; - /* lock for disable or enable traffic flow */ - struct semaphore dcb_sem; - - struct spnic_intr_coal_info *intr_coalesce; - unsigned long last_moder_jiffies; - u32 adaptive_rx_coal; - u8 intr_coal_set_flag; - - struct spnic_nic_stats stats; - - /* lock for nic resource */ - struct mutex nic_mutex; - bool force_port_disable; - struct semaphore port_state_sem; - u8 link_status; - - struct nic_service_cap nic_cap; - - struct spnic_txq *txqs; - struct spnic_rxq *rxqs; - struct spnic_dyna_txrxq_params q_params; - - u16 num_qp_irq; - struct irq_info *qps_irq_info; - - struct workqueue_struct *workq; - - struct work_struct rx_mode_work; - struct delayed_work moderation_task; - - struct list_head uc_filter_list; - struct list_head mc_filter_list; - unsigned long rx_mod_state; - int netdev_uc_cnt; - int netdev_mc_cnt; - - int lb_test_rx_idx; - int lb_pkt_len; - u8 *lb_test_rx_buf; - - struct spnic_tcam_info tcam; - struct spnic_rx_flow_rule rx_flow_rule; - - struct bpf_prog *xdp_prog; - - struct delayed_work periodic_work; - /* reference to enum spnic_event_work_flags */ - unsigned long event_flag; -}; - -#define IPSEC_CAP_IS_SUPPORT(nic_dev) ((nic_dev)->ipsec) - -#define spnic_msg(level, nic_dev, msglvl, format, arg...) \ -do { \ - if ((nic_dev)->netdev && (nic_dev)->netdev->reg_state \ - == NETREG_REGISTERED) \ - nicif_##level((nic_dev), msglvl, (nic_dev)->netdev, \ - format, ## arg); \ - else \ - nic_##level(&(nic_dev)->pdev->dev, \ - format, ## arg); \ -} while (0) - -#define spnic_info(nic_dev, msglvl, format, arg...) \ - spnic_msg(info, nic_dev, msglvl, format, ## arg) - -#define spnic_warn(nic_dev, msglvl, format, arg...) \ - spnic_msg(warn, nic_dev, msglvl, format, ## arg) - -#define spnic_err(nic_dev, msglvl, format, arg...) \ - spnic_msg(err, nic_dev, msglvl, format, ## arg) - -#define nicif_err(priv, type, dev, fmt, args...) 
\ - netif_level(err, priv, type, dev, "[NIC]" fmt, ##args) -#define nicif_warn(priv, type, dev, fmt, args...) \ - netif_level(warn, priv, type, dev, "[NIC]" fmt, ##args) -#define nicif_notice(priv, type, dev, fmt, args...) \ - netif_level(notice, priv, type, dev, "[NIC]" fmt, ##args) -#define nicif_info(priv, type, dev, fmt, args...) \ - netif_level(info, priv, type, dev, "[NIC]" fmt, ##args) -#define nicif_dbg(priv, type, dev, fmt, args...) \ - netif_level(dbg, priv, type, dev, "[NIC]" fmt, ##args) - -extern struct spnic_uld_info nic_uld_info; - -u32 spnic_get_io_stats_size(struct spnic_nic_dev *nic_dev); - -void spnic_get_io_stats(struct spnic_nic_dev *nic_dev, void *stats); - -int spnic_open(struct net_device *netdev); - -int spnic_close(struct net_device *netdev); - -void spnic_set_ethtool_ops(struct net_device *netdev); - -void spnicvf_set_ethtool_ops(struct net_device *netdev); - -int nic_ioctl(void *uld_dev, u32 cmd, const void *buf_in, - u32 in_size, void *buf_out, u32 *out_size); - -void spnic_update_num_qps(struct net_device *netdev); - -int spnic_qps_irq_init(struct spnic_nic_dev *nic_dev); - -void spnic_qps_irq_deinit(struct spnic_nic_dev *nic_dev); - -void spnic_set_netdev_ops(struct spnic_nic_dev *nic_dev); - -int spnic_set_hw_features(struct spnic_nic_dev *nic_dev); - -void spnic_set_rx_mode_work(struct work_struct *work); - -void spnic_clean_mac_list_filter(struct spnic_nic_dev *nic_dev); - -void spnic_get_strings(struct net_device *netdev, u32 stringset, u8 *data); - -void spnic_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data); - -int spnic_get_sset_count(struct net_device *netdev, int sset); - -int spnic_force_port_disable(struct spnic_nic_dev *nic_dev); - -int spnic_force_set_port_state(struct spnic_nic_dev *nic_dev, bool enable); - -int spnic_maybe_set_port_state(struct spnic_nic_dev *nic_dev, bool enable); - -int spnic_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *link_settings); -int spnic_set_link_ksettings(struct net_device *netdev, - const struct ethtool_link_ksettings *link_settings); - -void spnic_auto_moderation_work(struct work_struct *work); - -typedef void (*spnic_reopen_handler)(struct spnic_nic_dev *nic_dev, const void *priv_data); -int spnic_change_channel_settings(struct spnic_nic_dev *nic_dev, - struct spnic_dyna_txrxq_params *trxq_params, - spnic_reopen_handler reopen_handler, const void *priv_data); - -void spnic_link_status_change(struct spnic_nic_dev *nic_dev, bool status); - -bool spnic_is_xdp_enable(struct spnic_nic_dev *nic_dev); -int spnic_xdp_max_mtu(struct spnic_nic_dev *nic_dev); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_event.c b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_event.c deleted file mode 100644 index 0e8a2c4a3961..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_event.c +++ /dev/null @@ -1,506 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt - -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/etherdevice.h> -#include <linux/if_vlan.h> -#include <linux/ethtool.h> -#include <linux/kernel.h> -#include <linux/device.h> -#include <linux/pci.h> -#include <linux/netdevice.h> -#include <linux/module.h> - -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "spnic_nic_io.h" -#include "spnic_nic_cfg.h" -#include "spnic_nic.h" -#include "spnic_nic_cmd.h" - -static int spnic_init_vf_config(struct spnic_nic_cfg 
*nic_cfg, u16 vf_id) -{ - struct vf_data_storage *vf_info; - u16 func_id; - int err = 0; - - vf_info = nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf_id); - ether_addr_copy(vf_info->drv_mac_addr, vf_info->user_mac_addr); - if (!is_zero_ether_addr(vf_info->drv_mac_addr)) { - vf_info->use_specified_mac = true; - func_id = sphw_glb_pf_vf_offset(nic_cfg->hwdev) + vf_id; - - err = spnic_set_mac(nic_cfg->hwdev, vf_info->drv_mac_addr, - vf_info->pf_vlan, func_id, SPHW_CHANNEL_NIC); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to set VF %d MAC\n", - HW_VF_ID_TO_OS(vf_id)); - return err; - } - } else { - vf_info->use_specified_mac = false; - } - - if (spnic_vf_info_vlanprio(nic_cfg->hwdev, vf_id)) { - err = spnic_cfg_vf_vlan(nic_cfg, SPNIC_CMD_OP_ADD, - vf_info->pf_vlan, vf_info->pf_qos, vf_id); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to add VF %d VLAN_QOS\n", - HW_VF_ID_TO_OS(vf_id)); - return err; - } - } - - if (vf_info->max_rate) { - err = spnic_set_vf_tx_rate(nic_cfg->hwdev, vf_id, vf_info->max_rate, - vf_info->min_rate); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to set VF %d max rate %u, min rate %u\n", - HW_VF_ID_TO_OS(vf_id), vf_info->max_rate, - vf_info->min_rate); - return err; - } - } - - return 0; -} - -static int register_vf_msg_handler(struct spnic_nic_cfg *nic_cfg, u16 vf_id) -{ - int err; - - if (vf_id > nic_cfg->max_vfs) { - nic_err(nic_cfg->dev_hdl, "Register VF id %d exceed limit[0-%d]\n", - HW_VF_ID_TO_OS(vf_id), HW_VF_ID_TO_OS(nic_cfg->max_vfs)); - return -EFAULT; - } - - err = spnic_init_vf_config(nic_cfg, vf_id); - if (err) - return err; - - nic_cfg->vf_infos[HW_VF_ID_TO_OS(vf_id)].registered = true; - - return 0; -} - -static int unregister_vf_msg_handler(struct spnic_nic_cfg *nic_cfg, u16 vf_id) -{ - struct vf_data_storage *vf_info = - nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf_id); - struct spnic_port_mac_set mac_info; - u16 out_size = sizeof(mac_info); - int err; - - if (vf_id > nic_cfg->max_vfs) - return -EFAULT; - - vf_info->registered = false; - - memset(&mac_info, 0, sizeof(mac_info)); - mac_info.func_id = sphw_glb_pf_vf_offset(nic_cfg->hwdev) + (u16)vf_id; - mac_info.vlan_id = vf_info->pf_vlan; - ether_addr_copy(mac_info.mac, vf_info->drv_mac_addr); - - if (vf_info->use_specified_mac || vf_info->pf_vlan) { - err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_DEL_MAC, - &mac_info, sizeof(mac_info), &mac_info, &out_size); - if (err || mac_info.msg_head.status || !out_size) { - nic_err(nic_cfg->dev_hdl, "Failed to delete VF %d MAC, err: %d, status: 0x%x, out size: 0x%x\n", - HW_VF_ID_TO_OS(vf_id), err, - mac_info.msg_head.status, out_size); - return -EFAULT; - } - } - - memset(vf_info->drv_mac_addr, 0, ETH_ALEN); - - return 0; -} - -static int spnic_register_vf_msg_handler(struct spnic_nic_cfg *nic_cfg, - u16 vf_id, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - struct spnic_cmd_register_vf *register_vf = buf_in; - struct spnic_cmd_register_vf *register_info = buf_out; - int err; - - if (register_vf->op_register) - err = register_vf_msg_handler(nic_cfg, vf_id); - else - err = unregister_vf_msg_handler(nic_cfg, vf_id); - - if (err) - register_info->msg_head.status = EFAULT; - - *out_size = sizeof(*register_info); - - return 0; -} - -static int spnic_get_vf_cos_msg_handler(struct spnic_nic_cfg *nic_cfg, u16 vf_id, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size) -{ - struct spnic_cmd_vf_dcb_state *dcb_state = buf_out; - - memcpy(&dcb_state->state, &nic_cfg->dcb_state, - sizeof(nic_cfg->dcb_state)); - - dcb_state->msg_head.status = 
0; - *out_size = sizeof(*dcb_state); - return 0; -} - -static int spnic_get_vf_mac_msg_handler(struct spnic_nic_cfg *nic_cfg, u16 vf, - void *buf_in, u16 in_size, void *buf_out, u16 *out_size) -{ - struct vf_data_storage *vf_info = nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf); - struct spnic_port_mac_set *mac_info = buf_out; - - int err; - - if (sphw_support_ovs(nic_cfg->hwdev, NULL)) { - err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_GET_MAC, buf_in, - in_size, buf_out, out_size); - if (!err) { - if (is_zero_ether_addr(mac_info->mac)) - ether_addr_copy(mac_info->mac, vf_info->drv_mac_addr); - } - return err; - } - - ether_addr_copy(mac_info->mac, vf_info->drv_mac_addr); - mac_info->msg_head.status = 0; - *out_size = sizeof(*mac_info); - - return 0; -} - -static int spnic_set_vf_mac_msg_handler(struct spnic_nic_cfg *nic_cfg, u16 vf, - void *buf_in, u16 in_size, void *buf_out, u16 *out_size) -{ - struct vf_data_storage *vf_info = nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf); - struct spnic_port_mac_set *mac_in = buf_in; - struct spnic_port_mac_set *mac_out = buf_out; - int err; - - if (vf_info->use_specified_mac && !vf_info->trust && - is_valid_ether_addr(mac_in->mac)) { - nic_warn(nic_cfg->dev_hdl, "PF has already set VF %d MAC address, and vf trust is off.\n", - HW_VF_ID_TO_OS(vf)); - mac_out->msg_head.status = SPNIC_PF_SET_VF_ALREADY; - *out_size = sizeof(*mac_out); - return 0; - } - - if (is_valid_ether_addr(mac_in->mac)) - mac_in->vlan_id = vf_info->pf_vlan; - - err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_SET_MAC, - buf_in, in_size, buf_out, out_size); - if (err || !(*out_size)) { - nic_err(nic_cfg->dev_hdl, "Failed to set VF %d MAC address, err: %d,status: 0x%x, out size: 0x%x\n", - HW_VF_ID_TO_OS(vf), err, mac_out->msg_head.status, - *out_size); - return -EFAULT; - } - - if (is_valid_ether_addr(mac_in->mac) && !mac_out->msg_head.status) - ether_addr_copy(vf_info->drv_mac_addr, mac_in->mac); - - return err; -} - -static int spnic_del_vf_mac_msg_handler(struct spnic_nic_cfg *nic_cfg, u16 vf, - void *buf_in, u16 in_size, void *buf_out, u16 *out_size) -{ - struct vf_data_storage *vf_info = nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf); - struct spnic_port_mac_set *mac_in = buf_in; - struct spnic_port_mac_set *mac_out = buf_out; - int err; - - if (vf_info->use_specified_mac && !vf_info->trust && - is_valid_ether_addr(mac_in->mac)) { - nic_warn(nic_cfg->dev_hdl, "PF has already set VF %d MAC address, and vf trust is off.\n", - HW_VF_ID_TO_OS(vf)); - mac_out->msg_head.status = SPNIC_PF_SET_VF_ALREADY; - *out_size = sizeof(*mac_out); - return 0; - } - - if (is_valid_ether_addr(mac_in->mac)) - mac_in->vlan_id = vf_info->pf_vlan; - - err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_DEL_MAC, - buf_in, in_size, buf_out, out_size); - if (err || !(*out_size)) { - nic_err(nic_cfg->dev_hdl, "Failed to delete VF %d MAC, err: %d, status: 0x%x, out size: 0x%x\n", - HW_VF_ID_TO_OS(vf), err, mac_out->msg_head.status, - *out_size); - return -EFAULT; - } - - if (is_valid_ether_addr(mac_in->mac) && !mac_out->msg_head.status) - eth_zero_addr(vf_info->drv_mac_addr); - - return err; -} - -static int spnic_update_vf_mac_msg_handler(struct spnic_nic_cfg *nic_cfg, - u16 vf, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - struct vf_data_storage *vf_info = nic_cfg->vf_infos + HW_VF_ID_TO_OS(vf); - struct spnic_port_mac_update *mac_in = buf_in; - struct spnic_port_mac_update *mac_out = buf_out; - int err; - - if (!is_valid_ether_addr(mac_in->new_mac)) { - nic_err(nic_cfg->dev_hdl, 
"Update VF MAC is invalid.\n"); - return -EINVAL; - } - - if (vf_info->use_specified_mac && !vf_info->trust) { - nic_warn(nic_cfg->dev_hdl, "PF has already set VF %d MAC address, and vf trust is off.\n", - HW_VF_ID_TO_OS(vf)); - mac_out->msg_head.status = SPNIC_PF_SET_VF_ALREADY; - *out_size = sizeof(*mac_out); - return 0; - } - - mac_in->vlan_id = vf_info->pf_vlan; - err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_UPDATE_MAC, - buf_in, in_size, buf_out, out_size); - if (err || !(*out_size)) { - nic_warn(nic_cfg->dev_hdl, "Failed to update VF %d MAC, err: %d,status: 0x%x, out size: 0x%x\n", - HW_VF_ID_TO_OS(vf), err, mac_out->msg_head.status, - *out_size); - return -EFAULT; - } - - if (!mac_out->msg_head.status) - ether_addr_copy(vf_info->drv_mac_addr, mac_in->new_mac); - - return err; -} - -const struct vf_msg_handler vf_cmd_handler[] = { - { - .cmd = SPNIC_NIC_CMD_VF_REGISTER, - .handler = spnic_register_vf_msg_handler, - }, - - { - .cmd = SPNIC_NIC_CMD_GET_MAC, - .handler = spnic_get_vf_mac_msg_handler, - }, - - { - .cmd = SPNIC_NIC_CMD_SET_MAC, - .handler = spnic_set_vf_mac_msg_handler, - }, - - { - .cmd = SPNIC_NIC_CMD_DEL_MAC, - .handler = spnic_del_vf_mac_msg_handler, - }, - - { - .cmd = SPNIC_NIC_CMD_UPDATE_MAC, - .handler = spnic_update_vf_mac_msg_handler, - }, - - { - .cmd = SPNIC_NIC_CMD_VF_COS, - .handler = spnic_get_vf_cos_msg_handler - }, -}; - -static int _l2nic_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, - u16 in_size, void *buf_out, u16 *out_size, u16 channel) -{ - u32 i, cmd_cnt = ARRAY_LEN(vf_cmd_handler); - bool cmd_to_pf = false; - - if (sphw_func_type(hwdev) == TYPE_VF) { - for (i = 0; i < cmd_cnt; i++) { - if (cmd == vf_cmd_handler[i].cmd) - cmd_to_pf = true; - } - } - - if (cmd_to_pf) - return sphw_mbox_to_pf(hwdev, SPHW_MOD_L2NIC, cmd, buf_in, in_size, buf_out, - out_size, 0, channel); - - return sphw_msg_to_mgmt_sync(hwdev, SPHW_MOD_L2NIC, cmd, buf_in, in_size, buf_out, - out_size, 0, channel); -} - -int l2nic_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - return _l2nic_msg_to_mgmt_sync(hwdev, cmd, buf_in, in_size, buf_out, - out_size, SPHW_CHANNEL_NIC); -} - -int l2nic_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size, u16 channel) -{ - return _l2nic_msg_to_mgmt_sync(hwdev, cmd, buf_in, in_size, buf_out, out_size, channel); -} - -/* pf/ppf handler mbox msg from vf */ -int spnic_pf_mbox_handler(void *hwdev, void *pri_handle, - u16 vf_id, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - u32 index, cmd_size = ARRAY_LEN(vf_cmd_handler); - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev) - return -EFAULT; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - for (index = 0; index < cmd_size; index++) { - if (cmd == vf_cmd_handler[index].cmd) - return vf_cmd_handler[index].handler(nic_cfg, vf_id, buf_in, in_size, - buf_out, out_size); - } - - nic_warn(nic_cfg->dev_hdl, "NO handler for nic cmd(%u) received from vf id: %u\n", - cmd, vf_id); - - return -EINVAL; -} - -void spnic_notify_dcb_state_event(struct spnic_nic_cfg *nic_cfg, struct spnic_dcb_state *dcb_state) -{ - struct sphw_event_info event_info = {0}; - - /* This is 8 user priority to cos mapping relationships */ - sdk_info(nic_cfg->dev_hdl, "DCB %s, default cos %u, up2cos %u%u%u%u%u%u%u%u\n", - dcb_state->dcb_on ? 
"on" : "off", dcb_state->default_cos, - dcb_state->up_cos[0], dcb_state->up_cos[1], - dcb_state->up_cos[2], dcb_state->up_cos[3], - dcb_state->up_cos[4], dcb_state->up_cos[5], - dcb_state->up_cos[6], dcb_state->up_cos[7]); - - /* Saved in sdk for statefull module */ - spnic_save_dcb_state(nic_cfg, dcb_state); - - event_info.type = SPHW_EVENT_DCB_STATE_CHANGE; - memcpy(&event_info.dcb_state, dcb_state, sizeof(event_info.dcb_state)); - - sphw_event_callback(nic_cfg->hwdev, &event_info); -} - -void dcb_state_event(void *hwdev, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - struct spnic_cmd_vf_dcb_state *vf_dcb; - struct spnic_nic_cfg *nic_cfg; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - vf_dcb = buf_in; - if (!vf_dcb) - return; - - spnic_notify_dcb_state_event(nic_cfg, &vf_dcb->state); -} - -void tx_pause_excp_event_handler(void *hwdev, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - struct nic_cmd_tx_pause_notice *excp_info = buf_in; - struct spnic_nic_cfg *nic_cfg = NULL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - if (in_size != sizeof(*excp_info)) { - nic_err(nic_cfg->dev_hdl, "Invalid in_size: %u, should be %ld\n", - in_size, sizeof(*excp_info)); - return; - } - - nic_warn(nic_cfg->dev_hdl, "Receive tx pause exception event, excp: %u, level: %u\n", - excp_info->tx_pause_except, excp_info->except_level); - - sphw_fault_event_report(hwdev, SPHW_FAULT_SRC_TX_PAUSE_EXCP, (u16)excp_info->except_level); -} - -struct nic_event_handler nic_cmd_handler[] = { - { - .cmd = SPNIC_NIC_CMD_VF_COS, - .handler = dcb_state_event, - }, - { - .cmd = SPNIC_NIC_CMD_TX_PAUSE_EXCP_NOTICE, - .handler = tx_pause_excp_event_handler, - }, -}; - -static void _event_handler(void *hwdev, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - u32 size = sizeof(nic_cmd_handler) / sizeof(struct nic_event_handler); - u32 i; - - if (!hwdev) - return; - - *out_size = 0; - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - for (i = 0; i < size; i++) { - if (cmd == nic_cmd_handler[i].cmd) { - nic_cmd_handler[i].handler(hwdev, buf_in, in_size, buf_out, out_size); - break; - } - } - - /* can't find this event cmd */ - if (i == size) - sdk_warn(nic_cfg->dev_hdl, "Unsupported event cmd(%u) to process\n", - cmd); -} - -/* vf handler mbox msg from ppf/pf */ -/* vf link change event - * vf fault report event, TBD - */ -int spnic_vf_event_handler(void *hwdev, void *pri_handle, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - _event_handler(hwdev, cmd, buf_in, in_size, buf_out, out_size); - return 0; -} - -/* pf/ppf handler mgmt cpu report nic event*/ -void spnic_pf_event_handler(void *hwdev, void *pri_handle, u16 cmd, void *buf_in, u16 in_size, - void *buf_out, u16 *out_size) -{ - _event_handler(hwdev, cmd, buf_in, in_size, buf_out, out_size); -} - -u8 spnic_nic_sw_aeqe_handler(void *hwdev, u8 event, u8 *data) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev) - return 0; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - nic_err(nic_cfg->dev_hdl, "Received nic ucode aeq event type: 0x%x, data: 0x%llx\n", - event, *((u64 *)data)); - - return 0; -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_io.c b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_io.c deleted file mode 100644 index 3f1fb1381844..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_io.c +++ /dev/null @@ -1,1123 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 
-/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt - -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/types.h> -#include <linux/module.h> - -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "sphw_common.h" -#include "spnic_nic_qp.h" -#include "spnic_nic_io.h" -#include "spnic_nic_cfg.h" -#include "spnic_nic.h" -#include "spnic_nic_cmd.h" - -#define SPNIC_DEAULT_TX_CI_PENDING_LIMIT 0 -#define SPNIC_DEAULT_TX_CI_COALESCING_TIME 0 -#define SPNIC_DEAULT_DROP_THD_ON 0xFFFF -#define SPNIC_DEAULT_DROP_THD_OFF 0 - -static unsigned char tx_pending_limit = SPNIC_DEAULT_TX_CI_PENDING_LIMIT; -module_param(tx_pending_limit, byte, 0444); -MODULE_PARM_DESC(tx_pending_limit, "TX CI coalescing parameter pending_limit (default=0)"); - -static unsigned char tx_coalescing_time = SPNIC_DEAULT_TX_CI_COALESCING_TIME; -module_param(tx_coalescing_time, byte, 0444); -MODULE_PARM_DESC(tx_coalescing_time, "TX CI coalescing parameter coalescing_time (default=0)"); - -static unsigned char rq_wqe_type = SPNIC_NORMAL_RQ_WQE; -module_param(rq_wqe_type, byte, 0444); -MODULE_PARM_DESC(rq_wqe_type, "RQ WQE type 0-8Bytes, 1-16Bytes, 2-32Bytes (default=2)"); - -static u32 tx_drop_thd_on = SPNIC_DEAULT_DROP_THD_ON; -module_param(tx_drop_thd_on, uint, 0644); -MODULE_PARM_DESC(tx_drop_thd_on, "TX parameter drop_thd_on (default=0xffff)"); - -static u32 tx_drop_thd_off = SPNIC_DEAULT_DROP_THD_OFF; -module_param(tx_drop_thd_off, uint, 0644); -MODULE_PARM_DESC(tx_drop_thd_off, "TX parameter drop_thd_off (default=0)"); -/* performance: ci addr RTE_CACHE_SIZE(64B) alignment */ -#define SPNIC_CI_Q_ADDR_SIZE 64 - -#define CI_TABLE_SIZE(num_qps, pg_sz) (ALIGN((num_qps) * SPNIC_CI_Q_ADDR_SIZE, pg_sz)) - -#define SPNIC_CI_VADDR(base_addr, q_id) ((u8 *)(base_addr) + (q_id) * SPNIC_CI_Q_ADDR_SIZE) - -#define SPNIC_CI_PADDR(base_paddr, q_id) ((base_paddr) + (q_id) * SPNIC_CI_Q_ADDR_SIZE) - -#define WQ_PREFETCH_MAX 4 -#define WQ_PREFETCH_MIN 1 -#define WQ_PREFETCH_THRESHOLD 256 - -#define SPNIC_Q_CTXT_MAX 31 /* (2048 - 8) / 64 */ - -enum spnic_qp_ctxt_type { - SPNIC_QP_CTXT_TYPE_SQ, - SPNIC_QP_CTXT_TYPE_RQ, -}; - -struct spnic_qp_ctxt_header { - u16 num_queues; - u16 queue_type; - u16 start_qid; - u16 rsvd; -}; - -struct spnic_sq_ctxt { - u32 ci_pi; - u32 drop_mode_sp; - u32 wq_pfn_hi_owner; - u32 wq_pfn_lo; - - u32 rsvd0; - u32 pkt_drop_thd; - u32 global_sq_id; - u32 vlan_ceq_attr; - - u32 pref_cache; - u32 pref_ci_owner; - u32 pref_wq_pfn_hi_ci; - u32 pref_wq_pfn_lo; - - u32 rsvd8; - u32 rsvd9; - u32 wq_block_pfn_hi; - u32 wq_block_pfn_lo; -}; - -struct spnic_rq_ctxt { - u32 ci_pi; - u32 ceq_attr; - u32 wq_pfn_hi_type_owner; - u32 wq_pfn_lo; - - u32 rsvd[3]; - u32 cqe_sge_len; - - u32 pref_cache; - u32 pref_ci_owner; - u32 pref_wq_pfn_hi_ci; - u32 pref_wq_pfn_lo; - - u32 pi_paddr_hi; - u32 pi_paddr_lo; - u32 wq_block_pfn_hi; - u32 wq_block_pfn_lo; -}; - -struct spnic_sq_ctxt_block { - struct spnic_qp_ctxt_header cmdq_hdr; - struct spnic_sq_ctxt sq_ctxt[SPNIC_Q_CTXT_MAX]; -}; - -struct spnic_rq_ctxt_block { - struct spnic_qp_ctxt_header cmdq_hdr; - struct spnic_rq_ctxt rq_ctxt[SPNIC_Q_CTXT_MAX]; -}; - -struct spnic_clean_queue_ctxt { - struct spnic_qp_ctxt_header cmdq_hdr; - u32 rsvd; -}; - -#define SQ_CTXT_SIZE(num_sqs) ((u16)(sizeof(struct spnic_qp_ctxt_header) + \ - (num_sqs) * sizeof(struct spnic_sq_ctxt))) - -#define RQ_CTXT_SIZE(num_rqs) ((u16)(sizeof(struct spnic_qp_ctxt_header) + \ - (num_rqs) * sizeof(struct spnic_rq_ctxt))) - -#define CI_IDX_HIGH_SHIFH 
12 - -#define CI_HIGN_IDX(val) ((val) >> CI_IDX_HIGH_SHIFH) - -#define SQ_CTXT_PI_IDX_SHIFT 0 -#define SQ_CTXT_CI_IDX_SHIFT 16 - -#define SQ_CTXT_PI_IDX_MASK 0xFFFFU -#define SQ_CTXT_CI_IDX_MASK 0xFFFFU - -#define SQ_CTXT_CI_PI_SET(val, member) (((val) & SQ_CTXT_##member##_MASK) << \ - SQ_CTXT_##member##_SHIFT) - -#define SQ_CTXT_MODE_SP_FLAG_SHIFT 0 -#define SQ_CTXT_MODE_PKT_DROP_SHIFT 1 - -#define SQ_CTXT_MODE_SP_FLAG_MASK 0x1U -#define SQ_CTXT_MODE_PKT_DROP_MASK 0x1U - -#define SQ_CTXT_MODE_SET(val, member) (((val) & SQ_CTXT_MODE_##member##_MASK) << \ - SQ_CTXT_MODE_##member##_SHIFT) - -#define SQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0 -#define SQ_CTXT_WQ_PAGE_OWNER_SHIFT 23 - -#define SQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFFU -#define SQ_CTXT_WQ_PAGE_OWNER_MASK 0x1U - -#define SQ_CTXT_WQ_PAGE_SET(val, member) (((val) & SQ_CTXT_WQ_PAGE_##member##_MASK) << \ - SQ_CTXT_WQ_PAGE_##member##_SHIFT) - -#define SQ_CTXT_PKT_DROP_THD_ON_SHIFT 0 -#define SQ_CTXT_PKT_DROP_THD_OFF_SHIFT 16 - -#define SQ_CTXT_PKT_DROP_THD_ON_MASK 0xFFFFU -#define SQ_CTXT_PKT_DROP_THD_OFF_MASK 0xFFFFU - -#define SQ_CTXT_PKT_DROP_THD_SET(val, member) (((val) & SQ_CTXT_PKT_DROP_##member##_MASK) << \ - SQ_CTXT_PKT_DROP_##member##_SHIFT) - -#define SQ_CTXT_GLOBAL_SQ_ID_SHIFT 0 - -#define SQ_CTXT_GLOBAL_SQ_ID_MASK 0x1FFFU - -#define SQ_CTXT_GLOBAL_QUEUE_ID_SET(val, member) (((val) & SQ_CTXT_##member##_MASK) << \ - SQ_CTXT_##member##_SHIFT) - -#define SQ_CTXT_VLAN_TAG_SHIFT 0 -#define SQ_CTXT_VLAN_TYPE_SEL_SHIFT 16 -#define SQ_CTXT_VLAN_INSERT_MODE_SHIFT 19 -#define SQ_CTXT_VLAN_CEQ_EN_SHIFT 23 - -#define SQ_CTXT_VLAN_TAG_MASK 0xFFFFU -#define SQ_CTXT_VLAN_TYPE_SEL_MASK 0x7U -#define SQ_CTXT_VLAN_INSERT_MODE_MASK 0x3U -#define SQ_CTXT_VLAN_CEQ_EN_MASK 0x1U - -#define SQ_CTXT_VLAN_CEQ_SET(val, member) (((val) & SQ_CTXT_VLAN_##member##_MASK) << \ - SQ_CTXT_VLAN_##member##_SHIFT) - -#define SQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0 -#define SQ_CTXT_PREF_CACHE_MAX_SHIFT 14 -#define SQ_CTXT_PREF_CACHE_MIN_SHIFT 25 - -#define SQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFFU -#define SQ_CTXT_PREF_CACHE_MAX_MASK 0x7FFU -#define SQ_CTXT_PREF_CACHE_MIN_MASK 0x7FU - -#define SQ_CTXT_PREF_CI_HI_SHIFT 0 -#define SQ_CTXT_PREF_OWNER_SHIFT 4 - -#define SQ_CTXT_PREF_CI_HI_MASK 0xFU -#define SQ_CTXT_PREF_OWNER_MASK 0x1U - -#define SQ_CTXT_PREF_WQ_PFN_HI_SHIFT 0 -#define SQ_CTXT_PREF_CI_LOW_SHIFT 20 - -#define SQ_CTXT_PREF_WQ_PFN_HI_MASK 0xFFFFFU -#define SQ_CTXT_PREF_CI_LOW_MASK 0xFFFU - -#define SQ_CTXT_PREF_SET(val, member) (((val) & SQ_CTXT_PREF_##member##_MASK) << \ - SQ_CTXT_PREF_##member##_SHIFT) - -#define SQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT 0 - -#define SQ_CTXT_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU - -#define SQ_CTXT_WQ_BLOCK_SET(val, member) (((val) & SQ_CTXT_WQ_BLOCK_##member##_MASK) << \ - SQ_CTXT_WQ_BLOCK_##member##_SHIFT) - -#define RQ_CTXT_PI_IDX_SHIFT 0 -#define RQ_CTXT_CI_IDX_SHIFT 16 - -#define RQ_CTXT_PI_IDX_MASK 0xFFFFU -#define RQ_CTXT_CI_IDX_MASK 0xFFFFU - -#define RQ_CTXT_CI_PI_SET(val, member) (((val) & RQ_CTXT_##member##_MASK) << \ - RQ_CTXT_##member##_SHIFT) - -#define RQ_CTXT_CEQ_ATTR_INTR_SHIFT 21 -#define RQ_CTXT_CEQ_ATTR_EN_SHIFT 31 - -#define RQ_CTXT_CEQ_ATTR_INTR_MASK 0x3FFU -#define RQ_CTXT_CEQ_ATTR_EN_MASK 0x1U - -#define RQ_CTXT_CEQ_ATTR_SET(val, member) (((val) & RQ_CTXT_CEQ_ATTR_##member##_MASK) << \ - RQ_CTXT_CEQ_ATTR_##member##_SHIFT) - -#define RQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0 -#define RQ_CTXT_WQ_PAGE_WQE_TYPE_SHIFT 28 -#define RQ_CTXT_WQ_PAGE_OWNER_SHIFT 31 - -#define RQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFFU -#define RQ_CTXT_WQ_PAGE_WQE_TYPE_MASK 
0x3U -#define RQ_CTXT_WQ_PAGE_OWNER_MASK 0x1U - -#define RQ_CTXT_WQ_PAGE_SET(val, member) (((val) & RQ_CTXT_WQ_PAGE_##member##_MASK) << \ - RQ_CTXT_WQ_PAGE_##member##_SHIFT) - -#define RQ_CTXT_CQE_LEN_SHIFT 28 - -#define RQ_CTXT_CQE_LEN_MASK 0x3U - -#define RQ_CTXT_CQE_LEN_SET(val, member) (((val) & RQ_CTXT_##member##_MASK) << \ - RQ_CTXT_##member##_SHIFT) - -#define RQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0 -#define RQ_CTXT_PREF_CACHE_MAX_SHIFT 14 -#define RQ_CTXT_PREF_CACHE_MIN_SHIFT 25 - -#define RQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFFU -#define RQ_CTXT_PREF_CACHE_MAX_MASK 0x7FFU -#define RQ_CTXT_PREF_CACHE_MIN_MASK 0x7FU - -#define RQ_CTXT_PREF_CI_HI_SHIFT 0 -#define RQ_CTXT_PREF_OWNER_SHIFT 4 - -#define RQ_CTXT_PREF_CI_HI_MASK 0xFU -#define RQ_CTXT_PREF_OWNER_MASK 0x1U - -#define RQ_CTXT_PREF_WQ_PFN_HI_SHIFT 0 -#define RQ_CTXT_PREF_CI_LOW_SHIFT 20 - -#define RQ_CTXT_PREF_WQ_PFN_HI_MASK 0xFFFFFU -#define RQ_CTXT_PREF_CI_LOW_MASK 0xFFFU - -#define RQ_CTXT_PREF_SET(val, member) (((val) & RQ_CTXT_PREF_##member##_MASK) << \ - RQ_CTXT_PREF_##member##_SHIFT) - -#define RQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT 0 - -#define RQ_CTXT_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU - -#define RQ_CTXT_WQ_BLOCK_SET(val, member) (((val) & RQ_CTXT_WQ_BLOCK_##member##_MASK) << \ - RQ_CTXT_WQ_BLOCK_##member##_SHIFT) - -#define SIZE_16BYTES(size) (ALIGN((size), 16) >> 4) - -#define WQ_PAGE_PFN_SHIFT 12 -#define WQ_BLOCK_PFN_SHIFT 9 - -#define WQ_PAGE_PFN(page_addr) ((page_addr) >> WQ_PAGE_PFN_SHIFT) -#define WQ_BLOCK_PFN(page_addr) ((page_addr) >> WQ_BLOCK_PFN_SHIFT) - -/* sq and rq */ -#define TOTAL_DB_NUM(num_qps) ((u16)(2 * (num_qps))) - -int spnic_create_sq(struct spnic_nic_cfg *nic_cfg, struct spnic_io_queue *sq, - u16 q_id, u32 sq_depth, u16 sq_msix_idx) -{ - int err; - - /* sq used & hardware request init 1*/ - sq->owner = 1; - - sq->q_id = q_id; - sq->msix_entry_idx = sq_msix_idx; - - err = sphw_wq_create(nic_cfg->hwdev, &sq->wq, sq_depth, (u16)BIT(SPNIC_SQ_WQEBB_SHIFT)); - if (err) { - sdk_err(nic_cfg->dev_hdl, "Failed to create tx queue(%u) wq\n", - q_id); - return err; - } - - return 0; -} - -void spnic_destroy_sq(struct spnic_nic_cfg *nic_cfg, struct spnic_io_queue *sq) -{ - sphw_wq_destroy(&sq->wq); -} - -int spnic_create_rq(struct spnic_nic_cfg *nic_cfg, struct spnic_io_queue *rq, - u16 q_id, u32 rq_depth, u16 rq_msix_idx) -{ - int err; - - rq->wqe_type = rq_wqe_type; - rq->q_id = q_id; - rq->msix_entry_idx = rq_msix_idx; - - err = sphw_wq_create(nic_cfg->hwdev, &rq->wq, rq_depth, - (u16)BIT(SPNIC_RQ_WQEBB_SHIFT + rq_wqe_type)); - if (err) { - sdk_err(nic_cfg->dev_hdl, "Failed to create rx queue(%u) wq\n", - q_id); - return err; - } - - rq->rx.pi_virt_addr = dma_alloc_coherent(nic_cfg->dev_hdl, PAGE_SIZE, - &rq->rx.pi_dma_addr, GFP_KERNEL); - if (!rq->rx.pi_virt_addr) { - sphw_wq_destroy(&rq->wq); - nic_err(nic_cfg->dev_hdl, "Failed to allocate rq pi virt addr\n"); - return -ENOMEM; - } - - return 0; -} - -void spnic_destroy_rq(struct spnic_nic_cfg *nic_cfg, struct spnic_io_queue *rq) -{ - dma_free_coherent(nic_cfg->dev_hdl, PAGE_SIZE, rq->rx.pi_virt_addr, - rq->rx.pi_dma_addr); - - sphw_wq_destroy(&rq->wq); -} - -static int create_qp(struct spnic_nic_cfg *nic_cfg, struct spnic_io_queue *sq, - struct spnic_io_queue *rq, u16 q_id, u32 sq_depth, - u32 rq_depth, u16 qp_msix_idx) -{ - int err; - - err = spnic_create_sq(nic_cfg, sq, q_id, sq_depth, qp_msix_idx); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to create sq, qid: %u\n", - q_id); - return err; - } - - err = spnic_create_rq(nic_cfg, rq, q_id, rq_depth, 
qp_msix_idx); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to create rq, qid: %u\n", - q_id); - goto create_rq_err; - } - - return 0; - -create_rq_err: - spnic_destroy_sq(nic_cfg->hwdev, sq); - - return err; -} - -void destroy_qp(struct spnic_nic_cfg *nic_cfg, struct spnic_io_queue *sq, - struct spnic_io_queue *rq) -{ - spnic_destroy_sq(nic_cfg, sq); - spnic_destroy_rq(nic_cfg, rq); -} - -/* try to alloc the expect number of doorbell, and return the actual number - * of doorbell. - */ -static int spnic_doorbell_change(struct spnic_nic_cfg *nic_cfg, u16 dst_num_db) -{ - void __iomem *db_addr = NULL; - u16 cur_db_num = nic_cfg->allocated_num_db; - u16 db_num_gap, idx, i, cur_db_alloc; - int err; - - if (dst_num_db > nic_cfg->allocated_num_db) { - db_num_gap = dst_num_db - nic_cfg->allocated_num_db; - for (idx = 0; idx < db_num_gap; idx++) { - /* we don't use direct wqe for sq */ - err = sphw_alloc_db_addr(nic_cfg->hwdev, &db_addr, NULL); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to alloc sq doorbell addr\n"); - goto alloc_db_err; - } - nic_cfg->db_addr[cur_db_num + idx] = db_addr; - } - - nic_cfg->allocated_num_db = dst_num_db; - } else if (dst_num_db < nic_cfg->allocated_num_db) { - db_num_gap = nic_cfg->allocated_num_db - dst_num_db; - for (idx = 0; idx < db_num_gap; idx++) { - cur_db_alloc = (cur_db_num - idx) - 1; - sphw_free_db_addr(nic_cfg->hwdev, nic_cfg->db_addr[cur_db_alloc], NULL); - nic_cfg->db_addr[cur_db_alloc] = NULL; - } - - nic_cfg->allocated_num_db = dst_num_db; - } - - return 0; - -alloc_db_err: - for (i = 0; i < idx; i++) { - sphw_free_db_addr(nic_cfg->hwdev, nic_cfg->db_addr[cur_db_num + i], NULL); - nic_cfg->db_addr[cur_db_num + i] = NULL; - } - - return -EFAULT; -} - -int spnic_init_nicio_res(void *hwdev) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (!nic_cfg) { - pr_err("Failed to get nic service adapter\n"); - return -EFAULT; - } - - nic_cfg->max_qps = sphw_func_max_qnum(hwdev); - - nic_cfg->allocated_num_db = 0; - nic_cfg->db_addr = kcalloc(TOTAL_DB_NUM(nic_cfg->max_qps), - sizeof(*nic_cfg->db_addr), GFP_KERNEL); - if (!nic_cfg->db_addr) { - nic_err(nic_cfg->dev_hdl, "Failed to alloc db addr array\n"); - return -ENOMEM; - } - - nic_cfg->ci_vaddr_base = - dma_alloc_coherent(nic_cfg->dev_hdl, CI_TABLE_SIZE(nic_cfg->max_qps, PAGE_SIZE), - &nic_cfg->ci_dma_base, GFP_KERNEL); - if (!nic_cfg->ci_vaddr_base) { - kfree(nic_cfg->db_addr); - nic_err(nic_cfg->dev_hdl, "Failed to allocate ci area\n"); - return -ENOMEM; - } - - return 0; -} - -void spnic_deinit_nicio_res(void *hwdev) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev) - return; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (!nic_cfg) { - pr_err("Failed to get nic service adapter\n"); - return; - } - - dma_free_coherent(nic_cfg->dev_hdl, CI_TABLE_SIZE(nic_cfg->max_qps, PAGE_SIZE), - nic_cfg->ci_vaddr_base, nic_cfg->ci_dma_base); - /* free all doorbell */ - spnic_doorbell_change(nic_cfg, 0); - kfree(nic_cfg->db_addr); -} - -int spnic_alloc_qps(void *hwdev, struct irq_info *qps_msix_arry, - struct spnic_dyna_qp_params *qp_params) -{ - struct spnic_io_queue *sqs = NULL; - struct spnic_io_queue *rqs = NULL; - struct spnic_nic_cfg *nic_cfg = NULL; - u16 q_id, i, cur_allocated_db, num_qps; - int err; - - if (!hwdev || !qps_msix_arry || !qp_params) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (!nic_cfg) { - pr_err("Failed to get nic service 
adapter\n"); - return -EFAULT; - } - - if (qp_params->num_qps > nic_cfg->max_qps) - return -EINVAL; - - num_qps = qp_params->num_qps; - - cur_allocated_db = nic_cfg->allocated_num_db; - if (cur_allocated_db < TOTAL_DB_NUM(num_qps)) { - err = spnic_doorbell_change(nic_cfg, TOTAL_DB_NUM(num_qps)); - if (err) - return err; - } - - sqs = kcalloc(num_qps, sizeof(*sqs), GFP_KERNEL); - if (!sqs) { - nic_err(nic_cfg->dev_hdl, "Failed to allocate sq\n"); - err = -ENOMEM; - goto alloc_sqs_err; - } - - rqs = kcalloc(num_qps, sizeof(*rqs), GFP_KERNEL); - if (!rqs) { - nic_err(nic_cfg->dev_hdl, "Failed to allocate rq\n"); - err = -ENOMEM; - goto alloc_rqs_err; - } - - for (q_id = 0; q_id < num_qps; q_id++) { - err = create_qp(nic_cfg, &sqs[q_id], &rqs[q_id], q_id, - qp_params->sq_depth, qp_params->rq_depth, - qps_msix_arry[q_id].msix_entry_idx); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to allocate qp %u, err: %d\n", - q_id, err); - goto create_qp_err; - } - } - - qp_params->sqs = sqs; - qp_params->rqs = rqs; - - return 0; - -create_qp_err: - for (i = 0; i < q_id; i++) - destroy_qp(nic_cfg, &sqs[i], &rqs[i]); - - kfree(rqs); - -alloc_rqs_err: - kfree(sqs); - -alloc_sqs_err: - /* Only release the newly added doorbell resource, - * the old resource is still in use - */ - spnic_doorbell_change(nic_cfg, cur_allocated_db); - - return err; -} - -void spnic_free_qps(void *hwdev, struct spnic_dyna_qp_params *qp_params) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - u16 q_id; - - if (!hwdev || !qp_params) - return; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (!nic_cfg) { - pr_err("Failed to get nic service adapter\n"); - return; - } - - for (q_id = 0; q_id < qp_params->num_qps; q_id++) - destroy_qp(nic_cfg, &qp_params->sqs[q_id], - &qp_params->rqs[q_id]); - - kfree(qp_params->sqs); - kfree(qp_params->rqs); -} - -void init_qps_info(struct spnic_nic_cfg *nic_cfg, struct spnic_dyna_qp_params *qp_params) -{ - struct spnic_io_queue *sqs = qp_params->sqs; - struct spnic_io_queue *rqs = qp_params->rqs; - u16 q_id; - - nic_cfg->num_qps = qp_params->num_qps; - nic_cfg->sq = qp_params->sqs; - nic_cfg->rq = qp_params->rqs; - for (q_id = 0; q_id < nic_cfg->num_qps; q_id++) { - sqs[q_id].tx.cons_idx_addr = SPNIC_CI_VADDR(nic_cfg->ci_vaddr_base, q_id); - /* clear ci value */ - *(u16 *)sqs[q_id].tx.cons_idx_addr = 0; - sqs[q_id].db_addr = nic_cfg->db_addr[q_id]; - - /* The first num_qps doorbell is used by sq */ - rqs[q_id].db_addr = nic_cfg->db_addr[nic_cfg->num_qps + q_id]; - } -} - -int spnic_init_qps(void *hwdev, struct spnic_dyna_qp_params *qp_params) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev || !qp_params) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (!nic_cfg) { - pr_err("Failed to get nic service adapter\n"); - return -EFAULT; - } - - if (nic_cfg->allocated_num_db > TOTAL_DB_NUM(qp_params->num_qps)) - spnic_doorbell_change(nic_cfg, TOTAL_DB_NUM(qp_params->num_qps)); - - init_qps_info(nic_cfg, qp_params); - - return spnic_init_qp_ctxts(hwdev); -} - -void spnic_deinit_qps(void *hwdev, struct spnic_dyna_qp_params *qp_params) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev || !qp_params) - return; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (!nic_cfg) { - pr_err("Failed to get nic service adapter\n"); - return; - } - - qp_params->sqs = nic_cfg->sq; - qp_params->rqs = nic_cfg->rq; - qp_params->num_qps = nic_cfg->num_qps; - - spnic_free_qp_ctxts(hwdev); -} - -int spnic_create_qps(void *hwdev, u16 num_qp, u32 
sq_depth, u32 rq_depth, - struct irq_info *qps_msix_arry) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - struct spnic_dyna_qp_params qp_params = {0}; - int err; - - if (!hwdev || !qps_msix_arry) - return -EFAULT; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (!nic_cfg) { - pr_err("Failed to get nic service adapter\n"); - return -EFAULT; - } - - err = spnic_init_nicio_res(hwdev); - if (err) - return err; - - qp_params.num_qps = num_qp; - qp_params.sq_depth = sq_depth; - qp_params.rq_depth = rq_depth; - err = spnic_alloc_qps(hwdev, qps_msix_arry, &qp_params); - if (err) { - spnic_deinit_nicio_res(hwdev); - nic_err(nic_cfg->dev_hdl, "Failed to allocate qps, err: %d\n", err); - return err; - } - - init_qps_info(nic_cfg, &qp_params); - - return 0; -} - -void spnic_destroy_qps(void *hwdev) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - struct spnic_dyna_qp_params qp_params = {0}; - - if (!hwdev) - return; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (!nic_cfg) - return; - - spnic_deinit_qps(hwdev, &qp_params); - spnic_free_qps(hwdev, &qp_params); - spnic_deinit_nicio_res(hwdev); -} - -void *spnic_get_nic_queue(void *hwdev, u16 q_id, enum spnic_queue_type q_type) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev || q_type >= SPNIC_MAX_QUEUE_TYPE) - return NULL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (!nic_cfg) - return NULL; - - return ((q_type == SPNIC_SQ) ? &nic_cfg->sq[q_id] : &nic_cfg->rq[q_id]); -} - -void spnic_qp_prepare_cmdq_header(struct spnic_qp_ctxt_header *qp_ctxt_hdr, - enum spnic_qp_ctxt_type ctxt_type, - u16 num_queues, u16 q_id) -{ - qp_ctxt_hdr->queue_type = ctxt_type; - qp_ctxt_hdr->num_queues = num_queues; - qp_ctxt_hdr->start_qid = q_id; - qp_ctxt_hdr->rsvd = 0; - - sphw_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr)); -} - -void spnic_sq_prepare_ctxt(struct spnic_io_queue *sq, u16 sq_id, struct spnic_sq_ctxt *sq_ctxt) -{ - u64 wq_page_addr; - u64 wq_page_pfn, wq_block_pfn; - u32 wq_page_pfn_hi, wq_page_pfn_lo; - u32 wq_block_pfn_hi, wq_block_pfn_lo; - u16 pi_start, ci_start; - - ci_start = spnic_get_sq_local_ci(sq); - pi_start = spnic_get_sq_local_pi(sq); - - wq_page_addr = sphw_wq_get_first_wqe_page_addr(&sq->wq); - - wq_page_pfn = WQ_PAGE_PFN(wq_page_addr); - wq_page_pfn_hi = upper_32_bits(wq_page_pfn); - wq_page_pfn_lo = lower_32_bits(wq_page_pfn); - - wq_block_pfn = WQ_BLOCK_PFN(sq->wq.wq_block_paddr); - wq_block_pfn_hi = upper_32_bits(wq_block_pfn); - wq_block_pfn_lo = lower_32_bits(wq_block_pfn); - - sq_ctxt->ci_pi = - SQ_CTXT_CI_PI_SET(ci_start, CI_IDX) | - SQ_CTXT_CI_PI_SET(pi_start, PI_IDX); - - sq_ctxt->drop_mode_sp = - SQ_CTXT_MODE_SET(0, SP_FLAG) | - SQ_CTXT_MODE_SET(0, PKT_DROP); - - sq_ctxt->wq_pfn_hi_owner = - SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) | - SQ_CTXT_WQ_PAGE_SET(1, OWNER); - - sq_ctxt->wq_pfn_lo = wq_page_pfn_lo; - - /* TO DO */ - sq_ctxt->pkt_drop_thd = - SQ_CTXT_PKT_DROP_THD_SET(tx_drop_thd_on, THD_ON) | - SQ_CTXT_PKT_DROP_THD_SET(tx_drop_thd_off, THD_OFF); - - sq_ctxt->global_sq_id = - SQ_CTXT_GLOBAL_QUEUE_ID_SET(sq_id, GLOBAL_SQ_ID); - - /* enable insert c-vlan in default */ - sq_ctxt->vlan_ceq_attr = - SQ_CTXT_VLAN_CEQ_SET(0, CEQ_EN) | - SQ_CTXT_VLAN_CEQ_SET(1, INSERT_MODE); - - sq_ctxt->rsvd0 = 0; - - sq_ctxt->pref_cache = - SQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) | - SQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) | - SQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); - - sq_ctxt->pref_ci_owner = - SQ_CTXT_PREF_SET(CI_HIGN_IDX(ci_start), CI_HI) | - 
SQ_CTXT_PREF_SET(1, OWNER); - - sq_ctxt->pref_wq_pfn_hi_ci = - SQ_CTXT_PREF_SET(ci_start, CI_LOW) | - SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI); - - sq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo; - - sq_ctxt->wq_block_pfn_hi = SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI); - - sq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo; - - sphw_cpu_to_be32(sq_ctxt, sizeof(*sq_ctxt)); -} - -void spnic_rq_prepare_ctxt(struct spnic_io_queue *rq, struct spnic_rq_ctxt *rq_ctxt) -{ - u64 wq_page_addr; - u64 wq_page_pfn, wq_block_pfn; - u32 wq_page_pfn_hi, wq_page_pfn_lo; - u32 wq_block_pfn_hi, wq_block_pfn_lo; - u16 pi_start, ci_start; - u16 wqe_type = rq->wqe_type; - - /* RQ depth is in unit of 8Bytes */ - ci_start = (u16)((u32)spnic_get_rq_local_ci(rq) << wqe_type); - pi_start = (u16)((u32)spnic_get_rq_local_pi(rq) << wqe_type); - - wq_page_addr = sphw_wq_get_first_wqe_page_addr(&rq->wq); - - wq_page_pfn = WQ_PAGE_PFN(wq_page_addr); - wq_page_pfn_hi = upper_32_bits(wq_page_pfn); - wq_page_pfn_lo = lower_32_bits(wq_page_pfn); - - wq_block_pfn = WQ_BLOCK_PFN(rq->wq.wq_block_paddr); - wq_block_pfn_hi = upper_32_bits(wq_block_pfn); - wq_block_pfn_lo = lower_32_bits(wq_block_pfn); - - rq_ctxt->ci_pi = - RQ_CTXT_CI_PI_SET(ci_start, CI_IDX) | - RQ_CTXT_CI_PI_SET(pi_start, PI_IDX); - - rq_ctxt->ceq_attr = RQ_CTXT_CEQ_ATTR_SET(0, EN) | - RQ_CTXT_CEQ_ATTR_SET(rq->msix_entry_idx, INTR); - - rq_ctxt->wq_pfn_hi_type_owner = - RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) | - RQ_CTXT_WQ_PAGE_SET(1, OWNER); - - switch (wqe_type) { - case SPNIC_EXTEND_RQ_WQE: - /* use 32Byte WQE with SGE for CQE */ - rq_ctxt->wq_pfn_hi_type_owner |= RQ_CTXT_WQ_PAGE_SET(0, WQE_TYPE); - break; - case SPNIC_NORMAL_RQ_WQE: - /* use 16Byte WQE with 32Bytes SGE for CQE */ - rq_ctxt->wq_pfn_hi_type_owner |= RQ_CTXT_WQ_PAGE_SET(2, WQE_TYPE); - rq_ctxt->cqe_sge_len = RQ_CTXT_CQE_LEN_SET(1, CQE_LEN); - break; - default: - pr_err("Invalid rq wqe type: %u", wqe_type); - } - - rq_ctxt->wq_pfn_lo = wq_page_pfn_lo; - - rq_ctxt->pref_cache = - RQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) | - RQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) | - RQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); - - rq_ctxt->pref_ci_owner = - RQ_CTXT_PREF_SET(CI_HIGN_IDX(ci_start), CI_HI) | - RQ_CTXT_PREF_SET(1, OWNER); - - rq_ctxt->pref_wq_pfn_hi_ci = - RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI) | - RQ_CTXT_PREF_SET(ci_start, CI_LOW); - - rq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo; - - rq_ctxt->pi_paddr_hi = upper_32_bits(rq->rx.pi_dma_addr); - rq_ctxt->pi_paddr_lo = lower_32_bits(rq->rx.pi_dma_addr); - - rq_ctxt->wq_block_pfn_hi = RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI); - - rq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo; - - sphw_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt)); -} - -static int init_sq_ctxts(struct spnic_nic_cfg *nic_cfg) -{ - struct spnic_sq_ctxt_block *sq_ctxt_block = NULL; - struct spnic_sq_ctxt *sq_ctxt = NULL; - struct sphw_cmd_buf *cmd_buf = NULL; - struct spnic_io_queue *sq = NULL; - u64 out_param = 0; - u16 q_id, curr_id, max_ctxts, i; - int err = 0; - - cmd_buf = sphw_alloc_cmd_buf(nic_cfg->hwdev); - if (!cmd_buf) { - nic_err(nic_cfg->dev_hdl, "Failed to allocate cmd buf\n"); - return -ENOMEM; - } - - q_id = 0; - while (q_id < nic_cfg->num_qps) { - sq_ctxt_block = cmd_buf->buf; - sq_ctxt = sq_ctxt_block->sq_ctxt; - - max_ctxts = (nic_cfg->num_qps - q_id) > SPNIC_Q_CTXT_MAX ? 
- SPNIC_Q_CTXT_MAX : (nic_cfg->num_qps - q_id); - - spnic_qp_prepare_cmdq_header(&sq_ctxt_block->cmdq_hdr, SPNIC_QP_CTXT_TYPE_SQ, - max_ctxts, q_id); - - for (i = 0; i < max_ctxts; i++) { - curr_id = q_id + i; - sq = &nic_cfg->sq[curr_id]; - - spnic_sq_prepare_ctxt(sq, curr_id, &sq_ctxt[i]); - } - - cmd_buf->size = SQ_CTXT_SIZE(max_ctxts); - - err = sphw_cmdq_direct_resp(nic_cfg->hwdev, SPHW_MOD_L2NIC, - SPNIC_UCODE_CMD_MODIFY_QUEUE_CTX, - cmd_buf, &out_param, 0, SPHW_CHANNEL_NIC); - if (err || out_param != 0) { - nic_err(nic_cfg->dev_hdl, "Failed to set SQ ctxts, err: %d, out_param: 0x%llx\n", - err, out_param); - - err = -EFAULT; - break; - } - - q_id += max_ctxts; - } - - sphw_free_cmd_buf(nic_cfg->hwdev, cmd_buf); - - return err; -} - -static int init_rq_ctxts(struct spnic_nic_cfg *nic_cfg) -{ - struct spnic_rq_ctxt_block *rq_ctxt_block = NULL; - struct spnic_rq_ctxt *rq_ctxt = NULL; - struct sphw_cmd_buf *cmd_buf = NULL; - struct spnic_io_queue *rq = NULL; - u64 out_param = 0; - u16 q_id, curr_id, max_ctxts, i; - int err = 0; - - cmd_buf = sphw_alloc_cmd_buf(nic_cfg->hwdev); - if (!cmd_buf) { - nic_err(nic_cfg->dev_hdl, "Failed to allocate cmd buf\n"); - return -ENOMEM; - } - - q_id = 0; - while (q_id < nic_cfg->num_qps) { - rq_ctxt_block = cmd_buf->buf; - rq_ctxt = rq_ctxt_block->rq_ctxt; - - max_ctxts = (nic_cfg->num_qps - q_id) > SPNIC_Q_CTXT_MAX ? - SPNIC_Q_CTXT_MAX : (nic_cfg->num_qps - q_id); - - spnic_qp_prepare_cmdq_header(&rq_ctxt_block->cmdq_hdr, SPNIC_QP_CTXT_TYPE_RQ, - max_ctxts, q_id); - - for (i = 0; i < max_ctxts; i++) { - curr_id = q_id + i; - rq = &nic_cfg->rq[curr_id]; - - spnic_rq_prepare_ctxt(rq, &rq_ctxt[i]); - } - - cmd_buf->size = RQ_CTXT_SIZE(max_ctxts); - - err = sphw_cmdq_direct_resp(nic_cfg->hwdev, SPHW_MOD_L2NIC, - SPNIC_UCODE_CMD_MODIFY_QUEUE_CTX, - cmd_buf, &out_param, 0, SPHW_CHANNEL_NIC); - - if (err || out_param != 0) { - nic_err(nic_cfg->dev_hdl, "Failed to set RQ ctxts, err: %d, out_param: 0x%llx\n", - err, out_param); - - err = -EFAULT; - break; - } - - q_id += max_ctxts; - } - - sphw_free_cmd_buf(nic_cfg->hwdev, cmd_buf); - - return err; -} - -static int init_qp_ctxts(struct spnic_nic_cfg *nic_cfg) -{ - int err; - - err = init_sq_ctxts(nic_cfg); - if (err) - return err; - - err = init_rq_ctxts(nic_cfg); - if (err) - return err; - - return 0; -} - -static int clean_queue_offload_ctxt(struct spnic_nic_cfg *nic_cfg, - enum spnic_qp_ctxt_type ctxt_type) -{ - struct spnic_clean_queue_ctxt *ctxt_block = NULL; - struct sphw_cmd_buf *cmd_buf = NULL; - u64 out_param = 0; - int err; - - cmd_buf = sphw_alloc_cmd_buf(nic_cfg->hwdev); - if (!cmd_buf) { - nic_err(nic_cfg->dev_hdl, "Failed to allocate cmd buf\n"); - return -ENOMEM; - } - - ctxt_block = cmd_buf->buf; - ctxt_block->cmdq_hdr.num_queues = nic_cfg->max_qps; - ctxt_block->cmdq_hdr.queue_type = ctxt_type; - ctxt_block->cmdq_hdr.start_qid = 0; - - sphw_cpu_to_be32(ctxt_block, sizeof(*ctxt_block)); - - cmd_buf->size = sizeof(*ctxt_block); - - err = sphw_cmdq_direct_resp(nic_cfg->hwdev, SPHW_MOD_L2NIC, - SPNIC_UCODE_CMD_CLEAN_QUEUE_CONTEXT, - cmd_buf, &out_param, 0, SPHW_CHANNEL_NIC); - - if ((err) || (out_param)) { - nic_err(nic_cfg->dev_hdl, "Failed to clean queue offload ctxts, err: %d,out_param: 0x%llx\n", - err, out_param); - - err = -EFAULT; - } - - sphw_free_cmd_buf(nic_cfg->hwdev, cmd_buf); - - return err; -} - -static int clean_qp_offload_ctxt(struct spnic_nic_cfg *nic_cfg) -{ - /* clean LRO/TSO context space */ - return (clean_queue_offload_ctxt(nic_cfg, SPNIC_QP_CTXT_TYPE_SQ) || - 
clean_queue_offload_ctxt(nic_cfg, SPNIC_QP_CTXT_TYPE_RQ)); -} - -/* init qps ctxt and set sq ci attr and arm all sq*/ -int spnic_init_qp_ctxts(void *hwdev) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - struct spnic_sq_attr sq_attr; - u32 rq_depth; - u16 q_id; - int err; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - if (!nic_cfg) - return -EFAULT; - - err = init_qp_ctxts(nic_cfg); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to init QP ctxts\n"); - return err; - } - - /* clean LRO/TSO context space */ - err = clean_qp_offload_ctxt(nic_cfg); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to clean qp offload ctxts\n"); - return err; - } - - rq_depth = nic_cfg->rq[0].wq.q_depth << nic_cfg->rq[0].wqe_type; - - err = sphw_set_root_ctxt(hwdev, rq_depth, nic_cfg->sq[0].wq.q_depth, - nic_cfg->rx_buff_len, SPHW_CHANNEL_NIC); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to set root context\n"); - return err; - } - - for (q_id = 0; q_id < nic_cfg->num_qps; q_id++) { - sq_attr.ci_dma_base = SPNIC_CI_PADDR(nic_cfg->ci_dma_base, q_id) >> 2; - sq_attr.pending_limit = tx_pending_limit; - sq_attr.coalescing_time = tx_coalescing_time; - sq_attr.intr_en = 1; - sq_attr.intr_idx = nic_cfg->sq[q_id].msix_entry_idx; - sq_attr.l2nic_sqn = q_id; - sq_attr.dma_attr_off = 0; - err = spnic_set_ci_table(hwdev, &sq_attr); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to set ci table\n"); - goto set_cons_idx_table_err; - } - } - - return 0; - -set_cons_idx_table_err: - sphw_clean_root_ctxt(hwdev, SPHW_CHANNEL_NIC); - - return err; -} - -void spnic_free_qp_ctxts(void *hwdev) -{ - if (!hwdev) - return; - - sphw_clean_root_ctxt(hwdev, SPHW_CHANNEL_NIC); -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_io.h b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_io.h deleted file mode 100644 index e237ba33d82d..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_io.h +++ /dev/null @@ -1,305 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_NIC_IO_H -#define SPNIC_NIC_IO_H - -#include "sphw_crm.h" -#include "sphw_common.h" -#include "sphw_wq.h" - -#define SPNIC_MAX_TX_QUEUE_DEPTH 65536 -#define SPNIC_MAX_RX_QUEUE_DEPTH 16384 - -#define SPNIC_MIN_QUEUE_DEPTH 128 - -#define SPNIC_SQ_WQEBB_SHIFT 4 -#define SPNIC_RQ_WQEBB_SHIFT 3 - -#define SPNIC_SQ_WQEBB_SIZE BIT(SPNIC_SQ_WQEBB_SHIFT) -#define SPNIC_CQE_SIZE_SHIFT 4 - -enum spnic_rq_wqe_type { - SPNIC_COMPACT_RQ_WQE, - SPNIC_NORMAL_RQ_WQE, - SPNIC_EXTEND_RQ_WQE, -}; - -struct spnic_io_queue { - struct sphw_wq wq; - union { - u8 wqe_type; /* for rq */ - u8 owner; /* for sq */ - }; - - u16 q_id; - u16 msix_entry_idx; - - u8 __iomem *db_addr; - - union { - struct { - void *cons_idx_addr; - } tx; - - struct { - u16 *pi_virt_addr; - dma_addr_t pi_dma_addr; - } rx; - }; -} ____cacheline_aligned; - -struct spnic_nic_db { - u32 db_info; - u32 pi_hi; -}; - -/* * - * @brief spnic_get_sq_free_wqebbs - get send queue free wqebb - * @param sq: send queue - * @retval : number of free wqebb - */ -static inline u16 spnic_get_sq_free_wqebbs(struct spnic_io_queue *sq) -{ - return sphw_wq_free_wqebbs(&sq->wq); -} - -/* * - * @brief spnic_update_sq_local_ci - update send queue local consumer index - * @param sq: send queue - * @param wqe_cnt: number of wqebb - */ -static inline void spnic_update_sq_local_ci(struct spnic_io_queue *sq, u16 wqebb_cnt) -{ - sphw_wq_put_wqebbs(&sq->wq, wqebb_cnt); -} - -/* * - * @brief spnic_get_sq_local_ci - get send 
queue local consumer index - * @param sq: send queue - * @retval : local consumer index - */ -static inline u16 spnic_get_sq_local_ci(struct spnic_io_queue *sq) -{ - return WQ_MASK_IDX(&sq->wq, sq->wq.cons_idx); -} - -/* * - * @brief spnic_get_sq_local_pi - get send queue local producer index - * @param sq: send queue - * @retval : local producer index - */ -static inline u16 spnic_get_sq_local_pi(struct spnic_io_queue *sq) -{ - return WQ_MASK_IDX(&sq->wq, sq->wq.prod_idx); -} - -/* * - * @brief spnic_get_sq_hw_ci - get send queue hardware consumer index - * @param sq: send queue - * @retval : hardware consumer index - */ -static inline u16 spnic_get_sq_hw_ci(struct spnic_io_queue *sq) -{ - return WQ_MASK_IDX(&sq->wq, *(u16 *)sq->tx.cons_idx_addr); -} - -/* * - * @brief spnic_get_sq_one_wqebb - get send queue wqe with single wqebb - * @param sq: send queue - * @param pi: return current pi - * @retval : wqe base address - */ -static inline void *spnic_get_sq_one_wqebb(struct spnic_io_queue *sq, u16 *pi) -{ - return sphw_wq_get_one_wqebb(&sq->wq, pi); -} - -/* * - * @brief spnic_get_sq_multi_wqebb - get send queue wqe with multiple wqebbs - * @param sq: send queue - * @param wqebb_cnt: wqebb counter - * @param pi: return current pi - * @param second_part_wqebbs_addr: second part wqebbs base address - * @param first_part_wqebbs_num: number wqebbs of first part - * @retval : first part wqebbs base address - */ -static inline void *spnic_get_sq_multi_wqebbs(struct spnic_io_queue *sq, u16 wqebb_cnt, u16 *pi, - void **second_part_wqebbs_addr, - u16 *first_part_wqebbs_num) -{ - return sphw_wq_get_multi_wqebbs(&sq->wq, wqebb_cnt, pi, second_part_wqebbs_addr, - first_part_wqebbs_num); -} - -/* * - * @brief spnic_get_and_update_sq_owner - get and update send queue owner bit - * @param sq: send queue - * @param curr_pi: current pi - * @param wqebb_cnt: wqebb counter - * @retval : owner bit - */ -static inline u16 spnic_get_and_update_sq_owner(struct spnic_io_queue *sq, - u16 curr_pi, u16 wqebb_cnt) -{ - u16 owner = sq->owner; - - if (unlikely(curr_pi + wqebb_cnt >= sq->wq.q_depth)) - sq->owner = !sq->owner; - - return owner; -} - -/* * - * @brief spnic_get_sq_wqe_with_owner - get send queue wqe with owner - * @param sq: send queue - * @param wqebb_cnt: wqebb counter - * @param pi: return current pi - * @param owner: return owner bit - * @param second_part_wqebbs_addr: second part wqebbs base address - * @param first_part_wqebbs_num: number wqebbs of first part - * @retval : first part wqebbs base address - */ -static inline void *spnic_get_sq_wqe_with_owner(struct spnic_io_queue *sq, - u16 wqebb_cnt, u16 *pi, u16 *owner, - void **second_part_wqebbs_addr, - u16 *first_part_wqebbs_num) -{ - void *wqe = sphw_wq_get_multi_wqebbs(&sq->wq, wqebb_cnt, pi, second_part_wqebbs_addr, - first_part_wqebbs_num); - - *owner = sq->owner; - if (unlikely(*pi + wqebb_cnt >= sq->wq.q_depth)) - sq->owner = !sq->owner; - - return wqe; -} - -/* * - * @brief spnic_rollback_sq_wqebbs - rollback send queue wqe - * @param sq: send queue - * @param wqebb_cnt: wqebb counter - * @param owner: owner bit - */ -static inline void spnic_rollback_sq_wqebbs(struct spnic_io_queue *sq, u16 wqebb_cnt, u16 owner) -{ - if (owner != sq->owner) - sq->owner = owner; - sq->wq.prod_idx -= wqebb_cnt; -} - -/* * - * @brief spnic_rq_wqe_addr - get receive queue wqe address by queue index - * @param rq: receive queue - * @param idx: wq index - * @retval: wqe base address - */ -static inline void *spnic_rq_wqe_addr(struct spnic_io_queue *rq, u16 idx) 
-{ - return sphw_wq_wqebb_addr(&rq->wq, idx); -} - -/* * - * @brief spnic_update_rq_hw_pi - update receive queue hardware pi - * @param rq: receive queue - * @param pi: pi - */ -static inline void spnic_update_rq_hw_pi(struct spnic_io_queue *rq, u16 pi) -{ - *rq->rx.pi_virt_addr = cpu_to_be16((pi & rq->wq.idx_mask) << rq->wqe_type); -} - -/* * - * @brief spnic_update_rq_local_ci - update receive queue local consumer index - * @param sq: receive queue - * @param wqe_cnt: number of wqebb - */ -static inline void spnic_update_rq_local_ci(struct spnic_io_queue *rq, u16 wqebb_cnt) -{ - sphw_wq_put_wqebbs(&rq->wq, wqebb_cnt); -} - -/* * - * @brief spnic_get_rq_local_ci - get receive queue local ci - * @param rq: receive queue - * @retval: receive queue local ci - */ -static inline u16 spnic_get_rq_local_ci(struct spnic_io_queue *rq) -{ - return WQ_MASK_IDX(&rq->wq, rq->wq.cons_idx); -} - -/* * - * @brief spnic_get_rq_local_pi - get receive queue local pi - * @param rq: receive queue - * @retval: receive queue local pi - */ -static inline u16 spnic_get_rq_local_pi(struct spnic_io_queue *rq) -{ - return WQ_MASK_IDX(&rq->wq, rq->wq.prod_idx); -} - -/* ******************** DB INFO ******************** */ -#define DB_INFO_QID_SHIFT 0 -#define DB_INFO_NON_FILTER_SHIFT 22 -#define DB_INFO_CFLAG_SHIFT 23 -#define DB_INFO_COS_SHIFT 24 -#define DB_INFO_TYPE_SHIFT 27 - -#define DB_INFO_QID_MASK 0x1FFFU -#define DB_INFO_NON_FILTER_MASK 0x1U -#define DB_INFO_CFLAG_MASK 0x1U -#define DB_INFO_COS_MASK 0x7U -#define DB_INFO_TYPE_MASK 0x1FU -#define DB_INFO_SET(val, member) \ - (((u32)(val) & DB_INFO_##member##_MASK) << \ - DB_INFO_##member##_SHIFT) - -#define DB_PI_LOW_MASK 0xFFU -#define DB_PI_HIGH_MASK 0xFFU -#define DB_PI_LOW(pi) ((pi) & DB_PI_LOW_MASK) -#define DB_PI_HI_SHIFT 8 -#define DB_PI_HIGH(pi) (((pi) >> DB_PI_HI_SHIFT) & DB_PI_HIGH_MASK) -#define DB_ADDR(queue, pi) ((u64 *)((queue)->db_addr) + DB_PI_LOW(pi)) -#define SRC_TYPE 1 - -/* CFLAG_DATA_PATH */ -#define SQ_CFLAG_DP 0 -#define RQ_CFLAG_DP 1 -/* * - * @brief spnic_write_db - write doorbell - * @param queue: nic io queue - * @param cos: cos index - * @param cflag: 0--sq, 1--rq - * @param pi: product index - */ -static inline void spnic_write_db(struct spnic_io_queue *queue, int cos, u8 cflag, u16 pi) -{ - struct spnic_nic_db db; - - db.db_info = DB_INFO_SET(SRC_TYPE, TYPE) | DB_INFO_SET(cflag, CFLAG) | - DB_INFO_SET(cos, COS) | DB_INFO_SET(queue->q_id, QID); - db.pi_hi = DB_PI_HIGH(pi); - - wmb(); /* Write all before the doorbell */ - - writeq(*((u64 *)&db), DB_ADDR(queue, pi)); -} - -struct spnic_dyna_qp_params { - u16 num_qps; - u32 sq_depth; - u32 rq_depth; - - struct spnic_io_queue *sqs; - struct spnic_io_queue *rqs; -}; - -int spnic_alloc_qps(void *hwdev, struct irq_info *qps_msix_arry, - struct spnic_dyna_qp_params *qp_params); -void spnic_free_qps(void *hwdev, struct spnic_dyna_qp_params *qp_params); -int spnic_init_qps(void *hwdev, struct spnic_dyna_qp_params *qp_params); -void spnic_deinit_qps(void *hwdev, struct spnic_dyna_qp_params *qp_params); -int spnic_init_nicio_res(void *hwdev); -void spnic_deinit_nicio_res(void *hwdev); -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_qp.h b/drivers/net/ethernet/ramaxel/spnic/spnic_nic_qp.h deleted file mode 100644 index a8abdc1734d3..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_nic_qp.h +++ /dev/null @@ -1,416 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_NIC_QP_H -#define SPNIC_NIC_QP_H 
- -#include "sphw_common.h" - -#define TX_MSS_DEFAULT 0x3E00 -#define TX_MSS_MIN 0x50 - -#define SPNIC_MAX_SQ_SGE 18 - -#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_SHIFT 0 -#define RQ_CQE_OFFOLAD_TYPE_IP_TYPE_SHIFT 5 -#define RQ_CQE_OFFOLAD_TYPE_ENC_L3_TYPE_SHIFT 7 -#define RQ_CQE_OFFOLAD_TYPE_TUNNEL_PKT_FORMAT_SHIFT 8 -#define RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_SHIFT 19 -#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_SHIFT 21 -#define RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_SHIFT 24 - -#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK 0x1FU -#define RQ_CQE_OFFOLAD_TYPE_IP_TYPE_MASK 0x3U -#define RQ_CQE_OFFOLAD_TYPE_ENC_L3_TYPE_MASK 0x1U -#define RQ_CQE_OFFOLAD_TYPE_TUNNEL_PKT_FORMAT_MASK 0xFU -#define RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_MASK 0x3U -#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_MASK 0x1U -#define RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_MASK 0xFFU - -#define RQ_CQE_OFFOLAD_TYPE_GET(val, member) \ - (((val) >> RQ_CQE_OFFOLAD_TYPE_##member##_SHIFT) & \ - RQ_CQE_OFFOLAD_TYPE_##member##_MASK) - -#define SPNIC_GET_RX_PKT_TYPE(offload_type) \ - RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE) -#define SPNIC_GET_RX_IP_TYPE(offload_type) \ - RQ_CQE_OFFOLAD_TYPE_GET(offload_type, IP_TYPE) -#define SPNIC_GET_RX_ENC_L3_TYPE(offload_type) \ - RQ_CQE_OFFOLAD_TYPE_GET(offload_type, ENC_L3_TYPE) -#define SPNIC_GET_RX_TUNNEL_PKT_FORMAT(offload_type) \ - RQ_CQE_OFFOLAD_TYPE_GET(offload_type, TUNNEL_PKT_FORMAT) - -#define SPNIC_GET_RX_PKT_UMBCAST(offload_type) \ - RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_UMBCAST) - -#define SPNIC_GET_RX_VLAN_OFFLOAD_EN(offload_type) \ - RQ_CQE_OFFOLAD_TYPE_GET(offload_type, VLAN_EN) - -#define SPNIC_GET_RSS_TYPES(offload_type) \ - RQ_CQE_OFFOLAD_TYPE_GET(offload_type, RSS_TYPE) - -#define RQ_CQE_SGE_VLAN_SHIFT 0 -#define RQ_CQE_SGE_LEN_SHIFT 16 - -#define RQ_CQE_SGE_VLAN_MASK 0xFFFFU -#define RQ_CQE_SGE_LEN_MASK 0xFFFFU - -#define RQ_CQE_SGE_GET(val, member) \ - (((val) >> RQ_CQE_SGE_##member##_SHIFT) & RQ_CQE_SGE_##member##_MASK) - -#define SPNIC_GET_RX_VLAN_TAG(vlan_len) RQ_CQE_SGE_GET(vlan_len, VLAN) - -#define SPNIC_GET_RX_PKT_LEN(vlan_len) RQ_CQE_SGE_GET(vlan_len, LEN) - -#define RQ_CQE_STATUS_CSUM_ERR_SHIFT 0 -#define RQ_CQE_STATUS_NUM_LRO_SHIFT 16 -#define RQ_CQE_STATUS_LRO_PUSH_SHIFT 25 -#define RQ_CQE_STATUS_LRO_ENTER_SHIFT 26 -#define RQ_CQE_STATUS_LRO_INTR_SHIFT 27 - -#define RQ_CQE_STATUS_BP_EN_SHIFT 30 -#define RQ_CQE_STATUS_RXDONE_SHIFT 31 -#define RQ_CQE_STATUS_DECRY_PKT_SHIFT 29 -#define RQ_CQE_STATUS_FLUSH_SHIFT 28 - -#define RQ_CQE_STATUS_CSUM_ERR_MASK 0xFFFFU -#define RQ_CQE_STATUS_NUM_LRO_MASK 0xFFU -#define RQ_CQE_STATUS_LRO_PUSH_MASK 0X1U -#define RQ_CQE_STATUS_LRO_ENTER_MASK 0X1U -#define RQ_CQE_STATUS_LRO_INTR_MASK 0X1U -#define RQ_CQE_STATUS_BP_EN_MASK 0X1U -#define RQ_CQE_STATUS_RXDONE_MASK 0x1U -#define RQ_CQE_STATUS_FLUSH_MASK 0x1U -#define RQ_CQE_STATUS_DECRY_PKT_MASK 0x1U - -#define RQ_CQE_STATUS_GET(val, member) \ - (((val) >> RQ_CQE_STATUS_##member##_SHIFT) & \ - RQ_CQE_STATUS_##member##_MASK) - -#define SPNIC_GET_RX_CSUM_ERR(status) RQ_CQE_STATUS_GET(status, CSUM_ERR) - -#define SPNIC_GET_RX_DONE(status) RQ_CQE_STATUS_GET(status, RXDONE) - -#define SPNIC_GET_RX_FLUSH(status) RQ_CQE_STATUS_GET(status, FLUSH) - -#define SPNIC_GET_RX_BP_EN(status) RQ_CQE_STATUS_GET(status, BP_EN) - -#define SPNIC_GET_RX_NUM_LRO(status) RQ_CQE_STATUS_GET(status, NUM_LRO) - -#define SPNIC_RX_IS_DECRY_PKT(status) RQ_CQE_STATUS_GET(status, DECRY_PKT) - -#define RQ_CQE_SUPER_CQE_EN_SHIFT 0 -#define RQ_CQE_PKT_NUM_SHIFT 1 -#define RQ_CQE_PKT_LAST_LEN_SHIFT 6 -#define RQ_CQE_PKT_FIRST_LEN_SHIFT 19 - -#define 
RQ_CQE_SUPER_CQE_EN_MASK 0x1 -#define RQ_CQE_PKT_NUM_MASK 0x1FU -#define RQ_CQE_PKT_FIRST_LEN_MASK 0x1FFFU -#define RQ_CQE_PKT_LAST_LEN_MASK 0x1FFFU - -#define RQ_CQE_PKT_NUM_GET(val, member) \ - (((val) >> RQ_CQE_PKT_##member##_SHIFT) & RQ_CQE_PKT_##member##_MASK) -#define SPNIC_GET_RQ_CQE_PKT_NUM(pkt_info) RQ_CQE_PKT_NUM_GET(pkt_info, NUM) - -#define RQ_CQE_SUPER_CQE_EN_GET(val, member) \ - (((val) >> RQ_CQE_##member##_SHIFT) & RQ_CQE_##member##_MASK) -#define SPNIC_GET_SUPER_CQE_EN(pkt_info) \ - RQ_CQE_SUPER_CQE_EN_GET(pkt_info, SUPER_CQE_EN) - -#define RQ_CQE_PKT_LEN_GET(val, member) \ - (((val) >> RQ_CQE_PKT_##member##_SHIFT) & RQ_CQE_PKT_##member##_MASK) - -#define RQ_CQE_DECRY_INFO_DECRY_STATUS_SHIFT 8 -#define RQ_CQE_DECRY_INFO_ESP_NEXT_HEAD_SHIFT 0 - -#define RQ_CQE_DECRY_INFO_DECRY_STATUS_MASK 0xFFU -#define RQ_CQE_DECRY_INFO_ESP_NEXT_HEAD_MASK 0xFFU - -#define RQ_CQE_DECRY_INFO_GET(val, member) \ - (((val) >> RQ_CQE_DECRY_INFO_##member##_SHIFT) & \ - RQ_CQE_DECRY_INFO_##member##_MASK) - -#define SPNIC_GET_DECRYPT_STATUS(decry_info) \ - RQ_CQE_DECRY_INFO_GET(decry_info, DECRY_STATUS) - -#define SPNIC_GET_ESP_NEXT_HEAD(decry_info) \ - RQ_CQE_DECRY_INFO_GET(decry_info, ESP_NEXT_HEAD) - -struct spnic_rq_cqe { - u32 status; - u32 vlan_len; - - u32 offload_type; - u32 hash_val; - u32 xid; - u32 decrypt_info; - u32 rsvd6; - u32 pkt_info; -}; - -struct spnic_sge_sect { - struct sphw_sge sge; - u32 rsvd; -}; - -struct spnic_rq_extend_wqe { - struct spnic_sge_sect buf_desc; - struct spnic_sge_sect cqe_sect; -}; - -struct spnic_rq_normal_wqe { - u32 buf_hi_addr; - u32 buf_lo_addr; - u32 cqe_hi_addr; - u32 cqe_lo_addr; -}; - -struct spnic_rq_wqe { - union { - struct spnic_rq_normal_wqe normal_wqe; - struct spnic_rq_extend_wqe extend_wqe; - }; -}; - -struct spnic_sq_wqe_desc { - u32 ctrl_len; - u32 queue_info; - u32 hi_addr; - u32 lo_addr; -}; - -/* Engine only pass first 12B TS field directly to uCode through metadata - * vlan_offoad is used for hardware when vlan insert in tx - */ -struct spnic_sq_task { - u32 pkt_info0; - u32 ip_identify; - u32 pkt_info2; /* ipsec used as spi */ - u32 vlan_offload; -}; - -struct spnic_sq_bufdesc { - u32 len; /* 31-bits Length, L2NIC only use length[17:0] */ - u32 rsvd; - u32 hi_addr; - u32 lo_addr; -}; - -struct spnic_sq_compact_wqe { - struct spnic_sq_wqe_desc wqe_desc; -}; - -struct spnic_sq_extend_wqe { - struct spnic_sq_wqe_desc wqe_desc; - struct spnic_sq_task task; - struct spnic_sq_bufdesc buf_desc[0]; -}; - -struct spnic_sq_wqe { - union { - struct spnic_sq_compact_wqe compact_wqe; - struct spnic_sq_extend_wqe extend_wqe; - }; -}; - -/* use section pointer for support non continuous wqe */ -struct spnic_sq_wqe_combo { - struct spnic_sq_wqe_desc *ctrl_bd0; - struct spnic_sq_task *task; - struct spnic_sq_bufdesc *bds_head; - struct spnic_sq_bufdesc *bds_sec2; - u16 first_bds_num; - u32 wqe_type; - u32 task_type; -}; - -/* ************* SQ_CTRL ************** */ -enum sq_wqe_data_format { - SQ_NORMAL_WQE = 0, -}; - -enum sq_wqe_ec_type { - SQ_WQE_COMPACT_TYPE = 0, - SQ_WQE_EXTENDED_TYPE = 1, -}; - -enum sq_wqe_tasksect_len_type { - SQ_WQE_TASKSECT_46BITS = 0, - SQ_WQE_TASKSECT_16BYTES = 1, -}; - -#define SQ_CTRL_BD0_LEN_SHIFT 0 -#define SQ_CTRL_RSVD_SHIFT 18 -#define SQ_CTRL_BUFDESC_NUM_SHIFT 19 -#define SQ_CTRL_TASKSECT_LEN_SHIFT 27 -#define SQ_CTRL_DATA_FORMAT_SHIFT 28 -#define SQ_CTRL_DIRECT_SHIFT 29 -#define SQ_CTRL_EXTENDED_SHIFT 30 -#define SQ_CTRL_OWNER_SHIFT 31 - -#define SQ_CTRL_BD0_LEN_MASK 0x3FFFFU -#define SQ_CTRL_RSVD_MASK 0x1U -#define 
SQ_CTRL_BUFDESC_NUM_MASK 0xFFU -#define SQ_CTRL_TASKSECT_LEN_MASK 0x1U -#define SQ_CTRL_DATA_FORMAT_MASK 0x1U -#define SQ_CTRL_DIRECT_MASK 0x1U -#define SQ_CTRL_EXTENDED_MASK 0x1U -#define SQ_CTRL_OWNER_MASK 0x1U - -#define SQ_CTRL_SET(val, member) \ - (((u32)(val) & SQ_CTRL_##member##_MASK) << SQ_CTRL_##member##_SHIFT) - -#define SQ_CTRL_GET(val, member) \ - (((val) >> SQ_CTRL_##member##_SHIFT) & SQ_CTRL_##member##_MASK) - -#define SQ_CTRL_CLEAR(val, member) \ - ((val) & (~(SQ_CTRL_##member##_MASK << SQ_CTRL_##member##_SHIFT))) - -#define SQ_CTRL_QUEUE_INFO_PKT_TYPE_SHIFT 0 -#define SQ_CTRL_QUEUE_INFO_PLDOFF_SHIFT 2 -#define SQ_CTRL_QUEUE_INFO_UFO_SHIFT 10 -#define SQ_CTRL_QUEUE_INFO_TSO_SHIFT 11 -#define SQ_CTRL_QUEUE_INFO_TCPUDP_CS_SHIFT 12 -#define SQ_CTRL_QUEUE_INFO_MSS_SHIFT 13 -#define SQ_CTRL_QUEUE_INFO_SCTP_SHIFT 27 -#define SQ_CTRL_QUEUE_INFO_UC_SHIFT 28 -#define SQ_CTRL_QUEUE_INFO_PRI_SHIFT 29 - -#define SQ_CTRL_QUEUE_INFO_PKT_TYPE_MASK 0x3U -#define SQ_CTRL_QUEUE_INFO_PLDOFF_MASK 0xFFU -#define SQ_CTRL_QUEUE_INFO_UFO_MASK 0x1U -#define SQ_CTRL_QUEUE_INFO_TSO_MASK 0x1U -#define SQ_CTRL_QUEUE_INFO_TCPUDP_CS_MASK 0x1U -#define SQ_CTRL_QUEUE_INFO_MSS_MASK 0x3FFFU -#define SQ_CTRL_QUEUE_INFO_SCTP_MASK 0x1U -#define SQ_CTRL_QUEUE_INFO_UC_MASK 0x1U -#define SQ_CTRL_QUEUE_INFO_PRI_MASK 0x7U - -#define SQ_CTRL_QUEUE_INFO_SET(val, member) \ - (((u32)(val) & SQ_CTRL_QUEUE_INFO_##member##_MASK) << \ - SQ_CTRL_QUEUE_INFO_##member##_SHIFT) - -#define SQ_CTRL_QUEUE_INFO_GET(val, member) \ - (((val) >> SQ_CTRL_QUEUE_INFO_##member##_SHIFT) & \ - SQ_CTRL_QUEUE_INFO_##member##_MASK) - -#define SQ_CTRL_QUEUE_INFO_CLEAR(val, member) \ - ((val) & (~(SQ_CTRL_QUEUE_INFO_##member##_MASK << \ - SQ_CTRL_QUEUE_INFO_##member##_SHIFT))) - -#define SQ_TASK_INFO0_TUNNEL_FLAG_SHIFT 19 -#define SQ_TASK_INFO0_ESP_NEXT_PROTO_SHIFT 22 -#define SQ_TASK_INFO0_INNER_L4_EN_SHIFT 24 -#define SQ_TASK_INFO0_INNER_L3_EN_SHIFT 25 -#define SQ_TASK_INFO0_INNER_L4_PSEUDO_SHIFT 26 -#define SQ_TASK_INFO0_OUT_L4_EN_SHIFT 27 -#define SQ_TASK_INFO0_OUT_L3_EN_SHIFT 28 -#define SQ_TASK_INFO0_OUT_L4_PSEUDO_SHIFT 29 -#define SQ_TASK_INFO0_ESP_OFFLOAD_SHIFT 30 -#define SQ_TASK_INFO0_IPSEC_PROTO_SHIFT 31 - -#define SQ_TASK_INFO0_TUNNEL_FLAG_MASK 0x1U -#define SQ_TASK_INFO0_ESP_NEXT_PROTO_MASK 0x3U -#define SQ_TASK_INFO0_INNER_L4_EN_MASK 0x1U -#define SQ_TASK_INFO0_INNER_L3_EN_MASK 0x1U -#define SQ_TASK_INFO0_INNER_L4_PSEUDO_MASK 0x1U -#define SQ_TASK_INFO0_OUT_L4_EN_MASK 0x1U -#define SQ_TASK_INFO0_OUT_L3_EN_MASK 0x1U -#define SQ_TASK_INFO0_OUT_L4_PSEUDO_MASK 0x1U -#define SQ_TASK_INFO0_ESP_OFFLOAD_MASK 0x1U -#define SQ_TASK_INFO0_IPSEC_PROTO_MASK 0x1U - -#define SQ_TASK_INFO0_SET(val, member) \ - (((u32)(val) & SQ_TASK_INFO0_##member##_MASK) << \ - SQ_TASK_INFO0_##member##_SHIFT) -#define SQ_TASK_INFO0_GET(val, member) \ - (((val) >> SQ_TASK_INFO0_##member##_SHIFT) & \ - SQ_TASK_INFO0_##member##_MASK) - -#define SQ_TASK_INFO1_SET(val, member) \ - (((val) & SQ_TASK_INFO1_##member##_MASK) << \ - SQ_TASK_INFO1_##member##_SHIFT) -#define SQ_TASK_INFO1_GET(val, member) \ - (((val) >> SQ_TASK_INFO1_##member##_SHIFT) & \ - SQ_TASK_INFO1_##member##_MASK) - -#define SQ_TASK_INFO3_VLAN_TAG_SHIFT 0 -#define SQ_TASK_INFO3_VLAN_TYPE_SHIFT 16 -#define SQ_TASK_INFO3_VLAN_TAG_VALID_SHIFT 19 - -#define SQ_TASK_INFO3_VLAN_TAG_MASK 0xFFFFU -#define SQ_TASK_INFO3_VLAN_TYPE_MASK 0x7U -#define SQ_TASK_INFO3_VLAN_TAG_VALID_MASK 0x1U - -#define SQ_TASK_INFO3_SET(val, member) \ - (((val) & SQ_TASK_INFO3_##member##_MASK) << \ - SQ_TASK_INFO3_##member##_SHIFT) 
-#define SQ_TASK_INFO3_GET(val, member) \ - (((val) >> SQ_TASK_INFO3_##member##_SHIFT) & \ - SQ_TASK_INFO3_##member##_MASK) - -static inline u32 spnic_get_pkt_len_for_super_cqe(struct spnic_rq_cqe *cqe, bool last) -{ - u32 pkt_len = cqe->pkt_info; - - if (!last) - return RQ_CQE_PKT_LEN_GET(pkt_len, FIRST_LEN); - else - return RQ_CQE_PKT_LEN_GET(pkt_len, LAST_LEN); -} - -/* * - * spnic_prepare_sq_ctrl - init sq wqe cs - * @nr_descs: total sge_num, include bd0 in cs - * to do : check with zhangxingguo to confirm WQE init - */ -static inline void spnic_prepare_sq_ctrl(struct spnic_sq_wqe_combo *wqe_combo, - u32 queue_info, int nr_descs, u16 owner) -{ - struct spnic_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0; - - if (wqe_combo->wqe_type == SQ_WQE_COMPACT_TYPE) { - wqe_desc->ctrl_len |= - SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) | - SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) | - SQ_CTRL_SET(owner, OWNER); - - /* compact wqe queue_info will transfer to ucode */ - wqe_desc->queue_info = 0; - return; - } - - wqe_desc->ctrl_len |= SQ_CTRL_SET(nr_descs, BUFDESC_NUM) | - SQ_CTRL_SET(wqe_combo->task_type, TASKSECT_LEN) | - SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) | - SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) | - SQ_CTRL_SET(owner, OWNER); - - wqe_desc->queue_info = queue_info; - wqe_desc->queue_info |= SQ_CTRL_QUEUE_INFO_SET(1U, UC); - - if (!SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS)) { - wqe_desc->queue_info |= SQ_CTRL_QUEUE_INFO_SET(TX_MSS_DEFAULT, MSS); - } else if (SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS) < TX_MSS_MIN) { - /* mss should not less than 80 */ - wqe_desc->queue_info = SQ_CTRL_QUEUE_INFO_CLEAR(wqe_desc->queue_info, MSS); - wqe_desc->queue_info |= SQ_CTRL_QUEUE_INFO_SET(TX_MSS_MIN, MSS); - } -} - -/* * - * spnic_set_vlan_tx_offload - set vlan offload info - * @task: wqe task section - * @vlan_tag: vlan tag - * @vlan_type: 0--select TPID0 in IPSU, 1--select TPID0 in IPSU - * 2--select TPID2 in IPSU, 3--select TPID3 in IPSU, 4--select TPID4 in IPSU - */ -static inline void spnic_set_vlan_tx_offload(struct spnic_sq_task *task, u16 vlan_tag, u8 vlan_type) -{ - task->vlan_offload = SQ_TASK_INFO3_SET(vlan_tag, VLAN_TAG) | - SQ_TASK_INFO3_SET(vlan_type, VLAN_TYPE) | - SQ_TASK_INFO3_SET(1U, VLAN_TAG_VALID); -} - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_ntuple.c b/drivers/net/ethernet/ramaxel/spnic/spnic_ntuple.c deleted file mode 100644 index cd92de93a57e..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_ntuple.c +++ /dev/null @@ -1,841 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt - -#include <linux/kernel.h> -#include <linux/etherdevice.h> -#include <linux/netdevice.h> -#include <linux/device.h> -#include <linux/ethtool.h> -#include <linux/module.h> -#include <linux/moduleparam.h> -#include <linux/types.h> -#include <linux/errno.h> - -#include "sphw_common.h" -#include "sphw_crm.h" -#include "spnic_nic_cfg.h" -#include "spnic_nic_dev.h" - -#define MAX_NUM_OF_ETHTOOL_NTUPLE_RULES BIT(9) -struct spnic_ethtool_rx_flow_rule { - struct list_head list; - struct ethtool_rx_flow_spec flow_spec; -}; - -static void tcam_translate_key_y(u8 *key_y, u8 *src_input, u8 *mask, u8 len) -{ - u8 idx; - - for (idx = 0; idx < len; idx++) - key_y[idx] = src_input[idx] & mask[idx]; -} - -static void tcam_translate_key_x(u8 *key_x, u8 *key_y, u8 *mask, u8 len) -{ - u8 idx; - - for (idx = 0; idx < len; idx++) - key_x[idx] = key_y[idx] ^ mask[idx]; -} - -static 
void tcam_key_calculate(struct tag_tcam_key *tcam_key, - struct nic_tcam_cfg_rule *fdir_tcam_rule) -{ - tcam_translate_key_y(fdir_tcam_rule->key.y, (u8 *)(&tcam_key->key_info), - (u8 *)(&tcam_key->key_mask), TCAM_FLOW_KEY_SIZE); - tcam_translate_key_x(fdir_tcam_rule->key.x, fdir_tcam_rule->key.y, - (u8 *)(&tcam_key->key_mask), TCAM_FLOW_KEY_SIZE); -} - -#define TCAM_IPV4_TYPE 0 -#define TCAM_IPV6_TYPE 1 - -static int spnic_base_ipv4_parse(struct spnic_nic_dev *nic_dev, struct ethtool_rx_flow_spec *fs, - struct tag_tcam_key *tcam_key) -{ - struct ethtool_tcpip4_spec *mask = &fs->m_u.tcp_ip4_spec; - struct ethtool_tcpip4_spec *val = &fs->h_u.tcp_ip4_spec; - u32 temp; - - switch (mask->ip4src) { - case U32_MAX: - temp = ntohl(val->ip4src); - tcam_key->key_info.sipv4_h = high_16_bits(temp); - tcam_key->key_info.sipv4_l = low_16_bits(temp); - - tcam_key->key_mask.sipv4_h = U16_MAX; - tcam_key->key_mask.sipv4_l = U16_MAX; - break; - case 0: - break; - - default: - nicif_err(nic_dev, drv, nic_dev->netdev, "invalid src_ip mask\n"); - return -EINVAL; - } - - switch (mask->ip4dst) { - case U32_MAX: - temp = ntohl(val->ip4dst); - tcam_key->key_info.dipv4_h = high_16_bits(temp); - tcam_key->key_info.dipv4_l = low_16_bits(temp); - - tcam_key->key_mask.dipv4_h = U16_MAX; - tcam_key->key_mask.dipv4_l = U16_MAX; - break; - case 0: - break; - - default: - nicif_err(nic_dev, drv, nic_dev->netdev, "invalid src_ip mask\n"); - return -EINVAL; - } - - tcam_key->key_info.ip_type = TCAM_IPV4_TYPE; - tcam_key->key_mask.ip_type = TCAM_IP_TYPE_MASK; - - tcam_key->key_info.function_id = sphw_global_func_id(nic_dev->hwdev); - tcam_key->key_mask.function_id = TCAM_FUNC_ID_MASK; - - return 0; -} - -static int spnic_fdir_tcam_ipv4_l4_init(struct spnic_nic_dev *nic_dev, - struct ethtool_rx_flow_spec *fs, - struct tag_tcam_key *tcam_key) -{ - struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec; - struct ethtool_tcpip4_spec *l4_val = &fs->h_u.tcp_ip4_spec; - int err; - - err = spnic_base_ipv4_parse(nic_dev, fs, tcam_key); - if (err) - return err; - - tcam_key->key_info.dport = ntohs(l4_val->pdst); - tcam_key->key_mask.dport = l4_mask->pdst; - - tcam_key->key_info.sport = ntohs(l4_val->psrc); - tcam_key->key_mask.sport = l4_mask->psrc; - - if (fs->flow_type == TCP_V4_FLOW) - tcam_key->key_info.ip_proto = IPPROTO_TCP; - else - tcam_key->key_info.ip_proto = IPPROTO_UDP; - tcam_key->key_mask.ip_proto = U8_MAX; - - return 0; -} - -static int spnic_fdir_tcam_ipv4_init(struct spnic_nic_dev *nic_dev, - struct ethtool_rx_flow_spec *fs, - struct tag_tcam_key *tcam_key) -{ - struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec; - struct ethtool_usrip4_spec *l3_val = &fs->h_u.usr_ip4_spec; - int err; - - err = spnic_base_ipv4_parse(nic_dev, fs, tcam_key); - if (err) - return err; - - tcam_key->key_info.ip_proto = l3_val->proto; - tcam_key->key_mask.ip_proto = l3_mask->proto; - - return 0; -} - -#ifndef UNSUPPORT_NTUPLE_IPV6 -enum ipv6_parse_res { - IPV6_MASK_INVALID, - IPV6_MASK_ALL_MASK, - IPV6_MASK_ALL_ZERO, -}; - -enum ipv6_index { - IPV6_IDX0, - IPV6_IDX1, - IPV6_IDX2, - IPV6_IDX3, -}; - -static int ipv6_mask_parse(u32 *ipv6_mask) -{ - if (ipv6_mask[IPV6_IDX0] == 0 && ipv6_mask[IPV6_IDX1] == 0 && - ipv6_mask[IPV6_IDX2] == 0 && ipv6_mask[IPV6_IDX3] == 0) - return IPV6_MASK_ALL_ZERO; - - if (ipv6_mask[IPV6_IDX0] == U32_MAX && - ipv6_mask[IPV6_IDX1] == U32_MAX && - ipv6_mask[IPV6_IDX2] == U32_MAX && ipv6_mask[IPV6_IDX3] == U32_MAX) - return IPV6_MASK_ALL_MASK; - - return IPV6_MASK_INVALID; -} - -static int 
spnic_base_ipv6_parse(struct spnic_nic_dev *nic_dev, - struct ethtool_rx_flow_spec *fs, - struct tag_tcam_key *tcam_key) -{ - struct ethtool_tcpip6_spec *mask = &fs->m_u.tcp_ip6_spec; - struct ethtool_tcpip6_spec *val = &fs->h_u.tcp_ip6_spec; - int parse_res; - u32 temp; - - parse_res = ipv6_mask_parse((u32 *)mask->ip6src); - if (parse_res == IPV6_MASK_ALL_MASK) { - temp = ntohl(val->ip6src[IPV6_IDX0]); - tcam_key->key_info_ipv6.sipv6_key0 = high_16_bits(temp); - tcam_key->key_info_ipv6.sipv6_key1 = low_16_bits(temp); - temp = ntohl(val->ip6src[IPV6_IDX1]); - tcam_key->key_info_ipv6.sipv6_key2 = high_16_bits(temp); - tcam_key->key_info_ipv6.sipv6_key3 = low_16_bits(temp); - temp = ntohl(val->ip6src[IPV6_IDX2]); - tcam_key->key_info_ipv6.sipv6_key4 = high_16_bits(temp); - tcam_key->key_info_ipv6.sipv6_key5 = low_16_bits(temp); - temp = ntohl(val->ip6src[IPV6_IDX3]); - tcam_key->key_info_ipv6.sipv6_key6 = high_16_bits(temp); - tcam_key->key_info_ipv6.sipv6_key7 = low_16_bits(temp); - - tcam_key->key_mask_ipv6.sipv6_key0 = U16_MAX; - tcam_key->key_mask_ipv6.sipv6_key1 = U16_MAX; - tcam_key->key_mask_ipv6.sipv6_key2 = U16_MAX; - tcam_key->key_mask_ipv6.sipv6_key3 = U16_MAX; - tcam_key->key_mask_ipv6.sipv6_key4 = U16_MAX; - tcam_key->key_mask_ipv6.sipv6_key5 = U16_MAX; - tcam_key->key_mask_ipv6.sipv6_key6 = U16_MAX; - tcam_key->key_mask_ipv6.sipv6_key7 = U16_MAX; - } else if (parse_res == IPV6_MASK_INVALID) { - nicif_err(nic_dev, drv, nic_dev->netdev, "invalid src_ipv6 mask\n"); - return -EINVAL; - } - - parse_res = ipv6_mask_parse((u32 *)mask->ip6dst); - if (parse_res == IPV6_MASK_ALL_MASK) { - temp = ntohl(val->ip6dst[IPV6_IDX0]); - tcam_key->key_info_ipv6.dipv6_key0 = high_16_bits(temp); - tcam_key->key_info_ipv6.dipv6_key1 = low_16_bits(temp); - temp = ntohl(val->ip6dst[IPV6_IDX1]); - tcam_key->key_info_ipv6.dipv6_key2 = high_16_bits(temp); - tcam_key->key_info_ipv6.dipv6_key3 = low_16_bits(temp); - temp = ntohl(val->ip6dst[IPV6_IDX2]); - tcam_key->key_info_ipv6.dipv6_key4 = high_16_bits(temp); - tcam_key->key_info_ipv6.dipv6_key5 = low_16_bits(temp); - temp = ntohl(val->ip6dst[IPV6_IDX3]); - tcam_key->key_info_ipv6.dipv6_key6 = high_16_bits(temp); - tcam_key->key_info_ipv6.dipv6_key7 = low_16_bits(temp); - - tcam_key->key_mask_ipv6.dipv6_key0 = U16_MAX; - tcam_key->key_mask_ipv6.dipv6_key1 = U16_MAX; - tcam_key->key_mask_ipv6.dipv6_key2 = U16_MAX; - tcam_key->key_mask_ipv6.dipv6_key3 = U16_MAX; - tcam_key->key_mask_ipv6.dipv6_key4 = U16_MAX; - tcam_key->key_mask_ipv6.dipv6_key5 = U16_MAX; - tcam_key->key_mask_ipv6.dipv6_key6 = U16_MAX; - tcam_key->key_mask_ipv6.dipv6_key7 = U16_MAX; - } else if (parse_res == IPV6_MASK_INVALID) { - nicif_err(nic_dev, drv, nic_dev->netdev, "invalid dst_ipv6 mask\n"); - return -EINVAL; - } - - tcam_key->key_info_ipv6.ip_type = TCAM_IPV6_TYPE; - tcam_key->key_mask_ipv6.ip_type = TCAM_IP_TYPE_MASK; - - tcam_key->key_info_ipv6.function_id = sphw_global_func_id(nic_dev->hwdev); - tcam_key->key_mask_ipv6.function_id = TCAM_FUNC_ID_MASK; - - return 0; -} - -static int spnic_fdir_tcam_ipv6_l4_init(struct spnic_nic_dev *nic_dev, - struct ethtool_rx_flow_spec *fs, - struct tag_tcam_key *tcam_key) -{ - struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec; - struct ethtool_tcpip6_spec *l4_val = &fs->h_u.tcp_ip6_spec; - int err; - - err = spnic_base_ipv6_parse(nic_dev, fs, tcam_key); - if (err) - return err; - - tcam_key->key_info_ipv6.dport = ntohs(l4_val->pdst); - tcam_key->key_mask_ipv6.dport = l4_mask->pdst; - - tcam_key->key_info_ipv6.sport = 
ntohs(l4_val->psrc); - tcam_key->key_mask_ipv6.sport = l4_mask->psrc; - - if (fs->flow_type == TCP_V6_FLOW) - tcam_key->key_info_ipv6.ip_proto = NEXTHDR_TCP; - else - tcam_key->key_info_ipv6.ip_proto = NEXTHDR_UDP; - tcam_key->key_mask_ipv6.ip_proto = U8_MAX; - - return 0; -} - -static int spnic_fdir_tcam_ipv6_init(struct spnic_nic_dev *nic_dev, - struct ethtool_rx_flow_spec *fs, - struct tag_tcam_key *tcam_key) -{ - struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec; - struct ethtool_usrip6_spec *l3_val = &fs->h_u.usr_ip6_spec; - int err; - - err = spnic_base_ipv6_parse(nic_dev, fs, tcam_key); - if (err) - return err; - - tcam_key->key_info_ipv6.ip_proto = l3_val->l4_proto; - tcam_key->key_mask_ipv6.ip_proto = l3_mask->l4_proto; - - return 0; -} -#endif - -static int spnic_fdir_tcam_info_init(struct spnic_nic_dev *nic_dev, - struct ethtool_rx_flow_spec *fs, - struct tag_tcam_key *tcam_key, - struct nic_tcam_cfg_rule *fdir_tcam_rule) -{ - int err; - - switch (fs->flow_type) { - case TCP_V4_FLOW: - case UDP_V4_FLOW: - err = spnic_fdir_tcam_ipv4_l4_init(nic_dev, fs, tcam_key); - if (err) - return err; - break; - case IP_USER_FLOW: - err = spnic_fdir_tcam_ipv4_init(nic_dev, fs, tcam_key); - if (err) - return err; - break; -#ifndef UNSUPPORT_NTUPLE_IPV6 - case TCP_V6_FLOW: - case UDP_V6_FLOW: - err = spnic_fdir_tcam_ipv6_l4_init(nic_dev, fs, tcam_key); - if (err) - return err; - break; - case IPV6_USER_FLOW: - err = spnic_fdir_tcam_ipv6_init(nic_dev, fs, tcam_key); - if (err) - return err; - break; -#endif - default: - return -EOPNOTSUPP; - } - - tcam_key->key_info.tunnel_type = 0; - tcam_key->key_mask.tunnel_type = TCAM_TUNNEL_TYPE_MASK; - - fdir_tcam_rule->data.qid = (u32)fs->ring_cookie; - tcam_key_calculate(tcam_key, fdir_tcam_rule); - - return 0; -} - -void spnic_flush_rx_flow_rule(struct spnic_nic_dev *nic_dev) -{ - struct spnic_tcam_info *tcam_info = &nic_dev->tcam; - struct spnic_ethtool_rx_flow_rule *eth_rule = NULL; - struct spnic_ethtool_rx_flow_rule *eth_rule_tmp = NULL; - struct spnic_tcam_filter *tcam_iter = NULL; - struct spnic_tcam_filter *tcam_iter_tmp = NULL; - struct spnic_tcam_dynamic_block *block = NULL; - struct spnic_tcam_dynamic_block *block_tmp = NULL; - struct list_head *dynamic_list = &tcam_info->tcam_dynamic_info.tcam_dynamic_list; - - if (!list_empty(&tcam_info->tcam_list)) { - list_for_each_entry_safe(tcam_iter, tcam_iter_tmp, &tcam_info->tcam_list, - tcam_filter_list) { - list_del(&tcam_iter->tcam_filter_list); - kfree(tcam_iter); - } - } - if (!list_empty(dynamic_list)) { - list_for_each_entry_safe(block, block_tmp, dynamic_list, block_list) { - list_del(&block->block_list); - kfree(block); - } - } - - if (!list_empty(&nic_dev->rx_flow_rule.rules)) { - list_for_each_entry_safe(eth_rule, eth_rule_tmp, - &nic_dev->rx_flow_rule.rules, list) { - list_del(ð_rule->list); - kfree(eth_rule); - } - } - -#ifndef FPGA_SUPPORT - spnic_flush_tcam_rule(nic_dev->hwdev); - spnic_set_fdir_tcam_rule_filter(nic_dev->hwdev, false); -#endif -} - -static struct spnic_tcam_dynamic_block * -spnic_alloc_dynamic_block_resource(struct spnic_nic_dev *nic_dev, - struct spnic_tcam_info *tcam_info, u16 dynamic_block_id) -{ - struct spnic_tcam_dynamic_block *dynamic_block_ptr = NULL; - - dynamic_block_ptr = kzalloc(sizeof(*dynamic_block_ptr), GFP_KERNEL); - if (!dynamic_block_ptr) { - nicif_err(nic_dev, drv, nic_dev->netdev, "fdir filter dynamic alloc block index %d memory failed\n", - dynamic_block_id); - return NULL; - } - - dynamic_block_ptr->dynamic_block_id = dynamic_block_id; - 
list_add_tail(&dynamic_block_ptr->block_list, - &tcam_info->tcam_dynamic_info.tcam_dynamic_list); - - tcam_info->tcam_dynamic_info.dynamic_block_cnt++; - - return dynamic_block_ptr; -} - -static void -spnic_free_dynamic_block_resource(struct spnic_tcam_info *tcam_info, - struct spnic_tcam_dynamic_block *block_ptr) -{ - if (!block_ptr) - return; - - list_del(&block_ptr->block_list); - kfree(block_ptr); - - tcam_info->tcam_dynamic_info.dynamic_block_cnt--; -} - -static struct spnic_tcam_dynamic_block * -spnic_dynamic_lookup_tcam_filter(struct spnic_nic_dev *nic_dev, - struct nic_tcam_cfg_rule *fdir_tcam_rule, - struct spnic_tcam_info *tcam_info, - struct spnic_tcam_filter *tcam_filter, u16 *tcam_index) -{ - struct spnic_tcam_dynamic_block *tmp = NULL; - u16 index; - - list_for_each_entry(tmp, &tcam_info->tcam_dynamic_info.tcam_dynamic_list, block_list) - if (tmp->dynamic_index_cnt < SPNIC_TCAM_DYNAMIC_BLOCK_SIZE) - break; - - if (!tmp || tmp->dynamic_index_cnt >= SPNIC_TCAM_DYNAMIC_BLOCK_SIZE) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Fdir filter dynamic lookup for index failed\n"); - return NULL; - } - - for (index = 0; index < SPNIC_TCAM_DYNAMIC_BLOCK_SIZE; index++) - if (tmp->dynamic_index_used[index] == 0) - break; - - if (index == SPNIC_TCAM_DYNAMIC_BLOCK_SIZE) { - nicif_err(nic_dev, drv, nic_dev->netdev, "tcam block 0x%x supports filter rules is full\n", - tmp->dynamic_block_id); - return NULL; - } - - tcam_filter->dynamic_block_id = tmp->dynamic_block_id; - tcam_filter->index = index; - *tcam_index = index; - - fdir_tcam_rule->index = index + - SPNIC_PKT_TCAM_DYNAMIC_INDEX_START(tmp->dynamic_block_id); - - return tmp; -} - -static int spnic_add_tcam_filter(struct spnic_nic_dev *nic_dev, - struct spnic_tcam_filter *tcam_filter, - struct nic_tcam_cfg_rule *fdir_tcam_rule) -{ - struct spnic_tcam_info *tcam_info = &nic_dev->tcam; - struct spnic_tcam_dynamic_block *dynamic_block_ptr = NULL; - struct spnic_tcam_dynamic_block *tmp = NULL; - u16 block_cnt = tcam_info->tcam_dynamic_info.dynamic_block_cnt; - u16 tcam_block_index = 0; - int block_alloc_flag = 0; - u16 index = 0; - int err; - - if (tcam_info->tcam_rule_nums >= - block_cnt * SPNIC_TCAM_DYNAMIC_BLOCK_SIZE) { - if (block_cnt >= (SPNIC_MAX_TCAM_FILTERS / SPNIC_TCAM_DYNAMIC_BLOCK_SIZE)) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Dynamic tcam block is full, alloc failed\n"); - goto failed; - } - - err = spnic_alloc_tcam_block(nic_dev->hwdev, &tcam_block_index); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Fdir filter dynamic tcam alloc block failed\n"); - goto failed; - } - - block_alloc_flag = 1; - - dynamic_block_ptr = - spnic_alloc_dynamic_block_resource(nic_dev, tcam_info, tcam_block_index); - if (!dynamic_block_ptr) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Fdir filter dynamic alloc block memory failed\n"); - goto block_alloc_failed; - } - } - - tmp = spnic_dynamic_lookup_tcam_filter(nic_dev, fdir_tcam_rule, tcam_info, - tcam_filter, &index); - if (!tmp) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Dynamic lookup tcam filter failed\n"); - goto lookup_tcam_index_failed; - } - - err = spnic_add_tcam_rule(nic_dev->hwdev, fdir_tcam_rule); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Fdir_tcam_rule add failed\n"); - goto add_tcam_rules_failed; - } - - nicif_info(nic_dev, drv, nic_dev->netdev, - "Add fdir tcam rule, function_id: 0x%x, tcam_block_id: %d, local_index: %d, global_index: %d, queue: %d, tcam_rule_nums: %d succeed\n", - sphw_global_func_id(nic_dev->hwdev), - tcam_filter->dynamic_block_id, 
index, fdir_tcam_rule->index, - fdir_tcam_rule->data.qid, tcam_info->tcam_rule_nums + 1); - - if (tcam_info->tcam_rule_nums == 0) { - err = spnic_set_fdir_tcam_rule_filter(nic_dev->hwdev, true); - if (err) - goto enable_failed; - } - - list_add_tail(&tcam_filter->tcam_filter_list, &tcam_info->tcam_list); - - tmp->dynamic_index_used[index] = 1; - tmp->dynamic_index_cnt++; - - tcam_info->tcam_rule_nums++; - - return 0; - -enable_failed: - spnic_del_tcam_rule(nic_dev->hwdev, fdir_tcam_rule->index); - -add_tcam_rules_failed: -lookup_tcam_index_failed: - if (block_alloc_flag == 1) - spnic_free_dynamic_block_resource(tcam_info, dynamic_block_ptr); - -block_alloc_failed: - if (block_alloc_flag == 1) - spnic_free_tcam_block(nic_dev->hwdev, &tcam_block_index); - -failed: - return -EFAULT; -} - -static int spnic_del_tcam_filter(struct spnic_nic_dev *nic_dev, - struct spnic_tcam_filter *tcam_filter) -{ - struct spnic_tcam_info *tcam_info = &nic_dev->tcam; - u16 dynamic_block_id = tcam_filter->dynamic_block_id; - struct spnic_tcam_dynamic_block *tmp = NULL; - u32 index = 0; - int err; - - list_for_each_entry(tmp, &tcam_info->tcam_dynamic_info.tcam_dynamic_list, block_list) { - if (tmp->dynamic_block_id == dynamic_block_id) - break; - } - if (!tmp || tmp->dynamic_block_id != dynamic_block_id) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Fdir filter del dynamic lookup for block failed\n"); - return -EFAULT; - } - - index = SPNIC_PKT_TCAM_DYNAMIC_INDEX_START(tmp->dynamic_block_id) + tcam_filter->index; - - err = spnic_del_tcam_rule(nic_dev->hwdev, index); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "fdir_tcam_rule del failed\n"); - return -EFAULT; - } - - nicif_info(nic_dev, drv, nic_dev->netdev, - "Del fdir_tcam_dynamic_rule function_id: 0x%x, tcam_block_id: %d, local_index: %d, global_index: %d, local_rules_nums: %d, global_rule_nums: %d succeed\n", - sphw_global_func_id(nic_dev->hwdev), dynamic_block_id, - tcam_filter->index, index, tmp->dynamic_index_cnt - 1, - tcam_info->tcam_rule_nums - 1); - - tmp->dynamic_index_used[tcam_filter->index] = 0; - tmp->dynamic_index_cnt--; - tcam_info->tcam_rule_nums--; - if (tmp->dynamic_index_cnt == 0) { - spnic_free_tcam_block(nic_dev->hwdev, &dynamic_block_id); - spnic_free_dynamic_block_resource(tcam_info, tmp); - } - - if (tcam_info->tcam_rule_nums == 0) - spnic_set_fdir_tcam_rule_filter(nic_dev->hwdev, false); - - list_del(&tcam_filter->tcam_filter_list); - kfree(tcam_filter); - - return 0; -} - -static inline struct spnic_tcam_filter * -spnic_tcam_filter_lookup(struct list_head *filter_list, struct tag_tcam_key *key) -{ - struct spnic_tcam_filter *iter; - - list_for_each_entry(iter, filter_list, tcam_filter_list) { - if (memcmp(key, &iter->tcam_key, sizeof(struct tag_tcam_key)) == 0) - return iter; - } - - return NULL; -} - -static void del_ethtool_rule(struct spnic_nic_dev *nic_dev, - struct spnic_ethtool_rx_flow_rule *eth_rule) -{ - list_del(ð_rule->list); - nic_dev->rx_flow_rule.tot_num_rules--; - - kfree(eth_rule); -} - -static int spnic_remove_one_rule(struct spnic_nic_dev *nic_dev, - struct spnic_ethtool_rx_flow_rule *eth_rule) -{ - struct spnic_tcam_info *tcam_info = &nic_dev->tcam; - struct spnic_tcam_filter *tcam_filter; - struct nic_tcam_cfg_rule fdir_tcam_rule; - struct tag_tcam_key tcam_key; - int err; - - memset(&fdir_tcam_rule, 0, sizeof(fdir_tcam_rule)); - memset(&tcam_key, 0, sizeof(tcam_key)); - - err = spnic_fdir_tcam_info_init(nic_dev, ð_rule->flow_spec, &tcam_key, &fdir_tcam_rule); - if (err) { - nicif_err(nic_dev, drv, 
nic_dev->netdev, "Init fdir info failed\n"); - return err; - } - - tcam_filter = spnic_tcam_filter_lookup(&tcam_info->tcam_list, &tcam_key); - if (!tcam_filter) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Filter does not exists\n"); - return -EEXIST; - } - - err = spnic_del_tcam_filter(nic_dev, tcam_filter); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Delete tcam filter failed\n"); - return err; - } - - del_ethtool_rule(nic_dev, eth_rule); - - return 0; -} - -static void add_rule_to_list(struct spnic_nic_dev *nic_dev, - struct spnic_ethtool_rx_flow_rule *rule) -{ - struct spnic_ethtool_rx_flow_rule *iter = NULL; - struct list_head *head = &nic_dev->rx_flow_rule.rules; - - list_for_each_entry(iter, &nic_dev->rx_flow_rule.rules, list) { - if (iter->flow_spec.location > rule->flow_spec.location) - break; - head = &iter->list; - } - nic_dev->rx_flow_rule.tot_num_rules++; - list_add(&rule->list, head); -} - -static int spnic_add_one_rule(struct spnic_nic_dev *nic_dev, struct ethtool_rx_flow_spec *fs) -{ - struct nic_tcam_cfg_rule fdir_tcam_rule; - struct tag_tcam_key tcam_key; - struct spnic_ethtool_rx_flow_rule *eth_rule = NULL; - struct spnic_tcam_filter *tcam_filter = NULL; - struct spnic_tcam_info *tcam_info = &nic_dev->tcam; - int err; - - memset(&fdir_tcam_rule, 0, sizeof(fdir_tcam_rule)); - memset(&tcam_key, 0, sizeof(tcam_key)); - err = spnic_fdir_tcam_info_init(nic_dev, fs, &tcam_key, &fdir_tcam_rule); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Init fdir info failed\n"); - return err; - } - - tcam_filter = spnic_tcam_filter_lookup(&tcam_info->tcam_list, &tcam_key); - if (tcam_filter) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Filter exists\n"); - return -EEXIST; - } - - tcam_filter = kzalloc(sizeof(*tcam_filter), GFP_KERNEL); - if (!tcam_filter) - return -ENOMEM; - memcpy(&tcam_filter->tcam_key, &tcam_key, sizeof(struct tag_tcam_key)); - tcam_filter->queue = (u16)fdir_tcam_rule.data.qid; - - err = spnic_add_tcam_filter(nic_dev, tcam_filter, &fdir_tcam_rule); - if (err) - goto add_tcam_filter_fail; - - /* driver save new rule filter */ - eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL); - if (!eth_rule) { - err = -ENOMEM; - goto alloc_eth_rule_fail; - } - - eth_rule->flow_spec = *fs; - add_rule_to_list(nic_dev, eth_rule); - - return 0; - -alloc_eth_rule_fail: - spnic_del_tcam_filter(nic_dev, tcam_filter); -add_tcam_filter_fail: - kfree(tcam_filter); - return err; -} - -static struct spnic_ethtool_rx_flow_rule * -find_ethtool_rule(struct spnic_nic_dev *nic_dev, u32 location) -{ - struct spnic_ethtool_rx_flow_rule *iter = NULL; - - list_for_each_entry(iter, &nic_dev->rx_flow_rule.rules, list) { - if (iter->flow_spec.location == location) - return iter; - } - return NULL; -} - -static int validate_flow(struct spnic_nic_dev *nic_dev, struct ethtool_rx_flow_spec *fs) -{ - if (fs->location >= MAX_NUM_OF_ETHTOOL_NTUPLE_RULES) { - nicif_err(nic_dev, drv, nic_dev->netdev, "loc exceed limit[0,%lu]\n", - MAX_NUM_OF_ETHTOOL_NTUPLE_RULES); - return -EINVAL; - } - - if (fs->ring_cookie >= nic_dev->q_params.num_qps) { - nicif_err(nic_dev, drv, nic_dev->netdev, "action is larger than queue number %u\n", - nic_dev->q_params.num_qps); - return -EINVAL; - } - - switch (fs->flow_type) { - case TCP_V4_FLOW: - case UDP_V4_FLOW: - case IP_USER_FLOW: -#ifndef UNSUPPORT_NTUPLE_IPV6 - case TCP_V6_FLOW: - case UDP_V6_FLOW: - case IPV6_USER_FLOW: -#endif - break; - default: - nicif_err(nic_dev, drv, nic_dev->netdev, "flow type is not supported\n"); - return -EOPNOTSUPP; - } - - 
return 0; -} - -int spnic_ethtool_flow_replace(struct spnic_nic_dev *nic_dev, struct ethtool_rx_flow_spec *fs) -{ - struct spnic_ethtool_rx_flow_rule *eth_rule = NULL; - struct ethtool_rx_flow_spec flow_spec_temp; - int loc_exit_flag = 0; - int err; - - err = validate_flow(nic_dev, fs); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "flow is not valid %d\n", err); - return err; - } - - eth_rule = find_ethtool_rule(nic_dev, fs->location); - /* when location is same, delete old location rule. */ - if (eth_rule) { - memcpy(&flow_spec_temp, ð_rule->flow_spec, sizeof(struct ethtool_rx_flow_spec)); - err = spnic_remove_one_rule(nic_dev, eth_rule); - if (err) - return err; - - loc_exit_flag = 1; - } - - /* add new rule filter */ - err = spnic_add_one_rule(nic_dev, fs); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Add new rule filter failed\n"); - if (loc_exit_flag) - spnic_add_one_rule(nic_dev, &flow_spec_temp); - - return -ENOENT; - } - - return 0; -} - -int spnic_ethtool_flow_remove(struct spnic_nic_dev *nic_dev, u32 location) -{ - struct spnic_ethtool_rx_flow_rule *eth_rule = NULL; - int err; - - if (location >= MAX_NUM_OF_ETHTOOL_NTUPLE_RULES) - return -ENOSPC; - - eth_rule = find_ethtool_rule(nic_dev, location); - if (!eth_rule) - return -ENOENT; - - err = spnic_remove_one_rule(nic_dev, eth_rule); - - return err; -} - -int spnic_ethtool_get_flow(struct spnic_nic_dev *nic_dev, - struct ethtool_rxnfc *info, u32 location) -{ - struct spnic_ethtool_rx_flow_rule *eth_rule = NULL; - - if (location >= MAX_NUM_OF_ETHTOOL_NTUPLE_RULES) - return -EINVAL; - - list_for_each_entry(eth_rule, &nic_dev->rx_flow_rule.rules, list) { - if (eth_rule->flow_spec.location == location) { - info->fs = eth_rule->flow_spec; - return 0; - } - } - - return -ENOENT; -} - -int spnic_ethtool_get_all_flows(struct spnic_nic_dev *nic_dev, - struct ethtool_rxnfc *info, u32 *rule_locs) -{ - int idx = 0; - struct spnic_ethtool_rx_flow_rule *eth_rule = NULL; - - info->data = MAX_NUM_OF_ETHTOOL_NTUPLE_RULES; - list_for_each_entry(eth_rule, &nic_dev->rx_flow_rule.rules, list) - rule_locs[idx++] = eth_rule->flow_spec.location; - - return info->rule_cnt == idx ? 
0 : -ENOENT; -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_pci_id_tbl.h b/drivers/net/ethernet/ramaxel/spnic/spnic_pci_id_tbl.h deleted file mode 100644 index 9d32608e6bb7..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_pci_id_tbl.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_PCI_ID_TBL_H -#define SPNIC_PCI_ID_TBL_H - -#define PCI_VENDOR_ID_RAMAXEL 0x1E81 -#define SPNIC_DEV_ID_PF_STD 0x9020 -#define SPNIC_DEV_ID_VF 0x9001 -#define SPNIC_DEV_ID_VF_HV 0x9002 - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_rss.c b/drivers/net/ethernet/ramaxel/spnic/spnic_rss.c deleted file mode 100644 index 956d868df5b5..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_rss.c +++ /dev/null @@ -1,741 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt - -#include <linux/kernel.h> -#include <linux/pci.h> -#include <linux/interrupt.h> -#include <linux/etherdevice.h> -#include <linux/netdevice.h> -#include <linux/device.h> -#include <linux/ethtool.h> -#include <linux/module.h> -#include <linux/moduleparam.h> -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/dcbnl.h> - -#include "sphw_crm.h" -#include "spnic_nic_cfg.h" -#include "spnic_nic_dev.h" -#include "sphw_hw.h" -#include "spnic_rss.h" - -static u16 num_qps; -module_param(num_qps, ushort, 0444); -MODULE_PARM_DESC(num_qps, "Number of Queue Pairs (default=0)"); - -#define MOD_PARA_VALIDATE_NUM_QPS(nic_dev, num_qps, out_qps) do { \ - if ((num_qps) > (nic_dev)->max_qps) \ - nic_warn(&(nic_dev)->pdev->dev, \ - "Module Parameter %s value %u is out of range, " \ - "Maximum value for the device: %u, using %u\n", \ - #num_qps, num_qps, (nic_dev)->max_qps, \ - (nic_dev)->max_qps); \ - if (!(num_qps) || (num_qps) > (nic_dev)->max_qps) \ - (out_qps) = (nic_dev)->max_qps; \ - else \ - (out_qps) = (num_qps); \ -} while (0) - -static void spnic_fillout_indir_tbl(struct spnic_nic_dev *nic_dev, u8 num_tcs, u32 *indir) -{ - u16 num_rss, tc_group_size; - int i; - - if (num_tcs) - tc_group_size = SPNIC_RSS_INDIR_SIZE / num_tcs; - else - tc_group_size = SPNIC_RSS_INDIR_SIZE; - - num_rss = nic_dev->q_params.num_rss; - for (i = 0; i < SPNIC_RSS_INDIR_SIZE; i++) - indir[i] = (i / tc_group_size) * num_rss + i % num_rss; -} - -int spnic_rss_init(struct spnic_nic_dev *nic_dev) -{ - struct net_device *netdev = nic_dev->netdev; - u8 cos, num_tc = 0; - u8 prio_tc[SPNIC_DCB_UP_MAX] = {0}; - u8 max_cos = nic_dev->hw_dcb_cfg.max_cos; - - if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) { - num_tc = max_cos; - for (cos = 0; cos < SPNIC_DCB_COS_MAX; cos++) { - if (cos < SPNIC_DCB_COS_MAX - max_cos) - prio_tc[cos] = max_cos - 1; - else - prio_tc[cos] = (SPNIC_DCB_COS_MAX - 1) - cos; - } - } else { - num_tc = 0; - } - - return spnic_set_hw_rss_parameters(netdev, 1, num_tc, prio_tc); -} - -void spnic_rss_deinit(struct spnic_nic_dev *nic_dev) -{ - u8 prio_tc[SPNIC_DCB_UP_MAX] = {0}; - - spnic_rss_cfg(nic_dev->hwdev, 0, 0, prio_tc, 1); -} - -void spnic_init_rss_parameters(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - nic_dev->rss_hash_engine = SPNIC_RSS_HASH_ENGINE_TYPE_XOR; - nic_dev->rss_type.tcp_ipv6_ext = 1; - nic_dev->rss_type.ipv6_ext = 1; - nic_dev->rss_type.tcp_ipv6 = 1; - nic_dev->rss_type.ipv6 = 1; - nic_dev->rss_type.tcp_ipv4 = 1; - nic_dev->rss_type.ipv4 = 1; - 
nic_dev->rss_type.udp_ipv6 = 1; - nic_dev->rss_type.udp_ipv4 = 1; -} - -void spnic_clear_rss_config(struct spnic_nic_dev *nic_dev) -{ - kfree(nic_dev->rss_hkey); - nic_dev->rss_hkey = NULL; - - kfree(nic_dev->rss_indir); - nic_dev->rss_indir = NULL; -} - -void spnic_set_default_rss_indir(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - set_bit(SPNIC_RSS_DEFAULT_INDIR, &nic_dev->flags); -} - -static void spnic_maybe_reconfig_rss_indir(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - int i; - - /* if dcb is enabled, user can not config rss indir table */ - if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags)) { - nicif_info(nic_dev, drv, netdev, "DCB is enabled, set default rss indir\n"); - goto discard_user_rss_indir; - } - - for (i = 0; i < SPNIC_RSS_INDIR_SIZE; i++) { - if (nic_dev->rss_indir[i] >= nic_dev->q_params.num_qps) - goto discard_user_rss_indir; - } - - return; - -discard_user_rss_indir: - spnic_set_default_rss_indir(netdev); -} - -static void decide_num_qps(struct spnic_nic_dev *nic_dev) -{ - u16 tmp_num_qps = nic_dev->q_params.num_qps; - u16 num_cpus = 0; - int i, node; - - MOD_PARA_VALIDATE_NUM_QPS(nic_dev, num_qps, tmp_num_qps); - - /* To reduce memory footprint in ovs mode. - * VF can't get board info correctly with early pf driver. - */ - /* if ((spnic_get_func_mode(nic_dev->hwdev) == FUNC_MOD_NORMAL_HOST) && - * service_mode == SPNIC_WORK_MODE_OVS && - * sphw_func_type(nic_dev->hwdev) != TYPE_VF) - * MOD_PARA_VALIDATE_NUM_QPS(nic_dev, ovs_num_qps, - * tmp_num_qps); - */ - - for (i = 0; i < (int)num_online_cpus(); i++) { - node = (int)cpu_to_node(i); - if (node == dev_to_node(&nic_dev->pdev->dev)) - num_cpus++; - } - - if (!num_cpus) - num_cpus = (u16)num_online_cpus(); - - nic_dev->q_params.num_qps = min_t(u16, tmp_num_qps, num_cpus); -} - -static void copy_value_to_rss_hkey(struct spnic_nic_dev *nic_dev, const u8 *hkey) -{ - u32 i; - u32 *rss_hkey = (u32 *)nic_dev->rss_hkey; - - memcpy(nic_dev->rss_hkey, hkey, SPNIC_RSS_KEY_SIZE); - - /* make a copy of the key, and convert it to Big Endian */ - for (i = 0; i < SPNIC_RSS_KEY_SIZE / sizeof(u32); i++) - nic_dev->rss_hkey_be[i] = cpu_to_be32(rss_hkey[i]); -} - -int alloc_rss_resource(struct spnic_nic_dev *nic_dev) -{ - u8 default_rss_key[SPNIC_RSS_KEY_SIZE] = { - 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, - 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, - 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, - 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, - 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa}; - - /* We request double spaces for the hash key, - * the second one holds the key of Big Edian - * format. 
- */ - nic_dev->rss_hkey = - kzalloc(SPNIC_RSS_KEY_SIZE * SPNIC_RSS_KEY_RSV_NUM, GFP_KERNEL); - if (!nic_dev->rss_hkey) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc memory for rss_hkey\n"); - return -ENOMEM; - } - - /* The second space is for big edian hash key */ - nic_dev->rss_hkey_be = (u32 *)(nic_dev->rss_hkey + SPNIC_RSS_KEY_SIZE); - copy_value_to_rss_hkey(nic_dev, (u8 *)default_rss_key); - - nic_dev->rss_indir = kzalloc(sizeof(u32) * SPNIC_RSS_INDIR_SIZE, GFP_KERNEL); - if (!nic_dev->rss_indir) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc memory for rss_indir\n"); - kfree(nic_dev->rss_hkey); - nic_dev->rss_hkey = NULL; - return -ENOMEM; - } - - set_bit(SPNIC_RSS_DEFAULT_INDIR, &nic_dev->flags); - - return 0; -} - -void spnic_try_to_enable_rss(struct spnic_nic_dev *nic_dev) -{ - u8 prio_tc[SPNIC_DCB_UP_MAX] = {0}; - int err = 0; - - if (!nic_dev) - return; - - nic_dev->max_qps = sphw_func_max_nic_qnum(nic_dev->hwdev); - if (nic_dev->max_qps <= 1 || !SPNIC_SUPPORT_RSS(nic_dev->hwdev)) - goto set_q_params; - - err = alloc_rss_resource(nic_dev); - if (err) { - nic_dev->max_qps = 1; - goto set_q_params; - } - - set_bit(SPNIC_RSS_ENABLE, &nic_dev->flags); - nic_dev->max_qps = sphw_func_max_nic_qnum(nic_dev->hwdev); - - decide_num_qps(nic_dev); - - nic_dev->q_params.rss_limit = nic_dev->q_params.num_qps; - nic_dev->q_params.num_rss = nic_dev->q_params.num_qps; - - spnic_init_rss_parameters(nic_dev->netdev); - err = spnic_set_hw_rss_parameters(nic_dev->netdev, 0, 0, prio_tc); - if (err) { - nic_err(&nic_dev->pdev->dev, "Failed to set hardware rss parameters\n"); - - spnic_clear_rss_config(nic_dev); - nic_dev->max_qps = 1; - goto set_q_params; - } - return; - -set_q_params: - clear_bit(SPNIC_RSS_ENABLE, &nic_dev->flags); - nic_dev->q_params.rss_limit = nic_dev->max_qps; - nic_dev->q_params.num_qps = nic_dev->max_qps; - nic_dev->q_params.num_rss = nic_dev->max_qps; -} - -static int spnic_config_rss_hw_resource(struct spnic_nic_dev *nic_dev, u32 *indir_tbl) -{ - int err; - - err = spnic_rss_set_indir_tbl(nic_dev->hwdev, indir_tbl); - if (err) - return err; - - err = spnic_set_rss_type(nic_dev->hwdev, nic_dev->rss_type); - if (err) - return err; - - return spnic_rss_set_hash_engine(nic_dev->hwdev, nic_dev->rss_hash_engine); -} - -int spnic_set_hw_rss_parameters(struct net_device *netdev, u8 rss_en, u8 num_tc, u8 *prio_tc) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - int err; - - /* RSS key */ - err = spnic_rss_set_hash_key(nic_dev->hwdev, nic_dev->rss_hkey); - if (err) - return err; - - spnic_maybe_reconfig_rss_indir(netdev); - - if (test_bit(SPNIC_RSS_DEFAULT_INDIR, &nic_dev->flags)) - spnic_fillout_indir_tbl(nic_dev, num_tc, nic_dev->rss_indir); - - err = spnic_config_rss_hw_resource(nic_dev, nic_dev->rss_indir); - if (err) - return err; - - err = spnic_rss_cfg(nic_dev->hwdev, rss_en, num_tc, prio_tc, nic_dev->q_params.num_qps); - if (err) - return err; - - return 0; -} - -/* for ethtool */ -static int set_l4_rss_hash_ops(struct ethtool_rxnfc *cmd, struct nic_rss_type *rss_type) -{ - u8 rss_l4_en = 0; - - switch (cmd->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - case 0: - rss_l4_en = 0; - break; - case (RXH_L4_B_0_1 | RXH_L4_B_2_3): - rss_l4_en = 1; - break; - default: - return -EINVAL; - } - - switch (cmd->flow_type) { - case TCP_V4_FLOW: - rss_type->tcp_ipv4 = rss_l4_en; - break; - case TCP_V6_FLOW: - rss_type->tcp_ipv6 = rss_l4_en; - break; - case UDP_V4_FLOW: - rss_type->udp_ipv4 = rss_l4_en; - break; - case UDP_V6_FLOW: - rss_type->udp_ipv6 = 
rss_l4_en; - break; - default: - return -EINVAL; - } - - return 0; -} - -static int update_rss_hash_opts(struct spnic_nic_dev *nic_dev, struct ethtool_rxnfc *cmd, - struct nic_rss_type *rss_type) -{ - int err; - - switch (cmd->flow_type) { - case TCP_V4_FLOW: - case TCP_V6_FLOW: - case UDP_V4_FLOW: - case UDP_V6_FLOW: - err = set_l4_rss_hash_ops(cmd, rss_type); - if (err) - return err; - - break; - case IPV4_FLOW: - rss_type->ipv4 = 1; - break; - case IPV6_FLOW: - rss_type->ipv6 = 1; - break; - default: - nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupported flow type\n"); - return -EINVAL; - } - - return 0; -} - -static int spnic_set_rss_hash_opts(struct spnic_nic_dev *nic_dev, struct ethtool_rxnfc *cmd) -{ - struct nic_rss_type *rss_type = &nic_dev->rss_type; - int err; - - if (!test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) { - cmd->data = 0; - nicif_err(nic_dev, drv, nic_dev->netdev, - "RSS is disable, not support to set flow-hash\n"); - return -EOPNOTSUPP; - } - - /* RSS does not support anything other than hashing - * to queues on src and dst IPs and ports - */ - if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | - RXH_L4_B_2_3)) - return -EINVAL; - - /* We need at least the IP SRC and DEST fields for hashing */ - if (!(cmd->data & RXH_IP_SRC) || !(cmd->data & RXH_IP_DST)) - return -EINVAL; - - err = spnic_get_rss_type(nic_dev->hwdev, rss_type); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to get rss type\n"); - return -EFAULT; - } - - err = update_rss_hash_opts(nic_dev, cmd, rss_type); - if (err) - return err; - - err = spnic_set_rss_type(nic_dev->hwdev, *rss_type); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to set rss type\n"); - return -EFAULT; - } - - nicif_info(nic_dev, drv, nic_dev->netdev, "Set rss hash options success\n"); - - return 0; -} - -static void convert_rss_type(u8 rss_opt, struct ethtool_rxnfc *cmd) -{ - if (rss_opt) - cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; -} - -static int spnic_convert_rss_type(struct spnic_nic_dev *nic_dev, struct nic_rss_type *rss_type, - struct ethtool_rxnfc *cmd) -{ - cmd->data = RXH_IP_SRC | RXH_IP_DST; - switch (cmd->flow_type) { - case TCP_V4_FLOW: - convert_rss_type(rss_type->tcp_ipv4, cmd); - break; - case TCP_V6_FLOW: - convert_rss_type(rss_type->tcp_ipv6, cmd); - break; - case UDP_V4_FLOW: - convert_rss_type(rss_type->udp_ipv4, cmd); - break; - case UDP_V6_FLOW: - convert_rss_type(rss_type->udp_ipv6, cmd); - break; - case IPV4_FLOW: - case IPV6_FLOW: - break; - default: - nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupported flow type\n"); - cmd->data = 0; - return -EINVAL; - } - - return 0; -} - -static int spnic_get_rss_hash_opts(struct spnic_nic_dev *nic_dev, struct ethtool_rxnfc *cmd) -{ - struct nic_rss_type rss_type = {0}; - int err; - - cmd->data = 0; - - if (!test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) - return 0; - - err = spnic_get_rss_type(nic_dev->hwdev, &rss_type); - if (err) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to get rss type\n"); - return err; - } - - return spnic_convert_rss_type(nic_dev, &rss_type, cmd); -} - -int spnic_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - int err = 0; - - switch (cmd->cmd) { - case ETHTOOL_GRXRINGS: - cmd->data = nic_dev->q_params.num_qps; - break; - case ETHTOOL_GRXCLSRLCNT: - cmd->rule_cnt = nic_dev->rx_flow_rule.tot_num_rules; - break; - case ETHTOOL_GRXCLSRULE: - err = spnic_ethtool_get_flow(nic_dev, cmd, cmd->fs.location); - 
break; - case ETHTOOL_GRXCLSRLALL: - err = spnic_ethtool_get_all_flows(nic_dev, cmd, rule_locs); - break; - case ETHTOOL_GRXFH: - err = spnic_get_rss_hash_opts(nic_dev, cmd); - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - -int spnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - int err = 0; - - switch (cmd->cmd) { - case ETHTOOL_SRXFH: - err = spnic_set_rss_hash_opts(nic_dev, cmd); - break; - case ETHTOOL_SRXCLSRLINS: - err = spnic_ethtool_flow_replace(nic_dev, &cmd->fs); - break; - case ETHTOOL_SRXCLSRLDEL: - err = spnic_ethtool_flow_remove(nic_dev, cmd->fs.location); - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - -static u16 spnic_max_channels(struct spnic_nic_dev *nic_dev) -{ - u8 tcs = (u8)netdev_get_num_tc(nic_dev->netdev); - - return tcs ? nic_dev->max_qps / tcs : nic_dev->max_qps; -} - -static u16 spnic_curr_channels(struct spnic_nic_dev *nic_dev) -{ - if (netif_running(nic_dev->netdev)) - return nic_dev->q_params.num_rss ? nic_dev->q_params.num_rss : 1; - else - return min_t(u16, spnic_max_channels(nic_dev), - nic_dev->q_params.rss_limit); -} - -void spnic_get_channels(struct net_device *netdev, struct ethtool_channels *channels) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - channels->max_rx = 0; - channels->max_tx = 0; - channels->max_other = 0; - /* report maximum channels */ - channels->max_combined = spnic_max_channels(nic_dev); - channels->rx_count = 0; - channels->tx_count = 0; - channels->other_count = 0; - /* report flow director queues as maximum channels */ - channels->combined_count = spnic_curr_channels(nic_dev); -} - -void spnic_update_num_qps(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - u16 num_qps; - u8 tcs; - - /* change num_qps to change counter in ethtool -S */ - tcs = (u8)netdev_get_num_tc(nic_dev->netdev); - nic_dev->q_params.num_tc = tcs; - num_qps = (u16)(nic_dev->q_params.rss_limit * (tcs ? 
tcs : 1)); - nic_dev->q_params.num_qps = min_t(u16, nic_dev->max_qps, num_qps); -} - -static int spnic_validate_channel_parameter(struct net_device *netdev, - struct ethtool_channels *channels) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - u16 max_channel = spnic_max_channels(nic_dev); - unsigned int count = channels->combined_count; - - if (!count) { - nicif_err(nic_dev, drv, netdev, "Unsupported combined_count=0\n"); - return -EINVAL; - } - - if (channels->tx_count || channels->rx_count || channels->other_count) { - nicif_err(nic_dev, drv, netdev, "Setting rx/tx/other count not supported\n"); - return -EINVAL; - } - - if (count > max_channel) { - nicif_err(nic_dev, drv, netdev, "Combined count %u exceed limit %u\n", - count, max_channel); - return -EINVAL; - } - - return 0; -} - -static void change_num_channel_reopen_handler(struct spnic_nic_dev *nic_dev, const void *priv_data) -{ - spnic_set_default_rss_indir(nic_dev->netdev); -} - -int spnic_set_channels(struct net_device *netdev, struct ethtool_channels *channels) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct spnic_dyna_txrxq_params q_params = {0}; - unsigned int count = channels->combined_count; - int err; - - if (spnic_validate_channel_parameter(netdev, channels)) - return -EINVAL; - - if (!test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) { - nicif_err(nic_dev, drv, netdev, - "This function don't support RSS, only support 1 queue pair\n"); - return -EOPNOTSUPP; - } - - nicif_info(nic_dev, drv, netdev, "Set max combined queue number from %u to %u\n", - nic_dev->q_params.rss_limit, count); - - if (netif_running(netdev)) { - q_params = nic_dev->q_params; - q_params.rss_limit = (u16)count; - q_params.txqs_res = NULL; - q_params.rxqs_res = NULL; - q_params.irq_cfg = NULL; - - nicif_info(nic_dev, drv, netdev, "Restarting channel\n"); - err = spnic_change_channel_settings(nic_dev, &q_params, - change_num_channel_reopen_handler, NULL); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to change channel settings\n"); - return -EFAULT; - } - } else { - /* Discard user configured rss */ - spnic_set_default_rss_indir(netdev); - nic_dev->q_params.rss_limit = (u16)count; - spnic_update_num_qps(netdev); - } - - return 0; -} - -static int set_rss_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - int err; - - if (indir) { - err = spnic_rss_set_indir_tbl(nic_dev->hwdev, indir); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to set rss indir table\n"); - return -EFAULT; - } - clear_bit(SPNIC_RSS_DEFAULT_INDIR, &nic_dev->flags); - - memcpy(nic_dev->rss_indir, indir, - sizeof(u32) * SPNIC_RSS_INDIR_SIZE); - nicif_info(nic_dev, drv, netdev, "Change rss indir success\n"); - } - - if (key) { - err = spnic_rss_set_hash_key(nic_dev->hwdev, key); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to set rss key\n"); - return -EFAULT; - } - - copy_value_to_rss_hkey(nic_dev, key); - nicif_info(nic_dev, drv, netdev, "Change rss key success\n"); - } - - return 0; -} - -u32 spnic_get_rxfh_indir_size(struct net_device *netdev) -{ - return SPNIC_RSS_INDIR_SIZE; -} - -u32 spnic_get_rxfh_key_size(struct net_device *netdev) -{ - return SPNIC_RSS_KEY_SIZE; -} - -int spnic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - int err = 0; - - if (!test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Rss is disable\n"); - return -EOPNOTSUPP; 
- } - - if (hfunc) - *hfunc = nic_dev->rss_hash_engine ? - ETH_RSS_HASH_TOP : ETH_RSS_HASH_XOR; - - if (indir) { - err = spnic_rss_get_indir_tbl(nic_dev->hwdev, indir); - if (err) - return -EFAULT; - } - - if (key) - memcpy(key, nic_dev->rss_hkey, SPNIC_RSS_KEY_SIZE); - - return err; -} - -int spnic_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - int err = 0; - - if (!test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Not support to set rss parameters when rss is disable\n"); - return -EOPNOTSUPP; - } - - if (test_bit(SPNIC_DCB_ENABLE, &nic_dev->flags) && indir) { - nicif_err(nic_dev, drv, netdev, "Not support to set indir when DCB is enabled\n"); - return -EOPNOTSUPP; - } - - if (hfunc != ETH_RSS_HASH_NO_CHANGE) { - if (hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR) { - nicif_err(nic_dev, drv, netdev, "Not support to set hfunc type except TOP and XOR\n"); - return -EOPNOTSUPP; - } - - nic_dev->rss_hash_engine = (hfunc == ETH_RSS_HASH_XOR) ? - SPNIC_RSS_HASH_ENGINE_TYPE_XOR : - SPNIC_RSS_HASH_ENGINE_TYPE_TOEP; - err = spnic_rss_set_hash_engine(nic_dev->hwdev, nic_dev->rss_hash_engine); - if (err) - return -EFAULT; - - nicif_info(nic_dev, drv, netdev, "Change hfunc to RSS_HASH_%s success\n", - (hfunc == ETH_RSS_HASH_XOR) ? "XOR" : "TOP"); - } - err = set_rss_rxfh(netdev, indir, key); - - return err; -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_rss.h b/drivers/net/ethernet/ramaxel/spnic/spnic_rss.h deleted file mode 100644 index e64a4dcf39dd..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_rss.h +++ /dev/null @@ -1,50 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_RSS_H -#define SPNIC_RSS_H - -#include "spnic_nic_dev.h" - -int spnic_rss_init(struct spnic_nic_dev *nic_dev); - -void spnic_rss_deinit(struct spnic_nic_dev *nic_dev); - -int spnic_set_hw_rss_parameters(struct net_device *netdev, u8 rss_en, u8 num_tc, u8 *prio_tc); - -void spnic_init_rss_parameters(struct net_device *netdev); - -void spnic_set_default_rss_indir(struct net_device *netdev); - -void spnic_try_to_enable_rss(struct spnic_nic_dev *nic_dev); - -void spnic_clear_rss_config(struct spnic_nic_dev *nic_dev); - -void spnic_flush_rx_flow_rule(struct spnic_nic_dev *nic_dev); -int spnic_ethtool_get_flow(struct spnic_nic_dev *nic_dev, struct ethtool_rxnfc *info, u32 location); - -int spnic_ethtool_get_all_flows(struct spnic_nic_dev *nic_dev, - struct ethtool_rxnfc *info, u32 *rule_locs); - -int spnic_ethtool_flow_remove(struct spnic_nic_dev *nic_dev, u32 location); - -int spnic_ethtool_flow_replace(struct spnic_nic_dev *nic_dev, struct ethtool_rx_flow_spec *fs); - -/* for ethtool */ -int spnic_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs); - -int spnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd); - -void spnic_get_channels(struct net_device *netdev, struct ethtool_channels *channels); - -int spnic_set_channels(struct net_device *netdev, struct ethtool_channels *channels); - -u32 spnic_get_rxfh_indir_size(struct net_device *netdev); - -u32 spnic_get_rxfh_key_size(struct net_device *netdev); - -int spnic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc); - -int spnic_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc); - -#endif diff --git 
a/drivers/net/ethernet/ramaxel/spnic/spnic_rss_cfg.c b/drivers/net/ethernet/ramaxel/spnic/spnic_rss_cfg.c deleted file mode 100644 index 12da3aa59400..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_rss_cfg.c +++ /dev/null @@ -1,333 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt - -#include <linux/kernel.h> -#include <linux/etherdevice.h> -#include <linux/netdevice.h> -#include <linux/device.h> -#include <linux/module.h> -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/dcbnl.h> - -#include "sphw_crm.h" -#include "spnic_nic_cfg.h" -#include "sphw_hw.h" -#include "spnic_nic.h" -#include "sphw_common.h" - -static int spnic_rss_cfg_hash_key(struct spnic_nic_cfg *nic_cfg, u8 opcode, u8 *key) -{ - struct spnic_cmd_rss_hash_key hash_key; - u16 out_size = sizeof(hash_key); - int err; - - memset(&hash_key, 0, sizeof(struct spnic_cmd_rss_hash_key)); - hash_key.func_id = sphw_global_func_id(nic_cfg->hwdev); - hash_key.opcode = opcode; - - if (opcode == SPNIC_CMD_OP_SET) - memcpy(hash_key.key, key, SPNIC_RSS_KEY_SIZE); - - err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, - SPNIC_NIC_CMD_CFG_RSS_HASH_KEY, - &hash_key, sizeof(hash_key), - &hash_key, &out_size); - if (err || !out_size || hash_key.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to %s hash key, err: %d, status: 0x%x, out size: 0x%x\n", - opcode == SPNIC_CMD_OP_SET ? "set" : "get", - err, hash_key.msg_head.status, out_size); - return -EINVAL; - } - - if (opcode == SPNIC_CMD_OP_GET) - memcpy(key, hash_key.key, SPNIC_RSS_KEY_SIZE); - - return 0; -} - -int spnic_rss_set_hash_key(void *hwdev, const u8 *key) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - u8 hash_key[SPNIC_RSS_KEY_SIZE]; - - if (!hwdev || !key) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - memcpy(hash_key, key, SPNIC_RSS_KEY_SIZE); - return spnic_rss_cfg_hash_key(nic_cfg, SPNIC_CMD_OP_SET, hash_key); -} - -int spnic_rss_get_hash_key(void *hwdev, u8 *key) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev || !key) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - return spnic_rss_cfg_hash_key(nic_cfg, SPNIC_CMD_OP_GET, key); -} - -int spnic_rss_get_indir_tbl(void *hwdev, u32 *indir_table) -{ - struct sphw_cmd_buf *cmd_buf = NULL; - struct spnic_nic_cfg *nic_cfg = NULL; - u16 *indir_tbl = NULL; - int err, i; - - if (!hwdev || !indir_table) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - cmd_buf = sphw_alloc_cmd_buf(hwdev); - if (!cmd_buf) { - nic_err(nic_cfg->dev_hdl, "Failed to allocate cmd_buf.\n"); - return -ENOMEM; - } - - cmd_buf->size = sizeof(struct nic_rss_indirect_tbl); - err = sphw_cmdq_detail_resp(hwdev, SPHW_MOD_L2NIC, SPNIC_UCODE_CMD_GET_RSS_INDIR_TABLE, - cmd_buf, cmd_buf, NULL, 0, SPHW_CHANNEL_NIC); - if (err) { - nic_err(nic_cfg->dev_hdl, "Failed to get rss indir table\n"); - goto get_indir_tbl_failed; - } - - indir_tbl = (u16 *)cmd_buf->buf; - for (i = 0; i < SPNIC_RSS_INDIR_SIZE; i++) - indir_table[i] = *(indir_tbl + i); - -get_indir_tbl_failed: - sphw_free_cmd_buf(hwdev, cmd_buf); - - return err; -} - -int spnic_rss_set_indir_tbl(void *hwdev, const u32 *indir_table) -{ - struct nic_rss_indirect_tbl *indir_tbl = NULL; - struct sphw_cmd_buf *cmd_buf = NULL; - struct spnic_nic_cfg *nic_cfg = NULL; - u32 *temp = NULL; - u32 i, size; - u64 out_param = 0; - int err; - - if (!hwdev || !indir_table) - return -EINVAL; - - 
nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - cmd_buf = sphw_alloc_cmd_buf(hwdev); - if (!cmd_buf) { - nic_err(nic_cfg->dev_hdl, "Failed to allocate cmd buf\n"); - return -ENOMEM; - } - - cmd_buf->size = sizeof(struct nic_rss_indirect_tbl); - indir_tbl = (struct nic_rss_indirect_tbl *)cmd_buf->buf; - memset(indir_tbl, 0, sizeof(*indir_tbl)); - - for (i = 0; i < SPNIC_RSS_INDIR_SIZE; i++) - indir_tbl->entry[i] = (u16)(*(indir_table + i)); - - size = sizeof(indir_tbl->entry) / sizeof(u32); - temp = (u32 *)indir_tbl->entry; - for (i = 0; i < size; i++) - temp[i] = cpu_to_be32(temp[i]); - - err = sphw_cmdq_direct_resp(hwdev, SPHW_MOD_L2NIC, SPNIC_UCODE_CMD_SET_RSS_INDIR_TABLE, - cmd_buf, &out_param, 0, SPHW_CHANNEL_NIC); - if (err || out_param != 0) { - nic_err(nic_cfg->dev_hdl, "Failed to set rss indir table\n"); - err = -EFAULT; - } - - sphw_free_cmd_buf(hwdev, cmd_buf); - return err; -} - -#define SPNIC_RSS_TYPE_VALID_SHIFT 23 -#define SPNIC_RSS_TYPE_TCP_IPV6_EXT_SHIFT 24 -#define SPNIC_RSS_TYPE_IPV6_EXT_SHIFT 25 -#define SPNIC_RSS_TYPE_TCP_IPV6_SHIFT 26 -#define SPNIC_RSS_TYPE_IPV6_SHIFT 27 -#define SPNIC_RSS_TYPE_TCP_IPV4_SHIFT 28 -#define SPNIC_RSS_TYPE_IPV4_SHIFT 29 -#define SPNIC_RSS_TYPE_UDP_IPV6_SHIFT 30 -#define SPNIC_RSS_TYPE_UDP_IPV4_SHIFT 31 -#define SPNIC_RSS_TYPE_SET(val, member) (((u32)(val) & 0x1) << SPNIC_RSS_TYPE_##member##_SHIFT) - -#define SPNIC_RSS_TYPE_GET(val, member) (((u32)(val) >> SPNIC_RSS_TYPE_##member##_SHIFT) & 0x1) - -int spnic_set_rss_type(void *hwdev, struct nic_rss_type rss_type) -{ - struct nic_rss_context_tbl *ctx_tbl = NULL; - struct sphw_cmd_buf *cmd_buf = NULL; - struct spnic_nic_cfg *nic_cfg = NULL; - u32 ctx = 0; - u64 out_param = 0; - int err; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - cmd_buf = sphw_alloc_cmd_buf(hwdev); - if (!cmd_buf) { - nic_err(nic_cfg->dev_hdl, "Failed to allocate cmd buf\n"); - return -ENOMEM; - } - - ctx |= SPNIC_RSS_TYPE_SET(1, VALID) | - SPNIC_RSS_TYPE_SET(rss_type.ipv4, IPV4) | - SPNIC_RSS_TYPE_SET(rss_type.ipv6, IPV6) | - SPNIC_RSS_TYPE_SET(rss_type.ipv6_ext, IPV6_EXT) | - SPNIC_RSS_TYPE_SET(rss_type.tcp_ipv4, TCP_IPV4) | - SPNIC_RSS_TYPE_SET(rss_type.tcp_ipv6, TCP_IPV6) | - SPNIC_RSS_TYPE_SET(rss_type.tcp_ipv6_ext, TCP_IPV6_EXT) | - SPNIC_RSS_TYPE_SET(rss_type.udp_ipv4, UDP_IPV4) | - SPNIC_RSS_TYPE_SET(rss_type.udp_ipv6, UDP_IPV6); - - cmd_buf->size = sizeof(struct nic_rss_context_tbl); - ctx_tbl = (struct nic_rss_context_tbl *)cmd_buf->buf; - memset(ctx_tbl, 0, sizeof(*ctx_tbl)); - ctx_tbl->ctx = cpu_to_be32(ctx); - - /* cfg the rss context table by command queue */ - err = sphw_cmdq_direct_resp(hwdev, SPHW_MOD_L2NIC, SPNIC_UCODE_CMD_SET_RSS_CONTEXT_TABLE, - cmd_buf, &out_param, 0, SPHW_CHANNEL_NIC); - - sphw_free_cmd_buf(hwdev, cmd_buf); - - if (err || out_param != 0) { - nic_err(nic_cfg->dev_hdl, "Failed to set rss context table, err: %d\n", - err); - return -EFAULT; - } - - return 0; -} - -int spnic_get_rss_type(void *hwdev, struct nic_rss_type *rss_type) -{ - struct spnic_rss_context_table ctx_tbl; - u16 out_size = sizeof(ctx_tbl); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - if (!hwdev || !rss_type) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - - memset(&ctx_tbl, 0, sizeof(struct spnic_rss_context_table)); - ctx_tbl.func_id = sphw_global_func_id(hwdev); - - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_GET_RSS_CTX_TBL, - &ctx_tbl, sizeof(ctx_tbl), - &ctx_tbl, &out_size); - if (err || !out_size || 
ctx_tbl.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to get hash type, err: %d, status: 0x%x, out size: 0x%x\n", - err, ctx_tbl.msg_head.status, out_size); - return -EINVAL; - } - - rss_type->ipv4 = SPNIC_RSS_TYPE_GET(ctx_tbl.context, IPV4); - rss_type->ipv6 = SPNIC_RSS_TYPE_GET(ctx_tbl.context, IPV6); - rss_type->ipv6_ext = SPNIC_RSS_TYPE_GET(ctx_tbl.context, IPV6_EXT); - rss_type->tcp_ipv4 = SPNIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV4); - rss_type->tcp_ipv6 = SPNIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV6); - rss_type->tcp_ipv6_ext = SPNIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV6_EXT); - rss_type->udp_ipv4 = SPNIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV4); - rss_type->udp_ipv6 = SPNIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV6); - - return 0; -} - -static int spnic_rss_cfg_hash_engine(struct spnic_nic_cfg *nic_cfg, u8 opcode, u8 *type) -{ - struct spnic_cmd_rss_engine_type hash_type; - u16 out_size = sizeof(hash_type); - int err; - - memset(&hash_type, 0, sizeof(struct spnic_cmd_rss_engine_type)); - - hash_type.func_id = sphw_global_func_id(nic_cfg->hwdev); - hash_type.opcode = opcode; - - if (opcode == SPNIC_CMD_OP_SET) - hash_type.hash_engine = *type; - - err = l2nic_msg_to_mgmt_sync(nic_cfg->hwdev, SPNIC_NIC_CMD_CFG_RSS_HASH_ENGINE, - &hash_type, sizeof(hash_type), - &hash_type, &out_size); - if (err || !out_size || hash_type.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to %s hash engine, err: %d, status: 0x%x, out size: 0x%x\n", - opcode == SPNIC_CMD_OP_SET ? "set" : "get", - err, hash_type.msg_head.status, out_size); - return -EIO; - } - - if (opcode == SPNIC_CMD_OP_GET) - *type = hash_type.hash_engine; - - return 0; -} - -int spnic_rss_set_hash_engine(void *hwdev, u8 type) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - return spnic_rss_cfg_hash_engine(nic_cfg, SPNIC_CMD_OP_SET, &type); -} - -int spnic_rss_get_hash_engine(void *hwdev, u8 *type) -{ - struct spnic_nic_cfg *nic_cfg = NULL; - - if (!hwdev || !type) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - return spnic_rss_cfg_hash_engine(nic_cfg, SPNIC_CMD_OP_GET, type); -} - -int spnic_rss_cfg(void *hwdev, u8 rss_en, u8 tc_num, u8 *prio_tc, u16 num_qps) -{ - struct spnic_cmd_rss_config rss_cfg; - u16 out_size = sizeof(rss_cfg); - struct spnic_nic_cfg *nic_cfg = NULL; - int err; - - /* micro code required: number of TC should be power of 2 */ - if (!hwdev || !prio_tc || (tc_num & (tc_num - 1))) - return -EINVAL; - - nic_cfg = sphw_get_service_adapter(hwdev, SERVICE_T_NIC); - memset(&rss_cfg, 0, sizeof(struct spnic_cmd_rss_config)); - rss_cfg.func_id = sphw_global_func_id(hwdev); - rss_cfg.rss_en = rss_en; - rss_cfg.rq_priority_number = tc_num ? 
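Note: spnic_rss_cfg_hash_engine() is one helper driving both directions of the same management command: with the set opcode the request carries the caller's engine type, with the get opcode the reply is copied back to the caller. A standalone sketch of that opcode-driven shape, with a plain static variable standing in for the firmware state and no real message channel:

/*
 * Standalone sketch of a single config helper driven by a set/get opcode,
 * mirroring the shape of spnic_rss_cfg_hash_engine(). The "firmware" here is
 * just a static variable standing in for the management channel.
 */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

enum cfg_opcode { CFG_OP_GET = 0, CFG_OP_SET = 1 };

static uint8_t fw_hash_engine;	/* stand-in for device/firmware state */

static int cfg_hash_engine(enum cfg_opcode opcode, uint8_t *type)
{
	if (!type)
		return -EINVAL;

	if (opcode == CFG_OP_SET)
		fw_hash_engine = *type;	/* request carries the new value */
	else
		*type = fw_hash_engine;	/* reply carries the current value */

	return 0;
}

int main(void)
{
	uint8_t engine = 1;	/* value is illustrative (e.g. Toeplitz vs XOR) */
	uint8_t readback = 0;

	cfg_hash_engine(CFG_OP_SET, &engine);
	cfg_hash_engine(CFG_OP_GET, &readback);
	printf("engine set to %u, read back %u\n",
	       (unsigned)engine, (unsigned)readback);
	return 0;
}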
(u8)ilog2(tc_num) : 0; - rss_cfg.num_qps = num_qps; - - memcpy(rss_cfg.prio_tc, prio_tc, SPNIC_DCB_UP_MAX); - err = l2nic_msg_to_mgmt_sync(hwdev, SPNIC_NIC_CMD_RSS_CFG, - &rss_cfg, sizeof(rss_cfg), - &rss_cfg, &out_size); - if (err || !out_size || rss_cfg.msg_head.status) { - nic_err(nic_cfg->dev_hdl, "Failed to set rss cfg, err: %d, status: 0x%x, out size: 0x%x\n", - err, rss_cfg.msg_head.status, out_size); - return -EINVAL; - } - - return 0; -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_rx.c b/drivers/net/ethernet/ramaxel/spnic/spnic_rx.c deleted file mode 100644 index 3ae2f15c727b..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_rx.c +++ /dev/null @@ -1,1238 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt -#include <linux/types.h> -#include <linux/errno.h> -#include <linux/kernel.h> -#include <linux/skbuff.h> -#include <linux/dma-mapping.h> -#include <linux/interrupt.h> -#include <linux/etherdevice.h> -#include <linux/netdevice.h> -#include <linux/device.h> -#include <linux/pci.h> -#include <linux/u64_stats_sync.h> -#include <linux/ip.h> -#include <linux/tcp.h> -#include <linux/sctp.h> -#include <linux/pkt_sched.h> -#include <linux/ipv6.h> -#include <linux/module.h> -#include <linux/compiler.h> - -#include "sphw_crm.h" -#include "sphw_common.h" -#include "spnic_nic_qp.h" -#include "spnic_nic_io.h" -#include "spnic_nic_dev.h" -#include "spnic_rx.h" -#include "spnic_rss.h" - -static u32 rq_pi_rd_en; -module_param(rq_pi_rd_en, uint, 0644); -MODULE_PARM_DESC(rq_pi_rd_en, "Enable rq read pi from host, defaut update pi by doorbell (default=0)"); - -/* performance: ci addr RTE_CACHE_SIZE(64B) alignment */ -#define SPNIC_RX_HDR_SIZE 256 -#define SPNIC_RX_BUFFER_WRITE 16 - -#define SPNIC_RX_TCP_PKT 0x3 -#define SPNIC_RX_UDP_PKT 0x4 -#define SPNIC_RX_SCTP_PKT 0x7 - -#define SPNIC_RX_IPV4_PKT 0 -#define SPNIC_RX_IPV6_PKT 1 -#define SPNIC_RX_INVALID_IP_TYPE 2 - -#define SPNIC_RX_PKT_FORMAT_NON_TUNNEL 0 -#define SPNIC_RX_PKT_FORMAT_VXLAN 1 - -#define RXQ_STATS_INC(rxq, field) \ -do { \ - u64_stats_update_begin(&(rxq)->rxq_stats.syncp); \ - (rxq)->rxq_stats.field++; \ - u64_stats_update_end(&(rxq)->rxq_stats.syncp); \ -} while (0) - -static bool rx_alloc_mapped_page(struct spnic_nic_dev *nic_dev, - struct spnic_rx_info *rx_info) -{ - struct pci_dev *pdev = nic_dev->pdev; - struct page *page = rx_info->page; - dma_addr_t dma = rx_info->buf_dma_addr; - - if (likely(dma)) - return true; - - /* alloc new page for storage */ - page = alloc_pages_node(NUMA_NO_NODE, GFP_ATOMIC | __GFP_COMP, nic_dev->page_order); - if (unlikely(!page)) - return false; - - /* map page for use */ - dma = dma_map_page(&pdev->dev, page, 0, nic_dev->dma_rx_buff_size, DMA_FROM_DEVICE); - - /* if mapping failed free memory back to system since - * there isn't much point in holding memory we can't use - */ - if (unlikely(dma_mapping_error(&pdev->dev, dma))) { - __free_pages(page, nic_dev->page_order); - return false; - } - - rx_info->page = page; - rx_info->buf_dma_addr = dma; - rx_info->page_offset = 0; - - return true; -} - -static u32 spnic_rx_fill_wqe(struct spnic_rxq *rxq) -{ - struct net_device *netdev = rxq->netdev; - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct spnic_rq_wqe *rq_wqe = NULL; - struct spnic_rx_info *rx_info = NULL; - u32 i; - - for (i = 0; i < rxq->q_depth; i++) { - rx_info = &rxq->rx_info[i]; - rq_wqe = spnic_rq_wqe_addr(rxq->rq, (u16)i); - - if (rxq->rq->wqe_type == 
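Note: spnic_rss_cfg() above only accepts a TC count that is zero or a power of two (the microcode requirement noted in its comment) and hands the count to hardware as log2(tc_num). A standalone sketch of that validation and conversion, using the compiler builtin __builtin_ctz in place of the kernel's ilog2():

/*
 * Standalone sketch of the TC-count validation used by spnic_rss_cfg():
 * the count must be a power of two, and is carried to hardware as
 * log2(count). Zero is allowed and means "no TC".
 */
#include <stdio.h>
#include <stdint.h>

/* Zero or a power of two: at most one bit set. */
static int tc_num_valid(uint8_t tc_num)
{
	return (tc_num & (tc_num - 1)) == 0;
}

/* log2 of a power of two; callers must pass tc_num != 0. */
static uint8_t tc_num_to_prio(uint8_t tc_num)
{
	return (uint8_t)__builtin_ctz(tc_num);
}

int main(void)
{
	uint8_t samples[] = { 0, 1, 2, 3, 4, 6, 8 };

	for (unsigned int i = 0; i < sizeof(samples); i++) {
		uint8_t n = samples[i];

		if (!tc_num_valid(n))
			printf("tc_num=%u rejected (not a power of two)\n",
			       (unsigned)n);
		else
			printf("tc_num=%u -> rq_priority_number=%u\n",
			       (unsigned)n, n ? (unsigned)tc_num_to_prio(n) : 0);
	}
	return 0;
}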
SPNIC_EXTEND_RQ_WQE) { - /* unit of cqe length is 16B */ - sphw_set_sge(&rq_wqe->extend_wqe.cqe_sect.sge, rx_info->cqe_dma, - (sizeof(struct spnic_rq_cqe) >> SPNIC_CQE_SIZE_SHIFT)); - /* use fixed len */ - rq_wqe->extend_wqe.buf_desc.sge.len = - nic_dev->rx_buff_len; - } else { - rq_wqe->normal_wqe.cqe_hi_addr = upper_32_bits(rx_info->cqe_dma); - rq_wqe->normal_wqe.cqe_lo_addr = lower_32_bits(rx_info->cqe_dma); - } - - rx_info->rq_wqe = rq_wqe; - } - - return i; -} - -static struct sk_buff *stub_rx_alloc_skb(struct spnic_rxq *rxq, struct spnic_rx_info *rx_info) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(rxq->netdev); - struct sk_buff *skb = NULL; - u16 random_id; - dma_addr_t addr; - int err; - - get_random_bytes(&random_id, sizeof(u16)); - - rx_info->skb_len = SPNIC_RX_HDR_SIZE + (random_id & 0x3EFF); - skb = netdev_alloc_skb_ip_align(rxq->netdev, rx_info->skb_len); - if (!skb) { - nicif_err(nic_dev, drv, rxq->netdev, "Failed to allocate Rx SKB\n"); - return NULL; - } - - addr = dma_map_single(&nic_dev->pdev->dev, skb->data, rx_info->skb_len, - DMA_FROM_DEVICE); - - err = dma_mapping_error(&nic_dev->pdev->dev, addr); - if (err) { - nicif_err(nic_dev, drv, rxq->netdev, "Failed to map Rx DMA, err = %d\n", err); - goto err_rx_map; - } - - rx_info->buf_dma_addr = addr; - - return skb; - -err_rx_map: - dev_kfree_skb_any(skb); - return NULL; -} - -static u32 stub_spnic_rx_fill_buffers(struct spnic_rxq *rxq) -{ - struct net_device *netdev = rxq->netdev; - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - u32 i, free_wqebbs = rxq->delta - 1; - struct spnic_rq_wqe *rq_wqe = NULL; - struct spnic_rx_info *rx_info = NULL; - struct sk_buff *skb = NULL; - dma_addr_t dma_addr; - - for (i = 0; i < free_wqebbs; i++) { - rx_info = &rxq->rx_info[rxq->next_to_update]; - - skb = stub_rx_alloc_skb(rxq, rx_info); - if (!skb) { - nicif_err(nic_dev, drv, rxq->netdev, "Failed to alloc Rx skb\n"); - break; - } - - rq_wqe = rx_info->rq_wqe; - rx_info->saved_skb = skb; - dma_addr = rx_info->buf_dma_addr; - - if (rxq->rq->wqe_type == SPNIC_EXTEND_RQ_WQE) { - rq_wqe->extend_wqe.buf_desc.sge.hi_addr = upper_32_bits(dma_addr); - rq_wqe->extend_wqe.buf_desc.sge.lo_addr = lower_32_bits(dma_addr); - rq_wqe->extend_wqe.buf_desc.sge.len = rx_info->skb_len; - } else { - rq_wqe->normal_wqe.buf_hi_addr = upper_32_bits(dma_addr); - rq_wqe->normal_wqe.buf_lo_addr = lower_32_bits(dma_addr); - } - rxq->next_to_update = (rxq->next_to_update + 1) & rxq->q_mask; - } - - if (likely(i)) { - if (!rq_pi_rd_en) { - spnic_write_db(rxq->rq, rxq->q_id & (SPNIC_DCB_COS_MAX - 1), RQ_CFLAG_DP, - (u16)((u32)rxq->next_to_update << rxq->rq->wqe_type)); - } else { - /* Write all the wqes before pi update */ - wmb(); - - spnic_update_rq_hw_pi(rxq->rq, rxq->next_to_update); - } - rxq->delta -= i; - rxq->next_to_alloc = rxq->next_to_update; - } else { - nicif_err(nic_dev, drv, netdev, "Failed to allocate rx buffers, rxq id: %u\n", - rxq->q_id); - } - - return i; -} - -static u32 spnic_rx_fill_buffers(struct spnic_rxq *rxq) -{ - struct net_device *netdev = rxq->netdev; - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct spnic_rq_wqe *rq_wqe = NULL; - struct spnic_rx_info *rx_info = NULL; - dma_addr_t dma_addr; - u32 i, free_wqebbs = rxq->delta - 1; - - for (i = 0; i < free_wqebbs; i++) { - rx_info = &rxq->rx_info[rxq->next_to_update]; - - if (unlikely(!rx_alloc_mapped_page(nic_dev, rx_info))) { - RXQ_STATS_INC(rxq, alloc_rx_buf_err); - break; - } - - dma_addr = rx_info->buf_dma_addr + rx_info->page_offset; - - rq_wqe = 
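Note: the refill helpers above advance the producer index with a power-of-two mask and keep a running delta of descriptors that still need buffers, then either ring a doorbell or publish the PI to host memory. A simplified, allocation-free sketch of just that ring arithmetic (the structure fields mirror the rxq, everything else is stubbed):

/*
 * Standalone sketch of the receive-ring refill arithmetic: the queue depth
 * is a power of two, so the producer index wraps with a mask, and "delta"
 * tracks how many descriptors still need buffers.
 */
#include <stdio.h>
#include <stdint.h>

struct rx_ring {
	uint32_t q_depth;	 /* power of two */
	uint32_t q_mask;	 /* q_depth - 1 */
	uint32_t next_to_update; /* producer index (masked) */
	uint32_t delta;		 /* descriptors without a posted buffer */
};

/* Post up to (delta - 1) buffers; one slot stays empty on purpose,
 * matching the "free_wqebbs = rxq->delta - 1" convention above. */
static uint32_t ring_refill(struct rx_ring *r)
{
	uint32_t to_post = r->delta - 1;
	uint32_t posted;

	for (posted = 0; posted < to_post; posted++) {
		/* ... allocate and map a buffer for slot next_to_update ... */
		r->next_to_update = (r->next_to_update + 1) & r->q_mask;
	}

	r->delta -= posted;
	/* Here the driver either rings a doorbell with the new producer
	 * index or, with rq_pi_rd_en set, writes the PI to host memory
	 * after a write barrier. */
	return posted;
}

int main(void)
{
	struct rx_ring r = { .q_depth = 8, .q_mask = 7,
			     .next_to_update = 5, .delta = 8 };
	uint32_t posted = ring_refill(&r);

	printf("posted %u buffers, producer now %u, delta now %u\n",
	       posted, r.next_to_update, r.delta);
	return 0;
}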
rx_info->rq_wqe; - - if (rxq->rq->wqe_type == SPNIC_EXTEND_RQ_WQE) { - rq_wqe->extend_wqe.buf_desc.sge.hi_addr = upper_32_bits(dma_addr); - rq_wqe->extend_wqe.buf_desc.sge.lo_addr = lower_32_bits(dma_addr); - } else { - rq_wqe->normal_wqe.buf_hi_addr = upper_32_bits(dma_addr); - rq_wqe->normal_wqe.buf_lo_addr = lower_32_bits(dma_addr); - } - rxq->next_to_update = (rxq->next_to_update + 1) & rxq->q_mask; - } - - if (likely(i)) { - if (!rq_pi_rd_en) { - spnic_write_db(rxq->rq, rxq->q_id & (SPNIC_DCB_COS_MAX - 1), RQ_CFLAG_DP, - (u16)((u32)rxq->next_to_update << rxq->rq->wqe_type)); - } else { - /* Write all the wqes before pi update */ - wmb(); - - spnic_update_rq_hw_pi(rxq->rq, rxq->next_to_update); - } - rxq->delta -= i; - rxq->next_to_alloc = rxq->next_to_update; - } else if (free_wqebbs == rxq->q_depth - 1) { - RXQ_STATS_INC(rxq, rx_buf_empty); - } - - return i; -} - -static u32 spnic_rx_alloc_buffers(struct spnic_nic_dev *nic_dev, u32 rq_depth, - struct spnic_rx_info *rx_info_arr) -{ - u32 free_wqebbs = rq_depth - 1; - u32 idx; - - for (idx = 0; idx < free_wqebbs; idx++) { - if (!rx_alloc_mapped_page(nic_dev, &rx_info_arr[idx])) - break; - } - - return idx; -} - -void spnic_rx_free_buffers(struct spnic_nic_dev *nic_dev, u32 q_depth, - struct spnic_rx_info *rx_info_arr) -{ - struct spnic_rx_info *rx_info = NULL; - u32 i; - - /* Free all the Rx ring sk_buffs */ - for (i = 0; i < q_depth; i++) { - rx_info = &rx_info_arr[i]; - - if (rx_info->buf_dma_addr) { - dma_unmap_page(&nic_dev->pdev->dev, rx_info->buf_dma_addr, - nic_dev->dma_rx_buff_size, DMA_FROM_DEVICE); - rx_info->buf_dma_addr = 0; - } - - if (rx_info->page) { - __free_pages(rx_info->page, nic_dev->page_order); - rx_info->page = NULL; - } - } -} - -void stub_spnic_rx_free_buffers(struct spnic_rxq *rxq) -{ - struct spnic_rx_info *rx_info = NULL; - u32 i; - - /* Free all the Rx ring sk_buffs */ - for (i = 0; i < rxq->q_depth; i++) { - rx_info = &rxq->rx_info[i]; - - if (rx_info->buf_dma_addr) { - dma_unmap_page(rxq->dev, rx_info->buf_dma_addr, rx_info->skb_len, - DMA_FROM_DEVICE); - rx_info->buf_dma_addr = 0; - } - - if (rx_info->saved_skb) { - dev_kfree_skb_any(rx_info->saved_skb); - rx_info->saved_skb = NULL; - } - } -} - -static void spnic_reuse_rx_page(struct spnic_rxq *rxq, struct spnic_rx_info *old_rx_info) -{ - struct spnic_rx_info *new_rx_info; - u16 nta = rxq->next_to_alloc; - - new_rx_info = &rxq->rx_info[nta]; - - /* update, and store next to alloc */ - nta++; - rxq->next_to_alloc = (nta < rxq->q_depth) ? 
nta : 0; - - new_rx_info->page = old_rx_info->page; - new_rx_info->page_offset = old_rx_info->page_offset; - new_rx_info->buf_dma_addr = old_rx_info->buf_dma_addr; - - /* sync the buffer for use by the device */ - dma_sync_single_range_for_device(rxq->dev, new_rx_info->buf_dma_addr, - new_rx_info->page_offset, rxq->buf_len, - DMA_FROM_DEVICE); -} - -static bool spnic_add_rx_frag(struct spnic_rxq *rxq, struct spnic_rx_info *rx_info, - struct sk_buff *skb, u32 size) -{ - struct page *page; - u8 *va; - - page = rx_info->page; - va = (u8 *)page_address(page) + rx_info->page_offset; - prefetch(va); -#if L1_CACHE_BYTES < 128 - prefetch(va + L1_CACHE_BYTES); -#endif - - dma_sync_single_range_for_cpu(rxq->dev, rx_info->buf_dma_addr, - rx_info->page_offset, - rxq->buf_len, DMA_FROM_DEVICE); - - if (size <= SPNIC_RX_HDR_SIZE && !skb_is_nonlinear(skb)) { - memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); - - /* page is not reserved, we can reuse buffer as-is */ - if (likely(page_to_nid(page) == numa_node_id())) - return true; - - /* this page cannot be reused so discard it */ - put_page(page); - return false; - } - - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - (int)rx_info->page_offset, (int)size, rxq->buf_len); - - /* avoid re-using remote pages */ - if (unlikely(page_to_nid(page) != numa_node_id())) - return false; - - /* if we are only owner of page we can reuse it */ - if (unlikely(page_count(page) != 1)) - return false; - - /* flip page offset to other buffer */ - rx_info->page_offset ^= rxq->buf_len; - get_page(page); - - return true; -} - -static void packaging_skb(struct spnic_rxq *rxq, struct sk_buff *head_skb, u8 sge_num, u32 pkt_len) -{ - struct spnic_rx_info *rx_info = NULL; - struct sk_buff *skb = NULL; - u8 frag_num = 0; - u32 size; - u32 sw_ci; - - sw_ci = rxq->cons_idx & rxq->q_mask; - skb = head_skb; - while (sge_num) { - rx_info = &rxq->rx_info[sw_ci]; - sw_ci = (sw_ci + 1) & rxq->q_mask; - if (unlikely(pkt_len > rxq->buf_len)) { - size = rxq->buf_len; - pkt_len -= rxq->buf_len; - } else { - size = pkt_len; - } - - if (unlikely(frag_num == MAX_SKB_FRAGS)) { - frag_num = 0; - if (skb == head_skb) - skb = skb_shinfo(skb)->frag_list; - else - skb = skb->next; - } - - if (unlikely(skb != head_skb)) { - head_skb->len += size; - head_skb->data_len += size; - head_skb->truesize += rxq->buf_len; - } - - if (likely(spnic_add_rx_frag(rxq, rx_info, skb, size))) { - spnic_reuse_rx_page(rxq, rx_info); - } else { - /* we are not reusing the buffer so unmap it */ - dma_unmap_page(rxq->dev, rx_info->buf_dma_addr, - rxq->dma_rx_buff_size, DMA_FROM_DEVICE); - } - /* clear contents of buffer_info */ - rx_info->buf_dma_addr = 0; - rx_info->page = NULL; - sge_num--; - frag_num++; - } -} - -#define SPNIC_GET_SGE_NUM(pkt_len, rxq) \ - ((u8)(((pkt_len) >> (rxq)->rx_buff_shift) + \ - (((pkt_len) & ((rxq)->buf_len - 1)) ? 1 : 0))) - -static struct sk_buff *spnic_fetch_rx_buffer(struct spnic_rxq *rxq, u32 pkt_len) -{ - struct sk_buff *head_skb = NULL; - struct sk_buff *cur_skb = NULL; - struct sk_buff *skb = NULL; - struct net_device *netdev = rxq->netdev; - u8 sge_num, skb_num; - u16 wqebb_cnt = 0; - - head_skb = netdev_alloc_skb_ip_align(netdev, SPNIC_RX_HDR_SIZE); - if (unlikely(!head_skb)) - return NULL; - - sge_num = SPNIC_GET_SGE_NUM(pkt_len, rxq); - if (likely(sge_num <= MAX_SKB_FRAGS)) - skb_num = 1; - else - skb_num = (sge_num / MAX_SKB_FRAGS) + ((sge_num % MAX_SKB_FRAGS) ? 
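Note: spnic_add_rx_frag() recycles a receive page by XOR-flipping page_offset between the two halves of the page whenever it is the sole owner on the local NUMA node. A standalone sketch of the offset-flip idea, with a plain malloc() standing in for the page and the refcount/NUMA checks left out:

/*
 * Standalone sketch of half-page buffer recycling: a page holds two receive
 * buffers, and flipping the offset with XOR switches to the other half so
 * the page can be reposted without a new allocation.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define PAGE_SZ 4096u
#define BUF_LEN 2048u	/* two buffers per page */

struct rx_buf {
	uint8_t *page;		/* backing "page" */
	uint32_t page_offset;	/* which half is currently posted */
};

/* Hand the current half to the stack and flip to the other half. */
static uint8_t *rx_buf_consume_and_flip(struct rx_buf *b)
{
	uint8_t *data = b->page + b->page_offset;

	b->page_offset ^= BUF_LEN;	/* 0 <-> BUF_LEN */
	return data;
}

int main(void)
{
	struct rx_buf b = { .page = malloc(PAGE_SZ), .page_offset = 0 };

	if (!b.page)
		return 1;

	for (int i = 0; i < 4; i++) {
		uint8_t *data = rx_buf_consume_and_flip(&b);

		printf("packet %d used offset %u\n", i,
		       (unsigned)(data - b.page));
	}
	free(b.page);
	return 0;
}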
1 : 0); - - while (unlikely(skb_num > 1)) { - cur_skb = netdev_alloc_skb_ip_align(netdev, SPNIC_RX_HDR_SIZE); - if (unlikely(!cur_skb)) - goto alloc_skb_fail; - - if (!skb) { - skb_shinfo(head_skb)->frag_list = cur_skb; - skb = cur_skb; - } else { - skb->next = cur_skb; - skb = cur_skb; - } - - skb_num--; - } - - prefetchw(head_skb->data); - wqebb_cnt = sge_num; - - packaging_skb(rxq, head_skb, sge_num, pkt_len); - - rxq->cons_idx += wqebb_cnt; - rxq->delta += wqebb_cnt; - - return head_skb; - -alloc_skb_fail: - dev_kfree_skb_any(head_skb); - return NULL; -} - -void spnic_rxq_get_stats(struct spnic_rxq *rxq, struct spnic_rxq_stats *stats) -{ - struct spnic_rxq_stats *rxq_stats = &rxq->rxq_stats; - unsigned int start; - - u64_stats_update_begin(&stats->syncp); - do { - start = u64_stats_fetch_begin(&rxq_stats->syncp); - stats->bytes = rxq_stats->bytes; - stats->packets = rxq_stats->packets; - stats->errors = rxq_stats->csum_errors + - rxq_stats->other_errors; - stats->csum_errors = rxq_stats->csum_errors; - stats->other_errors = rxq_stats->other_errors; - stats->dropped = rxq_stats->dropped; - stats->xdp_dropped = rxq_stats->xdp_dropped; - stats->rx_buf_empty = rxq_stats->rx_buf_empty; - } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); - u64_stats_update_end(&stats->syncp); -} - -void spnic_rxq_clean_stats(struct spnic_rxq_stats *rxq_stats) -{ - u64_stats_update_begin(&rxq_stats->syncp); - rxq_stats->bytes = 0; - rxq_stats->packets = 0; - rxq_stats->errors = 0; - rxq_stats->csum_errors = 0; - rxq_stats->other_errors = 0; - rxq_stats->dropped = 0; - rxq_stats->xdp_dropped = 0; - rxq_stats->rx_buf_empty = 0; - - rxq_stats->alloc_skb_err = 0; - rxq_stats->alloc_rx_buf_err = 0; - rxq_stats->xdp_large_pkt = 0; - u64_stats_update_end(&rxq_stats->syncp); -} - -static void rxq_stats_init(struct spnic_rxq *rxq) -{ - struct spnic_rxq_stats *rxq_stats = &rxq->rxq_stats; - - u64_stats_init(&rxq_stats->syncp); - spnic_rxq_clean_stats(rxq_stats); -} - -static void spnic_pull_tail(struct sk_buff *skb) -{ - skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; - unsigned char *va = NULL; - unsigned int pull_len; - - /* it is valid to use page_address instead of kmap since we are - * working with pages allocated out of the lomem pool per - * alloc_page(GFP_ATOMIC) - */ - va = skb_frag_address(frag); - - /* we need the header to contain the greater of either ETH_HLEN or - * 60 bytes if the skb->len is less than 60 for skb_pad. 
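Note: spnic_rxq_get_stats() snapshots the counters under u64_stats_fetch_begin()/retry() so a 64-bit pair is never observed half-updated on 32-bit hosts. The sketch below shows only the retry shape with a hand-rolled sequence counter; memory ordering and the real u64_stats_sync primitives are deliberately out of scope:

/*
 * Standalone sketch of the sequence-counter snapshot pattern: the reader
 * retries until it sees the same even sequence number before and after
 * copying, so it never reports a half-updated pair.
 */
#include <stdio.h>
#include <stdint.h>

struct rxq_stats {
	unsigned int seq;	/* odd while an update is in flight */
	uint64_t packets;
	uint64_t bytes;
};

static void stats_update(struct rxq_stats *s, uint64_t pkts, uint64_t bytes)
{
	s->seq++;		/* begin: seq becomes odd */
	s->packets += pkts;
	s->bytes += bytes;
	s->seq++;		/* end: seq becomes even again */
}

static void stats_snapshot(const struct rxq_stats *s, uint64_t *pkts, uint64_t *bytes)
{
	unsigned int start;

	do {
		/* wait for any in-flight update to finish */
		do {
			start = s->seq;
		} while (start & 1);

		*pkts = s->packets;
		*bytes = s->bytes;
	} while (start != s->seq);	/* retry if a writer slipped in */
}

int main(void)
{
	struct rxq_stats s = { 0 };
	uint64_t p, b;

	stats_update(&s, 3, 1500);
	stats_snapshot(&s, &p, &b);
	printf("packets=%llu bytes=%llu\n",
	       (unsigned long long)p, (unsigned long long)b);
	return 0;
}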
- */ - pull_len = eth_get_headlen(skb->dev, va, SPNIC_RX_HDR_SIZE); - - /* align pull length to size of long to optimize memcpy performance */ - skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); - - /* update all of the pointers */ - skb_frag_size_sub(frag, (int)pull_len); - skb_frag_off_add(frag, (int)pull_len); - - skb->data_len -= pull_len; - skb->tail += pull_len; -} - -static void spnic_rx_csum(struct spnic_rxq *rxq, u32 offload_type, u32 status, struct sk_buff *skb) -{ - struct net_device *netdev = rxq->netdev; - u32 pkt_type = SPNIC_GET_RX_PKT_TYPE(offload_type); - u32 ip_type = SPNIC_GET_RX_IP_TYPE(offload_type); - u32 pkt_fmt = SPNIC_GET_RX_TUNNEL_PKT_FORMAT(offload_type); - - u32 csum_err; - - csum_err = SPNIC_GET_RX_CSUM_ERR(status); - - if (unlikely(csum_err == SPNIC_RX_CSUM_IPSU_OTHER_ERR)) - rxq->rxq_stats.other_errors++; - - if (!(netdev->features & NETIF_F_RXCSUM)) - return; - - if (unlikely(csum_err)) { - /* pkt type is recognized by HW, and csum is wrong */ - if (!(csum_err & (SPNIC_RX_CSUM_HW_CHECK_NONE | SPNIC_RX_CSUM_IPSU_OTHER_ERR))) - rxq->rxq_stats.csum_errors++; - skb->ip_summed = CHECKSUM_NONE; - return; - } - - if (ip_type == SPNIC_RX_INVALID_IP_TYPE || - !(pkt_fmt == SPNIC_RX_PKT_FORMAT_NON_TUNNEL || - pkt_fmt == SPNIC_RX_PKT_FORMAT_VXLAN)) { - skb->ip_summed = CHECKSUM_NONE; - return; - } - - switch (pkt_type) { - case SPNIC_RX_TCP_PKT: - case SPNIC_RX_UDP_PKT: - case SPNIC_RX_SCTP_PKT: - skb->ip_summed = CHECKSUM_UNNECESSARY; - break; - default: - skb->ip_summed = CHECKSUM_NONE; - break; - } -} - -static void spnic_rx_gro(struct spnic_rxq *rxq, u32 offload_type, struct sk_buff *skb) -{ - struct net_device *netdev = rxq->netdev; - bool l2_tunnel = false; - - if (!(netdev->features & NETIF_F_GRO)) - return; - - l2_tunnel = - SPNIC_GET_RX_TUNNEL_PKT_FORMAT(offload_type) == SPNIC_RX_PKT_FORMAT_VXLAN ? 1 : 0; - - if (l2_tunnel && skb->ip_summed == CHECKSUM_UNNECESSARY) - /* If we checked the outer header let the stack know */ - skb->csum_level = 1; -} - -static void spnic_copy_lp_data(struct spnic_nic_dev *nic_dev, struct sk_buff *skb) -{ - struct net_device *netdev = nic_dev->netdev; - u8 *lb_buf = nic_dev->lb_test_rx_buf; - void *frag_data = NULL; - int lb_len = nic_dev->lb_pkt_len; - int pkt_offset, frag_len, i; - - if (nic_dev->lb_test_rx_idx == LP_PKT_CNT) { - nic_dev->lb_test_rx_idx = 0; - nicif_warn(nic_dev, rx_err, netdev, "Loopback test warning, receive too many test pkts\n"); - } - - if (skb->len != nic_dev->lb_pkt_len) { - nicif_warn(nic_dev, rx_err, netdev, "Wrong packet length\n"); - nic_dev->lb_test_rx_idx++; - return; - } - - pkt_offset = nic_dev->lb_test_rx_idx * lb_len; - frag_len = (int)skb_headlen(skb); - memcpy(lb_buf + pkt_offset, skb->data, frag_len); - - pkt_offset += frag_len; - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - frag_data = skb_frag_address(&skb_shinfo(skb)->frags[i]); - frag_len = (int)skb_frag_size(&skb_shinfo(skb)->frags[i]); - memcpy(lb_buf + pkt_offset, frag_data, frag_len); - - pkt_offset += frag_len; - } - nic_dev->lb_test_rx_idx++; -} - -static inline void spnic_lro_set_gso_params(struct sk_buff *skb, u16 num_lro) -{ - struct ethhdr *eth = (struct ethhdr *)(skb->data); - __be16 proto; - - proto = __vlan_get_protocol(skb, eth->h_proto, NULL); - - skb_shinfo(skb)->gso_size = (u16)DIV_ROUND_UP((skb->len - skb_headlen(skb)), num_lro); - skb_shinfo(skb)->gso_type = (proto == htons(ETH_P_IP)) ? 
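Note: spnic_lro_set_gso_params() reverses hardware LRO into GSO metadata by spreading the aggregated payload evenly over the reported number of coalesced frames. A standalone sketch of that per-segment size computation; the header length and segment counts below are chosen only as an example:

/*
 * Standalone sketch of the LRO -> GSO parameter recovery: given an
 * aggregated payload and the number of coalesced frames reported by
 * hardware, derive the per-segment size the stack should use when it
 * later resegments the packet.
 */
#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static uint16_t lro_gso_size(uint32_t skb_len, uint32_t headlen, uint16_t num_lro)
{
	uint32_t payload = skb_len - headlen;	/* linear part holds the headers */

	return (uint16_t)DIV_ROUND_UP(payload, num_lro);
}

int main(void)
{
	/* e.g. 8 coalesced TCP segments, 66-byte headers in the linear area */
	uint32_t skb_len = 66 + 8 * 1448;
	uint16_t gso = lro_gso_size(skb_len, 66, 8);

	printf("gso_size=%u for %u aggregated bytes\n",
	       (unsigned)gso, skb_len - 66);
	return 0;
}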
SKB_GSO_TCPV4 : SKB_GSO_TCPV6; -} - -enum spnic_xdp_pkt { - SPNIC_XDP_PKT_PASS, - SPNIC_XDP_PKT_DROP, -}; - -static inline void update_drop_rx_info(struct spnic_rxq *rxq, u16 weqbb_num) -{ - struct spnic_rx_info *rx_info = NULL; - - while (weqbb_num) { - rx_info = &rxq->rx_info[rxq->cons_idx & rxq->q_mask]; - if (likely(page_to_nid(rx_info->page) == numa_node_id())) - spnic_reuse_rx_page(rxq, rx_info); - - rx_info->buf_dma_addr = 0; - rx_info->page = NULL; - rxq->cons_idx++; - rxq->delta++; - - weqbb_num--; - } -} - -int spnic_run_xdp(struct spnic_rxq *rxq, u32 pkt_len) -{ - struct bpf_prog *xdp_prog = NULL; - struct spnic_rx_info *rx_info = NULL; - struct xdp_buff xdp; - int result = SPNIC_XDP_PKT_PASS; - u16 weqbb_num = 1; /* xdp can only use one rx_buff */ - u8 *va = NULL; - u32 act; - - rcu_read_lock(); - xdp_prog = READ_ONCE(rxq->xdp_prog); - if (!xdp_prog) - goto unlock_rcu; - - if (unlikely(pkt_len > rxq->buf_len)) { - RXQ_STATS_INC(rxq, xdp_large_pkt); - weqbb_num = (u16)(pkt_len >> rxq->rx_buff_shift) + - ((pkt_len & (rxq->buf_len - 1)) ? 1 : 0); - result = SPNIC_XDP_PKT_DROP; - goto xdp_out; - } - - rx_info = &rxq->rx_info[rxq->cons_idx & rxq->q_mask]; - va = (u8 *)page_address(rx_info->page) + rx_info->page_offset; - prefetch(va); - dma_sync_single_range_for_cpu(rxq->dev, rx_info->buf_dma_addr, rx_info->page_offset, - rxq->buf_len, DMA_FROM_DEVICE); - xdp.data = va; - xdp.data_hard_start = xdp.data; - xdp.data_end = xdp.data + pkt_len; - xdp.frame_sz = rxq->buf_len; - xdp_set_data_meta_invalid(&xdp); - prefetchw(xdp.data_hard_start); - act = bpf_prog_run_xdp(xdp_prog, &xdp); - switch (act) { - case XDP_PASS: - break; - case XDP_DROP: - result = SPNIC_XDP_PKT_DROP; - break; - default: - result = SPNIC_XDP_PKT_DROP; - bpf_warn_invalid_xdp_action(act); - } - -xdp_out: - if (result == SPNIC_XDP_PKT_DROP) { - RXQ_STATS_INC(rxq, xdp_dropped); - update_drop_rx_info(rxq, weqbb_num); - } - -unlock_rcu: - rcu_read_unlock(); - - return result; -} - -int recv_one_pkt(struct spnic_rxq *rxq, struct spnic_rq_cqe *rx_cqe, - u32 pkt_len, u32 vlan_len, u32 status) -{ - struct sk_buff *skb; - struct net_device *netdev = rxq->netdev; - u32 offload_type; - u16 num_lro; - struct spnic_nic_dev *nic_dev = netdev_priv(rxq->netdev); - - u32 xdp_status; - - xdp_status = spnic_run_xdp(rxq, pkt_len); - if (xdp_status == SPNIC_XDP_PKT_DROP) - return 0; - - skb = spnic_fetch_rx_buffer(rxq, pkt_len); - if (unlikely(!skb)) { - RXQ_STATS_INC(rxq, alloc_skb_err); - return -ENOMEM; - } - - /* place header in linear portion of buffer */ - if (skb_is_nonlinear(skb)) - spnic_pull_tail(skb); - - offload_type = rx_cqe->offload_type; - spnic_rx_csum(rxq, offload_type, status, skb); - - spnic_rx_gro(rxq, offload_type, skb); - - if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && - SPNIC_GET_RX_VLAN_OFFLOAD_EN(offload_type)) { - u16 vid = SPNIC_GET_RX_VLAN_TAG(vlan_len); - - /* if the packet is a vlan pkt, the vid may be 0 */ - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); - } - - if (unlikely(test_bit(SPNIC_LP_TEST, &nic_dev->flags))) - spnic_copy_lp_data(nic_dev, skb); - - num_lro = SPNIC_GET_RX_NUM_LRO(status); - if (num_lro) - spnic_lro_set_gso_params(skb, num_lro); - - skb_record_rx_queue(skb, rxq->q_id); - skb->protocol = eth_type_trans(skb, netdev); - - if (skb_has_frag_list(skb)) { - napi_gro_flush(&rxq->irq_cfg->napi, false); - netif_receive_skb(skb); - } else { - napi_gro_receive(&rxq->irq_cfg->napi, skb); - } - - return 0; -} - -void rx_pass_super_cqe(struct spnic_rxq *rxq, u32 index, u32 pkt_num, 
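Note: spnic_run_xdp() and SPNIC_GET_SGE_NUM() both compute how many receive buffers a packet spans: a shift for the whole buffers plus one more if the masked remainder is non-zero. A standalone sketch of that ceiling division for a power-of-two buffer size:

/*
 * Standalone sketch of the "how many receive buffers does this packet span"
 * computation: with a power-of-two buffer length, the shift gives the whole
 * buffers and the masked remainder decides whether one more is needed.
 */
#include <stdio.h>
#include <stdint.h>

static uint16_t buffers_for_pkt(uint32_t pkt_len, uint32_t buf_len, uint32_t buf_shift)
{
	return (uint16_t)((pkt_len >> buf_shift) +
			  ((pkt_len & (buf_len - 1)) ? 1 : 0));
}

int main(void)
{
	const uint32_t buf_len = 2048, buf_shift = 11;	/* 2^11 = 2048 */
	uint32_t lens[] = { 64, 2048, 2049, 9000 };

	for (unsigned int i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("pkt_len=%u -> %u buffer(s)\n", lens[i],
		       (unsigned)buffers_for_pkt(lens[i], buf_len, buf_shift));
	return 0;
}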
struct spnic_rq_cqe *cqe) -{ - u8 sge_num = 0; - u32 pkt_len; - - while (index < pkt_num) { - pkt_len = spnic_get_pkt_len_for_super_cqe(cqe, index == (pkt_num - 1)); - sge_num += (u8)(pkt_len >> rxq->rx_buff_shift) + - ((pkt_len & (rxq->buf_len - 1)) ? 1 : 0); - index++; - } - - rxq->cons_idx += sge_num; - rxq->delta += sge_num; -} - -static inline int recv_supper_cqe(struct spnic_rxq *rxq, struct spnic_rq_cqe *rx_cqe, u32 pkt_info, - u32 vlan_len, u32 status, int *pkts, u64 *rx_bytes, u32 *dropped) -{ - u32 pkt_len; - int i, pkt_num = 0; - - pkt_num = SPNIC_GET_RQ_CQE_PKT_NUM(pkt_info); - i = 0; - while (i < pkt_num) { - pkt_len = ((i == (pkt_num - 1)) ? - RQ_CQE_PKT_LEN_GET(pkt_info, LAST_LEN) : - RQ_CQE_PKT_LEN_GET(pkt_info, FIRST_LEN)); - if (unlikely(recv_one_pkt(rxq, rx_cqe, pkt_len, vlan_len, status))) { - if (i) { - rx_pass_super_cqe(rxq, i, pkt_num, rx_cqe); - *dropped += (pkt_num - i); - } - break; - } - - *rx_bytes += pkt_len; - (*pkts)++; - i++; - } - - if (!i) - return -EFAULT; - - return 0; -} - -#define LRO_PKT_HDR_LEN_IPV4 66 -#define LRO_PKT_HDR_LEN_IPV6 86 -#define LRO_PKT_HDR_LEN(cqe) \ - (SPNIC_GET_RX_IP_TYPE((cqe)->offload_type) == \ - SPNIC_RX_IPV6_PKT ? LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4) - -static void stub_rx_recv_jumbo_pkt(struct spnic_rxq *rxq, struct sk_buff *head_skb, - unsigned int left_pkt_len) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(rxq->netdev); - struct sk_buff *skb = NULL; - struct sk_buff *curr_skb = head_skb; - struct spnic_rx_info *rx_info = NULL; - unsigned int curr_len; - - while (left_pkt_len > 0) { - rx_info = &rxq->rx_info[rxq->cons_idx & rxq->q_mask]; - skb = rx_info->saved_skb; - - dma_unmap_single(&nic_dev->pdev->dev, rx_info->buf_dma_addr, - rx_info->skb_len, DMA_FROM_DEVICE); - - rx_info->buf_dma_addr = 0; - rx_info->saved_skb = NULL; - - prefetch(skb->data); - - curr_len = (left_pkt_len > rx_info->skb_len) ? 
rx_info->skb_len : left_pkt_len; - - left_pkt_len -= curr_len; - - __skb_put(skb, curr_len); - - if (curr_skb == head_skb) - skb_shinfo(head_skb)->frag_list = skb; - else - curr_skb->next = skb; - - head_skb->len += skb->len; - head_skb->data_len += skb->len; - head_skb->truesize += skb->truesize; - - curr_skb = skb; - rxq->cons_idx++; - rxq->delta++; - } -} - -int stub_spnic_rx_poll(struct spnic_rxq *rxq, int budget) -{ - u32 sw_ci, status, pkt_len, vlan_len = 0; - struct spnic_nic_dev *nic_dev = netdev_priv(rxq->netdev); - struct spnic_rq_cqe *rx_cqe = NULL; - u64 rx_bytes = 0; - int pkts = 0; - struct sk_buff *skb = NULL; - struct spnic_rx_info *rx_info = NULL; - u32 offload_type; - - while (likely(pkts < budget)) { - sw_ci = rxq->cons_idx & rxq->q_mask; - rx_info = &rxq->rx_info[sw_ci]; - rx_cqe = rxq->rx_info[sw_ci].cqe; - status = be32_to_cpu(rx_cqe->status); - - if (!SPNIC_GET_RX_DONE(status)) - break; - - /* make sure we read rx_done before packet length */ - rmb(); - - vlan_len = be32_to_cpu(rx_cqe->vlan_len); - pkt_len = SPNIC_GET_RX_PKT_LEN(vlan_len); - skb = rx_info->saved_skb; - - dma_unmap_single(&nic_dev->pdev->dev, rx_info->buf_dma_addr, - rx_info->skb_len, DMA_FROM_DEVICE); - - rx_info->buf_dma_addr = 0; - rx_info->saved_skb = NULL; - - rxq->cons_idx++; - rxq->delta++; - - if (pkt_len <= rx_info->skb_len) { - __skb_put(skb, pkt_len); - } else { - __skb_put(skb, rx_info->skb_len); - stub_rx_recv_jumbo_pkt(rxq, skb, pkt_len - rx_info->skb_len); - } - - offload_type = be32_to_cpu(rx_cqe->offload_type); - spnic_rx_csum(rxq, offload_type, status, skb); - - spnic_rx_gro(rxq, offload_type, skb); - - skb_record_rx_queue(skb, rxq->q_id); - skb->protocol = eth_type_trans(skb, rxq->netdev); - - if (skb_has_frag_list(skb)) { - napi_gro_flush(&rxq->irq_cfg->napi, false); - netif_receive_skb(skb); - } else { - napi_gro_receive(&rxq->irq_cfg->napi, skb); - } - rx_bytes += pkt_len; - pkts++; - rx_cqe->status = 0; - } - - if (rxq->delta >= SPNIC_RX_BUFFER_WRITE) - stub_spnic_rx_fill_buffers(rxq); - - u64_stats_update_begin(&rxq->rxq_stats.syncp); - rxq->rxq_stats.packets += pkts; - rxq->rxq_stats.bytes += rx_bytes; - u64_stats_update_end(&rxq->rxq_stats.syncp); - return pkts; -} - -int spnic_rx_poll(struct spnic_rxq *rxq, int budget) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(rxq->netdev); - u32 sw_ci, status, pkt_len, vlan_len, pkt_info, dropped = 0; - struct spnic_rq_cqe *rx_cqe = NULL; - u64 rx_bytes = 0; - u16 num_lro; - int pkts = 0, nr_pkts = 0; - u16 num_wqe = 0; - - while (likely(pkts < budget)) { - sw_ci = rxq->cons_idx & rxq->q_mask; - rx_cqe = rxq->rx_info[sw_ci].cqe; - status = rx_cqe->status; - - if (!SPNIC_GET_RX_DONE(status)) - break; - - /* make sure we read rx_done before packet length */ - rmb(); - - vlan_len = rx_cqe->vlan_len; - pkt_info = rx_cqe->pkt_info; - pkt_len = SPNIC_GET_RX_PKT_LEN(vlan_len); - - if (unlikely(SPNIC_GET_SUPER_CQE_EN(pkt_info))) { - if (unlikely(recv_supper_cqe(rxq, rx_cqe, pkt_info, vlan_len, status, &pkts, - &rx_bytes, &dropped))) - break; - nr_pkts += (int)SPNIC_GET_RQ_CQE_PKT_NUM(pkt_info); - } else { - if (recv_one_pkt(rxq, rx_cqe, pkt_len, - vlan_len, status)) - break; - rx_bytes += pkt_len; - pkts++; - nr_pkts++; - - num_lro = SPNIC_GET_RX_NUM_LRO(status); - if (num_lro) { - rx_bytes += ((num_lro - 1) * LRO_PKT_HDR_LEN(rx_cqe)); - num_wqe += SPNIC_GET_SGE_NUM(pkt_len, rxq); - } - } - - rx_cqe->status = 0; - - if (num_wqe >= nic_dev->lro_replenish_thld) - break; - } - - if (rxq->delta >= SPNIC_RX_BUFFER_WRITE) - 
spnic_rx_fill_buffers(rxq); - - u64_stats_update_begin(&rxq->rxq_stats.syncp); - rxq->rxq_stats.packets += nr_pkts; - rxq->rxq_stats.bytes += rx_bytes; - rxq->rxq_stats.dropped += dropped; - u64_stats_update_end(&rxq->rxq_stats.syncp); - return pkts; -} - -int spnic_alloc_rxqs_res(struct spnic_nic_dev *nic_dev, u16 num_rq, - u32 rq_depth, struct spnic_dyna_rxq_res *rxqs_res) -{ - struct spnic_dyna_rxq_res *rqres = NULL; - u64 cqe_mem_size = sizeof(struct spnic_rq_cqe) * rq_depth; - int idx, i; - u32 pkts; - u64 size; - - for (idx = 0; idx < num_rq; idx++) { - rqres = &rxqs_res[idx]; - size = sizeof(*rqres->rx_info) * rq_depth; - rqres->rx_info = kzalloc(size, GFP_KERNEL); - if (!rqres->rx_info) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to alloc rxq%d rx info\n", idx); - goto err_out; - } - - rqres->cqe_start_vaddr = - dma_alloc_coherent(&nic_dev->pdev->dev, cqe_mem_size, - &rqres->cqe_start_paddr, GFP_KERNEL); - if (!rqres->cqe_start_vaddr) { - kfree(rqres->rx_info); - nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to alloc rxq%d cqe\n", idx); - goto err_out; - } - - pkts = spnic_rx_alloc_buffers(nic_dev, rq_depth, rqres->rx_info); - if (!pkts) { - dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size, - rqres->cqe_start_vaddr, - rqres->cqe_start_paddr); - kfree(rqres->rx_info); - nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to alloc rxq%d rx buffers\n", idx); - goto err_out; - } - rqres->next_to_alloc = (u16)pkts; - } - return 0; - -err_out: - for (i = 0; i < idx; i++) { - rqres = &rxqs_res[i]; - - spnic_rx_free_buffers(nic_dev, rq_depth, rqres->rx_info); - dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size, - rqres->cqe_start_vaddr, rqres->cqe_start_paddr); - kfree(rqres->rx_info); - } - - return -ENOMEM; -} - -void spnic_free_rxqs_res(struct spnic_nic_dev *nic_dev, u16 num_rq, - u32 rq_depth, struct spnic_dyna_rxq_res *rxqs_res) -{ - struct spnic_dyna_rxq_res *rqres = NULL; - u64 cqe_mem_size = sizeof(struct spnic_rq_cqe) * rq_depth; - int idx; - - for (idx = 0; idx < num_rq; idx++) { - rqres = &rxqs_res[idx]; - - spnic_rx_free_buffers(nic_dev, rq_depth, rqres->rx_info); - dma_free_coherent(&nic_dev->pdev->dev, cqe_mem_size, rqres->cqe_start_vaddr, - rqres->cqe_start_paddr); - kfree(rqres->rx_info); - } -} - -int spnic_configure_rxqs(struct spnic_nic_dev *nic_dev, u16 num_rq, - u32 rq_depth, struct spnic_dyna_rxq_res *rxqs_res) -{ - struct spnic_dyna_rxq_res *rqres = NULL; - struct irq_info *msix_entry = NULL; - struct spnic_rxq *rxq = NULL; - struct spnic_rq_cqe *cqe_va = NULL; - dma_addr_t cqe_pa; - u16 q_id; - u32 idx; - u32 pkts; - - for (q_id = 0; q_id < num_rq; q_id++) { - rxq = &nic_dev->rxqs[q_id]; - rqres = &rxqs_res[q_id]; - msix_entry = &nic_dev->qps_irq_info[q_id]; - - rxq->irq_id = msix_entry->irq_id; - rxq->msix_entry_idx = msix_entry->msix_entry_idx; - rxq->next_to_update = 0; - rxq->next_to_alloc = rqres->next_to_alloc; - rxq->q_depth = rq_depth; - rxq->delta = rxq->q_depth; - rxq->q_mask = rxq->q_depth - 1; - rxq->cons_idx = 0; - - rxq->rx_info = rqres->rx_info; - - /* fill cqe */ - cqe_va = (struct spnic_rq_cqe *)rqres->cqe_start_vaddr; - cqe_pa = rqres->cqe_start_paddr; - for (idx = 0; idx < rq_depth; idx++) { - rxq->rx_info[idx].cqe = cqe_va; - rxq->rx_info[idx].cqe_dma = cqe_pa; - cqe_va++; - cqe_pa += sizeof(*rxq->rx_info->cqe); - } - - rxq->rq = spnic_get_nic_queue(nic_dev->hwdev, rxq->q_id, SPNIC_RQ); - if (!rxq->rq) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to get rq\n"); - return -EINVAL; - } - - pkts = spnic_rx_fill_wqe(rxq); - 
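Note: spnic_configure_rxqs() above carves one dma_alloc_coherent() block into per-descriptor CQEs by walking the CPU pointer and the DMA address in lockstep, one sizeof(cqe) at a time. A standalone sketch of that slicing, with calloc() and a made-up integer bus address in place of the coherent allocation:

/*
 * Standalone sketch of slicing one contiguous CQE allocation across a ring:
 * each slot gets the i-th element's CPU pointer plus the matching offset
 * into the DMA address of the same block. The DMA side is faked with a
 * plain integer base address.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct rq_cqe { uint32_t status; uint32_t vlan_len; uint32_t pkt_info; uint32_t rsvd; };

struct rx_info {
	struct rq_cqe *cqe;	/* CPU view of this slot's CQE */
	uint64_t cqe_dma;	/* device view (offset into the same block) */
};

int main(void)
{
	const uint32_t rq_depth = 4;
	struct rq_cqe *cqe_block = calloc(rq_depth, sizeof(*cqe_block));
	uint64_t cqe_block_dma = 0x1000;	/* pretend bus address of the block */
	struct rx_info info[4];

	if (!cqe_block)
		return 1;

	for (uint32_t i = 0; i < rq_depth; i++) {
		info[i].cqe = &cqe_block[i];
		info[i].cqe_dma = cqe_block_dma + i * sizeof(struct rq_cqe);
		printf("slot %u: cpu=%p dma=0x%llx\n", i,
		       (void *)info[i].cqe, (unsigned long long)info[i].cqe_dma);
	}
	free(cqe_block);
	return 0;
}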
if (pkts != rxq->q_depth) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to fill rx wqe\n"); - return -EFAULT; - } - - pkts = spnic_rx_fill_buffers(rxq); - if (!pkts) { - nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to fill Rx buffer\n"); - return -ENOMEM; - } - } - - return 0; -} - -void spnic_free_rxqs(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - kfree(nic_dev->rxqs); -} - -int spnic_alloc_rxqs(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct pci_dev *pdev = nic_dev->pdev; - struct spnic_rxq *rxq = NULL; - u16 num_rxqs = nic_dev->max_qps; - u16 q_id; - u64 rxq_size; - - rxq_size = num_rxqs * sizeof(*nic_dev->rxqs); - if (!rxq_size) { - nic_err(&pdev->dev, "Cannot allocate zero size rxqs\n"); - return -EINVAL; - } - - nic_dev->rxqs = kzalloc(rxq_size, GFP_KERNEL); - if (!nic_dev->rxqs) { - nic_err(&pdev->dev, "Failed to allocate rxqs\n"); - return -ENOMEM; - } - - for (q_id = 0; q_id < num_rxqs; q_id++) { - rxq = &nic_dev->rxqs[q_id]; - rxq->netdev = netdev; - rxq->dev = &pdev->dev; - rxq->q_id = q_id; - rxq->buf_len = nic_dev->rx_buff_len; - rxq->rx_buff_shift = ilog2(nic_dev->rx_buff_len); - rxq->dma_rx_buff_size = nic_dev->dma_rx_buff_size; - rxq->q_depth = nic_dev->q_params.rq_depth; - rxq->q_mask = nic_dev->q_params.rq_depth - 1; - - rxq_stats_init(rxq); - } - - return 0; -} - -int spnic_rx_configure(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - int err; - - if (test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) { - err = spnic_rss_init(nic_dev); - if (err) { - nicif_err(nic_dev, drv, netdev, "Failed to init rss\n"); - return -EFAULT; - } - } - - return 0; -} - -void spnic_rx_remove_configure(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - if (test_bit(SPNIC_RSS_ENABLE, &nic_dev->flags)) - spnic_rss_deinit(nic_dev); -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_rx.h b/drivers/net/ethernet/ramaxel/spnic/spnic_rx.h deleted file mode 100644 index 564b5765a41b..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_rx.h +++ /dev/null @@ -1,118 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_RX_H -#define SPNIC_RX_H - -#include <linux/types.h> - -/*rx cqe checksum err*/ -#define SPNIC_RX_CSUM_IP_CSUM_ERR BIT(0) -#define SPNIC_RX_CSUM_TCP_CSUM_ERR BIT(1) -#define SPNIC_RX_CSUM_UDP_CSUM_ERR BIT(2) -#define SPNIC_RX_CSUM_IGMP_CSUM_ERR BIT(3) -#define SPNIC_RX_CSUM_ICMPV4_CSUM_ERR BIT(4) -#define SPNIC_RX_CSUM_ICMPV6_CSUM_ERR BIT(5) -#define SPNIC_RX_CSUM_SCTP_CRC_ERR BIT(6) -#define SPNIC_RX_CSUM_HW_CHECK_NONE BIT(7) -#define SPNIC_RX_CSUM_IPSU_OTHER_ERR BIT(8) - -#define SPNIC_HEADER_DATA_UNIT 2 - -struct spnic_rxq_stats { - u64 packets; - u64 bytes; - u64 errors; - u64 csum_errors; - u64 other_errors; - u64 dropped; - u64 xdp_dropped; - u64 rx_buf_empty; - - u64 alloc_skb_err; - u64 alloc_rx_buf_err; - u64 xdp_large_pkt; - struct u64_stats_sync syncp; -}; - -struct spnic_rx_info { - dma_addr_t buf_dma_addr; - - struct spnic_rq_cqe *cqe; - dma_addr_t cqe_dma; - struct page *page; - u32 page_offset; - struct spnic_rq_wqe *rq_wqe; - struct sk_buff *saved_skb; - u32 skb_len; -}; - -struct spnic_rxq { - struct net_device *netdev; - - u16 q_id; - u32 q_depth; - u32 q_mask; - - u16 buf_len; - u32 rx_buff_shift; - u32 dma_rx_buff_size; - - struct spnic_rxq_stats rxq_stats; - u32 cons_idx; - u32 delta; - - u32 irq_id; - u16 msix_entry_idx; 
- - struct spnic_rx_info *rx_info; - struct spnic_io_queue *rq; - struct bpf_prog *xdp_prog; - - struct spnic_irq *irq_cfg; - u16 next_to_alloc; - u16 next_to_update; - struct device *dev; /* device for DMA mapping */ - - unsigned long status; - dma_addr_t cqe_start_paddr; - void *cqe_start_vaddr; - - u64 last_moder_packets; - u64 last_moder_bytes; - u8 last_coalesc_timer_cfg; - u8 last_pending_limt; -} ____cacheline_aligned; - -struct spnic_dyna_rxq_res { - u16 next_to_alloc; - struct spnic_rx_info *rx_info; - dma_addr_t cqe_start_paddr; - void *cqe_start_vaddr; -}; - -int spnic_alloc_rxqs(struct net_device *netdev); - -void spnic_free_rxqs(struct net_device *netdev); - -int spnic_alloc_rxqs_res(struct spnic_nic_dev *nic_dev, u16 num_rq, - u32 rq_depth, struct spnic_dyna_rxq_res *rxqs_res); - -void spnic_free_rxqs_res(struct spnic_nic_dev *nic_dev, u16 num_rq, - u32 rq_depth, struct spnic_dyna_rxq_res *rxqs_res); - -int spnic_configure_rxqs(struct spnic_nic_dev *nic_dev, u16 num_rq, - u32 rq_depth, struct spnic_dyna_rxq_res *rxqs_res); - -int spnic_rx_configure(struct net_device *netdev); - -void spnic_rx_remove_configure(struct net_device *netdev); - -int spnic_rx_poll(struct spnic_rxq *rxq, int budget); -int stub_spnic_rx_poll(struct spnic_rxq *rxq, int budget); - -void spnic_rxq_get_stats(struct spnic_rxq *rxq, struct spnic_rxq_stats *stats); - -void spnic_rxq_clean_stats(struct spnic_rxq_stats *rxq_stats); - -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_sriov.c b/drivers/net/ethernet/ramaxel/spnic/spnic_sriov.c deleted file mode 100644 index aac22cb302ea..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_sriov.c +++ /dev/null @@ -1,200 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt - -#include <linux/pci.h> -#include <linux/interrupt.h> - -#include "sphw_common.h" -#include "sphw_crm.h" -#include "sphw_hw.h" -#include "spnic_lld.h" -#include "spnic_sriov.h" -#include "spnic_dev_mgmt.h" - -int spnic_init_vf_hw(void *hwdev, u16 start_vf_id, u16 end_vf_id) -{ - u16 i, func_idx; - int err; - - /* mbox msg channel resources will be freed during remove process */ - err = sphw_init_func_mbox_msg_channel(hwdev, sphw_func_max_vf(hwdev)); - if (err) - return err; - - /* vf use 256K as default wq page size, and can't change it */ - for (i = start_vf_id; i <= end_vf_id; i++) { - func_idx = sphw_glb_pf_vf_offset(hwdev) + i; - err = sphw_set_wq_page_size(hwdev, func_idx, SPHW_DEFAULT_WQ_PAGE_SIZE, - SPHW_CHANNEL_COMM); - if (err) - return err; - } - - return 0; -} - -int spnic_deinit_vf_hw(void *hwdev, u16 start_vf_id, u16 end_vf_id) -{ - u16 func_idx, idx; - - for (idx = start_vf_id; idx <= end_vf_id; idx++) { - func_idx = sphw_glb_pf_vf_offset(hwdev) + idx; - sphw_set_wq_page_size(hwdev, func_idx, SPHW_HW_WQ_PAGE_SIZE, SPHW_CHANNEL_COMM); - } - - return 0; -} - -int spnic_pci_sriov_disable(struct pci_dev *dev) -{ -#ifdef CONFIG_PCI_IOV - struct spnic_sriov_info *sriov_info = NULL; - struct sphw_event_info event = {0}; - void *hwdev = NULL; - u16 tmp_vfs; - - sriov_info = spnic_get_sriov_info_by_pcidev(dev); - hwdev = spnic_get_hwdev_by_pcidev(dev); - if (!hwdev) { - sdk_err(&dev->dev, "SR-IOV disable is not permitted, please wait...\n"); - return -EPERM; - } - - /* if SR-IOV is already disabled then there is nothing to do */ - if (!sriov_info->sriov_enabled) - return 0; - - if (test_and_set_bit(SPNIC_SRIOV_DISABLE, &sriov_info->state)) { - sdk_err(&dev->dev, "SR-IOV 
disable in process, please wait"); - return -EPERM; - } - - /* If our VFs are assigned we cannot shut down SR-IOV - * without causing issues, so just leave the hardware - * available but disabled - */ - if (pci_vfs_assigned(dev)) { - clear_bit(SPNIC_SRIOV_DISABLE, &sriov_info->state); - sdk_warn(&dev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n"); - return -EPERM; - } - - event.type = SPHW_EVENT_SRIOV_STATE_CHANGE; - event.sriov_state.enable = 0; - sphw_event_callback(hwdev, &event); - - sriov_info->sriov_enabled = false; - - /* disable iov and allow time for transactions to clear */ - pci_disable_sriov(dev); - - tmp_vfs = (u16)sriov_info->num_vfs; - sriov_info->num_vfs = 0; - spnic_deinit_vf_hw(hwdev, 1, tmp_vfs); - - clear_bit(SPNIC_SRIOV_DISABLE, &sriov_info->state); - -#endif - - return 0; -} - -int spnic_pci_sriov_enable(struct pci_dev *dev, int num_vfs) -{ -#ifdef CONFIG_PCI_IOV - struct spnic_sriov_info *sriov_info = NULL; - struct sphw_event_info event = {0}; - void *hwdev = NULL; - int pre_existing_vfs = 0; - int err = 0; - - sriov_info = spnic_get_sriov_info_by_pcidev(dev); - hwdev = spnic_get_hwdev_by_pcidev(dev); - if (!hwdev) { - sdk_err(&dev->dev, "SR-IOV enable is not permitted, please wait...\n"); - return -EPERM; - } - - if (test_and_set_bit(SPNIC_SRIOV_ENABLE, &sriov_info->state)) { - sdk_err(&dev->dev, "SR-IOV enable in process, please wait, num_vfs %d\n", - num_vfs); - return -EPERM; - } - - pre_existing_vfs = pci_num_vf(dev); - - if (num_vfs > pci_sriov_get_totalvfs(dev)) { - clear_bit(SPNIC_SRIOV_ENABLE, &sriov_info->state); - return -ERANGE; - } - if (pre_existing_vfs && pre_existing_vfs != num_vfs) { - err = spnic_pci_sriov_disable(dev); - if (err) { - clear_bit(SPNIC_SRIOV_ENABLE, &sriov_info->state); - return err; - } - } else if (pre_existing_vfs == num_vfs) { - clear_bit(SPNIC_SRIOV_ENABLE, &sriov_info->state); - return num_vfs; - } - - err = spnic_init_vf_hw(hwdev, 1, (u16)num_vfs); - if (err) { - sdk_err(&dev->dev, "Failed to init vf in hardware before enable sriov, error %d\n", - err); - clear_bit(SPNIC_SRIOV_ENABLE, &sriov_info->state); - return err; - } - - err = pci_enable_sriov(dev, num_vfs); - if (err) { - sdk_err(&dev->dev, "Failed to enable SR-IOV, error %d\n", err); - clear_bit(SPNIC_SRIOV_ENABLE, &sriov_info->state); - return err; - } - - sriov_info->sriov_enabled = true; - sriov_info->num_vfs = num_vfs; - - event.type = SPHW_EVENT_SRIOV_STATE_CHANGE; - event.sriov_state.enable = 1; - event.sriov_state.num_vfs = (u16)num_vfs; - sphw_event_callback(hwdev, &event); - - clear_bit(SPNIC_SRIOV_ENABLE, &sriov_info->state); - - return num_vfs; -#else - - return 0; -#endif -} - -static bool spnic_is_support_sriov_configure(struct pci_dev *pdev) -{ - /* TODO: get cap from firmware */ - - return true; -} - -int spnic_pci_sriov_configure(struct pci_dev *dev, int num_vfs) -{ - struct spnic_sriov_info *sriov_info = NULL; - - if (!spnic_is_support_sriov_configure(dev)) - return -EFAULT; - - sriov_info = spnic_get_sriov_info_by_pcidev(dev); - if (!sriov_info) - return -EFAULT; - - if (!test_bit(SPNIC_FUNC_PERSENT, &sriov_info->state)) - return -EFAULT; - - if (!num_vfs) - return spnic_pci_sriov_disable(dev); - else - return spnic_pci_sriov_enable(dev, num_vfs); -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_sriov.h b/drivers/net/ethernet/ramaxel/spnic/spnic_sriov.h deleted file mode 100644 index 622845e30427..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_sriov.h +++ /dev/null @@ -1,24 +0,0 @@ -/* 
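Note: spnic_pci_sriov_enable()/_disable() serialize against each other with a state bit: test_and_set_bit() marks the operation in progress, a concurrent caller bails out with -EPERM, and every exit path clears the bit. A standalone sketch of that guard, using a C11 atomic_flag in place of the kernel bitops:

/*
 * Standalone sketch of the state-bit guard: an atomic test-and-set marks the
 * operation in progress so a concurrent caller backs off instead of racing,
 * and every exit path releases the flag again.
 */
#include <stdio.h>
#include <stdatomic.h>
#include <errno.h>

static atomic_flag sriov_op_busy = ATOMIC_FLAG_INIT;

static int sriov_reconfigure(int num_vfs)
{
	int err = 0;

	if (atomic_flag_test_and_set(&sriov_op_busy)) {
		fprintf(stderr, "SR-IOV change already in progress, please wait\n");
		return -EPERM;
	}

	/* ... validate num_vfs, program the device, enable/disable VFs ... */
	printf("reconfigured to %d VFs\n", num_vfs);

	atomic_flag_clear(&sriov_op_busy);	/* always released, even on error */
	return err;
}

int main(void)
{
	sriov_reconfigure(4);
	sriov_reconfigure(0);
	return 0;
}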
SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_SRIOV_H -#define SPNIC_SRIOV_H -#include <linux/types.h> - -enum spnic_sriov_state { - SPNIC_SRIOV_DISABLE, - SPNIC_SRIOV_ENABLE, - SPNIC_FUNC_PERSENT, -}; - -struct spnic_sriov_info { - bool sriov_enabled; - unsigned int num_vfs; - unsigned long state; -}; - -struct spnic_sriov_info *spnic_get_sriov_info_by_pcidev(struct pci_dev *pdev); -int spnic_pci_sriov_disable(struct pci_dev *dev); -int spnic_pci_sriov_enable(struct pci_dev *dev, int num_vfs); -int spnic_pci_sriov_configure(struct pci_dev *dev, int num_vfs); -#endif diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_tx.c b/drivers/net/ethernet/ramaxel/spnic/spnic_tx.c deleted file mode 100644 index 7478e76aa729..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_tx.c +++ /dev/null @@ -1,877 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt - -#include <linux/netdevice.h> -#include <linux/kernel.h> -#include <linux/skbuff.h> -#include <linux/interrupt.h> -#include <linux/device.h> -#include <linux/pci.h> -#include <linux/tcp.h> -#include <linux/sctp.h> -#include <net/xfrm.h> -#include <linux/dma-mapping.h> -#include <linux/types.h> -#include <linux/u64_stats_sync.h> -#include <linux/module.h> -#include <linux/vmalloc.h> - -#include "sphw_crm.h" -#include "spnic_nic_qp.h" -#include "spnic_nic_io.h" -#include "spnic_nic_cfg.h" -#include "spnic_nic_dev.h" -#include "spnic_tx.h" - -#define MIN_SKB_LEN 32 - -#define MAX_PAYLOAD_OFFSET 221 - -#define NIC_QID(q_id, nic_dev) ((q_id) & ((nic_dev)->num_qps - 1)) - -#define SPNIC_TX_TASK_WRAPPED 1 -#define SPNIC_TX_BD_DESC_WRAPPED 2 - -#define TXQ_STATS_INC(txq, field) \ -do { \ - u64_stats_update_begin(&(txq)->txq_stats.syncp); \ - (txq)->txq_stats.field++; \ - u64_stats_update_end(&(txq)->txq_stats.syncp); \ -} while (0) - -void spnic_txq_get_stats(struct spnic_txq *txq, struct spnic_txq_stats *stats) -{ - struct spnic_txq_stats *txq_stats = &txq->txq_stats; - unsigned int start; - - u64_stats_update_begin(&stats->syncp); - do { - start = u64_stats_fetch_begin(&txq_stats->syncp); - stats->bytes = txq_stats->bytes; - stats->packets = txq_stats->packets; - stats->busy = txq_stats->busy; - stats->wake = txq_stats->wake; - stats->dropped = txq_stats->dropped; - } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); - u64_stats_update_end(&stats->syncp); -} - -void spnic_txq_clean_stats(struct spnic_txq_stats *txq_stats) -{ - u64_stats_update_begin(&txq_stats->syncp); - txq_stats->bytes = 0; - txq_stats->packets = 0; - txq_stats->busy = 0; - txq_stats->wake = 0; - txq_stats->dropped = 0; - - txq_stats->skb_pad_err = 0; - txq_stats->frag_len_overflow = 0; - txq_stats->offload_cow_skb_err = 0; - txq_stats->map_frag_err = 0; - txq_stats->unknown_tunnel_pkt = 0; - txq_stats->frag_size_err = 0; - u64_stats_update_end(&txq_stats->syncp); -} - -static void txq_stats_init(struct spnic_txq *txq) -{ - struct spnic_txq_stats *txq_stats = &txq->txq_stats; - - u64_stats_init(&txq_stats->syncp); - spnic_txq_clean_stats(txq_stats); -} - -static inline void spnic_set_buf_desc(struct spnic_sq_bufdesc *buf_descs, dma_addr_t addr, u32 len) -{ - buf_descs->hi_addr = upper_32_bits(addr); - buf_descs->lo_addr = lower_32_bits(addr); - buf_descs->len = len; -} - -static int tx_map_skb(struct spnic_nic_dev *nic_dev, struct sk_buff *skb, - u16 valid_nr_frags, struct spnic_txq *txq, - struct 
spnic_tx_info *tx_info, - struct spnic_sq_wqe_combo *wqe_combo) -{ - struct spnic_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0; - struct spnic_sq_bufdesc *buf_desc = wqe_combo->bds_head; - struct spnic_dma_info *dma_info = tx_info->dma_info; - struct pci_dev *pdev = nic_dev->pdev; - skb_frag_t *frag = NULL; - u32 j, i; - int err; - - dma_info[0].dma = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); - if (dma_mapping_error(&pdev->dev, dma_info[0].dma)) { - TXQ_STATS_INC(txq, map_frag_err); - return -EFAULT; - } - - dma_info[0].len = skb_headlen(skb); - - wqe_desc->hi_addr = upper_32_bits(dma_info[0].dma); - wqe_desc->lo_addr = lower_32_bits(dma_info[0].dma); - - wqe_desc->ctrl_len = dma_info[0].len; - - for (i = 0; i < valid_nr_frags;) { - frag = &(skb_shinfo(skb)->frags[i]); - if (unlikely(i == wqe_combo->first_bds_num)) - buf_desc = wqe_combo->bds_sec2; - - i++; - dma_info[i].dma = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), - DMA_TO_DEVICE); - if (dma_mapping_error(&pdev->dev, dma_info[i].dma)) { - TXQ_STATS_INC(txq, map_frag_err); - i--; - err = -EFAULT; - goto frag_map_err; - } - dma_info[i].len = skb_frag_size(frag); - - spnic_set_buf_desc(buf_desc, dma_info[i].dma, dma_info[i].len); - buf_desc++; - } - - return 0; - -frag_map_err: - for (j = 0; j < i;) { - j++; - dma_unmap_page(&pdev->dev, dma_info[j].dma, - dma_info[j].len, DMA_TO_DEVICE); - } - dma_unmap_single(&pdev->dev, dma_info[0].dma, dma_info[0].len, DMA_TO_DEVICE); - return err; -} - -static inline void tx_unmap_skb(struct spnic_nic_dev *nic_dev, - struct sk_buff *skb, u16 valid_nr_frags, - struct spnic_dma_info *dma_info) -{ - struct pci_dev *pdev = nic_dev->pdev; - int i; - - for (i = 0; i < valid_nr_frags; ) { - i++; - dma_unmap_page(&pdev->dev, dma_info[i].dma, dma_info[i].len, DMA_TO_DEVICE); - } - - dma_unmap_single(&pdev->dev, dma_info[0].dma, dma_info[0].len, DMA_TO_DEVICE); -} - -union spnic_l4 { - struct tcphdr *tcp; - struct udphdr *udp; - unsigned char *hdr; -}; - -enum sq_l3_type { - UNKNOWN_L3TYPE = 0, - IPV6_PKT = 1, - IPV4_PKT_NO_CHKSUM_OFFLOAD = 2, - IPV4_PKT_WITH_CHKSUM_OFFLOAD = 3, -}; - -enum sq_l4offload_type { - OFFLOAD_DISABLE = 0, - TCP_OFFLOAD_ENABLE = 1, - SCTP_OFFLOAD_ENABLE = 2, - UDP_OFFLOAD_ENABLE = 3, -}; - -/*initialize l4_len and offset*/ -static inline void get_inner_l4_info(struct sk_buff *skb, union spnic_l4 *l4, - u8 l4_proto, u32 *offset, - enum sq_l4offload_type *l4_offload) -{ - switch (l4_proto) { - case IPPROTO_TCP: - *l4_offload = TCP_OFFLOAD_ENABLE; - /* To keep same with TSO, payload offset begins from paylaod */ - *offset = (l4->tcp->doff << 2) + TRANSPORT_OFFSET(l4->hdr, skb); - break; - - case IPPROTO_UDP: - *l4_offload = UDP_OFFLOAD_ENABLE; - *offset = TRANSPORT_OFFSET(l4->hdr, skb); - break; - default: - break; - } -} - -static int spnic_tx_csum(struct spnic_txq *txq, struct spnic_sq_task *task, struct sk_buff *skb) -{ - if (skb->ip_summed != CHECKSUM_PARTIAL) - return 0; - - if (skb->encapsulation) { - union spnic_ip ip; - u8 l4_proto; - - task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, TUNNEL_FLAG); - - ip.hdr = skb_network_header(skb); - if (ip.v4->version == IPV4_VERSION) { - l4_proto = ip.v4->protocol; - } else if (ip.v4->version == IPV6_VERSION) { - union spnic_l4 l4; - unsigned char *exthdr; - __be16 frag_off; - - exthdr = ip.hdr + sizeof(*ip.v6); - l4_proto = ip.v6->nexthdr; - l4.hdr = skb_transport_header(skb); - if (l4.hdr != exthdr) - ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, &frag_off); - } else { - l4_proto = IPPROTO_RAW; 
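Note: tx_map_skb() above maps the linear part first, then each fragment, and on any mapping failure unwinds exactly the mappings that succeeded before returning. A standalone sketch of that map-then-unwind flow, with a stub mapper that fails at a chosen fragment index (the index and fragment count are arbitrary):

/*
 * Standalone sketch of the map-then-unwind pattern: fragments are mapped in
 * order, and if one mapping fails, everything mapped so far is released
 * before returning.
 */
#include <stdio.h>
#include <errno.h>

#define NR_FRAGS 4

static int fail_at = 2;		/* make the stub fail on this fragment */

static int map_frag(int i)
{
	return (i == fail_at) ? -EFAULT : 0;
}

static void unmap_frag(int i)
{
	printf("  unmapped frag %d\n", i);
}

static int map_all_frags(void)
{
	int i, err;

	for (i = 0; i < NR_FRAGS; i++) {
		err = map_frag(i);
		if (err)
			goto unwind;
		printf("  mapped frag %d\n", i);
	}
	return 0;

unwind:
	/* release only what was successfully mapped, in reverse order */
	while (--i >= 0)
		unmap_frag(i);
	return err;
}

int main(void)
{
	printf("mapping with a failure injected at frag %d:\n", fail_at);
	printf("result: %d\n", map_all_frags());
	return 0;
}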
- } - - if (l4_proto != IPPROTO_UDP || - ((struct udphdr *)skb_transport_header(skb))->dest != VXLAN_OFFLOAD_PORT_BE) { - TXQ_STATS_INC(txq, unknown_tunnel_pkt); - /* Unsupport tunnel packet, disable csum offload */ - skb_checksum_help(skb); - return 0; - } - } - - task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, INNER_L4_EN); - - return 1; -} - -static void get_inner_l3_l4_type(struct sk_buff *skb, union spnic_ip *ip, - union spnic_l4 *l4, enum sq_l3_type *l3_type, u8 *l4_proto) -{ - unsigned char *exthdr = NULL; - - if (ip->v4->version == 4) { - *l3_type = IPV4_PKT_WITH_CHKSUM_OFFLOAD; - *l4_proto = ip->v4->protocol; - - } else if (ip->v4->version == 6) { - *l3_type = IPV6_PKT; - exthdr = ip->hdr + sizeof(*ip->v6); - *l4_proto = ip->v6->nexthdr; - if (exthdr != l4->hdr) { - __be16 frag_off = 0; - - ipv6_skip_exthdr(skb, (int)(exthdr - skb->data), l4_proto, &frag_off); - } - } else { - *l3_type = UNKNOWN_L3TYPE; - *l4_proto = 0; - } -} - -static inline void spnic_set_tso_info(struct spnic_sq_task *task, u32 *queue_info, - enum sq_l4offload_type l4_offload, u32 offset, u32 mss) -{ - if (l4_offload == TCP_OFFLOAD_ENABLE) { - *queue_info |= SQ_CTRL_QUEUE_INFO_SET(1U, TSO); - task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, INNER_L4_EN); - } else if (l4_offload == UDP_OFFLOAD_ENABLE) { - *queue_info |= SQ_CTRL_QUEUE_INFO_SET(1U, UFO); - task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, INNER_L4_EN); - } - - /*Default enable L3 calculation*/ - task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, INNER_L3_EN); - - *queue_info |= SQ_CTRL_QUEUE_INFO_SET(offset >> 1, PLDOFF); - - /* set MSS value */ - *queue_info = SQ_CTRL_QUEUE_INFO_CLEAR(*queue_info, MSS); - *queue_info |= SQ_CTRL_QUEUE_INFO_SET(mss, MSS); -} - -static int spnic_tso(struct spnic_sq_task *task, u32 *queue_info, struct sk_buff *skb) -{ - enum sq_l4offload_type l4_offload = OFFLOAD_DISABLE; - enum sq_l3_type l3_type; - union spnic_ip ip; - union spnic_l4 l4; - u32 offset = 0; - u8 l4_proto; - int err; - - if (!skb_is_gso(skb)) - return 0; - - err = skb_cow_head(skb, 0); - if (err < 0) - return err; - - if (skb->encapsulation) { - u32 gso_type = skb_shinfo(skb)->gso_type; - /* L3 checksum always enable */ - task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, OUT_L3_EN); - task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, TUNNEL_FLAG); - - l4.hdr = skb_transport_header(skb); - ip.hdr = skb_network_header(skb); - - if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { - l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP); - task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, OUT_L4_EN); - } else if (gso_type & SKB_GSO_UDP_TUNNEL) { - } - - ip.hdr = skb_inner_network_header(skb); - l4.hdr = skb_inner_transport_header(skb); - } else { - ip.hdr = skb_network_header(skb); - l4.hdr = skb_transport_header(skb); - } - - get_inner_l3_l4_type(skb, &ip, &l4, &l3_type, &l4_proto); - - if (l4_proto == IPPROTO_TCP) - l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP); - - get_inner_l4_info(skb, &l4, l4_proto, &offset, &l4_offload); - - spnic_set_tso_info(task, queue_info, l4_offload, offset, skb_shinfo(skb)->gso_size); - - return 1; -} - -static u32 spnic_tx_offload(struct sk_buff *skb, struct spnic_sq_task *task, - u32 *queue_info, struct spnic_txq *txq) -{ - u32 offload = 0; - int tso_cs_en; - - task->pkt_info0 = 0; - task->ip_identify = 0; - task->pkt_info2 = 0; - task->vlan_offload = 0; - - tso_cs_en = spnic_tso(task, queue_info, skb); - if (tso_cs_en < 0) { - offload = TX_OFFLOAD_INVALID; - return offload; - } else if (tso_cs_en) { - offload |= TX_OFFLOAD_TSO; - } else { - tso_cs_en = spnic_tx_csum(txq, task, skb); - if (tso_cs_en) 
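Note: get_inner_l3_l4_type() keys everything off the IP version field: 4 selects the IPv4 checksum path, 6 walks the IPv6 extension headers, anything else is an unknown L3 type with offload left off. A standalone sketch of reading that version nibble from a raw header buffer; the sample headers below are fabricated and not full packets:

/*
 * Standalone sketch of header-version dispatch: the first nibble of an IP
 * header tells IPv4 from IPv6, and anything else is treated as an unknown
 * L3 type.
 */
#include <stdio.h>
#include <stdint.h>

enum l3_type { L3_UNKNOWN, L3_IPV4, L3_IPV6 };

static enum l3_type classify_l3(const uint8_t *l3_hdr)
{
	uint8_t version = l3_hdr[0] >> 4;	/* version lives in the top nibble */

	if (version == 4)
		return L3_IPV4;
	if (version == 6)
		return L3_IPV6;
	return L3_UNKNOWN;
}

int main(void)
{
	uint8_t ipv4_hdr[20] = { 0x45 };	/* version 4, IHL 5 */
	uint8_t ipv6_hdr[40] = { 0x60 };	/* version 6 */
	uint8_t junk[4]      = { 0x12 };

	printf("ipv4 sample -> %d\n", classify_l3(ipv4_hdr));
	printf("ipv6 sample -> %d\n", classify_l3(ipv6_hdr));
	printf("junk sample -> %d\n", classify_l3(junk));
	return 0;
}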
- offload |= TX_OFFLOAD_CSUM; - } - -#define VLAN_INSERT_MODE_MAX 5 - if (unlikely(skb_vlan_tag_present(skb))) { - /* select vlan insert mode by qid, default 802.1Q Tag type*/ - spnic_set_vlan_tx_offload(task, skb_vlan_tag_get(skb), - txq->q_id % VLAN_INSERT_MODE_MAX); - offload |= TX_OFFLOAD_VLAN; - } - - if (unlikely(SQ_CTRL_QUEUE_INFO_GET(*queue_info, PLDOFF) > MAX_PAYLOAD_OFFSET)) { - offload = TX_OFFLOAD_INVALID; - return offload; - } - - return offload; -} - -static inline void get_pkt_stats(struct spnic_tx_info *tx_info, struct sk_buff *skb) -{ - u32 ihs, hdr_len; - - if (skb_is_gso(skb)) { - if (skb->encapsulation) - ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); - else - ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); - - hdr_len = (skb_shinfo(skb)->gso_segs - 1) * ihs; - tx_info->num_bytes = skb->len + (u64)hdr_len; - - } else { - tx_info->num_bytes = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN; - } - - tx_info->num_pkts = 1; -} - -static inline int spnic_maybe_stop_tx(struct spnic_txq *txq, u16 wqebb_cnt) -{ - if (likely(spnic_get_sq_free_wqebbs(txq->sq) >= wqebb_cnt)) - return 0; - - /* We need to check again in a case another CPU has just - * made room available. - */ - netif_stop_subqueue(txq->netdev, txq->q_id); - - if (likely(spnic_get_sq_free_wqebbs(txq->sq) < wqebb_cnt)) - return -EBUSY; - - /* there have enough wqebbs after queue is wake up */ - netif_start_subqueue(txq->netdev, txq->q_id); - - return 0; -} - -static inline u16 spnic_set_wqe_combo(struct spnic_txq *txq, struct spnic_sq_wqe_combo *wqe_combo, - u32 offload, u16 num_sge, u16 *curr_pi) -{ - void *second_part_wqebbs_addr = NULL; - void *wqe = NULL; - u16 first_part_wqebbs_num, tmp_pi; - - wqe_combo->ctrl_bd0 = spnic_get_sq_one_wqebb(txq->sq, curr_pi); - if (!offload && num_sge == 1) { - wqe_combo->wqe_type = SQ_WQE_COMPACT_TYPE; - return spnic_get_and_update_sq_owner(txq->sq, *curr_pi, 1); - } - - wqe_combo->wqe_type = SQ_WQE_EXTENDED_TYPE; - - if (offload) { - wqe_combo->task = spnic_get_sq_one_wqebb(txq->sq, &tmp_pi); - wqe_combo->task_type = SQ_WQE_TASKSECT_16BYTES; - } else { - wqe_combo->task_type = SQ_WQE_TASKSECT_46BITS; - } - - if (num_sge > 1) { - /* first wqebb contain bd0, and bd size is equal to sq wqebb - * size, so we use (num_sge - 1) as wanted weqbb_cnt - */ - wqe = spnic_get_sq_multi_wqebbs(txq->sq, num_sge - 1, &tmp_pi, - &second_part_wqebbs_addr, - &first_part_wqebbs_num); - wqe_combo->bds_head = wqe; - wqe_combo->bds_sec2 = second_part_wqebbs_addr; - wqe_combo->first_bds_num = first_part_wqebbs_num; - } - - return spnic_get_and_update_sq_owner(txq->sq, *curr_pi, num_sge + (u16)!!offload); -} - -inline u8 spnic_get_vlan_pri(struct sk_buff *skb) -{ - u16 vlan_tci = 0; - int err; - - err = vlan_get_tag(skb, &vlan_tci); - if (err) - return 0; - - return (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; -} - -static netdev_tx_t spnic_send_one_skb(struct sk_buff *skb, struct net_device *netdev, - struct spnic_txq *txq) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct spnic_sq_wqe_combo wqe_combo = {0}; - struct spnic_tx_info *tx_info = NULL; - struct spnic_sq_task task; - u32 offload, queue_info = 0; - u16 owner = 0, pi = 0; - u16 wqebb_cnt, num_sge, valid_nr_frags; - bool find_zero_sge_len = false; - int err, i; - - if (unlikely(skb->len < MIN_SKB_LEN)) { - if (skb_pad(skb, (int)(MIN_SKB_LEN - skb->len))) { - TXQ_STATS_INC(txq, skb_pad_err); - goto tx_skb_pad_err; - } - - skb->len = MIN_SKB_LEN; - } - - valid_nr_frags = 0; - for (i = 0; i < 
skb_shinfo(skb)->nr_frags; i++) { - if (!skb_frag_size(&skb_shinfo(skb)->frags[i])) { - find_zero_sge_len = true; - continue; - } else if (find_zero_sge_len) { - TXQ_STATS_INC(txq, frag_size_err); - goto tx_drop_pkts; - } - - valid_nr_frags++; - } - - num_sge = valid_nr_frags + 1; - - /* assume need normal TS format wqe, task info need 1 wqebb */ - wqebb_cnt = num_sge + 1; - if (unlikely(spnic_maybe_stop_tx(txq, wqebb_cnt))) { - TXQ_STATS_INC(txq, busy); - return NETDEV_TX_BUSY; - } - - offload = spnic_tx_offload(skb, &task, &queue_info, txq); - if (unlikely(offload == TX_OFFLOAD_INVALID)) { - TXQ_STATS_INC(txq, offload_cow_skb_err); - goto tx_drop_pkts; - } else if (!offload) { - /* no TS in current wqe */ - wqebb_cnt -= 1; - } - - owner = spnic_set_wqe_combo(txq, &wqe_combo, offload, num_sge, &pi); - if (offload) { - wqe_combo.task->ip_identify = task.ip_identify; - wqe_combo.task->pkt_info0 = task.pkt_info0; - wqe_combo.task->pkt_info2 = task.pkt_info2; - wqe_combo.task->vlan_offload = task.vlan_offload; - } - - tx_info = &txq->tx_info[pi]; - tx_info->skb = skb; - tx_info->wqebb_cnt = wqebb_cnt; - tx_info->valid_nr_frags = valid_nr_frags; - - err = tx_map_skb(nic_dev, skb, valid_nr_frags, txq, tx_info, &wqe_combo); - if (err) { - spnic_rollback_sq_wqebbs(txq->sq, wqebb_cnt, owner); - goto tx_drop_pkts; - } - - get_pkt_stats(tx_info, skb); - - spnic_prepare_sq_ctrl(&wqe_combo, queue_info, num_sge, owner); - - spnic_write_db(txq->sq, txq->cos, SQ_CFLAG_DP, spnic_get_sq_local_pi(txq->sq)); - - return NETDEV_TX_OK; - -tx_drop_pkts: - dev_kfree_skb_any(skb); - -tx_skb_pad_err: - TXQ_STATS_INC(txq, dropped); - - return NETDEV_TX_OK; -} - -netdev_tx_t spnic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - u16 q_id = skb_get_queue_mapping(skb); - struct spnic_txq *txq = &nic_dev->txqs[q_id]; - - return spnic_send_one_skb(skb, netdev, txq); -} - -netdev_tx_t spnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct spnic_txq *txq = NULL; - u16 q_id = skb_get_queue_mapping(skb); - - if (unlikely(!netif_carrier_ok(netdev))) { - dev_kfree_skb_any(skb); - SPNIC_NIC_STATS_INC(nic_dev, tx_carrier_off_drop); - return NETDEV_TX_OK; - } - - if (unlikely(q_id >= nic_dev->q_params.num_qps)) { - txq = &nic_dev->txqs[0]; - SPNIC_NIC_STATS_INC(nic_dev, tx_invalid_qid); - goto tx_drop_pkts; - } - txq = &nic_dev->txqs[q_id]; - - return spnic_send_one_skb(skb, netdev, txq); - -tx_drop_pkts: - dev_kfree_skb_any(skb); - u64_stats_update_begin(&txq->txq_stats.syncp); - txq->txq_stats.dropped++; - u64_stats_update_end(&txq->txq_stats.syncp); - - return NETDEV_TX_OK; -} - -static inline void tx_free_skb(struct spnic_nic_dev *nic_dev, struct spnic_tx_info *tx_info) -{ - tx_unmap_skb(nic_dev, tx_info->skb, tx_info->valid_nr_frags, - tx_info->dma_info); - dev_kfree_skb_any(tx_info->skb); - tx_info->skb = NULL; -} - -static void free_all_tx_skbs(struct spnic_nic_dev *nic_dev, u32 sq_depth, - struct spnic_tx_info *tx_info_arr) -{ - struct spnic_tx_info *tx_info = NULL; - u32 idx; - - for (idx = 0; idx < sq_depth; idx++) { - tx_info = &tx_info_arr[idx]; - if (tx_info->skb) - tx_free_skb(nic_dev, tx_info); - } -} - -int spnic_tx_poll(struct spnic_txq *txq, int budget) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(txq->netdev); - struct spnic_tx_info *tx_info = NULL; - u64 tx_bytes = 0, wake = 0; - int pkts = 0, nr_pkts = 0; - u16 wqebb_cnt = 0; - u16 hw_ci, sw_ci = 0, q_id = 
txq->sq->q_id; - - hw_ci = spnic_get_sq_hw_ci(txq->sq); - dma_rmb(); - sw_ci = spnic_get_sq_local_ci(txq->sq); - - do { - tx_info = &txq->tx_info[sw_ci]; - - /* Whether all of the wqebb of this wqe is completed */ - if (hw_ci == sw_ci || - ((hw_ci - sw_ci) & txq->q_mask) < tx_info->wqebb_cnt) - break; - - sw_ci = (u16)(sw_ci + tx_info->wqebb_cnt) & txq->q_mask; - prefetch(&txq->tx_info[sw_ci]); - - wqebb_cnt += tx_info->wqebb_cnt; - - tx_bytes += tx_info->num_bytes; - nr_pkts += tx_info->num_pkts; - pkts++; - - tx_free_skb(nic_dev, tx_info); - - } while (likely(pkts < budget)); - - spnic_update_sq_local_ci(txq->sq, wqebb_cnt); - - if (unlikely(__netif_subqueue_stopped(nic_dev->netdev, q_id) && - spnic_get_sq_free_wqebbs(txq->sq) >= 1 && - test_bit(SPNIC_INTF_UP, &nic_dev->flags))) { - struct netdev_queue *netdev_txq = netdev_get_tx_queue(txq->netdev, q_id); - - __netif_tx_lock(netdev_txq, smp_processor_id()); - /* To avoid re-waking subqueue with xmit_frame */ - if (__netif_subqueue_stopped(nic_dev->netdev, q_id)) { - netif_wake_subqueue(nic_dev->netdev, q_id); - wake++; - } - __netif_tx_unlock(netdev_txq); - } - - u64_stats_update_begin(&txq->txq_stats.syncp); - txq->txq_stats.bytes += tx_bytes; - txq->txq_stats.packets += nr_pkts; - txq->txq_stats.wake += wake; - u64_stats_update_end(&txq->txq_stats.syncp); - - return pkts; -} - -void spnic_set_txq_cos(struct spnic_nic_dev *nic_dev, u16 start_qid, u16 q_num, u8 cos) -{ - u16 idx; - - for (idx = 0; idx < q_num; idx++) - nic_dev->txqs[idx + start_qid].cos = cos; -} - -#define SPNIC_BDS_PER_SQ_WQEBB \ - (SPNIC_SQ_WQEBB_SIZE / sizeof(struct spnic_sq_bufdesc)) - -int spnic_alloc_txqs_res(struct spnic_nic_dev *nic_dev, u16 num_sq, - u32 sq_depth, struct spnic_dyna_txq_res *txqs_res) -{ - struct spnic_dyna_txq_res *tqres = NULL; - int idx, i; - u64 size; - - for (idx = 0; idx < num_sq; idx++) { - tqres = &txqs_res[idx]; - - size = sizeof(*tqres->tx_info) * sq_depth; - tqres->tx_info = kzalloc(size, GFP_KERNEL); - if (!tqres->tx_info) { - nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to alloc txq%d tx info\n", idx); - goto err_out; - } - - size = sizeof(*tqres->bds) * (sq_depth * SPNIC_BDS_PER_SQ_WQEBB + SPNIC_MAX_SQ_SGE); - tqres->bds = kzalloc(size, GFP_KERNEL); - if (!tqres->bds) { - kfree(tqres->tx_info); - nicif_err(nic_dev, drv, nic_dev->netdev, - "Failed to alloc txq%d bds info\n", idx); - goto err_out; - } - } - - return 0; - -err_out: - for (i = 0; i < idx; i++) { - tqres = &txqs_res[i]; - - kfree(tqres->bds); - kfree(tqres->tx_info); - } - - return -ENOMEM; -} - -void spnic_free_txqs_res(struct spnic_nic_dev *nic_dev, u16 num_sq, - u32 sq_depth, struct spnic_dyna_txq_res *txqs_res) -{ - struct spnic_dyna_txq_res *tqres = NULL; - int idx; - - for (idx = 0; idx < num_sq; idx++) { - tqres = &txqs_res[idx]; - - free_all_tx_skbs(nic_dev, sq_depth, tqres->tx_info); - kfree(tqres->bds); - kfree(tqres->tx_info); - } -} - -int spnic_configure_txqs(struct spnic_nic_dev *nic_dev, u16 num_sq, - u32 sq_depth, struct spnic_dyna_txq_res *txqs_res) -{ - struct spnic_dyna_txq_res *tqres = NULL; - struct spnic_txq *txq = NULL; - u16 q_id; - u32 idx; - - for (q_id = 0; q_id < num_sq; q_id++) { - txq = &nic_dev->txqs[q_id]; - tqres = &txqs_res[q_id]; - - txq->q_depth = sq_depth; - txq->q_mask = sq_depth - 1; - - txq->tx_info = tqres->tx_info; - for (idx = 0; idx < sq_depth; idx++) - txq->tx_info[idx].dma_info = &tqres->bds[idx * SPNIC_BDS_PER_SQ_WQEBB]; - - txq->sq = spnic_get_nic_queue(nic_dev->hwdev, q_id, SPNIC_SQ); - if (!txq->sq) { - 
nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to get %u sq\n", q_id); - return -EFAULT; - } - } - - return 0; -} - -int spnic_alloc_txqs(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - struct pci_dev *pdev = nic_dev->pdev; - struct spnic_txq *txq = NULL; - u16 q_id, num_txqs = nic_dev->max_qps; - u64 txq_size; - - txq_size = num_txqs * sizeof(*nic_dev->txqs); - if (!txq_size) { - nic_err(&pdev->dev, "Cannot allocate zero size txqs\n"); - return -EINVAL; - } - - nic_dev->txqs = kzalloc(txq_size, GFP_KERNEL); - if (!nic_dev->txqs) { - nic_err(&pdev->dev, "Failed to allocate txqs\n"); - return -ENOMEM; - } - - for (q_id = 0; q_id < num_txqs; q_id++) { - txq = &nic_dev->txqs[q_id]; - txq->netdev = netdev; - txq->q_id = q_id; - txq->q_depth = nic_dev->q_params.sq_depth; - txq->q_mask = nic_dev->q_params.sq_depth - 1; - txq->dev = &pdev->dev; - - txq_stats_init(txq); - } - - return 0; -} - -void spnic_free_txqs(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - - kfree(nic_dev->txqs); -} - -static bool is_hw_complete_sq_process(struct spnic_io_queue *sq) -{ - u16 sw_pi, hw_ci; - - sw_pi = spnic_get_sq_local_pi(sq); - hw_ci = spnic_get_sq_hw_ci(sq); - - return sw_pi == hw_ci; -} - -#define SPNIC_FLUSH_QUEUE_TIMEOUT 1000 -static int spnic_stop_sq(struct spnic_txq *txq) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(txq->netdev); - unsigned long timeout; - int err; - - timeout = msecs_to_jiffies(SPNIC_FLUSH_QUEUE_TIMEOUT) + jiffies; - do { - if (is_hw_complete_sq_process(txq->sq)) - return 0; - - usleep_range(900, 1000); - } while (time_before(jiffies, timeout)); - - /* force hardware to drop packets */ - timeout = msecs_to_jiffies(SPNIC_FLUSH_QUEUE_TIMEOUT) + jiffies; - do { - if (is_hw_complete_sq_process(txq->sq)) - return 0; - - err = spnic_force_drop_tx_pkt(nic_dev->hwdev); - if (err) - break; - - usleep_range(9900, 10000); - } while (time_before(jiffies, timeout)); - - /* Avoid msleep takes too long and get a fake result */ - if (is_hw_complete_sq_process(txq->sq)) - return 0; - - return -EFAULT; -} - -/* should stop transmit any packets before calling this function */ -int spnic_flush_txqs(struct net_device *netdev) -{ - struct spnic_nic_dev *nic_dev = netdev_priv(netdev); - u16 qid; - int err; - - for (qid = 0; qid < nic_dev->q_params.num_qps; qid++) { - err = spnic_stop_sq(&nic_dev->txqs[qid]); - if (err) - nicif_err(nic_dev, drv, netdev, "Failed to stop sq%u\n", qid); - } - - return 0; -} diff --git a/drivers/net/ethernet/ramaxel/spnic/spnic_tx.h b/drivers/net/ethernet/ramaxel/spnic/spnic_tx.h deleted file mode 100644 index c3109db83299..000000000000 --- a/drivers/net/ethernet/ramaxel/spnic/spnic_tx.h +++ /dev/null @@ -1,129 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */ - -#ifndef SPNIC_TX_H -#define SPNIC_TX_H - -#include <linux/ip.h> -#include <linux/ipv6.h> -#include <net/ipv6.h> -#include <net/checksum.h> -#include <net/ip6_checksum.h> - -#include "spnic_nic_qp.h" - -#define VXLAN_OFFLOAD_PORT_BE htons(4789) - -enum tx_offload_type { - TX_OFFLOAD_TSO = BIT(0), - TX_OFFLOAD_CSUM = BIT(1), - TX_OFFLOAD_VLAN = BIT(2), - TX_OFFLOAD_INVALID = BIT(3), - TX_OFFLOAD_ESP = BIT(4), -}; - -struct spnic_txq_stats { - u64 packets; - u64 bytes; - u64 busy; - u64 wake; - u64 dropped; - - /* Subdivision statistics show in private tool */ - u64 skb_pad_err; - u64 frag_len_overflow; - u64 offload_cow_skb_err; - u64 map_frag_err; - u64 unknown_tunnel_pkt; - u64 
frag_size_err; - - struct u64_stats_sync syncp; -}; - -struct spnic_dma_info { - dma_addr_t dma; - u32 len; -}; - -#define IPV4_VERSION 4 -#define IPV6_VERSION 6 -#define TCP_HDR_DOFF_UNIT 2 -#define TRANSPORT_OFFSET(l4_hdr, skb) ((u32)((l4_hdr) - (skb)->data)) - -union spnic_ip { - struct iphdr *v4; - struct ipv6hdr *v6; - unsigned char *hdr; -}; - -struct spnic_tx_info { - struct sk_buff *skb; - - u16 wqebb_cnt; - u16 valid_nr_frags; - - int num_sge; - u16 num_pkts; - u64 num_bytes; - struct spnic_dma_info *dma_info; -}; - -struct spnic_txq { - struct net_device *netdev; - struct device *dev; - - struct spnic_txq_stats txq_stats; - - u8 cos; - u16 q_id; - u32 q_mask; - u32 q_depth; - - struct spnic_tx_info *tx_info; - struct spnic_io_queue *sq; - - u64 last_moder_packets; - u64 last_moder_bytes; -} ____cacheline_aligned; - -netdev_tx_t spnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); - -netdev_tx_t spnic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev); - -struct spnic_dyna_txq_res { - struct spnic_tx_info *tx_info; - struct spnic_dma_info *bds; -}; - -void spnic_txq_get_stats(struct spnic_txq *txq, struct spnic_txq_stats *stats); - -void spnic_txq_clean_stats(struct spnic_txq_stats *txq_stats); - -struct spnic_nic_dev; -int spnic_alloc_txqs_res(struct spnic_nic_dev *nic_dev, u16 num_sq, - u32 sq_depth, struct spnic_dyna_txq_res *txqs_res); - -void spnic_free_txqs_res(struct spnic_nic_dev *nic_dev, u16 num_sq, - u32 sq_depth, struct spnic_dyna_txq_res *txqs_res); - -int spnic_configure_txqs(struct spnic_nic_dev *nic_dev, u16 num_sq, - u32 sq_depth, struct spnic_dyna_txq_res *txqs_res); - -int spnic_alloc_txqs(struct net_device *netdev); - -void spnic_free_txqs(struct net_device *netdev); - -int spnic_tx_poll(struct spnic_txq *txq, int budget); - -int spnic_flush_txqs(struct net_device *netdev); - -void spnic_set_txq_cos(struct spnic_nic_dev *nic_dev, u16 start_qid, u16 q_num, u8 cos); - -static inline __sum16 csum_magic(union spnic_ip *ip, unsigned short proto) -{ - return (ip->v4->version == IPV4_VERSION) ? - csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) : - csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0); -} - -#endif