[PATCH OLK-6.6] net/hinic5: Add Huawei Intelligent Network Card Driver: hinic5
From: zhengjiezhen <zhengjiezhen@h-partners.com> driver inclusion category: feature bugzilla: https://atomgit.com/openeuler/kernel/issues/9130 CVE: NA --------------------------------- The NIC driver supports the following features: Supports IPv4/IPv6 TCP/UDP checksum, TSO, LRO offload and RSS functions. Supports interrupt aggregation parameter configuration and interrupt adaptation. Supports 802.1Q VLAN (Virtual Local Area Network) offloading and filtering. Supports NIC SR-IOV (Single Root I/O Virtualization). Supports PF promiscuous mode. Supports VF QinQ mode. Supports VF link state configuration and QoS configuration. Supports VF MAC address management. Supports VF spoofchk checking. Supports port identification (LED lighting). Supports Ethernet port auto-negotiation and pause frames. Signed-off-by: zhengjiezhen <zhengjiezhen@h-partners.com> Signed-off-by: shijing <shijing34@huawei.com> Signed-off-by: sunhuayi <sunhuayi@huawei.com> Signed-off-by: gongfan <gongfan1@huawei.com> --- hinic5/src/GLOBAL_VERSION_NEW | 1 + hinic5/src/dpu_develop_interface/base_type.h | 20 + .../drv_sdk_intf/hisdk/hinic5_chip_info.h | 37 + .../drv_sdk_intf/hisdk/hinic5_common.h | 235 ++ .../drv_sdk_intf/hisdk/hinic5_crm.h | 1852 +++++++++++ .../drv_sdk_intf/hisdk/hinic5_hinic5_cqm.h | 904 ++++++ .../hisdk/hinic5_hinic5_cqm_adpt.h | 51 + .../hisdk/hinic5_hinic5_vram_api.h | 50 + .../drv_sdk_intf/hisdk/hinic5_hw.h | 1277 ++++++++ .../drv_sdk_intf/hisdk/hinic5_lld.h | 399 +++ .../drv_sdk_intf/hisdk/hinic5_mt.h | 750 +++++ .../drv_sdk_intf/hisdk/hinic5_profile.h | 357 ++ .../drv_sdk_intf/hisdk/hinic5_wq.h | 260 ++ .../drv_sdk_intf/ossl/nic_kcompat.h | 60 + .../drv_sdk_intf/ossl/ossl_knl.h | 52 + .../drv_sdk_intf/ossl/ossl_knl_linux.h | 1713 ++++++++++ .../drv_sdk_intf/ossl/ossl_knl_linux_nic.h | 379 +++ .../drv_sdk_intf/ossl/ossl_knl_uefi.h | 844 +++++ .../drv_sdk_intf/ossl/sdk_kcompat.h | 20 + .../ossl/uefi/HwSafeMemOpWrapper.h | 33 + .../drv_sdk_intf/ossl/uefi/HwSafePrint.h | 53 + 
.../drv_sdk_intf/ossl/vbs_kcompat.h | 13 + .../drv_srvc_intf/drv_bond_api.h | 192 ++ .../drv_srvc_intf/nic/drv_nic_api.h | 226 ++ .../fw_msg_intf/bond/bond_common_defs.h | 87 + .../fw_msg_intf/bond/bond_mpu_cmd_defs.h | 121 + .../fw_msg_intf/cfg_mgmt/cfg_mgmt_mpu_cmd.h | 10 + .../cfg_mgmt/cfg_mgmt_mpu_cmd_defs.h | 470 +++ .../fw_msg_intf/cfm/cfm_cmd.h | 41 + .../fw_msg_intf/cfm/fast_msg_common_define.h | 60 + .../hinic5_cqm/hinic5_cqm_npu_cmd.h | 23 + .../hinic5_cqm/hinic5_cqm_npu_cmd_defs.h | 83 + .../fw_msg_intf/macsec/macsec_mpu_cmd.h | 102 + .../fw_msg_intf/macsec/macsec_mpu_cmd_defs.h | 387 +++ .../fw_msg_intf/mag/mag_mpu_cmd.h | 85 + .../fw_msg_intf/mag/mag_mpu_cmd_defs.h | 1673 ++++++++++ .../fw_msg_intf/mpu/mpu_board_defs.h | 144 + .../fw_msg_intf/mpu/mpu_cmd_base_defs.h | 163 + .../fw_msg_intf/mpu/mpu_inband_cmd.h | 200 ++ .../fw_msg_intf/mpu/mpu_inband_cmd_defs.h | 1840 +++++++++++ .../fw_msg_intf/mpu/mpu_mailbox_msg_header.h | 61 + .../fw_msg_intf/nic/nic_cfg_comm.h | 807 +++++ .../fw_msg_intf/nic/nic_mpu_cmd.h | 257 ++ .../fw_msg_intf/nic/nic_mpu_cmd_extend.h | 12 + .../fw_msg_intf/nic/nic_mpu_cmd_structs.h | 1868 +++++++++++ .../nic/nic_mpu_cmd_structs_extend.h | 26 + .../fw_msg_intf/nic/nic_mpu_tc_cmd_defs.h | 137 + .../fw_msg_intf/nic/nic_npu_cmd.h | 82 + .../fw_msg_intf/nic/nic_tc_rule_defs.h | 617 ++++ .../fw_msg_intf/public/comm_defs.h | 57 + .../fw_msg_intf/public/hinic5_comm_cmd.h | 105 + .../fw_msg_intf/public/npu_cmdq_base_defs.h | 220 ++ .../public/sml_table_struct_dict_def.h | 24 + .../host/cfm/bond/hinic5_bond.c | 1287 ++++++++ .../host/cfm/bond/hinic5_bond_event.c | 422 +++ .../host/cfm/bond/hinic5_bond_inner.h | 94 + .../host/cfm/fast_msg/hinic5_fast_msg.c | 253 ++ .../host/cfm/fast_msg/hinic5_fast_msg_init.c | 294 ++ .../host/include/cfm/bond/hinic5_bond.h | 38 + .../include/cfm/fast_msg/hinic5_fast_msg.h | 107 + .../cfm/fast_msg/hinic5_fast_msg_init.h | 19 + .../include/sdk/knldk/hinic5_hinic5_vram.h | 33 + 
.../include/sdk/knldk/hinic5_vram_common.h | 182 ++ .../host/include/typedef.h | 9 + .../host/sdk/knldk/crm/hinic5_hw_cfg.c | 2286 +++++++++++++ .../host/sdk/knldk/crm/hinic5_hw_cfg.h | 444 +++ .../host/sdk/knldk/crm/hinic5_hw_comm.c | 1852 +++++++++++ .../host/sdk/knldk/crm/hinic5_hw_comm.h | 80 + .../host/sdk/knldk/crm/hinic5_hwdev.c | 2767 ++++++++++++++++ .../host/sdk/knldk/crm/hinic5_prof_adap.c | 66 + .../host/sdk/knldk/crm/hinic5_prof_adap.h | 89 + .../hinic5_cqm_182x_cmdq_ops.c | 81 + .../hinic5_cqm_182x_cmdq_ops.h | 41 + .../hinic5_cqm_187x_cmdq_ops.c | 80 + .../hinic5_cqm_187x_cmdq_ops.h | 54 + .../sdk/knldk/hinic5_cqm/hinic5_cqm_bat_cla.c | 2859 +++++++++++++++++ .../sdk/knldk/hinic5_cqm/hinic5_cqm_bat_cla.h | 258 ++ .../hinic5_cqm/hinic5_cqm_bitmap_table.c | 1744 ++++++++++ .../hinic5_cqm/hinic5_cqm_bitmap_table.h | 78 + .../knldk/hinic5_cqm/hinic5_cqm_bloomfilter.c | 517 +++ .../knldk/hinic5_cqm/hinic5_cqm_bloomfilter.h | 53 + .../sdk/knldk/hinic5_cqm/hinic5_cqm_cmd.c | 207 ++ .../sdk/knldk/hinic5_cqm/hinic5_cqm_cmd.h | 40 + .../sdk/knldk/hinic5_cqm/hinic5_cqm_cmdq.h | 26 + .../knldk/hinic5_cqm/hinic5_cqm_cmdq_adapt.c | 14 + .../host/sdk/knldk/hinic5_cqm/hinic5_cqm_db.c | 568 ++++ .../host/sdk/knldk/hinic5_cqm/hinic5_cqm_db.h | 36 + .../knldk/hinic5_cqm/hinic5_cqm_fast_msg.c | 159 + .../knldk/hinic5_cqm/hinic5_cqm_fast_msg.h | 35 + .../sdk/knldk/hinic5_cqm/hinic5_cqm_main.c | 2071 ++++++++++++ .../sdk/knldk/hinic5_cqm/hinic5_cqm_main.h | 505 +++ .../sdk/knldk/hinic5_cqm/hinic5_cqm_object.c | 1700 ++++++++++ .../sdk/knldk/hinic5_cqm/hinic5_cqm_object.h | 383 +++ .../hinic5_cqm/hinic5_cqm_object_intern.c | 1470 +++++++++ .../hinic5_cqm/hinic5_cqm_object_intern.h | 117 + .../knldk/hinic5_vram/hinic5_hinic5_vram.c | 294 ++ .../knldk/hinic5_vram/hinic5_vram_common.c | 210 ++ .../host/sdk/knldk/hwif/hinic5_api_cmd.c | 1216 +++++++ .../host/sdk/knldk/hwif/hinic5_cmdq.c | 1871 +++++++++++ .../host/sdk/knldk/hwif/hinic5_common.c | 100 + 
.../host/sdk/knldk/hwif/hinic5_enhance_cmdq.c | 238 ++ .../host/sdk/knldk/hwif/hinic5_eqs.c | 1824 +++++++++++ .../host/sdk/knldk/hwif/hinic5_hw_api.c | 453 +++ .../host/sdk/knldk/hwif/hinic5_hwif.c | 1335 ++++++++ .../host/sdk/knldk/hwif/hinic5_mbox.c | 2187 +++++++++++++ .../host/sdk/knldk/hwif/hinic5_mgmt.c | 1569 +++++++++ .../host/sdk/knldk/hwif/hinic5_sm_lt.h | 160 + .../host/sdk/knldk/hwif/hinic5_sml_lt.c | 143 + .../host/sdk/knldk/hwif/hinic5_wq.c | 156 + .../host/sdk/knldk/include/hinic5_api_cmd.h | 279 ++ .../host/sdk/knldk/include/hinic5_cmdq.h | 257 ++ .../sdk/knldk/include/hinic5_cmdq_enhance.h | 187 ++ .../host/sdk/knldk/include/hinic5_csr_inner.h | 214 ++ .../host/sdk/knldk/include/hinic5_eqs.h | 203 ++ .../host/sdk/knldk/include/hinic5_hw_api.h | 141 + .../host/sdk/knldk/include/hinic5_hwdev.h | 355 ++ .../sdk/knldk/include/hinic5_hwif_inner.h | 321 ++ .../host/sdk/knldk/include/hinic5_lld_inner.h | 25 + .../host/sdk/knldk/include/hinic5_mbox.h | 379 +++ .../host/sdk/knldk/include/hinic5_mgmt.h | 180 ++ .../sdk/knldk/include/hinic5_typedef_inner.h | 20 + .../host/sdk/knldk/lld/CMakeLists.txt | 94 + .../host/sdk/knldk/lld/hinic5_bus.c | 150 + .../host/sdk/knldk/lld/hinic5_bus.h | 58 + .../host/sdk/knldk/lld/hinic5_dev_mgmt.c | 1030 ++++++ .../host/sdk/knldk/lld/hinic5_dev_mgmt.h | 192 ++ .../host/sdk/knldk/lld/hinic5_lld.c | 1091 +++++++ .../host/sdk/knldk/lld/hinic5_lld_private.h | 18 + .../host/sdk/knldk/lld/hinic5_pci_id_tbl.h | 50 + .../host/sdk/knldk/lld/hinic5_pcie.c | 1109 +++++++ .../host/sdk/knldk/lld/hinic5_pcie.h | 26 + .../host/sdk/knldk/lld/hinic5_sriov.c | 300 ++ .../host/sdk/knldk/lld/hinic5_sriov.h | 27 + .../host/sdk/knldk/lld/hinic5_sysfs.c | 85 + .../host/sdk/knldk/lld/hinic5_sysfs.h | 16 + .../host/sdk/knldk/lld/hinic5_ubus.c | 707 ++++ .../host/sdk/knldk/lld/hinic5_ubus.h | 66 + .../host/sdk/knldk/lld/hinic5_ubus_id_tbl.h | 22 + .../host/sdk/knldk/lld/hinic5_ubus_sriov.c | 235 ++ .../host/sdk/knldk/lld/hinic5_ubus_sriov.h | 11 + 
.../host/sdk/knldk/mt/hinic5_devlink.c | 491 +++ .../host/sdk/knldk/mt/hinic5_devlink.h | 84 + .../host/sdk/knldk/mt/hinic5_fw_update.c | 925 ++++++ .../host/sdk/knldk/mt/hinic5_fw_update.h | 96 + .../host/sdk/knldk/mt/hinic5_hw_mt.c | 639 ++++ .../host/sdk/knldk/mt/hinic5_hw_mt.h | 179 ++ .../host/sdk/knldk/mt/hinic5_nictool.c | 1298 ++++++++ .../host/sdk/knldk/mt/hinic5_nictool.h | 39 + .../host/sdk/knldk/mt/hinic5_non_ptp.c | 446 +++ .../host/sdk/knldk/mt/hinic5_non_ptp.h | 25 + .../host/sdk/knldk/mt/hinic5_sdk_attack.c | 68 + .../host/sdk/knldk/mt/hinic5_sdk_attack.h | 15 + .../sdk/ossl/linux/kernel/ossl_knl_linux.c | 378 +++ .../host/service/include/hinic5_srv_nic.h | 237 ++ .../nic/comm/182x_cmdq_adapt/182x_cmdq_ops.c | 210 ++ .../nic/comm/182x_cmdq_adapt/182x_cmdq_ops.h | 38 + .../nic/comm/187x_cmdq_adapt/187x_cmdq_ops.c | 192 ++ .../nic/comm/187x_cmdq_adapt/187x_cmdq_ops.h | 67 + .../host/service/nic/comm/hinic5_cmdq_adapt.c | 12 + .../host/service/nic/comm/hinic5_mag_cfg.c | 1557 +++++++++ .../host/service/nic/comm/hinic5_nic.h | 262 ++ .../host/service/nic/comm/hinic5_nic_cfg.c | 2054 ++++++++++++ .../host/service/nic/comm/hinic5_nic_cfg_vf.c | 696 ++++ .../host/service/nic/comm/hinic5_nic_cmdq.h | 137 + .../host/service/nic/comm/hinic5_nic_dbg.c | 146 + .../host/service/nic/comm/hinic5_nic_event.c | 961 ++++++ .../host/service/nic/comm/hinic5_nic_io.c | 1172 +++++++ .../host/service/nic/comm/hinic5_rss_cfg.c | 360 +++ .../host/service/nic/include/hinic5_mag_cfg.h | 34 + .../host/service/nic/include/hinic5_nic_cfg.h | 776 +++++ .../service/nic/include/hinic5_nic_cfg_vf.h | 18 + .../host/service/nic/include/hinic5_nic_dbg.h | 21 + .../service/nic/include/hinic5_nic_event.h | 21 + .../host/service/nic/include/hinic5_nic_io.h | 415 +++ .../host/service/nic/include/hinic5_nic_rq.h | 276 ++ .../host/service/nic/include/hinic5_nic_sq.h | 279 ++ .../host/service/nic/linux/1588/hinic5_ptp.c | 301 ++ .../host/service/nic/linux/1588/hinic5_ptp.h | 40 + 
.../host/service/nic/linux/CMakeLists.txt | 82 + .../host/service/nic/linux/Makefile | 245 ++ .../host/service/nic/linux/ctrl/hinic5_irq.c | 194 ++ .../host/service/nic/linux/ctrl/hinic5_irq.h | 13 + .../host/service/nic/linux/ctrl/hinic5_main.c | 1578 +++++++++ .../host/service/nic/linux/ctrl/hinic5_main.h | 28 + .../nic/linux/ethtool/hinic5_ethtool.c | 757 +++++ .../nic/linux/ethtool/hinic5_ethtool.h | 53 + .../linux/ethtool/hinic5_ethtool_coalesce.c | 417 +++ .../linux/ethtool/hinic5_ethtool_coalesce.h | 31 + .../linux/ethtool/hinic5_ethtool_lb_test.c | 241 ++ .../linux/ethtool/hinic5_ethtool_lb_test.h | 35 + .../linux/ethtool/hinic5_ethtool_link_stats.c | 637 ++++ .../linux/ethtool/hinic5_ethtool_link_stats.h | 34 + .../linux/ethtool/hinic5_ethtool_port_stats.c | 763 +++++ .../linux/ethtool/hinic5_ethtool_port_stats.h | 73 + .../linux/ethtool/hinic5_ethtool_priv_flags.c | 164 + .../linux/ethtool/hinic5_ethtool_priv_flags.h | 21 + .../service/nic/linux/ethtool/hinic5_ntuple.c | 1226 +++++++ .../service/nic/linux/ethtool/hinic5_ntuple.h | 24 + .../service/nic/linux/ethtool/hinic5_rss.c | 1000 ++++++ .../service/nic/linux/ethtool/hinic5_rss.h | 103 + .../service/nic/linux/ethtool/hinic5_tc.c | 1163 +++++++ .../service/nic/linux/ethtool/hinic5_tc.h | 258 ++ .../host/service/nic/linux/ioctl/hinic5_dbg.c | 1276 ++++++++ .../host/service/nic/linux/ioctl/hinic5_dbg.h | 21 + .../host/service/nic/linux/ioctl/hinic5_dcb.c | 410 +++ .../host/service/nic/linux/ioctl/hinic5_dcb.h | 77 + .../nic/linux/macsec/hinic5_macsec_api.h | 32 + .../nic/linux/macsec/hinic5_macsec_common.h | 75 + .../nic/linux/macsec/hinic5_macsec_dev.h | 71 + .../nic/linux/macsec/hinic5_macsec_dfx.c | 97 + .../nic/linux/macsec/hinic5_macsec_dfx.h | 35 + .../nic/linux/macsec/hinic5_macsec_main.c | 201 ++ .../nic/linux/macsec/hinic5_macsec_mgmt.c | 231 ++ .../nic/linux/macsec/hinic5_macsec_nictool.c | 575 ++++ .../nic/linux/macsec/hinic5_macsec_protocol.c | 926 ++++++ 
.../nic/linux/macsec/hinic5_macsec_service.c | 401 +++ .../service/nic/linux/netdev/hinic5_filter.c | 473 +++ .../service/nic/linux/netdev/hinic5_filter.h | 10 + .../nic/linux/netdev/hinic5_netdev_ops.c | 2416 ++++++++++++++ .../nic/linux/netdev/hinic5_netdev_ops.h | 13 + .../service/nic/linux/netdev/hinic5_nic_dev.h | 472 +++ .../host/service/nic/linux/nicio/hinic5_rx.c | 1588 +++++++++ .../host/service/nic/linux/nicio/hinic5_rx.h | 231 ++ .../host/service/nic/linux/nicio/hinic5_tx.c | 1321 ++++++++ .../host/service/nic/linux/nicio/hinic5_tx.h | 217 ++ .../host/service/nic/linux/nicio/hinic5_xdp.c | 457 +++ .../host/service/nic/linux/nicio/hinic5_xdp.h | 67 + .../include/drv_fw_msg/cfm/bond_cfm_cmd.h | 155 + .../include/drv_fw_msg/cfm/qos_base_cmd.h | 113 + .../drv_fw_msg/mpu/inband_mpu_cmd_defs.h | 220 ++ .../include/drv_tool_msg/bond_pub_cmd.h | 45 + .../include/drv_tool_msg/hisec_pub_cmd.h | 279 ++ .../include/drv_tool_msg/macsec_pub_cmd.h | 197 ++ .../include/drv_tool_msg/nic_pub_cmd.h | 171 + .../include/drv_tool_msg/sdk_pub_cmd.h | 42 + .../dpu_platform_library/include/fw_typedef.h | 74 + hinic5/src/tools/micro_log/hinic5_micro_log.c | 1139 +++++++ hinic5/src/tools/micro_log/hinic5_micro_log.h | 66 + hinic5/src/tools/micro_log/micro_log_comm.c | 71 + hinic5/src/tools/micro_log/micro_log_comm.h | 67 + hinic5/src/tools/micro_log/micro_log_index.c | 134 + hinic5/src/tools/micro_log/micro_log_index.h | 12 + .../tools/micro_log/micro_log_procfs_cmd.c | 228 ++ .../tools/micro_log/micro_log_procfs_cmd.h | 12 + 244 files changed, 101385 insertions(+) create mode 100644 hinic5/src/GLOBAL_VERSION_NEW create mode 100644 hinic5/src/dpu_develop_interface/base_type.h create mode 100644 hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_chip_info.h create mode 100644 hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_common.h create mode 100644 hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_crm.h create mode 100644 
hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_hinic5_cqm.h create mode 100644 hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_hinic5_cqm_adpt.h create mode 100644 hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_hinic5_vram_api.h create mode 100644 hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_hw.h create mode 100644 hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_lld.h create mode 100644 hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_mt.h create mode 100644 hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_profile.h create mode 100644 hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_wq.h create mode 100644 hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/nic_kcompat.h create mode 100644 hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/ossl_knl.h create mode 100644 hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/ossl_knl_linux.h create mode 100644 hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/ossl_knl_linux_nic.h create mode 100644 hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/ossl_knl_uefi.h create mode 100644 hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/sdk_kcompat.h create mode 100644 hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/uefi/HwSafeMemOpWrapper.h create mode 100644 hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/uefi/HwSafePrint.h create mode 100644 hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/vbs_kcompat.h create mode 100644 hinic5/src/dpu_develop_interface/drv_srvc_intf/drv_bond_api.h create mode 100644 hinic5/src/dpu_develop_interface/drv_srvc_intf/nic/drv_nic_api.h create mode 100644 hinic5/src/dpu_develop_interface/fw_msg_intf/bond/bond_common_defs.h create mode 100644 hinic5/src/dpu_develop_interface/fw_msg_intf/bond/bond_mpu_cmd_defs.h create mode 100644 hinic5/src/dpu_develop_interface/fw_msg_intf/cfg_mgmt/cfg_mgmt_mpu_cmd.h create mode 100644 hinic5/src/dpu_develop_interface/fw_msg_intf/cfg_mgmt/cfg_mgmt_mpu_cmd_defs.h 
create mode 100644 hinic5/src/dpu_develop_interface/fw_msg_intf/cfm/cfm_cmd.h create mode 100644 hinic5/src/dpu_develop_interface/fw_msg_intf/cfm/fast_msg_common_define.h create mode 100644 hinic5/src/dpu_develop_interface/fw_msg_intf/hinic5_cqm/hinic5_cqm_npu_cmd.h create mode 100644 hinic5/src/dpu_develop_interface/fw_msg_intf/hinic5_cqm/hinic5_cqm_npu_cmd_defs.h create mode 100644 hinic5/src/dpu_develop_interface/fw_msg_intf/macsec/macsec_mpu_cmd.h create mode 100644 hinic5/src/dpu_develop_interface/fw_msg_intf/macsec/macsec_mpu_cmd_defs.h create mode 100644 hinic5/src/dpu_develop_interface/fw_msg_intf/mag/mag_mpu_cmd.h create mode 100644 hinic5/src/dpu_develop_interface/fw_msg_intf/mag/mag_mpu_cmd_defs.h create mode 100644 hinic5/src/dpu_develop_interface/fw_msg_intf/mpu/mpu_board_defs.h create mode 100644 hinic5/src/dpu_develop_interface/fw_msg_intf/mpu/mpu_cmd_base_defs.h create mode 100644 hinic5/src/dpu_develop_interface/fw_msg_intf/mpu/mpu_inband_cmd.h create mode 100644 hinic5/src/dpu_develop_interface/fw_msg_intf/mpu/mpu_inband_cmd_defs.h create mode 100644 hinic5/src/dpu_develop_interface/fw_msg_intf/mpu/mpu_mailbox_msg_header.h create mode 100644 hinic5/src/dpu_develop_interface/fw_msg_intf/nic/nic_cfg_comm.h create mode 100644 hinic5/src/dpu_develop_interface/fw_msg_intf/nic/nic_mpu_cmd.h create mode 100644 hinic5/src/dpu_develop_interface/fw_msg_intf/nic/nic_mpu_cmd_extend.h create mode 100644 hinic5/src/dpu_develop_interface/fw_msg_intf/nic/nic_mpu_cmd_structs.h create mode 100644 hinic5/src/dpu_develop_interface/fw_msg_intf/nic/nic_mpu_cmd_structs_extend.h create mode 100644 hinic5/src/dpu_develop_interface/fw_msg_intf/nic/nic_mpu_tc_cmd_defs.h create mode 100644 hinic5/src/dpu_develop_interface/fw_msg_intf/nic/nic_npu_cmd.h create mode 100644 hinic5/src/dpu_develop_interface/fw_msg_intf/nic/nic_tc_rule_defs.h create mode 100644 hinic5/src/dpu_develop_interface/fw_msg_intf/public/comm_defs.h create mode 100644 
hinic5/src/dpu_develop_interface/fw_msg_intf/public/hinic5_comm_cmd.h create mode 100644 hinic5/src/dpu_develop_interface/fw_msg_intf/public/npu_cmdq_base_defs.h create mode 100644 hinic5/src/dpu_develop_interface/fw_msg_intf/public/sml_table_struct_dict_def.h create mode 100644 hinic5/src/dpu_platform_library/host/cfm/bond/hinic5_bond.c create mode 100644 hinic5/src/dpu_platform_library/host/cfm/bond/hinic5_bond_event.c create mode 100644 hinic5/src/dpu_platform_library/host/cfm/bond/hinic5_bond_inner.h create mode 100644 hinic5/src/dpu_platform_library/host/cfm/fast_msg/hinic5_fast_msg.c create mode 100644 hinic5/src/dpu_platform_library/host/cfm/fast_msg/hinic5_fast_msg_init.c create mode 100644 hinic5/src/dpu_platform_library/host/include/cfm/bond/hinic5_bond.h create mode 100644 hinic5/src/dpu_platform_library/host/include/cfm/fast_msg/hinic5_fast_msg.h create mode 100644 hinic5/src/dpu_platform_library/host/include/cfm/fast_msg/hinic5_fast_msg_init.h create mode 100644 hinic5/src/dpu_platform_library/host/include/sdk/knldk/hinic5_hinic5_vram.h create mode 100644 hinic5/src/dpu_platform_library/host/include/sdk/knldk/hinic5_vram_common.h create mode 100644 hinic5/src/dpu_platform_library/host/include/typedef.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/crm/hinic5_hw_cfg.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/crm/hinic5_hw_cfg.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/crm/hinic5_hw_comm.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/crm/hinic5_hw_comm.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/crm/hinic5_hwdev.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/crm/hinic5_prof_adap.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/crm/hinic5_prof_adap.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_182x_cmdq_adapt/hinic5_cqm_182x_cmdq_ops.c create mode 100644 
hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_182x_cmdq_adapt/hinic5_cqm_182x_cmdq_ops.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_187x_cmdq_adapt/hinic5_cqm_187x_cmdq_ops.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_187x_cmdq_adapt/hinic5_cqm_187x_cmdq_ops.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_bat_cla.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_bat_cla.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_bitmap_table.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_bitmap_table.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_bloomfilter.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_bloomfilter.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_cmd.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_cmd.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_cmdq.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_cmdq_adapt.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_db.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_db.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_fast_msg.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_fast_msg.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_main.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_main.h create mode 100644 
hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_object.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_object.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_object_intern.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_object_intern.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_vram/hinic5_hinic5_vram.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_vram/hinic5_vram_common.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_api_cmd.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_cmdq.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_common.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_enhance_cmdq.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_eqs.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_hw_api.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_hwif.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_mbox.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_mgmt.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_sm_lt.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_sml_lt.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_wq.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_api_cmd.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_cmdq.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_cmdq_enhance.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_csr_inner.h create mode 
100644 hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_eqs.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_hw_api.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_hwdev.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_hwif_inner.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_lld_inner.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_mbox.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_mgmt.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_typedef_inner.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/lld/CMakeLists.txt create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_bus.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_bus.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_dev_mgmt.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_dev_mgmt.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_lld.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_lld_private.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_pci_id_tbl.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_pcie.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_pcie.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_sriov.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_sriov.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_sysfs.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_sysfs.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_ubus.c 
create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_ubus.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_ubus_id_tbl.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_ubus_sriov.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_ubus_sriov.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_devlink.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_devlink.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_fw_update.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_fw_update.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_hw_mt.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_hw_mt.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_nictool.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_nictool.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_non_ptp.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_non_ptp.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_sdk_attack.c create mode 100644 hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_sdk_attack.h create mode 100644 hinic5/src/dpu_platform_library/host/sdk/ossl/linux/kernel/ossl_knl_linux.c create mode 100644 hinic5/src/dpu_platform_library/host/service/include/hinic5_srv_nic.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/comm/182x_cmdq_adapt/182x_cmdq_ops.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/comm/182x_cmdq_adapt/182x_cmdq_ops.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/comm/187x_cmdq_adapt/187x_cmdq_ops.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/comm/187x_cmdq_adapt/187x_cmdq_ops.h 
create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_cmdq_adapt.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_mag_cfg.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_nic.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_nic_cfg.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_nic_cfg_vf.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_nic_cmdq.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_nic_dbg.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_nic_event.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_nic_io.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_rss_cfg.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_mag_cfg.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_nic_cfg.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_nic_cfg_vf.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_nic_dbg.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_nic_event.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_nic_io.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_nic_rq.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_nic_sq.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/1588/hinic5_ptp.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/1588/hinic5_ptp.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/CMakeLists.txt create mode 100644 
hinic5/src/dpu_platform_library/host/service/nic/linux/Makefile create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/ctrl/hinic5_irq.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/ctrl/hinic5_irq.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/ctrl/hinic5_main.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/ctrl/hinic5_main.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_coalesce.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_coalesce.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_lb_test.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_lb_test.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_link_stats.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_link_stats.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_port_stats.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_port_stats.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_priv_flags.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_priv_flags.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ntuple.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ntuple.h create mode 100644 
hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_rss.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_rss.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_tc.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_tc.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/ioctl/hinic5_dbg.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/ioctl/hinic5_dbg.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/ioctl/hinic5_dcb.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/ioctl/hinic5_dcb.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_api.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_common.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_dev.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_dfx.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_dfx.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_main.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_mgmt.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_nictool.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_protocol.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_service.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/netdev/hinic5_filter.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/netdev/hinic5_filter.h create mode 100644 
hinic5/src/dpu_platform_library/host/service/nic/linux/netdev/hinic5_netdev_ops.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/netdev/hinic5_netdev_ops.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/netdev/hinic5_nic_dev.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/nicio/hinic5_rx.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/nicio/hinic5_rx.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/nicio/hinic5_tx.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/nicio/hinic5_tx.h create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/nicio/hinic5_xdp.c create mode 100644 hinic5/src/dpu_platform_library/host/service/nic/linux/nicio/hinic5_xdp.h create mode 100644 hinic5/src/dpu_platform_library/include/drv_fw_msg/cfm/bond_cfm_cmd.h create mode 100644 hinic5/src/dpu_platform_library/include/drv_fw_msg/cfm/qos_base_cmd.h create mode 100644 hinic5/src/dpu_platform_library/include/drv_fw_msg/mpu/inband_mpu_cmd_defs.h create mode 100644 hinic5/src/dpu_platform_library/include/drv_tool_msg/bond_pub_cmd.h create mode 100644 hinic5/src/dpu_platform_library/include/drv_tool_msg/hisec_pub_cmd.h create mode 100644 hinic5/src/dpu_platform_library/include/drv_tool_msg/macsec_pub_cmd.h create mode 100644 hinic5/src/dpu_platform_library/include/drv_tool_msg/nic_pub_cmd.h create mode 100644 hinic5/src/dpu_platform_library/include/drv_tool_msg/sdk_pub_cmd.h create mode 100644 hinic5/src/dpu_platform_library/include/fw_typedef.h create mode 100644 hinic5/src/tools/micro_log/hinic5_micro_log.c create mode 100644 hinic5/src/tools/micro_log/hinic5_micro_log.h create mode 100644 hinic5/src/tools/micro_log/micro_log_comm.c create mode 100644 hinic5/src/tools/micro_log/micro_log_comm.h create mode 100644 hinic5/src/tools/micro_log/micro_log_index.c create mode 100644 
hinic5/src/tools/micro_log/micro_log_index.h create mode 100644 hinic5/src/tools/micro_log/micro_log_procfs_cmd.c create mode 100644 hinic5/src/tools/micro_log/micro_log_procfs_cmd.h diff --git a/hinic5/src/GLOBAL_VERSION_NEW b/hinic5/src/GLOBAL_VERSION_NEW new file mode 100644 index 00000000..a70350be --- /dev/null +++ b/hinic5/src/GLOBAL_VERSION_NEW @@ -0,0 +1 @@ +driver:100.0.1.100 \ No newline at end of file diff --git a/hinic5/src/dpu_develop_interface/base_type.h b/hinic5/src/dpu_develop_interface/base_type.h new file mode 100644 index 00000000..4251233e --- /dev/null +++ b/hinic5/src/dpu_develop_interface/base_type.h @@ -0,0 +1,20 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. + * Version : Initial + * Created : 2023/12/12 + * Description : basic data types, 所有对基础类型的定义只能在这里完成封装,禁止其他头文件封装基础类型 + */ + +#ifndef BASE_TYPE_H +#define BASE_TYPE_H + +typedef signed char s8; +typedef unsigned char u8; +typedef short s16; +typedef unsigned short u16; +typedef int s32; +typedef unsigned int u32; +typedef long long s64; +typedef unsigned long long u64; + +#endif /* BASE_TYPE_H */ diff --git a/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_chip_info.h b/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_chip_info.h new file mode 100644 index 00000000..4d5100d3 --- /dev/null +++ b/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_chip_info.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * Description: sdk's inner chip related structure and macro defined here. + * Note: TODO : put it in sdk's inner include folder after decoupling. 
+ * Create: 2025-08-13 +*/ + +#ifndef HINIC5_CHIP_INFO_H +#define HINIC5_CHIP_INFO_H + +#include "hinic5_crm.h" +#include "hinic5_mt.h" + +/** + * @brief struct card_node + * @details 定义一个名为card_node的结构体,表示一个网卡节点 + */ +struct card_node { + struct list_head node; /**< 表示一个列表头 */ + struct list_head func_list; /**< 表示一个功能列表头 */ + char chip_name[IFNAMSIZ]; /**< 存储芯片名称 */ + void *log_info; /**< 指向日志信息 */ + void *dbgtool_info; /**< 指向调试工具信息 */ + spinlock_t dbgtool_info_lock; /**< 保护fm_show更新上下文 */ + void *func_handle_array[MAX_FUNCTION_NUM]; /**< 存储功能句柄 */ + u16 func_num; /**< function数量 */ + u32 rsvd1; + atomic_t channel_busy_cnt; /**< 储通道忙碌计数 */ + void *priv_data; /**< 指向私有数据 */ + u64 rsvd2; + void *fw_update_context; /**< 指向固件更新上下文 */ + spinlock_t fw_update_context_lock; /**< 保护固件更新上下文 */ + struct hinic5_non_ptp_info *non_ptp_info; /**< 非ptp时间差信息 */ + bool exception_flag; /**< 是否出现致命异常 */ + u64 id; /**< 芯片唯一性id标识 */ +}; +#endif diff --git a/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_common.h b/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_common.h new file mode 100644 index 00000000..42490660 --- /dev/null +++ b/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_common.h @@ -0,0 +1,235 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_COMMON_H +#define HINIC5_COMMON_H + +#include <asm/byteorder.h> +#include <linux/types.h> + +/** + * @brief struct hinic5_dma_addr_align + * @details DMA地址对齐结构体 + */ +struct hinic5_dma_addr_align { + u32 real_size; /**< 真实大小 */ + + void *ori_vaddr; /**< 原始虚拟地址 */ + dma_addr_t ori_paddr; /**< 原始物理地址 */ + + void *align_vaddr; /**< 对齐的虚拟地址 */ + dma_addr_t align_paddr; /**< 对齐的物理地址 */ +}; + +/** + * @brief enum hinic5_wait_return - 等待处理的返回值枚举类 + * @details 有三种情况,处理完成,正在处理,处理出错 + */ +enum hinic5_wait_return { + WAIT_PROCESS_CPL = 0, /**< 表示处理完成,可以进行下一步操作 */ + WAIT_PROCESS_WAITING = 1, /**< 表示正在处理,需要继续等待 */ + WAIT_PROCESS_ERR = 2, /**< 
表示处理出错,需要进行错误处理 */ +}; + +/** + * @brief struct hinic5_sge + * @details 硬件标识符 + */ +struct hinic5_sge { + u32 hi_addr; /**< 地址的高32位 */ + u32 lo_addr; /**< 地址的低32位 */ + u32 len; /**< 数据大小 */ +}; + +/** + * @brief 分配一块设备相关的内存,并且内存地址需要按照一定的对齐方式进行对齐 + * @param dev_hdl:分配内存的设备句柄 + * @param size:需要分配的内存大小 + * @param align:内存地址对齐方式 + * @param flag:分配内存的标志 + * @param mem_align:返回的内存地址对齐信息 + * + * @details 分配一块设备相关的内存,并且内存地址需要按照一定的对齐方式进行对齐 + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_dma_zalloc_coherent_align(void *dev_hdl, u64 size, u64 align, + unsigned int flag, + struct hinic5_dma_addr_align *mem_align); + +/** + * @brief 释放DMA的内存 + * @param dev_hdl:设备句柄 + * @param mem_align:内存对齐结构体指针 + * + * @details 此函数用于释放通过hinic5_dma_alloc_coherent_align()函数分配的DMA内存 + * + * @return void + */ +void hinic5_dma_free_coherent_align(void *dev_hdl, + struct hinic5_dma_addr_align *mem_align); + + +/** + * @brief 定义一个名为wait_cpl_handler的函数指针类型 + * @param priv_data:私有数据,可以是任何类型的数据 + * + * @return 返回hinic5_wait_return枚举类型 + */ +typedef enum hinic5_wait_return (*wait_cpl_handler)(void *priv_data); + +/** + * @brief 在等待一定时间后,检查是否完成 + * @param priv_data:用于传递私有数据 + * @param handler:等待操作的处理函数 + * @param wait_total_ms:等待总时间,单位:毫秒 + * @param wait_once_us:每次等待时间,单位:微秒 + * + * @details 在等待一定时间后,检查是否完成 + * + * @return 返回检查结果 + * @retval 0:成功 + * @retval -EINVAL:参数无效 + * @retval -EIO:处理过程出错 + * @retval -ETIMEDOUT:超时 + */ +int hinic5_wait_for_timeout(void *priv_data, wait_cpl_handler handler, + u32 wait_total_ms, u32 wait_once_us); + +/** + * @brief 将数据从CPU字节序转换为大端字节序 + * @param data 要转换的数据 + * @param len 数据长度 + * + * @details 此函数将数据从CPU字节序转换为大端字节序。 + * 参数data表示要转换的数据,len表示数据长度。 + * + * @return void + */ +static inline void hinic5_cpu_to_be32(void *data, int len) +{ + int i, chunk_sz = sizeof(u32); + int data_len = len; + u32 *mem = (u32 *)data; + + if (!data) + return; + + data_len = data_len / chunk_sz; + + for (i = 0; i < data_len; i++) { + *mem = 
cpu_to_be32(*mem); + mem++; + } +} + +/** + * @brief 将大端模式的32位数据转换为当前CPU的字节序 + * @param data 要转换的数据的指针 + * @param len 要转换的数据的长度 + * + * @details 此函数将大端模式的32位数据转换为当前CPU的字节序。 + * 参数data指向要转换的数据,len表示要转换的数据的长度。 + * + * @return void + */ +static inline void hinic5_be32_to_cpu(void *data, int len) +{ + int i, chunk_sz = sizeof(u32); + int data_len = len; + u32 *mem = (u32 *)data; + + if (!data) + return; + + data_len = data_len / chunk_sz; + + for (i = 0; i < data_len; i++) { + *mem = be32_to_cpu(*mem); + mem++; + } +} + +/** + * @brief 设置hinic5_sge结构体的值 + * @param sge 要设置的hinic5_sge结构体指针 + * @param addr 要设置的地址 + * @param len 要设置的长度 + * + * @return void + */ +static inline void hinic5_set_sge(struct hinic5_sge *sge, dma_addr_t addr, + u32 len) +{ + sge->hi_addr = upper_32_bits(addr); + sge->lo_addr = lower_32_bits(addr); + sge->len = len; +} + +#ifdef HW_CONVERT_ENDIAN +#define hinic5_hw_be32(val) (val) /**< 将32位值转换为大端序,此处直接返回原值 */ +#define hinic5_hw_cpu64(val) (val) /**< 将64位值转换为CPU字节序,此处直接返回原值 */ +#define hinic5_hw_cpu32(val) (val) /**< 将32位值转换为CPU字节序,此处直接返回原值 */ +#define hinic5_hw_cpu16(val) (val) /**< 将16位值转换为CPU字节序,此处直接返回原值 */ +#else +#define hinic5_hw_be32(val) cpu_to_be32(val) /**< 将32位值转换为大端序,此处调用cpu_to_be32函数进行转换 */ +#define hinic5_hw_cpu64(val) be64_to_cpu(val) /**< 将64位值转换为CPU字节序,此处调用be64_to_cpu函数进行转换 */ +#define hinic5_hw_cpu32(val) be32_to_cpu(val) /**< 将32位值转换为CPU字节序,此处调用be32_to_cpu函数进行转换 */ +#define hinic5_hw_cpu16(val) be16_to_cpu(val) /**< 将16位值转换为CPU字节序,此处调用be16_to_cpu函数进行转换 */ +#endif + +/** + * @brief 将数据从主机字节序转换为网络字节序 + * @param data 要转换的数据 + * @param len 数据长度 + * + * @return 无 + */ +static inline void hinic5_hw_be32_len(void *data, int len) +{ +#ifndef HW_CONVERT_ENDIAN + int i, chunk_sz = sizeof(u32); + int data_len = len; + u32 *mem = (u32 *)data; + + if (!data) + return; + + data_len = data_len / chunk_sz; + + for (i = 0; i < data_len; i++) { + *mem = hinic5_hw_be32(*mem); + mem++; + } +#endif +} + +/** + * @brief 该函数用于将数据从CPU端转换为HW端的32位数据 + * 
@param data 要转换的数据 + * @param len 要转换的数据的长度 + * + * @return 无 + */ +static inline void hinic5_hw_cpu32_len(void *data, int len) +{ +#ifndef HW_CONVERT_ENDIAN + int i, chunk_sz = sizeof(u32); + int data_len = len; + u32 *mem = (u32 *)data; + + if (!data) + return; + + data_len = data_len / chunk_sz; + + for (i = 0; i < data_len; i++) { + *mem = hinic5_hw_cpu32(*mem); + mem++; + } +#endif +} + +#endif diff --git a/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_crm.h b/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_crm.h new file mode 100644 index 00000000..a41c7e5e --- /dev/null +++ b/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_crm.h @@ -0,0 +1,1852 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_CRM_H +#define HINIC5_CRM_H + +#include <asm-generic/int-ll64.h> +#include <linux/types.h> +#include <linux/time.h> +#include <linux/spinlock.h> + +#include "mag_mpu_cmd_defs.h" + +#define HINIC5_DRV_VERSION GLOBAL_VERSION_STR /* 用于存储驱动版本信息 */ +#define HINIC5_DRV_DESC "Intelligent Network Interface Card Driver" /* 用于表示驱动的描述信息 */ + +#define ARRAY_LEN(arr) ((int)((int)sizeof(arr) / (int)sizeof((arr)[0]))) /* 计算数组长度的宏 */ + +#define HINIC5_MGMT_VERSION_MAX_LEN 32 /* 定义HINIC5管理版本最大长度 */ + +#define HINIC5_FW_VERSION_NAME 16 /* 定义固件版本名称的长度 */ + +#define HISDK5_DCB_UP_MAX 0x8 /* 定义网络接口控制数据包(DCB)的最大用户优先级(UP) */ +/** + * @brief struct hinic5_fw_version + * @details 定义了一个结构体,用于存储固件的版本信息 + */ +struct hinic5_fw_version { + u8 mgmt_ver[HINIC5_FW_VERSION_NAME]; + u8 microcode_ver[HINIC5_FW_VERSION_NAME]; + u8 boot_ver[HINIC5_FW_VERSION_NAME]; +}; +#define HINIC5_MGMT_CMD_UNSUPPORTED 0xFF /* 表示管理命令不支持的情况 */ + +/** + * @brief enum hinic5_service_type + * @details 定义了服务类型枚举, + * show each drivers only such as nic_service_cap, + * toe_service_cap structure, but not show service_cap + */ +enum hinic5_service_type { + SERVICE_T_NIC = 0, + SERVICE_T_OVS = 1, + SERVICE_T_ROCE = 2, + 
SERVICE_T_TOE = 3, + SERVICE_T_IOE = 4, + SERVICE_T_FC = 5, + SERVICE_T_VBS = 6, + SERVICE_T_IPSEC = 7, + SERVICE_T_VIRTIO = 8, + SERVICE_T_MIGRATE = 9, + SERVICE_T_PPA = 10, + SERVICE_T_CUSTOM = 11, + SERVICE_T_VROCE = 12, + SERVICE_T_UB = 13, + SERVICE_T_JBOF = 14, + SERVICE_T_MACSEC = 15, + SERVICE_T_DMMU = 16, + SERVICE_T_CFM = 17, + SERVICE_T_BIFUR = 18, + SERVICE_T_HIHTR = 19, + SERVICE_T_MAX = 20, + + /* Only used for interruption resource management, + * mark the request module + */ + SERVICE_T_INTF = (1 << 15), + SERVICE_T_HINIC5_CQM = (1 << 16), +}; + +/** + * @brief enum hinic5_ppf_flr_type + * @details 定义hinic5_ppf_flr_type枚举类型 + */ +enum hinic5_ppf_flr_type { + STATELESS_FLR_TYPE, /* 无状态FLR类型 */ + STATEFUL_FLR_TYPE, /* 有状态FLR类型 */ +}; + +/** + * @brief struct nic_service_cap + * @details 网络接口服务能力结构体 + */ +struct nic_service_cap { + u16 max_sqs; /* 最大发送队列数量 */ + u16 max_rqs; /* 最大接收队列数量 */ + u16 default_num_queues; /* 默认队列数量 */ +}; + +/** + * @brief struct ppa_service_cap + * @details 定义了ppa服务的能力结构体 + */ +struct ppa_service_cap { + u16 qpc_fake_vf_start; /* 起始的虚拟函数号 */ + u16 qpc_fake_vf_num; /*可用的虚拟函数数量 */ + u32 qpc_fake_vf_ctx_num; /* 可用的虚拟函数上下文数量 */ + u32 pctx_sz; /* 上下文大小512B */ + u32 bloomfilter_length; /* 布隆过滤器长度 */ + u8 bloomfilter_en; /* 布隆过滤器使能标志 */ + u8 rsvd; + u16 rsvd1; +}; + +/** + * @brief struct vbs_service_cap + * @details 描述VBS服务的能力的结构体 + */ +struct vbs_service_cap { + u16 vbs_max_volq; /* 最大的卷积队列数量 */ + u16 vbs_main_pf_enable : 1; /* 主PF是否启用 */ + u16 vbs_vsock_pf_enable : 1; /* vsock PF是否启用 */ + u16 vbs_fushion_queue_pf_enable : 1; /* 融合队列PF是否启用 */ + u16 vbs_host_dma_data_cos : 3; /* 主机DMA数据的优先级 */ + u16 vbs_vmio_cpy_data_cos : 3; /* VMIO复制数据的优先级 */ + u16 vbs_volq_cos : 3; /* 卷积队列的优先级 */ + u16 rsvd1 : 4; + u32 vbs_child_ctx_num; + u32 vbs_hash_bucket_num; +}; + +/** + * @brief struct migr_service_cap + * @details 迁移服务能力结构体 + */ +struct migr_service_cap { + u8 master_host_id; /* 主机ID */ + u8 rsvd[3]; +}; + +/** + * @brief struct 
ub_dev_cap_sdk_res + * @details sdk中申请资源相关信息 + */ +struct ub_dev_cap_sdk_res { + u32 max_jfc; /* 最大Job Function Controller数量 */ + u32 max_jfr; /* 最大Job Function Record数量 */ + u32 max_tp; /* 最大Transmission Port数量 */ + u32 max_tpg; /* 最大Transmission Port Group数量 */ + u32 max_jetty; /* 最大Jetty数量 */ + u32 max_jetty_grp; /* 最大Jetty Group数量 */ + u32 max_mpts; /* 最大Multi-Path Termination Point数量 */ + u32 max_vtp; /* 最大Virtual Transmission Port数量 */ + u32 max_gid; /* 最大GID数量 */ + u32 max_utp; /* 最大Unicast Transmission Port数量 */ + u32 max_jfrc; /* 最大Job Function Record Cache数量 */ + + u32 srqc_entry_sz; /* Shared Receive Queue Controller条目大小 */ + u32 mpt_entry_sz; /* Multi-Path Termination Point条目大小 */ + u32 cqc_entry_sz; /* Completion Queue Controller条目大小 */ + u32 qpc_entry_sz; /* Queue Pair Context条目大小 */ + + u32 dmtt_cl_start; /* Data Move To Target开始地址 */ + u32 dmtt_cl_end; /* Data Move To Target结束地址 */ + u32 dmtt_cl_sz; /* Data Move To Target大小 */ + + u32 cmtt_cl_start; /* Control Move To Target开始地址 */ + u32 cmtt_cl_end; /* Control Move To Target结束地址 */ + u32 cmtt_cl_sz; /* Control Move To Target大小 */ + + u32 wqe_cl_start; /* Work Request Element开始地址 */ + u32 wqe_cl_end; /* Work Request Element结束地址 */ + u32 wqe_cl_sz; /* Work Request Element大小 */ +}; + +/** + * @brief struct ub_net_dev_cap + * @details 网络设备能力结构体 + */ +struct ub_net_dev_cap { + u32 is_tpf; /* 是否支持透明转发 */ + u32 vf_cnt; /* 虚拟函数数量 */ + u32 port_cnt; /* 端口数量 */ + u32 max_mtu; /* 最大传输单元大小 */ + u32 comp_vector_cnt; /* 中断向量数量 */ +}; + +/** + * @brief struct ub_service_cap + * @details 定义了一个服务能力结构体 + */ +struct ub_service_cap { + struct ub_dev_cap_sdk_res sdk_res; /* 设备能力SDK响应 */ + struct ub_net_dev_cap net_dev_cap; /* 网络设备能力 */ +}; + +/** + * @brief struct jbof_service_cap + * @details 定义了一个结构体,用于描述JBOF服务的能力信息 + */ +struct jbof_service_cap { + u32 max_parent_qpc_num; /* 最大父QPC数量 */ + u32 max_child_qpc_num; /* 最大子QPC数量 */ + u32 parent_qpc_size; /* 父QPC大小 */ + u32 child_qpc_size; /* 子QPC大小 */ + u32 
hash_bucket_num; /* 哈希桶数量 */ +}; + +/** + * @brief struct dmmu_service_cap + * @details 定义了一个结构体,用于描述dmmu服务的能力信息 + */ +struct dmmu_service_cap { + u32 pasid_min; + u32 pasid_max; + u32 cl_start; + u32 cl_end; +}; + +/** + * @brief CFM(Common Function Module) service capability + */ +struct cfm_service_cap { + /* CCP - Congestion Control Platform */ + u32 ccp_max_child_ctx; + u16 ccp_child_ctx_sz; + u16 rsvd1; + + u64 rsvd[0xF]; +}; + +/** + * @brief struct dev_toe_svc_cap + * @details PF/VF ToE service resource structure + */ +struct dev_toe_svc_cap { + /* PF resources */ + u32 max_pctxs; /* Parent Context: max specifications 1M */ + u32 max_cctxt; + u32 max_cqs; + u16 max_srqs; + u32 srq_id_start; + u32 max_mpts; +}; + +/** + * @brief struct toe_service_cap + * @details ToE services + */ +struct toe_service_cap { + struct dev_toe_svc_cap dev_toe_cap; + + bool alloc_flag; + u32 pctx_sz; /* 1KB */ + u32 scqc_sz; /* 64B */ +}; + +/** + * @brief struct dev_fc_svc_cap + * @details PF FC service resource structure defined + */ +struct dev_fc_svc_cap { + /* PF Parent QPC */ + u32 max_parent_qpc_num; /* max number is 2048 */ + + /* PF Child QPC */ + u32 max_child_qpc_num; /* max number is 2048 */ + u32 child_qpc_id_start; + + /* PF SCQ */ + u32 scq_num; /* 16 */ + + /* PF supports SRQ */ + u32 srq_num; /* Number of SRQ is 2 */ + + u8 vp_id_start; + u8 vp_id_end; +}; + +/** + * @brief struct fc_service_cap + * @details FC services + */ +struct fc_service_cap { + struct dev_fc_svc_cap dev_fc_cap; + + /* Parent QPC */ + u32 parent_qpc_size; /* 256B */ + + /* Child QPC */ + u32 child_qpc_size; /* 256B */ + + /* SQ */ + u32 sqe_size; /* 128B(in linked list mode) */ + + /* SCQ */ + u32 scqc_size; /* Size of the Context 32B */ + u32 scqe_size; /* 64B */ + + /* SRQ */ + u32 srqc_size; /* Size of SRQ Context (64B) */ + u32 srqe_size; /* 32B */ +}; + +struct dev_roce_svc_own_cap { + u32 max_qps; + u32 max_cqs; + u32 max_srqs; + u32 max_mpts; + u32 max_drc_qps; + + u32 reserved_qps; 
/* roce_rsvd_qp */ + u32 reserved_qps_back; /* roce_rsvd_qp_back */ + u32 reserved_cqs; /* roce_rsvd_cq */ + u32 reserved_cqs_back; /* roce_rsvd_cq_back */ + u32 reserved_srqs; /* roce_rsvd_srq */ + u32 reserved_srqs_back; /* roce_rsvd_srq_back */ + u32 max_pd; /* roce_max_pd */ + u32 max_xrcd; /* roce_max_xrcd */ + u32 max_gid; /* roce_max_gid */ + + u32 cmtt_cl_start; + u32 cmtt_cl_end; + u32 cmtt_cl_sz; + + u32 dmtt_cl_start; + u32 dmtt_cl_end; + u32 dmtt_cl_sz; + + u32 wqe_cl_start; + u32 wqe_cl_end; + u32 wqe_cl_sz; + + u32 qpc_entry_sz; + u32 max_wqes; + u32 max_rq_sg; + u32 max_sq_inline_data_sz; + u32 max_rq_desc_sz; + + u32 rdmarc_entry_sz; + u32 max_qp_init_rdma; + u32 max_qp_dest_rdma; + + u32 max_srq_wqes; + u32 max_srq_sge; + u32 srqc_entry_sz; + + u32 max_msg_sz; /* Message size 2GB */ + u32 max_child_ctx_num; +}; + +/** + * @brief struct dev_rdma_svc_cap + * @details RDMA service capability structure + */ +struct dev_rdma_svc_cap { + struct dev_roce_svc_own_cap roce_own_cap; /* ROCE service unique parameter structure */ +}; + +/** + * @brief enum + * @details Defines the RDMA service capability flag + */ +enum { + RDMA_BMME_FLAG_LOCAL_INV = (1 << 0), + RDMA_BMME_FLAG_REMOTE_INV = (1 << 1), + RDMA_BMME_FLAG_FAST_REG_WR = (1 << 2), + RDMA_BMME_FLAG_RESERVED_LKEY = (1 << 3), + RDMA_BMME_FLAG_TYPE_2_WIN = (1 << 4), + RDMA_BMME_FLAG_WIN_TYPE_2B = (1 << 5), + + RDMA_DEV_CAP_FLAG_XRC = (1 << 6), + RDMA_DEV_CAP_FLAG_MEM_WINDOW = (1 << 7), + RDMA_DEV_CAP_FLAG_ATOMIC = (1 << 8), + RDMA_DEV_CAP_FLAG_APM = (1 << 9), +}; + +/** + * @brief struct rdma_service_cap + * @details RDMA services + */ +struct rdma_service_cap { + struct dev_rdma_svc_cap dev_rdma_cap; + + u8 log_mtt; /* 1. the number of MTT PA must be integer power of 2 + * 2. represented by logarithm. 
Each MTT table can + * contain 1, 2, 4, 8, and 16 PA) + */ + + u32 num_mtts; /* Number of MTT table (4M), + * is actually MTT seg number + */ + u32 log_mtt_seg; + u32 mtt_entry_sz; /* MTT table size 8B, including 1 PA(64bits) */ + u32 mpt_entry_sz; /* MPT table size (64B) */ + + u32 dmtt_cl_start; + u32 dmtt_cl_end; + u32 dmtt_cl_sz; + + u8 log_rdmarc; /* 1. the number of RDMArc PA must be integer power of 2 + * 2. represented by logarithm. Each MTT table can + * contain 1, 2, 4, 8, and 16 PA) + */ + + u32 reserved_qps; /* Number of reserved QP */ + u32 max_sq_sg; /* Maximum SGE number of SQ (8) */ + u32 max_sq_desc_sz; /* WQE maximum size of SQ(1024B), inline maximum + * size if 960B(944B aligned to the 960B), + * 960B=>wqebb alignment=>1024B + */ + u32 wqebb_size; /* Currently, the supports 64B and 128B, + * defined as 64Bytes + */ + + u32 max_cqes; /* Size of the depth of the CQ (64K-1) */ + u32 reserved_cqs; /* Number of reserved CQ */ + u32 cqc_entry_sz; /* Size of the CQC (64B/128B) */ + u32 cqe_size; /* Size of CQE (32B) */ + + u32 reserved_mrws; /* Number of reserved MR/MR Window */ + + u32 max_fmr_maps; /* max MAP of FMR, + * (1 << (32-ilog2(num_mpt)))-1; + */ + + u32 log_rdmarc_seg; /* table number of each RDMArc seg(3) */ + + /* Timeout time. 
Formula:Tr=4.096us*2(local_ca_ack_delay), [Tr,4Tr] */ + u32 local_ca_ack_delay; + u32 num_ports; /* Physical port number */ + + u32 db_page_size; /* Size of the DB (4KB) */ + u32 direct_wqe_size; /* Size of the DWQE (256B) */ + + u32 num_pds; /* Maximum number of PD (128K) */ + u32 reserved_pds; /* Number of reserved PD */ + u32 max_xrcds; /* Maximum number of xrcd (64K) */ + u32 reserved_xrcds; /* Number of reserved xrcd */ + + u32 max_gid_per_port; /* gid number (16) of each port */ + u32 gid_entry_sz; /* RoCE v2 GID table is 32B, + * compatible RoCE v1 expansion + */ + + u32 reserved_lkey; /* local_dma_lkey */ + u32 num_comp_vectors; /* Number of complete vector (32) */ + u32 page_size_cap; /* Supports 4K,8K,64K,256K,1M and 4M page_size */ + + u32 flags; /* RDMA some identity */ + u32 max_frpl_len; /* Maximum number of pages frmr registration */ + u32 max_pkeys; /* Number of supported pkey group */ +}; + +/** + * @brief struct dev_ovs_svc_cap + * @details PF OVS service resource structure defined + */ +struct dev_ovs_svc_cap { + u32 max_pctxs; /* Parent Context: max specifications 1M */ + u32 fake_vf_max_pctx; + u16 fake_vf_num; + u16 fake_vf_start_id; + u8 dynamic_qp_en; +}; + +/** + * @brief struct ovs_service_cap + * @details OVS services + */ +struct ovs_service_cap { + struct dev_ovs_svc_cap dev_ovs_cap; + + u32 pctx_sz; /* 512B */ +}; + +/** + * @brief struct dev_ipsec_svc_cap + * @details PF IPsec service resource structure defined + */ +struct dev_ipsec_svc_cap { + u32 max_sactxs; /* max IPsec SA context num */ + u16 max_cqs; /* max IPsec SCQC num */ + u16 rsvd0; + u32 max_spctxs; /* max IPsec SP context num */ + u32 sa_hash_bucket_num; + u32 sp_hash_bucket_num; +}; + +/** + * @brief struct ipsec_service_cap + * @details IPsec services + */ +struct ipsec_service_cap { + struct dev_ipsec_svc_cap dev_ipsec_cap; + u32 sactx_sz; /* 512B */ +}; + +struct hisdk5_dcb_state { + u8 dcb_on; /* dcb on or off */ + u8 default_cos; /* Default COS值, 合法范围0~7 */ + u8 
trust; /* trust状态, 0-PCP模式, 1-DSCP模式 */ + u8 rsvd1; /* reserved */ + u8 pcp2cos[HISDK5_DCB_UP_MAX]; /* PCP到COS的映射 */ + u8 dscp2cos[64]; /* DSCP到COS的映射值 */ + u32 rsvd2[7]; /* reserved */ +}; + +enum hisdk5_dcb_state_op { + HISDK5_DCB_STATE_GET, + HISDK5_DCB_STATE_SET, +}; + +/** + * @brief struct irq_info + * @details Defines the IRQ information structure + */ +struct irq_info { + u16 msix_entry_idx; /* IRQ corresponding index number */ + u32 irq_id; /* the IRQ number from OS */ +}; + +/** + * @brief struct interrupt_info + * @details 中断信息结构体 + */ +struct interrupt_info { + u32 lli_set; /* low latency interrupt enable */ + u32 interrupt_coalesc_set; /* interrupt coalesce enable */ + u16 msix_index; /* msix index */ + u8 lli_credit_limit; /* low latency interrupt credit value */ + u8 lli_timer_cfg; /* low latency interrupt timer value */ + u8 pending_limt; + u8 coalesc_timer_cfg; /* interrupt coalesce timer value*/ + u8 resend_timer_cfg; /* interrupt resend timer value*/ +}; + +/** + * @brief enum hinic5_msix_state + * @details 定义中断消息传递(MSI-X)的状态 + */ +enum hinic5_msix_state { + HINIC5_MSIX_ENABLE, /* 启用MSI-X */ + HINIC5_MSIX_DISABLE, /* 禁用MSI-X */ +}; + +/** + * @brief enum hinic5_msix_auto_mask + * @details 定义中断处理的枚举类型 + */ +enum hinic5_msix_auto_mask { + HINIC5_CLR_MSIX_AUTO_MASK, /* 清除MSIX自动掩码 */ + HINIC5_SET_MSIX_AUTO_MASK, /* 设置MSIX自动掩码 */ +}; + +/** + * @brief enum func_type + * @details 表示函数的类型 + */ +enum func_type { + TYPE_PF, + TYPE_VF, + TYPE_PPF, + TYPE_UNKNOWN, +}; + +/** + * @brief struct hinic5_init_para + * @details 用于初始化hinic5的参数结构体 + */ +struct hinic5_init_para { + /* Record hinic_pcidev or NDIS_Adapter pointer address */ + void *adapter_hdl; /* 记录hinic_pcidev或NDIS_Adapter指针地址 */ +#ifdef __UEFI__ + /** + * Record pcidev or Handler pointer address + * for example: ioremap interface input parameter + */ + void *busdev_hdl; /* 记录pcidev或ub dev指针地址 */ +#endif + /** + * Record pcidev->dev or Handler pointer address which used to + * dma address 
application or dev_err print the parameter + */ + void *dev_hdl; /* 记录pcidev->dev或Handler指针地址, + * 用于DMA地址应用或dev_err打印参数 + */ + + void *fers2_reg_base; /* FERS2 register base address */ + /* Configure virtual address, PF is bar1, VF is bar0/1 */ + void *cfg_reg_base; /* 配置虚拟地址,PF是bar1,VF是bar0/1 */ + /* interrupt configuration register address, PF is bar2, VF is bar2/3 */ + void *intr_reg_base; /* 中断配置寄存器地址,PF是bar2,VF是bar2/3 */ + /* for PF bar3 virtual address, if function is VF should set to NULL */ + void *mgmt_reg_base; /* 对于PF的bar3虚拟地址,如果函数是VF,应设置为NULL */ + + u64 db_dwqe_len; /* 用于记录doorbell和direct wqe的长度 */ + u64 db_base_phy; /* doorbell的基础物理地址 */ + /* the doorbell address, bar4/5 higher 4M space */ + void *db_base; /* doorbell地址,bar4/5高4M空间 */ + /* direct wqe 4M, follow the doorbell address space */ + void *dwqe_mapping; /* 直接wqe 4M,跟随doorbell地址空间 */ + void **hwdev; /* 硬件设备指针 */ + void *chip_node; /* 芯片节点指针 */ + /* if use polling mode, set it true */ + bool poll; /* 如果使用轮询模式,设置为true */ + + u16 probe_fault_level; /* 探测故障级别 */ +}; + +#define HINIC5_DB_DWQE_SIZE 0x100000000 /* Maximum support size of BAR45, DB & DWQE are both half */ + +#define HINIC5_DB_PAGE_SIZE 0x00001000ULL /* db page size: 4K */ +#define HINIC5_DWQE_OFFSET 0x00000800ULL /* dwqe page size: 4K */ + +/** + * @brief 定义一个宏,用于计算最大的数据库区域数量 + * @param HINIC5_DB_MAX_AREAS 表示最大的数据库区域数量 + * @param HINIC5_DB_DWQE_SIZE 表示每个数据库区域的大小 + * @param HINIC5_DB_PAGE_SIZE 表示每个数据库页的大小 + */ +#define HINIC5_DB_MAX_AREAS (HINIC5_DB_DWQE_SIZE / HINIC5_DB_PAGE_SIZE) + +#ifndef IFNAMSIZ +#define IFNAMSIZ 16 +#endif +#define MAX_FUNCTION_NUM 4096 + +#define HINIC5_SYNFW_TIME_PERIOD (60 * 60 * 1000) /* 表示同步固件的时间周期 */ + +#define FAULT_SHOW_STR_LEN 16 + +/** + * @brief enum hinic5_fault_source_type + * @details 错误源类型枚举 + */ +enum hinic5_fault_source_type { + /* same as FAULT_TYPE_CHIP */ + HINIC5_FAULT_SRC_HW_MGMT_CHIP = 0, /* 硬件管理芯片错误 */ + /* same as FAULT_TYPE_UCODE */ + HINIC5_FAULT_SRC_HW_MGMT_UCODE, /* 硬件管理微码错误 */ + /* 
same as FAULT_TYPE_MEM_RD_TIMEOUT */ + HINIC5_FAULT_SRC_HW_MGMT_MEM_RD_TIMEOUT, /* 硬件管理内存读取超时错误 */ + /* same as FAULT_TYPE_MEM_WR_TIMEOUT */ + HINIC5_FAULT_SRC_HW_MGMT_MEM_WR_TIMEOUT, /* 硬件管理内存写入超时错误 */ + /* same as FAULT_TYPE_REG_RD_TIMEOUT */ + HINIC5_FAULT_SRC_HW_MGMT_REG_RD_TIMEOUT, /* 硬件管理寄存器读取超时错误 */ + /* same as FAULT_TYPE_REG_WR_TIMEOUT */ + HINIC5_FAULT_SRC_HW_MGMT_REG_WR_TIMEOUT, /* 硬件管理寄存器写入超时错误 */ + HINIC5_FAULT_SRC_SW_MGMT_UCODE, /* 软件管理微码错误 */ + HINIC5_FAULT_SRC_MGMT_WATCHDOG, /* 管理看门狗错误 */ + HINIC5_FAULT_SRC_MGMT_RESET = 8, /* 管理重置错误 */ + HINIC5_FAULT_SRC_HW_PHY_FAULT, /* 硬件PHY错误 */ + HINIC5_FAULT_SRC_TX_PAUSE_EXCP, /* 发送暂停异常错误 */ + HINIC5_FAULT_SRC_PCIE_LINK_DOWN = 20, /* PCIE链路断开错误 */ + HINIC5_FAULT_SRC_HOST_HEARTBEAT_LOST = 21, /* 主机心跳丢失错误 */ + HINIC5_FAULT_SRC_TX_TIMEOUT, /* 发送超时错误 */ + HINIC5_FAULT_SRC_TYPE_MAX, /* 错误源类型最大值 */ +}; + +/** + * @brief union hinic5_fault_hw_mgmt + * @details 用于处理硬件管理中的错误信息 + */ +union hinic5_fault_hw_mgmt { + u32 val[4]; + /* valid only type == FAULT_TYPE_CHIP */ + struct { + u8 node_id; + u8 err_level; /* enum hinic_fault_err_level */ + u16 err_type; + u32 err_csr_addr; + u32 err_csr_value; + /* func_id valid only if err_level == FAULT_LEVEL_SERIOUS_FLR */ + u8 rsvd1; + u8 host_id; + u16 func_id; + } chip; + + /* valid only if type == FAULT_TYPE_UCODE */ + struct { + u8 cause_id; + u8 core_id; + u8 c_id; + u8 rsvd3; + u32 epc; + u32 rsvd4; + u32 rsvd5; + } ucode; + + /* valid only if type == FAULT_TYPE_MEM_RD_TIMEOUT || + * FAULT_TYPE_MEM_WR_TIMEOUT + */ + struct { + u32 err_csr_ctrl; + u32 err_csr_data; + u32 ctrl_tab; + u32 mem_index; + } mem_timeout; + + /* valid only if type == FAULT_TYPE_REG_RD_TIMEOUT || + * FAULT_TYPE_REG_WR_TIMEOUT + */ + struct { + u32 err_csr; + u32 rsvd6; + u32 rsvd7; + u32 rsvd8; + } reg_timeout; + + struct { + /* 0: read; 1: write */ + u8 op_type; + u8 port_id; + u8 dev_ad; + u8 rsvd9; + u32 csr_addr; + u32 op_data; + u32 rsvd10; + } phy_fault; +}; + +/* defined by chip */ +/** + * 
@brief struct hinic5_fault_event + * @details 用于表示HINIC5的故障事件 + */ +struct hinic5_fault_event { + /* enum hinic_fault_type */ + u8 type; /* 故障类型 */ + u8 fault_level; /* SDK写入故障级别,用于ULDOVENT */ + u8 rsvd0[2]; + union hinic5_fault_hw_mgmt event; /* 故障硬件管理事件 */ +}; + +/** + * @brief struct hinic5_cmd_fault_event + * @details 定义了一个用于存储故障事件的结构体 + */ +struct hinic5_cmd_fault_event { + u8 status; /* 状态字段 */ + u8 version; /* 版本字段 */ + u8 rsvd0[6]; + struct hinic5_fault_event event; /* 故障事件结构体 */ +}; + +/** + * @brief struct hinic5_sriov_state_info + * @details 定义一个结构体,用于存储SR-IOV的状态信息 + */ +struct hinic5_sriov_state_info { + u8 enable; /* 表示SR-IOV是否启用 */ + u16 num_vfs; /* 表示虚拟函数的数量 */ + u32 vf_id; /* 待操作的func id */ +}; + +/** + * @brief enum hinic5_comm_event_type + * @details 通信事件类型枚举 + */ +enum hinic5_comm_event_type { + EVENT_COMM_PCIE_LINK_DOWN, /* PCIE链路断开事件 */ + EVENT_COMM_HEART_LOST, /* 心跳丢失事件 */ + EVENT_COMM_FAULT, /* 设备故障事件 */ + EVENT_COMM_SRIOV_STATE_CHANGE, /* SR-IOV状态变化事件 */ + EVENT_COMM_CARD_REMOVE, /* 设备移除事件 */ + EVENT_COMM_MGMT_WATCHDOG, /* 管理监视器看门狗事件 */ +}; + +/** + * @brief enum hinic5_event_service_type + * @details 定义了事件服务类型的枚举 + */ +enum hinic5_event_service_type { + EVENT_SRV_COMM = 0, /* 通用事件服务类型 */ +#define SERVICE_EVENT_BASE (EVENT_SRV_COMM + 1) /* 定义服务事件基础值 */ + /* 网络接口卡事件服务类型 */ + EVENT_SRV_NIC = SERVICE_EVENT_BASE + SERVICE_T_NIC, + /* 迁移事件服务类型 */ + EVENT_SRV_MIGRATE = SERVICE_EVENT_BASE + SERVICE_T_MIGRATE, +}; + +/** + * @brief 定义一个宏,用于生成服务事件类型的编码 + * @param svc 服务类型,取值范围为0-65535 + * @param type 事件类型,取值范围为0-65535 + * + * @details 这个宏将服务类型和事件类型的值合并为一个32位的无符号整数, + * 其中服务类型占用高16位,事件类型占用低16位。 + * + * @return 返回生成的事件类型编码 + */ +#define HINIC5_SRV_EVENT_TYPE(svc, type) ((((u32)(svc)) << 16) | (type)) +/** + * @brief struct hinic5_event_info + * @details 定义了hinic5事件信息的结构体 + */ +struct hinic5_event_info { + u16 service; /* 枚举类型hinic5_event_service_type */ + u16 type; /* 事件类型 */ + u8 event_data[104]; /* 事件数据,最大长度104字节 */ +}; + +/** + * @brief 
hinic5_set_msix_auto_mask - set msix auto mask function + * @param hwdev: device pointer to hwdev + * @param msix_idx: msix id + * @param flag: msix auto_mask flag, 1-enable, 2-clear + */ +void hinic5_set_msix_auto_mask_state(void *hwdev, u16 msix_idx, + enum hinic5_msix_auto_mask flag); + +/** + * @brief hinic5_set_msix_state - set msix state + * @param hwdev: device pointer to hwdev + * @param msix_idx: msix id + * @param flag: msix state flag, 0-enable, 1-disable + */ +void hinic5_set_msix_state(void *hwdev, u16 msix_idx, + enum hinic5_msix_state flag); + +/** + * @brief hinic5_misx_intr_clear_resend_bit - clear msix resend bit + * @param hwdev: device pointer to hwdev + * @param msix_idx: msix id + * @param clear_resend_en: 1-clear + */ +void hinic5_misx_intr_clear_resend_bit(void *hwdev, u16 msix_idx, + u8 clear_resend_en); + +/** + * @brief hinic5_set_interrupt_cfg_direct - set interrupt cfg + * @param hwdev: device pointer to hwdev + * @param interrupt_para: interrupt info + * @param channel: channel id + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_set_interrupt_cfg_direct(void *hwdev, + struct interrupt_info *info, + u16 channel); +/** + * @brief Set interrupt cfg + * @param udkdev: device pointer to udkdev + * @param interrupt_info: Interrupt info + * @param channel: command message channel id is defined in enum hinic5_channel_id + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_set_interrupt_cfg(void *dev, struct interrupt_info info, + u16 channel); + +/** + * @brief hinic5_get_interrupt_cfg - get interrupt cfg + * @param dev: device pointer to hwdev + * @param info: interrupt info + * @param channel: channel id + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_get_interrupt_cfg(void *dev, struct interrupt_info *info, + u16 channel); + +/** + * @brief hinic5_alloc_irqs - alloc irq + * @param hwdev: device pointer to hwdev 
+ * @param type: service type + * @param num: alloc number + * @param irq_info_array: alloc irq info + * @param act_num: alloc actual number + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_alloc_irqs(void *hwdev, enum hinic5_service_type type, u16 num, + struct irq_info *irq_info_array, u16 *act_num); + +/** + * @brief hinic5_free_irq - free irq + * @param hwdev: device pointer to hwdev + * @param type: service type + * @param irq_id: irq id + */ +void hinic5_free_irq(void *hwdev, enum hinic5_service_type type, u32 irq_id); + +/** + * @brief hinic5_alloc_ceqs - alloc ceqs + * @param hwdev: device pointer to hwdev + * @param type: service type + * @param num: alloc ceq number + * @param ceq_id_array: alloc ceq_id_array + * @param act_num: alloc actual number + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_alloc_ceqs(void *hwdev, enum hinic5_service_type type, int num, + int *ceq_id_array, int *act_num); + +/** + * @brief hinic5_free_irq - free ceq + * @param hwdev: device pointer to hwdev + * @param type: service type + * @param irq_id: ceq id + */ +void hinic5_free_ceq(void *hwdev, enum hinic5_service_type type, int ceq_id); + +/** + * @brief hinic5_ppf_idx - get ppf id + * @param hwdev: device pointer to hwdev + * + * @return ppf id + */ +u8 hinic5_ppf_idx(void *hwdev); + +#ifndef __UEFI__ +/** + * @brief hinic5_write_ts_data - write time to hw rtc + * @param hwdev: device pointer to hwdev + * @param ts: time to write + */ +void hinic5_write_ts_data(void *hwdev, const struct timespec64 *ts); + +/** + * @brief hinic5_ts_up_en - enable time stamp update + * @param hwdev: device pointer to hwdev + * @param flags: update flags + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_ts_up_en(void *hwdev, u32 flags); + +/** + * @brief hinic5_read_ts_data - read hw rtc time + * @param hwdev: device pointer to hwdev + * @param ts: time read 
from hw + */ +void hinic5_read_ts_data(void *hwdev, struct timespec64 *ts); + +/** + * @brief hinic5_set_ptp_inc - set inc val per cycle + * @param hwdev: device pointer to hwdev + * @param inc_val: inc val + */ +void hinic5_set_ptp_inc(void *hwdev, u32 inc_val); + +/** + * @brief hinic5_ptp_ts_update - hw rtc time update + * @param hwdev: device pointer to hwdev + * @param delta_ns: delta time in ns + */ +void hinic5_ptp_ts_update(void *hwdev, s32 delta_ns); + +/** + * @brief hinic5_get_non_ptp_chip_time - get the chip time + * @param hwdev: device pointer to hwdev + * @param chip_time: get chip_time pointer + * + * @return get success or fail + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_get_non_ptp_chip_time(void *dev, u64 *chip_time); + +/** + * @brief hinic5_get_non_ptp_time_diff - get the diff of + * sys time and chip time + * @param hwdev: device pointer to hwdev + * @param time_diff: get time_diff pointer + * + * @return get success or fail + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_get_non_ptp_time_diff(void *dev, s64 *time_diff); +#endif + +/** + * @brief hinic5_get_chip_present_flag - get chip present flag + * @param hwdev: device pointer to hwdev + * + * @details This state is maintained by driver. + * The chip will be absent when + * - link down + * - PCI shutdown + * - PCI reset done + * + * Re-probe the function to redetect the absent chip. 
+ * + * @return 1 - if chip present, 0 - if chip absent + */ +int hinic5_get_chip_present_flag(const void *hwdev); + +/** + * @brief hinic5_get_heartbeat_status - get heartbeat status + * @param hwdev: device pointer to hwdev + * + * @return heartbeat status + * @retval 0 normal + * @retval 1 heart lost + * @retval 0xFFFFFFFF link down + */ +u32 hinic5_get_heartbeat_status(void *hwdev); + +/** + * @brief hinic5_support_nic - function support nic + * @param hwdev: device pointer to hwdev + * @param cap: nic service capbility + * + * @return 设备是否支持网络接口卡服务 + * @retval true: function support nic + * @retval false: function not support nic + */ +bool hinic5_support_nic(void *hwdev, struct nic_service_cap *cap); + +/** + * @brief hinic5_support_ipsec - function support ipsec + * @param hwdev: device pointer to hwdev + * @param cap: ipsec service capbility + * + * @return 硬件设备是否支持IPsec服务 + * @retval true: function support ipsec + * @retval false: function not support ipsec + */ +bool hinic5_support_ipsec(void *hwdev, struct ipsec_service_cap *cap); + +/** + * @brief hinic5_support_macsec - function support macsec + * @param hwdev: device pointer to hwdev + * + * @return 硬件设备是否支持MACsec服务 + * @retval true: function support macsec + * @retval false: function not support macsec + */ +bool hinic5_support_macsec(void *hwdev); + +/** + * @brief hinic5_support_roce - function support roce + * @param hwdev: device pointer to hwdev + * @param cap: roce service capbility + * + * @return 设备是否支持RoCE + * @retval true: function support roce + * @retval false: function not support roce + */ +bool hinic5_support_roce(void *hwdev, struct rdma_service_cap *cap); + +/** + * @brief hinic5_support_fc - function support fc + * @param hwdev: device pointer to hwdev + * @param cap: fc service capbility + * + * @return 设备是否支持fc + * @retval true: function support fc + * @retval false: function not support fc + */ +bool hinic5_support_fc(void *hwdev, struct fc_service_cap *cap); + +/** + * @brief 
hinic5_support_rdma - function support rdma + * @param hwdev: device pointer to hwdev + * @param cap: rdma service capability + * + * @return 设备是否支持rdma + * @retval true: function support rdma + * @retval false: function not support rdma + */ +bool hinic5_support_rdma(void *hwdev, struct rdma_service_cap *cap); + +/** + * @brief hinic5_is_rdma_en - is rdma enable + * @param hwdev: device pointer to hwdev + * @param cap: rdma service capability + * + * @return RDMA服务是否启用 + * @retval true: rdma is enabled + * @retval false: rdma is disabled + */ +bool hinic5_is_rdma_en(void *hwdev, struct rdma_service_cap *cap); + +/** + * @brief hinic5_support_ovs - function support ovs + * @param hwdev: device pointer to hwdev + * @param cap: ovs service capability + * + * @return 设备是否支持ovs + * @retval true: function support ovs + * @retval false: function not support ovs + */ +bool hinic5_support_ovs(void *hwdev, struct ovs_service_cap *cap); + +/** + * @brief hinic5_support_vbs - function support vbs + * @param hwdev: device pointer to hwdev + * @param cap: vbs service capability + * + * @return 设备是否支持vbs + * @retval true: function support vbs + * @retval false: function not support vbs + */ +bool hinic5_support_vbs(void *hwdev, struct vbs_service_cap *cap); + +/** + * @brief hinic5_support_toe - function support toe + * @param hwdev: device pointer to hwdev + * @param cap: toe service capability + * + * @return 设备是否支持toe + * @retval true: function support toe + * @retval false: function not support toe + */ +bool hinic5_support_toe(void *hwdev, struct toe_service_cap *cap); + +/** + * @brief hinic5_support_ppa - function support ppa + * @param hwdev: device pointer to hwdev + * @param cap: ppa service capability + * + * @return 设备是否支持ppa + * @retval true: function support ppa + * @retval false: function not support ppa + */ +bool hinic5_support_ppa(void *hwdev, struct ppa_service_cap *cap); + +/** + * @brief hinic5_support_migr - function support migrate + * @param hwdev: device pointer to hwdev + * @param cap: migrate service capability + * + * 
@return 设备是否支持migrate + * @retval true: function support migrate + * @retval false: function not support migrate + */ +bool hinic5_support_migr(void *hwdev, struct migr_service_cap *cap); + +/** + * @brief hinic5_support_ub - function support ub + * @param hwdev: device pointer to hwdev + * @param cap: ub service capability + * + * @return 设备是否支持ub + * @retval true: function support ub + * @retval false: function not support ub + */ +bool hinic5_support_ub(void *hwdev, struct ub_service_cap *cap); + +/** + * @brief hinic5_support_jbof - function support jbof + * @param hwdev: device pointer to hwdev + * @param cap: jbof service capability + * + * @return 设备是否支持jbof + * @retval true: function support jbof + * @retval false: function not support jbof + */ +bool hinic5_support_jbof(void *hwdev, struct jbof_service_cap *cap); + +/** + * @brief hinic5_support_vroce - function support vroce + * @param hwdev: device pointer to hwdev + * @param cap: roce service capability + * + * @return 函数是否支持vroce + * @retval true: function support roce + * @retval false: function not support roce + */ +bool hinic5_support_vroce(void *hwdev, struct rdma_service_cap *cap); + +/** + * @brief hinic5_support_dmmu - function support dmmu + * @param hwdev: device pointer to hwdev + * @param cap: dmmu service capability + * @retval true: function support dmmu + * @retval false: function not support dmmu + */ +bool hinic5_support_dmmu(void *hwdev, struct dmmu_service_cap *cap); + +/** + * @brief hinic5_support_bifur - function support bifur + * @param hwdev: device pointer to hwdev + * @retval true: function support bifur + * @retval false: function not support bifur + */ +bool hinic5_support_bifur(void *hwdev); + +/** + * @brief hinic5_support_hihtr - function support hihtr + * @param hwdev: device pointer to hwdev + * @retval true: function support hihtr + * @retval false: function not support hihtr + */ +bool hinic5_support_hihtr(void *hwdev); + +/** + * @brief hinic5_sync_time - sync time to hardware + * @param hwdev: device pointer to hwdev + * @param time: time to sync + * + * @return 是否成功 + * 
@retval zero: success + * @retval non-zero: failure + */ +int hinic5_sync_time(void *hwdev, u64 time); + +/** + * @brief hinic5_disable_mgmt_msg_report - disable mgmt report msg + * @param hwdev: device pointer to hwdev + */ +void hinic5_disable_mgmt_msg_report(void *hwdev); + +/** + * @brief hinic5_func_for_mgmt - get function service type + * @param hwdev: device pointer to hwdev + * + * @return 函数是否支持管理功能 + * @retval true: function for mgmt + * @retval false: function is not for mgmt + */ +bool hinic5_func_for_mgmt(void *hwdev); + +/** + * @brief hinic5_set_pcie_order_cfg - set pcie order cfg + * @param handle: device pointer to hwdev + */ +void hinic5_set_pcie_order_cfg(void *handle); + +/** + * @brief hinic5_init_hwdev - call to init hwdev + * @param para: device pointer to para + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_init_hwdev(struct hinic5_init_para *para); + +/** + * @brief hinic5_free_hwdev - free hwdev + * @param hwdev: device pointer to hwdev + */ +void hinic5_free_hwdev(void *hwdev); + +/** + * @brief hinic5_detect_hw_present - detect hardware present + * @param hwdev: device pointer to hwdev + */ +void hinic5_detect_hw_present(void *hwdev); + +/** + * @brief hinic5_record_pcie_error - record pcie error + * @param hwdev: device pointer to hwdev + */ +void hinic5_record_pcie_error(void *hwdev); + +/** + * @brief hinic5_shutdown_hwdev - shutdown hwdev + * @param hwdev: device pointer to hwdev + */ +void hinic5_shutdown_hwdev(void *hwdev); + +/** + * @brief hinic5_set_ppf_flr_type - set ppf flr type + * @param hwdev: device pointer to hwdev + * @param flr_type: ppf flr type + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_set_ppf_flr_type(void *hwdev, enum hinic5_ppf_flr_type flr_type); + +/** + * @brief hinic5_set_ppf_tbl_hotreplace_flag - set os hotreplace flag in ppf function table + * @param hwdev: device pointer to hwdev + * @param flag : os 
hotreplace flag : 0-not in os hotreplace 1-in os hotreplace + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_set_ppf_tbl_hotreplace_flag(void *hwdev, u8 flag); + +/** + * @brief hinic5_get_mgmt_version - get management cpu version + * @param hwdev: device pointer to hwdev + * @param mgmt_ver: output management version + * @param version_size: size of the mgmt_ver output buffer + * @param channel: channel id + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_get_mgmt_version(void *hwdev, u8 *mgmt_ver, u8 version_size, + u16 channel); + +/** + * @brief hinic5_get_fw_version - get firmware version + * @param hwdev: device pointer to hwdev + * @param fw_ver: firmware version + * @param channel: channel id + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_get_fw_version(void *hwdev, struct hinic5_fw_version *fw_ver, + u16 channel); + +/** + * @brief hinic5_global_func_id - get global function id + * @param hwdev: device pointer to hwdev + * + * @return global function id + */ +u16 hinic5_global_func_id(void *hwdev); + +/** + * @brief hinic5_ppf_idx - get ppf function id + * @param hwdev: device pointer to hwdev + * + * @return ppf function id + */ +u8 hinic5_ppf_idx(void *hwdev); + +/** + * @brief hinic5_vector_to_eqn - vector to eq id + * @param hwdev: device pointer to hwdev + * @param type: service type + * @param vector: vector + * + * @return eq id + */ +int hinic5_vector_to_eqn(void *hwdev, enum hinic5_service_type type, + int vector); + +/** + * @brief hinic5_glb_pf_vf_offset - get vf offset id of pf + * @param hwdev: device pointer to hwdev + * + * @return vf offset id + */ +u16 hinic5_glb_pf_vf_offset(void *hwdev); + +/** + * @brief hinic5_pf_id_of_vf - get pf id of vf + * @param hwdev: device pointer to hwdev + * + * @return pf id + */ +u8 hinic5_pf_id_of_vf(void *hwdev); + +/** + * @brief hinic5_func_type - get function type + * @param hwdev: device pointer to hwdev + * + * @return 
function type + */ +enum func_type hinic5_func_type(void *hwdev); + +/** + * @brief hinic5_get_stateful_enable - get stateful status + * @param hwdev: device pointer to hwdev + * + * @return stateful enable status + */ +bool hinic5_get_stateful_enable(void *hwdev); + +/** + * @brief hinic5_host_oq_id_mask - get oq id + * @param hwdev: device pointer to hwdev + * + * @return oq id + */ +u8 hinic5_host_oq_id_mask(void *hwdev); + +/** + * @brief hinic5_host_id - get host id + * @param hwdev: device pointer to hwdev + * + * @return host id + */ +u8 hinic5_host_id(void *hwdev); + +/** + * @brief hinic5_in_spu - if in spu + * @param hwdev: device pointer to hwdev + * + * @return if in spu + */ +bool hinic5_in_spu(void *hwdev); + +/** + * @brief hinic5_host_total_func - get host total function number + * @param hwdev: device pointer to hwdev + * + * @return 计算或获取到的功能总数 + * @retval non-zero: host total function number + * @retval zero: failure + */ +u16 hinic5_host_total_func(void *hwdev); + +/** + * @brief hinic5_func_max_nic_qnum - get max nic queue number + * @param hwdev: device pointer to hwdev + * + * @return 网络设备的最大队列数量 + * @retval non-zero: max nic queue number + * @retval zero: failure + */ +u16 hinic5_func_max_nic_qnum(void *hwdev); + +/** + * @brief 获取函数cos掩码模式 + * @param hwdev 硬件设备指针 + * + * @return u8 返回cos掩码模式 + */ +u8 hinic5_func_cos_mask_mode(void *hwdev); + +/** + * @brief 获取函数默认的cos值 + * @param hwdev 硬件设备指针 + * + * @return 返回cos值 + */ +u8 hinic5_func_dev_default_cos(void *hwdev); + +/** + * @brief hinic5_func_max_qnum - get max queue number + * @param hwdev: device pointer to hwdev + * + * @return 获取设备的最大队列数 + * @retval non-zero: max queue number + * @retval zero: failure + */ +u16 hinic5_func_max_qnum(void *hwdev); + +/** + * @brief hinic5_ep_id - get ep id + * @param hwdev: device pointer to hwdev + * + * @return ep id + */ +u8 hinic5_ep_id(void *hwdev); /* Obtain service_cap.ep_id */ + +/** + * @brief hinic5_er_id - get er id + * @param hwdev: device pointer 
to hwdev + * + * @return er id + */ +u8 hinic5_er_id(void *hwdev); /* Obtain service_cap.er_id */ + +/** + * @brief hinic5_physical_port_id - get physical port id + * @param hwdev: device pointer to hwdev + * + * @return physical port id + */ +u8 hinic5_physical_port_id(void *hwdev); /* Obtain service_cap.port_id */ + +/** + * @brief hinic5_func_max_vf - get vf number + * @param hwdev: device pointer to hwdev + * + * @return 返回最大虚拟功能数量 + * @retval non-zero: vf number + * @retval zero: failure + */ +u16 hinic5_func_max_vf(void *hwdev); /* Obtain service_cap.max_vf */ + +/* + * @brief hinic5_max_pf_num - get global max pf number + */ +u8 hinic5_max_pf_num(void *hwdev); + +/** + * @brief hinic5_host_pf_num - get current host pf number + * @param hwdev: device pointer to hwdev + * + * @return 返回获取到的PF数量 + * @retval non-zero: pf number + * @retval zero: failure + */ +u32 hinic5_host_pf_num(void *hwdev); /* Obtain service_cap.pf_num */ + +/** + * @brief hinic5_host_pf_id_start - get current host pf id start + * @param hwdev: device pointer to hwdev + * + * @return 获取设备的PF数量 + * @retval non-zero: pf id start + * @retval zero: failure + */ +u32 hinic5_host_pf_id_start(void *hwdev); /* Obtain service_cap.pf_num */ + +/** + * @brief hinic5_pcie_itf_id - get pcie port id + * @param hwdev: device pointer to hwdev + * + * @return pcie port id + */ +u8 hinic5_pcie_itf_id(void *hwdev); + +/** + * @brief hinic5_vf_in_pf - get vf offset in pf + * @param hwdev: device pointer to hwdev + * + * @return vf offset in pf + */ +u8 hinic5_vf_in_pf(void *hwdev); + +/** + * @brief hinic5_cos_valid_bitmap - get cos valid bitmap + * @param hwdev: device pointer to hwdev + * + * @return 是否成功 + * @retval non-zero: valid cos bit map + * @retval zero: failure + */ +int hinic5_cos_valid_bitmap(void *hwdev, u8 *func_dft_cos, u8 *port_cos_bitmap); + +/** + * @brief hinic5_stateful_init - init stateful resource + * @param hwdev: device pointer to hwdev + * + * @return 是否成功 + * @retval zero: success + 
* @retval non-zero: failure + */ +int hinic5_stateful_init(void *hwdev); + +/** + * @brief hinic5_stateful_deinit - deinit stateful resource + * @param hwdev: device pointer to hwdev + */ +void hinic5_stateful_deinit(void *hwdev); + +/** + * @brief hinic5_free_stateful - sdk remove free stateful resource + * @param hwdev: device pointer to hwdev + */ +void hinic5_free_stateful(void *hwdev); + +/** + * @brief hinic5_need_init_stateful_default - get need init stateful default + * @param hwdev: device pointer to hwdev + */ +bool hinic5_need_init_stateful_default(void *hwdev); + +/** + * @brief hinic5_get_card_present_state - get card present state + * @param hwdev: device pointer to hwdev + * @param card_present_state: return card present state + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_get_card_present_state(void *hwdev, bool *card_present_state); + +/** + * @brief hinic5_func_rx_tx_flush - function flush + * @param hwdev: device pointer to hwdev + * @param channel: channel id + * @param flr_timeout_ms: flr超时时间,以ms为单位 + * + * @attention 当flr_timeout_ms为0时,使用flr超时默认值HINIC5_FLR_TIMEOUT(40s) + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_func_rx_tx_flush(void *hwdev, u16 channel, bool wait_io, u32 flr_timeout_ms); + +/** + * @brief hinic5_flush_mgmt_workq - when remove function should flush work queue + * @param hwdev: device pointer to hwdev + */ +void hinic5_flush_mgmt_workq(void *hwdev); + +/** + * @brief get toe ceq num + * @param udkdev: device pointer to udkdev + * + * @return ceq number + */ +u8 hinic5_ceq_num(void *hwdev); + +/** + * @brief hinic5_intr_num get interrupt num + * @param udkdev: device pointer to udkdev + * + * @return interrupt number + */ +u16 hinic5_intr_num(void *hwdev); + +/** + * @brief hinic5_flexq_en get flexq en + * @param udkdev: device pointer to udkdev + * + * @return flexq enable: 1:enable, 0: disable + */ +u8 hinic5_flexq_en(void *hwdev); 
+ +/** + * @brief hinic5_fault_event_report - report fault event + * @param hwdev: device pointer to hwdev + * @param src: fault event source, reference to enum hinic5_fault_source_type + * @param level: fault level, reference to enum hinic5_fault_err_level + */ +void hinic5_fault_event_report(void *hwdev, u16 src, u16 level); + +/** + * @brief hinic5_probe_success - notify device probe successfull + * @param hwdev: device pointer to hwdev + */ +void hinic5_probe_success(void *hwdev); + +/** + * @brief hinic5_set_func_svc_used_state - set function service used state + * @param hwdev: device pointer to hwdev + * @param svc_type: service type + * @param state: function used state + * @param channel: channel id + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_set_func_svc_used_state(void *hwdev, u16 svc_type, u8 state, + u16 channel); + +/** + * @brief hinic5_get_self_test_result - get self test result + * @param hwdev: device pointer to hwdev + * + * @return self test result + */ +u32 hinic5_get_self_test_result(void *hwdev); + +/** + * @brief set_slave_host_enable - set slave host enable + * @param hwdev: device pointer to hwdev + * @param host_id: set host id + * @param slave_en-zero: slave is enable + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +void set_slave_host_enable(void *hwdev, u8 host_id, bool enable); + +/** + * @brief hinic5_get_slave_bitmap - get slave host bitmap + * @param hwdev: device pointer to hwdev + * @param slave_host_bitmap-zero: slave host bitmap + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_get_slave_bitmap(void *hwdev, u8 *slave_host_bitmap); + +/** + * @brief hinic5_get_slave_host_enable - get slave host enable + * @param hwdev: device pointer to hwdev + * @param host_id: get host id + * @param slave_en-zero: slave is enable + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int 
hinic5_get_slave_host_enable(void *hwdev, u8 host_id, u8 *slave_en); + +/** + * @brief hinic5_set_host_migrate_enable - set migrate host enable + * @param hwdev: device pointer to hwdev + * @param host_id: get host id + * @param slave_en-zero: migrate is enable + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_set_host_migrate_enable(void *hwdev, u8 host_id, bool enable); + +/** + * @brief hinic5_get_host_migrate_enable - get migrate host enable + * @param hwdev: device pointer to hwdev + * @param host_id: get host id + * @param slave_en-zero: migrte enable ptr + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_get_host_migrate_enable(void *hwdev, u8 host_id, u8 *migrate_en); + +/** + * @brief 判断是否是从主机 + * @param hwdev 硬件设备指针 + * + * @return 是否是从主机 + * @retval true表示是 + * @retval false表示不是 + */ +bool hinic5_is_slave_host(void *hwdev); + +/** + * @brief 判断是否是主主机 + * @param hwdev 硬件设备指针 + * + * @return 是否是主主机 + * @retval true表示是 + * @retval false表示不是 + */ +bool hinic5_is_master_host(void *hwdev); + +/** + * @brief 判断是否是多核心设备 + * @param hwdev 设备句柄 + * + * @return 是不是多核心设备 + * @retval true 是 + * @retval false 不是 + */ +bool hinic5_is_multi_bm(void *hwdev); + +/** + * @brief 设置插件服务位图 + * @param hwdev 硬件设备 + * @param srv_type 服务类型 + * @param func_id 功能ID + * @param attach_en 是否启用 + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hisdk5_set_plug_srv_bitmap(void *hwdev, u8 srv_type, u16 func_id, u8 attach_en); + +/** + * @brief 获取插件服务位图 + * @param hwdev 硬件设备 + * @param srv_type 服务类型 + * @param func_id 功能ID + * @param attach_en 是否启用插件服务 + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hisdk5_get_plug_srv_bitmap(void *hwdev, u8 srv_type, u16 func_id, u8 *attach_en); + +/** + * @brief 获取设备能力 + * @param hwdev 设备句柄 + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int 
hinic5_get_dev_cap(void *hwdev); + +/** + * @brief 获取是否是HTN设备 + * @param hwdev 设备句柄 + * + * @return 是不是HTN设备 + * @retval true: 是 + * @retval false: 不是 + */ +bool hinic5_support_htn(void *hwdev); + +/** + * @brief 获取当前 VF 是否独立运行 + * @param hwdev 设备句柄 + * + * @details VF 依赖 PF 运行时,其管理面消息需要发送给 PF 而不是 Mgmt + * @note 该配置对于 PF 无意义 + * + * @return VF 是否独立运行 + */ +bool hinic5_is_vf_isolation(void *hwdev); + +/** + * @brief 设置心跳检测周期和link_down检查次数 + * @param hwdev:设备句柄 + * @param heartbeat_period:心跳检测周期 + * @param linkdown_threshold:link_down检查次数 + * + * @details hwdev为空或心跳检测周期和断链检测次数均为0时,返回-EINVAL; + * 当心跳检测周期不为0时,更新心跳检测周期; + * 当断链检测次数不为0时,更新断链检测次数; + * + * @attention 支持仅更新检测周期/检测次数,不更新的参数直接置0即可; + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_set_heartbeat_period_and_linkdown_cnt(void *hwdev, u32 heartbeat_period, u32 linkdown_threshold); + +/** + * @brief 设置/获取dcb state + * @param hwdev[in]:设备句柄 + * @param op[in]:操作类型,HISDK5_DCB_STATE_GET(获取)/HISDK5_DCB_STATE_SET(设置) + * @param dcb_state[in]:存储业务驱动要写入到sdk的dcb state + * [out]:存储从sdk中获取的dcb state + * + * @details op == HISDK5_DCB_STATE_GET,从sdk中获取scb state保存到业务传入的dcb state指针对应的内存中,返回给上层业务; + * op == HISDK5_DCB_STATE_SET,将业务传入的dcb state保存到sdk中; + * + * @attention 在struct hisdk5_dcb_state中,当dcb_on == 0(dcb off)时,仅default_cos有效; + * 当dcb_on != 0(dcb on)时,参数均有效,可根据trust查看当前优先级模式, + * 根据pcp2cos/dscp2cos查看对应的映射关系; + * + * @return 是否成功 + * @retval 0: 成功 + * @retval 非0: 失败 + */ +int hinic5_dcb_state_op(void *hwdev, enum hisdk5_dcb_state_op op, struct hisdk5_dcb_state *dcb_state); + +/** + * @brief 获取设备端口速率 + * + * @param hwdev: device pointer to hwdev + * @param port_info: port info + * @param channel: channel id + * @return: 命令执行结果. 
+ * @retval 0 成功 + * @retval 非0 失败 + */ +int hinic5_get_port_info(void *hwdev, struct mag_port_info *port_info, u16 channel); + +/** + * @brief 获取设备端口速率 + * + * @param hwdev device pointer to hwdev + * @param speed 输出端口速率信息 + * @param channel channel id, mailbox发送使用的channel id + * @details 通过查询设备对应端口,发送mailbox消息给MPU获取端口速率 + * @attention: 函数内部涉及发送mailbox消息会休眠,禁止中断上下文等不允许休眠的流程中调用 + * @return: 设备端口速率获取返回成功或者失败 + * @retval 0 成功 + * @retval 非0 失败 + */ +int hinic5_get_speed(void *hwdev, enum mag_cmd_port_speed *speed, u16 channel); + +#endif diff --git a/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_hinic5_cqm.h b/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_hinic5_cqm.h new file mode 100644 index 00000000..fa419cc7 --- /dev/null +++ b/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_hinic5_cqm.h @@ -0,0 +1,904 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2025 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_HINIC5_CQM_H +#define HINIC5_HINIC5_CQM_H + +#include <linux/types.h> +#include <linux/completion.h> + +#include "hinic5_crm.h" +#include "hinic5_hinic5_cqm_adpt.h" +#include "hinic5_hinic5_vram_api.h" + +#define HINIC5_CQM_SUCCESS 0 /* Success result code */ +#define HINIC5_CQM_FAIL (-1) /* Failure result code */ +#define HINIC5_CQM_CONTINUE 1 /* Continue result code */ + +#define HINIC5_CQM_WQE_WF_NORMAL 0 /* Normal WQE Format */ +#define HINIC5_CQM_WQE_WF_LINK 1 /* Link WQE format */ + +#define HINIC5_CQM_QUEUE_LINK_MODE 0 /* 链式队列模式 */ +#define HINIC5_CQM_QUEUE_RING_MODE 1 /* RING式队列模式 */ +#define HINIC5_CQM_QUEUE_TOE_SRQ_LINK_MODE 2 /* SRQ队列模式 */ +#define HINIC5_CQM_QUEUE_RDMA_QUEUE_MODE 3 /* RDMA队列模式 */ + +/** + * @brief Link WQE common structure + */ +typedef struct tag_hinic5_cqm_linkwqe { + u32 rsv1 : 14; /* Reserved */ + u32 wf : 1; /* WQE format */ + u32 rsv2 : 14; /* Reserved */ + u32 ctrlsl : 2; /* Length of the control segment */ + u32 o : 1; /* Owner bit */ + + u32 rsv3 : 31; /* Reserved */ + u32 lp : 
1; /* Loop Back valid */ + + u32 next_page_gpa_h; /* 记录下一个页面的物理地址高32b,给芯片使用 */ + u32 next_page_gpa_l; /* 记录下一个页面的物理地址低32b,给芯片使用 */ + + u32 next_buffer_addr_h; /* 记录下一个页面的虚拟地址高32b,给驱动使用 */ + u32 next_buffer_addr_l; /* 记录下一个页面的虚拟地址低32b,给驱动使用 */ +} hinic5_cqm_linkwqe_s; + +/** + * @brief SRQ Link WQE structure + * @note wqe大小需要保证不超过普通RQE大小 + */ +typedef struct tag_hinic5_cqm_srq_linkwqe { + hinic5_cqm_linkwqe_s linkwqe; /* Link WQE common data */ + u32 current_buffer_gpa_h; /* 记录当前页面的物理地址高32b, + * 驱动释放container取消映射时使用 + */ + u32 current_buffer_gpa_l; /* 记录当前页面的物理地址低32b, + * 驱动释放container取消映射时使用 + */ + u32 current_buffer_addr_h; /* 记录当前页面的虚拟地址高32b, + * 驱动释放container时使用 + */ + u32 current_buffer_addr_l; /* 记录当前页面的虚拟地址低32b, + * 驱动释放container时使用 + */ + + u32 fast_link_page_addr_h; /* 记录container地址所在fastlink 页的虚拟地址高32b, + * 驱动释放fastlink时使用 + */ + u32 fast_link_page_addr_l; /* 记录container地址所在fastlink 页的虚拟地址低32b, + * 驱动释放fastlink时使用 + */ + + u32 fixed_next_buffer_addr_h; /* 记录下一个contianer的虚拟地址高32b, + * 用于驱动资源释放,驱动不可修改 + */ + u32 fixed_next_buffer_addr_l; /* 记录下一个contianer的虚拟地址低32b, + * 用于驱动资源释放,驱动不可修改 + */ +} hinic5_cqm_srq_linkwqe_s; + +/** + * @brief 标准128B WQE的前64B + */ +typedef union tag_hinic5_cqm_linkwqe_first64B { + hinic5_cqm_linkwqe_s basic_linkwqe; /* Link WQE common data */ + hinic5_cqm_srq_linkwqe_s toe_srq_linkwqe; /* srq linkwqe结构 */ + u32 value[16]; /* 保留字段 */ +} hinic5_cqm_linkwqe_first64B_s; + +/** + * @brief 标准128B WQE的后64B + */ +typedef struct tag_hinic5_cqm_linkwqe_second64B { + u32 rsvd0[4]; /* 第一个16B, Reserved */ + u32 rsvd1[4]; /* 第二个16B, Reserved */ + + union { + struct { + u32 rsvd0[3]; /* Reserved */ + u32 rsvd1 : 29; /* Reserved */ + u32 toe_o : 1; /* TOE owner bit */ + u32 resvd2 : 2; /* Reserved */ + } bs; + u32 value[4]; + } third_16B; /* 第三个16B */ + + union { + struct { + u32 rsvd0[2]; /* Reserved */ + u32 rsvd1 : 31; /* Reserved */ + u32 ifoe_o : 1; /* IFoE onwer bit */ + u32 rsvd2; /* Reserved */ + } bs; + u32 value[4]; + } forth_16B; /* 
第四个16B */ +} hinic5_cqm_linkwqe_second64B_s; + +/** + * @brief 标准128B WQE结构 + */ +typedef struct tag_hinic5_cqm_linkwqe_128B { + hinic5_cqm_linkwqe_first64B_s first64B; /* 标准128B WQE的前64B */ + hinic5_cqm_linkwqe_second64B_s second64B; /* 标准128B WQE的后64B */ +} hinic5_cqm_linkwqe_128B_s; + +/** + * @brief AEQ类型定义 + */ +typedef enum { + HINIC5_CQM_AEQ_BASE_T_NIC = 0, /* NIC分15个event:0~14 */ + HINIC5_CQM_AEQ_BASE_T_DMMU = 15, /* DMMU分1个event:15 */ + HINIC5_CQM_AEQ_BASE_T_ROCE = 16, /* ROCE分32个event:16~47 */ + HINIC5_CQM_AEQ_BASE_T_FC = 48, /* FC分8个event:48~55 */ + HINIC5_CQM_AEQ_BASE_T_IOE = 56, /* IOE分8个event:56~63 */ + HINIC5_CQM_AEQ_BASE_T_TOE = 64, /* TOE分16个event:64~79 */ + HINIC5_CQM_AEQ_BASE_T_UB = 80, /* UB分16个event:80~95 */ + HINIC5_CQM_AEQ_BASE_T_VBS = 96, /* VBS分16个event:96~111 */ + HINIC5_CQM_AEQ_BASE_T_IPSEC = 112, /* IPSEC分16个event:112~127 */ + HINIC5_CQM_AEQ_BASE_T_MAX = 128 /* 最大定义128种event */ +} hinic5_cqm_aeq_event_type_e; + +/** + * @brief HINIC5_CQM 业务扩展描述 + */ +typedef struct tag_service_register_template { + u32 service_type; /* 业务类型 */ + u32 srq_ctx_size; /* srq context大小 */ + u32 scq_ctx_size; /* scq context大小 */ + void *service_handle; /* ceq/aeq回调函数时传给service driver的指针 */ + /* ceq回调:shared cq */ + void (*shared_cq_ceq_callback)(void *service_handle, u32 cqn, void *cq_priv); + /* ceq回调:embedded cq */ + void (*embedded_cq_ceq_callback)(void *service_handle, u32 xid, void *qpc_priv); + /* ceq回调:no cq */ + void (*no_cq_ceq_callback)(void *service_handle, u32 xid, u32 qid, void *qpc_priv); + u8 (*aeq_level_callback)(void *service_handle, u8 event_type, u8 *val); /* aeq level回调 */ + void (*aeq_callback)(void *service_handle, u8 event_type, u8 *val); /* aeq回调 */ +} service_register_template_s; + +/** + * @brief HINIC5_CQM object type + */ +typedef enum hinic5_cqm_object_type { + HINIC5_CQM_OBJECT_ROOT_CTX = 0, /* Root context. 
留在以后兼容root ctx管理 */ + HINIC5_CQM_OBJECT_SERVICE_CTX, /* QPC, Service context, 连接管理对象 */ + HINIC5_CQM_OBJECT_MPT, /* RDMA Memory Protection Table */ + + HINIC5_CQM_OBJECT_NONRDMA_EMBEDDED_RQ = 10, /* 非RDMA业务的RQ,用LINKWQE管理 */ + HINIC5_CQM_OBJECT_NONRDMA_EMBEDDED_SQ, /* 非RDMA业务的SQ,用LINKWQE管理 */ + HINIC5_CQM_OBJECT_NONRDMA_SRQ, /* 非RDMA业务的SRQ,用MTT管理, + * 但要HINIC5_CQM自己申请MTT + */ + HINIC5_CQM_OBJECT_NONRDMA_EMBEDDED_CQ, /* 非RDMA业务的embedded CQ,用LINKWQE管理 */ + HINIC5_CQM_OBJECT_NONRDMA_SCQ, /* 非RDMA业务的SCQ,用LINKWQE管理 */ + + HINIC5_CQM_OBJECT_RESV = 20, /* Reserved */ + + HINIC5_CQM_OBJECT_RDMA_QP = 30, /* RDMA Queue Pair */ + HINIC5_CQM_OBJECT_RDMA_SRQ, /* RDMA Shared Receive Queue */ + HINIC5_CQM_OBJECT_RDMA_SCQ, /* RDMA Shared Completion Queue */ + + HINIC5_CQM_OBJECT_MTT = 50, /* RDMA Memory Translation Table */ + HINIC5_CQM_OBJECT_RDMARC, /* RDMA Reliable Connection */ +} hinic5_cqm_object_type_e; + +/** + * @brief BITMAP表申请的失败返回值 + */ +#define HINIC5_CQM_INDEX_INVALID ~(0U) + +/** + * @brief 新增字段的定义兼容XID=0xFFFFFFFF的默认XID申请规则 宏命名为低3bit比较位 + */ +#define HINIC5_CQM_XID_LOW_BIT_1_1_1 0x0 /* mask is 0x7 */ +#define HINIC5_CQM_XID_LOW_BIT_0_1_1 0x4 /* mask is 0x3 */ +#define HINIC5_CQM_XID_LOW_BIT_0_1_0 0x5 /* mask is 0x2 */ +#define HINIC5_CQM_XID_LOW_BIT_0_0_1 0x6 /* mask is 0x1 */ +#define HINIC5_CQM_XID_LOW_BIT_NONE 0x7 /* mask is 0x0 */ +#define HINIC5_CQM_XID_SEARCH_RANGE 0x0 +#define HINIC5_CQM_XID_SEARCH_ALL 0x1 + +#define HINIC5_CQM_XID_SEARCH_MODE_SHIFT 27 +#define HINIC5_CQM_XID_LB_MODE_SHIFT 24 +#define HINIC5_CQM_XID_LOW_BITS_SHIFT 21 +#define HINIC5_CQM_XID_SEARCH_MODE_MASK 0x1 +#define HINIC5_CQM_XID_LB_MODE_MASK 0x7 +#define HINIC5_CQM_XID_LOW_BITS_MASK 0x7 +#define HINIC5_CQM_DYNAMIC_XID_MASK 0x1FFFFF + +/** + * @brief 构造 XID + * @param[in] search_mode 搜索模式 + * @param[in] lb_mode 负载均衡模式 + * @param[in] xid_low XID的低两位 + * + * @details search_mode: 0---指定XID范围查找,范围为[bp_start, bp_end);1---整个动态区查找 + * lb_mode: + * 0--动态分配XID时,选择xid[2:0]=xid_low[2:0] + * 
4--动态分配XID时,选择xid[1:0]=xid_low[1:0] + * 5--动态分配XID时,选择xid[0]=xid_low[0] + * 6--动态分配XID时,选择xid[1]=xid_low[1] + * 7--所有xid均可申请 + * xid_low: 用于匹配的xid_low[2:0] + * + * @return 返回生成的XID + */ +#define HINIC5_CQM_DYNAMIC_XID_MOD(search_mode, lb_mode, xid_low) \ + ((((search_mode) & HINIC5_CQM_XID_SEARCH_MODE_MASK) << HINIC5_CQM_XID_SEARCH_MODE_SHIFT) | \ + (((lb_mode) & HINIC5_CQM_XID_LB_MODE_MASK) << HINIC5_CQM_XID_LB_MODE_SHIFT) | \ + (((xid_low) & HINIC5_CQM_XID_LOW_BITS_MASK) << HINIC5_CQM_XID_LOW_BITS_SHIFT) | \ + HINIC5_CQM_DYNAMIC_XID_MASK) + +#define HINIC5_CQM_RDMA_Q_ROOM_1 (1) /* 为支持ROCE的Q buffer resize,第一个Q buffer空间 */ +#define HINIC5_CQM_RDMA_Q_ROOM_2 (2) /* 为支持ROCE的Q buffer resize,第二个Q buffer空间 */ + +#define HINIC5_CQM_HARDWARE_DOORBELL (1) /* 当前Q选择的doorbell方式,硬件doorbell */ +#define HINIC5_CQM_SOFTWARE_DOORBELL (2) /* 当前Q选择的doorbell方式,软件doorbell */ +#define HINIC5_CQM_SECURE_BUFFER_EN (1) /* 标识Buffer是从安全内存中申请的 */ + +/** + * @brief HINIC5_CQM buffer单节点结构 + */ +typedef struct tag_hinic5_cqm_buf_list { + void *va; /* 虚拟地址 */ + dma_addr_t pa; /* 物理地址 */ + u32 refcount; /* buf的引用计数,内部buf管理用 */ +} hinic5_cqm_buf_list_s; + +/** + * @brief HINIC5_CQM buffer单节点结构,适配WIN + */ +struct huge_buf_addr { + void *huge_buf_vaddr; /* 虚拟地址 */ + dma_addr_t huge_buf_paddr; /* 物理地址 */ + u32 huge_buf_size; /* 单节点buffer的大小 */ +}; + +/** + * @brief HINIC5_CQM buffers 管理结构 + */ +typedef struct tag_hinic5_cqm_buf { + hinic5_cqm_buf_list_s *buf_list; /* buffer 链表 */ + hinic5_cqm_buf_list_s direct; /* 将 buf_list 重新映射为连续的虚拟地址,其成员仅 va 有效 */ + u32 page_number; /* 总物理页数量 */ + u32 buf_number; /* buffer 链表长度 */ + u32 buf_size; /* buffer 大小 */ +#ifdef __WIN__ + struct huge_buf_addr *bufs_addr; /* buffer链表 */ + u32 huge_buf_number; /* buffer链表节点个数 */ +#endif + u32 secure_mem_flag; /* 安全内存标志,默认为 0 (不使用安全内存) */ + struct hinic5_vram_buf_info buf_info; +} hinic5_cqm_buf_s; + +/** + * @brief HINIC5_CQM object 结构,对 context/queue/table 的抽象 + */ +typedef struct tag_hinic5_cqm_object { + u32 service_type; 
/* 业务类型 */ + u32 object_type; /* 对象类型,如context,queue,mpt,mtt等 */ + u32 object_size; /* 对象大小, + 对于非RDMA的队列,是队列的深度; + 对于queue/ctx/MPT,单位Byte; + 对于MTT/RDMARC,单位entry个数; + 对于container,单位conainer个数 */ + atomic_t refcount; /* 引用计数 */ + struct completion free; /* 释放完成量 */ + void *hinic5_cqm_handle; /* hinic5_cqm_handle */ +} hinic5_cqm_object_s; + +/** + * @brief QPC/MPT object + */ +typedef struct tag_hinic5_cqm_qpc_mpt { + hinic5_cqm_object_s object; /* 对象基类 */ + u32 xid; /* XID. + xid[20:0] < 1M 时,表示静态申请的xid; + xid[20:0] 全1为动态申请; + xid[22:21] 指定低2bit; + xid[24:23] 为lb_mode; + xid[25]为search_mode */ + dma_addr_t paddr; /* QPC/MTT内存的物理地址 */ + void *priv; /* service driver的该对象的私有信息 */ + u8 *vaddr; /* QPC/MTT内存的虚拟地址 */ +} hinic5_cqm_qpc_mpt_s; + +/** + * @brief queue header结构 + */ +typedef struct tag_hinic5_cqm_queue_header { + u64 doorbell_record; /* SQ/RQ的db内容 */ + u64 ci_record; /* CQ的db内容 */ + u64 rsv1; /* 该区域为驱动和微码传递信息的自定义区 */ + u64 rsv2; /* 该区域为驱动和微码传递信息的自定义区 */ +} hinic5_cqm_queue_header_s; + +/** + * @brief 队列管理结构 + * @details 非 RDMA 业务,embeded 队列用 linkwqe 管理,SRQ 和 SCQ 用 MTT 管理,MTT 由 HINIC5_CQM 申请; + * RDMA 业务的队列,用 MTT 管理 + */ +typedef struct tag_hinic5_cqm_queue { + hinic5_cqm_object_s object; /* 对象基类 */ + u32 index; /* embeded队列、QP没有index,SRQ和SCQ有 */ + void *priv; /* service driver的该对象的私有信息 */ + u32 current_q_doorbell; /* 当前queue选择的doorbell类型,roce QP同时用HW/SW */ + u32 current_q_room; /* roce:当前有效的room buf */ + hinic5_cqm_buf_s q_room_buf_1; /* nonrdma:只能选择q_room_buf_1为q_room_buf */ + hinic5_cqm_buf_s q_room_buf_2; /* RDMA的CQ会重新分配queue room的大小 */ + hinic5_cqm_queue_header_s *q_header_vaddr; /* queue header虚拟地址 */ + dma_addr_t q_header_paddr; /* queue header物理地址 */ + u8 *q_ctx_vaddr; /* SRQ和SCQ的ctx虚拟地址 */ + dma_addr_t q_ctx_paddr; /* SRQ和SCQ的ctx物理地址 */ + u32 valid_wqe_num; /* 创建成功的有效wqe个数 */ + u8 *tail_container; /* SRQ container的尾指针 */ + u8 *head_container; /* SRQ container的首针 */ + u8 queue_link_mode; /* 队列创建时确定连接模式:link,ring等 */ +} hinic5_cqm_queue_s; + +/** + * 
@brief MTT/RDMARC管理结构 + */ +typedef struct tag_hinic5_cqm_mtt_rdmarc { + hinic5_cqm_object_s object; /* 对象基类 */ + u32 index_base; /* index_base */ + u32 index_number; /* index_number */ + u8 *vaddr; /* buffer虚拟地址 */ +} hinic5_cqm_mtt_rdmarc_s; + +/** + * @brief 发送命令结构 + */ +typedef struct tag_hinic5_cqm_cmd_buf { + void *buf; /* 命令buf虚拟地址 */ + dma_addr_t dma; /* 命令buf物理地址 */ + u16 size; /* 命令buf大小 */ +} hinic5_cqm_cmd_buf_s; + +/** + * @brief 发送ACK方式定义 + */ +typedef enum { + HINIC5_CQM_CMD_ACK_TYPE_CMDQ = 0, /* ack回写到cmdq */ + HINIC5_CQM_CMD_ACK_TYPE_SHARE_CQN = 1, /* ack通过root ctx的scq上报 */ + HINIC5_CQM_CMD_ACK_TYPE_APP_CQN = 2 /* ack通过业务的scq上报 */ +} hinic5_cqm_cmd_ack_type_e; + +/** + * @brief HINIC5_CQM 初始化 + * @param[in] ex_handle 设备句柄 + * + * @return 是否成功 + * @retval 0 success + * @retval -1 failure + */ +s32 hinic5_cqm3_init(void *ex_handle); + +/** + * @brief HINIC5_CQM 反初始化 + * @param[in] ex_handle 设备句柄 + */ +void hinic5_cqm3_uninit(void *ex_handle); + +/** + * @brief HINIC5_CQM 初始化指定 Fake VF + * @param[in] ex_handle 设备句柄 + * @param[in] vf_id 待初始化的 function id + * + * @return 是否成功 + * @retval 0 success + * @retval -1 failure + * @retval -EINVAL Invalid argument + */ +int hinic5_cqm3_init_fake_vf(void *ex_handle, u32 vf_id); + +/** + * @brief 注册业务扩展能力 + * @param[in] ex_handle 设备句柄 + * @param[in] service_template 业务扩展描述 + * + * @return 是否成功 + * @retval 0 success + * @retval -1 failure + */ +s32 hinic5_cqm3_service_register(void *ex_handle, service_register_template_s *service_template); + +/** + * @brief 注销业务扩展能力 + * @param[in] ex_handle 设备句柄 + * @param[in] service_type 业务类型 + */ +void hinic5_cqm3_service_unregister(void *ex_handle, u32 service_type); + +/** + * @brief 声明设备管理的 Fake VF 数量 + * @param[in] ex_handle 设备句柄 + * @param[in] fake_vf_num_cfg Fake VF 数量,该值不能大于设备支持的最大值 + * + * @return 是否成功 + * @retval 0 success + * @retval -1 failure + */ +s32 hinic5_cqm3_fake_vf_num_set(void *ex_handle, u16 fake_vf_num_cfg); + +/** + * @brief 创建 FC SRQ + * @param[in] 
ex_handle 设备句柄 + * @param[in] service_type 业务类型 + * @param[in] object_type 对象类型 + * @param[in] wqe_number wqe 数目 + * @param[in] wqe_size wqe 大小 + * @param[in] object_priv 对象私有数据指针 + * + * @details 队列中有效wqe个数必须要满足传入的wqe个数。 + * 因为linkwqe只能填在页尾,真实有效个数超过需求,需要告知业务多创建的个数 + * + * @return 队列结构指针 + */ +hinic5_cqm_queue_s *hinic5_cqm3_object_fc_srq_create(void *ex_handle, u32 service_type, + hinic5_cqm_object_type_e object_type, + u32 wqe_number, u32 wqe_size, + void *object_priv); + +/** + * @brief 创建 RQ + * @param[in] ex_handle 设备句柄 + * @param[in] service_type 业务类型 + * @param[in] object_type 对象类型 + * @param[in] init_rq_num container 数目 + * @param[in] container_size container 大小 + * @param[in] wqe_size wqe 大小 + * @param[in] object_priv 对象私有数据指针 + * + * @details 在使用SRQ时,RQ队列创建 + * + * @return 队列结构指针 + */ +hinic5_cqm_queue_s *hinic5_cqm3_object_recv_queue_create(void *ex_handle, u32 service_type, + hinic5_cqm_object_type_e object_type, + u32 init_rq_num, u32 container_size, + u32 wqe_size, void *object_priv); + +/** + * @brief 创建 TOE SRQ + * @param[in] ex_handle 设备句柄 + * @param[in] service_type 业务类型 + * @param[in] object_type 对象类型 + * @param[in] container_number container 数目 + * @param[in] container_size container 大小 + * @param[in] wqe_size wqe 大小 + * + * @return 队列结构指针 + */ +hinic5_cqm_queue_s *hinic5_cqm3_object_share_recv_queue_create(void *ex_handle, u32 service_type, + hinic5_cqm_object_type_e object_type, + u32 container_number, + u32 container_size, u32 wqe_size); + +/** + * @brief 创建 QPC/MPT + * @param[in] ex_handle 设备句柄 + * @param[in] service_type 业务类型 + * @param[in] object_type 对象类型 + * @param[in] object_size 对象大小,单位Byte + * @param[in] object_priv 对象私有数据指针 + * @param[in] index 根据该值申请预留的qpn,如果要自动分配需填入HINIC5_CQM_INDEX_INVALID + * @param[in] bitmap_start 范围申请xid的起始index + * @param[in] bitmap_end 范围申请xid的结束index + * + * @attention 此接口可能会休眠 + * + * @return QPC/MPT 结构指针 + */ +hinic5_cqm_qpc_mpt_s *hinic5_cqm3_object_qpc_mpt_create(void *ex_handle, u32 service_type, + 
hinic5_cqm_object_type_e object_type, + u32 object_size, void *object_priv, + u32 index, u32 bitmap_start, u32 bitmap_end); + +/** + * @brief 创建非RDMA业务的队列 + * @param[in] ex_handle 设备句柄 + * @param[in] service_type 业务类型 + * @param[in] object_type 对象类型 + * @param[in] wqe_number 包含link wqe的数目 + * @param[in] wqe_size 定长,大小为2^n + * @param[in] object_priv 对象私有数据指针 + * + * @attention 此接口可能会休眠 + * + * @return 队列结构指针 + */ +hinic5_cqm_queue_s *hinic5_cqm3_object_nonrdma_queue_create(void *ex_handle, u32 service_type, + hinic5_cqm_object_type_e object_type, + u32 wqe_number, u32 wqe_size, + void *object_priv); + +/** + * @brief 创建RDMA业务的队列 + * @param[in] ex_handle 设备句柄 + * @param[in] service_type 业务类型 + * @param[in] object_type 对象类型 + * @param[in] object_size 对象大小 + * @param[in] object_priv 对象私有数据指针 + * @param[in] room_header_alloc 是否要申请queue room和header空间 + * @param[in] xid 根据该值申请预留的qpn,如果要自动分配需填入HINIC5_CQM_INDEX_INVALID + * @param[in] bitmap_start 范围申请xid的起始index + * @param[in] bitmap_end 范围申请xid的结束index + * + * @attention 此接口可能会休眠 + * + * @return 队列结构指针 + */ +hinic5_cqm_queue_s *hinic5_cqm3_object_rdma_queue_create(void *ex_handle, u32 service_type, + hinic5_cqm_object_type_e object_type, + u32 object_size, void *object_priv, + bool room_header_alloc, u32 xid, + u32 bitmap_start, u32 bitmap_end); + +/** + * @brief 创建RDMA业务的 MTT/RDMARC + * @param[in] ex_handle 设备句柄 + * @param[in] service_type 业务类型 + * @param[in] object_type 对象类型 + * @param[in] index_base 起始index编号 + * @param[in] index_number index数量 + * + * @return MTT/RDMARC 结构指针 + */ +hinic5_cqm_mtt_rdmarc_s *hinic5_cqm3_object_rdma_table_get(void *ex_handle, u32 service_type, + hinic5_cqm_object_type_e object_type, + u32 index_base, u32 index_number); + +/** + * @brief 申请一个 cmd buffer + * @param[in] ex_handle 设备句柄 + * + * @attention buffer大小固定 2K,buffer 内容没有清零,需要业务清零 + * + * @return cmd buffer 指针 + */ +hinic5_cqm_cmd_buf_s *hinic5_cqm3_cmd_alloc(void *ex_handle); + +/** + * @brief 释放一个 cmd buffer + * @param[in] ex_handle 
设备句柄 + * @param[in] cmd_buf 待释放的 cmd buffer 指针 + */ +void hinic5_cqm3_cmd_free(void *ex_handle, hinic5_cqm_cmd_buf_s *cmd_buf); + +/** + * @brief 发送 cmd + * @param[in] ex_handle 设备句柄 + * @param[in] mod 模块 + * @param[in] cmd 命令字 + * @param[in] buf_in 输入命令 buffer + * @param[out] buf_out 输出命令 buffer + * @param[out] out_param 命令返回的 udata(user data) + * @param[in] timeout 命令超时时间,单位ms + * @param[in] channel 调用者 channel id + * + * @details 以 box 方式发送一个 cmdq cmd + * + * @attention 该接口会挂完成量,造成休眠 + * + * @return 是否成功 + * @retval 0 success + * @retval -1 failure + */ +s32 hinic5_cqm3_send_cmd_box(void *ex_handle, u8 mod, u8 cmd, + hinic5_cqm_cmd_buf_s *buf_in, hinic5_cqm_cmd_buf_s *buf_out, + u64 *out_param, u32 timeout, u16 channel); + +/** + * @brief 发送 cmd + * @param[in] ex_handle 设备句柄 + * @param[in] mod 模块 + * @param[in] cmd 命令字 + * @param[in] cos_id CMDQ 队列 + * @param[in] buf_in 输入命令 buffer + * @param[out] buf_out 输出命令 buffer + * @param[out] out_param 命令返回的 udata(user data) + * @param[in] timeout 命令超时时间,单位ms + * @param[in] channel 调用者 channel id + * + * @details 指定 CMDQ 队列并以 box 方式发送一个 cmdq cmd + * + * @attention 该接口会挂完成量,造成休眠 + * + * @return 是否成功 + * @retval 0 success + * @retval -1 failure + */ +s32 hinic5_cqm3_lb_send_cmd_box(void *ex_handle, u8 mod, u8 cmd, u8 cos_id, + hinic5_cqm_cmd_buf_s *buf_in, hinic5_cqm_cmd_buf_s *buf_out, + u64 *out_param, u32 timeout, u16 channel); + +/** + * @brief 发送 cmd + * @param[in] ex_handle 设备句柄 + * @param[in] mod 模块 + * @param[in] cmd 命令字 + * @param[in] buf_in 输入命令 buffer + * @param[out] out_param 命令返回的 udata(user data) + * @param[in] timeout 命令超时时间,单位ms + * @param[in] channel 调用者 channel id + * + * @details 以 imm 方式发送一个 cmdq cmd + * + * @attention 该接口会挂完成量,造成休眠 + * + * @return 是否成功 + * @retval 0 success + * @retval -1 failure + */ +s32 hinic5_cqm3_send_cmd_imm(void *ex_handle, u8 mod, u8 cmd, + hinic5_cqm_cmd_buf_s *buf_in, + u64 *out_param, u32 timeout, u16 channel); + +/** + * @brief 申请硬件 doorbell 和 dwqe + * @param[in] ex_handle 
设备句柄 + * @param[out] db_addr doorbell 物理地址 + * @param[out] dwqe_addr dwqe 物理地址 + * + * @details 申请一页硬件doorbell和dwqe,具有相同的index,得到的均为物理地址,每个function最多有1K个 + * + * @return 是否成功 + * @retval 0 success + * @retval -1 failure + */ +s32 hinic5_cqm3_db_addr_alloc(void *ex_handle, void __iomem **db_addr, void __iomem **dwqe_addr); + +/** + * @brief 释放硬件 doorbell 和 dwqe + * @param[in] ex_handle 设备句柄 + * @param[in] db_addr doorbell 物理地址 + * @param[in] dwqe_addr dwqe 物理地址 + */ +void hinic5_cqm3_db_addr_free(void *ex_handle, const void __iomem *db_addr, void __iomem *dwqe_addr); + +/** + * @brief 获得硬件 doorbell 虚拟地址 + * @param[in] ex_handle 设备句柄 + * @param[in] service_type 业务类型 + * + * @return doorbell 虚拟地址 + */ + +void *hinic5_cqm3_get_db_addr(void *ex_handle, u32 service_type); + +/** + * @brief 获得硬件 doorbell 物理地址 + * @param[in] ex_handle 设备句柄 + * @param[out] addr 保存 doorbell 物理地址的指针 + * @param[in] service_type 业务类型 + * + * @details 获得硬件doorbell物理地址 + * + * @return doorbell地址 + */ +s32 hinic5_cqm3_get_hardware_db_addr(void *ex_handle, u64 *addr, enum hinic5_service_type service_type); + +/** + * @brief Ring a hardware DB + * @param[in] ex_handle 设备句柄 + * @param[in] service_type 业务类型 + * @param[in] db_count doorbell中超出64b的PI[7:0] + * @param[in] db The content of hardware doorbell + * + * @return 是否成功 + * @retval 0 success + * @retval -1 failure + */ +s32 hinic5_cqm3_ring_hardware_db(void *ex_handle, u32 service_type, u8 db_count, u64 db); + +/** + * @brief Ring a direct wqe hardware DB to chip + * @param[in] ex_handle 设备句柄 + * @param[in] service_type 业务类型 + * @param[in] db_count The bit[7:0] of PI can't be store in 64-bit db + * @param[in] direct_wqe The content of direct_wqe + * + * @return 是否成功 + * @retval 0 success + * @retval -1 failure + */ +s32 hinic5_cqm3_ring_direct_wqe_db(void *ex_handle, u32 service_type, u8 db_count, void *direct_wqe); + +/** + * @brief Ring a software DB + * @param[in] ex_handle 设备句柄 + * @param[in] object 对象指针 + * @param[in] db_record The content of 
software doorbell + * + * @return 是否成功 + * @retval 0 success + * @retval -1 failure + */ +s32 hinic5_cqm3_ring_software_db(hinic5_cqm_object_s *object, u64 db_record); + +/** + * @brief bloom filter 增加引用计数 + * @param[in] ex_handle 设备句柄 + * @param[in] id bloom filter id + * + * @details 由 0 -> 1 时发送API置位 + * + * @attention 此接口可能会休眠 + * + * @return 是否成功 + * @retval 0 success + * @retval -1 failure + */ +s32 hinic5_cqm3_bloomfilter_inc(void *ex_handle, u16 func_id, u64 id); + +/** + * @brief bloom filter 减少引用计数 + * @param[in] ex_handle 设备句柄 + * @param[in] id bloom filter id + * + * @details 减为 0 时发送 API 清零 + * + * @attention 此接口可能会休眠 + * + * @return 是否成功 + * @retval 0 success + * @retval -1 failure + */ +s32 hinic5_cqm3_bloomfilter_dec(void *ex_handle, u16 func_id, u64 id); + +/** + * @brief 获取 SMF Timer spoke list 的基址 + * @param[in] ex_handle 设备句柄 + * + * @return 虚拟地址 + */ +void *hinic5_cqm3_timer_base(void *ex_handle); + +/** + * @brief 清零 SMF Timer spoke list + * @param[in] ex_handle 设备句柄 + * @param[in] function_id function id + */ +void hinic5_cqm3_function_timer_clear(void *ex_handle, u32 function_id); + +/** + * @brief 清零 hash buffer + * @param[in] ex_handle 设备句柄 + * @param[in] global_funcid function id + */ +void hinic5_cqm3_function_hash_buf_clear(void *ex_handle, s32 global_funcid); + +/** + * @brief SRQ 申请新的 container,创建后好挂链 + * @param[in] common 队列结构指针 + * + * @return 是否成功 + * @retval 0 success + * @retval -1 failure + */ +s32 hinic5_cqm3_object_share_recv_queue_add_container(hinic5_cqm_queue_s *common); + +/** + * @brief SRQ 申请新的 container,创建后不挂链,由业务完成挂链 + * @param[in] common 队列结构指针 + * @param[out] container_addr 返回的 container 地址 + * + * @return 是否成功 + * @retval 0 success + * @retval -1 failure + */ +s32 hinic5_cqm3_object_srq_add_container_free(hinic5_cqm_queue_s *common, u8 **container_addr); + +/** + * @brief 通过 index 获得对象 + * @param[in] ex_handle 设备句柄 + * @param[in] object_type 对象类型 + * @param[in] index index支持qpn,mptn,scqn,srqn + * @param[in] bh 
是否禁用中断下半部 + * + * @return 对象指针 + */ +hinic5_cqm_object_s *hinic5_cqm3_object_get(void *ex_handle, hinic5_cqm_object_type_e object_type, + u32 index, bool bh); + +/** + * @brief 释放对象 + * @param[in] object 对象指针 + */ +void hinic5_cqm3_object_put(hinic5_cqm_object_s *object); + +/** + * @brief 删除对象 + * @param[in] object 对象指针 + * + * @details 删除创建的对象,该函数会休眠等待所有对该对象的操作完成才返回 + * + * @attention 此接口可能会休眠 + */ +void hinic5_cqm3_object_delete(hinic5_cqm_object_s *object); + +/** + * @brief 获得对象的所属 function ID + * @param[in] object 对象指针 + * + * @return + * @retval >=0 function ID + * @retval -1 失败 + */ +s32 hinic5_cqm3_object_funcid(hinic5_cqm_object_s *object); + +/** + * @brief 给对象申请一块新空间 + * @param[in] object 对象指针 + * @param[in] object_size 新buffer大小 + * + * @details 目前只对roce业务有用,调整CQ的buffer大小,但cqn和cqc不变, + * 申请新的buffer空间,不释放旧buffer空间,当前有效buffer仍为旧buffer + * + * @return 是否成功 + * @retval 0 success + * @retval -1 failure + */ +s32 hinic5_cqm3_object_resize_alloc_new(hinic5_cqm_object_s *object, u32 object_size); + +/** + * @brief 给对象释放新申请buffer空间 + * @param[in] object 对象指针 + * + * @details 本函数释放新申请buffer空间,用于业务的异常处理分支 + */ +void hinic5_cqm3_object_resize_free_new(hinic5_cqm_object_s *object); + +/** + * @brief 给对象释旧buffer空间 + * @param[in] object 对象指针 + * + * @details 本函数释放旧的buffer,并将当前有效buffer设置为新buffer + */ +void hinic5_cqm3_object_resize_free_old(hinic5_cqm_object_s *object); + +/** + * @brief 释放container + * @param[in] object 对象指针 + * @param[in] container 要释放的container指针 + * + * @details 释放container + */ +void hinic5_cqm3_srq_used_rq_container_delete(hinic5_cqm_object_s *object, u8 *container); + +/** + * @brief 获得对象buffer指定偏移处的物理地址和虚拟地址 + * @param[in] object 对象指针 + * @param[in] offset 对于rdma table,offset为index绝对编号 + * @param[out] paddr 仅对rdma table才返回物理地址 + * + * @details 仅支持rdma table查找,获得对象buffer指定偏移处的物理地址和虚拟地址 + * + * @return u8 *buffer指定偏移处的虚拟地址 + */ +u8 *hinic5_cqm3_object_offset_addr(hinic5_cqm_object_s *object, u32 offset, dma_addr_t *paddr); + +/** + * @brief 创建 
DTOE SRQ + * @param[in] ex_handle 设备句柄 + * @param[in] contex_size 上下文大小 + * @param[out] index_count 申请的 index 数量 + * @param[out] index 申请的 index 起始 + * + * @return 是否成功 + * @retval 0 success + * @retval -1 failure + */ +s32 hinic5_cqm3_dtoe_share_recv_queue_create(void *ex_handle, u32 contex_size, + u32 *index_count, u32 *index); + +/** + * @brief 释放 DTOE SRQ bitmap + * @param[in] ex_handle 设备句柄 + * @param[in] index_count 释放的 index 数量 + * @param[in] index 释放的 index 起始 + */ +void hinic5_cqm3_dtoe_free_srq_bitmap_index(void *ex_handle, u32 index_count, u32 index); + +#endif /* HINIC5_HINIC5_CQM_H */ diff --git a/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_hinic5_cqm_adpt.h b/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_hinic5_cqm_adpt.h new file mode 100644 index 00000000..b8582fff --- /dev/null +++ b/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_hinic5_cqm_adpt.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_HINIC5_CQM_ADPT_H +#define HINIC5_HINIC5_CQM_ADPT_H + +#define hinic5_cqm_init hinic5_cqm3_init /**< 初始化HINIC5_CQM */ +#define hinic5_cqm_uninit hinic5_cqm3_uninit /**< 反初始化HINIC5_CQM */ +#define hinic5_cqm_init_fake_vf hinic5_cqm3_init_fake_vf /**< HINIC5_CQM 初始化指定 Fake VF */ +#define hinic5_cqm_service_register hinic5_cqm3_service_register /**< 注册服务 */ +#define hinic5_cqm_service_unregister hinic5_cqm3_service_unregister /**< 注销服务 */ +#define hinic5_cqm_bloomfilter_dec hinic5_cqm3_bloomfilter_dec /**< 减少布隆过滤器的计数 */ +#define hinic5_cqm_bloomfilter_inc hinic5_cqm3_bloomfilter_inc /**< 增加布隆过滤器的计数 */ +#define hinic5_cqm_cmd_alloc hinic5_cqm3_cmd_alloc /**< 分配命令 */ +#define hinic5_cqm_get_hardware_db_addr hinic5_cqm3_get_hardware_db_addr /**< 获取硬件数据库地址 */ +#define hinic5_cqm_cmd_free hinic5_cqm3_cmd_free /**< 释放命令 */ +#define hinic5_cqm_send_cmd_box hinic5_cqm3_send_cmd_box /**< 发送命令盒 */ +#define hinic5_cqm_lb_send_cmd_box 
hinic5_cqm3_lb_send_cmd_box /**< 发送负载均衡命令盒 */ +#define hinic5_cqm_send_cmd_imm hinic5_cqm3_send_cmd_imm /**< 发送立即命令 */ +#define hinic5_cqm_db_addr_alloc hinic5_cqm3_db_addr_alloc /**< 分配数据库地址 */ +#define hinic5_cqm_db_addr_free hinic5_cqm3_db_addr_free /**< 释放数据库地址 */ +#define hinic5_cqm_ring_hardware_db hinic5_cqm3_ring_hardware_db /**< 环形硬件数据库 */ +#define hinic5_cqm_ring_software_db hinic5_cqm3_ring_software_db /**< 环形软件数据库 */ +#define hinic5_cqm_object_fc_srq_create hinic5_cqm3_object_fc_srq_create /**< 创建FC SRQ对象 */ +#define hinic5_cqm_object_share_recv_queue_create hinic5_cqm3_object_share_recv_queue_create /**< 创建共享接收队列对象 */ +#define hinic5_cqm_object_share_recv_queue_add_container hinic5_cqm3_object_share_recv_queue_add_container /**< 向共享接收队列添加容器 */ +#define hinic5_cqm_object_srq_add_container_free hinic5_cqm3_object_srq_add_container_free /**< 释放SRQ添加容器 */ +#define hinic5_cqm_object_recv_queue_create hinic5_cqm3_object_recv_queue_create /**< 创建接收队列对象 */ +#define hinic5_cqm_object_qpc_mpt_create hinic5_cqm3_object_qpc_mpt_create /**< 创建QPC MPT对象 */ +#define hinic5_cqm_object_nonrdma_queue_create hinic5_cqm3_object_nonrdma_queue_create /**< 创建非RDMA队列对象 */ +#define hinic5_cqm_object_rdma_queue_create hinic5_cqm3_object_rdma_queue_create /**< 创建RDMA队列对象 */ +#define hinic5_cqm_object_rdma_table_get hinic5_cqm3_object_rdma_table_get /**<获取RDMA表 */ +#define hinic5_cqm_object_delete hinic5_cqm3_object_delete /**< 删除对象 */ +#define hinic5_cqm_object_offset_addr hinic5_cqm3_object_offset_addr /**< 获取偏移地址 */ +#define hinic5_cqm_object_get hinic5_cqm3_object_get /**< 获取对象 */ +#define hinic5_cqm_object_put hinic5_cqm3_object_put /**< 放置对象 */ +#define hinic5_cqm_object_funcid hinic5_cqm3_object_funcid /**< 获取函数ID */ +#define hinic5_cqm_object_resize_alloc_new hinic5_cqm3_object_resize_alloc_new /**< 重新分配新大小的对象 */ +#define hinic5_cqm_object_resize_free_new hinic5_cqm3_object_resize_free_new /**< 释放新大小的对象 */ +#define hinic5_cqm_object_resize_free_old 
hinic5_cqm3_object_resize_free_old /**< 释放旧大小的对象 */ +#define hinic5_cqm_function_timer_clear hinic5_cqm3_function_timer_clear /**< 清除函数计时器 */ +#define hinic5_cqm_function_hash_buf_clear hinic5_cqm3_function_hash_buf_clear /**< 清除函数哈希缓冲区 */ +#define hinic5_cqm_srq_used_rq_container_delete hinic5_cqm3_srq_used_rq_container_delete /**< 删除使用的RQ容器 */ +#define hinic5_cqm_timer_base hinic5_cqm3_timer_base /**< 获取定时器基址 */ +#define hinic5_cqm_dtoe_free_srq_bitmap_index hinic5_cqm3_dtoe_free_srq_bitmap_index /**< 释放SRQ位图索引 */ +#define hinic5_cqm_dtoe_share_recv_queue_create hinic5_cqm3_dtoe_share_recv_queue_create /**< 创建共享接收队列 */ +#define hinic5_cqm_get_db_addr hinic5_cqm3_get_db_addr /**< 获取doorbell地址 */ +#define hinic5_cqm_ring_direct_wqe_db hinic5_cqm3_ring_direct_wqe_db /**< 敲direct wqe doorbell */ +#define hinic5_cqm_fake_vf_num_set hinic5_cqm3_fake_vf_num_set /**< 设置Fake VF数量 */ + +#endif diff --git a/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_hinic5_vram_api.h b/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_hinic5_vram_api.h new file mode 100644 index 00000000..58d7e1ee --- /dev/null +++ b/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_hinic5_vram_api.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_HINIC5_VRAM_API_H +#define HINIC5_HINIC5_VRAM_API_H + +#if !defined(__UEFI__) && !defined(__WIN__) +#include <linux/notifier.h> +#include <linux/numa.h> +#endif + +#define HINIC5_VRAM_NUMA_NODE0 0 +#define HINIC5_VRAM_NUMA_NODE1 1 +#define HINIC5_CQM_OVS_PAGESIZE_ORDER 9 +#define HINIC5_VRAM_NAME_APPLY_LEN 64 + +struct hinic5_vram_buf_info { + char buf_hinic5_vram_name[HINIC5_VRAM_NAME_APPLY_LEN]; + int use_hinic5_vram; +}; + +#if defined(__UEFI__) || defined(__WIN__) || defined(__VMWARE__) +#define hinic5_hinic5_vram_kalloc_node(name, size, numa) 0 +#define hinic5_hinic5_vram_kfree(vaddr, name, size) +#define get_use_hinic5_vram_flag() 0 +#else + +/** + * @brief alloc 
hinic5_vram memory + * @param name name of hinic5_vram memory + * @param size size of hinic5_vram memory + * @param numa hinic5_vram numa node. if greater than environment numa num, apply for idle nodes + **/ +void __iomem *hinic5_hinic5_vram_kalloc_node(char *name, u64 size, u8 numa); +/** + * @brief free hinic5_vram memory + * @param vaddr virtual address of hinic5_vram memory + * @param name name of hinic5_vram memory + * @param size size of hinic5_vram memory + **/ +void hinic5_hinic5_vram_kfree(void __iomem *vaddr, char *name, u64 size); +/** + * @brief get use-hinic5_vram flag + * @return + * - Zero for not-use-hinic5_vram. Non-zero for use-hinic5_vram. + **/ +int get_use_hinic5_vram_flag(void); + +#endif +#endif /* HINIC5_HINIC5_VRAM_API_H */ \ No newline at end of file diff --git a/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_hw.h b/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_hw.h new file mode 100644 index 00000000..1e96452b --- /dev/null +++ b/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_hw.h @@ -0,0 +1,1277 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_HW_H +#define HINIC5_HW_H + +#include <linux/types.h> + +#include "mpu_inband_cmd_defs.h" +#include "hinic5_crm.h" + +#ifndef BIG_ENDIAN +#define BIG_ENDIAN 0x4321 /**< 表示大端字节序 */ +#endif + +#ifndef LITTLE_ENDIAN +#define LITTLE_ENDIAN 0x1234 /**< 表示小端字节序 */ +#endif + +#ifdef BYTE_ORDER +#undef BYTE_ORDER +#endif +/* X86 */ +#define BYTE_ORDER LITTLE_ENDIAN +/* to use 0-level CLA, page size must be: SQ 16B(wqe) * 64k(max_q_depth) */ +#define HINIC5_DEFAULT_WQ_PAGE_SIZE 0x100000 /**< 定义了默认的工作队列页面大小 */ +#define HINIC5_HW_WQ_PAGE_SIZE 0x1000 /**< 定义硬件发送队列的页面大小 */ +#define HINIC5_MAX_WQ_PAGE_SIZE_ORDER 8 /**< 定义最大工作队列页面大小 */ +#define SPU_HOST_ID 4 /**< 表示主机的ID */ + +#define HINIC5_NIC_RES BIT(RES_TYPE_NIC) /**< 表示网络接口控制器(NIC)资源 */ + +/** + * @brief enum hinic5_channel_id - 通道类型 + * @details 
定义了Hinic5通道的枚举类型 + */ +enum hinic5_channel_id { + HINIC5_CHANNEL_DEFAULT, + HINIC5_CHANNEL_COMM, + HINIC5_CHANNEL_NIC, + HINIC5_CHANNEL_ROCE, + HINIC5_CHANNEL_TOE, + HINIC5_CHANNEL_FC, + HINIC5_CHANNEL_OVS, + HINIC5_CHANNEL_DSW, + HINIC5_CHANNEL_MIG, + HINIC5_CHANNEL_CRYPT, + HINIC5_CHANNEL_UB, + HINIC5_CHANNEL_JBOF, + HINIC5_CHANNEL_MACSEC, + HINIC5_CHANNEL_DMMU, + HINIC5_CHANNEL_HIHTR, + + HINIC5_CHANNEL_MAX = 32, +}; + +/** + * @brief struct hinic5_cmd_buf - dma buffer 对象 + * @details 定义一个dma缓冲结构体 + */ +struct hinic5_cmd_buf { + void *buf; /**< va for buffer */ + dma_addr_t dma_addr; /**< dma address for buffer */ + u16 size; /**< buffer size */ + atomic_t ref_cnt; /**< buffer reference count */ +}; + +/** + * @brief struct hinic5_cmdq_cmd_param - cmdq请求对象 + * @details 存储cmdq请求的参数 + */ +struct hinic5_cmdq_cmd_param { + u8 mod; /**< mod id */ + u8 cmd; /**< cmd opcode */ + struct hinic5_cmd_buf *buf_in; /**< input buf, only support 32B/96B/160B for inline data mode */ + struct hinic5_cmd_buf *buf_out; /**< output buf writed by chip to return data */ + u64 *out_param; /**< output data, only support 8B */ +}; + +/** + * @brief enum hinic5_hwdev_init_state + * @details 定义了硬件设备初始化状态的枚举类型 + */ +enum hinic5_hwdev_init_state { + HINIC5_HWDEV_NONE_INITED = 0, /**< 设备未初始化 */ + HINIC5_HWDEV_MGMT_INITED, /**< 设备管理模块已初始化 */ + HINIC5_HWDEV_MBOX_INITED, /**< 设备mailbox模块已初始化 */ + HINIC5_HWDEV_CMDQ_INITED, /**< 设备cmdq模块已初始化 */ +}; + +/** + * @brief struct hinic5_ceq_info - 描述ceq的配置信息 + * @details NA + */ +struct hinic5_ceq_info { + u32 q_len; /**< ceq length */ + u32 page_size; /**< ceq page size */ + u16 elem_size; /**< ceq element size */ + u16 num_pages; /**< number of ceq pages*/ + u32 num_elem_in_pg; /**< number of ceq element in one page */ +}; + +/** + * @brief enum hinic5_ceq_event - ceqe的类型 + * @details 定义了Hinic5的中断事件类型 + */ +enum hinic5_ceq_event { + HINIC5_NON_L2NIC_SCQ, /**< 非L2NIC的SCQ中断事件 */ + HINIC5_NON_L2NIC_ECQ, /**< 非L2NIC的ECQ中断事件 */ + HINIC5_NON_L2NIC_NO_CQ_EQ, 
/**< 非L2NIC没有CQ的中断事件 */ + HINIC5_CMDQ, /**< 命令队列中断事件 */ + HINIC5_L2NIC_SQ, /**< L2NIC的发送队列中断事件 */ + HINIC5_L2NIC_RQ, /**< L2NIC的接收队列中断事件 */ + HINIC5_CEQ_EVENT_RSVD, /**< 保留的中断事件 */ + HINIC5_FAST_MSG_RQ, /**< 快速消息接收队列中断事件 */ + HINIC5_MAX_CEQ_EVENTS, /**< ceqe的类型个数 */ +}; + +/** + * @brief enum hinic5_mbox_seg_errcode + * @details mailbox错误码枚举类型 + */ +enum hinic5_mbox_seg_errcode { + MBOX_ERRCODE_NO_ERRORS = 0, + MBOX_ERRCODE_VF_TO_WRONG_FUNC = 0x100, /**< VF发送邮箱数据到错误的目标设备 */ + MBOX_ERRCODE_PPF_TO_WRONG_FUNC = 0x200, /**< PPF发送邮箱数据到错误的目标设备 */ + MBOX_ERRCODE_PF_TO_WRONG_FUNC = 0x300, /**< PF发送邮箱数据到错误的目标设备 */ + MBOX_ERRCODE_ZERO_DATA_SIZE = 0x400, /**< 邮箱数据大小设置为全零 */ + MBOX_ERRCODE_UNKNOWN_SRC_FUNC = 0x500, /**< 未知的原设备 */ + MBOX_ERRCODE_UNKNOWN_DES_FUNC = 0x600, /**< 未知的目标设备 */ +}; + +/** + * @brief 处理ceqe事件的回调类型 + * @param pri_handle 回调私有数据 + * @param ceqe_data ceqe数据 + * + * @return NA + */ +typedef void (*hinic5_ceq_event_cb)(void *pri_handle, u32 ceqe_data); + +/** + * @brief 定义一个名为hinic5_aeq_swe_cb的函数指针类型 + * @param pri_handle 参数为一个void类型的指针,表示私有句柄 + * @param event 参数为一个u8类型,表示事件 + * @param data 参数为一个u8类型的指针,表示数据 + * + * @return 返回值为u8类型的函数指针 + */ +typedef u8 (*hinic5_aeq_swe_cb)(void *pri_handle, u8 event, u8 *data); + +/** + * @brief 定义一个函数指针类型,用于处理虚拟函数邮箱的回调函数 + * @param pri_handle 指向私有句柄的指针 + * @param cmd 命令 + * @param buf_in 输入缓冲区 + * @param in_size 输入缓冲区大小 + * @param buf_out 输出缓冲区 + * @param out_size 输出缓冲区大小的指针 + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +typedef int (*hinic5_vf_mbox_cb)(void *pri_handle, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, u16 *out_size); + +/** + * @brief 定义一个函数指针类型,用于处理PF邮箱的回调函数 + * @param pri_handle 主句柄 + * @param vf_id 虚拟函数ID + * @param cmd 命令 + * @param buf_in 输入缓冲区 + * @param in_size 输入缓冲区大小 + * @param buf_out 输出缓冲区 + * @param out_size 输出缓冲区大小 + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +typedef int (*hinic5_pf_mbox_cb)(void *pri_handle, u16 
vf_id, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size); + +/** + * @brief 定义一个函数指针类型,用于处理PF到VF的邮箱通信 + * @param pri_handle 设备句柄 + * @param pf_idx PF索引 + * @param vf_id VF ID + * @param cmd 命令 + * @param buf_in 输入缓冲区 + * @param in_size 输入缓冲区大小 + * @param buf_out 输出缓冲区 + * @param out_size 输出缓冲区大小 + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +typedef int (*hinic5_ppf_mbox_cb)(void *pri_handle, u16 pf_idx, u16 vf_id, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size); + +/** + * @brief 定义一个函数指针类型,用于接收来自PPF邮箱的消息 + * @param pri_handle 私有句柄 + * @param cmd 命令 + * @param buf_in 输入缓冲区 + * @param in_size 输入缓冲区大小 + * @param buf_out 输出缓冲区 + * @param out_size 输出缓冲区大小 + * + * @return 返回值 + */ +typedef int (*hinic5_pf_recv_from_ppf_mbox_cb)(void *pri_handle, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size); + +/** + * @brief 定义一个函数指针类型,用于处理管理消息 + * @param pri_handle 私有句柄,用于传递给回调函数 + * @param cmd 命令码,表示需要执行的操作 + * @param buf_in 输入缓冲区,用于传递输入参数 + * @param in_size 输入缓冲区大小,用于限制输入参数的长度 + * @param buf_out 输出缓冲区,用于传递输出结果 + * @param out_size 输出缓冲区大小,用于限制输出结果的长度 + * + * @return 无 + */ +typedef void (*hinic5_mgmt_msg_cb)(void *pri_handle, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +u8 hinic5_nic_sw_aeqe_stats(void *hwdev, u8 event, u8 *data); + +/** + * @brief 注册微码产生的无状态aeqe的handler + * + * @param hwdev 设备对象指针(struct hinic5_hwdev *) + * @param pri_handle 回调函数私有数据 + * @param stateless_aeq_swe_cb 回调函数 + * + * @details NA + * + * @attention: NA + * + * @return: 描述函数返回值. 
+ * @retval 0 注册成功 + * @retval 非0 注册失败 + */ +int hinic5_register_stateless_aeqs(void *hwdev, void *pri_handle, hinic5_aeq_swe_cb stateless_aeq_swe_cb); + +/** + * @brief 注销处理微码产生的无状态aeqe的handler + * + * @param hwdev 设备对象指针(struct hinic5_hwdev *) + * + * @details NA + * + * @attention: NA + * + * @return: NA + */ +void hinic5_unregister_stateless_aeqs(void *hwdev); + +/** + * @brief hinic5_ceq_register_cb - register ceq callback + * @param hwdev: device pointer to hwdev + * @param event: event type + * @param callback: callback function + * + * @return + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_ceq_register_cb(void *hwdev, void *pri_handle, enum hinic5_ceq_event event, + hinic5_ceq_event_cb callback); +/** + * @brief hinic5_ceq_unregister_cb - unregister ceq callback + * @param hwdev: device pointer to hwdev + * @param event: event type + **/ +void hinic5_ceq_unregister_cb(void *hwdev, enum hinic5_ceq_event event); + +/** + * @brief 为PPF注册处理mailbox消息的handler + * + * @param hwdev 设备对象指针 + * @param mod 业务模块 值为HINIC5_MOD_XXX宏定义 + * @param pri_handle 回调私有数据 + * @param callback 回调 + * + * @details NA + * + * @attention: NA + * + * @return: 描述函数返回值. + * @retval 0 成功 + * @retval 非0 失败 + */ +int hinic5_register_ppf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, + hinic5_ppf_mbox_cb callback); + +/** + * @brief 为PF注册处理mailbox消息的handler + * + * @param hwdev 设备对象指针 + * @param mod 业务模块 值为HINIC5_MOD_XXX宏定义 + * @param pri_handle 回调私有数据 + * @param callback 回调 + * + * @details NA + * + * @attention: NA + * + * @return: 描述函数返回值. + * @retval 0 成功 + * @retval 非0 失败 + */ +int hinic5_register_pf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, + hinic5_pf_mbox_cb callback); + +/** + * @brief 为VF注册处理mailbox消息的handler + * + * @param hwdev 设备对象指针 + * @param mod 业务模块 值为HINIC5_MOD_XXX宏定义 + * @param pri_handle 回调私有数据 + * @param callback 回调 + * + * @details NA + * + * @attention: NA + * + * @return: 描述函数返回值. 
+ * @retval 0 成功 + * @retval 非0 失败 + */ +int hinic5_register_vf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, + hinic5_vf_mbox_cb callback); + +/** + * @brief 注销PPF的处理mailbox的回调 + * + * @param hwdev 设备对象指针 + * @param mod 业务模块 值为HINIC5_MOD_XXX宏定义 + * + * @details NA + * + * @attention: NA + * + * @return: NA + */ +void hinic5_unregister_ppf_mbox_cb(void *hwdev, u8 mod); + +/** + * @brief 注销PF的处理mailbox的回调 + * + * @param hwdev 设备对象指针 + * @param mod 业务模块 值为HINIC5_MOD_XXX宏定义 + * + * @details NA + * + * @attention: NA + * + * @return: NA + */ +void hinic5_unregister_pf_mbox_cb(void *hwdev, u8 mod); + +/** + * @brief 注销VF的处理mailbox的回调 + * + * @param hwdev 设备对象指针 + * @param mod 业务模块 值为HINIC5_MOD_XXX宏定义 + * + * @details NA + * + * @attention: NA + * + * @return: NA + */ +void hinic5_unregister_vf_mbox_cb(void *hwdev, u8 mod); + +/** + * @brief 注销PF的处理mailbox的回调 + * + * @param hwdev 设备对象指针 + * @param mod 业务模块 值为HINIC5_MOD_XXX宏定义 + * + * @details source是PPF + * + * @attention: NA + * + * @return: NA + */ +void hinic5_unregister_ppf_to_pf_mbox_cb(void *hwdev, u8 mod); + +/** + * @brief 为function注册处理mailbox消息的handler + * + * @param hwdev 设备对象指针 + * @param mod 业务模块 值为HINIC5_MOD_XXX宏定义 + * @param pri_handle 回调私有数据 + * @param callback 回调 + * + * @details source为MPU + * + * @attention: NA + * + * @return: 描述函数返回值. 
+ * @retval 0 成功 + * @retval 非0 失败 + */ +int hinic5_register_mgmt_msg_cb(void *hwdev, u8 mod, void *pri_handle, + hinic5_mgmt_msg_cb callback); + +/** + * @brief 注销function的处理mailbox的回调 + * + * @param hwdev 设备对象指针 + * @param mod 业务模块 值为HINIC5_MOD_XXX宏定义 + * + * @details source是MPU + * + * @attention: NA + * + * @return: NA + */ +void hinic5_unregister_mgmt_msg_cb(void *hwdev, u8 mod); + +/** + * @brief hinic5_register_service_adapter - register service adapter + * @param hwdev: device pointer to hwdev + * @param service_adapter: service adapter + * @param type: service type + * + * @return + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_register_service_adapter(void *hwdev, void *service_adapter, + enum hinic5_service_type type); + +/** + * @brief hinic5_unregister_service_adapter - unregister service adapter + * @param hwdev: device pointer to hwdev + * @param type: service type + **/ +void hinic5_unregister_service_adapter(void *hwdev, + enum hinic5_service_type type); + +/** + * @brief hinic5_get_service_adapter - get service adapter + * @param hwdev: device pointer to hwdev + * @param type: service type + * + * @return + * @retval non-zero: success + * @retval null: failure + */ +void *hinic5_get_service_adapter(void *hwdev, enum hinic5_service_type type); + +/** + * @brief hinic5_alloc_db_phy_addr - alloc doorbell & direct wqe pyhsical addr + * @param hwdev: device pointer to hwdev + * @param db_base: pointer to alloc doorbell base address + * @param dwqe_base: pointer to alloc direct base address + * + * @return + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_alloc_db_phy_addr(void *hwdev, u64 *db_base, u64 *dwqe_base); + +/** + * @brief hinic5_free_db_phy_addr - free doorbell & direct wqe physical address + * @param hwdev: device pointer to hwdev + * @param db_base: pointer to free doorbell base address + * @param dwqe_base: pointer to free direct base address + **/ +void hinic5_free_db_phy_addr(void *hwdev, 
u64 db_base, u64 dwqe_base); + +/** + * @brief hinic5_alloc_db_addr - alloc doorbell & direct wqe + * @param hwdev: device pointer to hwdev + * @param db_base: pointer to alloc doorbell base address + * @param dwqe_base: pointer to alloc direct base address + * + * @return + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_alloc_db_addr(void *hwdev, void __iomem **db_base, + void __iomem **dwqe_base); + +/** + * @brief hinic5_free_db_addr - free doorbell & direct wqe + * @param hwdev: device pointer to hwdev + * @param db_base: pointer to free doorbell base address + * @param dwqe_base: pointer to free direct base address + **/ +void hinic5_free_db_addr(void *hwdev, const void __iomem *db_base, + void __iomem *dwqe_base); + +/** + * @brief hinic5_alloc_db_phy_addr - alloc physical doorbell & direct wqe + * @param hwdev: device pointer to hwdev + * @param db_base: pointer to alloc doorbell base address + * @param dwqe_base: pointer to alloc direct base address + * + * @return + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_alloc_db_phy_addr(void *hwdev, u64 *db_base, u64 *dwqe_base); + +/** + * @brief hinic5_free_db_phy_addr - free physical doorbell & direct wqe + * @param hwdev: device pointer to hwdev + * @param db_base: free doorbell base address + * @param dwqe_base: free direct base address + **/ + +void hinic5_free_db_phy_addr(void *hwdev, u64 db_base, u64 dwqe_base); + +/** + * @brief hinic5_set_root_ctxt - set root context + * @param hwdev: device pointer to hwdev + * @param rq_depth: rq depth + * @param sq_depth: sq depth + * @param rx_buf_sz: rx buffer size + * @param channel: channel id + * + * @return + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_set_root_ctxt(void *hwdev, u32 rq_depth, u32 sq_depth, + u16 rx_buf_sz, u16 channel); + +/** + * @brief hinic5_clean_root_ctxt - clean root context + * @param hwdev: device pointer to hwdev + * @param channel: channel id + * + * @return + 
* @retval zero: success + * @retval non-zero: failure + */ +int hinic5_clean_root_ctxt(void *hwdev, u16 channel); + +/** + * @brief hinic5_alloc_cmd_buf - alloc cmd buffer + * @param hwdev: device pointer to hwdev + * + * @return + * @retval non-zero: success + * @retval null: failure + * @note + * 申请到的内存不支持默认清零, 调用者根据需求进行清零 + */ +struct hinic5_cmd_buf *hinic5_alloc_cmd_buf(void *hwdev); + +/** + * @brief hinic5_free_cmd_buf - free cmd buffer + * @param hwdev: device pointer to hwdev + * @param cmd_buf: cmd buffer to free + **/ +void hinic5_free_cmd_buf(void *hwdev, struct hinic5_cmd_buf *cmd_buf); + +/** + * @brief 读取16位计数器的值 + * @param hwdev 硬件设备上下文 + * @param node 节点号 + * @param instance 实例号 + * @param ctr_id 计数器ID + * @param value 存储读取到的计数器值的指针 + * + * @return 0 成功,其他值表示失败 + */ +int hinic5_sm_ctr_rd16(void *hwdev, u8 node, u8 instance, u32 ctr_id, u16 *value); + +/** + * @brief hinic5_sm_ctr_rd32 - small single 32 counter read + * @param hwdev: device pointer to hwdev + * @param node: the node id + * @param instance: instance id + * @param ctr_id: counter id + * @param value: read counter value ptr + * + * @return + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_sm_ctr_rd32(void *hwdev, u8 node, u8 instance, u32 ctr_id, + u32 *value); +/** + * @brief hinic5_sm_ctr_rd32_clear - small single 32 counter read clear + * @param hwdev: device pointer to hwdev + * @param node: the node id + * @param instance: instance id + * @param ctr_id: counter id + * @param value: read counter value ptr + * + * @return + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_sm_ctr_rd32_clear(void *hwdev, u8 node, u8 instance, + u32 ctr_id, u32 *value); + +/** + * @brief hinic5_sm_ctr_rd64_pair - big pair 128 counter read + * @param hwdev: device pointer to hwdev + * @param node: the node id + * @param instance: instance id + * @param ctr_id: counter id + * @param value1: read counter value ptr + * @param value2: read counter value ptr + * + 
* @return + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_sm_ctr_rd64_pair(void *hwdev, u8 node, u8 instance, + u32 ctr_id, u64 *value1, u64 *value2); + +/** + * @brief 读取并清除64位计数器对的值 + * @param hwdev 硬件设备上下文 + * @param node 节点号 + * @param instance 实例号 + * @param ctr_id 计数器ID + * @param value1 存储读取到的64位计数器对的第一个值的指针 + * @param value2 存储读取到的64位计数器对的第二个值的指针 + * + * @return 0 成功,其他值 失败 + */ +int hinic5_sm_ctr_rd64_pair_clear(void *hwdev, u8 node, u8 instance, + u32 ctr_id, u64 *value1, u64 *value2); + +/** + * @brief hinic5_sm_ctr_rd64 - big counter 64 read + * @param hwdev: device pointer to hwdev + * @param node: the node id + * @param instance: instance id + * @param ctr_id: counter id + * @param value: read counter value ptr + * + * @return + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_sm_ctr_rd64(void *hwdev, u8 node, u8 instance, u32 ctr_id, + u64 *value); + +/** + * @brief 读取并清除64位计数器的值 + * @param hwdev 硬件设备的句柄 + * @param node 节点号 + * @param instance 实例号 + * @param ctr_id 计数器ID + * @param value 存储读取到的计数器值的指针 + * + * @return 0 成功,其他值表示失败 + */ +int hinic5_sm_ctr_rd64_clear(void *hwdev, u8 node, u8 instance, + u32 ctr_id, u64 *value); + +/** + * @brief hinic5_api_csr_rd32 - read 32 bit csr + * @param hwdev: device pointer to hwdev + * @param dest: hardware node id + * @param addr: reg address + * @param val: reg value + * + * @return + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_api_csr_rd32(void *hwdev, u8 dest, u32 addr, u32 *val); + +/** + * @brief hinic5_api_csr_wr32 - write 32 bit csr + * @param hwdev: device pointer to hwdev + * @param dest: hardware node id + * @param addr: reg address + * @param val: reg value + * + * @return + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_api_csr_wr32(void *hwdev, u8 dest, u32 addr, u32 val); + +/** + * @brief hinic5_api_csr_rd64 - read 64 bit csr + * @param hwdev: device pointer to hwdev + * @param dest: hardware node 
id + * @param addr: reg address + * @param val: reg value + * + * @return + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_api_csr_rd64(void *hwdev, u8 dest, u32 addr, u64 *val); + +/** + * @brief hinic5_dbg_get_hw_stats - get hardware stats + * @param hwdev: device pointer to hwdev + * @param hw_stats: pointer to memory caller to alloc + * @param out_size: out size + * + * @return + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_dbg_get_hw_stats(const void *hwdev, u8 *hw_stats, const u32 *out_size); + +/** + * @brief hinic5_dbg_clear_hw_stats - clear hardware stats + * @param hwdev: device pointer to hwdev + * @return clear hardware size + */ +u16 hinic5_dbg_clear_hw_stats(void *hwdev); + +/** + * @brief hinic5_get_chip_fault_stats - get chip fault stats + * @param hwdev: device pointer to hwdev + * @param chip_fault_stats: pointer to memory caller to alloc + * @param offset: offset + */ +void hinic5_get_chip_fault_stats(const void *hwdev, u8 *chip_fault_stats, + u32 offset); + +/** + * @brief hinic5_msg_to_mgmt_sync - msg to management cpu + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param cmd: cmd + * @param buf_in: message buffer in + * @param in_size: in buffer size + * @param buf_out: message buffer out + * @param out_size: out buffer size + * @param timeout: timeout + * @param channel: channel id + * + * @return + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_msg_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, + u32 timeout, u16 channel); + +/** + * @brief hinic5_msg_to_mgmt_async - msg to management cpu async + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param cmd: cmd + * @param buf_in: message buffer in + * @param in_size: in buffer size + * @param channel: channel id + * + * @details The function does not sleep inside, allowing use in irq context + * @return + * @retval zero: 
success + * @retval non-zero: failure + */ +int hinic5_msg_to_mgmt_async(void *hwdev, u8 mod, u16 cmd, const void *buf_in, + u16 in_size, u16 channel); + +/** + * @brief hinic5_msg_to_mgmt_no_ack - msg to management cpu, no ack needed + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param cmd: cmd + * @param buf_in: message buffer in + * @param in_size: in buffer size + * @param channel: channel id + * + * @details The function will sleep inside, and it is not allowed to be used in interrupt context + * @return 发送是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_msg_to_mgmt_no_ack(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, u16 channel); + +/** + * @brief 向管理处理链发送异步消息 + * @param hwdev 硬件设备上下文 + * @param mod 消息模块 + * @param cmd 消息命令 + * @param buf_in 输入缓冲区 + * @param in_size 输入缓冲区大小 + * + * @return 发送是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_msg_to_mgmt_api_chain_async(void *hwdev, u8 mod, u16 cmd, + const void *buf_in, u16 in_size); + +int hinic5_msg_to_mgmt_api_chain_sync(void *hwdev, u8 mod, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout); + +/** + * @brief hinic5_mbox_to_pf - vf mbox message to pf + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param cmd: cmd + * @param buf_in: message buffer in + * @param in_size: in buffer size + * @param buf_out: message buffer out + * @param out_size: out buffer size + * @param timeout: timeout + * @param channel: channel id + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_mbox_to_pf(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, + u32 timeout, u16 channel); + +/** + * @brief hinic5_mbox_to_vf - mbox message to vf + * @param hwdev: device pointer to hwdev + * @param vf_id: vf index + * @param mod: mod type + * @param cmd: cmd + * @param buf_in: message buffer in + * @param 
in_size: in buffer size + * @param buf_out: message buffer out + * @param out_size: out buffer size + * @param timeout: timeout + * @param channel: channel id + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_mbox_to_vf(void *hwdev, u16 vf_id, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout, + u16 channel); + +/** + * @brief hinic5_mbox_to_vf_without_ack - mbox message to vf without ack + * @param hwdev: device pointer to hwdev + * @param vf_id: vf index + * @param mod: mod type + * @param cmd: cmd + * @param buf_in: message buffer in + * @param in_size: in buffer size + * @param channel: channel id + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_mbox_to_vf_without_ack(void *hwdev, u16 vf_id, u8 mod, u16 cmd, + void *buf_in, u16 in_size, u16 channel); + +/** + * @brief hinic5_mbox_to_vf_no_ack - mbox message to vf no ack + * @param hwdev: device pointer to hwdev + * @param vf_id: vf index + * @param mod: mod type + * @param cmd: cmd + * @param buf_in: message buffer in + * @param in_size: in buffer size + * @param buf_out: message buffer out + * @param out_size: out buffer size + * @param channel: channel id + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_mbox_to_vf_no_ack(void *hwdev, u16 vf_id, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u16 channel); + +int hinic5_clp_to_mgmt(void *hwdev, u8 mod, u16 cmd, const void *buf_in, + u16 in_size, void *buf_out, u16 *out_size); +/** + * @brief hinic5_cmdq_async - cmdq asynchronous message + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param cmd: cmd + * @param buf_in: message buffer in + * @param channel: channel id + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_cmdq_async(void *hwdev, u8 mod, u8 cmd, struct hinic5_cmd_buf *buf_in, u16 channel); 
+ +/** + * @brief hinic5_cmdq_direct_resp - cmdq direct message response + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param cmd: cmd + * @param buf_in: message buffer in + * @param out_param: message out + * @param timeout: timeout + * @param channel: channel id + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_cmdq_direct_resp(void *hwdev, u8 mod, u8 cmd, + struct hinic5_cmd_buf *buf_in, + u64 *out_param, u32 timeout, u16 channel); + +/** + * @brief hinic5_cmdq_detail_resp - cmdq detail message response + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param cmd: cmd + * @param buf_in: message buffer in + * @param buf_out: message buffer out + * @param out_param: inline output data + * @param timeout: timeout + * @param channel: channel id + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_cmdq_detail_resp(void *hwdev, u8 mod, u8 cmd, + struct hinic5_cmd_buf *buf_in, + struct hinic5_cmd_buf *buf_out, + u64 *out_param, u32 timeout, u16 channel); + +/** + * @brief hinic5_cmdq_inline_data - cmdq with inline data + * @param hwdev: device pointer to hwdev + * @param cmd_param: cmd info, see struct hinic5_cmdq_cmd_param + * @param timeout: timeout + * @param channel: channel id + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_cmdq_inline_data(void *hwdev, struct hinic5_cmdq_cmd_param *cmd_param, + u32 timeout, u16 channel); + +/** + * @brief hinic5_cos_id_detail_resp - cmdq detail message response + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param cmd: cmd + * @param cos_id: cos id + * @param buf_in: message buffer in + * @param buf_out: message buffer out + * @param out_param: inline output data + * @param timeout: timeout + * @param channel: channel id + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int 
hinic5_cos_id_detail_resp(void *hwdev, u8 mod, u8 cmd, u8 cos_id, + struct hinic5_cmd_buf *buf_in, + struct hinic5_cmd_buf *buf_out, + u64 *out_param, u32 timeout, u16 channel); + +/** + * @brief hinic5_ppf_tmr_start - start ppf timer + * @param hwdev: device pointer to hwdev + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_ppf_tmr_start(void *hwdev); + +/** + * @brief hinic5_ppf_tmr_stop - stop ppf timer + * @param hwdev: device pointer to hwdev + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_ppf_tmr_stop(void *hwdev); + +/** + * @brief hinic5_func_tmr_bitmap_set - set timer bitmap status + * @param hwdev: device pointer to hwdev + * @param func_id: global function index + * @param en: 0-disable, 1-enable + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_func_tmr_bitmap_set(void *hwdev, u16 func_id, bool en); + +/** + * @brief hinic5_func_vio_en - set current function VIO to enabled/disabled + * @param hwdev: device pointer to hwdev + * @param en: 0-disable, 1-enable + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_func_vio_en(void *hwdev, bool en); + +/** + * @brief hinic5_get_board_info - get board info + * @param hwdev: device pointer to hwdev + * @param info: board info + * @param channel: channel id + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_get_board_info(void *hwdev, struct hinic5_board_info *info, + u16 channel); + +/** + * @brief hinic5_set_wq_page_size - set work queue page size + * @param hwdev: device pointer to hwdev + * @param func_idx: function id + * @param page_size: page size + * @param channel: channel id + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_set_wq_page_size(void *hwdev, u16 func_idx, u32 page_size, + u16 channel); + +/** + * @brief hinic5_event_callback - 
event callback to notify service driver + * @param hwdev: device pointer to hwdev + * @param event: event info to service driver + */ +void hinic5_event_callback(void *hwdev, struct hinic5_event_info *event); + +/** + * @brief hinic5_dbg_lt_rd_16byte - linear table read + * @param hwdev: device pointer to hwdev + * @param dest: destination id + * @param instance: instance id + * @param lt_index: linear table index id + * @param data: data + */ +int hinic5_dbg_lt_rd_16byte(void *hwdev, u8 dest, u8 instance, + u32 lt_index, u8 *data); + +/** + * @brief hinic5_dbg_lt_wr_16byte_mask - linear table write + * @param hwdev: device pointer to hwdev + * @param dest: destination id + * @param instance: instance id + * @param lt_index: linear table index id + * @param data: data + * @param mask: mask + */ +int hinic5_dbg_lt_wr_16byte_mask(void *hwdev, u8 dest, u8 instance, + u32 lt_index, u8 *data, u16 mask); + +/** + * @brief hinic5_link_event_stats - link event stats + * @param dev: device pointer to hwdev + * @param link: link status + */ +void hinic5_link_event_stats(void *dev, u8 link); + +/** + * @brief hinic5_get_link_down_cnt - get link down count + * @param dev: device pointer to hwdev + * @param link_down_cnt: pointer to store link down count + */ +int hinic5_get_link_down_cnt(void *dev, int *link_down_cnt); + +/** + * @brief hinic5_get_hw_pf_infos - get pf infos + * @param hwdev: device pointer to hwdev + * @param infos: pf infos + * @param channel: channel id + */ +int hinic5_get_hw_pf_infos(void *hwdev, struct hinic5_hw_pf_infos *infos, + u16 channel); + +/** + * @brief hinic5_func_reset - reset func + * @param dev: device pointer to hwdev + * @param func_id: global function index + * @param reset_flag: reset flag + * @param channel: channel id + */ +int hinic5_func_reset(void *dev, u16 func_id, u64 reset_flag, u16 channel); + +/** + * @brief 获取PPF(Physical Function,物理功能)定时器配置 + * @param hwdev 硬件设备上下文 + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int 
hinic5_get_ppf_timer_cfg(void *hwdev); + +/** + * @brief 设置PCI设备的BDF(总线、设备、功能)信息 + * @param hwdev 硬件设备上下文 + * @param bus PCI总线号 + * @param device PCI设备号 + * @param function PCI功能号 + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_set_bdf_ctxt(void *hwdev, u8 bus, u8 device, u8 function); + +/** + * @brief 初始化函数邮箱消息通道 + * @param hwdev 硬件设备 + * @param num_func 函数数量 + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_init_func_mbox_msg_channel(void *hwdev, u16 num_func); + +/** + * @brief 获取SML表信息 + * @param hwdev 硬件设备信息 + * @param tbl_id 表ID + * @param node_id 节点ID + * @param instance_id 实例ID + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_get_sml_table_info(void *hwdev, u32 tbl_id, u8 *node_id, u8 *instance_id); + +/** + * @brief 从PPF发送邮箱消息到主机 + * @param hwdev 硬件设备 + * @param mod 消息模块 + * @param cmd 消息命令 + * @param host_id 主机ID + * @param buf_in 输入缓冲区 + * @param in_size 输入缓冲区大小 + * @param buf_out 输出缓冲区 + * @param out_size 输出缓冲区大小 + * @param timeout 超时时间 + * @param channel 通道 + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_mbox_ppf_to_host(void *hwdev, u8 mod, u16 cmd, u8 host_id, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout, u16 channel); + +/** + * @brief 强制完成所有的操作 + * @param dev 设备指针 + * + * @return 无 + */ +void hinic5_force_complete_all(void *dev); +/** + * @brief 获取CEQ页面的物理地址 + * @param hwdev 硬件设备上下文 + * @param q_id 队列ID + * @param page_idx 页面索引 + * @param page_phy_addr 页面物理地址 + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_get_ceq_page_phy_addr(void *hwdev, u16 q_id, + u16 page_idx, u64 *page_phy_addr); + /** + * @brief 禁用中断请求 + * @param hwdev 硬件设备 + * @param q_id 队列ID + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_set_ceq_irq_disable(void *hwdev, u16 q_id); +/** + * 
@brief 获取ceq信息 + * @param hwdev 硬件设备信息 + * @param q_id 队列ID + * @param ceq_info ceq信息 + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_get_ceq_info(void *hwdev, u16 q_id, struct hinic5_ceq_info *ceq_info); + +/** + * @brief hinic5_init_single_ceq_status + * @param hwdev: device pointer to hwdev + * @param q_id: ceq id + */ +int hinic5_init_single_ceq_status(void *hwdev, u16 q_id); + +/** + * @brief 设置API停止 + * @param hwdev 硬件设备指针 + * + * @return 无 + */ +void hinic5_set_api_stop(void *hwdev); + +/** + * @brief 激活固件 + * @param hwdev 硬件设备 + * @param cfg_index 配置索引 + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_activate_firmware(void *hwdev, u8 cfg_index); +/** + * @brief hinic5_switch_config函数的主要功能是切换固件版本 + * @param hwdev 设备句柄 + * @param cfg_index 配置索引 + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_switch_config(void *hwdev, u8 cfg_index); + +enum hinic5_hw_type { + HINIC5_HW_TYPE_FPGA = 0, + HINIC5_HW_TYPE_ASIC = 1, + HINIC5_HW_TYPE_EMU = 2, + HINIC5_HW_TYPE_EDA = 3, + HINIC5_HW_TYPE_INVALID = 0xff, +}; + +/** + * @brief 获取设备硬件类型 + * + * @param hwdev 设备对象指针 + * + * @details NA + * + * @attention: NA + * + * @return: 描述函数返回值. 
+ * @retval HINIC5_HW_TYPE_FPGA fpga类型 + * @retval HINIC5_HW_TYPE_ASIC asic类型 + * @retval HINIC5_HW_TYPE_EMU emu类型 + * @retval HINIC5_HW_TYPE_EDA eda类型 + * @retval HINIC5_HW_TYPE_INVALID 无效类型 + */ +u8 hinic5_get_hw_type(void *hwdev); + +#endif diff --git a/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_lld.h b/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_lld.h new file mode 100644 index 00000000..183fd03c --- /dev/null +++ b/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_lld.h @@ -0,0 +1,399 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_LLD_H +#define HINIC5_LLD_H + +#include <linux/pci.h> +#include "hinic5_crm.h" + +/** + * @brief function device bus type + * @details NA + */ +enum hinic5_dev_type { + HINIC5_DEVICE_T_PCI, /**< 设备通过pci bus连接 */ + HINIC5_DEVICE_T_UB, /**< 设备通过ub bus连接 */ + HINIC5_DEVICE_T_MAX, /**< 支持的bus类型个数 */ +}; + +#define HINIC5_CARD_ID_OFFSET 16 +#define HINIC5_CARD_ID_MASK 0xffffffffffff +#define HINIC5_GUID_LEN (16) +/** + * @brief guid + * @details NA + */ +struct hinic5_guid { + u8 id[HINIC5_GUID_LEN]; +}; + +/** + * @brief 每个function的设备信息 + * @details NA + */ +struct hinic5_device_info { + u64 id; /* DPU唯一性标识编号 */ + + struct hinic5_guid guid; +}; + +/** + * @brief struct hinic5_lld_dev - 暴露给uld的设备对象 + * @details uld使用的设备对象,包含设备类型、硬件设备指针 + */ +struct hinic5_lld_dev { + void *hwdev; /**< sdk驱动内部的硬件设备指针 */ + struct device *dev; /**< 关联的struct device */ + enum hinic5_dev_type dev_type; /**< device bus type */ +}; + +/** + * @brief struct hinic5_uld_info + * @details 定义了一个结构体,用于存储用户层驱动(uld)的信息 + */ +struct hinic5_uld_info { + /* When the function does not need to initialize the corresponding uld, + * @probe needs to return 0 and uld_dev is set to NULL; + * if uld_dev is NULL, @remove will not be called when uninstalling + */ + int (*probe)(struct hinic5_lld_dev *lld_dev, void **uld_dev, char *uld_dev_name); /**< 初始化用户层驱动的函数 */ + void 
(*remove)(struct hinic5_lld_dev *lld_dev, void *uld_dev); /**< 移除用户层驱动的函数 */ + int (*suspend)(struct hinic5_lld_dev *lld_dev, void *uld_dev, pm_message_t state); /**< 挂起用户层驱动的函数 */ + int (*resume)(struct hinic5_lld_dev *lld_dev, void *uld_dev); /**< 恢复用户层驱动的函数 */ + void (*event)(struct hinic5_lld_dev *lld_dev, void *uld_dev, /**< 处理事件的函数 */ + struct hinic5_event_info *event); + int (*ioctl)(void *uld_dev, u32 cmd, const void *buf_in, u32 in_size, /**< 执行ioctl操作的函数 */ + void *buf_out, u32 *out_size); +}; + +/** + * @brief hinic5_get_card_nic_uld_array - get nic uld array + * @param lld_dev: device pointer to pcie + * @param dev_cnt: uld cnt + * @param array: uld array + * + * @return + * @retval zero: success + * @retval non-zero: failure + */ + +int hinic5_get_card_nic_uld_array(struct hinic5_lld_dev *lld_dev, u32 *dev_cnt, void *array[]); + +/** + * @brief 注册用户层驱动 + * @param type 服务类型 + * @param uld_info 用户层驱动信息 + * + * @details 此函数用于注册用户层驱动,根据提供的服务类型和用户层驱动信息进行注册 + * + * @return + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_register_uld(enum hinic5_service_type type, struct hinic5_uld_info *uld_info); + +/** + * @brief 注销用户定义的上层驱动模块 + * @param type 服务类型枚举 + * + * @details 此函数用于注销用户定义的上层驱动模块 + * + * @return 无 + */ +void hinic5_unregister_uld(enum hinic5_service_type type); + +/** + * @brief 等待LLD设备节点的改变完成 + * + * @details 在调用此函数前,需要确保没有任何设备节点正在改变 + * + * @return 无 + */ +void lld_hold(void); +/** + * @brief 此函数用于释放全局锁 + * + * @details 通过原子操作递减设备引用计数,如果引用计数为0,则表示没有设备在使用该锁,可以进行释放 + * + * @return 无 + */ +void lld_put(void); + +/** + * @brief hinic5_get_lld_dev_by_chip_name - get lld device by chip name + * @param chip_name: chip name + * + * @details The value of lld_dev reference increases when lld_dev is obtained. The caller needs + * to release the reference by calling hinic5_lld_dev_put. 
+ * + * @return 返回lld设备 + */ +struct hinic5_lld_dev *hinic5_get_lld_dev_by_chip_name(const char *chip_name); + +/** + * @brief hinic5_lld_dev_hold - get reference to lld_dev + * @param dev: lld device + * + * @details Hold reference to device to keep it from being freed + */ +void hinic5_lld_dev_hold(struct hinic5_lld_dev *dev); + +/** + * @brief hinic5_lld_dev_put - release reference to lld_dev + * @param dev: lld device + * + * @details Release reference to device to allow it to be freed + */ +void hinic5_lld_dev_put(struct hinic5_lld_dev *dev); + +/** + * @brief hinic5_get_lld_dev_by_dev_name - get lld device by uld device name + * @param dev_name: uld device name + * @param type: uld service type, When the type is SERVICE_T_MAX, try to match all ULD names to get uld_dev + * + * @details The value of lld_dev reference increases when lld_dev is obtained. The caller needs + * to release the reference by calling hinic5_lld_dev_put. + * + * @return 如果成功,返回LLD设备,否则返回NULL + */ +struct hinic5_lld_dev *hinic5_get_lld_dev_by_dev_name(const char *dev_name, + enum hinic5_service_type type); + +/** + * @brief hinic5_get_lld_dev_by_dev_name_unsafe - get lld device by uld device name + * @param dev_name: uld device name + * @param type: uld service type, When the type is SERVICE_T_MAX, try to match all ULD names to get uld_dev + * + * @details hinic5_get_lld_dev_by_dev_name_unsafe() is completely analogous to + * hinic5_get_lld_dev_by_dev_name(), The only difference is that the reference + * of lld_dev is not increased when lld_dev is obtained. + * The caller must ensure that lld_dev will not be freed during the remove process + * when using lld_dev. 
+ * + * @return 如果成功,返回LLD设备,否则返回NULL + */ +struct hinic5_lld_dev *hinic5_get_lld_dev_by_dev_name_unsafe(const char *dev_name, + enum hinic5_service_type type); + +/** + * @brief hinic5_get_lld_dev_by_chip_and_port - get lld device by chip name and port id + * @param chip_name: chip name + * @param port_id: port id + * + * @return 如果成功,返回LLD设备,否则返回NULL + */ +struct hinic5_lld_dev *hinic5_get_lld_dev_by_chip_and_port(const char *chip_name, u8 port_id); + +/** + * @brief hinic5_get_lld_dev_with_l3i_enabled - get lld device which enables BAT L3I + * @param chip_name: chip name + * + * @return 如果成功,返回LLD设备,否则返回NULL + */ +struct hinic5_lld_dev *hinic5_get_lld_dev_with_l3i_enabled(const char *chip_name); + +/** + * @brief hinic5_get_ppf_lld_dev - get ppf lld device by current function's lld device + * @param lld_dev: current function's lld device + * + * @details The value of lld_dev reference increases when lld_dev is obtained. The caller needs + * to release the reference by calling hinic5_lld_dev_put. + * + * @return 如果成功,返回LLD设备,否则返回NULL + */ +struct hinic5_lld_dev *hinic5_get_ppf_lld_dev(struct hinic5_lld_dev *lld_dev); + +/** + * @brief hinic5_get_ppf_lld_dev_unsafe - get ppf lld device by current function's lld device + * @param lld_dev: current function's lld device + * + * @details hinic5_get_ppf_lld_dev_unsafe() is completely analogous to hinic5_get_ppf_lld_dev(), + * The only difference is that the reference of lld_dev is not increased when lld_dev is obtained. + * The caller must ensure that ppf's lld_dev will not be freed during the remove process + * when using ppf lld_dev. 
+ * + * @return 如果成功,返回LLD设备,否则返回NULL + */ +struct hinic5_lld_dev *hinic5_get_ppf_lld_dev_unsafe(struct hinic5_lld_dev *lld_dev); + +/** + * @brief hinic5_get_ppf_hw_dev_unsafe - get any ppf hw device in current host by current function's hw device + * @param hwdev: current function's hw device + * + * @details The caller must ensure that ppf's hw_dev will not be freed during the remove process + * when using ppf hw_dev. + */ +void *hinic5_get_ppf_hw_dev_unsafe(void *hwdev); + +/** + * @brief hinic5_uld_dev_hold - get reference to uld_dev + * @param lld_dev: lld device + * @param type: uld service type + * + * @details Hold reference to uld device to keep it from being freed + */ +void hinic5_uld_dev_hold(struct hinic5_lld_dev *lld_dev, enum hinic5_service_type type); + +/** + * @brief hinic5_uld_dev_put - release reference to lld_dev + * @param dev: lld device + * @param type: uld service type + * + * @details Release reference to uld device to allow it to be freed + */ +void hinic5_uld_dev_put(struct hinic5_lld_dev *lld_dev, enum hinic5_service_type type); + +/** + * @brief hinic5_get_uld_dev - get uld device by lld device + * @param lld_dev: lld device + * @param type: uld service type + * + * @details The value of uld_dev reference increases when uld_dev is obtained. The caller needs + * to release the reference by calling hinic5_uld_dev_put. + */ +void *hinic5_get_uld_dev(struct hinic5_lld_dev *lld_dev, enum hinic5_service_type type); + +/** + * @brief hinic5_get_uld_dev_unsafe - get uld device by lld device + * @param lld_dev: lld device + * @param type: uld service type + * + * @details hinic5_get_uld_dev_unsafe() is completely analogous to hinic5_get_uld_dev(), + * The only difference is that the reference of uld_dev is not increased when uld_dev is obtained. + * The caller must ensure that uld_dev will not be freed during the remove process when using uld_dev. 
+ */ +void *hinic5_get_uld_dev_unsafe(struct hinic5_lld_dev *lld_dev, enum hinic5_service_type type); + +/** + * @brief hinic5_get_chip_name - get chip name by lld device + * @param lld_dev: lld device + * @param chip_name: String for storing the chip name + * @param max_len: Maximum number of characters to be copied for chip_name + * + * @return 0 成功,其他值 失败 + */ +int hinic5_get_chip_name(struct hinic5_lld_dev *lld_dev, char *chip_name, u16 max_len); + +/** + * @brief 获取SDK硬件设备 + * @param lld_dev 低层驱动设备 + * + * @return 返回SDK硬件设备 + */ +void *hinic5_get_sdk_hwdev_by_lld(struct hinic5_lld_dev *lld_dev); + +/** + * @brief 设置VF业务(service)的使能开关,仅PF调用 + * @param lld_dev 设备结构体指针 + * @param service 服务类型 + * @param vf_srv_load 是否启用虚拟函数服务负载 + * + * @return 返回0表示成功,否则返回错误码 + */ +int hinic5_set_vf_service_load(struct hinic5_lld_dev *lld_dev, u16 service, + bool vf_srv_load); + +/** + * @brief 设置该服务的VF加载使能标志位 + * @param lld_dev 物理设备 + * @param vf_func_id 虚拟函数ID + * @param service 服务类型 + * @param en 是否启用 + * + * @return 返回0表示成功,否则返回错误码 + */ +int hinic5_set_vf_service_state(struct hinic5_lld_dev *lld_dev, u16 vf_func_id, + u16 service, bool en); + +/** + * @brief 设置VF的加载使能标志 + * @param lld_dev 设备结构体指针 + * @param vf_load_state 虚拟函数加载状态 + * + * @return 返回0表示成功,否则返回错误码 + */ +int hinic5_set_vf_load_state(struct hinic5_lld_dev *lld_dev, bool vf_load_state); + +/** + * @brief 绑定NIC设备 + * @param lld_dev 低层设备结构体指针 + * + * @return 返回0表示成功,否则返回错误码 + */ +int hinic5_attach_nic(struct hinic5_lld_dev *lld_dev); + +/** + * @brief 解除网卡 + * @param lld_dev 设备的底层驱动信息 + * + * @return 无 + */ +void hinic5_detach_nic(const struct hinic5_lld_dev *lld_dev); + +/** + * @brief 将指定的服务类型附加到设备上 + * @param lld_dev 设备的底层驱动信息 + * @param type 服务类型 + * + * @return 成功返回0,失败返回错误码 + */ +int hinic5_attach_service(const struct hinic5_lld_dev *lld_dev, enum hinic5_service_type type); +/** + * @brief hinic5_detach_service 函数的作用是解除服务 + * @param lld_dev 设备的底层驱动信息 + * @param type 服务类型 + * + * @return 无 + */ +void 
hinic5_detach_service(const struct hinic5_lld_dev *lld_dev, enum hinic5_service_type type); + +/** + * @brief 注销uld前调用,执行cleanup回调 + * + * @param type ULD服务类型 + * @param cleanup 回调 + * + * @details 加载了ULD的所有function,按顺序调用cleanup回调 + * + * @attention: NA + * + * @return: NA + */ +void hinic5_uld_cleanup_before_unregister(enum hinic5_service_type type, void (*cleanup)(void *)); + +/** + * @brief hinic5_get_vf_num 函数的作用是获取pci/ub设备使能的vf数 + * @param lld_dev 设备的底层驱动信息 + * + * @return 使能的vf数 + */ +int hinic5_get_vf_num(struct hinic5_lld_dev *lld_dev); + +/** + * @brief hinic5_get_chip_node_id 函数的作用是获取pci/ub设备归属的chip_node id + * @param lld_dev 设备的底层驱动信息 + * + * @return chip_node id + */ +int hinic5_get_chip_node_id(struct hinic5_lld_dev *lld_dev, u64 *chip_node_id); + +/** + * @brief 获取设备信息 + * + * @param[in] lld_dev 设备 + * @param[out] info 返回设备信息 + * + * @details NULL + * + * @attention: NULL + * + * @return: 描述函数返回值. + * @retval 0 成功 + * @retval 非0 错误码 + */ +int hinic5_get_device_info(struct hinic5_lld_dev *lld_dev, struct hinic5_device_info *info); +int hinic5_lld_init(void); +void hinic5_lld_exit(void); + +#endif diff --git a/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_mt.h b/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_mt.h new file mode 100644 index 00000000..ba52d00d --- /dev/null +++ b/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_mt.h @@ -0,0 +1,750 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_MT_H +#define HINIC5_MT_H + +#include <linux/types.h> + +#ifdef __HIFC__ /**< 如果定义了__HIFC__宏 */ +#define HINIC5_DRV_NAME "hifc3" /**< 定义驱动名为 hifc3 */ +#define HINIC5_CHIP_NAME "hifc" /**< 定义芯片名为 hifc */ +#else /**< 如果没有定义__HIFC__宏 */ +#define HINIC5_DRV_NAME "hisdk5" /**< 定义驱动名为 hisdk5 */ +#define HINIC5_CHIP_NAME "hinic" /**< 定义芯片名为 hinic */ +#endif +/* Interrupt at most records, interrupt will be recorded in the FFM */ + +#define NICTOOL_CMD_TYPE (0x18) + +/** + 
* @brief struct api_cmd_rd + * @details 用于接收API命令的结构体 + */ +struct api_cmd_rd { + u32 pf_id; /**< pf id */ + u8 dest; /**< node id */ + u8 *cmd; /**< 指向API命令的指针 */ + u16 size; /**< 表示命令的大小 */ + void *ack; /**< 指向命令的确认信息的指针 */ + u16 ack_size; /**< 表示确认信息的大小 */ +}; + +/** + * @brief struct api_cmd_wr + * @details 用于API命令写操作的结构体 + */ +struct api_cmd_wr { + u32 pf_id; /**< pf id */ + u8 dest; /**< node id */ + u8 *cmd; /**< 指向API命令的指针 */ + u16 size; /**< 表示命令的大小 */ +}; + +#define PF_DEV_INFO_NUM 32 + +/** + * @brief struct pf_dev_info + * @details 存储PCI设备的信息的结构体 + */ +struct pf_dev_info { + u64 bar0_size; /**< bar0的大小 */ + u8 bus; /**< PCI设备的总线号 */ + u8 slot; /**< PCI设备的插槽号 */ + u8 func; /**< PCI设备的功能号 */ + u64 phy_addr; /**< PCI设备的物理地址 */ +}; + +/** + * Indicates the maximum number of interrupts that can be recorded. + * Subsequent interrupts are not recorded in FFM. + */ +#define FFM_RECORD_NUM_MAX 64 + +/** + * @brief struct ffm_intr_info + * @details 存储中断信息的结构体 + */ +struct ffm_intr_info { + u8 node_id; /**< 中断源的节点ID */ + u8 err_level; /**< 中断源的错误级别 */ + u16 err_type; /**< 中断源的错误类型 */ + u32 err_csr_addr; /**< 中断源的地址 */ + u32 err_csr_value; /**< 中断源的值 */ +}; + +/** + * @brief struct ffm_intr_tm_info + * @details 存储中断信息和时间信息的结构体 + */ +struct ffm_intr_tm_info { + struct ffm_intr_info intr_info; /**< 中断信息 */ + u8 times; /**< 时间 */ + u8 sec; /**< 秒 */ + u8 min; /**< 分钟 */ + u8 hour; /**< 小时 */ + u8 mday; /**< 日期 */ + u8 mon; /**< 月份 */ + u16 year; /**< 年份 */ +}; + +/** + * @brief struct ffm_record_info + * @details 用于存储FFM记录信息的结构体 + */ +struct ffm_record_info { + u32 ffm_num; /**< FFM编号 */ + u32 last_err_csr_addr; /**< 最后一次错误的CSR地址 */ + u32 last_err_csr_value; /**< 最后一次错误的CSR值 */ + struct ffm_intr_tm_info ffm[FFM_RECORD_NUM_MAX]; /**< FFM中断时间信息数组 */ +}; + +/** + * @brief struct dbgtool_k_glb_info + * @details 存储调试工具的全局信息的结构体 + */ +struct dbgtool_k_glb_info { + struct semaphore dbgtool_sem; /**< 信号量,用于同步调试工具的线程 */ + struct ffm_record_info *ffm; /**< 存储ffm记录信息 */ +}; + 
+/** + * @brief struct msg_2_up + * @details 存储上行消息的相关信息的结构体 + */ +struct msg_2_up { + u8 pf_id; /**< 协议ID */ + u8 mod; /**< 模块ID */ + u8 cmd; /**< 命令ID */ + void *buf_in; /**< 输入缓冲区指针 */ + u16 in_size; /**< 输入缓冲区大小 */ + void *buf_out; /**< 输出缓冲区指针 */ + u16 *out_size; /**< 输出缓冲区大小指针 */ +}; + +/** + * @brief struct dbgtool_param + * @details 调试工具参数结构体 + */ +struct dbgtool_param { + union { + struct api_cmd_rd api_rd; /**< 读取命令 */ + struct api_cmd_wr api_wr; /**< 写入命令 */ + struct pf_dev_info *dev_info; /**< 设备信息 */ + struct ffm_record_info *ffm_rd; /**< FFM记录信息 */ + struct msg_2_up msg2up; /**< 上行消息 */ + } param; + char chip_name[16]; /**< 芯片名称 */ +}; + +/** + * @brief typedef enum + * @details 表示调试工具的命令类型 + */ +typedef enum { + DBGTOOL_CMD_API_RD = 0, /**< 读取API命令 */ + DBGTOOL_CMD_API_WR, /**< 写入API命令 */ + DBGTOOL_CMD_FFM_RD, /**< 读取FFM命令 */ + DBGTOOL_CMD_FFM_CLR, /**< 清除FFM命令 */ + DBGTOOL_CMD_PF_DEV_INFO_GET, /**< 获取PF设备信息命令 */ + DBGTOOL_CMD_MSG_2_UP, /**< 将消息发送到上层命令 */ + DBGTOOL_CMD_FREE_MEM, /**< 释放内存命令 */ + DBGTOOL_CMD_NUM /**< 命令类型数量 */ +} dbgtool_cmd; + +#define PF_MAX_SIZE (16) +#define BUSINFO_LEN (32) +#define HINIC_FUNC_MAX_SIZE (4096) + +/** + * @brief enum module_name + * @details 表示不同的模块名称 + */ +enum module_name { + SEND_TO_NPU = 1, /**< 发送到NPU模块 */ + SEND_TO_MPU, /**< 发送到MPU模块 */ + SEND_TO_SM, /**< 发送到SM模块 */ + SEND_TO_HW_DRIVER, /**< 发送到硬件驱动 */ +#define SEND_TO_SRV_DRV_BASE (SEND_TO_HW_DRIVER + 1) + SEND_TO_NIC_DRIVER = SEND_TO_SRV_DRV_BASE, /**< 发送到网络接口控制器驱动 */ + SEND_TO_OVS_DRIVER, /**< 发送到Open vSwitch驱动 */ + SEND_TO_ROCE_DRIVER, /**< 发送到RDMA over Converged Ethernet驱动 */ + SEND_TO_TOE_DRIVER, /**< 发送到TCP offload驱动 */ + SEND_TO_IOE_DRIVER, /**< 发送到I/O加速驱动 */ + SEND_TO_FC_DRIVER, /**< 发送到光纤通道驱动 */ + SEND_TO_VBS_DRIVER, /**< 发送到虚拟阻塞存储驱动 */ + SEND_TO_IPSEC_DRIVER, /**< 发送到IPsec驱动 */ + SEND_TO_VIRTIO_DRIVER, /**< 发送到Virtio驱动 */ + SEND_TO_MIGRATE_DRIVER, /**< 发送到迁移驱动 */ + SEND_TO_PPA_DRIVER, /**< 发送到PPA驱动 */ + SEND_TO_CUSTOM_DRIVER = SEND_TO_SRV_DRV_BASE 
+ 11, /**< 发送到自定义驱动 */ + SEND_TO_VROCE_DRIVER, /**< 发送到vRDMA over Converged Ethernet驱动 */ + SEND_TO_UB_DRIVER, /**< 发送到UB驱动 */ + SEND_TO_JBOF_DRIVER, /**< 发送到Jumbo Frame offload驱动 */ + SEND_TO_MACSEC_DRIVER, /**< 发送到MACsec驱动 */ + SEND_TO_BIFUR_DRIVER = SEND_TO_MACSEC_DRIVER + 3, + SEND_TO_HIHTR_DRIVER, /**< 发送到Hihtr驱动 */ + SEND_TO_DRIVER_MAX = SEND_TO_SRV_DRV_BASE + 20, /* reserved */ +}; + +/** + * @brief enum driver_cmd_type + * @details 定义驱动命令类型枚举 + */ +enum driver_cmd_type { + TX_INFO = 0x1, /**< 发送信息 */ + Q_NUM = 0x2, /**< 队列数量 */ + TX_WQE_INFO = 0x3, /**< 发送工作队列信息 */ + TX_MAPPING = 0x4, /**< 发送映射 */ + RX_INFO = 0x5, /**< 接收信息 */ + RX_WQE_INFO = 0x6, /**< 接收工作队列信息 */ + RX_CQE_INFO = 0x7, /**< 接收完成队列信息 */ + UPRINT_FUNC_EN = 0x8, /**< 打印功能启用 */ + UPRINT_FUNC_RESET = 0x9, /**< 打印功能重置 */ + UPRINT_SET_PATH = 0xa, /**< 设置打印路径 */ + UPRINT_GET_STATISTICS = 0xb, /**< 获取打印统计信息 */ + FUNC_TYPE = 0xc, /**< 功能类型 */ + GET_FUNC_IDX = 0xd, /**< 获取功能索引 */ + GET_INTER_NUM = 0xe, /**< 获取内部数量 */ + CLOSE_TX_STREAM = 0xf, /**< 关闭发送流 */ + GET_DRV_VERSION = 0x10, /**< 获取驱动版本 */ + CLEAR_FUNC_STASTIC = 0x11, /**< 清除功能统计信息 */ + GET_HW_STATS = 0x12, /**< 获取硬件统计信息 */ + CLEAR_HW_STATS = 0x13, /**< 清除硬件统计信息 */ + GET_SELF_TEST_RES = 0x14, /**< 获取自我测试结果 */ + GET_CHIP_FAULT_STATS = 0x15, /**< 获取芯片故障统计信息 */ + NIC_RSVD1 = 0x16, + NIC_RSVD2 = 0x17, + NIC_RSVD3 = 0x18, + GET_CHIP_ID = 0x19, /**< 获取芯片ID */ + GET_SINGLE_CARD_INFO = 0x1a, /**< 获取单卡信息 */ + GET_FIRMWARE_ACTIVE_STATUS = 0x1b, /**< 获取固件活动状态 */ + ROCE_DFX_FUNC = 0x1c, /**< RoCE调试功能 */ + GET_DEVICE_ID = 0x1d, /**< 获取设备ID */ + GET_PF_DEV_INFO = 0x1e, /**< 获取PF设备信息 */ + CMD_FREE_MEM = 0x1f, /**< 释放内存 */ + GET_LOOPBACK_MODE = 0x20, /**< 获取环回模式 */ + SET_LOOPBACK_MODE = 0x21, /**< 设置环回模式 */ + SET_LINK_MODE = 0x22, /**< 设置链接模式 */ + SET_PF_BW_LIMIT = 0x23, /**< 设置PF带宽限制 */ + GET_PF_BW_LIMIT = 0x24, /**< 获取PF带宽限制 */ + ROCE_CMD = 0x25, /**< RoCE命令 */ + GET_POLL_WEIGHT = 0x26, /**< 获取轮询权重 */ + SET_POLL_WEIGHT = 0x27, /**< 设置轮询权重 */ + GET_HOMOLOGUE = 
0x28, /**< 获取对端信息 */ + SET_HOMOLOGUE = 0x29, /**< 设置对端信息 */ + GET_SSET_COUNT = 0x2a, /**< 获取统计信息数量 */ + GET_SSET_ITEMS = 0x2b, /**< 获取统计信息项 */ + IS_DRV_IN_VM = 0x2c, /**< 判断是否在虚拟机中 */ + LRO_ADPT_MGMT = 0x2d, /**< 管理LRO适配器 */ + SET_INTER_COAL_PARAM = 0x2e, /**< 设置中断合并参数 */ + GET_INTER_COAL_PARAM = 0x2f, /**< 获取中断合并参数 */ + GET_CHIP_INFO = 0x30, /**< 获取芯片信息 */ + GET_NIC_STATS_LEN = 0x31, /**< 获取NIC统计信息长度 */ + GET_NIC_STATS_STRING = 0x32, /**< 获取NIC统计信息字符串 */ + GET_NIC_STATS_INFO = 0x33, /**< 获取NIC统计信息 */ + GET_PF_ID = 0x34, /**< 获取PF ID */ + GET_MBOX_CNT = 0x35, /**< 获取mailbox数量 */ + NIC_RSVD5 = 0x36, + DCB_QOS_INFO = 0x37, /**< DCB QoS信息 */ + DCB_PFC_STATE = 0x38, /**< DCB PFC状态 */ + DCB_ETS_STATE = 0x39, /**< DCB ETS状态 */ + DCB_STATE = 0x3a, /**< DCB状态 */ + QOS_DEV = 0x3b, /**< QOS设备 */ + GET_QOS_COS = 0x3c, /**< 获取QOS优先级 */ + GET_ULD_DEV_NAME = 0x3d, /**< 获取ULD设备名称 */ + GET_TX_TIMEOUT = 0x3e, /**< 获取发送超时 */ + SET_TX_TIMEOUT = 0x3f, /**< 设置发送超时 */ + + RSS_CFG = 0x40, /**< RSS配置 */ + RSS_INDIR = 0x41, /**< RSS间接表 */ + PORT_ID = 0x42, /**< 端口ID */ + BOND_DFX_OPS = 0x43, /**< BOND DFX操作 */ + + GET_FUNC_CAP = 0x50, /**< 获取功能能力 */ + GET_XSFP_PRESENT = 0x51, /**< 获取XSFP存在状态 */ + GET_XSFP_INFO = 0x52, /**< 获取XSFP信息 */ + DEV_NAME_TEST = 0x53, /**< 设备名称测试 */ + GET_XSFP_INFO_COMP_CMIS = 0x54, /**< 获取XSFP信息(支持CMIS) */ + CMD_GET_PROFILE_ID = 0x55, + CMD_SET_PROFILE_ID = 0x56, + CMD_MOVE_TCAM_TABLE = 0x57, + + GET_WIN_STAT = 0x60, /**< 获取窗口状态 */ + WIN_CSR_READ = 0x61, /**< 读取窗口CSR */ + WIN_CSR_WRITE = 0x62, /**< 写入窗口CSR */ + WIN_API_CMD_RD = 0x63, /**< 读取窗口API命令 */ + + ROCE_CMD_SET_LDCP_PARAM = 0x70, /**< RoCE命令设置LDCP参数 */ + + ROCE_CMD_GET_QPC_FROM_CACHE = 0x80, /**< 从缓存获取QPC */ + ROCE_CMD_GET_QPC_FROM_HOST = 0x81, /**< 从主机获取QPC */ + ROCE_CMD_GET_CQC_FROM_CACHE = 0x82, /**< 从缓存获取CQC */ + ROCE_CMD_GET_CQC_FROM_HOST = 0x83, /**< 从主机获取CQC */ + ROCE_CMD_GET_SRQC_FROM_CACHE = 0x84, /**< 从缓存获取SRQC */ + ROCE_CMD_GET_SRQC_FROM_HOST = 0x85, /**< 从主机获取SRQC */ + ROCE_CMD_GET_MPT_FROM_CACHE 
= 0x86, /**< 从缓存获取MPT */ + ROCE_CMD_GET_MPT_FROM_HOST = 0x87, /**< 从主机获取MPT */ + ROCE_CMD_GET_GID_FROM_CACHE = 0x88, /**< 从缓存获取GID */ + ROCE_CMD_GET_QPC_CQC_PI_CI = 0x89, /**< 获取QPC、CQC、PI、CI */ + ROCE_CMD_GET_QP_COUNT = 0x8a, /**< 获取QP数量 */ + ROCE_CMD_GET_DEV_ALGO = 0x8b, /**< 获取设备算法 */ + ROCE_CMD_GET_DEV_TYPE = 0x8c, /**< 获取设备类型 */ + ROCE_CMD_GET_HW_COUNT = 0x8d, /**< 获取硬件计数 */ + ROCE_CMD_GET_SPECIFICATIONS = 0x8e, /**< 从缓存获取设备的功能规格 */ + + ROCE_CMD_START_CAP_PACKET = 0x90, /**< 开始捕获数据包 */ + ROCE_CMD_STOP_CAP_PACKET = 0x91, /**< 停止捕获数据包 */ + ROCE_CMD_QUERY_CAP_INFO = 0x92, /**< 查询捕获信息 */ + ROCE_CMD_ENABLE_QP_CAP_PACKET = 0x93, /**< 启用QP捕获数据包 */ + ROCE_CMD_DISABLE_QP_CAP_PACKET = 0x94, /**< 禁用QP捕获数据包 */ + ROCE_CMD_QUERY_QP_CAP_INFO = 0x95, /**< 查询QP捕获信息 */ + ROCE_CMD_SET_BYPASS = 0x96, /**< 设置bypass */ + ROCE_CMD_QUERY_BYPASS = 0x97, /**< 查询bypass */ + ROCE_CMD_GET_AEQC_FROM_CACHE = 0x98, /**< 从缓存获取AEQC */ + ROCE_CMD_GET_AEQC_FROM_HOST = 0x99, /**< 从主机获取AEQC */ + + ROCE_CMD_ENABLE_BW_CTRL = 0xa0, /**< 启用带宽控制 */ + ROCE_CMD_DISABLE_BW_CTRL = 0xa1, /**< 禁用带宽控制 */ + ROCE_CMD_CHANGE_BW_CTRL_PARAM = 0xa2, /**< 更改带宽控制参数 */ + ROCE_CMD_QUERY_BW_CTRL_PARAM = 0xa3, /**< 查询带宽控制参数 */ + ROCE_CMD_SET_BW_WATERLINE = 0xa4, /**< 设置带宽水线值 */ + ROCE_CMD_GET_BW_WATERLINE = 0xa5, /**< 获取带宽水线值 */ + ROCE_CMD_SET_VNIC_WATERLINE = 0xa6, /**< 设置VNIC水线值 */ + ROCE_CMD_GET_VNIC_WATERLINE = 0xa7, /**< 获取VNIC水线值 */ + ROCE_CMD_ROCE_SET = 0xa8, /**< 设置ROCE相关配置 */ + ROCE_CMD_DFX_LATCH_QUERY = 0xa9, /**< ROCE锁存查询 */ + + ROCE_CMD_TIMEOUT_ALARM = 0xb0, /**< 超时警告 */ + ROCE_CMD_PORT_TRAFFIC = 0Xb1, /**< 端口流量 */ + ROCE_CMD_DFX_ATTACK = 0Xb2, /**< ROCE主机侧防攻击 */ + ROCE_CMD_ULD_IOCTL_EXTEND = 0xb3, /**< 用户ioctl到驱动的总入口 */ + + MIG_QUERY_DFX = 0xc0, /**< 查询迁移DFX */ + + DRV_CMD_TYPE_RSV = 0xd0, /**< 预留,不可使用 */ + + NIC_CMD_ANTI_ATTACK = 0xd6, /**< 防攻击验证 */ + + VM_COMPAT_TEST = 0xFF, /**< VM兼容性测试 */ + + SERVICE_DRV_BASE_CMD = 0x120, /**< 业务命令字从0x120开始,0x120前面的预留给产品后续使用 */ +}; + +/** + * @brief enum 
api_chain_cmd_type + * @details 定义API链命令类型枚举 + */ +enum api_chain_cmd_type { + API_CSR_READ, /**< 读取CSR(控制和状态寄存器) */ + API_CSR_WRITE /**< 写入CSR(控制和状态寄存器) */ +}; + +/** + * @brief sm_cmd_type + * @details 用于表示不同的命令类型 + */ +enum sm_cmd_type { + SM_CTR_RD16 = 1, /**< 读取16位数据的命令 */ + SM_CTR_RD32, /**< 读取32位数据的命令 */ + SM_CTR_RD64_PAIR, /**< 读取64位数据对的命令 */ + SM_CTR_RD64, /**< 读取64位数据的命令 */ + SM_CTR_RD32_CLEAR, /**< 清除32位数据的命令 */ + SM_CTR_RD64_PAIR_CLEAR, /**< 清除64位数据对的命令 */ + SM_CTR_RD64_CLEAR /**< 清除64位数据的命令 */ +}; + +#define HINIC5_CQM_AEQ_CALLBACK_CNT_MAX 128 /* 与HINIC5_CQM_AEQ_BASE_T_MAX保持一致 */ + +/** + * @brief struct hinic5_cqm_stats + * @details 统计HINIC5_CQM模块的各种操作数量 + */ +struct hinic5_cqm_stats { + atomic_t hinic5_cqm_cmd_alloc_cnt; /**< 统计HINIC5_CQM命令分配的次数 */ + atomic_t hinic5_cqm_cmd_free_cnt; /**< 统计HINIC5_CQM命令释放的次数 */ + atomic_t hinic5_cqm_send_cmd_box_cnt; /**< 统计HINIC5_CQM发送命令盒子的次数 */ + atomic_t hinic5_cqm_send_cmd_imm_cnt; /**< 统计HINIC5_CQM发送命令的次数 */ + atomic_t hinic5_cqm_db_addr_alloc_cnt; /**< 统计HINIC5_CQM数据库地址分配的次数 */ + atomic_t hinic5_cqm_db_addr_free_cnt; /**< 统计HINIC5_CQM数据库地址释放的次数 */ + atomic_t hinic5_cqm_fc_srq_create_cnt; /**< 统计HINIC5_CQM创建FC SRQ的次数 */ + atomic_t hinic5_cqm_srq_create_cnt; /**< 统计HINIC5_CQM创建SRQ的次数 */ + atomic_t hinic5_cqm_rq_create_cnt; /**< 统计HINIC5_CQM创建RQ的次数 */ + atomic_t hinic5_cqm_qpc_mpt_create_cnt; /**< 统计HINIC5_CQM创建QPC和MPT的次数 */ + atomic_t hinic5_cqm_nonrdma_queue_create_cnt; /**< 统计HINIC5_CQM创建非RDMA队列的次数 */ + atomic_t hinic5_cqm_rdma_queue_create_cnt; /**< 统计HINIC5_CQM创建RDMA队列的次数 */ + atomic_t hinic5_cqm_rdma_table_create_cnt; /**< 统计HINIC5_CQM创建RDMA表的次数 */ + atomic_t hinic5_cqm_qpc_mpt_delete_cnt; /**< 统计HINIC5_CQM删除QPC和MPT的次数 */ + atomic_t hinic5_cqm_nonrdma_queue_delete_cnt; /**< 统计HINIC5_CQM删除非RDMA队列的次数 */ + atomic_t hinic5_cqm_rdma_queue_delete_cnt; /**< 统计HINIC5_CQM删除RDMA队列的次数 */ + atomic_t hinic5_cqm_rdma_table_delete_cnt; /**< 统计HINIC5_CQM删除RDMA表的次数 */ + atomic_t hinic5_cqm_func_timer_clear_cnt; /**< 
统计HINIC5_CQM清除函数计时器的次数 */ + atomic_t hinic5_cqm_func_hash_buf_clear_cnt; /**< 统计HINIC5_CQM清除函数哈希缓冲区的次数 */ + atomic_t hinic5_cqm_scq_callback_cnt; /**< 统计HINIC5_CQM SCQ回调的次数 */ + atomic_t hinic5_cqm_ecq_callback_cnt; /**< 统计HINIC5_CQM ECQ回调的次数 */ + atomic_t hinic5_cqm_nocq_callback_cnt; /**< 统计HINIC5_CQM NOCQ回调的次数 */ + atomic_t hinic5_cqm_aeq_callback_cnt[HINIC5_CQM_AEQ_CALLBACK_CNT_MAX]; /**< 统计HINIC5_CQM AEQ回调的次数 */ +}; + +/** + * @brief struct link_event_stats + * @details 用于统计链路事件的数量 + */ +struct link_event_stats { + atomic_t link_down_stats; /**< 表示链路已经断开的事件数量 */ + atomic_t link_up_stats; /**< 表示链路已经连接的事件数量 */ +}; + +/** + * @brief enum hinic5_fault_err_level + * @details 错误级别枚举类型 + */ +enum hinic5_fault_err_level { + FAULT_LEVEL_FATAL, /**< 致命错误 */ + FAULT_LEVEL_SERIOUS_RESET, /**< 严重错误,需要重置 */ + FAULT_LEVEL_HOST, /**< 主机错误 */ + FAULT_LEVEL_SERIOUS_FLR, /**< 严重错误,需要FLR(函数级复位) */ + FAULT_LEVEL_GENERAL, /**< 一般错误 */ + FAULT_LEVEL_SUGGESTION, /**< 建议错误 */ + FAULT_LEVEL_MAX, /**< 错误级别最大值 */ +}; + +/** + * @brief enum hinic5_fault_type + * @details 定义了可能出现的故障类型 + */ +enum hinic5_fault_type { + FAULT_TYPE_CHIP, /**< 芯片故障 */ + FAULT_TYPE_UCODE, /**< 微码故障 */ + FAULT_TYPE_MEM_RD_TIMEOUT, /**< 读取内存超时故障 */ + FAULT_TYPE_MEM_WR_TIMEOUT, /**< 写入内存超时故障 */ + FAULT_TYPE_REG_RD_TIMEOUT, /**< 读取寄存器超时故障 */ + FAULT_TYPE_REG_WR_TIMEOUT, /**< 写入寄存器超时故障 */ + FAULT_TYPE_PHY_FAULT, /**< 物理故障 */ + FAULT_TYPE_TSENSOR_FAULT, /**< 温度传感器故障 */ + FAULT_TYPE_MAX, /**< 故障类型最大值 */ +}; + +/** + * @brief struct fault_event_stats + * @details 故障事件统计结构体 + */ +struct fault_event_stats { + /* HINIC_NODE_ID_MAX: temp use the value of 1822(22) */ + atomic_t chip_fault_stats[22][FAULT_LEVEL_MAX]; /**< 各个芯片的故障等级统计 */ + atomic_t fault_type_stat[FAULT_TYPE_MAX]; /**< 各种故障类型的统计 */ + atomic_t pcie_fault_stats; /**< PCIE故障的统计 */ +}; + +/** + * @brief enum hinic5_ucode_event_type + * @details 定义了Ucode事件类型的枚举类型 + */ +enum hinic5_ucode_event_type { + HINIC5_INTERNAL_OTHER_FATAL_ERROR = 0x0, /**< 内部其他致命错误 */ + 
HINIC5_HTN_PTP_EVENT = 0x1, /**< HTN PTP事件 */ + HINIC5_CHANNEL_BUSY = 0x7, /**< 通道忙 */ + HINIC5_NIC_FATAL_ERROR_MAX = 0x8, /**< NIC致命错误的最大值 */ +}; + +/** + * @brief struct hinic5_hw_stats + * @details 硬件统计结构体 + */ +struct hinic5_hw_stats { + atomic_t heart_lost_stats; /**< 心跳丢失统计 */ + struct hinic5_cqm_stats hinic5_cqm_stats; /**< HINIC5_CQM统计 */ + struct link_event_stats link_event_stats; /**< 链路事件统计 */ + struct fault_event_stats fault_event_stats; /**< 故障事件统计 */ + atomic_t nic_ucode_event_stats[HINIC5_NIC_FATAL_ERROR_MAX]; /**< NIC微码事件统计 */ +}; + +#ifndef IFNAMSIZ +#define IFNAMSIZ 16 +#endif + +/** + * @brief struct pf_info + * @details 用于存储网络接口的信息 + */ +struct pf_info { + char name[IFNAMSIZ]; /**< 网络接口的名称 */ + char bus_info[BUSINFO_LEN]; /**< 网络接口的总线信息 */ + u32 pf_type; /**< 网络接口的类型 */ +}; + +/** + * @brief struct card_info + * @details 用于存储卡片信息 + * @param pf_num + * in_param: 累计已获取的pf num(PF_MAX_SIZE的整数倍) + * out_param: 当前获取的pf_info的数量及待获取pf_info的pf num总和 + */ +struct card_info { + struct pf_info pf[PF_MAX_SIZE]; /**< 用于存储卡片的各种信息 */ + u32 pf_num; /**< 用于存储卡片的数量 */ +}; + +struct func_mbox_cnt_info { + char bus_info[BUSINFO_LEN]; + u64 send_cnt; + u64 ack_cnt; +}; + +struct card_mbox_cnt_info { + struct func_mbox_cnt_info func_info[HINIC_FUNC_MAX_SIZE]; + u32 func_num; +}; + +/** + * @brief struct hinic5_nic_loop_mode + * @details 用于循环模式的结构体 + */ +struct hinic5_nic_loop_mode { + u32 loop_mode; /**< 循环模式的标识符 */ + u32 loop_ctrl; /**< 循环控制的标识符 */ +}; + +/** + * @brief struct hinic5_pf_info + * @details 用于存储PF的信息 + */ +struct hinic5_pf_info { + u32 isvalid; /**< 表示PF信息是否有效的标志位 */ + u32 pf_id; /**< PF的唯一标识符 */ +}; + +#define HINIC5_CHIP_FAULT_SIZE (110 * 1024) +#define MAX_DRV_BUF_SIZE 4096 + +/** + * @brief struct nic_cmd_chip_fault_stats + * @details 网络接口命令芯片故障统计信息结构体 + */ +struct nic_cmd_chip_fault_stats { + u32 offset; /**< 偏移量 */ + u8 chip_fault_stats[MAX_DRV_BUF_SIZE]; /**< 芯片故障统计信息数组 */ +}; + +#define NIC_TOOL_MAGIC 'x' /**< 表示NIC工具的魔数 */ + +#ifdef 
STORAGE_PANGEA +#define CARD_MAX_SIZE (16) /**< 定义卡片的最大大小为16 */ +#else +#define CARD_MAX_SIZE (64) /**< 卡片的最大大小为64 */ +#endif + +/** + * @brief struct nic_card_id + * @details 用于存储网卡的ID和数量 + */ +struct nic_card_id { + u32 id[CARD_MAX_SIZE]; /**< 网卡ID数组,最大长度为CARD_MAX_SIZE */ + u32 num; /**< 网卡数量 */ +}; + +/** + * @brief struct func_dev_info + * @details 用于存储函数相关的dev信息 + */ +struct func_dev_info { + u64 bar0_phy_addr; /**< bar0的物理地址 */ + u64 bar0_size; /**< bar0的大小 */ + u64 bar1_phy_addr; /**< bar1的物理地址 */ + u64 bar1_size; /**< bar1的大小 */ + u64 bar3_phy_addr; /**< bar3的物理地址 */ + u64 bar3_size; /**< bar3的大小 */ + u64 rsvd1[4]; +}; + +/** + * @brief struct hinic5_card_func_info + * @details 用于存储卡片功能信息 + */ +struct hinic5_card_func_info { + u32 num_pf; /**< 物理函数数量 */ + u32 rsvd0; + u64 usr_api_phy_addr; /**< 用户API物理地址 */ + struct func_dev_info dev_info[CARD_MAX_SIZE]; /**< 函数dev信息数组 */ +}; + +#define MAX_VER_INFO_LEN 128 /**< 定义最大版本信息长度常量 */ +/** + * @brief struct drv_version_info + * @details 定义驱动版本信息结构体 + */ +struct drv_version_info { + char ver[MAX_VER_INFO_LEN]; /**< 定义版本信息字符数组,长度为MAX_VER_INFO_LEN */ +}; + +#define MT_EPERM 1 /**< 不允许操作 */ +#define MT_EIO 2 /**< I/O错误 */ +#define MT_EINVAL 3 /**< 无效参数 */ +#define MT_EBUSY 4 /**< 设备或资源忙 */ +#define MT_EOPNOTSUPP 0xFF /**< 不支持操作 */ + +/** + * @brief struct mt_msg_head + * @details 用于存储消息头的结构体 + */ +struct mt_msg_head { + u8 status; /**< 状态 */ + u8 rsvd1[3]; /**< 保留字段 */ +}; + +/** + * @brief enum mt_api_type + * @details 用于表示不同的API类型 + */ +enum mt_api_type { + API_TYPE_MBOX = 1, /**< MBOX API类型 */ + API_TYPE_API_CHAIN_BYPASS, /**< API链式调用绕过API类型 */ + API_TYPE_API_CHAIN_TO_MPU, /**< API链式调用到MPU的API类型 */ + API_TYPE_CLP, /**< CLP API类型 */ +}; + +/** + * @brief struct npu_cmd_st + * @details 用于描述NPU命令的结构体 + */ +struct npu_cmd_st { + u32 mod : 8; /**< 模块ID,占用32位中的8位 */ + u32 cmd : 8; /**< 命令ID,占用32位中的8位 */ + u32 ack_type : 3; /**< 确认类型,占用32位中的3位 */ + u32 direct_resp : 1; /**< 直接响应标志,占用32位中的1位 */ + u32 len : 12; /**< 
长度,占用32位中的12位 */ +}; + +/** + * @brief struct mpu_cmd_st + * @details 用于存储MPU命令的结构体 + */ +struct mpu_cmd_st { + u32 api_type : 8; /**< 定义了一个32位的无符号整数,用于存储API类型,占用8位 */ + u32 mod : 8; /**< 定义了一个32位的无符号整数,用于存储模块,占用8位 */ + u32 cmd : 16; /**< 定义了一个32位的无符号整数,用于存储命令,占用16位 */ +}; + +/** + * @brief struct msg_module + * @details 消息模块结构体,用于存储设备名、模块信息、命令格式、超时时间、函数索引、输入输出缓冲区大小、缓冲区指针、总线号、端口ID等信息 + */ +struct msg_module { + char device_name[IFNAMSIZ]; /**< 设备名,存储设备的名称 */ + u32 module; /**< 模块信息,存储模块的相关信息 */ + /** + * @brief 存储消息的格式 + */ + union { + u32 msg_formate; + struct npu_cmd_st npu_cmd; + struct mpu_cmd_st mpu_cmd; + }; + u32 timeout; /**< 超时时间,存储操作的超时时间 */ + u32 func_idx; /**< 函数索引,存储函数的索引信息 */ + u32 buf_in_size; /**< 输入缓冲区大小,存储输入缓冲区的大小 */ + u32 buf_out_size; /**< 输出缓冲区大小,存储输出缓冲区的大小 */ + void *in_buf; /**< 输入缓冲区指针,存储输入缓冲区的指针 */ + void *out_buf; /**< 输出缓冲区指针,存储输出缓冲区的指针 */ + int bus_num; /**< 总线编号,存储总线的编号信息 */ + u8 port_id; /**< 端口ID,存储端口的ID信息 */ + u8 use_func_idx; /**< 表示使用func_idx对设备下发命令, 配合func_idx使用 */ + u8 rsvd1[2]; + u32 rsvd2[4]; +}; + +#define MQM_FLOW_NUM 2 +#define MQM_XID_NUM 32768 +#define MQM_COS_VLD 1 +#define MQM_COS_INVLD 0 +#define MQM_COS_NUM 8 + +typedef struct hinic5_mqm_send_db_cmd_s { + u32 service_type; + u32 cflag; + u32 no_fliter; + u32 pf_id; + u32 cos_vld[8]; + u32 db_cnt[8]; + u32 time[8]; + u32 time_max; + u32 speed_pps[8]; + u32 length[8]; + u32 num[8]; + u32 mode; + u32 db_dw0_rsv; + u32 db_dw1_value; + u32 rand; +} mqm_send_db_cmd_s; + +struct hinic5_mqm_db_num_len { + u32 db_info; + u32 pi_hi; +}; +typedef union { + struct { + u32 num : 8; + u32 length : 18; + u32 mode : 2; + u32 rsvd : 4; + } bs; + u32 value; +} u_hinic5_mqm_db_num_len; + +struct hinic5_mt_msg { + const void *buf_in; + void *buf_out; + u32 in_size; + u32 out_size; +}; + +/** + * @brief struct hinic5_non_ptp_info + * @details 定义一个结构体,用于存储non ptp 时间信息 + */ +struct hinic5_non_ptp_info { + char name[IFNAMSIZ]; /**< 存储时间对应的chip设备名 */ + u64 non_ptp_time_diff_enable; /**< 
非PTP时间差使能 */ + s64 non_ptp_time_diff; /**< 非PTP时间差 */ + atomic_t ref_cnt; /**< 使用该non ptp info的引用计数 */ +}; + +/** + * @brief hinic5_set_freq_reduce_ratio + * @param dev: device pointer + * @param ratio: non ptp chip time 降频比, 需要大于0 + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_set_freq_reduce_ratio(void *dev, u32 ratio); + +/** + * @brief hinic5_set_non_ptp_time_diff_en + * @param dev: device pointer + * @param enable: non ptp chip time 使能标记, 0 disable, 1 enable + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_set_non_ptp_time_diff_en(void *dev, bool enable); + +/** + * @brief SDK驱动命令钩子函数 + * + * @param lld_dev device pointer to hinic5_lld_dev + * @param cmd 命令字 + * @param nt_msg 命令内容 + * @param support 是否支持该命令, 产品需要根据命令字判断是否支持 + * + * @details 由产品重载 + * + * @return: 命令执行结果. + * @retval 0 成功 + * @retval 非0 失败 + */ +int hinic5_nictool_cmd_extend_handle(void *lld_dev, u32 cmd, struct hinic5_mt_msg *mt_msg, bool *support); + +#endif /* _HINIC5_MT_H_ */ diff --git a/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_profile.h b/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_profile.h new file mode 100644 index 00000000..90624634 --- /dev/null +++ b/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_profile.h @@ -0,0 +1,357 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_PROFILE_H +#define HINIC5_PROFILE_H + +#include <linux/types.h> + +/** + * @brief Check the device whether matches the specific scene + * @param device Generic device pointer + * + * @return match or not + */ +typedef bool (*hinic5_is_match_prof)(void *device); + +/** + * @brief Init profile attributes of the device + * @param device Generic device pointer + * + * @return Nullable, profile attributes + */ +typedef void *(*hinic5_init_prof_attr)(void *device); + +/** + * @brief Denit profile attributes of the device + * 
@param porf_attr Profile attributes + */ +typedef void (*hinic5_deinit_prof_attr)(void *porf_attr); + +/** + * @brief Profile adapter types + */ +enum prof_adapter_type { + PROF_ADAP_TYPE_INVALID, /**< Invalid adapter type */ + PROF_ADAP_TYPE_PANGEA = 1, /**< PANGEA */ + + /* Add prof adapter type before default */ + PROF_ADAP_TYPE_DEFAULT, +}; + +/** + * struct hinic5_prof_adapter - custom scene's profile adapter + * @type: adapter type + * @match: Check whether the current function is used in the custom scene. + * Implemented in the current source file + * @init: When @match return true, the initialization function called in probe. + * Implemented in the source file of the custom scene + * @deinit: When @match return true, the deinitialization function called when + * remove. Implemented in the source file of the custom scene + */ +struct hinic5_prof_adapter { + enum prof_adapter_type type; + hinic5_is_match_prof match; + hinic5_init_prof_attr init; + hinic5_deinit_prof_attr deinit; +}; + +struct hinic5_prof_ops { + void (*fault_recover)(void *data, u16 src, u16 level); + int (*get_work_cpu_affinity)(void *data, u32 work_type); + void (*probe_success)(void *data); + void (*remove_pre_handle)(void *hwdev); +}; + +struct hinic5_prof_attr { + void *priv_data; + u64 hw_feature_cap; + u64 sw_feature_cap; + u64 dft_hw_feature; + u64 dft_sw_feature; + + struct hinic5_prof_ops *ops; +}; + +/** + * @brief Get profile adapter by device + * @param hwdev The hwdev pointer + * @return Nullable, the adapter for the device + */ +const struct hinic5_prof_adapter *hinic5_get_prof_adapter(void *hwdev); + +/** + * @brief Helper function for verifing a profile adapter + * @param adapter Adapter pointer + * @return True if the adapter is valid + */ +static inline bool hinic5_verify_prof_adapter(const struct hinic5_prof_adapter *adapter) +{ + bool has_init, has_deinit; + + if (!adapter) + return true; + + has_init = adapter->init != NULL; + has_deinit = adapter->deinit != NULL; + if 
(has_init != has_deinit) + return false; + + return true; +} + +/** + * @brief Helper function for finding a adapter by the device and calling adapter's init function + * @param device Generic device's pointer + * @param adap_objs Adapter array + * @param num_adap Adapter array size + * @param prof_attr [Out] Result of init function + * + * @return Nullable, the adapter for the device + */ +static inline struct hinic5_prof_adapter *hinic5_prof_init(void *device, + struct hinic5_prof_adapter *adap_objs, + int num_adap, void **prof_attr) +{ + struct hinic5_prof_adapter *prof_obj = NULL; + int i; + + for (i = 0; i < num_adap; i++) { + prof_obj = &adap_objs[i]; + if (!(prof_obj->match && prof_obj->match(device))) + continue; + + *prof_attr = prof_obj->init ? prof_obj->init(device) : NULL; + + return prof_obj; + } + + return NULL; +} + +/** + * @brief 反初始化一个 hinic5 的 profile 对象 + * @param prof_obj 参数说明 + * @param prof_attr 参数说明 + * + * @return 无 + */ +static inline void hinic5_prof_deinit(const struct hinic5_prof_adapter *prof_obj, void *prof_attr) +{ + if (!prof_obj) + return; + + if (prof_obj->deinit) + prof_obj->deinit(prof_attr); +} + +/* module-level interface */ +#ifdef CONFIG_MODULE_PROF +/** + * @brief model-level interface + */ +struct hinic5_module_ops { + int (*module_prof_pre_init)(void); /**< pre初始化模块的配置文件,成功返回0,否则返回错误码 */ + int (*module_prof_post_init)(void); /**< post初始化模块的配置文件,成功返回0,否则返回错误码 */ + void (*module_prof_pre_exit)(void); /**< pre退出模块的配置文件 */ + void (*module_prof_post_exit)(void); /**< post退出模块的配置文件 */ + void (*probe_fault_process)(void *pdev, u16 level); /**< 处理探测故障 */ + int (*probe_pre_process)(void *pdev); /**< 预处理探测 */ + void (*probe_pre_unprocess)(void *pdev); /**< 预处理探测取消 */ +}; + +/** + * @brief 获取模块性能操作的函数 + * + * @return 返回一个指向hinic5_module_ops结构体的指针 + */ +struct hinic5_module_ops *hinic5_get_module_prof_ops(void); + +/** + * @brief 用于处理设备的探测错误 + * @param pdev 设备指针 + * @param level 错误级别 + * + * @return 无 + */ +static inline void 
hinic5_probe_fault_process(void *pdev, u16 level) +{ + struct hinic5_module_ops *ops = hinic5_get_module_prof_ops(); + + if (ops && ops->probe_fault_process) + ops->probe_fault_process(pdev, level); +} + +/** + * @brief 模块预初始化函数 + * + * @return 返回0表示成功,返回其他值表示失败 + */ +static inline int hinic5_module_pre_init(void) +{ + struct hinic5_module_ops *ops = hinic5_get_module_prof_ops(); + + if (!ops || !ops->module_prof_pre_init) + return -EINVAL; + + return ops->module_prof_pre_init(); +} + +/** + * @brief 模块退出后的处理函数 + * + * @return void 无返回值 + */ +static inline void hinic5_module_post_exit(void) +{ + struct hinic5_module_ops *ops = hinic5_get_module_prof_ops(); + + if (ops && ops->module_prof_post_exit) + ops->module_prof_post_exit(); +} + +/** + * @brief 在驱动程序中进行设备探测前的预处理 + * @param pdev 设备指针 + * + * @return 成功返回0,失败返回-EINVAL + */ +static inline int hinic5_probe_pre_process(void *pdev) +{ + struct hinic5_module_ops *ops = hinic5_get_module_prof_ops(); + + if (!ops || !ops->probe_pre_process) + return -EINVAL; + + return ops->probe_pre_process(pdev); +} + +/** + * @brief 设备移除或者加载失败后处理,和hinic5_probe_pre_process匹配 + * @param pdev 设备指针 + * + * @return 无 + */ +static inline void hinic5_probe_pre_unprocess(void *pdev) +{ + struct hinic5_module_ops *ops = hinic5_get_module_prof_ops(); + + if (ops && ops->probe_pre_unprocess) + ops->probe_pre_unprocess(pdev); +} + +/** + * @brief 模块后初始化函数 + * + * @return 返回0表示成功,返回其他值表示失败 + */ +static inline int hinic5_module_post_init(void) +{ + struct hinic5_module_ops *ops = hinic5_get_module_prof_ops(); + + if (!ops || !ops->module_prof_post_init) + return -EINVAL; + + return ops->module_prof_post_init(); +} + +/** + * @brief 模块退出前的处理函数 + * + * @return void 无返回值 + */ +static inline void hinic5_module_pre_exit(void) +{ + struct hinic5_module_ops *ops = hinic5_get_module_prof_ops(); + + if (ops && ops->module_prof_pre_exit) + ops->module_prof_pre_exit(); +} + +#else + +/** + * @brief hinic5_probe_fault_process 函数的作用 + * @param pdev 
设备指针,用于操作设备 + * @param level 错误级别 + * + * @return 无 + */ +static inline void hinic5_probe_fault_process(void *pdev, u16 level) { }; + + +/** + * @brief module pre initial + * + * @return + * @retval zero: success + * @retval non-zero: failure + */ + +static inline int hinic5_module_pre_init(void) +{ + return 0; +} + +/** + * @brief 模块退出后的处理函数 + * + * @return 无 + */ +static inline void hinic5_module_post_exit(void) { }; + +/** + * @brief module pre process + * @param pdev: pointer to dev + * + * @retval + * @retval zero: success + * @retval non-zero: failure + */ + +static inline int hinic5_probe_pre_process(void *pdev) +{ + return 0; +} + +/** + * @brief module pre unprocess + * @param pdev: pointer to dev + * + * @return + * @retval zero: success + * @retval non-zero: failure + */ +static inline void hinic5_probe_pre_unprocess(void *pdev) { }; + +/** + * @brief 模块后初始化函数 + * + * @return 返回0表示成功,返回其他值表示失败 + */ +static inline int hinic5_module_post_init(void) +{ + return 0; +} + +/** + * @brief 模块退出前的处理函数 + * + * @return void 无返回值 + */ +static inline void hinic5_module_pre_exit(void) { }; + +#endif + +/** + * @brief 获取struct hinic5_prof_attr指针 + * + * @param hwdev sdk驱动内部的硬件设备指针 + * + * @return: 命令执行结果. 
+ * @retval NULL 获取失败 + * @retval 非NULL 获取成功 + */ +struct hinic5_prof_attr *hinic5_get_prof_attr(void *hwdev); + +#endif diff --git a/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_wq.h b/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_wq.h new file mode 100644 index 00000000..d094220f --- /dev/null +++ b/hinic5/src/dpu_develop_interface/drv_sdk_intf/hisdk/hinic5_wq.h @@ -0,0 +1,260 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_WQ_H +#define HINIC5_WQ_H + +#include <linux/types.h> + +#include "hinic5_common.h" + +/** + * @brief struct hinic5_wq + * @details 用于描述一个工作队列的相关信息; 修改需同步到用户态的struct sdk_cmdq_info + */ +struct hinic5_wq { + u16 cons_idx; /**< 消费者索引 */ + u16 prod_idx; /**< 生产者索引 */ + + u32 q_depth; /**< 队列深度 */ + u16 idx_mask; /**< 队列索引掩码 */ + u16 wqebb_size_shift; /**< wqebb的位移 */ + u16 rsvd1; + u16 num_wq_pages; /**< wq页面的数量 */ + u32 wqebbs_per_page; /**< 每页的wqe数量 */ + u16 wqebbs_per_page_shift; + u16 wqebbs_per_page_mask; + + struct hinic5_dma_addr_align *wq_pages; /* 指向DMA地址对齐结构体的指针, + * 用于描述wq页面的相关信息 + */ + + dma_addr_t wq_block_paddr; /**< wq块的物理地址 */ + u64 *wq_block_vaddr; /**< wq块的虚拟地址 */ + + void *dev_hdl; /**< 指向pcidev->dev或Handler的指针 */ + u32 wq_page_size; /**< wq页面大小 */ + u16 wqebb_size; /**< wqebb大小 */ +} ____cacheline_aligned; + + +/** + * @brief 定义一个宏,用于计算给定队列中特定索引的掩码值 + * @param wq 队列对象 + * @param idx 索引值 + * + * @return 返回计算后的掩码值 + */ +#define WQ_MASK_IDX(wq, idx) (((u16)(idx)) & (wq)->idx_mask) +/** + * @brief 根据工作队列和页索引计算掩码页 + * @param wq 工作队列 + * @param pg_idx 页索引 + * + * @return 如果页索引小于工作队列的页数,则返回页索引,否则返回0 + */ +#define WQ_MASK_PAGE(wq, pg_idx) (((pg_idx) < (wq)->num_wq_pages) ? 
(pg_idx) : 0) +/** + * @brief 计算页面索引 + * @param wq 等待队列 + * @param idx 索引 + * + * @return 返回计算后的页面索引 + */ +#define WQ_PAGE_IDX(wq, idx) ((idx) >> (wq)->wqebbs_per_page_shift) +/** + * @brief 计算队列中元素的偏移量 + * @param wq 队列指针 + * @param idx 元素索引 + * + * @return 返回元素在页面中的偏移量 + */ +#define WQ_OFFSET_IN_PAGE(wq, idx) ((idx) & (wq)->wqebbs_per_page_mask) +/** + * @brief 获取WQEBB地址 + * @param wq 工作队列 + * @param pg_idx 页索引 + * @param idx_in_pg 页内索引 + * + * @return u8* 返回WQEBB地址 + */ +#define WQ_GET_WQEBB_ADDR(wq, pg_idx, idx_in_pg) \ + ((u8 *)(wq)->wq_pages[(pg_idx)].align_vaddr + \ + (((u64)(idx_in_pg)) << (wq)->wqebb_size_shift)) +/** + * @brief 判断队列是否为0级别 + * @param wq 队列指针 + * + * @return + * @retval true 如果队列是0级别 + * @retval false 如果队列不是0级别 + */ +#define WQ_IS_0_LEVEL_CLA(wq) ((wq)->num_wq_pages == 1) + +/** + * @brief free wq wqebb + * @param wq: pointer of wq control struct + * + * @return free wqebb number + */ +static inline u16 hinic5_wq_free_wqebbs(struct hinic5_wq *wq) +{ + return (u16)(wq->q_depth + - ((wq->q_depth + wq->prod_idx - wq->cons_idx) & wq->idx_mask) + - 1); +} + +/** + * @brief check wq empty + * @param wq: pointer of wq control struct + * + * @return true or false + */ +static inline bool hinic5_wq_is_empty(const struct hinic5_wq *wq) +{ + return WQ_MASK_IDX(wq, wq->prod_idx) == WQ_MASK_IDX(wq, wq->cons_idx); +} + +/** + * @brief get wq multi wqebbs + * @param wq: pointer of wq control struct + * @param wqebb_cnt: the number of wqebbs + * @param pi: producer index + * + * @return first wqebb address + **/ +static inline void *hinic5_wq_get_wqebbs(struct hinic5_wq *wq, u16 wqebb_cnt, u16 *pi) +{ + *pi = WQ_MASK_IDX(wq, wq->prod_idx); + wq->prod_idx += wqebb_cnt; + + return WQ_GET_WQEBB_ADDR(wq, WQ_PAGE_IDX(wq, *pi), WQ_OFFSET_IN_PAGE(wq, *pi)); +} + +/** + * @brief get wq one wqebb + * @param wq: pointer of wq control struct + * @param pi: producer index + **/ +static inline void *hinic5_wq_get_one_wqebb(struct hinic5_wq *wq, u16 *pi) +{ + return 
hinic5_wq_get_wqebbs(wq, 1, pi); +} + +/** + * @brief get wq multi wqebbs + * @param wq: pointer of wq control struct + * @param num_wqebbs: the number of wqebbs + * @param prod_idx: producer index + * @param second_part_wqebbs_addr: second part wqebbs address + * @param first_part_wqebbs_num: first part wqebbs address + * + * @return wqebbs address + */ +static inline void *hinic5_wq_get_multi_wqebbs(struct hinic5_wq *wq, u16 num_wqebbs, + u16 *prod_idx, void **second_part_wqebbs_addr, + u16 *first_part_wqebbs_num) +{ + u32 pg_idx, off_in_page; + + *prod_idx = WQ_MASK_IDX(wq, wq->prod_idx); + wq->prod_idx += num_wqebbs; + + pg_idx = WQ_PAGE_IDX(wq, *prod_idx); + off_in_page = WQ_OFFSET_IN_PAGE(wq, *prod_idx); + + if (off_in_page + num_wqebbs > wq->wqebbs_per_page) { + /* wqe across wq page boundary */ + *second_part_wqebbs_addr = WQ_GET_WQEBB_ADDR(wq, WQ_MASK_PAGE(wq, pg_idx + 1), 0); + *first_part_wqebbs_num = (u16)(wq->wqebbs_per_page - off_in_page); + } else { + *second_part_wqebbs_addr = NULL; + *first_part_wqebbs_num = num_wqebbs; + } + + return WQ_GET_WQEBB_ADDR(wq, pg_idx, off_in_page); +} + +/** + * @brief put wq wqebb + * @param wq: pointer of wq control struct + * @param num_wqebbs: number of wqebb + */ + +static inline void hinic5_wq_put_wqebbs(struct hinic5_wq *wq, u16 num_wqebbs) +{ + wq->cons_idx += num_wqebbs; +} + +/** + * @brief get wq wqebb address + * @param wq: pointer of wq control struct + * @param idx: wqbb idx + */ + +static inline void *hinic5_wq_wqebb_addr(struct hinic5_wq *wq, u16 idx) +{ + return WQ_GET_WQEBB_ADDR(wq, WQ_PAGE_IDX(wq, idx), WQ_OFFSET_IN_PAGE(wq, idx)); +} + +/** + * @brief read one wqebb + * @param wq: pointer of wq control struct + * @param cons_idx: wqbb index + * + * @return wqe page address + */ +static inline void *hinic5_wq_read_one_wqebb(struct hinic5_wq *wq, u16 *cons_idx) +{ + *cons_idx = WQ_MASK_IDX(wq, wq->cons_idx); + + return hinic5_wq_wqebb_addr(wq, *cons_idx); +} + +/** + * @brief get the first wqe page 
address + * @param wq: pointer of wq control struct + * + * @return wqe page address + */ +static inline u64 hinic5_wq_get_first_wqe_page_addr(struct hinic5_wq *wq) +{ + return wq->wq_pages[0].align_paddr; +} + +/** + * @brief reset wq zero wq page data + * @param wq: pointer of wq control struct + */ +static inline void hinic5_wq_reset(struct hinic5_wq *wq) +{ + u16 pg_idx; + + wq->cons_idx = 0; + wq->prod_idx = 0; + + for (pg_idx = 0; pg_idx < wq->num_wq_pages; pg_idx++) + memset(wq->wq_pages[pg_idx].align_vaddr, 0, wq->wq_page_size); +} + +/** + * @brief initial wq struct and alloc wq page + * @param udkdev: device pointer to udkdev + * @param wq: pointer of wq control struct + * @param q_depth: wq depth + * @param wqebb_size: the size of wqebb + * @return + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_wq_create(void *hwdev, struct hinic5_wq *wq, u32 q_depth, + u16 wqebb_size); + +/** + * @brief release wqe pages + * @param udkdev: device pointer to udkdev + * @param wq: pointer of wq control struct + */ +void hinic5_wq_destroy(struct hinic5_wq *wq); + +#endif diff --git a/hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/nic_kcompat.h b/hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/nic_kcompat.h new file mode 100644 index 00000000..c13f0df3 --- /dev/null +++ b/hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/nic_kcompat.h @@ -0,0 +1,60 @@ +/* Autogenerated for KSRC=/usr/src/kernels/6.6.0-28.0.0.34.oe2403.aarch64/ via nic-kcompat-generator.sh */ +#ifndef NIC_KCOMPAT_H +#define NIC_KCOMPAT_H +#define HAVE_NETIF_NAPI_NO_WEIGHT 1 +#define HAVE_NETIF_NAPI_ADD_WEIGHT 1 +#define HAVE_NDO_TX_TIMEOUT_TXQ 1 +#define HAVE_NDO_BPF 1 +#define HAVE_NDO_GET_STATS64 1 +#define HAVE_NDO_ETH_IOCTL 1 +#define HAVE_VOID_NDO_GET_STATS64 1 +#define HAVE_NDO_SET_FEATURES 1 +#define HAVE_NAPI_GRO_FLUSH_OLD 1 +#define HAVE_VF_SPOOFCHK_CONFIGURE 1 +#define HAVE_NDO_SET_VF_TRUST 1 +#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE 1 +#define 
HAVE_NETDEVICE_MIN_MAX_MTU 1 +#define HAVE_NETDEV_STATS_IN_NETDEV 1 +#define HAVE_NETDEVICE_MACSEC_OPS 1 +#define HAVE_NDO_SET_VF_LINK_STATE 1 +#define HAVE_NDO_SELECT_QUEUE_SB_DEV 1 +#define HAVE_NETDEV_CHANGEUPPER 1 +#define HAVE_ETHTOOL_COALESCE_EXTACK 1 +#define HAVE_ETHTOOL_EXTENDED_RINGPARAMS 1 +#define HAVE_RXFH_HASHFUNC 1 +#define HAVE_ETHTOOL_GET_RXFH_INDIR_SIZE 1 +#define HAVE_ETHTOOL_GET_MODULE_EEPROM_BY_PAGE 1 +#define SUPPORTED_COALESCE_PARAMS 1 +#define HAVE_ETHTOOL_SET_PHYS_ID 1 +#define NEED_DEFINE_SPEED_20000 1 +#define NEED_DEFINE_SPEED_25000 1 +#define NEED_DEFINE_SPEED_40000 1 +#define NEED_DEFINE_SPEED_100000 1 +#define HAVE_ETHTOOL_GLINKSETTINGS 1 +#define HAVE_NEW_ETHTOOL_LINK_SETTINGS_ONLY 1 +#define HAVE_NETDEV_PROG_XDP_WARN_ACTION 1 +#define HAVE_XDP_DO_FLUSH_MAP 1 +#define HAVE_SK_BUFF_ENCAPSULATION 1 +#define HAVE_TYPEDEF_SKB_FRAG_T_BIOVEC 1 +#define HAVE_SKBUFF_CSUM_LEVEL 1 +#define HAVE_ETH_GET_HEADLEN_NET_DEVICE_ARG 1 +#define HAVE_ETH_HW_ADDR_SET 1 +#define HAVE_ETH_GET_HEADLEN_FUNC 1 +#define NEED_ETH_P_8021AD 1 +#define HAVE_VM_FLAGS_SET 1 +#define NEED_PDE_DATA 1 +#define HAVE_PDE_DATA_LOWERCASE 1 +#define HAVE_NETIF_F_RXHASH 1 +#define HAVE_SOCK_CREATE_KERN_NET 1 +#define HAVE_UDP_TUNNEL_NIC_INFO 1 +#define NEED_DEFINE_FIELD_SIZEOF 1 +#define HAVE_XDP_SUPPORT 1 +#define HAVE_XDP_DATA_META 1 +#define HAVE_XDP_RXQ_INFO_REG_NAPI_ID 1 +#define HAVE_NETDEV_XDP_ACT_NDO_XMIT 1 +#define HAVE_PAGE_POOL_SUPPORT 1 +#define HAVE_PAGE_POOL_NEW 1 +#define HAVE_PP_FLAG_PAGE_FRAG 1 +#define HAVE_FLOW_ACTION_PRIORITY 1 +#define HAVE_KOBJ_TYPE_DEFAULT_GROUPS 1 +#endif /* NIC_KCOMPAT_H */ diff --git a/hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/ossl_knl.h b/hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/ossl_knl.h new file mode 100644 index 00000000..739d9942 --- /dev/null +++ b/hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/ossl_knl.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies 
Co., Ltd */ + +#ifndef OSSL_KNL_H +#define OSSL_KNL_H + +#ifdef __LINUX__ +#include "ossl_knl_linux.h" +#endif +#ifdef __WIN__ +#include "ossl_knl_win.h" +#ifdef __HIFC__ +#include "ossl_knl_fc_win.h" +#else +#include "ossl_knl_nic_win.h" +#endif +#endif +#ifdef __UEFI__ +#include "ossl_knl_uefi.h" +#endif + +#ifdef __VMWARE__ +#include "ossl_knl_vmware.h" +#endif + +#ifndef BIG_ENDIAN +#define BIG_ENDIAN 0x4321 +#endif + +#ifndef LITTLE_ENDIAN +#define LITTLE_ENDIAN 0x1234 +#endif + +#ifdef BYTE_ORDER +#undef BYTE_ORDER +#endif +/* X86 */ +#define BYTE_ORDER LITTLE_ENDIAN +#define USEC_PER_MSEC 1000L +#define MSEC_PER_SEC 1000L + +#ifndef UINT16_MAX +#define UINT16_MAX ((u16)(~((u16)0))) /* 0xFFFF */ +#endif /* UINT16_MAX */ + +#ifndef __LINUX__ +#ifndef MAX_ORDER +#define MAX_ORDER 10 +#endif +#endif + +#endif /* OSSL_KNL_H */ diff --git a/hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/ossl_knl_linux.h b/hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/ossl_knl_linux.h new file mode 100644 index 00000000..7a2a559d --- /dev/null +++ b/hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/ossl_knl_linux.h @@ -0,0 +1,1713 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef OSSL_KNL_LINUX_H_ +#define OSSL_KNL_LINUX_H_ + +#include <net/ipv6.h> +#include <net/checksum.h> +#include <linux/string.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/version.h> +#include <linux/ethtool.h> +#include <linux/fs.h> +#include <linux/kthread.h> +#include <linux/if_vlan.h> +#include <linux/udp.h> +#include <linux/highmem.h> +#include <linux/list.h> +#include <linux/bitmap.h> +#include <linux/slab.h> +#include <linux/math64.h> +#include <linux/mm.h> +#include <linux/mmzone.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> + +#include "sdk_kcompat.h" +#include "ossl_knl_linux_nic.h" +#ifdef HAVE_XDP_SUPPORT +#include <net/xdp.h> +#endif /* HAVE_XDP_SUPPORT */ + +/* UTS_RELEASE is in a 
different header starting in kernel 2.6.18 */ +#ifndef UTS_RELEASE +/* utsrelease.h changed locations in 2.6.33 */ +#if (KERNEL_VERSION(2, 6, 33) > LINUX_VERSION_CODE) +#include <linux/utsrelease.h> +#else +#include <generated/utsrelease.h> +#endif +#endif + +#undef __always_unused +#define __always_unused __attribute__((__unused__)) + +#define ossl_get_free_pages __get_free_pages + +#ifndef high_16_bits +#define low_16_bits(x) ((x) & 0xFFFF) +#define high_16_bits(x) (((x) & 0xFFFF0000) >> 16) +#endif + +#ifndef U8_MAX +#define U8_MAX 0xFF +#endif + +#ifndef RHEL_RELEASE_VERSION +#define RHEL_RELEASE_VERSION(a, b) (((a) << 8) + (b)) +#endif +#ifndef AX_RELEASE_VERSION +#define AX_RELEASE_VERSION(a, b) (((a) << 8) + (b)) +#endif + +#ifndef AX_RELEASE_CODE +#define AX_RELEASE_CODE 0 +#endif + +#if (defined(AX_RELEASE_CODE) && AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3, 0)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5, 0) +#elif (defined(AX_RELEASE_CODE) && AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3, 1)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5, 1) +#elif (defined(AX_RELEASE_CODE) && AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3, 2)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5, 3) +#endif + +#ifndef RHEL_RELEASE_CODE +/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5. */ +#define RHEL_RELEASE_CODE 0 +#endif + +/* RHEL 7 didn't backport the parameter change in + * create_singlethread_workqueue. + * If/when RH corrects this we will want to tighten up the version check. + */ +#if (defined(RHEL_RELEASE_CODE) && RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 0)) +#undef create_singlethread_workqueue +#define create_singlethread_workqueue(name) \ + alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name) +#endif + +/* Ubuntu Release ABI is the 4th digit of their kernel version. You can find + * it in /usr/src/linux/$(uname -r)/include/generated/utsrelease.h for new + * enough versions of Ubuntu. 
Otherwise you can simply see it in the output of + * uname as the 4th digit of the kernel. The UTS_UBUNTU_RELEASE_ABI is not in + * the linux-source package, but in the linux-headers package. It begins to + * appear in later releases of 14.04 and 14.10. + * + * Ex: + * <Ubuntu 14.04.1> + * $uname -r + * 3.13.0-45-generic + * ABI is 45 + * + * <Ubuntu 14.10> + * $uname -r + * 3.16.0-23-generic + * ABI is 23. + */ +#ifndef UTS_UBUNTU_RELEASE_ABI +#define UTS_UBUNTU_RELEASE_ABI 0 +#define UBUNTU_VERSION_CODE 0 +#else + +#ifndef __HULK_3_10__ +/* Ubuntu does not provide actual release version macro, so we use the kernel + * version plus the ABI to generate a unique version code specific to Ubuntu. + * In addition, we mask the lower 8 bits of LINUX_VERSION_CODE in order to + * ignore differences in sublevel which are not important since we have the + * ABI value. Otherwise, it becomes impossible to correlate ABI to version for + * ordering checks. + */ +#define UBUNTU_VERSION_CODE \ + (((~0xFF & LINUX_VERSION_CODE) << 8) + UTS_UBUNTU_RELEASE_ABI) +#endif +#if UTS_UBUNTU_RELEASE_ABI > 255 +#error UTS_UBUNTU_RELEASE_ABI is too large... +#endif /* UTS_UBUNTU_RELEASE_ABI > 255 */ + +#if (KERNEL_VERSION(3, 0, 0) >= LINUX_VERSION_CODE) +/* Our version code scheme does not make sense for non 3.x or newer kernels, + * and we have no support in kcompat for this scenario. Thus, treat this as a + * non-Ubuntu kernel. Possibly might be better to error here. + */ +#define UTS_UBUNTU_RELEASE_ABI 0 +#define UBUNTU_VERSION_CODE 0 +#endif +#endif + +/* Note that the 3rd digit is always zero, and will be ignored. This is + * because Ubuntu kernels are based on x.y.0-ABI values, and while their linux + * version codes are 3 digit, this 3rd digit is superseded by the ABI value. 
+ */ +#define UBUNTU_VERSION(a, b, c, d) ((KERNEL_VERSION(a, b, 0) << 8) + (d)) + +#ifndef DEEPIN_PRODUCT_VERSION +#define DEEPIN_PRODUCT_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c)) +#endif + +#ifdef CONFIG_DEEPIN_KERNEL +#if (KERNEL_VERSION(4, 4, 102) == LINUX_VERSION_CODE) +#define DEEPIN_VERSION_CODE DEEPIN_PRODUCT_VERSION(15, 2, 0) +#endif +#endif + +#ifndef DEEPIN_VERSION_CODE +#define DEEPIN_VERSION_CODE 0 +#endif + +/* SuSE version macros are the same as Linux kernel version macro. */ +#ifndef SLE_VERSION +#define SLE_VERSION(a, b, c) KERNEL_VERSION(a, b, c) +#endif +#define SLE_LOCALVERSION(a, b, c) KERNEL_VERSION(a, b, c) +#ifdef CONFIG_SUSE_KERNEL +#if (KERNEL_VERSION(2, 6, 27) == LINUX_VERSION_CODE) +/* SLES11 GA is 2.6.27 based. */ +#define SLE_VERSION_CODE SLE_VERSION(11, 0, 0) +#elif (KERNEL_VERSION(2, 6, 32) == LINUX_VERSION_CODE) +/* SLES11 SP1 is 2.6.32 based. */ +#define SLE_VERSION_CODE SLE_VERSION(11, 1, 0) +#elif (KERNEL_VERSION(3, 0, 13) == LINUX_VERSION_CODE) +/* SLES11 SP2 GA is 3.0.13-0.27. */ +#define SLE_VERSION_CODE SLE_VERSION(11, 2, 0) +#elif (KERNEL_VERSION(3, 0, 76) == LINUX_VERSION_CODE) +/* SLES11 SP3 GA is 3.0.76-0.11. */ +#define SLE_VERSION_CODE SLE_VERSION(11, 3, 0) +#elif (KERNEL_VERSION(3, 0, 101) == LINUX_VERSION_CODE) +/* SLES11 SP4 GA (3.0.101-63) and update kernels 3.0.101-63+ */ +#define SLE_VERSION_CODE SLE_VERSION(11, 4, 0) +#elif (KERNEL_VERSION(3, 12, 28) == LINUX_VERSION_CODE) +/* + * SLES12 GA is 3.12.28-4 + * kernel updates 3.12.xx-<33 through 52>[.yy]. 
+ */ +#define SLE_VERSION_CODE SLE_VERSION(12, 0, 0) +#elif (KERNEL_VERSION(3, 12, 49) == LINUX_VERSION_CODE) +/* + * SLES12 SP1 GA is 3.12.49-11 + * updates 3.12.xx-60.yy where xx={51..} + */ +#define SLE_VERSION_CODE SLE_VERSION(12, 1, 0) +#elif ((KERNEL_VERSION(4, 4, 21) <= LINUX_VERSION_CODE && \ + (KERNEL_VERSION(4, 4, 59) >= LINUX_VERSION_CODE)) || \ + (KERNEL_VERSION(4, 4, 74) <= LINUX_VERSION_CODE && \ + KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE && \ + KERNEL_VERSION(92, 0, 0) <= SLE_LOCALVERSION_CODE && \ + KERNEL_VERSION(93, 0, 0) > SLE_LOCALVERSION_CODE)) +/* + * SLES12 SP2 GA is 4.4.21-69. + * SLES12 SP2 updates before SLES12 SP3 are: 4.4.{21,38,49,59} + * SLES12 SP2 updates after SLES12 SP3 are: 4.4.{74,90,103,114,120} + * but they all use a SLE_LOCALVERSION_CODE matching 92.nn.y + */ +#define SLE_VERSION_CODE SLE_VERSION(12, 2, 0) +#elif ((KERNEL_VERSION(4, 4, 73) == LINUX_VERSION_CODE || \ + KERNEL_VERSION(4, 4, 82) == LINUX_VERSION_CODE || \ + KERNEL_VERSION(4, 4, 92)) == LINUX_VERSION_CODE || \ + (KERNEL_VERSION(4, 4, 103) == LINUX_VERSION_CODE && \ + (KERNEL_VERSION(6, 33, 0) == LINUX_VERSION_CODE || \ + KERNEL_VERSION(6, 38, 0) == SLE_LOCALVERSION_CODE)) || \ + (KERNEL_VERSION(4, 4, 114) <= LINUX_VERSION_CODE && \ + KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE && \ + KERNEL_VERSION(94, 0, 0) <= SLE_LOCALVERSION_CODE && \ + KERNEL_VERSION(95, 0, 0) > SLE_LOCALVERSION_CODE)) +/* SLES12 SP3 GM is 4.4.73-5 and update kernels are 4.4.82-6.3. + * SLES12 SP3 updates not conflicting with SP2 are: 4.4.{82,92} + * SLES12 SP3 updates conflicting with SP2 are: + * - 4.4.103-6.33.1, 4.4.103-6.38.1 + * - 4.4.{114,120}-94.nn.y + */ +#define SLE_VERSION_CODE SLE_VERSION(12, 3, 0) +#elif (KERNEL_VERSION(4, 12, 14) <= LINUX_VERSION_CODE) +/* SLES15 Beta1 is 4.12.14-2. 
+ * SLES12 SP4 will also use 4.12.14-nn.xx.y + */ +#define SLE_VERSION_CODE SLE_VERSION(15, 0, 0) + +#include <linux/suse_version.h> + +/* + * new SLES kernels must be added here with >= based on kernel + * the idea is to order from newest to oldest and just catch all + * of them using the >= + */ +#endif /* LINUX_VERSION_CODE == KERNEL VERSION(x,y,z) */ +#endif /* CONFIG_SUSE_KERNEL */ +#ifndef SLE_VERSION_CODE +#define SLE_VERSION_CODE 0 +#endif /* SLE_VERSION_CODE */ +#ifndef SLE_LOCALVERSION_CODE +#define SLE_LOCALVERSION_CODE 0 +#endif /* SLE_LOCALVERSION_CODE */ +#ifndef SUSE_PRODUCT_CODE +#define SUSE_PRODUCT_CODE 0 +#endif /* SUSE_PRODUCT_CODE */ +#ifndef SUSE_PRODUCT +#define SUSE_PRODUCT(product, version, patchlevel, auxrelease) \ + (((product) << 24) + ((version) << 16) + \ + ((patchlevel) << 8) + (auxrelease)) +#endif /* SUSE_PRODUCT */ + +#ifndef ALIGN_DOWN +#ifndef __ALIGN_KERNEL +#define __ALIGN_KERNEL(x, a) __ALIGN_MASK(x, (typeof(x))(a) - 1) +#endif +#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a)) +#endif +/* ************************************************************************ */ +/* mm buddy */ +#ifndef MAX_ORDER +#ifdef MAX_PAGE_ORDER +#define MAX_ORDER MAX_PAGE_ORDER +#endif /* MAX_PAGE_ORDER */ +#endif /* !MAX_ORDER */ + +/* ************************************************************************ */ +#if (KERNEL_VERSION(2, 6, 22) > LINUX_VERSION_CODE) +#define tcp_hdr(skb) ((skb)->h.th) +#define tcp_hdrlen(skb) ((skb)->h.th->doff << 2) +#define skb_transport_offset(skb) ((skb)->h.raw - (skb)->data) +#define skb_transport_header(skb) ((skb)->h.raw) +#define ipv6_hdr(skb) ((skb)->nh.ipv6h) +#define ip_hdr(skb) ((skb)->nh.iph) +#define skb_network_header(skb) ((skb)->nh.raw) +#define skb_tail_pointer(skb) ((skb)->tail) +#define skb_reset_tail_pointer(skb) ((skb)->tail = (skb)->data) +#define skb_set_tail_pointer(skb, offset) \ + ((skb)->tail = (skb)->data + (offset)) +#define skb_copy_to_linear_data(skb, from, len) 
memcpy(skb->data, from, len) +#define pci_register_driver pci_module_init + +#ifdef NETIF_F_MULTI_QUEUE +#ifndef alloc_etherdev_mq +#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a) +#endif +#endif /* NETIF_F_MULTI_QUEUE */ + +#ifndef ETH_FCS_LEN +#define ETH_FCS_LEN 4 +#endif +#define cancel_work_sync(x) flush_scheduled_work() +#ifndef udp_hdr +#define udp_hdr _udp_hdr +static inline struct udphdr *_udp_hdr(const struct sk_buff *skb) +{ + return (struct udphdr *)skb_transport_header(skb); +} +#endif + +#ifdef cpu_to_be16 +#undef cpu_to_be16 +#endif +#define cpu_to_be16(x) __constant_htons(x) +#endif /* < 2.6.22 */ + +/* ************************************************************************ */ +#if (KERNEL_VERSION(2, 6, 32) > LINUX_VERSION_CODE) +#undef netdev_tx_t +#define netdev_tx_t int +#endif /* < 2.6.32 */ + +/* ************************************************************************ */ +#if (KERNEL_VERSION(2, 6, 33) > LINUX_VERSION_CODE) +#ifndef IPV4_FLOW +#define IPV4_FLOW 0x10 +#endif /* IPV4_FLOW */ +#ifndef IPV6_FLOW +#define IPV6_FLOW 0x11 +#endif /* IPV6_FLOW */ + +#ifndef __percpu +#define __percpu +#endif /* __percpu */ + +#ifndef PORT_DA +#define PORT_DA PORT_OTHER +#endif /* PORT_DA */ +#ifndef PORT_NONE +#define PORT_NONE PORT_OTHER +#endif + +#if ((RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_VERSION(6, 3) <= RHEL_RELEASE_CODE) && \ + (RHEL_RELEASE_VERSION(7, 0) > RHEL_RELEASE_CODE))) +#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE) +#undef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME +#undef DEFINE_DMA_UNMAP_LEN +#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME +#undef dma_unmap_addr +#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) +#undef dma_unmap_addr_set +#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) +#undef dma_unmap_len +#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) +#undef dma_unmap_len_set +#define dma_unmap_len_set(PTR, 
LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) +#endif /* CONFIG_X86_64 && !CONFIG_NEED_DMA_MAP_STATE */ +#endif /* RHEL_RELEASE_CODE */ + +#if (!(RHEL_RELEASE_CODE && (RHEL_RELEASE_VERSION(6, 2) <= RHEL_RELEASE_CODE))) +#define sk_tx_queue_get(_sk) (-1) +#define sk_tx_queue_set(_sk, _tx_queue) \ + do { \ + } while (0) +#endif /* !(RHEL >= 6.2) */ +#endif /* < 2.6.33 */ + +/* ************************************************************************ */ +#if (KERNEL_VERSION(2, 6, 34) > LINUX_VERSION_CODE) +#if (RHEL_RELEASE_VERSION(6, 0) > RHEL_RELEASE_CODE) +#ifndef pci_num_vf +#define pci_num_vf(pdev) _kc_pci_num_vf(pdev) +int _kc_pci_num_vf(struct pci_dev *dev); +#endif +#endif /* RHEL_RELEASE_CODE */ + +#ifndef ETH_FLAG_NTUPLE +#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE +#endif + +#ifndef netdev_mc_count +#define netdev_mc_count(dev) ((dev)->mc_count) +#endif +#ifndef netdev_mc_empty +#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0) +#endif +#ifndef netdev_for_each_mc_addr +#define netdev_for_each_mc_addr(mclist, dev) \ + for (mclist = dev->mc_list; mclist; mclist = mclist->next) +#endif +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc.count) +#endif +#ifndef netdev_uc_empty +#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0) +#endif +#ifndef netdev_for_each_uc_addr +#define netdev_for_each_uc_addr(ha, dev) \ + list_for_each_entry(ha, &dev->uc.list, list) +#endif +#ifndef dma_set_coherent_mask +#define dma_set_coherent_mask(dev, mask) \ + pci_set_consistent_dma_mask(to_pci_dev(dev), (mask)) +#endif + +/* netdev logging taken from include/linux/netdevice.h */ +#ifndef netdev_name +static inline const char *_kc_netdev_name(const struct net_device *dev) +{ + if (dev->reg_state != NETREG_REGISTERED) + return "(unregistered net_device)"; + return dev->name; +} + +#define netdev_name(netdev) _kc_netdev_name(netdev) +#endif /* netdev_name */ + +#undef netdev_emerg +#define netdev_emerg(dev, format, args...) 
\ + netdev_printk(KERN_EMERG, dev, format, ##args) +#undef netdev_alert +#define netdev_alert(dev, format, args...) \ + netdev_printk(KERN_ALERT, dev, format, ##args) +#undef netdev_crit +#define netdev_crit(dev, format, args...) \ + netdev_printk(KERN_CRIT, dev, format, ##args) +#undef netdev_err +#define netdev_err(dev, format, args...) \ + netdev_printk(KERN_ERR, dev, format, ##args) +#undef netdev_warn +#define netdev_warn(dev, format, args...) \ + netdev_printk(KERN_WARNING, dev, format, ##args) +#undef netdev_notice +#define netdev_notice(dev, format, args...) \ + netdev_printk(KERN_NOTICE, dev, format, ##args) +#undef netdev_info +#define netdev_info(dev, format, args...) \ + netdev_printk(KERN_INFO, dev, format, ##args) +#undef netdev_dbg +#if defined(DEBUG) +#define netdev_dbg(__dev, format, args...) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args) +#elif defined(CONFIG_DYNAMIC_DEBUG) +#define netdev_dbg(__dev, format, args...) \ + dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \ + netdev_name(__dev), ##args) +#else /* DEBUG */ +#define netdev_dbg(__dev, format, args...) \ + ({ \ + if (0) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args); \ + 0; \ + }) +#endif /* DEBUG */ + +#undef netif_printk +#define netif_printk(priv, type, level, dev, fmt, args...) \ + do { \ + if (netif_msg_##type(priv) != 0) \ + netdev_printk(level, (dev), fmt, ##args); \ + } while (0) + +#undef netif_emerg +#define netif_emerg(priv, type, dev, fmt, args...) \ + netif_level(emerg, priv, type, dev, fmt, ##args) +#undef netif_alert +#define netif_alert(priv, type, dev, fmt, args...) \ + netif_level(alert, priv, type, dev, fmt, ##args) +#undef netif_crit +#define netif_crit(priv, type, dev, fmt, args...) \ + netif_level(crit, priv, type, dev, fmt, ##args) +#undef netif_err +#define netif_err(priv, type, dev, fmt, args...) \ + netif_level(err, priv, type, dev, fmt, ##args) +#undef netif_warn +#define netif_warn(priv, type, dev, fmt, args...) 
\ + netif_level(warn, priv, type, dev, fmt, ##args) +#undef netif_notice +#define netif_notice(priv, type, dev, fmt, args...) \ + netif_level(notice, priv, type, dev, fmt, ##args) +#undef netif_info +#define netif_info(priv, type, dev, fmt, args...) \ + netif_level(info, priv, type, dev, fmt, ##args) +#undef netif_dbg +#define netif_dbg(priv, type, dev, fmt, args...) \ + netif_level(dbg, priv, type, dev, fmt, ##args) + +#ifndef for_each_set_bit +#define for_each_set_bit(bit, addr, size) \ + for ((bit) = find_first_bit((addr), (size)); \ + (bit) < (size); (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif /* for_each_set_bit */ + +#ifndef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN +#define dma_unmap_addr pci_unmap_addr +#define dma_unmap_addr_set pci_unmap_addr_set +#define dma_unmap_len pci_unmap_len +#define dma_unmap_len_set pci_unmap_len_set +#endif /* DEFINE_DMA_UNMAP_ADDR */ + +#ifndef pci_bus_speed +/* override pci_bus_speed introduced in 2.6.19 with an expanded enum type */ +enum _kc_pci_bus_speed { + _KC_PCIE_SPEED_2_5GT = 0x14, + _KC_PCIE_SPEED_5_0GT = 0x15, + _KC_PCIE_SPEED_8_0GT = 0x16, + _KC_PCI_SPEED_UNKNOWN = 0xff, +}; + +#define pci_bus_speed _kc_pci_bus_speed +#define PCIE_SPEED_2_5GT _KC_PCIE_SPEED_2_5GT +#define PCIE_SPEED_5_0GT _KC_PCIE_SPEED_5_0GT +#define PCIE_SPEED_8_0GT _KC_PCIE_SPEED_8_0GT +#define PCI_SPEED_UNKNOWN _KC_PCI_SPEED_UNKNOWN +#endif /* pci_bus_speed */ + +#else /* < 2.6.34 */ +#ifndef HAVE_SET_RX_MODE +#define HAVE_SET_RX_MODE +#endif +#define HAVE_INET6_IFADDR_LIST +#endif /* < 2.6.34 */ + +/* ************************************************************************ */ +#if (KERNEL_VERSION(2, 6, 36) > LINUX_VERSION_CODE) +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#ifdef NET_IP_ALIGN +#undef NET_IP_ALIGN +#endif +#define NET_IP_ALIGN 0 +#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ + +#ifdef __LINX_6_0_60__ +enum 
work_busy_status { + /* bit mask for work_busy() return values */ + WORK_BUSY_PENDING = 1 << 0, + WORK_BUSY_RUNNING = 1 << 1, +}; + +#define work_busy(work) _work_busy(work) +unsigned int _work_busy(struct work_struct *work); + +#endif + +#ifdef NET_SKB_PAD +#undef NET_SKB_PAD +#endif + +#if (L1_CACHE_BYTES > 32) +#define NET_SKB_PAD L1_CACHE_BYTES +#else +#define NET_SKB_PAD 32 +#endif + +static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev, + unsigned int length) +{ + struct sk_buff *skb; + + skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC); + if (skb) { +#if (NET_IP_ALIGN + NET_SKB_PAD) + skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD); +#endif + skb->dev = dev; + } + + return skb; +} + +#ifdef netdev_alloc_skb_ip_align +#undef netdev_alloc_skb_ip_align +#endif +#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l) + +#undef netif_level +#define netif_level(level, priv, type, dev, fmt, args...) \ + do { \ + if (netif_msg_##type(priv) != 0) \ + netdev_##level(dev, fmt, ##args); \ + } while (0) + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 3))) +#undef usleep_range +#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) +#endif + +#define u64_stats_update_begin(a) \ + do { \ + } while (0) +#define u64_stats_update_end(a) \ + do { \ + } while (0) +#define u64_stats_fetch_retry(a, b) (0) +#define u64_stats_fetch_begin(a) (0) +#define u64_stats_fetch_retry_bh(a, b) (0) +#define u64_stats_fetch_begin_bh(a) (0) +struct u64_stats_sync_empty { + int:0; +}; + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 1)) +#define HAVE_8021P_SUPPORT +#endif + +/* RHEL6.4 and SLES11sp2 backported skb_tx_timestamp */ +/* RHEL6.4 and SLES11sp2 backported skb_tx_timestamp */ +#if (!(RHEL_RELEASE_VERSION(6, 4) <= RHEL_RELEASE_CODE) && \ + !(SLE_VERSION(11, 2, 0) <= SLE_VERSION_CODE)) +static inline void skb_tx_timestamp(struct sk_buff __always_unused *skb) {} 
+#endif + +#else /* < 2.6.36 */ + + +#endif /* < 2.6.36 */ + +/* ************************************************************************ */ +#if (KERNEL_VERSION(2, 6, 37) > LINUX_VERSION_CODE) +#ifndef VLAN_N_VID +#define VLAN_N_VID VLAN_GROUP_ARRAY_LEN +#endif /* VLAN_N_VID */ + +static inline void *_kc_vzalloc(unsigned long size) +{ + void *pr = vmalloc(size); + + if (pr) + memset(pr, 0, size); + return pr; +} + +#define vzalloc(_size) _kc_vzalloc(_size) +#endif + +/* ************************************************************************ */ +#if (KERNEL_VERSION(2, 6, 39) > LINUX_VERSION_CODE) + +#ifndef TC_BITMASK +#define TC_BITMASK 15 +#endif + +#ifndef NETIF_F_RXCSUM +#define NETIF_F_RXCSUM BIT(29) +#endif + +#ifndef skb_queue_reverse_walk_safe +#define skb_queue_reverse_walk_safe(queue, skb, tmp) \ + for (skb = (queue)->prev, tmp = skb->prev; \ + skb != (struct sk_buff *)(queue); skb = tmp, tmp = skb->prev) +#endif + +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6, 0))) +u8 _kc_netdev_get_num_tc(struct net_device *dev); +#define netdev_get_num_tc(dev) _kc_netdev_get_num_tc(dev) +int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc); +#define netdev_set_num_tc(dev, tc) _kc_netdev_set_num_tc((dev), (tc)) +#define netdev_reset_tc(dev) _kc_netdev_set_num_tc((dev), 0) +#define netdev_set_tc_queue(dev, tc, cnt, off) \ + do { \ + } while (0) +#define netdev_set_prio_tc_map(dev, up, tc) \ + do { \ + } while (0) +#else /* RHEL6.1 or greater */ +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif /* HAVE_MQPRIO */ + +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ + +#ifndef udp_csum +#define udp_csum __kc_udp_csum +static inline __wsum __kc_udp_csum(struct sk_buff *skb) +{ + __wsum csum = csum_partial(skb_transport_header(skb), + sizeof(struct udphdr), skb->csum); + + for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) + csum = csum_add(csum, skb->csum); + + return csum; +} +#endif /* udp_csum */ +#else /* < 2.6.39 
*/ + +#ifndef HAVE_MQPRIO +#define HAVE_MQPRIO +#endif +#ifndef HAVE_SETUP_TC +#define HAVE_SETUP_TC +#endif + +#define HAVE_IRQ_AFFINITY_NOTIFY +#endif /* < 2.6.39 */ + +/* ************************************************************************ */ +#if (KERNEL_VERSION(3, 0, 0) > LINUX_VERSION_CODE) + +#ifndef kfree_rcu +#define kfree_rcu(_ptr, _rcu_head) kfree(_ptr) +#endif /* kfree_rcu */ + +#else +#define HAVE_NETDEV_WANTED_FEAUTES +#endif + +/* ************************************************************************ */ +#if (KERNEL_VERSION(3, 2, 0) > LINUX_VERSION_CODE) +#ifndef dma_zalloc_coherent +#define dma_zalloc_coherent(d, s, h, f) _kc_dma_zalloc_coherent(d, s, h, f) +static inline void *_kc_dma_zalloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + void *ret = dma_alloc_coherent(dev, size, dma_handle, flag); + + if (ret) + memset(ret, 0, size); + + return ret; +} +#endif + +#ifndef skb_frag_size +#define skb_frag_size(frag) kc_skb_frag_size(frag) +static inline unsigned int kc_skb_frag_size(const skb_frag_t *frag) +{ + return frag->size; +} +#endif /* skb_frag_size */ + +#ifndef skb_frag_size_sub +#define skb_frag_size_sub(frag, delta) kc_skb_frag_size_sub(frag, delta) +static inline void kc_skb_frag_size_sub(skb_frag_t *frag, int delta) +{ + frag->size -= delta; +} +#endif /* skb_frag_size_sub */ + +#ifndef skb_frag_page +#define skb_frag_page(frag) _kc_skb_frag_page(frag) +static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag) +{ + return frag->page; +} +#endif /* skb_frag_page */ + +#ifndef skb_frag_address +#define skb_frag_address(frag) kc_skb_frag_address(frag) +static inline void *kc_skb_frag_address(const skb_frag_t *frag) +{ + return page_address(skb_frag_page(frag)) + frag->page_offset; +} +#endif /* skb_frag_address */ + +#ifndef skb_frag_dma_map +#if (KERNEL_VERSION(2, 6, 0) <= LINUX_VERSION_CODE) +#include <linux/dma-mapping.h> +#endif +#define skb_frag_dma_map(dev, frag, offset, size, 
dir) \ + _kc_skb_frag_dma_map(dev, frag, offset, size, dir) + +static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev, const skb_frag_t *frag, + size_t offset, size_t size, + enum dma_data_direction dir) +{ + return dma_map_page(dev, skb_frag_page(frag), + frag->page_offset + offset, size, dir); +} +#endif /* skb_frag_dma_map */ + +#ifndef __skb_frag_unref +#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag) +static inline void __kc_skb_frag_unref(skb_frag_t *frag) +{ + put_page(skb_frag_page(frag)); +} +#endif /* __skb_frag_unref */ + +#ifndef SPEED_UNKNOWN +#define SPEED_UNKNOWN -1 +#endif +#ifndef DUPLEX_UNKNOWN +#define DUPLEX_UNKNOWN 0xff +#endif +#if ((RHEL_RELEASE_VERSION(6, 3) <= RHEL_RELEASE_CODE) || \ + (SLE_VERSION_CODE && SLE_VERSION(11, 3, 0) <= SLE_VERSION_CODE)) +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#endif +#endif +#else /* < 3.2.0 */ +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED + +#endif + +#endif /* < 3.2.0 */ + +#if (KERNEL_VERSION(3, 3, 0) > LINUX_VERSION_CODE) +/* + * NOTE: the order of parameters to _kc_alloc_workqueue() is different than + * alloc_workqueue() to avoid compiler warning from -Wvarargs. + */ +#define STR_IDX 3 +#define FIRST_TO_CHECK 4 +static inline struct workqueue_struct *__printf(STR_IDX, FIRST_TO_CHECK) +_kc_alloc_workqueue(__maybe_unused int flags, __maybe_unused int max_active, + const char *fmt, ...) 
+{ + struct workqueue_struct *wq; + va_list args, temp; + unsigned int len; + char *p; + + va_start(args, fmt); + va_copy(temp, args); + len = vsnprintf(NULL, 0, fmt, temp); + va_end(temp); + + p = kmalloc(len + 1, GFP_KERNEL); + if (!p) { + va_end(args); + return NULL; + } + + vsnprintf(p, len + 1, fmt, args); + va_end(args); + +#if (KERNEL_VERSION(2, 6, 36) > LINUX_VERSION_CODE) + wq = create_workqueue(p); +#else + wq = alloc_workqueue(p, flags, max_active); +#endif + kfree(p); + + return wq; +} + +#ifdef alloc_workqueue +#undef alloc_workqueue +#endif +#define alloc_workqueue(fmt, flags, max_active, args...) \ + _kc_alloc_workqueue(flags, max_active, fmt, ##args) + +#if !(RHEL_RELEASE_VERSION(6, 5) <= RHEL_RELEASE_CODE) +typedef u32 netdev_features_t; +#endif +#undef PCI_EXP_TYPE_RC_EC +#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ +#ifndef CONFIG_BQL +#define netdev_tx_completed_queue(_q, _p, _b) \ + do { \ + } while (0) +#define netdev_completed_queue(_n, _p, _b) \ + do { \ + } while (0) +#define netdev_tx_sent_queue(_q, _b) \ + do { \ + } while (0) +#define netdev_sent_queue(_n, _b) \ + do { \ + } while (0) +#define netdev_tx_reset_queue(_q) \ + do { \ + } while (0) +#define netdev_reset_queue(_n) \ + do { \ + } while (0) +#endif +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11, 3, 0)) +#endif /* SLE_VERSION(11,3,0) */ +#define netif_xmit_stopped(_q) netif_tx_queue_stopped(_q) +#if !(SLE_VERSION_CODE && SLE_VERSION(11, 4, 0) <= SLE_VERSION_CODE) +static inline int kc_ipv6_skip_exthdr(const struct sk_buff *skb, int start, + u8 *nexthdrp, + __be16 __always_unused *frag_offp) +{ + return ipv6_skip_exthdr(skb, start, nexthdrp); +} + +#undef ipv6_skip_exthdr +#define ipv6_skip_exthdr(a, b, c, d) kc_ipv6_skip_exthdr((a), (b), (c), (d)) +#endif /* !SLES11sp4 or greater */ + +#else /* ! 
< 3.3.0 */ +#define HAVE_INT_NDO_VLAN_RX_ADD_VID +#ifdef ETHTOOL_SRXNTUPLE +#undef ETHTOOL_SRXNTUPLE +#endif +#endif /* < 3.3.0 */ + +/* ************************************************************************ */ +#if (KERNEL_VERSION(3, 4, 0) > LINUX_VERSION_CODE) +#ifndef NETIF_F_RXFCS +#define NETIF_F_RXFCS 0 +#endif /* NETIF_F_RXFCS */ +#ifndef NETIF_F_RXALL +#define NETIF_F_RXALL 0 +#endif /* NETIF_F_RXALL */ + +#if !(SLE_VERSION_CODE && SLE_VERSION(11, 3, 0) <= SLE_VERSION_CODE) +#define NUMTCS_RETURNS_U8 +#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */ + +#ifndef skb_add_rx_frag +#define skb_add_rx_frag _kc_skb_add_rx_frag +void _kc_skb_add_rx_frag(struct sk_buff *param0, int param1, + struct page *param2, int param3, int param4, unsigned int param5); +#endif +#ifdef NET_ADDR_RANDOM +#define eth_hw_addr_random(N) \ + do { \ + eth_random_addr(N->dev_addr); \ + N->addr_assign_type |= NET_ADDR_RANDOM; \ + } while (0) +#else /* NET_ADDR_RANDOM */ +#define eth_hw_addr_random(N) eth_random_addr(N->dev_addr) +#endif /* NET_ADDR_RANDOM */ + +#ifndef for_each_set_bit_from +#define for_each_set_bit_from(bit, addr, size) \ + for ((bit) = find_next_bit((addr), (size), (bit)); \ + (bit) < (size); (bit) = find_next_bit((addr), (size), (bit) + 1)) +#endif /* for_each_set_bit_from */ + +#if (RHEL_RELEASE_VERSION(7, 0) > RHEL_RELEASE_CODE) +#define _kc_kmap_atomic(page) kmap_atomic(page, KM_SKB_DATA_SOFTIRQ) +#define _kc_kunmap_atomic(addr) kunmap_atomic(addr, KM_SKB_DATA_SOFTIRQ) +#else +#define _kc_kmap_atomic(page) __kmap_atomic(page) +#define _kc_kunmap_atomic(addr) __kunmap_atomic(addr) +#endif + +#else /* < 3.4.0 */ + +#define _kc_kmap_atomic(page) kmap_atomic(page) +#define _kc_kunmap_atomic(addr) kunmap_atomic(addr) +#endif /* >= 3.4.0 */ + +/* ************************************************************************ */ +#if (KERNEL_VERSION(3, 5, 0) > LINUX_VERSION_CODE) + +#ifndef BITS_PER_LONG_LONG +#define BITS_PER_LONG_LONG 64 +#endif + 
+#ifndef ether_addr_equal
+/* Pre-3.5 fallback: equality test built on the old compare_ether_addr(). */
+static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2)
+{
+	return !compare_ether_addr(addr1, addr2);
+}
+
+#define ether_addr_equal(_addr1, _addr2) \
+	__kc_ether_addr_equal((_addr1), (_addr2))
+#endif
+
+/* Definitions for !CONFIG_OF_NET are introduced in 3.10.
+ * NOTE(review): this guard looks inverted — when CONFIG_OF_NET is
+ * enabled the kernel provides real of_get_phy_mode()/of_get_mac_address()
+ * declarations, so these static stubs would clash; confirm whether
+ * "#ifndef CONFIG_OF_NET" (stubs only when OF_NET is absent) was intended.
+ */
+#ifdef CONFIG_OF_NET
+static inline int of_get_phy_mode(struct device_node __always_unused *np)
+{
+	return -ENODEV;
+}
+
+static inline const void *of_get_mac_address(struct device_node __always_unused *np)
+{
+	return NULL;
+}
+#endif
+#else
+#include <linux/of_net.h>
+#define HAVE_FDB_OPS
+
+#endif /* < 3.5.0 */
+
+/* ************************************************************************ */
+#if (KERNEL_VERSION(3, 6, 0) > LINUX_VERSION_CODE)
+#ifndef eth_random_addr
+#define eth_random_addr _kc_eth_random_addr
+/* Random locally-administered unicast MAC for kernels without the helper. */
+static inline void _kc_eth_random_addr(u8 *addr)
+{
+	get_random_bytes(addr, ETH_ALEN);
+	addr[0] &= 0xfe; /* clear multicast */
+	addr[0] |= 0x02; /* set local assignment */
+}
+#endif /* eth_random_addr */
+#endif /* < 3.6.0 */
+
+/* ************************************************************************ */
+#if (KERNEL_VERSION(3, 8, 0) > LINUX_VERSION_CODE)
+#else /* >= 3.8.0 */
+#ifndef HAVE_SRIOV_CONFIGURE
+#define HAVE_SRIOV_CONFIGURE
+#endif
+#endif /* < 3.8.0 */
+
+/* ************************************************************************ */
+#if (KERNEL_VERSION(3, 10, 0) > LINUX_VERSION_CODE)
+#ifndef NAPI_POLL_WEIGHT
+#define NAPI_POLL_WEIGHT 64
+#endif
+/* Without CONFIG_PCI_IOV no VF can exist, so report "none assigned". */
+#ifdef CONFIG_PCI_IOV
+int kc_pci_vfs_assigned(struct pci_dev *dev);
+#else
+static inline int kc_pci_vfs_assigned(struct pci_dev __always_unused *dev)
+{
+	return 0;
+}
+#endif
+#define pci_vfs_assigned(dev) kc_pci_vfs_assigned(dev)
+
+#ifndef NEED_DEFINE_LIST_FIRST_ENTRY_OR_NULL
+#define list_first_entry_or_null(ptr, type, member) \
+	(!list_empty(ptr) ?
list_first_entry(ptr, type, member) : NULL) +#endif /* NEED_DEFINE_LIST_FIRST_ENTRY_OR_NULL */ +#ifndef VLAN_TX_COOKIE_MAGIC +static inline struct sk_buff *kc_vlan_hwaccel_put_tag(struct sk_buff *skb, + u16 vlan_tci) +{ +#ifdef VLAN_TAG_PRESENT + vlan_tci |= VLAN_TAG_PRESENT; +#endif + skb->vlan_tci = vlan_tci; + + return skb; +} + +#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \ + kc_vlan_hwaccel_put_tag(skb, vlan_tci) +#endif +#ifndef PCI_DEVID +#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn)) +#endif +#else /* >= 3.10.0 */ +#endif /* >= 3.10.0 */ + +/* ************************************************************************ */ +#ifdef NEED_PDE_DATA +#ifdef HAVE_PDE_DATA_LOWERCASE +#define PDE_DATA pde_data +#else +#warning PDE_DATA not defined +#endif +#endif + +#if (KERNEL_VERSION(6, 3, 0) > LINUX_VERSION_CODE) +static inline void vm_flags_set(struct vm_area_struct *vma, vm_flags_t flags) +{ + vma->vm_flags |= flags; +} + +static inline void vm_flags_clear(struct vm_area_struct *vma, vm_flags_t flags) +{ + vma->vm_flags &= ~flags; +} +#endif /* < 6.3.0 */ + +/* ************************************************************************ */ +#if (KERNEL_VERSION(3, 13, 0) > LINUX_VERSION_CODE) +#define dma_set_mask_and_coherent(_p, _m) kc_dma_set_mask_and_coherent(_p, _m) +int kc_dma_set_mask_and_coherent(struct device *dev, u64 mask); +#ifndef u64_stats_init +#define u64_stats_init(a) \ + do { \ + } while (0) +#endif +#ifndef BIT_ULL +#define BIT_ULL(n) (1ULL << (n)) +#endif + +#if (SLE_VERSION_CODE && SLE_VERSION(12, 1, 0) <= SLE_VERSION_CODE) +#undef HAVE_STRUCT_PAGE_PFMEMALLOC +#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT +#endif + +#ifndef NEED_DEFINE_LIST_NEXT_ENTRY +#define list_next_entry(pos, member) \ + list_entry((pos)->member.next, typeof(*(pos)), member) +#endif /* NEED_DEFINE_LIST_NEXT_ENTRY */ + +#ifndef NEED_DEFINE_LIST_PREV_ENTRY +#define list_prev_entry(pos, member) \ + list_entry((pos)->member.prev, typeof(*(pos)), member) 
+#endif /* NEED_DEFINE_LIST_PREV_ENTRY */ + +#else /* >= 3.13.0 */ +#define HAVE_VXLAN_CHECKS +#if (defined(UBUNTU_VERSION_CODE) && UBUNTU_VERSION_CODE && \ + UBUNTU_VERSION(3, 13, 0, 24) <= UBUNTU_VERSION_CODE) +#else +#define HAVE_NDO_SELECT_QUEUE_ACCEL +#endif +#define HAVE_NET_GET_RANDOM_ONCE +#define HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS +#endif + +/* ************************************************************************ */ +#if (KERNEL_VERSION(3, 14, 0) > LINUX_VERSION_CODE) +#if (!(RHEL_RELEASE_CODE && \ + RHEL_RELEASE_VERSION(7, 0) <= RHEL_RELEASE_CODE) && \ + !(SLE_VERSION_CODE && SLE_VERSION(12, 0, 0) <= SLE_VERSION_CODE)) + +/* it isn't expected that this would be a #define unless we made it so */ +#ifndef NEED_SKB_SET_HASH + +#define PKT_HASH_TYPE_NONE 0 +#define PKT_HASH_TYPE_L2 1 +#define PKT_HASH_TYPE_L3 2 +#define PKT_HASH_TYPE_L4 3 + +enum _kc_pkt_hash_types { + _KC_PKT_HASH_TYPE_NONE = PKT_HASH_TYPE_NONE, + _KC_PKT_HASH_TYPE_L2 = PKT_HASH_TYPE_L2, + _KC_PKT_HASH_TYPE_L3 = PKT_HASH_TYPE_L3, + _KC_PKT_HASH_TYPE_L4 = PKT_HASH_TYPE_L4, +}; + +#define pkt_hash_types _kc_pkt_hash_types +#define skb_set_hash __kc_skb_set_hash +static inline void +__kc_skb_set_hash(struct sk_buff __maybe_unused *skb, + u32 __maybe_unused hash, int __maybe_unused type) +{ +#ifdef HAVE_SKB_L4_RXHASH + skb->l4_rxhash = (type == PKT_HASH_TYPE_L4); +#endif +#ifdef NETIF_F_RXHASH + skb->rxhash = hash; +#endif +} +#endif /* NEED_SKB_SET_HASH */ +#else /* RHEL_RELEASE_CODE >= 7.0 || SLE_VERSION_CODE >= 12.0 */ +#ifndef HAVE_VXLAN_CHECKS +#define HAVE_VXLAN_CHECKS +#endif /* HAVE_VXLAN_CHECKS */ +#endif /* !(RHEL_RELEASE_CODE >= 7.0 && SLE_VERSION_CODE >= 12.0) */ +#ifndef pci_enable_msix_range +int kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, + int min_vec, int max_vec); +#define pci_enable_msix_range kc_pci_enable_msix_range +#endif +#else /* >= 3.14.0 */ +#endif /* 3.14.0 */ + +/* 
************************************************************************ */ +#if (KERNEL_VERSION(3, 16, 0) > LINUX_VERSION_CODE) + +#ifndef NETIF_F_GSO_IPXIP4 +#define NETIF_F_GSO_IPXIP4 0 +#endif + +#ifndef NETIF_F_GSO_IPXIP6 +#define NETIF_F_GSO_IPXIP6 0 +#endif + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)) +#define HAVE_VLAN_FIND_DEV_DEEP_RCU +#endif + +#else +#define HAVE_VLAN_FIND_DEV_DEEP_RCU + +#endif /* 3.16.0 */ + +/* ************************************************************************ */ +#if (KERNEL_VERSION(3, 18, 0) > LINUX_VERSION_CODE) +#if RHEL_RELEASE_CODE && (RHEL_RELEASE_VERSION(7, 1) < RHEL_RELEASE_CODE) +#define HAVE_MULTI_VLAN_OFFLOAD_EN +#endif +#else +#define HAVE_MULTI_VLAN_OFFLOAD_EN +#endif /* 3.18.0 */ + +#if (KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE) +#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_VERSION(7, 5) <= RHEL_RELEASE_CODE) +#define UNSUPPORT_NTUPLE_IPV6 +#endif +#endif /* 4.6.0 */ + +/* ************************************************************************ */ +#if (KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE) +#define HAVE_STRUCT_CURRENT +#if (SLE_VERSION_CODE && (SLE_VERSION(12, 3, 0) <= SLE_VERSION_CODE)) || \ + (RHEL_RELEASE_CODE && \ + RHEL_RELEASE_VERSION(7, 5) <= RHEL_RELEASE_CODE) || \ + (DEEPIN_VERSION_CODE && \ + (DEEPIN_PRODUCT_VERSION(15, 2, 0) == DEEPIN_VERSION_CODE)) + +#endif +#ifdef CONFIG_NET_RX_BUSY_POLL +#define HAVE_NDO_BUSY_POLL +#endif +#else /* > 4.11 */ + +#define HAVE_VM_OPS_FAULT_NO_VMA +#endif /* 4.11.0 */ + +/* ************************************************************************ */ +#if (KERNEL_VERSION(4, 13, 0) > LINUX_VERSION_CODE) +#else /* > 4.13 */ +#define HAVE_HWTSTAMP_FILTER_NTP_ALL +#define HAVE_PCI_ERROR_HANDLER_RESET_PREPARE +#define HAVE_PTP_CLOCK_DO_AUX_WORK +#endif /* 4.13.0 */ + +/* ************************************************************************ */ +#if (KERNEL_VERSION(4, 15, 0) > LINUX_VERSION_CODE) +#if ((KERNEL_VERSION(3, 10, 0) == 
LINUX_VERSION_CODE) && RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_VERSION(7, 6) == RHEL_RELEASE_CODE)) +#else +#define TC_SETUP_QDISC_MQPRIO TC_SETUP_MQPRIO +#endif +#if (KERNEL_VERSION(4, 12, 0) <= LINUX_VERSION_CODE) +#if (SLE_VERSION_CODE && (SLE_VERSION(15, 0, 0) <= SLE_VERSION_CODE)) || \ + (RHEL_RELEASE_CODE && RHEL_RELEASE_VERSION(7, 5) <= RHEL_RELEASE_CODE) +#else /* 4.12-4.15 */ +#define HAVE_IP6_FRAG_ID_ENABLE_UFO +#endif +#else /* < 4.12.0 */ +#define HAVE_IP6_FRAG_ID_ENABLE_UFO +#endif +#endif /* 4.15.0 */ +/* ************************************************************************ */ +#if (KERNEL_VERSION(4, 17, 0) > LINUX_VERSION_CODE) +#if KERNEL_VERSION(3, 1, 0) <= LINUX_VERSION_CODE || \ + KERNEL_VERSION(2, 6, 32) == LINUX_VERSION_CODE +#define NEED_VLAN_RESTORE +#endif +#else +#define HAVE_MACRO_VM_FAULT_T +#endif /* 4.17.0 */ + +#if (KERNEL_VERSION(4, 18, 0) > LINUX_VERSION_CODE) +#else /* >= 4.18 */ +#if RHEL_RELEASE_CODE && RHEL_RELEASE_VERSION(8, 2) <= RHEL_RELEASE_CODE +#define ETH_GET_HEADLEN_NEED_DEV +#endif +#endif + +/* ************************************************************************ */ +#if (KERNEL_VERSION(4, 19, 0) > LINUX_VERSION_CODE) + +#ifndef bitmap_zalloc +#if (SLE_VERSION_CODE && (SLE_VERSION(15, 0, 0) > SLE_VERSION_CODE)) +#ifndef kmalloc_array + +#define SIZE_MAX (~(size_t)0) + +static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) +{ + if (size != 0 && n > SIZE_MAX / size) + return NULL; + return __kmalloc(n * size, flags); +} +#endif +#endif + +#define bitmap_zalloc(nbits, flags) _hinic5_bitmap_zalloc(nbits, flags) +static inline unsigned long *_hinic5_bitmap_zalloc(unsigned int nbits, gfp_t flags) +{ + return kmalloc_array(BITS_TO_LONGS(nbits), + sizeof(unsigned long), flags | __GFP_ZERO); +} + +#define bitmap_free(bitmap) _hinic5_bitmap_free(bitmap) +static inline void _hinic5_bitmap_free(unsigned long *bitmap) +{ + kfree(bitmap); +} + +#endif + +#endif + 
+/*****************************************************************************/ +#if (KERNEL_VERSION(5, 0, 0) > LINUX_VERSION_CODE) +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 0)) +#define dev_open(x) dev_open(x, NULL) +#endif +#else /* >= 5.0.0 */ +#define dev_open(x) dev_open(x, NULL) + + +#ifndef get_ds +#define get_ds() (KERNEL_DS) +#endif + +#ifndef dma_zalloc_coherent +#define dma_zalloc_coherent(d, s, h, f) _hinic5_dma_zalloc_coherent(d, s, h, f) +static inline void *_hinic5_dma_zalloc_coherent(struct device *dev, + size_t size, dma_addr_t *dma_handle, + gfp_t gfp) +{ + /* Above kernel 5.0, fixed up all remaining architectures + * to zero the memory in dma_alloc_coherent, and made + * dma_zalloc_coherent a no-op wrapper around dma_alloc_coherent, + * which fixes all of the above issues. + */ + return dma_alloc_coherent(dev, size, dma_handle, gfp); +} +#endif + +#if (KERNEL_VERSION(5, 6, 0) <= LINUX_VERSION_CODE) +#ifndef DT_KNL_EMU +struct timeval { + __kernel_old_time_t tv_sec; /* seconds */ + __kernel_suseconds_t tv_usec; /* microseconds */ +}; +#endif +#endif + +#ifndef do_gettimeofday +#define do_gettimeofday(time) _kc_do_gettimeofday(time) +static inline void _kc_do_gettimeofday(struct timeval *tv) +{ + struct timespec64 ts; + + ktime_get_real_ts64(&ts); + tv->tv_sec = ts.tv_sec; + tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC; +} +#endif + +#endif /* 5.0.0 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(5, 2, 0) > LINUX_VERSION_CODE) +#else /* >= 5.2.0 */ +#define ETH_GET_HEADLEN_NEED_DEV +#define HAVE_GENL_OPS_FIELD_VALIDATE +#endif /* 5.2.0 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE) +#if (SUSE_PRODUCT(1, 15, 2, 0) <= SUSE_PRODUCT_CODE) +#ifndef pci_cleanup_aer_uncorrect_error_status +#define pci_cleanup_aer_uncorrect_error_status pci_aer_clear_nonfatal_status +#endif +#else 
/* < SLES15sp2 */ +#endif /* >= SLES15sp2 */ +#else /* >= 5.4.0 */ +#endif /* 5.4.0 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(5, 6, 0) > LINUX_VERSION_CODE) +#else /* >= 5.6.0 */ +#ifndef rtc_time_to_tm +#define rtc_time_to_tm rtc_time64_to_tm +#endif + +#define HAVE_PROC_OPS +#endif /* 5.6.0 */ + +/*****************************************************************************/ +#if (KERNEL_VERSION(5, 7, 0) > LINUX_VERSION_CODE) +#else /* >= 5.7.0 */ + +#ifndef pci_cleanup_aer_uncorrect_error_status +#define pci_cleanup_aer_uncorrect_error_status pci_aer_clear_nonfatal_status +#endif +#endif /* 5.7.0 */ + +/* ************************************************************************ */ +#if (KERNEL_VERSION(5, 9, 0) > LINUX_VERSION_CODE) + +#else /* >= 5.9.0 */ +#define HAVE_XDP_FRAME_SZ +#endif /* 5.9.0 */ + + +#ifdef NEED_DEFINE_PCI_DMA_COMPAT +#include <linux/dma-mapping.h> + +/* This defines the direction arg to the DMA mapping routines. 
*/ +#define PCI_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL +#define PCI_DMA_TODEVICE DMA_TO_DEVICE +#define PCI_DMA_FROMDEVICE DMA_FROM_DEVICE +#define PCI_DMA_NONE DMA_NONE + +static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, + dma_addr_t *dma_handle) +{ + return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_ATOMIC); +} + +static inline void *pci_zalloc_consistent(struct pci_dev *pdev, size_t size, + dma_addr_t *dma_handle) +{ + return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_ATOMIC); +} + +static inline void pci_free_consistent(struct pci_dev *pdev, size_t size, + void *vaddr, dma_addr_t dma_handle) +{ + dma_free_coherent(&pdev->dev, size, vaddr, dma_handle); +} + +static inline dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, + size_t size, int direction) +{ + return dma_map_single(&pdev->dev, ptr, size, + (enum dma_data_direction)direction); +} + +static inline void pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, + size_t size, int direction) +{ + dma_unmap_single(&pdev->dev, dma_addr, size, + (enum dma_data_direction)direction); +} + +static inline dma_addr_t pci_map_page(struct pci_dev *pdev, struct page *page, + unsigned long offset, size_t size, + int direction) +{ + return dma_map_page(&pdev->dev, page, offset, size, + (enum dma_data_direction)direction); +} + +static inline void pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_address, + size_t size, int direction) +{ + dma_unmap_page(&pdev->dev, dma_address, size, + (enum dma_data_direction)direction); +} + +static inline int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, + int nents, int direction) +{ + return dma_map_sg(&pdev->dev, sg, nents, + (enum dma_data_direction)direction); +} + +static inline void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, + int nents, int direction) +{ + dma_unmap_sg(&pdev->dev, sg, nents, (enum dma_data_direction)direction); +} + +static inline void pci_dma_sync_single_for_cpu(struct 
pci_dev *pdev, + dma_addr_t dma_handle, + size_t size, int direction) +{ + dma_sync_single_for_cpu(&pdev->dev, dma_handle, size, + (enum dma_data_direction)direction); +} + +static inline void pci_dma_sync_single_for_device(struct pci_dev *pdev, + dma_addr_t dma_handle, + size_t size, int direction) +{ + dma_sync_single_for_device(&pdev->dev, dma_handle, size, + (enum dma_data_direction)direction); +} + +static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, + struct scatterlist *sg, + int nelems, int direction) +{ + dma_sync_sg_for_cpu(&pdev->dev, sg, nelems, + (enum dma_data_direction)direction); +} + +static inline void pci_dma_sync_sg_for_device(struct pci_dev *pdev, + struct scatterlist *sg, + int nelems, int direction) +{ + dma_sync_sg_for_device(&pdev->dev, sg, nelems, + (enum dma_data_direction)direction); +} + +static inline int pci_dma_mapping_error(struct pci_dev *pdev, + dma_addr_t dma_addr) +{ + return dma_mapping_error(&pdev->dev, dma_addr); +} + +#ifdef CONFIG_PCI +static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) +{ + return dma_set_mask(&dev->dev, mask); +} + +static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) +{ + return dma_set_coherent_mask(&dev->dev, mask); +} +#else +static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) +{ return -EIO; } +static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) +{ return -EIO; } +#endif +#endif /* > NEED_DEFINE_PCI_DMA_COMPAT */ + +/* ************************************************************************ */ +/* pci_pool */ +#if (KERNEL_VERSION(5, 15, 0) <= LINUX_VERSION_CODE) + +#include <linux/dmapool.h> + +#define pci_pool dma_pool +#define pci_pool_create(name, pdev, size, align, allocation) \ + dma_pool_create(name, &pdev->dev, size, align, allocation) +#define pci_pool_destroy(pool) dma_pool_destroy(pool) +#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle) +#define pci_pool_zalloc(pool, 
flags, handle) \
+	dma_pool_zalloc(pool, flags, handle)
+#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
+#endif /* > 5.15.0 */
+
+/* PCI AER: stub out the reporting toggles on kernels that removed them. */
+#ifdef NEED_PCI_DISABLE_PCIE_ERROR_REPORTING
+#define pci_disable_pcie_error_reporting(pdev) ((void)pdev)
+#endif
+
+#ifdef NEED_PCI_ENABLE_PCIE_ERROR_REPORTING
+#define pci_enable_pcie_error_reporting(pdev) ((void)pdev)
+#endif
+
+/* ************************************************************************ */
+/* device class */
+/* Drop the legacy "owner" argument when class_create() no longer takes it. */
+#ifndef HAVE_CLASS_CREATE_OWNER
+#define class_create(owner, dev_class) class_create(dev_class)
+#endif
+
+#ifdef NEED_GET_FS
+#define get_fs() 0
+#endif
+
+#ifdef NEED_SET_FS
+#define set_fs(fs) ((void)fs)
+#endif
+
+#ifdef NEED_FORCE_UACCESS_BEGIN
+#define force_uaccess_begin() 0
+#endif
+
+#ifdef NEED_FORCE_UACCESS_END
+#define force_uaccess_end(oldfs) ((void)oldfs)
+#endif
+
+/* ************************************************************************ */
+/* kallsyms */
+
+#ifndef kallsyms_lookup_name
+#define kallsyms_lookup_name(name) __symbol_get(name)
+#endif
+
+#ifndef kallsyms_lookup_name_wrap
+#define kallsyms_lookup_name_wrap(name) __symbol_get(name)
+#endif
+
+/* ********************* net/devlink.h start ************ */
+#ifdef HAVE_DEVLINK_H
+#include <net/devlink.h>
+#endif
+
+/*****************************************************************************/
+#if (KERNEL_VERSION(5, 5, 0) > LINUX_VERSION_CODE)
+#else /* >= 5.5.0 */
+#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS // TODO: legacy leftover; feature switch enabling devlink support
+#endif /* 5.5.0 */
+
+/* devlink_alloc: newer kernels take the owning device as a third argument. */
+#if (defined(HAVE_DEVLINK_ALLOC) && defined(HAVE_DEVLINK_ALLOC_SET_DEV))
+#define ossl_devlink_alloc(ops, priv_size, dev) devlink_alloc(ops, priv_size, dev)
+#else
+#define ossl_devlink_alloc(ops, priv_size, dev) devlink_alloc(ops, priv_size)
+#endif
+
+/* devlink_register: normalize void/int-returning and 1/2-arg variants. */
+#ifdef HAVE_DEVLINK_REGISTER
+#ifndef HAVE_DEVLINK_REGISTER_HAVE_RET
+#define ossl_devlink_register(devlink, dev) ({ devlink_register(devlink);
0; }) +#else +#ifndef HAVE_DEVLINK_REGISTER_SET_DEV +#define ossl_devlink_register(devlink, dev) devlink_register(devlink) +#else +#define ossl_devlink_register(devlink, dev) devlink_register(devlink, dev) +#endif +#endif +#endif + +/* devlink_params_* */ +#ifndef HAVE_DEVLINK_PARAMS_PUBLISH +#define devlink_params_publish(devlink) ((void)(devlink)) +#endif + +#ifndef HAVE_DEVLINK_PARAMS_UNPUBLISH +#define devlink_params_unpublish(devlink) ((void)(devlink)) +#endif +/* ********************* net/devlink.h end ************ */ + +/* vxlan outer udp checksum will offload and + * skb->inner_transport_header is wrong + */ +#if (defined(SLE_VERSION_CODE) && defined(SLE_VERSION) && SLE_VERSION_CODE && \ + ((SLE_VERSION(12, 1, 0) == SLE_VERSION_CODE) || \ + (SLE_VERSION(12, 0, 0) == SLE_VERSION_CODE))) || \ + (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_VERSION(7, 0) == RHEL_RELEASE_CODE)) +#define HAVE_OUTER_IPV6_TUNNEL_OFFLOAD +#endif + +#ifdef NEED_ETH_ZERO_ADDR +static inline void hinic5_eth_zero_addr(u8 *addr) +{ + (void)memset_s(addr, ETH_ALEN, 0x00, ETH_ALEN); +} + +#define eth_zero_addr(_addr) hinic5_eth_zero_addr(_addr) +#endif + +#ifdef NEED_PCI_SRIOV_GET_TOTALVFS +int pci_sriov_get_totalvfs(struct pci_dev *dev); +#endif + +#ifdef NEED_CPUMASK_LOCAL_SPREAD +unsigned int cpumask_local_spread(unsigned int i, int node); +#endif + +#define spin_lock_deinit(lock) ((void)(lock)) + +struct file *file_creat(const char *file_name); + +struct file *file_open(const char *file_name); + +void file_close(struct file *file_handle); + +u32 get_file_size(struct file *file_handle); + +void set_file_position(struct file *file_handle, u32 position); + +int file_read(struct file *file_handle, char *log_buffer, u32 rd_length, + u32 *file_pos); + +u32 file_write(struct file *file_handle, const char *log_buffer, u32 wr_length); + +struct sdk_thread_info { + struct task_struct *thread_obj; + char *name; + void (*thread_fn)(void *x); + void *thread_event; + void *data; +}; + +int 
creat_thread(struct sdk_thread_info *thread_info);
+
+void stop_thread(struct sdk_thread_info *thread_info);
+
+#define destroy_work(work)
+void utctime_to_localtime(u64 utctime, u64 *localtime);
+
+#ifndef HAVE_TIMER_SETUP
+void initialize_timer(const void *adapter_hdl, struct timer_list *timer);
+#endif
+
+void add_to_timer(struct timer_list *timer, u64 period);
+void stop_timer(struct timer_list *timer);
+void delete_timer(struct timer_list *timer);
+u64 ossl_get_real_time(void);
+
+/* Not present in the Linux kernel; defined as empty no-ops. */
+#define destroy_completion(completion)
+#define sema_deinit(lock)
+#define mutex_deinit(lock)
+#define rwlock_deinit(lock)
+
+#define tasklet_state(tasklet) ((tasklet)->state)
+
+#ifdef NEED_MATH64_MUL_U64_U64_DIV_U64
+u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c);
+#else
+#define HAVE_PTP_INFO_GETTIMEX64
+#endif
+
+#ifdef NEED_PTP_ADJUST_BY_SCALED_PPM
+/* Compute |scaled_ppm| applied to base: diff = base * |scaled_ppm| /
+ * (1000000 * 2^16). Returns true when scaled_ppm was negative, i.e. the
+ * caller must subtract diff instead of adding it.
+ */
+static inline bool diff_by_scaled_ppm(u64 base, long scaled_ppm, u64 *diff)
+{
+	bool negative = false;
+	long scaled_ppm_val = scaled_ppm;
+
+	if (scaled_ppm < 0) {
+		negative = true;
+		scaled_ppm_val = -scaled_ppm;
+	}
+	/* scaled_ppm(x.y) low 16bit -> y, high 16bit -> x */
+	*diff = mul_u64_u64_div_u64(base, (u64)scaled_ppm_val, 1000000ULL << 16);
+
+	return negative;
+}
+/* Adjust base by scaled_ppm (parts-per-million with 16 fractional bits). */
+static inline u64 adjust_by_scaled_ppm(u64 base, long scaled_ppm)
+{
+	u64 diff;
+
+	if (diff_by_scaled_ppm(base, scaled_ppm, &diff))
+		return base - diff;
+
+	return base + diff;
+}
+#endif
+
+#ifdef NEED_SYSFS_EMIT
+int sysfs_emit(char *buf, const char *fmt, ...);
+#endif
+
+#ifdef NEED_PCI_DOMAIN_NR
+static inline int pci_domain_nr(struct pci_bus *bus)
+{
+	return bus->domain_nr;
+}
+#endif
+
+#ifdef NEED_STRLCPY
+#define strlcpy strscpy
+#endif
+
+#endif /* OSSL_KNL_LINUX_H_ */
diff --git a/hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/ossl_knl_linux_nic.h b/hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/ossl_knl_linux_nic.h
new file mode 100644
index 00000000..c77455cc
--- /dev/null
+++
b/hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/ossl_knl_linux_nic.h @@ -0,0 +1,379 @@ +/** + * @copyright Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * @file ossl_knl_linux_nic.h + * @brief Kernel compatibility layer for the NIC module across different OS kernels. + * @version Initial + * @date 2025/11/1 + */ + +#ifndef OSSL_KNL_LINUX_NIC_H +#define OSSL_KNL_LINUX_NIC_H + +#include <net/xdp.h> +#include <linux/ethtool.h> +#include <linux/etherdevice.h> +#include <linux/filter.h> +#include <linux/if_vlan.h> +#include <linux/kernel.h> +#include <linux/limits.h> +#include <linux/list.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/udp.h> + +#include "base_type.h" +#include "nic_kcompat.h" + +#define ETH_ALEN 6 /* Octets in one ethernet addr */ + +#ifndef __GFP_COLD +#define __GFP_COLD 0 +#endif + +#ifndef __GFP_COMP +#define __GFP_COMP 0 +#endif + +#ifndef NETIF_F_SCTP_CSUM +#define NETIF_F_SCTP_CSUM 0 +#endif + +#ifndef NETIF_F_SCTP_CRC +#define NETIF_F_SCTP_CRC NETIF_F_SCTP_CSUM +#endif /* NETIF_F_SCTP_CRC */ + +#ifndef ETHTOOL_GLINKSETTINGS +/* adapt to SUPPORTED_** and ADVERTISED_**, only 32 bits */ +enum ethtool_link_mode_bit_indices { + ETHTOOL_LINK_MODE_1000baseT_Full_BIT = 5, + ETHTOOL_LINK_MODE_Autoneg_BIT = 6, + ETHTOOL_LINK_MODE_TP_BIT = 7, + ETHTOOL_LINK_MODE_FIBRE_BIT = 10, + ETHTOOL_LINK_MODE_Pause_BIT = 13, + ETHTOOL_LINK_MODE_Asym_Pause_BIT = 14, + ETHTOOL_LINK_MODE_Backplane_BIT = 16, + ETHTOOL_LINK_MODE_10000baseT_Full_BIT = 12, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT = 17, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT = 19, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT = 20, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT = 23, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT = 24, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT = 25, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT = 26, + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT = 31, +}; + +#ifndef __ETHTOOL_LINK_MODE_MASK_NBITS +#define __ETHTOOL_LINK_MODE_MASK_NBITS 
32 +#endif +#endif + +#ifndef __ETHTOOL_DECLARE_LINK_MODE_MASK +#define __ETHTOOL_DECLARE_LINK_MODE_MASK(name) \ + DECLARE_BITMAP(name, __ETHTOOL_LINK_MODE_MASK_NBITS) +#endif + +#ifndef ETHTOOL_LINK_MODE_1000baseX_Full_BIT +#define ETHTOOL_LINK_MODE_1000baseX_Full_BIT 41 +#endif + +#ifndef ETHTOOL_LINK_MODE_10000baseCR_Full_BIT +#define ETHTOOL_LINK_MODE_10000baseCR_Full_BIT 42 +#define ETHTOOL_LINK_MODE_10000baseSR_Full_BIT 43 +#define ETHTOOL_LINK_MODE_10000baseLR_Full_BIT 44 +#define ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT 45 +#endif + +#ifndef ETHTOOL_LINK_MODE_25000baseKR_Full_BIT +#define ETHTOOL_LINK_MODE_25000baseCR_Full_BIT 31 +#define ETHTOOL_LINK_MODE_25000baseKR_Full_BIT 32 +#define ETHTOOL_LINK_MODE_25000baseSR_Full_BIT 33 +#endif + +#ifndef ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT +#define ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT 34 +#define ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT 35 +#define ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT 40 +#endif + +#ifndef ETHTOOL_LINK_MODE_50000baseKR_Full_BIT +#define ETHTOOL_LINK_MODE_50000baseKR_Full_BIT 52 +#define ETHTOOL_LINK_MODE_50000baseCR_Full_BIT 54 +#define ETHTOOL_LINK_MODE_50000baseSR_Full_BIT 53 +#endif + +#ifndef ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT +#define ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT 36 +#define ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT 37 +#define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38 +#define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39 +#endif + +#ifndef ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT +#define ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT 57 +#define ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT 59 +#define ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT 58 +#endif + +#ifndef ETHTOOL_LINK_MODE_100000baseKR_Full_BIT +#define ETHTOOL_LINK_MODE_100000baseKR_Full_BIT 75 +#define ETHTOOL_LINK_MODE_100000baseCR_Full_BIT 78 +#define ETHTOOL_LINK_MODE_100000baseSR_Full_BIT 76 +#endif + +#ifndef ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT +#define ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT 62 
+#define ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT 63 +#define ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT 66 +#endif + +#ifndef ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT +#define ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT 80 +#define ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT 81 +#define ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT 84 +#endif + +#ifndef ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT +#define ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT 85 +#define ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT 86 +#define ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT 89 +#endif + +#ifndef ETHTOOL_LINK_MODE_800000baseKR8_Full_BIT +#define ETHTOOL_LINK_MODE_800000baseKR8_Full_BIT 94 +#define ETHTOOL_LINK_MODE_800000baseSR8_Full_BIT 97 +#define ETHTOOL_LINK_MODE_800000baseCR8_Full_BIT 93 +#endif + +#ifndef SPEED_50000 +#define SPEED_50000 50000 +#endif + +#ifndef SPEED_200000 +#define SPEED_200000 200000 +#endif + +#ifndef SPEED_400000 +#define SPEED_400000 400000 +#endif + +#ifndef SPEED_800000 +#define SPEED_800000 800000 +#endif + +#ifdef NEED_DEFINE_SPEED_20000 +#define SPEED_20000 20000 +#endif /* NEED_DEFINE_SPEED_20000 */ + +#ifdef NEED_DEFINE_SPEED_25000 +#define SPEED_25000 25000 +#endif /* NEED_DEFINE_SPEED_25000 */ + +#ifdef NEED_DEFINE_SPEED_40000 +#define SPEED_40000 40000 +#endif /* NEED_DEFINE_SPEED_40000 */ + +#ifdef NEED_DEFINE_SPEED_100000 +#define SPEED_100000 100000 +#endif /* NEED_DEFINE_SPEED_100000 */ + +#ifdef ETHTOOL_GMODULEEEPROM +#ifndef ETH_MODULE_SFF_8472 +#define ETH_MODULE_SFF_8472 0x2 +#endif +#ifndef ETH_MODULE_SFF_8636 +#define ETH_MODULE_SFF_8636 0x3 +#endif +#ifndef ETH_MODULE_SFF_8436 +#define ETH_MODULE_SFF_8436 0x4 +#endif +#ifndef ETH_MODULE_SFF_8472_LEN +#define ETH_MODULE_SFF_8472_LEN 512 +#endif +#ifndef ETH_MODULE_SFF_8636_MAX_LEN +#define ETH_MODULE_SFF_8636_MAX_LEN 640 +#endif +#ifndef ETH_MODULE_SFF_8436_MAX_LEN +#define ETH_MODULE_SFF_8436_MAX_LEN 640 +#endif +#endif + +#ifdef NEED_DEFINE_U16_MAX +#define U16_MAX ((u16)~0U) +#endif /* 
NEED_DEFINE_U16_MAX */ + +#ifdef NEED_DEFINE_U32_MAX +#define U32_MAX ((u32)~0U) +#endif /* NEED_DEFINE_U32_MAX */ + +#ifdef NEED_DEFINE_DMA_RMB +/* It is used for tx hw_ci and sw_ci. */ +#define dma_rmb() rmb() +#endif /* NEED_DEFINE_DMA_RMB */ + +#ifdef NEED_ETH_P_8021AD +#define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */ +#endif /* NEED_ETH_P_8021AD */ + +#ifdef NEED___vlan_get_protocol +__be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type, int *next_depth); +#endif /* NEED___vlan_get_protocol */ + +#ifdef NEED_NETDEV_PHYS_ITEM_ID +#define netdev_phys_item_id netdev_phys_port_id +#endif /* NEED_NETDEV_PHYS_ITEM_ID */ + +#ifdef NEED_DEFINE_NETDEV_RSS_KEY_LEN +#define NETDEV_RSS_KEY_LEN (13 * 4) +#endif /* NEED_DEFINE_NETDEV_RSS_KEY_LEN */ + + +#ifdef NEED_NAPI_SCHEDULE_IRQOFF +#define napi_schedule_irqoff napi_schedule +#endif /* NEED_NAPI_SCHEDULE_IRQOFF */ + +#ifdef NEED_DEFINE_ETH_MODULE_SFF_8636 +#define ETH_MODULE_SFF_8636 0x3 +#endif /* NEED_DEFINE_ETH_MODULE_SFF_8636 */ +#ifndef NEED_DEFINE_ETH_MODULE_SFF_8636_LEN +#define ETH_MODULE_SFF_8636_LEN 256 +#endif /* NEED_DEFINE_ETH_MODULE_SFF_8636_LEN */ +#ifndef NEED_DEFINE_ETH_MODULE_SFF_8436 +#define ETH_MODULE_SFF_8436 0x4 +#endif /* NEED_DEFINE_ETH_MODULE_SFF_8436 */ +#ifndef NEED_DEFINE_ETH_MODULE_SFF_8436_LEN +#define ETH_MODULE_SFF_8436_LEN 256 +#endif /* NEED_DEFINE_ETH_MODULE_SFF_8436_LEN */ + +#ifdef NEED_DEFINE_NETIF_F_GSO_UDP_TUNNEL_CSUM +/* + * if someone backports this, hopefully they backport as a #define. + * declare it as zero on older kernels so that if it get's or'd in + * it won't effect anything, therefore preventing core driver changes. 
+ */ +#define NETIF_F_GSO_UDP_TUNNEL_CSUM 0 +#define SKB_GSO_UDP_TUNNEL_CSUM 0 +#endif /* NEED_DEFINE_NETIF_F_GSO_UDP_TUNNEL_CSUM */ + +#ifdef NEED_DEFINE_FIELD_SIZEOF +#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f)) +#endif /* NEED_DEFINE_FIELD_SIZEOF */ + +#ifdef NEED_DEFINE_SKB_VLAN_TAG_PRESENT +#define skb_vlan_tag_present(__skb) vlan_tx_tag_present(__skb) +#define skb_vlan_tag_get(__skb) vlan_tx_tag_get(__skb) +#define skb_vlan_tag_get_id(__skb) vlan_tx_tag_get_id(__skb) +#endif /* NEED_DEFINE_SKB_VLAN_TAG_PRESENT */ + +#ifdef HAVE_ETHTOOL_GLINKSETTINGS +#ifdef NEED_ENUM_ETHTOOL_LINK_MODE_25000baseCR_Full_BIT +#define ETHTOOL_LINK_MODE_25000baseCR_Full_BIT 31 +#endif /* NEED_ENUM_ETHTOOL_LINK_MODE_25000baseCR_Full_BIT */ +#ifdef NEED_ETHTOOL_LINK_MODE_25000baseKR_Full_BIT +#define ETHTOOL_LINK_MODE_25000baseKR_Full_BIT 32 +#endif /* NEED_ETHTOOL_LINK_MODE_25000baseKR_Full_BIT */ +#ifdef NEED_ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT +#define ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT 36 +#endif /* NEED_ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT */ +#ifdef NEED_ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT +#define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38 +#endif /* NEED_ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT */ +#endif /* HAVE_ETHTOOL_GLINKSETTINGS */ + +#if defined(HAVE_XDP_XDP_QUERY_PROG) || defined(HAVE_BPF_XDP_QUERY_PROG) +#define HAVE_XDP_QUERY_PROG +#endif /* HAVE_XDP_QUERY_PROG */ + +#if defined(HAVE_NDO_SELECT_QUEUE_SB_DEV) && !defined(HAVE_NDO_SELECT_QUEUE_FALLBACK) +#define HAVE_NDO_SELECT_QUEUE_SB_DEV_ONLY +#elif defined(HAVE_NDO_SELECT_QUEUE_FALLBACK) && \ + (defined(HAVE_NDO_SELECT_QUEUE_SB_DEV) || \ + defined(HAVE_NDO_SELECT_QUEUE_ACCEL)) +#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK +#endif /* HAVE_NDO_SELECT_QUEUE_ACCEL */ + +/* netif_napi_add_weight style in new kernel version */ +#ifdef HAVE_NETIF_NAPI_ADD_WEIGHT +#define netif_napi_add(dev, napi, napi_struct, weight) \ + netif_napi_add_weight(dev, napi, napi_struct, weight) +#elif 
defined(HAVE_NETIF_NAPI_NO_WEIGHT) +#define netif_napi_add(dev, napi, napi_struct, weight) netif_napi_add(dev, napi, napi_struct) +#endif + +/* skb_recv_datagram absent noblock param int new kernel version */ +#ifndef HAVE_SKB_RECV_DATAGRAM_NOBLOCK +#define skb_recv_datagram(sk, flags, noblock, err) \ + skb_recv_datagram(sk, (flags) | ((noblock) != 0 ? MSG_DONTWAIT : 0), err) +#endif + +/* bpf_warn_invalid_xdp_action absent net_dev and prog param in old kernel version */ +#ifndef HAVE_NETDEV_PROG_XDP_WARN_ACTION +#define bpf_warn_invalid_xdp_action(net_dev, prog, act) \ + bpf_warn_invalid_xdp_action(act) +#endif + +#ifndef netdev_hw_addr_list_for_each +#define netdev_hw_addr_list_for_each(ha, l) \ + list_for_each_entry(ha, &(l)->list, list) +#endif /* NEED_DEFINE_NETDEV_HW_ADDR_LIST_FOR_EACH */ + +#ifdef NEED_SKB_FRAG_OFF_ADD +#define skb_frag_off_add(frag, delta) kc_skb_frag_off_add(frag, delta) +static inline void kc_skb_frag_off_add(skb_frag_t *frag, int delta) +{ +#ifdef HAVE_TYPEDEF_SKB_FRAG_T_BIOVEC + frag->bv_offset += (unsigned short)delta; +#else + frag->page_offset += (unsigned short)delta; +#endif +} +#endif /* NEED_SKB_FRAG_OFF_ADD */ + +/* ether_addr_copy did not exist prior to kernel version 3.10 */ +#ifdef NEED_ETHER_ADDR_COPY +#define ether_addr_copy __kc_ether_addr_copy +static inline void __kc_ether_addr_copy(u8 *dst, const u8 *src) +{ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + *(u32 *)dst = *(const u32 *)src; + *(u16 *)(dst + 4) = *(const u16 *)(src + 4); +#else + u16 *a = (u16 *)dst; + const u16 *b = (const u16 *)src; + + for (u16 i = 0; i < ETH_ALEN / sizeof(u16); ++i) { + a[i] = b[i]; + } +#endif +} +#endif + +/* eth_hw_addr_set was introduced in kernel version 5.15 */ +static inline void hinic5_eth_hw_addr_set(struct net_device *dev, const u8 *addr) +{ +#if defined(HAVE_ETH_HW_ADDR_SET) + eth_hw_addr_set(dev, addr); +#else + ether_addr_copy(dev->dev_addr, addr); +#endif +} + +#ifdef HAVE_PAGE_POOL_SUPPORT +#if 
defined(HAVE_PAGE_POOL_NEW) +#include <net/page_pool/types.h> +#include <net/page_pool/helpers.h> +#elif defined(HAVE_PAGE_POOL_OLD) +#include <net/page_pool.h> +#endif +#endif /* HAVE_PAGE_POOL_SUPPORT */ + + +#if (KERNEL_VERSION(3, 10, 0) <= LINUX_VERSION_CODE) +#define HAVE_ENCAPSULATION_TSO +#endif + +#if (KERNEL_VERSION(3, 8, 0) <= LINUX_VERSION_CODE) +#define HAVE_ENCAPSULATION_CSUM +#endif + +#endif /* OSSL_KNL_LINUX_NIC_H */ \ No newline at end of file diff --git a/hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/ossl_knl_uefi.h b/hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/ossl_knl_uefi.h new file mode 100644 index 00000000..8f4eccc1 --- /dev/null +++ b/hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/ossl_knl_uefi.h @@ -0,0 +1,844 @@ +/* + * Huawei(R) HiNIC PCI Express Network Controller UEFI Driver. + * This driver is only usable in the EFI Pre-boot execution environment. + * + * Copyright(c) 2017 Huawei Technologies Co., Ltd. + * All rights reserved. + * + */ + +#ifndef OSSL_KNL_UEFI_H +#define OSSL_KNL_UEFI_H + +#include <Uefi.h> +#include <Base.h> +#include <stdlib.h> +#include <stdbool.h> +#include <Protocol/DriverBinding.h> +#include <Protocol/DevicePath.h> +#include <Protocol/PciIo.h> +#include <Protocol/SerialIo.h> + +#include <Library/BaseLib.h> +#include <Library/UefiLib.h> +#include <Library/DebugLib.h> +#include <Library/UefiDriverEntryPoint.h> +#include <Library/BaseMemoryLib.h> +#include <Library/MemoryAllocationLib.h> +#include <Library/UefiBootServicesTableLib.h> +#include <Library/DevicePathLib.h> +#include <Library/PrintLib.h> +#include <IndustryStandard/Pci.h> +#include <Library/UefiRuntimeServicesTableLib.h> + +#include "HwSafeOperationLib/HwSafePrint.h" +#include "HwSafeOperationLib/HwSafeMemOpWrapper.h" +#include "base_type.h" + +/* global var */ +#define __iomem +typedef UINT64 dma_addr_t; +typedef UINT64 phys_addr_t; +typedef EFI_LOCK spinlock_t; + +typedef enum irqreturn irqreturn_t; +typedef irqreturn_t 
(*irq_handler_t)(int, void *); +typedef u64 uintptr_t; + +#define module_init(name) +#define module_exit(name) +#define module_param(name, type, perm) +#define module_param_array_named(name, names, type, nump, perm) + +#define MODULE_LICENSE(_license) +#define MODULE_PARM_DESC(_parm, desc) +#define MODULE_PARM(_parm, desc) + +#define EXPORT_SYMBOL(name) +#define EXPORT_SYMBOL_GPL(name) + +#define LONG_MAX ((long)(~0UL>>1)) + +#define HINIC5_CFG_BAR 1 +#define HINIC5_INTR_BAR 2 +#define HINIC5_MGMT_BAR 3 +#define HINIC5_DB_BAR 4 + +struct mutex { + EFI_LOCK mutex_lock; +}; + +struct dma_pool { + UINT32 size; + VOID *dev_hdl; +}; + +typedef struct { + INT32 counter; +} atomic_t; + +struct attribute { + char *name; +}; + +struct semaphore { + EFI_LOCK sem; +}; + +typedef void (*simulated_irq)(void *hwdev); + +struct ifla_vf_info { + UINT32 vf; + UINT8 mac[32]; + UINT32 vlan; + UINT32 qos; + UINT32 spoofchk; + UINT32 linkstate; + UINT32 tx_rate; + UINT32 max_tx_rate; + UINT32 min_tx_rate; + UINT32 trusted; +}; + +struct completion { + INT32 done; + simulated_irq simulated_irq_instance; + VOID *hwdev; +}; + +enum hinic5_wq_type { + HINIC5_WQ_FOR_MBX, + HINIC5_WQ_FOR_MGMT +}; + +struct workqueue_struct { + void *work_hdl; + enum hinic5_wq_type wq_type; +}; + +struct list_head { + unsigned long RetrievalCode; + struct list_head *pNext; +}; + +struct timer_list { + EFI_EVENT timer; + UINT64 expires; // jiffess + void (*function)(unsigned long); + UINT64 data; +}; + +struct tasklet_struct { + void (*fun)(ulong); + void *data; + BOOLEAN bEnable; +}; + +struct work_struct; +typedef void (*work_func_t)(struct work_struct *work); + +typedef struct work_struct { + work_func_t foo; + void *data; + UINT64 pengding; + struct workqueue_struct *wq; + UINT64 timeout; + EFI_EVENT timer; +} work_struct; + +struct msix_entry { + UINT32 vector; + UINT16 entry; +}; + +enum irqreturn { + IRQ_NONE = (0 << 0), + IRQ_HANDLED = (1 << 0), + IRQ_WAKE_THREAD = (1 << 1), +}; + +struct pci_dev { + 
/* struct device must be first elment */ + EFI_PCI_IO_PROTOCOL *dev; + UINTN vendor; + UINTN device; + struct msix_entry *entries; + UINT16 maxmsicnt; + spinlock_t pci_dev_lock; +}; + +struct device { + /* struct device must be first elment */ + EFI_PCI_IO_PROTOCOL *dev; +}; + +/* define linux kernel data */ +#define SZ_1K 0x00000400 +#define SZ_2K 0x00000800 +#define SZ_4K 0x00001000 +#define SZ_8K 0x00002000 +#define SZ_16K 0x00004000 +#define SZ_32K 0x00008000 +#define SZ_64K 0x00010000 +#define SZ_128K 0x00020000 +#define SZ_256K 0x00040000 + +#define __GFP_COMP 0 +#define GFP_KERNEL 0 +#define GFP_ATOMIC 0 + +#define PAGE_SHIFT 12 +#define PAGE_SIZE SZ_4K + +#define EfiBusIoWidthUint32 EfiPciIoWidthUint32 +#define EfiBusIoWidthUint64 EfiPciIoWidthUint64 +#define EFI_BUS_IO_PROTOCOL_WIDTH EFI_PCI_IO_PROTOCOL_WIDTH + +#define EfiBusIoAttributeOperationSupported EfiPciIoAttributeOperationSupported +#define EfiBusIoAttributeOperationEnable EfiPciIoAttributeOperationEnable +#define EfiBusIoAttributeOperationSet EfiPciIoAttributeOperationSet +#define EfiBusIoAttributeOperationGet EfiPciIoAttributeOperationGet +#define EfiBusIoOperationBusMasterCommonBuffer EfiPciIoOperationBusMasterCommonBuffer +#define EFI_BUS_IO_PROTOCOL_ATTRIBUTE_OPERATION EFI_PCI_IO_PROTOCOL_ATTRIBUTE_OPERATION +#define EFI_BUS_IO_PROTOCOL_OPERATION EFI_PCI_IO_PROTOCOL_OPERATION + +typedef struct BUS_IO_PROTOCOL BUS_IO_PROTOCOL; + +typedef EFI_STATUS (EFIAPI *EFI_BUS_IO_PROTOCOL_ALLOCATE_BUFFER)( + IN BUS_IO_PROTOCOL *This, + IN EFI_ALLOCATE_TYPE Type, + IN EFI_MEMORY_TYPE MemoryType, + IN UINTN Pages, + OUT VOID **HostAddress, + IN UINT64 Attributes +); + +typedef EFI_STATUS (EFIAPI *EFI_BUS_IO_PROTOCOL_FREE_BUFFER)( + IN BUS_IO_PROTOCOL *This, + IN UINTN Pages, + IN VOID *HostAddress +); + +typedef EFI_STATUS (EFIAPI *EFI_BUS_IO_PROTOCOL_IO_MEM)( + IN BUS_IO_PROTOCOL *This, + IN EFI_BUS_IO_PROTOCOL_WIDTH Width, + IN UINT8 BarIndex, + IN UINT64 Offset, + IN UINTN Count, + IN OUT VOID *Buffer 
+); + +typedef EFI_STATUS (EFIAPI *EFI_BUS_IO_PROTOCOL_ATTRIBUTES)( + IN BUS_IO_PROTOCOL *This, + IN EFI_BUS_IO_PROTOCOL_ATTRIBUTE_OPERATION Operation, + IN UINT64 Attributes, + OUT UINT64 *Result OPTIONAL +); + +typedef EFI_STATUS (EFIAPI *EFI_BUS_IO_PROTOCOL_MAP)( + IN BUS_IO_PROTOCOL *This, + IN EFI_BUS_IO_PROTOCOL_OPERATION Operation, + IN VOID *HostAddress, + IN OUT UINTN *NumberOfBytes, + OUT EFI_PHYSICAL_ADDRESS *DeviceAddress, + OUT VOID **Mapping +); + +typedef EFI_STATUS (EFIAPI *EFI_BUS_IO_PROTOCOL_UNMAP)( + IN BUS_IO_PROTOCOL *This, + IN VOID *Mapping +); + +typedef struct { + EFI_BUS_IO_PROTOCOL_IO_MEM Read; + EFI_BUS_IO_PROTOCOL_IO_MEM Write; +} EFI_BUS_IO_PROTOCOL_ACCESS; + +typedef struct BUS_IO_PROTOCOL { + EFI_BUS_IO_PROTOCOL_ALLOCATE_BUFFER AllocateBuffer; + EFI_BUS_IO_PROTOCOL_FREE_BUFFER FreeBuffer; + EFI_BUS_IO_PROTOCOL_ACCESS Mem; + EFI_BUS_IO_PROTOCOL_ATTRIBUTES Attributes; + EFI_BUS_IO_PROTOCOL_MAP Map; + EFI_BUS_IO_PROTOCOL_UNMAP Unmap; + VOID *BusHdl; +} BUS_IO_PROTOCOL; + +enum { + EPERM = 1, /* Operation not permitted */ + ENOENT = 2, /* No such file or directory */ + ESRCH = 3, /* No such process */ + EINTR = 4, /* Interrupted system call */ + EIO = 5, /* I/O error */ + ENXIO = 6, /* No such device or address */ + E2BIG = 7, /* Argument list too long */ + ENOEXEC = 8, /* Exec format error */ + EBADF = 9, /* Bad file number */ + ECHILD = 10, /* No child processes */ + EAGAIN = 11, /* Try again */ + ENOMEM = 12, /* Out of memory */ + EACCES = 13, /* Permission denied */ + EFAULT = 14, /* Bad address */ + ENOTBLK = 15, /* Block device required */ + EBUSY = 16, /* Device or resource busy */ + EEXIST = 17, /* File exists */ + EXDEV = 18, /* Cross-device link */ + ENODEV = 19, /* No such device */ + ENOTDIR = 20, /* Not a directory */ + EISDIR = 21, /* Is a directory */ + EINVAL = 22, /* Invalid argument */ + ENFILE = 23, /* File table overflow */ + EMFILE = 24, /* Too many open files */ + ENOTTY = 25, /* Not a typewriter */ + ETXTBSY = 26, 
/* Text file busy */ + EFBIG = 27, /* File too large */ + ENOSPC = 28, /* No space left on device */ + ESPIPE = 29, /* Illegal seek */ + EROFS = 30, /* Read-only file system */ + EMLINK = 31, /* Too many links */ + EPIPE = 32, /* Broken pipe */ + EDOM = 33, /* Math argument out of domain of func */ + ERANGE = 34, /* Math result not representable */ + EWOULDBLOCK = EAGAIN, /* Operation would block */ + EINPROGRESS = 36, /* Operation now in progress */ + EALREADY = 37, /* Operation already in progress */ + ENOTSOCK = 38, /* Socket operation on non-socket */ + EDESTADDRREQ = 39, /* Destination address required */ + EMSGSIZE = 40, /* Message too long */ + EPROTOTYPE = 41, /* Protocol wrong type for socket */ + ENOPROTOOPT = 42, /* Protocol not available */ + EPROTONOSUPPORT = 43, /* Protocol not supported */ + ESOCKTNOSUPPORT = 44, /* Socket type not supported */ + EOPNOTSUPP = 45, /* Operation not supported on transport endpoint */ + EPFNOSUPPORT = 46, /* Protocol family not supported */ + EAFNOSUPPORT = 47, /* Address family not supported by protocol */ + EADDRINUSE = 48, /* Address already in use */ + EADDRNOTAVAIL = 49, /* Cannot assign requested address */ + ENETDOWN = 50, /* Network is down */ + ENETUNREACH = 51, /* Network is unreachable */ + ENETRESET = 52, /* Network dropped connection because of reset */ + ECONNABORTED = 53, /* Software caused connection abort */ + ECONNRESET = 54, /* Connection reset by peer */ + ENOBUFS = 55, /* No buffer space available */ + EISCONN = 56, /* Transport endpoint is already connected */ + ENOTCONN = 57, /* Transport endpoint is not connected */ + ESHUTDOWN = 58, /* Cannot send after transport endpoint shutdown */ + ETOOMANYREFS = 59, /* Too many references: cannot splice */ + ETIMEDOUT = 60, /* Connection timed out */ + ECONNREFUSED = 61, /* Connection refused */ + ELOOP = 62, /* Too many symbolic links encountered */ + ENAMETOOLONG = 63, /* File name too long */ + EHOSTDOWN = 64, /* Host is down */ + EHOSTUNREACH = 65, /* No 
route to host */ + ENOTEMPTY = 66, /* Directory not empty */ +}; + +#define ETH_ALEN 6 /* Octets in one ethernet addr */ +#define ETH_HLEN 14 /* Total octets in header. */ +#define ETH_ZLEN 60 /* Min. octets in frame sans FCS */ +#define ETH_DATA_LEN 1500 /* Max. octets in payload */ +#define ETH_FRAME_LEN 1514 /* Max. octets in frame sans FCS */ +#define ETH_FCS_LEN 4 /* Octets in the FCS */ +#define ETH_MIN_MTU 68 /* Min IPv4 MTU per RFC791 */ + +#define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */ +#define VLAN_PRIO_SHIFT 13 +#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator */ +#define VLAN_TAG_PRESENT VLAN_CFI_MASK +#define VLAN_VID_MASK 0x0fff /* VLAN Identifier */ +#define VLAN_N_VID 4096 + +#define IS_ERR(a) (!(a)) + +#define min(a, b) MIN(a, b) +#define max(a, b) MAX(a, b) +#define min_t(t, a, b) MIN(a, b) +#define max_t(t, a, b) MAX(a, b) + +#define BIT(nr) (1UL << (nr)) +#define BIT_ULL(nr) (1ULL << (nr)) +#define INT_BIT_WIDTH 32 +#define INT_BIT_SHIFT 5 + +#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16)) +#define lower_32_bits(n) ((u32)(n)) + +dma_addr_t virt_to_phys(VOID *vaddr); + +UINT16 ilog2(int n); + +VOID EFIAPI EfiSerialPortPrint(IN UINTN ErrorLevel, IN CONST CHAR8 *Format, ...); +#ifdef __Taishan__ +#define DebugPrint EfiSerialPortPrint +#endif + +/** Returns offset of member in structure + @param[in] st Structure type + @param[in] m Structure member + @return Offset of member from structure in bytes +**/ +#define offsetof_type(st, m) ((size_t)((char *)&((st *)(0))->m - (char *)0)) + +#define container_of(ptr, type, member) ({ \ + const typeof(((type *)0)->member) *__mptr = (ptr); \ + (type *)((char *)__mptr - offsetof_type(type, member)); }) + +/** + * ALIGN - aligns number to specified granularity + * @x: number + * @a: granularity + * @return: number aligned to granularity + */ +#define ALIGN(x, a) (((x) + ((UINT64) (a) - 1)) & ~((UINT64) (a) - 1)) +#define ____cacheline_aligned + +#define time_before(a, b) ((a) < (b)) 
+#define time_after(a, b) time_before(b, a) + +unsigned long get_jiffies(VOID); +#define jiffies get_jiffies() + +unsigned long msecs_to_jiffies(unsigned long m); +unsigned long jiffies_to_msecs(unsigned long n); + +/* + * The struct used to pass data via the following ioctl. Similar to the + * struct tm in <time.h>, but it needs to be here so that the kernel + * source is self contained, allowing cross-compiles, etc. etc. + */ + +struct rtc_time { + int tm_sec; + int tm_min; + int tm_hour; + int tm_mday; + int tm_mon; + int tm_year; + int tm_wday; + int tm_yday; + int tm_isdst; +}; + +#define UEFI_LEAPS_THRU_END_OF(y) (((y) / 4 - (y) / 100) + (y) / 400) +void do_gettimeofday(struct timeval *tv); +void rtc_time_to_tm(unsigned long time, struct rtc_time *tm); + +#define roundup_pow_of_two(n) \ + (((n) == 1) ? 1 : (1UL << (ilog2((n) - 1) + 1))) + +UINT64 cpu_to_be64(UINT64 x); +UINT32 cpu_to_be32(UINT32 x); +UINT16 cpu_to_be16(UINT16 x); +UINT64 be64_to_cpu(UINT64 x); +UINT32 be32_to_cpu(UINT32 x); +UINT16 be16_to_cpu(UINT16 x); + +void atomic_inc(atomic_t *v); +void atomic_dec(atomic_t *v); +void atomic_set(atomic_t *addr, int newval); +int atomic_read(atomic_t *v); +void atomic_add(int i, atomic_t *v); +void atomic_sub(int i, atomic_t *v); +int atomic_add_return(int i, atomic_t *v); +int atomic_sub_return(int i, atomic_t *v); +int atomic_cmpxchg(atomic_t *v, int oldval, int newval); +void *_kzalloc(size_t size, ulong flags); +void *kcalloc(size_t n, size_t size, ulong flags); +void _kfree(void *p); +void *_vzalloc(size_t size); +void _vfree(void *p); + +#define destroy_completion(completion) +#define sema_deinit(lock) +#define mutex_deinit(lock) + +#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) +#define BITS_PER_BYTE 8 +#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) + +#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)] + +void bitmap_copy(unsigned long *dst, const unsigned long *src, int nbits); +void 
bitmap_zero(unsigned long *dst, int nbits); +void _set_bit(long nr, volatile unsigned long *addr); +void _clear_bit(long nr, volatile unsigned long *addr); +int _test_bit(long nr, volatile unsigned long *addr); + +#define kzalloc(size, flag) _kzalloc(size, flag) +#define NUMA_NO_NODE (-1) +#define dev_to_node(pdev) NUMA_NO_NODE + +#ifndef gfp_t +#define gfp_t unsigned +#endif + +VOID dma_free_coherent(VOID *handle, size_t size, VOID *cpu_addr, + dma_addr_t dma_handle); + +VOID *dma_zalloc_coherent(VOID *handle, size_t size, dma_addr_t *DeviceAddress, + ulong flag); +#define dma_alloc_coherent dma_zalloc_coherent +struct dma_pool *dma_pool_create(char *name, void *dev_hdl, size_t size, + size_t align, size_t allocation); +void dma_pool_destroy(struct dma_pool *pool); + +VOID msleep(UINT32 mstime); +VOID usleep(UINT32 ustime); +VOID usleep_range(UINT32 mix, UINT32 max); + +VOID spin_lock_init(spinlock_t *lock); +VOID spin_lock_deinit(spinlock_t *lock); +VOID spin_lock_bh(spinlock_t *lock); +VOID spin_unlock_bh(spinlock_t *lock); +void sema_init(struct semaphore *sem, int val); +void down(struct semaphore *sem); +void up(struct semaphore *sem); +void init_completion(struct completion *x); +#define reinit_completion init_completion +BOOLEAN wait_for_completion_timeout(struct completion *x, + unsigned long timeout); +BOOLEAN try_wait_for_completion(struct completion *x); +void wait_for_completion(struct completion *x); +void complete(struct completion *x); +void mutex_init(struct mutex *lock); +int mutex_trylock(struct mutex *lock); +void mutex_lock(struct mutex *lock); +void mutex_unlock(struct mutex *lock); +void *io_mapping_map_wc(void *ignored1, UINT64 ignored2); +void io_mapping_unmap(void *ignored); +UINT16 hinic5_adev_irq_vectors_alloc(void *dev, struct msix_entry *entries, + int minvec, UINT16 maxvec); +UINT16 hinic5_adev_irq_vector(struct msix_entry *entry, u16 i); +#define hinic5_adev_irq_vectors_free(adev) + +#define dev_name(dev) "dev" + +void 
synchronize_irq(UINT32 irq); + +#define WQ_MEM_RECLAIM 0x8U + +void cancel_work_sync(struct work_struct *work); +struct workqueue_struct *create_singlethread_workqueue(char *name); +struct workqueue_struct *alloc_workqueue(const char *fmt, unsigned int flags, + int max_active, ...); + +void destroy_workqueue(struct workqueue_struct *wq); +void destroy_work(struct work_struct *work); +int queue_work(struct workqueue_struct *wq, struct work_struct *work); +void flush_workqueue(struct workqueue_struct *wq); +void tasklet_init(struct tasklet_struct *t, void (*func)(unsigned long), + unsigned long data); +#define tasklet_schedule(t) +#define tasklet_kill(t) +#define tasklet_state(tasklet) 1 + +#define queue_work_on(cpu, wq, work) queue_work(wq, work) + +/* bypass timer functions */ +void initialize_timer(void *adapter_hdl, struct timer_list *timer); +void add_to_timer(struct timer_list *timer, UINT64 period); +void stop_timer(struct timer_list *timer); +void delete_timer(struct timer_list *timer); +void mod_timer(struct timer_list *timer, UINT64 expires); + +#define LINUX_VERSION_CODE 2 +#define KERNEL_VERSION(i, j, k) 1 +#define CRITICAL (1 << 10) + +#define wmb() MemoryFence() +#define smp_rmb() MemoryFence() +#define rmb() MemoryFence() +#define dma_rmb() rmb() + +#define spin_lock_irqsave(lock, flags) spin_lock_bh(lock) +#define spin_unlock_irqrestore(lock, flags) spin_unlock_bh(lock) +#define spin_lock(lock) spin_lock_bh(lock) +#define spin_unlock(lock) spin_unlock_bh(lock) +#define dma_pool_alloc(pool, flags, handle) \ + dma_zalloc_coherent((pool->dev_hdl), (pool->size), (handle), (flags)) +#define dma_pool_free(pool, vaddr, addr) \ + dma_free_coherent((pool->dev_hdl), (pool->size), (vaddr), (addr)) + +#define DEBUGPRINT(lvl, msg, ...) \ + do { \ + if (lvl != 0) { \ + DebugPrint (0x2, msg, ##__VA_ARGS__); \ + DebugPrint (0x2, "\n"); \ + } \ + } while (0) + +#define likely +#define unlikely + +#define pr_err(fmt, ...) 
DEBUGPRINT(CRITICAL, fmt, ##__VA_ARGS__) +#define pr_info(fmt, ...) DEBUGPRINT(CRITICAL, fmt, ##__VA_ARGS__) + +static inline void __attribute__ ((unused)) hinic5_get_time(UINT8 *min, UINT8 *second, + UINT32 *ns) +{ + EFI_STATUS Status; + EFI_TIME time = {0}; + + Status = gRT->GetTime(&time, NULL); + if (EFI_ERROR(Status)) + DebugPrint(DEBUG_ERROR, "GetTime not success, returns %r\n", Status); + + *min = time.Minute; + *second = time.Second; + *ns = time.Nanosecond; +} + +#define dev_err(dev, fmt, ...) \ + do { \ + dev = dev; \ + u8 min = 0; \ + u8 second = 0; \ + u32 ns = 0; \ + hinic5_get_time(&min, &second, &ns); \ + DEBUGPRINT(CRITICAL, "[%u:%u:%u]"fmt, min, second, ns, ##__VA_ARGS__); \ + } while (0) + +#define dev_warn(dev, fmt, ...) \ + do { \ + dev = dev; \ + u8 min = 0; \ + u8 second = 0; \ + u32 ns = 0; \ + hinic5_get_time(&min, &second, &ns); \ + DEBUGPRINT(CRITICAL, "[%u:%u:%u]"fmt, min, second, ns, ##__VA_ARGS__); \ + } while (0) + +#define dev_warn_once(dev, fmt, ...) \ + do { \ + static bool __print_once; \ + if (!__print_once) { \ + __print_once = true; \ + dev_warn(dev, fmt, ##__VA_ARGS__); \ + } \ + } while (0) + +#define dev_notice(dev, fmt, ...) \ + do { \ + dev = dev; \ + DEBUGPRINT(CRITICAL, fmt, ##__VA_ARGS__); \ + } while (0) + +#define dev_info(dev, fmt, ...) \ + do { \ + dev = dev; \ + DEBUGPRINT(CRITICAL, fmt, ##__VA_ARGS__); \ + } while (0) + +#define dev_dbg(dev, fmt, ...) 
\ + do { \ + dev = dev; \ + DEBUGPRINT(CRITICAL, fmt, ##__VA_ARGS__); \ + } while (0) + +#define BITS_PER_LONG 64 + +int request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, + const char *name, void *dev); +void free_irq(UINT32 irq, void *dev); + +void INIT_WORK(struct work_struct *work, + void (*eq_irq_work)(struct work_struct *work)); +UINT8 work_busy(struct work_struct *work); + +#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1))) +#define set_bit(nr, dest) _set_bit((long)(nr), (ulong *)(dest)) +#define clear_bit(nr, dest) _clear_bit((long)(nr), (ulong *)(dest)) +#define test_bit(nr, dest) _test_bit((long)(nr), (ulong *)(dest)) + +unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags); +int find_first_zero_bit(const unsigned long *vaddr, unsigned size); + +#define kfree _kfree +#define vzalloc(size) _vzalloc(size) +#define vfree(p) _vfree(p) + +#define SIZE_MAX (~(size_t)0) +#define vmalloc(size) _vzalloc(size) +#define kmalloc(size, flag) _kzalloc(size, flag) + +void ether_addr_copy(UINT8 *dst, const UINT8 *src); +int memcpy_s(void *s1, size_t destMax, const void *s2, size_t n); +int memset_s(void *s, size_t destMax, int c, size_t n); +int strncpy_s(char *strDest, size_t destMax, const char *strSrc, size_t count); +int sprintf_s(char *strDest, size_t destMax, const char *format, ...); +int snprintf_s(char *strDest, size_t destMax, size_t count, const char *format, ...); + +UINT32 readl_uefi(BUS_IO_PROTOCOL *BusIO, UINT32 reg, UINT8 bar_idx); +void writel_uefi(BUS_IO_PROTOCOL *BusIO, UINT32 reg, UINT8 bar_idx, + UINT32 val); +void writeq_uefi(BUS_IO_PROTOCOL *BusIO, UINT64 reg, UINT8 bar_idx, + UINT64 val); + +#ifndef ether_addr_equal +#define ether_addr_equal(addr1, addr2) !CompareMem(addr1, addr2, ETH_ALEN) +#endif + +#ifndef eth_zero_addr +#define eth_zero_addr(addr) ZeroMem(addr, ETH_ALEN) +#endif + +static inline BOOLEAN is_multicast_ether_addr(const UINT8 *addr) +{ + return 0x01 & addr[0]; +} + 
+static inline BOOLEAN is_zero_ether_addr(const UINT8 *addr) +{ + return !(addr[0x0] | addr[0x1] | addr[0x2] | addr[0x3] | addr[0x4] | addr[0x5]); +} + +static inline int is_valid_ether_addr(const UINT8 *addr) +{ + /* FF:FF:FF:FF:FF:FF is a multicast address so we don't need to + * explicitly check for it here. + */ + return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr); +} + +static inline int fls(unsigned int x) +{ + int i; + + if (x == 0) + return 0; + + /* 32 bit for unsigned int */ + for (i = 32; i > 0; i--) { + if ((x & (1U << (unsigned int)(i - 1))) != 0) + break; + } + + return i; +} + +#define USEC_PER_MSEC 1000L + +#define WORK_CPU_UNBOUND 0 + +/* Adapting to the HINIC5_CQM */ +typedef struct { + unsigned long pgprot; +} pgprot_t; + +#define PAGE_KERNEL ((pgprot_t) {0}) /* these mean nothing to non MMU */ + +#ifndef gfp_t +#define gfp_t unsigned +#endif + +#define rwlock_t spinlock_t +#define __GFP_ZERO 0 +#define DMA_BIDIRECTIONAL 0 +#define get_order(x) HighBitSet32(EFI_SIZE_TO_PAGES(x)) +#define VM_MAP 0x00000004 /* vmap()ed pages */ + +#define __swab64(value) cpu_to_be64(value) +#define __swab32(value) cpu_to_be32(value) + +#define read_lock(lock) spin_lock_bh(lock) +#define read_unlock(lock) spin_unlock_bh(lock) +#define write_lock(lock) spin_lock_bh(lock) +#define write_unlock(lock) spin_unlock_bh(lock) + +#define read_lock_bh(lock) read_lock(lock) +#define read_unlock_bh(lock) read_unlock(lock) +#define write_lock_bh(lock) write_lock(lock) +#define write_unlock_bh(lock) write_unlock(lock) + +#define rwlock_init(lock) spin_lock_init(lock) +#define rwlock_deinit(lock) + +#define vmap(pages, page_number, flag1, flag2) pages +#define vunmap(a) +#define page_address(page) page + +void *get_free_pages(UINT32 order); +void free_pages(UINT64 addr, UINT32 order); +#define __get_free_pages(gfp_mask, order) \ + ({ (void)(gfp_mask); get_free_pages(order); }) +#define ossl_get_free_pages __get_free_pages + +static inline dma_addr_t 
dma_map_single(struct device *dev, void *ptr, + size_t size, int direction) +{ + return (dma_addr_t)ptr; +} + +static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, + size_t size, int direction) { } + +static inline int dma_mapping_error(struct device *dev, + dma_addr_t dma_addr) +{ + return 0; +} + +static inline struct page *virt_to_page(void *ptr) +{ + return (struct page *)ptr; +} + +static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, + unsigned int order) +{ + return get_free_pages(order); +} + +static inline int find_next_zero_bit(const unsigned long *addr, + unsigned long size, + unsigned long offset) +{ + int i = 0; + + for (i = (int)offset; i < (int)size; i++) + if (test_bit(i, addr) == 0) + return i; + + return i; +} + +static inline INT64 atomic_dec_and_test(atomic_t *v) +{ + EFI_TPL OldTpl; + INT32 r = 0; + + OldTpl = gBS->RaiseTPL(TPL_HIGH_LEVEL); + r = v->counter; + v->counter = v->counter - 1; + gBS->RestoreTPL(OldTpl); + + return (INT64)(r == 1); +} + +struct msi_msg { + u32 address_lo; + u32 address_hi; + u32 data; +}; + +/* MSI-X Table entry format */ +#define PCI_MSIX_ENTRY_SIZE 16 +#define PCI_MSIX_ENTRY_LOWER_ADDR 0 +#define PCI_MSIX_ENTRY_UPPER_ADDR 4 +#define PCI_MSIX_ENTRY_DATA 8 + +#ifndef WARN_ON +#define WARN_ON(condition) ({ \ + bool __ret_warn_on = !!(condition); \ + unlikely(__ret_warn_on); \ +}) +#endif + +#ifndef WARN_ON_ONCE +#define WARN_ON_ONCE(condition) ({ \ + bool __ret_warn_on = !!(condition); \ + unlikely(__ret_warn_on); \ +}) +#endif + +#endif diff --git a/hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/sdk_kcompat.h b/hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/sdk_kcompat.h new file mode 100644 index 00000000..c06b10c3 --- /dev/null +++ b/hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/sdk_kcompat.h @@ -0,0 +1,20 @@ +/* Autogenerated for KSRC=/usr/src/kernels/6.6.0-28.0.0.34.oe2403.aarch64/ via sdk-kcompat-generator.sh */ +#ifndef SDK_KCOMPAT_H +#define SDK_KCOMPAT_H 
+#define HAVE_PCIE_RESET_DONE 1 +#define HAVE_TIMER_SETUP 1 +#define HAVE_DEVLINK_H 1 +#define HAVE_DEVLINK_FLASH_UPDATE_METHOD 1 +#define HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW 1 +#define HAVE_DEVLINK_OPS_FLASH_UPDATE_HAVE_PARAMS 1 +#define HAVE_DEVLINK_ALLOC_SET_DEV 1 +#define HAVE_DEVLINK_ALLOC 1 +#define HAVE_DEVLINK_REGISTER 1 +#define NEED_FORCE_UACCESS_BEGIN 1 +#define NEED_FORCE_UACCESS_END 1 +#define NEED_GET_FS 1 +#define NEED_SET_FS 1 +#define NEED_PCI_ENABLE_PCIE_ERROR_REPORTING 1 +#define NEED_PCI_DISABLE_PCIE_ERROR_REPORTING 1 +#define HAVE_DEVNODE_CONST_DEV 1 +#endif /* SDK_KCOMPAT_H */ diff --git a/hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/uefi/HwSafeMemOpWrapper.h b/hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/uefi/HwSafeMemOpWrapper.h new file mode 100644 index 00000000..0ad1b34d --- /dev/null +++ b/hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/uefi/HwSafeMemOpWrapper.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2018-2018. All rights reserved. + * Description: Huawei safe print function define + * + * Huawei(R) HiNIC PCI Express Network Controller UEFI Driver. + * This driver is only usable in the EFI Pre-boot execution environment. + * All rights reserved. + * Author : + * Create : 2018/12/10 + */ + +#ifndef HW_SAFE_MEM_OP_WRAPPER_H +#define HW_SAFE_MEM_OP_WRAPPER_H + +EFI_STATUS MemCpyS( + void *dest, + UINTN destMax, + const void *src, + UINTN count); + +EFI_STATUS MemSetS( + void *dest, + UINTN destMax, + UINT8 c, + UINTN count); + +EFI_STATUS MemMoveS( + void *dest, + UINTN destMax, + const void *src, + UINTN count); + +#endif diff --git a/hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/uefi/HwSafePrint.h b/hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/uefi/HwSafePrint.h new file mode 100644 index 00000000..b0cdaf5c --- /dev/null +++ b/hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/uefi/HwSafePrint.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2018-2018. 
All rights reserved. + * Description: Huawei safe print function define + * + * Huawei(R) HiNIC PCI Express Network Controller UEFI Driver. + * This driver is only usable in the EFI Pre-boot execution environment. + * All rights reserved. + * Author : + * Create : 2018/12/10 + */ + +#ifndef HW_SAFE_PRINT_H +#define HW_SAFE_PRINT_H + +UINTN AsciiSPrintS( + CHAR8 *startOfBuffer, + UINTN bufferSize, + CONST CHAR8 *formatString, + ...); + +UINTN UnicodeSPrintS( + CHAR16 *startOfBuffer, + UINTN bufferSize, + CONST CHAR16 *formatString, + ...); + +UINTN AsciiVSPrintS( + CHAR8 *startOfBuffer, + UINTN bufferSize, + CONST CHAR8 *formatString, + VA_LIST marker); + +UINTN UnicodeVSPrintS( + CHAR16 *startOfBuffer, + UINTN bufferSize, + CONST CHAR16 *formatString, + VA_LIST marker); + +#ifdef EDKII_SUPPORT +UINTN AsciiBSPrintS( + CHAR8 *startOfBuffer, + UINTN bufferSize, + CONST CHAR8 *formatString, + BASE_LIST marker); + +UINTN UnicodeBSPrintS( + CHAR16 *startOfBuffer, + UINTN bufferSize, + CONST CHAR16 *formatString, + BASE_LIST marker); +#endif + +#endif diff --git a/hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/vbs_kcompat.h b/hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/vbs_kcompat.h new file mode 100644 index 00000000..279eb9c2 --- /dev/null +++ b/hinic5/src/dpu_develop_interface/drv_sdk_intf/ossl/vbs_kcompat.h @@ -0,0 +1,13 @@ +/** + * @copyright Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * @file nic_kcompat.h + * @brief Temporary file - The actual nic_kcompat.h is generated during build. 
+ * @version Initial + * @date 2026/1/22 + */ +#ifndef VBS_KCOMPAT_H +#define VBS_KCOMPAT_H + +// vbs_kcompat.h是中间产物头文件 + +#endif /* VBS_KCOMPAT_H */ \ No newline at end of file diff --git a/hinic5/src/dpu_develop_interface/drv_srvc_intf/drv_bond_api.h b/hinic5/src/dpu_develop_interface/drv_srvc_intf/drv_bond_api.h new file mode 100644 index 00000000..cdfd6935 --- /dev/null +++ b/hinic5/src/dpu_develop_interface/drv_srvc_intf/drv_bond_api.h @@ -0,0 +1,192 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * Description : nic驱动提供的半卸载bond接口定义 + * Author : / + * Create : 2025/9/9 + * Notes : + * History : Init + */ + +#ifndef DRV_BOND_API +#define DRV_BOND_API + +#include <net/bonding.h> +#include <linux/netdevice.h> +#include <linux/types.h> +#include "bond_common_defs.h" + +/** + * @brief struct hinic5_bond_info_s + * @details 用户获取的bond信息结构体 + */ +struct hinic5_bond_info_s { + u8 slaves; /**< bond port slave bitmap */ + u8 cnt; /**< bond slave 的数量 */ + u8 rsvd[2]; + char slaves_name[BOND_PORT_MAX_NUM][BOND_NAME_MAX_LEN]; /**< slave设备名 */ +}; + +/** + * @brief struct netdev_lower_state_info + * @details ndev状态信息 + */ +struct netdev_lower_state_info { + u8 link_up : 1; /**< slave设备 link up状态 */ + u8 tx_enabled : 1; /**< 可用于发送数据的slave设备 */ + u8 rsvd : 6; +} __attribute__((__packed__)); + +/** + * @brief struct bond_tracker + * @details bond设备信息结构体 + */ +struct bond_tracker { + struct netdev_lower_state_info netdev_state[BOND_PORT_MAX_NUM]; /**< bond slave设备信息 */ + struct net_device *ndev[BOND_PORT_MAX_NUM]; /**< bond slave设备指针 */ + u8 cnt; /**< bond slave 设备数量 */ + bool is_bonded; /**< bond是否可以发送至mpu创建并激活bond */ +}; + +/** + * @brief struct bond_attr + * @details bond基础属性 + */ +struct bond_attr { + u16 bond_mode; /**< bond模式 */ + u16 bond_id; /**< bond id */ + u16 up_delay; /**< bond up时开始工作的延迟时间 */ + u16 down_delay; /**< bond down时不可用的延迟时间 */ + u8 active_slaves; /**< 激活可用的slave bitmap */ + u8 slaves; /**< 组bond时原始的slave bitmap */
+ u8 lacp_collect_slaves; /**< lacp协议配置的slave bitmap */ + u8 xmit_hash_policy; /**< bond选路的hash策略 */ + u32 first_roce_func; /**< bond中的第一个func,仅roce使用 */ + u32 bond_pf_bitmap; /**< 组bond的func */ + u32 user_bitmap; /**< 当前使用bond的用户bitmap */ +}; + +/** + * @brief 用户注册的bond绑定处理接口 + * @param[in] bond: 内核协议中bonding结构体指针 + * @details 当用户注册此接口后,协议栈bond触发bond事件(增删slave等)会调用此接口进行 + * 是否可以绑定bond的判断,如果判断可以则bond驱动会为用户尝试绑定bond + * @attention N/A + * @return 如果返回true,代表可以绑定bond,如果返回false代表不可绑定bond + **/ +typedef bool (*attach_func)(struct bonding *bond); + +/** + * @brief 用户注册的bond事件处理接口 + * @param[in] bond_name: bond名 + * @param[in] attr: bond属性 + * @param[in] err: bond激活后/修改后/去激活后的处理结果,0为成功,非0失败 + * @details 用户注册此接口,当bond的创建/删除/更新前后,bond驱动会调用做业务自身的处理 + * @attention N/A + * @return void + **/ +typedef void (*event_func)(const char *bond_name, struct bond_attr *attr, int err); + +/** + * @brief struct bond_srv_func + * @details 业务注册的bond处理接口集合 + */ +struct bond_srv_func { + event_func before_active; /**< bond激活前srv处理 */ + event_func after_active; /**< bond激活后srv处理 */ + event_func before_modify; /**< bond修改前srv处理 */ + event_func after_modify; /**< bond修改后srv处理 */ + event_func before_deactive; /**< bond去激活前srv处理 */ + event_func after_deactive; /**< bond去激活后srv处理 */ + attach_func can_attach; /**< 注册此接口代表当协议栈bond更新CFM会为srv绑定bond */ +}; + +/** + * @brief 用户绑定协议栈bonding + * @param[in] name bond设备名 + * @param[in] user 绑定的用户 + * @param[out] bond_id 绑定的bondid + * @details 用户在协议栈bonding创建完成后,可以下发此接口绑定对应bond名的bond,返回芯片内部管理的bondid + * @attention N/A + * @return 返回绑定结果,0为成功,非0失败 + **/ +int hinic5_bond_attach(const char *name, enum hinic5_bond_user user, u16 *bond_id); + +/** + * @brief 用户解绑协议栈bonding + * @param[in] bond_id bond id + * @param[in] user 绑定的用户 + * @details 用户解绑协议栈bonding,如果无使用的用户则会销毁nic驱动中的bond设备 + * @attention N/A + * @return 返回解绑结果,0为成功,非0失败 + **/ +int hinic5_bond_detach(u16 bond_id, enum hinic5_bond_user user); + +/** + * @brief 解绑此用户所有绑定的bond + * @param[in] user 用户 + * 
@details 为所有的bond设备解绑此用户 + * @attention N/A + * @return void + **/ +void hinic5_bond_clean_user(enum hinic5_bond_user user); + +/** + * @brief 获取bond设备的BDF标识 + * @param[in] bond_id bond id + * @param[out] uplink_id 返回的bdf id + * @details 获取bond设备的BDF标识,支持pci/ub设备 + * @attention N/A + * @return 返回获取结果 0为成功,非0失败 + **/ +int hinic5_bond_get_uplink_id(u16 bond_id, u32 *uplink_id); + +/** + * @brief bond用户注册处理接口接口 + * @param[in] user 用户 + * @param[in] func 处理接口集合,包含事件处理和绑定处理 + * @details 用户注册处理接口,处理接口见bond_srv_func结构体内的接口定义 + * @attention N/A + * @return 返回注册结果 0为成功,非0失败 + **/ +int hinic5_bond_register_service_func(enum hinic5_bond_user user, struct bond_srv_func *func); + +/** + * @brief bond用户解注册处理接口接口 + * @param[in] user 用户 + * @details 用户解注册处理接口,处理接口见bond_srv_func结构体内的接口定义 + * @attention N/A + * @return 返回解注册结果 0为成功,非0失败 + **/ +int hinic5_bond_unregister_service_func(enum hinic5_bond_user user); + +/** + * @brief 获取bond slave信息 + * @param[in] bond_id bond id + * @param[in] info bond slave信息,详见hinic5_bond_info_s + * @details 获取bond slave信息 + * @attention N/A + * @return 返回获取结果 0为成功,非0失败 + **/ +int hinic5_bond_get_slaves(u16 bond_id, struct hinic5_bond_info_s *info); + +/** + * @brief 获取bond slave的ndev设备 + * @param[in] bond_name bond名 + * @param[in] port_id port id + * @details 获取bond中对应portid的ndev设备指针 + * @attention N/A + * @return 返回ndev指针,返回非NULL获取成功,返回NULL获取失败 + **/ +struct net_device *hinic5_bond_get_netdev_by_portid(const char *bond_name, u8 port_id); + +/** + * @brief 获取bond slave的设备信息 + * @param[in] bond_name bond名 + * @param[out] tracker bond设备信息,见bond_tracker + * @details 获取bond中所有slave设备的设备信息 + * @attention N/A + * @return 返回获取结果 0为成功,非0失败 + **/ +int hinic5_get_bond_tracker_by_name(const char *name, struct bond_tracker *tracker); + +#endif \ No newline at end of file diff --git a/hinic5/src/dpu_develop_interface/drv_srvc_intf/nic/drv_nic_api.h b/hinic5/src/dpu_develop_interface/drv_srvc_intf/nic/drv_nic_api.h new file mode 100644 index 00000000..6d28f4e9 --- 
/dev/null +++ b/hinic5/src/dpu_develop_interface/drv_srvc_intf/nic/drv_nic_api.h @@ -0,0 +1,226 @@ +#ifndef DRV_NIC_API_H +#define DRV_NIC_API_H + +#include "base_type.h" +#if !defined(__UEFI__) && !defined(__VMWARE__) +#include <linux/netdevice.h> +#include <linux/ethtool.h> +#include "hinic5_lld.h" +#endif + +/** + * @brief PF设置VF link状态 + * + * @param hwdev device pointer to hwdev + * @param vf_link_forced VF 强制link状态,false--Link状态跟随PF,true--link状态根据link_state值 + * @param link_state link状态,false--Link down,true--link up + * @details PF设置该PF下所有VF link状态,PF保存VF的link状态状态, + * 未设置场景下VF link状态默认跟随PF,用户设置后以用户设置为准 + * + * @attention: 仅PF支持 + * + * @return: VF link状态设置成功或者失败. + * @retval 0 成功 + * @retval 非0 失败 + */ +int hinic5_pf_set_vf_link_state(void *hwdev, bool vf_link_forced, bool link_state); + +/** + * @brief 添加设备mac接口 + * + * @param hwdev device pointer to hwdev + * @param mac_addr mac地址 + * @param vlan_id vlan id 范围[0~4095] + * @param func_id global function index + * @param channel channel id,mailbox发送使用的channel id + * + * @details 添加对应function的mac地址 + * + * @attention: 函数内部涉及发送mailbox消息会休眠,禁止中断上下文等不允许休眠的流程中调用 + * + * @return: 添加MAC返回成功或者失败. + * @retval 0 成功 + * @retval 非0 失败 + */ +int hinic5_set_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id, u16 channel); + +#if !defined(__UEFI__) && !defined(__VMWARE__) +/** + * @brief 根据lld_dev获取网络设备句柄netdev结构体指针 + * + * @param lld_dev device pointer to lld_dev + * + * @details 根据lld_dev查找nic uld设备获取netdev + * + * @attention: 该接口返回不会对netdev引用计数++,使用过程中如果netdev被释放可能导致访问野指针 +* + * @return: 成功匹配到lld_dev的netdev时返回netdev结构体指针,否则返回NULL + */ +struct net_device *hinic5_get_netdev_by_lld(struct hinic5_lld_dev *lld_dev); + +/** + * @brief 注册设备私有数据 + * + * @param dev device pointer to net_device + * @param priv 私有数据 + * + * @details 通过net_device注册设备私有数据 + * + * @return: 私有数据注册成功或者失败. 
+ * @retval 0 成功 + * @retval 非0 失败 + */ +int hinic5_netdev_priv_set(const struct net_device *dev, void *priv); + +/** + * @brief 获取设备私有数据 + * + * @param dev device pointer to net_device + * + * @details 通过net_device注册设备私有数据 + * + * @attention: 需要先调用hinic5_netdev_priv_set接口注册设备私有数据 + * + * @return: 私有数据. + * @retval NULL 失败 + * @retval 非NULL 成功 + */ +void *hinic5_netdev_priv_get(const struct net_device *dev); + +/** + * @brief NIC驱动加载钩子函数 + * + * @param netdev device pointer to net_device + * + * @details 由产品重载,可实现注册文件系统文件、修改netdev名称等功能 + * + * @return: 钩子函数运行结果. + * @retval 0 成功 + * @retval 非0 失败 + */ +int hinic5_probe_extend_hook(struct net_device *netdev); + +/** + * @brief NIC驱动卸载钩子 + * + * @param netdev device pointer to net_device + * + * @details 由产品重载 + * + * @return: 钩子函数运行结果. + * @retval 0 成功 + * @retval 非0 失败 + */ +void hinic5_remove_extend_hook(struct net_device *netdev); + +struct hinic5_nt_msg { + void *buf_in; + void *buf_out; + u32 in_size; + u32 out_size; +}; + +/** + * @brief NIC驱动命令钩子函数 + * + * @param netdev device pointer to net_device + * @param cmd 命令字 + * @param nt_msg 命令内容 + * @param support 是否支持该命令, 产品需要根据命令字判断是否支持 + * + * @details 由产品重载 + * + * @return: 命令执行结果. + * @retval 0 成功 + * @retval 非0 失败 + */ +int hinic5_tool_cmd_extend_handle(struct net_device *netdev, u32 cmd, + struct hinic5_nt_msg *nt_msg, bool *support); + +/** + * @brief 产品侧设置用户态qps数的接口 + * + * @param netdev device pointer to net_device + * @param usr_qps_num 期望的用户态qps数量 + * @details 由产品调用 + * + * @return: 命令执行结果. + * @retval 0 成功 + * @retval 非0 失败 + */ +int hinic5_set_usr_qps_num(struct net_device *netdev, u16 usr_qps_num); + +/** + * @brief NIC相关跳过MAC设置函数 + * + * @param dev device pointer to net_device + * @param addr MAC地址 + * + * @details 由产品重载,可实现用户态队列开启后跳过相关设置的功能 + * + * @return: 钩子函数运行结果. 
+ * @retval 0 不跳过 + * @retval 非0 跳过 + */ +int hinic5_set_mac_addr_pre_hook(struct net_device *netdev, void *addr); + +/** + * @brief NIC相关跳过MTU设置函数 + * + * @param netdev device pointer to net_device + * @param new_mtu 新的mtu值 + * + * @details 由产品重载,可实现用户态队列开启后跳过相关设置的功能 + * + * @return: 钩子函数运行结果. + * @retval 0 不跳过 + * @retval 非0 跳过 + */ +int hinic5_change_mtu_pre_hook(struct net_device *netdev, int new_mtu); + +/** + * @brief NIC相关跳过MTU设置函数 + * + * @param netdev device pointer to net_device + * @param ring 队列深度相关参数 + * + * @details 由产品重载,可实现用户态队列开启后跳过相关设置的功能 + * + * @return: 钩子函数运行结果. + * @retval 0 不跳过 + * @retval 非0 跳过 + */ +int hinic5_set_ringparam_pre_hook(struct net_device *netdev, struct ethtool_ringparam *ring); + +/** + * @brief 产品侧设置流分叉使能group数量的接口 + * + * @param netdev device pointer to net_device + * @param group_num 期望的group数量 + * @details 由产品调用,group_num范围为1~8。1:关闭流分叉,其他值:开启流分叉。 + * @attention 开启流分叉时,group_num实际生效值会向上取整到2的幂次。 + * + * @return: 命令执行结果. + * @retval 0 成功 + * @retval 非0 失败 + */ +int hinic5_set_flow_bifurcation_group_num(struct net_device *netdev, u8 group_num); + +/** + * @brief 产品侧查询/设置流分叉功能启用时groupId对应的间接表 + * + * @param netdev device pointer to net_device + * @param op_code 0 查询;1 设置 + * @param group_id 设备所使用的group id + * @param indir 间接表 + * @param indir_length 间接表长度 + * @details 由产品调用 + * + * @return: 命令执行结果. + * @retval 0 成功 + * @retval 非0 失败 + */ +int hinic5_cfg_flow_bifurcation_paras(struct net_device *netdev, u8 op_code, + u8 group_id, u32 *indir, u16 indir_length); +#endif /* !defined(__UEFI__) && !defined(__VMWARE__) */ +#endif diff --git a/hinic5/src/dpu_develop_interface/fw_msg_intf/bond/bond_common_defs.h b/hinic5/src/dpu_develop_interface/fw_msg_intf/bond/bond_common_defs.h new file mode 100644 index 00000000..3dbcf279 --- /dev/null +++ b/hinic5/src/dpu_develop_interface/fw_msg_intf/bond/bond_common_defs.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2023. All rights reserved. 
+ * Description : South Interface---ovs mpu bond interface between mpu and driver + * Author : / + * Create : 2019/1/1 + * Notes : Attention--Use the doxygen comment style to generate interface document. + * History : None + */ +#ifndef BOND_COMMON_DEFS_H +#define BOND_COMMON_DEFS_H + +#define BOND_PORT_MAX_NUM 4 +#define BOND_NAME_MAX_LEN 16 +#define BOND_ID_INVALID 0xFFFF +#define OVS_PORT_NUM_MAX BOND_PORT_MAX_NUM +#define BOND_DEFAULT_ROCE_FUNC 0xFFFFFFFF + +#define BOND_ID_IS_VALID(_id) (((_id) >= BOND_FIRST_ID) && ((_id) <= BOND_MAX_ID)) +#define BOND_ID_IS_INVALID(_id) (!(BOND_ID_IS_VALID(_id))) + +/** + * @brief enum bond_group_id + * @details bond id + */ +enum bond_group_id { + BOND_FIRST_ID = 1, + BOND_MAX_ID = BOND_FIRST_ID, + BOND_MAX_NUM, +}; + +/** + * @brief enum hinic5_bond_user + * @details bond用户枚举,为了兼容23,不适用通用定义 + */ +enum hinic5_bond_user { + HINIC5_BOND_USER_OVS, + HINIC5_BOND_USER_TOE, + HINIC5_BOND_USER_ROCE, + HINIC5_BOND_USER_UB, + HINIC5_BOND_USER_NIC, + HINIC5_BOND_USER_NUM +}; + +/** + * @brief enum tag_bond_mode + * @details bond模式的枚举类型 + */ +typedef enum tag_bond_mode { + BOND_MODE_NONE = 0, /**< 禁用网络绑定 */ + BOND_MODE_BACKUP = 1, /**< 主备模式,1表示主备绑定 */ + BOND_MODE_BALANCE = 2, /**< 负载均衡模式,2表示XOR负载均衡绑定 */ + BOND_MODE_LACP = 4, /**< LACP模式,4表示802.3ad绑定 */ + BOND_MODE_MAX /**< 网络绑定模式的最大值 */ +} bond_mode_e; + +/** + * @brief enum tag_bond_hash + * @details bond hash策略的枚举类型 + */ +typedef enum tag_bond_hash { + BOND_HASH_L2 = 0, /**< 使用L2地址进行哈希 */ + BOND_HASH_L23 = 1, /**< 使用L2和L3地址进行哈希 */ + BOND_HASH_L34 = 2, /**< 使用L3和L4地址进行哈希 */ + BOND_HASH_MAX = 3 /**< 最大的哈希策略值 */ +} bond_hash_e; + +/** + * ovs bond hash policy + */ +typedef enum ovs_bond_hash_policy { + OVS_BOND_HASH_POLICY_L2 = 0, /**< 0 for layer 2 */ + OVS_BOND_HASH_POLICY_L34 = 1, /**< 1 for layer 3+4 */ + OVS_BOND_HASH_POLICY_L23 = 2, /**< 2 for layer 2+3 */ + OVS_BOND_HASH_POLICY_VFID_SQN = 3, /**< 3 for vfid ^ sqn */ + OVS_BOND_HASH_POLICY_MAX +} ovs_bond_hash_policy_e; + +/** + * 
@brief enum bond_port_duplex_state + * @details 端口双工状态枚举类型 + */ +enum bond_port_duplex_state { + BOND_PORT_HALF_DUPLEX = 0, /**< 半双工 */ + BOND_PORT_FULL_DUPLEX, /**< 全双工 */ +}; + +#endif /* BOND_COMMON_DEFS_H */ diff --git a/hinic5/src/dpu_develop_interface/fw_msg_intf/bond/bond_mpu_cmd_defs.h b/hinic5/src/dpu_develop_interface/fw_msg_intf/bond/bond_mpu_cmd_defs.h new file mode 100644 index 00000000..1860add6 --- /dev/null +++ b/hinic5/src/dpu_develop_interface/fw_msg_intf/bond/bond_mpu_cmd_defs.h @@ -0,0 +1,121 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * Description : 驱动和mpu交互bond相关定义 + * Author : / + * Create : 2025/9/8 + * Notes : Attention--Use the doxygen comment style to generate interface document. + * History : None + */ + +#ifndef BOND_MPU_CMD_DEFS_H +#define BOND_MPU_CMD_DEFS_H + +#include "mpu_cmd_base_defs.h" +#include "bond_common_defs.h" + +/** + * @brief enum bond_mpu_cmd + * @details 继承的bond cmd枚举类型在23中使用,mbox消息和OVS绑定 见enum tag_ovs_mpu_cmd + */ +enum bond_mpu_cmd { + MPU_CMD_BOND_CREATE = 17, /* Create bond, temporarily keep it to 17 + * @see struct tag_ovs_bond_cmd + */ + MPU_CMD_BOND_DELETE = 18, /**< Delete bond @see struct tag_ovs_bond_cmd */ + MPU_CMD_BOND_SET_ATTR = 19, /**< Set bond attributes @see struct tag_ovs_bond_cmd */ + MPU_CMD_BOND_GET_ATTR = 20, /**< Get bond attributes, @see struct tag_bond_get */ +}; + +/** + * bond set attr: bond_name之前变量定义要求与结构体struct bond_attr保持一致 + */ +typedef struct tag_ovs_bond_cmd { + u16 bond_mode; /* bond mode:1 for active-backup, + * 2 for balance-xor,4 for 802.3ad + */ + u16 bond_id; /**< bond id */ + u16 up_delay; /**< default:200ms */ + u16 down_delay; /**< default:200ms */ + u32 active_slaves : 8; /**< active port slaves(bitmaps) */ + u32 slaves : 8; /**< bond port id bitmaps */ + u32 lacp_collect_slaves : 8; /**< bond port id bitmaps */ + u32 xmit_hash_policy : 8; /* xmit hash:0 for layer 2 , + * 1 for layer 2+3 ,2 for layer 3+4 + */ + u32 
first_roce_func; /**< RoCE used */ + u32 bond_pf_bitmap; /**< all PFs under the bond */ + u32 user_bitmap; + u8 bond_name[BOND_NAME_MAX_LEN]; /**< bond name, length must be less than 16 */ +} ovs_bond_cmd_s; + +/** + * Create/Delete bond and set attribute command struct defination. + */ +struct hinic5_bond_cmd { + struct mgmt_msg_head comm_head; + u16 sub_cmd; + u16 rsvd; + ovs_bond_cmd_s attr; +}; + +/** + * bond per port statistics + */ +#pragma pack(4) +typedef struct tag_bond_port_stat { + /** mpu provide */ + u64 rx_pkts; + u64 rx_bytes; + u64 rx_drops; + u64 rx_errors; + + u64 tx_pkts; + u64 tx_bytes; + u64 tx_drops; + u64 tx_errors; +} hinic5_bond_port_stat_s; +#pragma pack() + +/** + * bond port attribute + */ +typedef struct tag_bond_port_attr { + u8 duplex; + u8 status; + u8 rsvd0[2]; + u32 speed; +} hinic5_bond_port_attr_s; + +/** + * Get bond information command struct defination + * @see OVS_MPU_CMD_BOND_GET_ATTR + */ +typedef struct tag_bond_get { + u16 bond_id_vld; /* bond_id_vld=1: used bond_id get bond info; + * bond_id_vld=0: used bond_name get bond info + */ + u16 bond_id; /**< if bond_id_vld=1 input, else output */ + u8 bond_name[BOND_NAME_MAX_LEN]; /**< if bond_id_vld=0 input, else output */ + + u16 bond_mode; /* bond mode:1 for active-backup, + * 2 for balance-xor,4 for 802.3ad + */ + u8 active_slaves; /**< active port slaves(bitmaps) */ + u8 slaves; /**< bond port id bitmaps */ + + u8 lacp_collect_slaves; /**< bond port id bitmaps */ + u8 xmit_hash_policy; /**< xmit hash:0 for layer 2 ,1 for layer 2+3 ,2 for layer 3+4 */ + u16 rsvd0; /**< in order to 4B aligned */ + + hinic5_bond_port_stat_s stat[BOND_PORT_MAX_NUM]; + hinic5_bond_port_attr_s attr[BOND_PORT_MAX_NUM]; +} hinic5_bond_get_s; + +/* BOND OPCODE操作类型定义 */ +#define BOND_CFG_OPCODE_GET 0x0 +#define BOND_CFG_OPCODE_SET 0x1 + +/* BOND 配置的BITMAP定义 */ +#define BOND_CFG_BITMAP_ARP_EN 0x1UL + +#endif \ No newline at end of file diff --git 
a/hinic5/src/dpu_develop_interface/fw_msg_intf/cfg_mgmt/cfg_mgmt_mpu_cmd.h b/hinic5/src/dpu_develop_interface/fw_msg_intf/cfg_mgmt/cfg_mgmt_mpu_cmd.h new file mode 100644 index 00000000..c4d49e33 --- /dev/null +++ b/hinic5/src/dpu_develop_interface/fw_msg_intf/cfg_mgmt/cfg_mgmt_mpu_cmd.h @@ -0,0 +1,10 @@ +#ifndef CFG_MGMT_MPU_CMD_H +#define CFG_MGMT_MPU_CMD_H + +enum cfg_cmd { + CFG_CMD_GET_DEV_CAP = 0, + CFG_CMD_GET_HOST_TIMER = 1, + CFG_CMD_GET_EXTEND_DEV_CAP = 2, +}; + +#endif \ No newline at end of file diff --git a/hinic5/src/dpu_develop_interface/fw_msg_intf/cfg_mgmt/cfg_mgmt_mpu_cmd_defs.h b/hinic5/src/dpu_develop_interface/fw_msg_intf/cfg_mgmt/cfg_mgmt_mpu_cmd_defs.h new file mode 100644 index 00000000..1bf49b89 --- /dev/null +++ b/hinic5/src/dpu_develop_interface/fw_msg_intf/cfg_mgmt/cfg_mgmt_mpu_cmd_defs.h @@ -0,0 +1,470 @@ +#ifndef CFG_MGMT_MPU_CMD_DEFS_H +#define CFG_MGMT_MPU_CMD_DEFS_H + +#if defined(__LINUX__) || defined(__VMWARE__) +#include <linux/types.h> +#else +#include "base_type.h" +#endif +#include "mpu_cmd_base_defs.h" + +typedef enum { + SERVICE_BIT_NIC = 0, + SERVICE_BIT_ROCE = 1, + SERVICE_BIT_VBS = 2, + SERVICE_BIT_TOE = 3, + SERVICE_BIT_IPSEC = 4, + SERVICE_BIT_FC = 5, + SERVICE_BIT_VIRTIO = 6, + SERVICE_BIT_OVS = 7, + SERVICE_BIT_NVME = 8, + SERVICE_BIT_ROCEAA = 9, // TBD 替换为SERVICE_BIT_ROCE_MIG + SERVICE_BIT_CURRENET = 10, // TBD 替换为SERVICE_BIT_VIRTIO_MIG + SERVICE_BIT_PPA = 11, + SERVICE_BIT_MIGRATE = 12, + SERVICE_BIT_VROCE = 13, + SERVICE_BIT_DMMU = 14, + SERVICE_BIT_UB = 15, + SERVICE_BIT_ROCE_MIG = 16, + SERVICE_BIT_JBOF = 17, + SERVICE_BIT_RSV0 = 18, + SERVICE_BIT_ADV_ROCE = 19, + SERVICE_BIT_MACSEC = 20, + SERVICE_BIT_PFE = 21, + SERVICE_BIT_UBCNET = 22, + SERVICE_BIT_CFM = 23, + SERVICE_BIT_BIFUR = 24, + SERVICE_BIT_HIHTR = 25, + SERVICE_BIT_MAX +} servic_bit_define_e; + +typedef enum { + /* 0~31: reserved for servic_bit_define_e */ + + EXT_CAP_BEGIN = 32, + EXT_CAP_FAKE_VF = 32, + EXT_CAP_FW_UPDATE = 33, + EXT_CAP_COMM_INFO = 
34, + EXT_CAP_MAX, +} extend_cap_type_e; + +#define CFG_SERVICE_MASK_NIC (0x1 << SERVICE_BIT_NIC) +#define CFG_SERVICE_MASK_ROCE (0x1 << SERVICE_BIT_ROCE) +#define CFG_SERVICE_MASK_VBS (0x1 << SERVICE_BIT_VBS) +#define CFG_SERVICE_MASK_TOE (0x1 << SERVICE_BIT_TOE) +#define CFG_SERVICE_MASK_IPSEC (0x1 << SERVICE_BIT_IPSEC) +#define CFG_SERVICE_MASK_FC (0x1 << SERVICE_BIT_FC) +#define CFG_SERVICE_MASK_VIRTIO (0x1 << SERVICE_BIT_VIRTIO) +#define CFG_SERVICE_MASK_OVS (0x1 << SERVICE_BIT_OVS) +#define CFG_SERVICE_MASK_NVME (0x1 << SERVICE_BIT_NVME) +#define CFG_SERVICE_MASK_ROCEAA (0x1 << SERVICE_BIT_ROCEAA) // TBD 替换为SERVICE_BIT_ROCE_MIG +#define CFG_SERVICE_MASK_CURRENET (0x1 << SERVICE_BIT_CURRENET) // TBD 替换为SERVICE_BIT_VIRTIO_MIG +#define CFG_SERVICE_MASK_PPA (0x1 << SERVICE_BIT_PPA) +#define CFG_SERVICE_MASK_MIGRATE (0x1 << SERVICE_BIT_MIGRATE) +#define CFG_SERVICE_MASK_VROCE (0x1 << SERVICE_BIT_VROCE) +#define CFG_SERVICE_MASK_JBOF (0x1 << SERVICE_BIT_JBOF) +#define CFG_SERVICE_MASK_DMMU (0x1 << SERVICE_BIT_DMMU) +#define CFG_SERVICE_MASK_CFM (0x1 << SERVICE_BIT_CFM) +#define CFG_SERVICE_MASK_UB (0x1 << SERVICE_BIT_UB) +#define CFG_SERVICE_MASK_MACSEC (0x1 << SERVICE_BIT_MACSEC) +#define CFG_SERVICE_MASK_BIFUR (0x1 << SERVICE_BIT_BIFUR) +#define CFG_SERVICE_MASK_HIHTR (0x1 << SERVICE_BIT_HIHTR) + +#define FUNC_PARITY_GPA_SPU_EN 0 +#define FUNC_GPA_SPU_DIS 1 +#define FUNC_GPA_SPU_EN 2 + +typedef enum { + PLATFORMS_ID_ASIC = 0, + PLATFORMS_ID_FPGA = 1, + PLATFORMS_ID_PG = 2, + PLATFORMS_ID_STRG = 3, + PLATFORMS_ID_MAX +} platform_id_e; + +/* Definition of the scenario ID in the cfg_data, which is used for SML memory allocation. 
*/ +typedef enum { + SCENES_ID_FPGA_ETH = 0, + SCENES_ID_FPGA_TIOE = 1, /* Discarded */ + SCENES_ID_ASIC_STORAGE_ROCEAA_2X100 = 2, + SCENES_ID_ASIC_STORAGE_ROCEAA_4X25 = 3, + SCENES_ID_ASIC_CLOUD = 4, + SCENES_ID_ASIC_FC = 5, + SCENES_ID_ASIC_STORAGE_ROCE = 6, + SCENES_ID_ASIC_COMPUTE_ROCE = 7, + SCENES_ID_ASIC_STORAGE_TOE = 8, + SCENES_ID_FPGA_UB = 9, + SCENES_ID_FPGA_JBOF = 10, + SCENES_ID_FPGA_CRYPT = 11, + SCENES_ID_FPGA_VBS = 12, + SCENES_ID_FPGA_FC = 13, + SCENES_ID_FPGA_TOE = 14, + SCENES_ID_FPGA_NIC = 15, + SCENES_ID_PG_CLOUD = 16, + SCENES_ID_PG_AT_TEST = 17, + SCENES_ID_PG_JBOF = 18, + SCENES_ID_ASIC_JBOF = 19, + SCENES_ID_PG_TOE = 20, + SCENES_ID_ASIC_SLT = 21, + SCENES_ID_PG_FC = 22, + SCENES_ID_STRG_NIC = 23, + SCENES_ID_FPGA_OVS = 24, + SCENES_ID_FPGA_ALL1 = 25, + SCENES_ID_FPGA_ALL2 = 26, + SCENES_ID_ASIC_ALL = 27, + SCENES_ID_MAX +} scenes_id_define_e; + +/* Definition of the scenario ID in the cfg_data of V100 */ +typedef enum { + SCENES_ID_V100_FPGA_ETH = 0, + SCENES_ID_V100_COMPUTE_STANDARD = 1, + SCENES_ID_V100_STORAGE_ROCEAA_2x100 = 2, + SCENES_ID_V100_STORAGE_ROCEAA_4x25 = 3, + SCENES_ID_V100_CLOUD = 4, + SCENES_ID_V100_FC = 5, + SCENES_ID_V100_STORAGE_ROCE = 6, + SCENES_ID_V100_COMPUTE_ROCE = 7, + SCENES_ID_V100_STORAGE_TOE = 8, + SCENES_ID_V100_MAX +} scenes_id_v100_define_e; + +/* struct cfg_cmd_dev_cap.sf_svc_attr */ +enum { + SF_SVC_FT_BIT = (1 << 0), + SF_SVC_RDMA_BIT = (1 << 1), +}; + +/* + * Detailed information about VF timer. + * This describes the Fake VF timer info. + */ +struct timer_vf_info_fake { + u16 timer_normal_vf_num; /* Size from the first Non-Fake VF to the last Non-Fake VF */ + u16 timer_fake_vf_id_start; + u16 timer_fake_vf_num; /* Size from the first Fake VF to the last Fake VF */ + u16 rsvd1; + + u32 rsvd[5]; +}; + +/* + * Detailed information about VF timer. + * This describes a VF timer segment. 
+ */ +struct timer_vf_info_seg { + u16 start; + u16 num; +}; + +#define TIMER_VF_SEGS_NUM 7 +#define TIMER_FAKE_VF_SEG 6 + +struct cfg_cmd_host_timer { + struct mgmt_msg_head head; + + u8 host_id; + u8 rsvd1 : 6; + /* + * Detailed info type about VF timer. + * Only one can be set. + */ + u8 timer_vf_info_mode_segs : 1; /* VF timer segments */ + u8 timer_vf_info_mode_fake : 1; /* Fake VF timer info */ + + u8 timer_pf_num; + u8 timer_pf_id_start; + + u16 timer_vf_num; /* Total num of VF */ + u16 timer_vf_id_start; + + union { + struct timer_vf_info_fake fake; + struct timer_vf_info_seg segs[TIMER_VF_SEGS_NUM]; + } timer_vf_info; + + u32 rsvd3; +}; + +struct cfg_cmd_dev_cap { + struct mgmt_msg_head head; + + u16 func_id; + u16 svc_cap_en_h; + + /* Public resources */ + u8 host_id; + u8 ep_id; + u8 er_id; + u8 port_id; + + u16 host_total_func; + u8 host_pf_num; + u8 pf_id_start; + u16 host_vf_num; + u16 vf_id_start; + u8 host_oq_id_mask_val; + u8 timer_en; + u8 host_valid_bitmap; + u8 rsvd_host; + + u16 svc_cap_en; /* svc_cap_en lower 16 bit */ + u16 max_vf; + u8 flexq_en; + u8 valid_cos_bitmap; + /* Reserved for func_valid_cos_bitmap */ + u8 port_cos_valid_bitmap; + u8 func_gpa_spu_en; + u8 dev_cos_valid_bitmap; + u8 dev_default_cos; + u8 cos_mask_mode; + u8 rsvd_func2; + + u8 sf_svc_attr; + u8 func_sf_en; + u8 lb_mode; + u8 smf_pg; + + u32 max_conn_num; + u16 max_stick2cache_num; + u16 max_bfilter_start_addr; + u16 bfilter_len; + u16 hash_bucket_num; + + /* shared resource */ + u8 host_sf_en; + u8 master_host_id; + u8 srv_multi_host_mode; + u8 virtio_vq_size; + u16 vio_func_num; /* virtio + nvme function num,共用同一片cache */ + u16 nvme_qp_num; + u32 virtio_vq_num; + u32 rsvd_func4[3]; + + /* l2nic */ + u16 nic_max_sq_id; + u16 nic_max_rq_id; + u16 nic_default_num_queues; + u16 rsvd1_nic; + u32 rsvd2_nic[2]; + + /* RoCE */ + u32 roce_max_qp; + u32 roce_max_cq; + u32 roce_max_srq; + u32 roce_max_mpt; + u32 roce_max_drc_qp; + + u32 roce_cmtt_cl_start; + u32 
roce_cmtt_cl_end; + u32 roce_cmtt_cl_size; + + u32 roce_dmtt_cl_start; + u32 roce_dmtt_cl_end; + u32 roce_dmtt_cl_size; + + u32 roce_wqe_cl_start; + u32 roce_wqe_cl_end; + u32 roce_wqe_cl_size; + u8 roce_srq_container_mode; + u8 hyper_qpc_entry_size_en; + u8 rsvd_roce1[2]; + u32 roce_max_child_ctx_num; + u32 rsvd_roce2[4]; + + /* IPsec */ + u32 ipsec_max_sactxs; + u16 ipsec_max_cq; + u16 rsvd_ipsec1; + u16 ipsec_max_spctxs; + u16 ipsec_sp_hash_bucket_num; + u32 ipsec_sa_hash_bucket_num; + + /* OVS */ + u32 ovs_max_qpc; + u32 rsvd_ovs1[3]; + + /* ToE */ + u32 toe_max_pctx; + u32 toe_max_cq; + u16 toe_max_srq; + u16 toe_srq_id_start; + u16 toe_max_mpt; + u16 rsvd_toe_1; + u32 toe_max_cctxt; + u32 rsvd_toe[1]; + + /* FC */ + u32 fc_max_pctx; + u32 fc_max_scq; + u32 fc_max_srq; + + u32 fc_max_cctx; + u32 fc_cctx_id_start; + + u8 fc_vp_id_start; + u8 fc_vp_id_end; + u8 rsvd_fc1[2]; + u32 rsvd_fc2[5]; + + /* VBS */ + u16 vbs_max_volq; + u8 vbs_main_pf_enable; + u8 vbs_vsock_pf_enable; + u8 vbs_fushion_queue_pf_enable; + u8 rsvd0_vbs; + u16 vbs_host_dma_data_cos : 3; + u16 vbs_vmio_cpy_data_cos : 3; + u16 vbs_volq_cos : 3; + u16 rsvd1_vbs : 7; + u32 vbs_child_ctx_num : 21; + u32 rsvd2_vbs : 11; + u32 vbs_hash_bucket_num : 18; + u32 rsvd3_vbs : 14; + + /* FakeVF */ + u16 fake_vf_start_id; + u16 fake_vf_num; + u32 fake_vf_max_pctx; + u16 fake_vf_bfilter_start_addr; + u16 fake_vf_bfilter_len; + + /* JBOF */ + u32 rsvd_jbof; + u32 jbof_hash_bucket_num; + u32 jbof_max_pctx; + u32 jbof_max_cctx; + + /* DMMU */ + u16 max_fake_pasid; + u16 min_fake_pasid; + u16 dmmu_cl_start; + u16 dmmu_cl_end; + + /* Rsvd */ + u32 rsvd_glb[2]; +}; + +#define MAX_CAP_LEN_QWORD 2000 +struct cfg_cmd_ext_dev_cap { + struct mgmt_msg_head head; + + u16 func_id; + u16 rsvd1; + + u8 ext_cap[MAX_CAP_LEN_QWORD]; +}; + +struct cfg_cmd_tlv_hdr { + u16 type; + u16 len; +}; + +struct ub_firmware_caps { + u32 is_tpf; + u32 vf_cnt; + u32 max_jfc; + u32 max_jfr; + u32 max_jetty; + u32 max_jetty_grp; + u32 
max_tp; + u32 max_tpg; + u32 max_vtp; + u32 max_utp; + u32 max_gid; + u32 max_mpts; + u32 max_mtu; + u32 max_jfrc; + u32 cqc_entry_sz; + u32 srqc_entry_sz; + u32 qpc_entry_sz; +}; + +struct cfg_roce_ext_caps { + u32 rsvd_qp; + u32 rsvd_qp_back; + u32 rsvd_cq; + u32 rsvd_cq_back; + u32 rsvd_srq; + u32 rsvd_srq_back; + u32 max_pd; + u32 max_xrcd; + u32 max_gid; +}; + +struct cfg_jbof_ext_caps { + u32 jbof_hash_bucket_num; + u32 jbof_max_pctx; + u32 jbof_max_cctx; +}; + +struct cfg_fake_vf_ext_caps { + u32 scqc_fake_vf_ctx_num; + u32 srqc_fake_vf_ctx_num; + u32 gid_fake_vf_ctx_num; + u32 mpt_fake_vf_ctx_num; + u32 childc_fake_vf_ctx_num; + + u8 qpc_fake_vf_ctx_size_order; + u8 qpc_fake_vf_ctx_size_order_en; + u16 fake_vf_parent_func_id; /* Parent function id of the fake vf group */ + + u8 fake_vf_lazy_init; + u8 rsvd1[0x3]; + + u32 rsvd[0x5]; +}; + +struct comm_info_ext_cap { + u8 max_smf_num; + u8 bat_cid_index_bit_width; + + /* CFM - CCP */ + u16 ccp_child_ctx_sz; /* 12 bits */ + u32 ccp_max_child_ctx; /* 20 bits */ + + /* SRIOV - ext cap */ + u32 vf_isolation : 1; /* The VF communicates directly with the Mgmt */ + u32 rsvd1 : 31; + + u32 rsvd3[0x8]; +}; + +typedef struct mpu_ub_ext_cap { + struct cfg_cmd_tlv_hdr ub_ext_cap_mgmt; + struct ub_firmware_caps ub_ext_cap_content; +} mpu_ub_ext_cap_s; + +typedef struct mpu_roce_ext_cap { + struct cfg_cmd_tlv_hdr roce_ext_cap_mgmt; + struct cfg_roce_ext_caps roce_ext_cap_content; +} mpu_roce_ext_cap_s; + +typedef struct mpu_jbof_ext_cap { + struct cfg_cmd_tlv_hdr jbof_ext_cap_mgmt; + struct cfg_jbof_ext_caps jbof_ext_cap_content; +} mpu_jbof_ext_cap_s; + +typedef struct mpu_fake_vf_ext_cap { + struct cfg_cmd_tlv_hdr mgmt; + struct cfg_fake_vf_ext_caps content; +} mpu_fake_vf_ext_cap_s; + +typedef struct { + u32 fw_img_hdr_size; + u32 fw_tile_text_size; + u32 rsvd[6]; +} cfg_fw_update_ext_caps; + +typedef struct mpu_fw_update_ext_cap { + struct cfg_cmd_tlv_hdr mgmt; + cfg_fw_update_ext_caps fw_update_caps; +} 
mpu_fw_update_ext_cap_s; + + +typedef struct mpu_dev_comm_info_ext_cap { + struct cfg_cmd_tlv_hdr mgmt; + struct comm_info_ext_cap comm_info; +} mpu_dev_comm_info_ext_cap_s; + +#endif \ No newline at end of file diff --git a/hinic5/src/dpu_develop_interface/fw_msg_intf/cfm/cfm_cmd.h b/hinic5/src/dpu_develop_interface/fw_msg_intf/cfm/cfm_cmd.h new file mode 100644 index 00000000..9cc0237b --- /dev/null +++ b/hinic5/src/dpu_develop_interface/fw_msg_intf/cfm/cfm_cmd.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * Description : cfm cmd define + * Author : / + * Create : 2025/08/07 + * Notes : + * History : None + */ +#ifndef CFM_CMD_H +#define CFM_CMD_H + +#define HINIC5_BOND_MSG_TIMEOUT_MS (60000) + +/* TODO:bond ccp qos待整合在同一个枚举中 */ +typedef enum tag_cfm_mpu_drv_cmd { + CFM_MPU_CMD_BOND_CREATE = 0, + CFM_MPU_CMD_BOND_DELETE = 1, + CFM_MPU_CMD_BOND_SET = 2, + CFM_MPU_CMD_BOND_GET = 3, + CFM_MPU_CMD_BOND_CFG = 4, /* bond配置和查询 */ + CFM_MPU_CMD_BOND_LINK_INFO_GET = 5, + CFM_MPU_CMD_PASS_ARP_PKT = 6, /* 通过mpu发送arp报文 */ + CFM_MPU_CMD_CCP_COMM_PARA_GET = 16, + CFM_MPU_CMD_CCP_ALGO_PARA_GET = 17, + CFM_MPU_CMD_QOS_VPORT_MAPPING_SET = 32, + CFM_MPU_CMD_QOS_VPORT_SHAPER_SET = 33, + CFM_MPU_CMD_QOS_VPORT_SHAPER_CLR = 34, + CFM_MPU_CMD_QOS_VPORT_SHAPER_GET = 35, /* qos_base配置和查询 */ + CFM_MPU_CMD_QOS_CC_L2D_SET = 36, /* CFM QoS CC L2DMEM写操作 */ + CFM_MPU_CMD_QOS_CC_L2D_GET = 37, /* CFM QoS CC L2DMEM读操作 */ + CFM_MPU_CMD_EXTEND_END = 64 +} cfm_mpu_drv_cmd_e; + +typedef enum tag_cfm_npu_cmdq_cmd { + CFM_NPU_CMD_CCP_CTX = 0, + CFM_NPU_CMD_CCP_STATISTICS = 1, + CFM_NPU_CMD_HMM_OPS = 2, + CFM_NPU_CMD_MAX, +} cfm_npu_drv_cmd_e; + +#endif \ No newline at end of file diff --git a/hinic5/src/dpu_develop_interface/fw_msg_intf/cfm/fast_msg_common_define.h b/hinic5/src/dpu_develop_interface/fw_msg_intf/cfm/fast_msg_common_define.h new file mode 100644 index 00000000..9a2fc842 --- /dev/null +++ 
b/hinic5/src/dpu_develop_interface/fw_msg_intf/cfm/fast_msg_common_define.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2019-2023. All rights reserved. + * Description : fast msg common define, for driver & micro_code usage, common struct defined + * Version : 1.0 + * Date : 2023/8/24 + * Note : Auto generated, MUST NOT modify manually! + */ + +#ifndef FAST_MSG_COMMON_DEFINE_H +#define FAST_MSG_COMMON_DEFINE_H + +#if defined(__LINUX__) || defined(__VMWARE__) +#include <linux/types.h> +#else +#include "typedef.h" +#endif + +#define FW_UBCORE_MSG_NOTIFY_FASTMSG_DRAIN 0x16 +#define MAX_FAST_MSG_LEN 2032 + +typedef struct hisdk5_fast_msg_header { +#if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && ((BYTE_ORDER == BIG_ENDIAN)) + u8 cmd; + u8 mod; + u16 status; + + u16 src_func_id; + u16 dst_func_id; + + u16 rsvd : 10; + u16 ulp_format : 4; + u16 nack : 1; + u16 send : 1; + u16 data_len; +#else + u16 status; + u8 mod; + u8 cmd; + + u16 dst_func_id; + u16 src_func_id; + + u16 data_len; + u16 send : 1; + u16 nack : 1; + u16 ulp_format : 4; + u16 rsvd : 10; +#endif + u32 rsvd1; +} hisdk5_fast_msg_header; + +typedef struct hisdk5_fast_msg_buf { + union { + hisdk5_fast_msg_header fast_msg_header; + u32 rq_offset; + }; + u8 fast_msg_data[MAX_FAST_MSG_LEN]; +} hisdk5_fast_msg_buf; + +#endif /* FAST_MSG_COMMON_DEFINE_H */ \ No newline at end of file diff --git a/hinic5/src/dpu_develop_interface/fw_msg_intf/hinic5_cqm/hinic5_cqm_npu_cmd.h b/hinic5/src/dpu_develop_interface/fw_msg_intf/hinic5_cqm/hinic5_cqm_npu_cmd.h new file mode 100644 index 00000000..9a0cdf9f --- /dev/null +++ b/hinic5/src/dpu_develop_interface/fw_msg_intf/hinic5_cqm/hinic5_cqm_npu_cmd.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2019-2023. All rights reserved. + * Description: hinic5_cqm common command interface define.
+ * Author: None + * Create: 2015/11/13 + */ +#ifndef HINIC5_CQM_NPU_CMD_H +#define HINIC5_CQM_NPU_CMD_H + +typedef enum { + HINIC5_CQM_CMD_T_INVALID = 0, + HINIC5_CQM_CMD_T_BAT_UPDATE = 1, + HINIC5_CQM_CMD_T_CLA_UPDATE = 2, + HINIC5_CQM_CMD_T_BLOOMFILTER_SET = 3, + HINIC5_CQM_CMD_T_BLOOMFILTER_CLEAR = 4, + HINIC5_CQM_CMD_T_COMPACT_SRQ_UPDATE = 5, + HINIC5_CQM_CMD_T_CLA_CACHE_INVALID = 6, + HINIC5_CQM_CMD_T_BLOOMFILTER_INIT = 7, + HINIC5_CQM_CMD_T_CLA_RESET = 8, /* Reset VF's CLA */ + HINIC5_CQM_CMD_T_MAX +} hinic5_cqm_cmd_type_e; + +#endif /* HINIC5_CQM_NPU_CMD_H */ diff --git a/hinic5/src/dpu_develop_interface/fw_msg_intf/hinic5_cqm/hinic5_cqm_npu_cmd_defs.h b/hinic5/src/dpu_develop_interface/fw_msg_intf/hinic5_cqm/hinic5_cqm_npu_cmd_defs.h new file mode 100644 index 00000000..2b97ca6e --- /dev/null +++ b/hinic5/src/dpu_develop_interface/fw_msg_intf/hinic5_cqm/hinic5_cqm_npu_cmd_defs.h @@ -0,0 +1,83 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2019-2023. All rights reserved. + * Description: hinic5_cqm common command interface define. 
+ * Author: None + * Create: 2015/11/13 + */ +#ifndef HINIC5_CQM_NPU_CMD_DEFS_H +#define HINIC5_CQM_NPU_CMD_DEFS_H + +#if defined(__LINUX__) || defined(__VMWARE__) +#include <linux/types.h> +#else +#include "typedef.h" +#endif + +typedef struct tag_hinic5_cqm_cla_cache_invalid_cmd { + u32 gpa_h; + u32 gpa_l; + + u32 cache_size; /* CLA cache size=4096B */ + + u32 smf_id; + u32 func_id; +} hinic5_cqm_cla_cache_invalid_cmd_s; + +typedef struct tag_hinic5_cqm_cla_update_cmd { + /* Gpa address to be updated */ + u32 gpa_h; // byte addr + u32 gpa_l; // byte addr + + /* Updated Value */ + u32 value_h; + u32 value_l; + + u32 smf_id; + u32 func_id; +} hinic5_cqm_cla_update_cmd_s; + +typedef struct tag_hinic5_cqm_cla_reset_cmd { + u32 func_id; + u32 rsvd1; + + u32 rsvd[0x20]; /* Reserve 2 dwords for each BAT entries */ +} hinic5_cqm_cla_reset_cmd_s; + +typedef struct tag_hinic5_cqm_bloomfilter_cmd { + u32 rsv1; + +#if (BYTE_ORDER == LITTLE_ENDIAN) + u32 k_en : 4; + u32 func_id : 16; + u32 rsv2 : 12; +#else + u32 rsv2 : 12; + u32 func_id : 16; + u32 k_en : 4; +#endif + + u32 index_h; + u32 index_l; +} hinic5_cqm_bloomfilter_cmd_s; + +#define HINIC5_CQM_BAT_MAX_SIZE 256 +typedef struct tag_hinic5_cqm_cmdq_bat_update { + u32 offset; // byte offset,16Byte aligned + u32 byte_len; // max size: 256byte + u8 data[HINIC5_CQM_BAT_MAX_SIZE]; + u32 smf_id; + u32 func_id; +} hinic5_cqm_bat_update_cmd_s; + + +typedef struct tag_hinic5_cqm_bloomfilter_init_cmd { + u32 bloom_filter_len; // 16Byte aligned + u32 bloom_filter_addr; +} hinic5_cqm_bloomfilter_init_cmd_s; + +typedef struct tag_compact_srq_update_cmd { + u32 srqid; + u32 data[32]; +} compact_srq_update_cmd_s; + +#endif /* HINIC5_CQM_NPU_CMD_DEFS_H */ diff --git a/hinic5/src/dpu_develop_interface/fw_msg_intf/macsec/macsec_mpu_cmd.h b/hinic5/src/dpu_develop_interface/fw_msg_intf/macsec/macsec_mpu_cmd.h new file mode 100644 index 00000000..76cf0e83 --- /dev/null +++ b/hinic5/src/dpu_develop_interface/fw_msg_intf/macsec/macsec_mpu_cmd.h @@ 
-0,0 +1,102 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. + * Filename : macsec_mpu_cmd.h + * Creation time : 2024/02/23 + * Description : COMM Commands between host and MPU(macsec) + * Version : 1.0 + */ + +#ifndef MACSEC_MPU_CMD_H +#define MACSEC_MPU_CMD_H + +#include <linux/bits.h> + +#ifndef BIT +#define BIT(n) (1UL << (n)) +#endif + +/** + * @brief macsec_mgmt_cmd_e - COMM Commands between hinicadm to MPU(macsec) + * @details MACSEC与驱动之间交互的操作码,涉及SC、SA、MIB以及SERVICE的管理 + */ +typedef enum { + MACSEC_CMD_SC_OP = 0, /* sc operation @see struct tag_macsec_cmd_sc_operation_s */ + MACSEC_CMD_SA_OP, /* sa operation @see struct tag_macsec_cmd_sa_operation_s */ + MACSEC_CMD_GET_PORT_MIB, /* get port mib @see struct tag_macsec_cmd_port_mib_operation_s */ + MACSEC_CMD_GET_SC_MIB, /* get sc mib @see struct tag_macsec_cmd_sc_mib_operation_s */ + MACSEC_CMD_GET_ERR_CNT, /* get error cnt @see struct tag_macsec_cmd_err_cnt_operation_s */ + MACSEC_CMD_SERVICE_OP, /* service op @see struct macsec_cmd_service_operation_s */ + MACSEC_CMD_FEATURE_NEGO_OP, /* macsec feature negotiate op + * @see struct macsec_feature_nego_cmd_s + */ + MACSEC_CMD_FLUSH_OP, /* macsec resource flush op @see struct tag_macsec_flush_cmd_s */ + MACSEC_CMD_WHITELIST_OP, /* macsec whitelist op @see struct macsec_whitelist_cmd_out */ +} macsec_mgmt_cmd_e; + +/** + * @brief macsec_mbox_sc_op_cmd_e - opcode of mailbox sc opcode + * @details SC加解密侧的操作码,涉及创建、删除和更新等操作 + */ +typedef enum { + MACSEC_CMD_ENC_SC_CREATE = 0, /* enc sc create */ + MACSEC_CMD_ENC_SC_DELETE, /* enc sc delete */ + MACSEC_CMD_ENC_SC_UPDATE, /* enc sc update */ + MACSEC_CMD_ENC_SC_GET_INFO, /* get enc sc */ + MACSEC_CMD_DEC_SC_CREATE, /* dec sc create */ + MACSEC_CMD_DEC_SC_DELETE, /* dec sc delete */ + MACSEC_CMD_DEC_SC_UPDATE, /* dec sc update */ + MACSEC_CMD_DEC_SC_GET_INFO, /* get dec sc */ +} macsec_mbox_sc_op_cmd_e; + +/** + * @brief macsec_mbox_sa_op_cmd_e - opcode of mailbox sa opcode + * 
@details SA加解密侧的操作码,涉及创建、删除和更新等操作 + */ +typedef enum { + MACSEC_CMD_ENC_SA_CREATE = 0, /* enc sa create */ + MACSEC_CMD_ENC_SA_DELETE, /* enc sa delete */ + MACSEC_CMD_ENC_SA_UPDATE, /* enc sa update */ + MACSEC_CMD_ENC_SA_GET_INFO, /* get enc sa */ + MACSEC_CMD_DEC_SA_CREATE, /* dec sa create */ + MACSEC_CMD_DEC_SA_DELETE, /* dec sa delete */ + MACSEC_CMD_DEC_SA_UPDATE, /* dec sa update */ + MACSEC_CMD_DEC_SA_GET_INFO, /* get dec sa */ +} macsec_mbox_sa_op_cmd_e; + +/** + * @brief macsec_mbox_service_op_cmd_e - opcode of mailbox service opcode + * @details MACSEC服务相关操作,如驱动加载时的使能MACSEC功能,驱动卸载时的关闭MACSEC功能 + */ +typedef enum { + MACSEC_CMD_SERVICE_OP_MACSEC_DISABLE = 0, /* macsec disable */ + MACSEC_CMD_SERVICE_OP_MACSEC_ENABLE, /* macsec enable */ +} macsec_mbox_service_op_cmd_e; + +/** + * @brief macsec_mbox_feature_nego_op_cmd_e - opcode of mailbox feature negotiate opcode + * @details MACSEC特性协商相关操作,对特性进行查询或者设置 + */ +typedef enum { + MACSEC_FEATURE_NEGO_OPCODE_GET = 0, /* feature negotiation get */ + MACSEC_FEATURE_NEGO_OPCODE_SET = 1, /* feature negotiation set (reserve for using) */ +} macsec_mbox_feature_nego_op_cmd_e; + +/** + * @brief macsec_mbox_feature_cap_e - list of features supported by macsec + * @details MACSEC支持特性列表 + */ +typedef enum { + MACSEC_F_HARDEN_PATH = BIT(0), /* macsec does not have NPU */ + MACSEC_F_SUPPORT_SM4 = BIT(1), /* macsec support using sm4 */ +} macsec_mbox_feature_cap_e; + +/** + * @brief macsec_mbox_flush_op_cmd_e - opcode of mailbox macsec flush opcode + * @details MACSEC资源清理相关操作 + */ +typedef enum { + MACSEC_CMD_FLUSH_SC_OP = 0, /* flush sc and sa resource */ + MACSEC_CMD_FLUSH_SA_OP, /* flush sa resource */ +} macsec_mbox_flush_op_cmd_e; + +#endif /* MACSEC_MPU_CMD_H */ diff --git a/hinic5/src/dpu_develop_interface/fw_msg_intf/macsec/macsec_mpu_cmd_defs.h b/hinic5/src/dpu_develop_interface/fw_msg_intf/macsec/macsec_mpu_cmd_defs.h new file mode 100644 index 00000000..f78ea34b --- /dev/null +++ 
b/hinic5/src/dpu_develop_interface/fw_msg_intf/macsec/macsec_mpu_cmd_defs.h @@ -0,0 +1,387 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. + * Filename : macsec_mpu_cmd_defs.h + * Creation time : 2024/02/23 + * Description : COMM Commands between host and MPU(macsec) + * Version : 1.0 + */ + +#ifndef MACSEC_MPU_CMD_DEFS_H +#define MACSEC_MPU_CMD_DEFS_H + +#include "mpu_cmd_base_defs.h" + +#define MACSEC_PORT_NUM 4 /* macsec最大的端口规格 */ +#define MACSEC_SC_NUM 32 /* macsec最大的SC规格 */ +#define MACSEC_SA_NUM 64 /* macsec最大的SA规格 */ +#define MACSEC_MAX_FEATURE_QWORD 4 /* macsec最大特性规格 */ +#define MACSEC_WHITELIST_DMAC_ID_MAX 16 /* macsec dmac id最大模板数量 */ +#define MACSEC_WHITELIST_TOOL_IN_BUF_MAX 1024 +#define MACSEC_WHITELIST_TOOL_OUT_BUF_MAX 4096 +#define MACSEC_WHITELIST_DMAC_ENABLE 1 /* macsec dmac使能 */ +#define MACSEC_WHITELIST_DMAC_DISABLE 0 /* macsec dmac不使能 */ +#define MACSEC_WHITELIST_SERVICE_ENABLE 1 /* macsec 使能 */ +#define MACSEC_WHITELIST_SERVICE_DISABLE 0 /* macsec 不使能 */ +#define WHITELIST_FUNC_ENABLE 0x1 +#define WHITELIST_FUNC_DISABLE 0x0 +#define MACSEC_DMAC_TEMPLATE_NUM 16 /* macsec dmac最大模板数量 */ +#define MACSEC_ETYPE_TEMPLATE_NUM 16 /* macsec etype最大模板数量 */ +#define MACSEC_VLANID_MAX 4094 /* macsec vlanid最大值 */ +#define MACSEC_VLANID_MIN 1 /* macsec vlanid最小值 */ +#define MACSEC_RXVLANID_MASK_MAX 0xFFF /* macsec rxvlan mask最大值 */ +#define MACSEC_DMAC_VALUE_MAX 0xFFFFFFFFFFFF /* macsec dmac最大值 */ +#define MACSEC_DMAC_MASK_MAX 0xFF /* macsec dmac mask最大值 */ + +/** + * @brief macsec_sa_info_s - SA结构体 + * @details 通过该结构体进行SA的管理 + */ +typedef struct tag_macsec_sa_info_s { + // encryption + u64 pn_th; /* The threshold of PN */ + u64 next_pn; /* NEXTPN number */ + u8 enable_transmit; /* Indicates if the transmit SA can be used to receive frames */ + u8 rsvd0[7]; /* for expanding */ + + // decryption + u8 replay_protect; /* Enable or disable replay protection function */ + u8 enable_receive; /* Indicates if the transmit SA can 
be used to receive frames */ + u8 rsvd1[2]; + u32 replay_window; /* Indicates the replay protection window size */ + u64 lowest_pn; /* LOWESTPN number */ + + // common + u8 an; /* an value used by sa */ + /* The confidentaility protection offset options of the cipher suite */ + u8 confidentiality_offset; + u8 current_key_length; /* Indicate the key length the SA is currently using */ + /* Indicates to enable the extended packet number while receiving MACsec frames */ + u8 extended_pn_enable; + /* Indicates the crypto algorithm which the SA is currently using */ + u8 current_crypto_algo; + u8 rsvd2[7]; /* for expanding */ + u32 ssci; /* If extended_pn_enable is 1, use SSCI to compose IV */ + u32 salt[3]; /* Used to generate IV in extended Packet Number mode, 96bit */ + u32 sak_crc; /* sak key crc val */ + u64 sci; /* 64-bit SCI value used to identify an SC */ + u32 sak[8]; /* cipher suite key,max 256bit */ +} macsec_sa_info_s; + +/** + * @brief macsec_sc_info_s - SC结构体 + * @details 通过该结构体进行SC的管理 + */ +typedef struct tag_macsec_sc_info_s { + // encryption + u8 protect_frames; /* Protect function for frames */ + u8 protection_mode; /* Protection options of the Cipher Suite */ + u8 include_sci_enable; /* Indicates to include the SCI information in SecTAG field */ + u8 use_es_enable; /* Indicates to enable the ES bit in SecTAG */ + u8 use_scb_enable; /* Indicates to enable the SCB bit in SecTAG field */ + u8 encoding_sa; /* The current transmitting SA in use */ + u8 rsvd0[2]; /* for expanding */ + + // decryption + u8 validate_frames; /* Uses to control the validation function for frames per SC */ + u8 rsvd1[3]; /* for expanding */ + + // common + u8 sa_an[4]; /* AN number used by SA, 4 refer to max num */ + u64 sci; /* 64-bit SCI value used to identify an SC */ +} macsec_sc_info_s; /* Common head info */ + +/** + * @brief macsec_cmd_sc_operation_s - 与主机驱动之间的SC管理信息 + * @details 通过该信息的传递,进行SC的创建、删除和更新,各SC之间独立。 + */ +typedef struct tag_macsec_cmd_sc_operation_s { + struct 
mgmt_msg_head head; /* Common head info */ + u8 op_code; /* Operation code, 0: enc get/1: enc set/2: enc update + * 3: dec get/4: dec set/5: dec update + */ + u8 rsvd[3]; /* reserved for expanding */ + macsec_sc_info_s sc_info; /* 与主机驱动之间传递的sc信息 */ +} macsec_cmd_sc_operation_s; + +/** + * @brief macsec_cmd_sa_operation_s - 与主机驱动之间的SA管理信息 + * @details 用作SA的创建、删除和更新,其中包括密钥管理、重放管理以及PN值等信息 + */ +typedef struct tag_macsec_cmd_sa_operation_s { + struct mgmt_msg_head head; /* Common head info */ + u8 op_code; /* Operation code, 0: enc get/1: enc set/2: enc update + *3: dec get/4: dec set/5: dec update + */ + u8 rsvd[3]; /* reserved for expanding */ + macsec_sa_info_s sa_info; /* 与主机驱动之间传递的sa信息 */ +} macsec_cmd_sa_operation_s; + +/** + * @brief macsec_port_mib_info_s - PORT MIB具体内容 + * @details 包括PORT级别的SecTAG异常、完整性保护和加密保护的统计值 + */ +typedef struct { + u64 macsec_mib_txpkts_untagged; /* 传输的不带SecTAG的报文数 */ + /* 传输的只做完整性保护不做加密的字节数 */ + u64 macsec_mib_txoctets_protected; + /* 传输的即做完整性保护也做加密的字节数 */ + u64 macsec_mib_txoctets_encrypted; + /* 接收不带SecTAG的报文数 not strict mode */ + u64 macsec_mib_rxpkts_untagged; + u64 macsec_mib_rxpkts_notag; /* 接收不带SecTAG的报文数 strict mode */ + /* 接收的无效SecTAG或ICV无效的报文数,V、SC、SL出现错误 */ + u64 macsec_mib_rxpkts_badtag; + /* 接收的未知SCI或未使用的SA的报文数 未设置validateEnable&C&strict mode */ + u64 macsec_mib_rxpkts_nosa; + /* 接收的未知SCI或未使用的SA的报文数 设置validateEnable&C&strict mode */ + u64 macsec_mib_rxpkts_nosaerror; + /* 接收到的只做完整性保护不做加密的字节数 */ + u64 macsec_mib_rxoctets_validated; + /* 接收到的即做完整性保护也做加密的字节数 */ + u64 macsec_mib_rxoctets_decrypted; +} macsec_port_mib_info_s; + +/** + * @brief macsec_sc_mib_info_s - SC MIB具体内容 + * @details 包括SC级别的完整性保护和加密保护的统计值、重放以及安全校验相关的统计值 + */ +typedef struct { + /* 传输的只做完整性保护不做加密的报文数 */ + u64 macsec_mib_txscpkts_protected; + /* 传输的即做完整性保护也做加密的报文数 */ + u64 macsec_mib_txscpkts_encrypted; + u64 macsec_mib_rxscpkts_ok; /* 成功验证且在重放窗口内接收的报文数 */ + u64 macsec_mib_rxscpkts_delayed; /* 报文的PN小于可接受的最小PN的报文数 */ + /* 
ReplayProtect为true,且报文的PN小于重放检查PN的下限的报文数 */ + u64 macsec_mib_rxscpkts_late; + /* validateFrames disable,SecTAG中的C位没有设置, + * 且该数据包不是重放数据包 + */ + u64 macsec_mib_rxscpkts_unchecked; + /* ICV验证失败,但由于validateFrames为strtic + * 或数据已加密而被丢弃的报文数 + */ + u64 macsec_mib_rxscpkts_notvalid; + /* validateFrames标识check, + * 但是数据没有加密(原始帧可能是recovered)的报文数 + */ + u64 macsec_mib_rxscpkts_invalid; +} macsec_sc_mib_info_s; + +/** + * @brief macsec_cmd_port_mib_operation_s - PORT MIB管理信息 + * @details 遵循macsec标准协议,获取指定端口的MIB信息 + */ +typedef struct tag_macsec_cmd_port_mib_operation_s { + struct mgmt_msg_head head; /* Management msg header info, 8B */ + macsec_port_mib_info_s port_mib; /* port mib info */ + u64 reserved; /* reserved for expanding */ +} macsec_cmd_port_mib_operation_s; + +/** + * @brief macsec_cmd_sc_mib_operation_s - SC MIB管理信息 + * @details 遵循macsec标准协议,获取单个SC的MIB信息 + */ +typedef struct tag_macsec_cmd_sc_mib_operation_s { + struct mgmt_msg_head head; /* Management msg header info, 8B */ + macsec_sc_mib_info_s sc_mib; /* sc mib info */ + u64 sci; /* 64-bit SCI value used to identify an SC */ + u8 reserved[8]; /* reserved for expanding */ +} macsec_cmd_sc_mib_operation_s; + +/** + * @brief macsec_enc_cnt_u - MACSEC加密侧的DFX统计值 + * @details 包括加密侧可纠和不可纠异常、数据路径的sop和eop异常以及根据profile id进行不同处理的统计值等 + */ +typedef union { + struct { + u32 macsec_enc_msop_cnt; /* MSOP中断产生的次数 */ + u32 macsec_enc_meop_cnt; /* MEOP中断产生的次数 */ + u32 macsec_enc_iadp_ierr_c_cnt; /* IADP中断可纠错误统计 */ + u32 macsec_enc_iadp_ierr_u_cnt; /* IADP中断不可纠错误统计 */ + u32 macsec_enc_oadp_ierr_c_cnt; /* OADP中断可纠错误统计 */ + u32 macsec_enc_oadp_ierr_u_cnt; /* OADP中断不可纠错误统计 */ + u32 macsec_enc_cs_ierr_c_cnt; /* CS中断可纠错误统计 */ + u32 macsec_enc_cs_ierr_u_cnt; /* CS中断不可纠错误统计 */ + u32 macsec_enc_mib_ierr_c_cnt; /* MIB中断可纠错误统计 */ + u32 macsec_enc_mib_ierr_u_cnt; /* MIB中断不可纠错误统计 */ + u32 macsec_enc_dpp_ierr_c_cnt; /* DPP中断可纠错误统计 */ + u32 macsec_enc_dpp_ierr_u_cnt; /* DPP中断不可纠错误统计 */ + u32 macsec_enc_dpp_wrongsa_cnt; /* DPP错误SA统计 */ + u32 
macsec_enc_dpp_illegal_pkt_cnt; /* 非法报文统计 */ + /* profile id为0的bypass报文统计 */ + u64 macsec_enc_dpp_profile_id_0_bypass_pkt_cnt; + /* profile id非0但profile信息为0被bypass报文统计 */ + u32 macsec_enc_dpp_profile_read_all0_bypass_pkt_cnt; + u32 macsec_enc_dpp_profile_id_1_cnt; /* profile id为1的报文统计 */ + u32 macsec_enc_dpp_profile_id_2_cnt; /* profile id为2的报文统计 */ + u32 macsec_enc_dpp_profile_id_3_cnt; /* profile id为3的报文统计 */ + u64 macsec_enc_white_list_plaintext_pkt_cnt; /* 白名单放通报文统计 */ + u64 macsec_enc_in_sop_cnt; /* 加密引擎入端口产生SOP次数 */ + u64 macsec_enc_in_eop_cnt; /* 加密引擎入端口产生EOP次数 */ + u64 macsec_enc_in_abort_cnt; /* 加密引擎入端口丢弃的次数 */ + u64 macsec_enc_out_sop_cnt; /* 加密引擎出端口产生SOP次数 */ + u64 macsec_enc_out_eop_cnt; /* 加密引擎出端口产生EOP次数 */ + u64 macsec_enc_out_abort_cnt; /* 加密引擎出端口丢弃的次数 */ + u32 macsec_enc_drop_pkt_cnt; /* 外部端口丢包的数量 */ + u32 macsec_enc_oadp_out_msop_cnt; /* output sop 统计次数 */ + u32 macsec_enc_oadp_out_meop_cnt; /* output eop 统计次数 */ + } enc_cnt_s; + u32 cnt_info[0]; +} macsec_enc_cnt_u; + +/** + * @brief macsec_dec_cnt_u - MACSEC解密侧的DFX统计值 + * @details 包括解密侧可纠和不可纠异常、数据路径的sop和eop异常以及根据profile id进行不同处理的统计值等 + */ +typedef union { + struct { + u32 macsec_dec_msop_cnt; /* MSOP中断产生的次数 */ + u32 macsec_dec_meop_cnt; /* MEOP中断产生的次数 */ + u32 macsec_dec_iadp_ierr_c_cnt; /* IADP中断可纠错误统计 */ + u32 macsec_dec_iadp_ierr_u_cnt; /* IADP中断不可纠错误统计 */ + u32 macsec_dec_oadp_ierr_c_cnt; /* OADP中断可纠错误统计 */ + u32 macsec_dec_oadp_ierr_u_cnt; /* OADP中断不可纠错误统计 */ + u32 macsec_dec_pa_ierr_c_cnt; /* PA中断可纠错误统计 */ + u32 macsec_dec_pa_ierr_u_cnt; /* PA中断不可纠错误统计 */ + u32 macsec_dec_pg_ierr_c_cnt; /* PG中断可纠错误统计 */ + u32 macsec_dec_pg_ierr_u_cnt; /* PG中断不可纠错误统计 */ + u32 macsec_dec_mib_ierr_c_cnt; /* MIB中断可纠错误统计 */ + u32 macsec_dec_mib_ierr_u_cnt; /* MIB中断不可纠错误统计 */ + u32 macsec_dec_dpp_ierr_u_cnt; /* DPP中断不可纠错误统计 */ + u32 macsec_dec_dpp_ierr_c_cnt; /* DPP中断可纠错误统计 */ + u32 macsec_dec_dpp_ec01_pkt_cnt; /* E=0且C=1统计 */ + u32 macsec_dec_dpp_ec10_pkt_cnt; /* E=1且C=0统计 */ + u32
macsec_dec_dpp_unknownsci_bypass_pkt_cnt; /* 未知SCI bypass报文统计 */ + u32 macsec_dec_dpp_illegal_pad_pkt_cnt; /* 非法的padding报文统计 */ + u64 macsec_dec_dpp_white_list_plaintext_pkt_cnt; /* 白名单放通报文统计 */ + /* profile id为0的放通报文统计 */ + u64 macsec_dec_dpp_profile_id_0_bypass_pkt_cnt; + /* porfile id非0但profile信息为0被bypass报文统计 */ + u32 macsec_dec_dpp_profile_read_all0_bypass_pkt_cnt; + u32 macsec_dec_dpp_profile_id_1_pkt_cnt; /* profile id为1的报文统计 */ + u32 macsec_dec_dpp_profile_id_2_pkt_cnt; /* profile id为2的报文统计 */ + u32 macsec_dec_dpp_profile_id_3_pkt_cnt; /* profile id为3的报文统计 */ + u32 macsec_dec_dpp_profile_id_4_pkt_cnt; /* profile id为4的报文统计 */ + u32 macsec_dec_dpp_illegal_pkt_cnt; /* 非法报文统计 */ + u64 macsec_dec_intf_in_sop_cnt; /* DEC_INTF入端口sop的统计 */ + u64 macsec_dec_intf_in_eop_cnt; /* DEC_INTF入端口eop的统计 */ + u64 macsec_dec_intf_in_abort_cnt; /* DEC_INTF入端口丢弃的统计 */ + u64 macsec_dec_out_sop_cnt; /* DEC_INTF出端口sop的统计 */ + u64 macsec_dec_out_eop_cnt; /* DEC_INTF出端口eop的统计 */ + u64 macsec_dec_out_abort_cnt; /* DEC_INTF出端口丢弃的统计 */ + u32 macsec_dec_intf_drop_pkt_cnt; /* 端口disable丢弃包统计 */ + u32 macsec_dec_oadp_out_msop_cnt; /* out msop计数统计 */ + u32 macsec_dec_oadp_out_meop_cnt; /* out meop计数统计 */ + } dec_cnt_s; + u32 cnt_info[0]; +} macsec_dec_cnt_u; + +/** + * @brief macsec_cmd_err_cnt_operation_s - macsec内部的dfx cnt信息获取 + * @details 包括加密和解密侧的统计信息,用于问题定位 + */ +typedef struct tag_macsec_cmd_err_cnt_operation_s { + struct mgmt_msg_head head; /* Management msg header info, 8B */ + macsec_enc_cnt_u enc_module_cnt; /* enc cnt info */ + macsec_dec_cnt_u dec_module_cnt; /* dec cnt info */ + u8 reserved[4]; /* reserved for expanding */ +} macsec_cmd_err_cnt_operation_s; + +/** + * @brief macsec_cmd_service_operation_s - macsec服务管理操作 + * @details 包括驱动加卸载时对macsec的处理 + */ +typedef struct tag_macsec_cmd_service_operation_s { + struct mgmt_msg_head head; /* Management msg header info, 8B */ + u8 op_code; /* Operation code, 0: disable macsec/1: enable macsec */ + u8 reserved[7]; /* reserved for 
expanding */ +} macsec_cmd_service_operation_s; + +/** + * @brief macsec_feature_nego_cmd_s - feature negotiation command struct defination + * @details 包括驱动加载时对macsec特性协商的处理 + */ +typedef struct tag_macsec_feature_nego_cmd { + struct mgmt_msg_head head; /* Management msg header info, 8B */ + u64 s_feature[MACSEC_MAX_FEATURE_QWORD]; /* macsec features */ + u8 op_code; /* Operation code, 0: get macsec feature/1: set macsec feature */ + u8 rsvd[7]; /* reserved for expanding */ +} macsec_feature_nego_cmd_s; + +/** + * @brief tag_macsec_flush_cmd_s - MACSEC resource actively cleans up parameters + * @details 下发flush命令时包括的参数 + */ +typedef struct tag_macsec_flush_cmd { + struct mgmt_msg_head head; /* Management msg header info, 8B */ + u64 sci; /* 64-bit SCI value used to identify an SC */ + u8 op_code; /* Operation code, 0: flush sc and sa, 1: flush sa */ + u8 reserved[7]; /* reserved for expanding */ +} tag_macsec_flush_cmd_s; + +// macsec 白名单工具支持的操作类型 +typedef enum macsec_whitelist_tool_op { + MACSEC_WHITELIST_TOOL_OP_ENABLE = 0, + MACSEC_WHITELIST_TOOL_OP_SET, + MACSEC_WHITELIST_TOOL_OP_LIST, + MACSEC_WHITELIST_TOOL_OP_MAX +} macsec_whitelist_tool_op_e; + +// macsec 白名单的配置场景 +typedef enum macsec_whitelist_tool_mode { + MACSEC_WHITELIST_TOOL_MODE_DMAC = 0, + MACSEC_WHITELIST_TOOL_MODE_TXVLAN, + MACSEC_WHITELIST_TOOL_MODE_RXVLAN, + MACSEC_WHITELIST_TOOL_MODE_ETYPE, + MACSEC_WHITELIST_TOOL_MODE_MAX +} macsec_whitelist_tool_mode_e; + +// inbuf 包含一个hdr +typedef struct macsec_whitelist_cmd_hdr { + macsec_whitelist_tool_op_e cmd_type; + macsec_whitelist_tool_mode_e mode_type; +} macsec_whitelist_cmd_hdr_t; + +// inbuf 定义 +typedef struct macsec_whitelist_info { + u64 dmac;/* dmac value */ + u32 vlanid;/* vlanid value */ + u32 mask; + u32 dmac_id; + u32 port; + u32 dmac_mask; + u8 dmac_service; + u8 service; + u8 etype_control; + u8 dmac_flag; + u8 dmac_service_flag; + u8 vlanid_flag; + u8 mask_flag; + u8 etype_flag; + u8 etype_control_flag; + u8 rsvd[3]; +} 
macsec_whitelist_info_t; + +typedef struct macsec_whitelist_cmd_in { + struct mgmt_msg_head head; + macsec_whitelist_cmd_hdr_t hdr; + macsec_whitelist_info_t buf; +} macsec_whitelist_cmd_in_t; + +// outbuf 定义 +typedef struct macsec_whitelist_cmd_out { + struct mgmt_msg_head head; + u64 dmac_list[MACSEC_DMAC_TEMPLATE_NUM]; + u32 dmac_mask_list[MACSEC_DMAC_TEMPLATE_NUM]; + u32 dmac_valid_num; + u8 dmac_port_list[MACSEC_PORT_NUM]; + u32 txvlan_list[MACSEC_PORT_NUM]; + u32 txvlan_valid_num; + u32 rxvlan_list[MACSEC_PORT_NUM]; + u32 rxvlan_mask_list[MACSEC_PORT_NUM]; + u32 rxvlan_valid_num; + u32 etype_list[MACSEC_ETYPE_TEMPLATE_NUM]; +} macsec_whitelist_cmd_out_s; + +#endif /* MACSEC_MPU_CMD_DEFS_H */ \ No newline at end of file diff --git a/hinic5/src/dpu_develop_interface/fw_msg_intf/mag/mag_mpu_cmd.h b/hinic5/src/dpu_develop_interface/fw_msg_intf/mag/mag_mpu_cmd.h new file mode 100644 index 00000000..894390ca --- /dev/null +++ b/hinic5/src/dpu_develop_interface/fw_msg_intf/mag/mag_mpu_cmd.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. 
+ * Description: serdes/mag cmd definition between driver and mpu + * Author: ETH group + * Create: 2021-07-30 + */ + +#ifndef MAG_MPU_CMD_H +#define MAG_MPU_CMD_H + +/* serdes/mag消息命令字定义 */ +enum mag_cmd { + /* serdes命令字,统一封装所有serdes命令 */ + SERDES_CMD_PROCESS = 0, + + /* mag命令字,按功能划分 */ + /* 端口配置相关 0-29 */ + MAG_CMD_SET_PORT_CFG = 1, + MAG_CMD_SET_PORT_ADAPT = 2, + MAG_CMD_CFG_LOOPBACK_MODE = 3, + + MAG_CMD_GET_PORT_ENABLE = 5, + MAG_CMD_SET_PORT_ENABLE = 6, + MAG_CMD_GET_LINK_STATUS = 7, + MAG_CMD_SET_LINK_FOLLOW = 8, + MAG_CMD_SET_PMA_ENABLE = 9, + MAG_CMD_CFG_FEC_MODE = 10, + + MAG_CMD_CFG_AN_TYPE = 12, /* reserved for future use */ + MAG_CMD_CFG_LINK_TIME = 13, + + MAG_CMD_SET_PANGEA_ADAPT = 15, + MAG_CMD_HILINK_MODE = 16, /* set port hilink debug mode @see struct mag_cmd_hilink_mode */ + + /* bios link配置相关 30-49 */ + MAG_CMD_CFG_BIOS_LINK_CFG = 31, + MAG_CMD_RESTORE_LINK_CFG = 32, + MAG_CMD_ACTIVATE_BIOS_LINK_CFG = 33, + + /* 光模块、LED、PHY等外设配置管理 50-99 */ + /* LED */ + MAG_CMD_SET_LED_CFG = 50, + + /* PHY */ + MAG_CMD_GET_PHY_INIT_STATUS = 55, /* reserved for future use */ + + /* 光模块 */ + MAG_CMD_GET_XSFP_INFO = 60, + MAG_CMD_SET_XSFP_ENABLE = 61, + MAG_CMD_GET_XSFP_PRESENT = 62, + MAG_CMD_SET_XSFP_RW = 63, /* sfp/qsfp single byte read/write, for equipment test */ + MAG_CMD_CFG_XSFP_TEMPERATURE = 64, + MAG_CMD_SET_XSFP_TLV_INFO = 65, + MAG_CMD_GET_XSFP_TLV_INFO = 66, + + /* 事件上报 100-149 */ + MAG_CMD_WIRE_EVENT = 100, + MAG_CMD_LINK_ERR_EVENT = 101, + + /* DFX、Counter相关 */ + MAG_CMD_EVENT_PORT_INFO = 150, + MAG_CMD_GET_PORT_STAT = 151, + MAG_CMD_CLR_PORT_STAT = 152, + MAG_CMD_GET_PORT_INFO = 153, + MAG_CMD_GET_PCS_ERR_CNT = 154, + MAG_CMD_GET_MAG_CNT = 155, + MAG_CMD_DUMP_ANTRAIN_INFO = 156, + MAG_CMD_GET_UBMAC_COUNTER = 157, + MAG_CMD_GET_HIMAC_BER = 158, + MAG_CMD_OP_HIMAC_THRD = 159, + MAG_CMD_GET_HIMAC_BANDWIDTH = 160, + MAG_CMD_SET_HIMAC_PRBS = 161, + MAG_CMD_GET_HIMAC_PRBS = 162, + + /* patch预留cmd */ + MAG_CMD_PATCH_RSVD_0 = 200, + MAG_CMD_PATCH_RSVD_1 = 
201, + MAG_CMD_PATCH_RSVD_2 = 202, + MAG_CMD_PATCH_RSVD_3 = 203, + MAG_CMD_PATCH_RSVD_4 = 204, + + MAG_CMD_MAX = 0xFF +}; + +#endif diff --git a/hinic5/src/dpu_develop_interface/fw_msg_intf/mag/mag_mpu_cmd_defs.h b/hinic5/src/dpu_develop_interface/fw_msg_intf/mag/mag_mpu_cmd_defs.h new file mode 100644 index 00000000..542c03cc --- /dev/null +++ b/hinic5/src/dpu_develop_interface/fw_msg_intf/mag/mag_mpu_cmd_defs.h @@ -0,0 +1,1673 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. + * Description: serdes/mag cmd definition between driver and mpu + * Author: ETH group + * Create: 2021-07-30 + */ + +#ifndef MAG_MPU_CMD_DEFS_H +#define MAG_MPU_CMD_DEFS_H + +#if defined(__LINUX__) || defined(__VMWARE__) +#include <linux/types.h> +#elif defined(__WIN__) +#include "base_type.h" +#else +#include "typedef.h" +#endif +#include "mpu_cmd_base_defs.h" + +/* serdes cmd struct define */ +#define CMD_ARRAY_BUF_SIZE 64 +#define SERDES_CMD_DATA_BUF_SIZE 512 +#define RX_RSFEC_PHY_DFX_STA_TBL_SIZE 25 + +struct serdes_in_info { + u32 chip_id : 16; + u32 macro_id : 16; + u32 start_sds_id : 16; + u32 sds_num : 16; + + u32 cmd_type : 8; /* reserved for iotype */ + u32 sub_cmd : 8; + u32 rw : 1; /* 0: read, 1: write */ + u32 rsvd : 15; + + u32 val; + union { + char field[CMD_ARRAY_BUF_SIZE]; + u32 addr; + u8 *ex_param; + }; +}; + +struct serdes_out_info { + u32 str_len; /* out_str length */ + u32 result_offset; + u32 type; /* 0:data; 1:string */ + char out_str[SERDES_CMD_DATA_BUF_SIZE]; +}; + +struct serdes_cmd_in { + struct mgmt_msg_head head; + + struct serdes_in_info serdes_in; +}; + +struct serdes_cmd_out { + struct mgmt_msg_head head; + + struct serdes_out_info serdes_out; +}; + +struct mag_port_info { + u8 port_type; + u8 autoneg_cap; + u8 autoneg_state; + u8 duplex; + u8 speed; + u8 fec; + u32 supported_mode; + u32 advertised_mode; + u32 supported_fec_mode; +}; + +enum mag_cmd_port_speed { + PORT_SPEED_NOT_SET = 0, + PORT_SPEED_10MB = 1, + 
PORT_SPEED_100MB = 2, + PORT_SPEED_1GB = 3, + PORT_SPEED_10GB = 4, + PORT_SPEED_25GB = 5, + PORT_SPEED_40GB = 6, + PORT_SPEED_50GB = 7, + PORT_SPEED_100GB = 8, + PORT_SPEED_200GB = 9, + PORT_SPEED_400GB = 10, + PORT_SPEED_800GB = 11, + PORT_SPEED_UNKNOWN +}; + +enum mag_cmd_port_an { + PORT_AN_NOT_SET = 0, + PORT_CFG_AN_ON = 1, + PORT_CFG_AN_OFF = 2 +}; + +enum mag_cmd_port_adapt { + PORT_ADAPT_NOT_SET = 0, + PORT_CFG_ADAPT_ON = 1, + PORT_CFG_ADAPT_OFF = 2 +}; + +enum mag_cmd_prbs_scr_en { + PRBS_SCR_EN_OFF = 0, + PRBS_SCR_EN_ON = 1, + PRBS_SCR_EN_END +}; + +enum mag_cmd_dirction { + DIRECTION_TX = 0, + DIRECTION_RX = 1, + DIRECTION_TXRX = 2, + DIRECTION_END +}; + +enum mag_cmd_port_sriov { + PORT_SRIOV_NOT_SET = 0, + PORT_CFG_SRIOV_ON = 1, + PORT_CFG_SRIOV_OFF = 2 +}; + +enum mag_cmd_port_fec { + PORT_FEC_NOT_SET = 0, + PORT_FEC_RSFEC = 1, + PORT_FEC_BASEFEC = 2, + PORT_FEC_NOFEC = 3, + PORT_FEC_LLRSFEC = 4, + PORT_FEC_AUTO = 5 +}; + +enum mag_cmd_port_lanes { + PORT_LANES_NOT_SET = 0, + PORT_LANES_X1 = 1, + PORT_LANES_X2 = 2, + PORT_LANES_X4 = 4, + PORT_LANES_X8 = 8 /* reserved for future use */ +}; + +enum mag_cmd_port_duplex { + PORT_DUPLEX_HALF = 0, + PORT_DUPLEX_FULL = 1 +}; + +enum mag_cmd_wire_node { + WIRE_NODE_UNDEF = 0, + CABLE_10G = 1, + FIBER_10G = 2, + CABLE_25G = 3, + FIBER_25G = 4, + CABLE_40G = 5, + FIBER_40G = 6, + CABLE_50G = 7, + FIBER_50G = 8, + CABLE_100G = 9, + FIBER_100G = 10, + CABLE_200G = 11, + FIBER_200G = 12, + CABLE_400G = 13, + FIBER_400G = 14, + CABLE_800G = 15, + FIBER_800G = 16, + WIRE_NODE_NUM +}; + +#define CABLE_10G_SPEED (1 << PORT_SPEED_10GB) +#define CABLE_25G_SPEED ((1 << PORT_SPEED_25GB) | (1 << PORT_SPEED_10GB)) +#define CABLE_40G_SPEED ((1 << PORT_SPEED_40GB) | (1 << PORT_SPEED_10GB)) +#define CABLE_50G_SPEED ((1 << PORT_SPEED_50GB) | (1 << PORT_SPEED_25GB) | \ + (1 << PORT_SPEED_10GB)) +#define CABLE_100G_SPEED ((1 << PORT_SPEED_100GB) | (1 << PORT_SPEED_50GB) | \ + (1 << PORT_SPEED_40GB) | (1 << PORT_SPEED_25GB) | \ + 
(1 << PORT_SPEED_10GB)) +#define CABLE_200G_SPEED ((1 << PORT_SPEED_200GB) | (1 << PORT_SPEED_100GB) | \ + (1 << PORT_SPEED_50GB) | (1 << PORT_SPEED_40GB) | \ + (1 << PORT_SPEED_25GB) | (1 << PORT_SPEED_10GB)) +#define CABLE_400G_SPEED ((1 << PORT_SPEED_400GB) | (1 << PORT_SPEED_200GB) | \ + (1 << PORT_SPEED_100GB) | (1 << PORT_SPEED_50GB) | \ + (1 << PORT_SPEED_40GB) | (1 << PORT_SPEED_25GB) | \ + (1 << PORT_SPEED_10GB)) +#define CABLE_800G_SPEED ((1 << PORT_SPEED_800GB) | (1 << PORT_SPEED_400GB) | \ + (1 << PORT_SPEED_200GB) | (1 << PORT_SPEED_100GB) | \ + (1 << PORT_SPEED_50GB) | (1 << PORT_SPEED_40GB) | \ + (1 << PORT_SPEED_25GB) | (1 << PORT_SPEED_10GB)) +#define FIBER_10G_SPEED (1 << PORT_SPEED_10GB) +#define FIBER_25G_SPEED (1 << PORT_SPEED_25GB) +#define FIBER_40G_SPEED (1 << PORT_SPEED_40GB) +#define FIBER_50G_SPEED (1 << PORT_SPEED_50GB) +#define FIBER_100G_SPEED (1 << PORT_SPEED_100GB) +#define FIBER_200G_SPEED (1 << PORT_SPEED_200GB) +#define FIBER_400G_SPEED (1 << PORT_SPEED_400GB) +#define FIBER_800G_SPEED (1 << PORT_SPEED_800GB) + +enum mag_cmd_cnt_type { + MAG_RX_RSFEC_DEC_CW_CNT = 0, + MAG_RX_RSFEC_CORR_CW_CNT = 1, + MAG_RX_RSFEC_UNCORR_CW_CNT = 2, + MAG_RX_PCS_BER_CNT = 3, + MAG_RX_PCS_ERR_BLOCK_CNT = 4, + MAG_RX_PCS_E_BLK_CNT = 5, + MAG_RX_PCS_DEC_ERR_BLK_CNT = 6, + MAG_RX_PCS_LANE_BIP_ERR_CNT = 7, + MAG_CNT_NUM +}; + +/* mag_cmd_set_port_cfg config bitmap */ +#define MAG_CMD_SET_SPEED 0x1 +#define MAG_CMD_SET_AUTONEG 0x2 +#define MAG_CMD_SET_FEC 0x4 +#define MAG_CMD_SET_LANES 0x8 +struct mag_cmd_set_port_cfg { + struct mgmt_msg_head head; + + u8 port_id; + u8 rsvd0[3]; + + u32 config_bitmap; + u8 speed; + u8 autoneg; + u8 fec; + u8 lanes; + u8 rsvd1[19]; + u8 adapt_en; /* 跨DIE增加,工具暂未使能该能力 */ +}; + +/* mag supported/advertised link mode bitmap */ +enum mag_cmd_link_mode { + LINK_MODE_GE = 0, + LINK_MODE_10GE_BASE_R = 1, + LINK_MODE_25GE_BASE_R = 2, + LINK_MODE_40GE_BASE_R4 = 3, + LINK_MODE_50GE_BASE_R = 4, + LINK_MODE_50GE_BASE_R2 = 5, + 
LINK_MODE_100GE_BASE_R = 6, + LINK_MODE_100GE_BASE_R2 = 7, + LINK_MODE_100GE_BASE_R4 = 8, + LINK_MODE_200GE_BASE_R2 = 9, + LINK_MODE_200GE_BASE_R4 = 10, + LINK_MODE_200GE_BASE_R8 = 11, + LINK_MODE_400GE_BASE_R4 = 12, + LINK_MODE_400GE_BASE_R8 = 13, + LINK_MODE_800GE_BASE_R8 = 14, + LINK_MODE_MAX_NUMBERS, + + LINK_MODE_UNKNOWN = 0xFFFF +}; + +enum mag_cmd_himac_prbs_type { + HIMAC_PRBS_NOT_SET = 0, + HIMAC_PRBS13, + HIMAC_PRBS15, + HIMAC_PRBS31, + HIMAC_PRBS58, + HIMAC_PRBS_DISABLE, + HIMAC_PRBS_END +}; + +#define LINK_MODE_GE_BIT 0x1u +#define LINK_MODE_10GE_BASE_R_BIT 0x2u +#define LINK_MODE_25GE_BASE_R_BIT 0x4u +#define LINK_MODE_40GE_BASE_R4_BIT 0x8u +#define LINK_MODE_50GE_BASE_R_BIT 0x10u +#define LINK_MODE_50GE_BASE_R2_BIT 0x20u +#define LINK_MODE_100GE_BASE_R_BIT 0x40u +#define LINK_MODE_100GE_BASE_R2_BIT 0x80u +#define LINK_MODE_100GE_BASE_R4_BIT 0x100u +#define LINK_MODE_200GE_BASE_R2_BIT 0x200u +#define LINK_MODE_200GE_BASE_R4_BIT 0x400u +#define LINK_MODE_200GE_BASE_R8_BIT 0x800u +#define LINK_MODE_400GE_BASE_R4_BIT 0x1000u +#define LINK_MODE_400GE_BASE_R8_BIT 0x2000u +#define LINK_MODE_800GE_BASE_R8_BIT 0x4000u + +#define CABLE_10GE_BASE_R_BIT LINK_MODE_10GE_BASE_R_BIT +#define CABLE_25GE_BASE_R_BIT (LINK_MODE_25GE_BASE_R_BIT | LINK_MODE_10GE_BASE_R_BIT) +#define CABLE_40GE_BASE_R4_BIT LINK_MODE_40GE_BASE_R4_BIT +#define CABLE_50GE_BASE_R_BIT (LINK_MODE_50GE_BASE_R_BIT | LINK_MODE_25GE_BASE_R_BIT | \ + LINK_MODE_10GE_BASE_R_BIT) +#define CABLE_50GE_BASE_R2_BIT LINK_MODE_50GE_BASE_R2_BIT +#define CABLE_100GE_BASE_R_BIT (LINK_MODE_100GE_BASE_R_BIT | LINK_MODE_50GE_BASE_R_BIT | \ + LINK_MODE_25GE_BASE_R_BIT | LINK_MODE_10GE_BASE_R_BIT) +#define CABLE_100GE_BASE_R2_BIT (LINK_MODE_100GE_BASE_R2_BIT | LINK_MODE_50GE_BASE_R2_BIT) +#define CABLE_100GE_BASE_R4_BIT (LINK_MODE_100GE_BASE_R4_BIT | LINK_MODE_40GE_BASE_R4_BIT) +#define CABLE_200GE_BASE_R2_BIT (LINK_MODE_200GE_BASE_R2_BIT | LINK_MODE_100GE_BASE_R2_BIT | \ + LINK_MODE_50GE_BASE_R2_BIT) +#define 
CABLE_200GE_BASE_R4_BIT (LINK_MODE_200GE_BASE_R4_BIT | LINK_MODE_100GE_BASE_R4_BIT | \ + LINK_MODE_40GE_BASE_R4_BIT) +#define CABLE_200GE_BASE_R8_BIT LINK_MODE_200GE_BASE_R8_BIT +#define CABLE_400GE_BASE_R4_BIT (LINK_MODE_400GE_BASE_R4_BIT | LINK_MODE_200GE_BASE_R4_BIT | \ + LINK_MODE_100GE_BASE_R4_BIT | LINK_MODE_40GE_BASE_R4_BIT) +#define CABLE_400GE_BASE_R8_BIT (LINK_MODE_400GE_BASE_R8_BIT | LINK_MODE_200GE_BASE_R8_BIT) +#define CABLE_800GE_BASE_R8_BIT (LINK_MODE_800GE_BASE_R8_BIT | LINK_MODE_400GE_BASE_R8_BIT | \ + LINK_MODE_200GE_BASE_R8_BIT) + +struct mag_cmd_ber_cor_cnt { + u32 corr_lane_sym_cnt; + u32 lane_cor0_cnt; + u32 lane_cor1_cnt; + u32 cfg_speed; +}; + +union mag_cmd_ber_data_u { + struct mag_cmd_ber_cor_cnt cor_cnt; + u32 data[RX_RSFEC_PHY_DFX_STA_TBL_SIZE]; +}; + +struct mag_cmd_get_himac_ber { + struct mgmt_msg_head head; + + u8 port_id; + u8 op_type; + u8 rsvd0[2]; + + union mag_cmd_ber_data_u ber_data; +}; + +struct mag_cmd_op_himac_thrd { + struct mgmt_msg_head head; + + u8 port_id; + u8 op_type; + u8 cur_status; + u8 rsvd0; +}; + +struct mag_cmd_get_port_info { + struct mgmt_msg_head head; + + u8 port_id; + u8 rsvd0[3]; + + u8 wire_type; + u8 an_support; + u8 an_en; + u8 duplex; + + u8 speed; + u8 fec; + u8 lanes; + u8 rsvd1; + + u32 supported_mode; + u32 advertised_mode; + u32 supported_fec_mode; + u8 rsvd2[4]; +}; + +struct mag_cmd_get_himac_bandwidth { + struct mgmt_msg_head head; + + u8 port_id; + u8 txrx_sel; + u8 rsvd0[2]; + u32 win_size; + + u64 rate_mbps; + u64 packet_rate; + u32 rate_byte_h; + u32 rate_byte_l; + u32 rate_pkt; + u32 cal_time_us; + u32 ipg; +}; + +struct mag_cmd_himac_prbs { + struct mgmt_msg_head head; + + u8 port_id; + u8 direction; + u8 prbs_type; + u8 scr_en; + + u8 fec_link_status; + u8 sync_status; + u8 rsvd0[2]; + u32 cw_cnt; + u32 err_cnt; +}; + +#define MAG_CMD_OPCODE_GET 0 +#define MAG_CMD_OPCODE_SET 1 +struct mag_cmd_set_port_adapt { + struct mgmt_msg_head head; + + u8 port_id; + u8 opcode; /* 0:get adapt 
info 1:set adapt */ + u8 enable; + u8 rsvd0; + u32 speed_mode; + u32 rsvd1[3]; +}; + +#define MAG_CMD_LP_MODE_SDS_S_TX2RX 1 +#define MAG_CMD_LP_MODE_SDS_P_RX2TX 2 +#define MAG_CMD_LP_MODE_SDS_P_TX2RX 3 +#define MAG_CMD_LP_MODE_MAC_RX2TX 4 +struct mag_cmd_cfg_loopback_mode { + struct mgmt_msg_head head; + + u8 port_id; + u8 opcode; /* 0:get loopback mode 1:set loopback mode */ + u8 lp_mode; + u8 lp_en; /* 0:disable 1:enable */ + + u32 rsvd0[2]; +}; + +#define MAG_CMD_PORT_DISABLE 0x0 +#define MAG_CMD_TX_ENABLE 0x1 +#define MAG_CMD_RX_ENABLE 0x2 +/* the physical port is disable only when all pf of the port are set to down, + * if any pf is enable, the port is enable + */ +struct mag_cmd_set_port_enable { + struct mgmt_msg_head head; + + u16 function_id; /* function_id should not more than + * the max support pf_id(32) + */ + u16 rsvd0; + + u8 state; /* bitmap bit0:tx_en bit1:rx_en */ + u8 rsvd1[3]; +}; + +struct mag_cmd_get_port_enable { + struct mgmt_msg_head head; + + u8 port; + u8 state; /* bitmap bit0:tx_en bit1:rx_en */ + u8 rsvd0[2]; +}; + +#define PMA_FOLLOW_DEFAULT 0x0 +#define PMA_FOLLOW_ENABLE 0x1 +#define PMA_FOLLOW_DISABLE 0x2 +#define PMA_FOLLOW_GET 0x4 +/* the physical port disable link follow only when + * all pf of the port are set to follow disable + */ +struct mag_cmd_set_link_follow { + struct mgmt_msg_head head; + + u16 function_id; /* function_id should not more than + * the max support pf_id(32) + */ + u16 rsvd0; + + u8 follow; + u8 rsvd1[3]; +}; + +/* firmware also use this cmd report link event to driver */ +struct mag_cmd_get_link_status { + struct mgmt_msg_head head; + + u8 port_id; + u8 status; /* 0:link down 1:link up */ + u8 rsvd0[2]; +}; + +struct mag_cmd_set_pma_enable { + struct mgmt_msg_head head; + + u16 function_id; /* function_id should not more than + * the max support pf_id(32) + */ + u16 enable; +}; + +struct mag_cmd_cfg_an_type { + struct mgmt_msg_head head; + + u8 port_id; + u8 opcode; /* 0:get an type 1:set an type */ + u8 
rsvd0[2]; + + u32 an_type; /* 0:ieee 1:25G/50 eth consortium */ +}; + +struct mag_cmd_get_link_time { + struct mgmt_msg_head head; + u8 port_id; + u8 rsvd0[3]; + + u32 link_up_begin; + u32 link_up_end; + u32 link_down_begin; + u32 link_down_end; +}; + +struct mag_cmd_cfg_fec_mode { + struct mgmt_msg_head head; + + u8 port_id; + u8 opcode; /* 0:get fec mode 1:set fec mode */ + u8 advertised_fec; + u8 supported_fec; +}; + +/* speed */ +#define PANGEA_ADAPT_10G_BITMAP 0xd +#define PANGEA_ADAPT_25G_BITMAP 0x72 +#define PANGEA_ADAPT_40G_BITMAP 0x680 +#define PANGEA_ADAPT_100G_BITMAP 0x1900 +#define PANGEA_ADAPT_200G_BITMAP 0x1c000 + +/* speed and fec */ +#define PANGEA_10G_NO_BITMAP 0x8 +#define PANGEA_10G_BASE_BITMAP 0x4 +#define PANGEA_25G_NO_BITMAP 0x10 +#define PANGEA_25G_BASE_BITMAP 0x20 +#define PANGEA_25G_RS_BITMAP 0x40 +#define PANGEA_40G_NO_BITMAP 0x400 +#define PANGEA_40G_BASE_BITMAP 0x200 +#define PANGEA_100G_NO_BITMAP 0x800 +#define PANGEA_100G_RS_BITMAP 0x1000 +#define PANGEA_200G_RS_BITMAP 0x8000 +#define PANGEA_200G_LLRS_BITMAP 0x10000 + +/* adapt or fec */ +#define PANGEA_ADAPT_ADAPT_BITMAP 0x4183 +#define PANGEA_ADAPT_NO_BITMAP 0xc18 +#define PANGEA_ADAPT_BASE_BITMAP 0x224 +#define PANGEA_ADAPT_RS_BITMAP 0x9040 +#define PANGEA_ADAPT_LLRS_BITMAP 0x10000 + +/* default cfg */ +#define PANGEA_ADAPT_CFG_10G_CR 0x200d +#define PANGEA_ADAPT_CFG_10G_SRLR 0xd +#define PANGEA_ADAPT_CFG_25G_CR 0x207f +#define PANGEA_ADAPT_CFG_25G_SRLR 0x72 +#define PANGEA_ADAPT_CFG_40G_CR4 0x2680 +#define PANGEA_ADAPT_CFG_40G_SRLR4 0x680 +#define PANGEA_ADAPT_CFG_100G_CR4 0x3f80 +#define PANGEA_ADAPT_CFG_100G_SRLR4 0x1900 +#define PANGEA_ADAPT_CFG_200G_CR4 0x1ff80 +#define PANGEA_ADAPT_CFG_200G_SRLR4 0x1c000 + +typedef union { + struct { + u32 adapt_10g : 1; /* [0] adapt_10g */ + u32 adapt_25g : 1; /* [1] adapt_25g */ + u32 base_10g : 1; /* [2] base_10g */ + u32 no_10g : 1; /* [3] no_10g */ + u32 no_25g : 1; /* [4] no_25g */ + u32 base_25g : 1; /* [5] base_25g */ + u32 rs_25g : 1; 
/* [6] rs_25g */ + u32 adapt_40g : 1; /* [7] adapt_40g */ + u32 adapt_100g : 1; /* [8] adapt_100g */ + u32 base_40g : 1; /* [9] base_40g */ + u32 no_40g : 1; /* [10] no_40g */ + u32 no_100g : 1; /* [11] no_100g */ + u32 rs_100g : 1; /* [12] rs_100g */ + u32 auto_neg : 1; /* [13] auto_neg */ + u32 adapt_200g : 1; /* [14] adapt_200g */ + u32 rs_200g : 1; /* [15] rs_200g */ + u32 llrs_200g : 1; /* [16] llrs_200g */ + u32 rsvd0 : 15; /* [31:17] reserved */ + } bits; + + u32 value; +} pangea_adapt_bitmap_u; + +#define PANGEA_ADAPT_GET 0x0 +#define PANGEA_ADAPT_SET 0x1 +struct mag_cmd_set_pangea_adapt { + struct mgmt_msg_head head; + + u16 port_id; + u8 opcode; /* 0:get adapt info 1:cfg adapt info */ + u8 wire_type; + + pangea_adapt_bitmap_u cfg_bitmap; + pangea_adapt_bitmap_u cur_bitmap; + u32 rsvd1[3]; +}; + +struct mag_cmd_cfg_bios_link_cfg { + struct mgmt_msg_head head; + + u8 port_id; + u8 opcode; /* 0:get bios link info 1:set bios link cfg */ + u8 clear; + u8 rsvd0; + + u32 wire_type; + u8 an_en; + u8 speed; + u8 fec; + u8 rsvd1; + u32 speed_mode; + u32 rsvd2[3]; +}; + +struct mag_cmd_restore_link_cfg { + struct mgmt_msg_head head; + + u8 port_id; + u8 rsvd[7]; +}; + +struct mag_cmd_activate_bios_link_cfg { + struct mgmt_msg_head head; + + u32 rsvd[8]; +}; + +/* led type */ +enum mag_led_type { + MAG_CMD_LED_TYPE_ALARM = 0x0, + MAG_CMD_LED_TYPE_LOW_SPEED = 0x1, + MAG_CMD_LED_TYPE_HIGH_SPEED = 0x2 +}; + +/* led mode */ +enum mag_led_mode { + MAG_CMD_LED_MODE_DEFAULT = 0x0, + MAG_CMD_LED_MODE_FORCE_ON = 0x1, + MAG_CMD_LED_MODE_FORCE_OFF = 0x2, + MAG_CMD_LED_MODE_FORCE_BLINK_1HZ = 0x3, + MAG_CMD_LED_MODE_FORCE_BLINK_2HZ = 0x4, + MAG_CMD_LED_MODE_FORCE_BLINK_4HZ = 0x5, + MAG_CMD_LED_MODE_1HZ = 0x6, + MAG_CMD_LED_MODE_2HZ = 0x7, + MAG_CMD_LED_MODE_4HZ = 0x8 +}; + +/* the led reports alarm when any pf of the port is in alarm */ +struct mag_cmd_set_led_cfg { + struct mgmt_msg_head head; + + u16 function_id; + u8 type; + u8 mode; +}; + +#define XSFP_INFO_MAX_SIZE 640 +/* 
xsfp wire type, refer to cmis protocol definition */ +enum mag_wire_type { + MAG_CMD_WIRE_TYPE_UNKNOWN = 0x0, + MAG_CMD_WIRE_TYPE_MM = 0x1, + MAG_CMD_WIRE_TYPE_SM = 0x2, + MAG_CMD_WIRE_TYPE_COPPER = 0x3, + MAG_CMD_WIRE_TYPE_ACC = 0x4, + MAG_CMD_WIRE_TYPE_BASET = 0x5, + MAG_CMD_WIRE_TYPE_AOC = 0x40, + MAG_CMD_WIRE_TYPE_ELECTRIC = 0x41, + MAG_CMD_WIRE_TYPE_BACKPLANE = 0x42 +}; + +struct mag_cmd_get_xsfp_info { + struct mgmt_msg_head head; + + u8 port_id; + u8 wire_type; + u16 out_len; + u32 rsvd; + u8 sfp_info[XSFP_INFO_MAX_SIZE]; +}; + +#define MAG_CMD_XSFP_DISABLE 0x0 +#define MAG_CMD_XSFP_ENABLE 0x1 +/* the sfp is disable only when all pf of the port are set sfp down, + * if any pf is enable, the sfp is enable + */ +struct mag_cmd_set_xsfp_enable { + struct mgmt_msg_head head; + + u32 port_id; + u32 status; /* 0:off 1:on */ +}; + +#define MAG_CMD_XSFP_PRESENT 0x0 +#define MAG_CMD_XSFP_ABSENT 0x1 +struct mag_cmd_get_xsfp_present { + struct mgmt_msg_head head; + + u8 port_id; + u8 abs_status; /* 0:present, 1:absent */ + u8 rsvd[2]; +}; + +#define MAG_CMD_XSFP_READ 0x0 +#define MAG_CMD_XSFP_WRITE 0x1 + +#define MAG_CMD_MISC_BANK_ID_MASK 0x03 /* bit [0:1]: means cmis bank_id */ +struct mag_cmd_set_xsfp_rw { + struct mgmt_msg_head head; + + u8 port_id; + u8 operation; /* 0: read; 1: write */ + u8 value; + u8 rsvd0; + u32 devaddr; + u32 offset; + u8 misc; /* bit [0:1]: means cmis bank_id, bit [2:7]: reserved */ + /* + * for qsfp and cmis, means upper page id. + * for qsfp, if not specified, read the upper page specified last time. + * for cmis, page_id must be specified. 
+ */ + u8 page_id; + u16 rsvd1; +}; + +struct mag_cmd_cfg_xsfp_temperature { + struct mgmt_msg_head head; + + u8 opcode; /* 0:read 1:write */ + u8 rsvd0[3]; + s32 max_temp; + s32 min_temp; +}; + +struct mag_cmd_get_xsfp_temperature { + struct mgmt_msg_head head; + + s16 sfp_temp[8]; + u8 rsvd[32]; + s32 max_temp; + s32 min_temp; +}; + +/* xsfp plug event */ +struct mag_cmd_wire_event { + struct mgmt_msg_head head; + + u8 port_id; + u8 status; /* 0:present, 1:absent */ + u8 rsvd[2]; +}; + +/* link err type definition */ +#define MAG_CMD_ERR_XSFP_UNKNOWN 0x0 +struct mag_cmd_link_err_event { + struct mgmt_msg_head head; + + u8 port_id; + u8 link_err_type; + u8 rsvd[2]; +}; + +#define MAG_PARAM_TYPE_DEFAULT_CFG 0x0 +#define MAG_PARAM_TYPE_BIOS_CFG 0x1 +#define MAG_PARAM_TYPE_TOOL_CFG 0x2 +#define MAG_PARAM_TYPE_FINAL_CFG 0x3 +#define MAG_PARAM_TYPE_WIRE_INFO 0x4 +#define MAG_PARAM_TYPE_ADAPT_INFO 0x5 +#define MAG_PARAM_TYPE_MAX_CNT 0x6 +struct param_head { + u8 valid_len; + u8 info_type; + u8 rsvd[2]; +}; + +struct mag_port_link_param { + struct param_head head; + + u8 an; + u8 fec; + u8 speed; + u8 rsvd0; + + u32 used; + u32 an_fec_ability; + u32 an_speed_ability; + u32 an_pause_ability; +}; + +struct mag_port_wire_info { + struct param_head head; + + u8 status; + u8 rsvd0[3]; + + u8 wire_type; + u8 default_fec; + u8 speed; + u8 rsvd1; + u32 speed_ability; +}; + +struct mag_port_adapt_info { + struct param_head head; + + u32 adapt_en; + u32 flash_adapt; + u32 rsvd0[2]; + + u32 wire_node; + u32 an_en; + u32 speed; + u32 fec; +}; + +struct mag_port_param_info { + u8 parameter_cnt; + u8 lane_id; + u8 lane_num; + u8 rsvd0; + + struct mag_port_link_param default_cfg; + struct mag_port_link_param bios_cfg; + struct mag_port_link_param tool_cfg; + struct mag_port_link_param final_cfg; + + struct mag_port_wire_info wire_info; + struct mag_port_adapt_info adapt_info; +}; + +#define XSFP_VENDOR_NAME_LEN 16 +#define XSFP_VENDOR_SN_LEN 16 +struct mag_cmd_event_port_info { + /* 
消息头公共信息 8+4=12B */ + struct mgmt_msg_head head; + u8 port_id; + u8 event_type; + u8 rsvd0[2]; + + /* 光模块相关 16+4*3+4+16+8=56B */ + u8 vendor_name[XSFP_VENDOR_NAME_LEN]; + u32 port_type; /* fiber / copper */ + u32 port_sub_type; /* sr / lr */ + u32 cable_length; /* 1/3/5m */ + u8 cable_temp; /* 温度 */ + u8 max_speed; /* 光模块最大速率 */ + u8 sfp_type; /* sfp/qsfp */ + u8 rsvd1; + u32 power[4]; /* 光功率 */ + + u8 an_state; + u8 fec; + u16 speed; + + u8 gpio_insert; /* 0:present 1:absent */ + u8 alos; + u8 rx_los; /* gpio获取 */ + u8 pma_ctrl; /* eth_ctrl.pma_ctrl.bits.rf_en; */ + + /* himac相关信息 4*5+4=32B */ + u32 pma_fifo_reg; /* himac pma fifo status */ + u32 pma_signal_ok_reg; /* himac pma signal ok status */ + u32 pcs_64_66b_reg; /* himac V600 无该寄存器 */ + u32 rf_lf; /* himac rxmac lf rf status */ + u8 pcs_link; /* himac pcs link status */ + u8 pcs_mac_link; /* link 线程判断后的 link_state */ + u8 tx_enable; /* himac txmac enable status */ + u8 rx_enable; /* himac rxmac enable status */ + u32 pcs_err_cnt; /* himac pcs ber err cnt */ + + u8 eq_data[38]; + u8 rsvd2[2]; + + /* link线程相关dfx 4+4+128+128+4=268 */ + u32 his_link_machine_state; + u32 cur_link_machine_state; + u8 his_machine_state_data[128]; + u8 cur_machine_state_data[128]; + u8 his_machine_state_length; + u8 cur_machine_state_length; + + /* an/adapt/link线程配置信息 4+4*24+16+36= */ + struct mag_port_param_info param_info; + + /* 自协商(an)和自适应(adapt)相关信息 4+8=12B */ + u32 speed_ability; /* supported_mode & advertised_mode */ + u32 fec_ability; /* supported FEC modes */ + u8 duplex; + + /* 线缆SN ASIIC表示 16B */ + u8 vendor_sn[XSFP_VENDOR_SN_LEN]; + u32 osfp_power[4]; /* 4通道光功率 */ + /* 确保兼容性 */ + u8 rsvd3[319]; /* 预留319byte */ +}; + +struct mag_cmd_port_stats { + u64 mac_tx_fragment_pkt_num; + u64 mac_tx_undersize_pkt_num; + u64 mac_tx_undermin_pkt_num; + u64 mac_tx_64_oct_pkt_num; + u64 mac_tx_65_127_oct_pkt_num; + u64 mac_tx_128_255_oct_pkt_num; + u64 mac_tx_256_511_oct_pkt_num; + u64 mac_tx_512_1023_oct_pkt_num; + u64 
mac_tx_1024_1518_oct_pkt_num; + u64 mac_tx_1519_2047_oct_pkt_num; + u64 mac_tx_2048_4095_oct_pkt_num; + u64 mac_tx_4096_8191_oct_pkt_num; + u64 mac_tx_8192_9216_oct_pkt_num; + u64 mac_tx_9217_12287_oct_pkt_num; + u64 mac_tx_12288_16383_oct_pkt_num; + u64 mac_tx_1519_max_bad_pkt_num; + u64 mac_tx_1519_max_good_pkt_num; + u64 mac_tx_oversize_pkt_num; + u64 mac_tx_jabber_pkt_num; + u64 mac_tx_bad_pkt_num; + u64 mac_tx_bad_oct_num; + u64 mac_tx_good_pkt_num; + u64 mac_tx_good_oct_num; + u64 mac_tx_total_pkt_num; + u64 mac_tx_total_oct_num; + u64 mac_tx_uni_pkt_num; + u64 mac_tx_multi_pkt_num; + u64 mac_tx_broad_pkt_num; + u64 mac_tx_pause_num; + u64 mac_tx_pfc_pkt_num; + u64 mac_tx_pfc_pri0_pkt_num; + u64 mac_tx_pfc_pri1_pkt_num; + u64 mac_tx_pfc_pri2_pkt_num; + u64 mac_tx_pfc_pri3_pkt_num; + u64 mac_tx_pfc_pri4_pkt_num; + u64 mac_tx_pfc_pri5_pkt_num; + u64 mac_tx_pfc_pri6_pkt_num; + u64 mac_tx_pfc_pri7_pkt_num; + u64 mac_tx_control_pkt_num; + u64 mac_tx_err_all_pkt_num; + u64 mac_tx_from_app_good_pkt_num; + u64 mac_tx_from_app_bad_pkt_num; + + u64 mac_rx_fragment_pkt_num; + u64 mac_rx_undersize_pkt_num; + u64 mac_rx_undermin_pkt_num; + u64 mac_rx_64_oct_pkt_num; + u64 mac_rx_65_127_oct_pkt_num; + u64 mac_rx_128_255_oct_pkt_num; + u64 mac_rx_256_511_oct_pkt_num; + u64 mac_rx_512_1023_oct_pkt_num; + u64 mac_rx_1024_1518_oct_pkt_num; + u64 mac_rx_1519_2047_oct_pkt_num; + u64 mac_rx_2048_4095_oct_pkt_num; + u64 mac_rx_4096_8191_oct_pkt_num; + u64 mac_rx_8192_9216_oct_pkt_num; + u64 mac_rx_9217_12287_oct_pkt_num; + u64 mac_rx_12288_16383_oct_pkt_num; + u64 mac_rx_1519_max_bad_pkt_num; + u64 mac_rx_1519_max_good_pkt_num; + u64 mac_rx_oversize_pkt_num; + u64 mac_rx_jabber_pkt_num; + u64 mac_rx_bad_pkt_num; + u64 mac_rx_bad_oct_num; + u64 mac_rx_good_pkt_num; + u64 mac_rx_good_oct_num; + u64 mac_rx_total_pkt_num; + u64 mac_rx_total_oct_num; + u64 mac_rx_uni_pkt_num; + u64 mac_rx_multi_pkt_num; + u64 mac_rx_broad_pkt_num; + u64 mac_rx_pause_num; + u64 mac_rx_pfc_pkt_num; + u64 
mac_rx_pfc_pri0_pkt_num; + u64 mac_rx_pfc_pri1_pkt_num; + u64 mac_rx_pfc_pri2_pkt_num; + u64 mac_rx_pfc_pri3_pkt_num; + u64 mac_rx_pfc_pri4_pkt_num; + u64 mac_rx_pfc_pri5_pkt_num; + u64 mac_rx_pfc_pri6_pkt_num; + u64 mac_rx_pfc_pri7_pkt_num; + u64 mac_rx_control_pkt_num; + u64 mac_rx_sym_err_pkt_num; + u64 mac_rx_fcs_err_pkt_num; + u64 mac_rx_send_app_good_pkt_num; + u64 mac_rx_send_app_bad_pkt_num; + u64 mac_rx_unfilter_pkt_num; +}; + +struct mag_cmd_port_stats_info { + struct mgmt_msg_head head; + + u8 port_id; + u8 rsvd0[3]; +}; + +struct mag_cmd_get_port_stat { + struct mgmt_msg_head head; + + struct mag_cmd_port_stats counter; + u64 rsvd1[15]; +}; + +struct mag_cmd_clr_port_stat { + struct mgmt_msg_head head; + + u8 port_id; + u8 rsvd0[3]; +}; + +struct mag_cmd_get_pcs_err_cnt { + struct mgmt_msg_head head; + + u8 port_id; + u8 rsvd0[3]; + + u32 pcs_err_cnt; +}; + +struct mag_cmd_get_mag_cnt { + struct mgmt_msg_head head; + + u8 port_id; + u8 len; + u8 rsvd0[2]; + + u32 mag_csr[128]; +}; + +struct mag_cmd_dump_antrain_info { + struct mgmt_msg_head head; + + u8 port_id; + u8 len; + u8 rsvd0[2]; + + u32 antrain_csr[256]; +}; + +#define MAG_SFP_PORT_NUM 24 +/* 芯片光模块温度结构体定义 */ +struct mag_cmd_sfp_temp_in_info { + struct mgmt_msg_head head; /* 8B */ + u8 opt_type; /* 0:read operation 1:cfg operation */ + u8 rsv[3]; + s32 max_temp; /* 芯片光模块阈值 */ + s32 min_temp; /* 芯片光模块阈值 */ +}; + +struct mag_cmd_sfp_temp_out_info { + struct mgmt_msg_head head; /* 8B */ + s16 sfp_temp_data[MAG_SFP_PORT_NUM]; /* 读出的温度 */ + s32 max_temp; /* 芯片光模块阈值 */ + s32 min_temp; /* 芯片光模块阈值 */ +}; + +#define XSFP_CMIS_INFO_MAX_SIZE 1536 +#define QSFP_CMIS_PAGE_SIZE 128 + +#define QSFP_CMIS_PAGE_00H 0x00 /* Lower: Control and Essentials, Upper: \ + * Administrative Information \ + */ +#define QSFP_CMIS_PAGE_01H 0x01 /* Advertising */ +#define QSFP_CMIS_PAGE_02H 0x02 /* Module and lane Thresholds */ +#define QSFP_CMIS_PAGE_03H 0x03 /* User EEPROM */ +#define QSFP_CMIS_PAGE_04H 0x04 /* Laser 
Capabilities Advertising \ + * (Page 04h, Optional) \ + */ +#define QSFP_CMIS_PAGE_05H 0x05 +#define QSFP_CMIS_PAGE_10H 0x10 /* Lane and Data Path Control */ +#define QSFP_CMIS_PAGE_11H 0x11 /* Lane Status */ +#define QSFP_CMIS_PAGE_12H 0x12 +#define QSFP_CMIS_PAGE_13H 0x13 +#define QSFP_CMIS_PAGE_14H 0x14 +#define QSFP_CMIS_PAGE_9FH 0x9f +#define QSFP_CMIS_PAGE_B7H 0xb7 +#define QSFP_CMIS_PAGE_B8H 0xb8 + +/* ETHTOOL中 lower page 00h和high page 00h的page id都为0,后者offset为128, +但是由于MPU定义high page 00h的page id = 1,特殊处理 */ +#define HINIC5_PAGE_L00_H00_OFFSET 0 +#define HINIC5_PAGE_H01_OFFSET 1 +#define HINIC5_PAGE_H02_OFFSET 2 +#define HINIC5_PAGE_H10_OFFSET 3 +#define HINIC5_PAGE_H11_OFFSET 4 +#define HINIC5_PAGE_INVALID_OFFSET 0xff + +/* ethtool 6.6支持解析的最大page id为0x11 */ +#ifndef CMIS_MAX_PAGES +#define CMIS_MAX_PAGES 18 +#endif + +#define MGMT_TLV_U8_SIZE 1 +#define MGMT_TLV_U16_SIZE 2 +#define MGMT_TLV_U32_SIZE 4 + +#define MGMT_TLV_GET_U8(addr) (*((u8 *)(void *)(addr))) +#define MGMT_TLV_SET_U8(addr, value) \ + ((*((u8 *)(void *)(addr))) = ((u8)(value))) + +#define MGMT_TLV_GET_U16(addr) (*((u16 *)(void *)(addr))) +#define MGMT_TLV_SET_U16(addr, value) \ + ((*((u16 *)(void *)(addr))) = ((u16)(value))) + +#define MGMT_TLV_GET_U32(addr) (*((u32 *)(void *)(addr))) +#define MGMT_TLV_SET_U32(addr, value) \ + ((*((u32 *)(void *)(addr))) = ((u32)(value))) + +#define MGMT_TLV_TYPE_END 0xFFFF +enum mag_xsfp_type { + /* 跳过0x00,从0x01开始定义Type的原因:便于和memset的数据做区别 */ + MAG_XSFP_TYPE_PAGE = 0x01, + MAG_XSFP_TYPE_WIRE_TYPE = 0x02, + MAG_XSFP_TYPE_END = MGMT_TLV_TYPE_END +}; + +struct mgmt_tlv_info { + u16 type; + u16 length; + u8 value[0]; // value为page页内容时数据组成: + // page_id(4byte) + page_context(128byte) +}; + +typedef struct tag_mag_cmd_set_xsfp_tlv_req { + struct mgmt_msg_head head; + + /* + * 按结构体struct mgmt_tlv_info格式解析。 + * +---------------------------------------------+ + * | TYPE | LEN | VALUE | + * +--------------------+-------+----------------+ + * | MAG_XSFP_TYPE_PAGE | 4 | 
Page Number | + * +---------------------------------------------+ + * + * 说明: + * 1、Page编号定义:lower page 00h编号为: + * 0,upper page 00h编号为1,依此类推。 + * 2、规格:当前最大支持10个Page,其中: + * lower page 00h, upper page 00h/01h/02h/10h/11h为必选项, + * 剩余4个为扩展预留。 + */ + u8 tlv_buf[0]; +} mag_cmd_set_xsfp_tlv_req; + +typedef struct tag_mag_cmd_set_xsfp_tlv_rsp { + struct mgmt_msg_head head; +} mag_cmd_set_xsfp_tlv_rsp; + +typedef struct tag_mag_cmd_get_xsfp_tlv_req { + struct mgmt_msg_head head; + + u8 port_id; + u8 rsvd; + u16 rsp_buf_len; /* 响应里面:用于存放TLV格式数据的Buffer空间长度 */ +} mag_cmd_get_xsfp_tlv_req; + +typedef struct tag_mag_cmd_get_xsfp_tlv_rsp { + struct mgmt_msg_head head; + + u8 port_id; + u8 rsvd[3]; + + /* + * 按结构体struct mgmt_tlv_info格式组装。 + * +----------------------------------------------------------------------+ + * | TYPE | LEN | VALUE | + * +-------------------------+--------------+-----------------------------+ + * | MAG_XSFP_TYPE_WIRE_TYPE | 4 | Wire Type | + * +-------------------------+--------------+-----------------------------+ + * | MAG_XSFP_TYPE_PAGE | 4 + Page Len | Page Number + Page Content | + * +----------------------------------------------------------------------+ + * + * 说明: + * 1、Page编号定义:lower page 00h编号为:0,upper page 00h编号为:1,依此类推。 + * 2、调用者需要申请足够的空间(包括存放结束Type和结束Length的空间)。 + */ + u8 tlv_buf[0]; +} mag_cmd_get_xsfp_tlv_rsp; + +#define XSFP_CMIS_PARSE_PAGE_NUM 10 + +typedef struct mag_parse_tlv_info { + u8 tlv_page_info[XSFP_CMIS_INFO_MAX_SIZE + 1]; + u32 tlv_page_info_len; + u32 tlv_page_num[XSFP_CMIS_PARSE_PAGE_NUM]; + u32 wire_type; + u8 id; +} parse_tlv_info; + +typedef struct drv_tag_mag_cmd_get_xsfp_tlv_rsp { + struct mgmt_msg_head head; + + u8 port_id; + u8 rsvd[3]; + + u8 tlv_buf[XSFP_CMIS_INFO_MAX_SIZE]; +} drv_mag_cmd_get_xsfp_tlv_rsp; + +typedef struct { + u8 resv0[14]; /* Reg 0-13: Lower Memory: Page 00h */ + u8 temperature_msb; /* Reg 14: Module Monitor 1: Temperature1 MSB */ + u8 temperature_lsb; /* Reg 15: Module Monitor 1: Temperature1 LSB 
*/ + u8 volt_supply[2]; /* + * Reg 16-17: Internally measured 3.3 volt + * input supply voltage: in 100 µV increments. + */ + u8 resv1[67]; /* Reg 18-84 */ + u8 media_type; /* Reg 85: Table 8-12 Byte 85 Module Media Type Encodings */ + + u8 electrical_interface_id; /* Reg 86: ID from SFF-8024 IDs for + * Host Electrical Interfaces + */ + u8 media_interface_id; /* Reg 87: ID from table selected by Byte 85 + * (see Table 8-12) + */ + u8 lane_count; /* + * Reg 88: Lane Count, ApSel Code: 0001b. + * bit 7-4: Host Lane Count. + * bit 3-0: Media Lane Count. + */ + u8 lane_assignment_options; /* + * Reg 89: Bits 0-7 form a bit map corresponding + * to Host Lanes 1-8. + * A set bit indicates that the Application + * may begin on the corresponding host lane. + */ + u8 resv2[38]; /* Reg 90-127 */ +} qsfp_cmis_lower_page_00_s; + +typedef struct { + u8 identifier; /* Reg 128: Type of Serial Module - See SFF-8024. */ + u8 vendor_name[16]; /* Reg 129-144: Vendor name (ASCII) */ + u8 vendor_oui[3]; /* Reg 145-147: Vendor IEEE company ID */ + u8 vendor_pn[16]; /* Reg 148-163: Part number provided by vendor (ASCII) */ + u8 vendor_rev[2]; /* Reg 164-165: Revision level for part number provided + * by vendor (ASCII) + */ + u8 vendor_sn[16]; /* Reg 166-181: Vendor Serial Number (ASCII) */ + u8 date_code[8]; /* Reg 182-189: Vendor's manufacturing date code */ + u8 clei_code[10]; /* Reg 190-199: Common Language Equipment + * Identification code */ + u8 power_character[2]; /* Reg 200-201: Module power characteristics */ + u8 cable_len; /* + * Reg 202: bit 7-6: Length multiplier field + * (Copper or active cable). + * Reg 202: bit 5-0: Link length base value in meters. + * To calculate actual link length use multiplier + * in bits 7-6. + */ + u8 connector; /* Reg 203: Type of connector present in the module. 
+ * See SFF-8024 for codes + */ + u8 copper_cable_attenuation[6]; /* Reg 204-209: Copper Cable Attenuation */ + u8 near_end_implementation; /* Reg 210: Cable Assembly Lane Information, + * Near end implementation + */ + u8 far_end_config; /* Reg 211: Cable Assembly Lane Information: + * Far End Configuration + */ + u8 media_technology; /* Reg 212: Media Interface Technology encodings */ + u8 resv0[43]; /* Reg 213-255 */ +} qsfp_cmis_upper_page_00_s; + +typedef struct { + u8 firmware_rev[2]; /* Reg 128-129: Numeric representation of + * inactive module firmware revision + */ + u8 hardware_rev[2]; /* Reg 130-131: Numeric representation of + * module hardware revision + */ + u8 smf_len_km; /* + * Reg 132: bit 7-6: Link length multiplier for SMF fiber. + * 00 = 0.1 (1 to 6.3 km) + * 01 = 1 (1 to 63 km) + * 10, 11 = reserved + * Reg 132: bit 5-0: Base link length for SMF fiber. + * Must be multiplied by value in bits 7-6 + * to calculate actual link length in km. + */ + u8 om5_len; /* Reg 133: Link length supported for OM5 fiber, + * units of 2 m + */ + u8 om4_len; /* Reg 134: Link length supported for OM4 fiber, + * units of 2 m + */ + u8 om3_len; /* Reg 135: Link length supported for EBW 50/125 + * µm fiber (OM3), units of 2m + */ + u8 om2_len; /* Reg 136: Link length supported for 50/125 + * µm fiber (OM2), units of 1m + */ + u8 resv0; /* Reg 137: Reserved */ + u8 wavelength[2]; /* Reg 138-139: Nominal laser wavelength + * (Wavelength = value / 20 in nm) + */ + u8 wavelength_tolerance[2]; /* + * Reg 140-141: Guaranteed range of laser + * wavelength (+/- value) from Nominal wavelength. + * (Wavelength Tol. = value/200 in nm). + */ + u8 pages_implement; /* Reg 142: Implemented pages advertising */ + u8 resv1[16]; /* Reg 143-158 */ + u8 monitor_implement[2]; /* + * Reg 159-160: Implemented Monitors Advertisement. + * Reg 159: bit 7-6: Reserved. 
+ * bit 5: Custom monitor implemented + * bit 4-2: Aux3 ~ Aux1 monitor implemented + * bit 1: Internal 3.3 Volts monitor implemented + * bit 0: Temperature monitor implemented + * Reg 160: bit 7-5: Reserved. + * bit 4-3: Tx Bias current measurement + * and threshold multiplier. + * 00b = multiply x1 + * 01b = multiply x2 + * 10b = multiply x4 + * 11b = reserved + * bit 2: Rx Optical Input Power monitor implemented + * bit 1: Tx Output Optical Power monitor implemented + * bit 0: Tx Bias monitor implemented + */ + u8 resv2[95]; /* Reg 161-255 */ +} qsfp_cmis_upper_page_01_s; + +typedef struct { + u8 temperature_high_alarm[2]; /* Reg 128-129: Threshold for internally + * measured temperature monitor + */ + u8 temperature_low_alarm[2]; /* Reg 130-131: Threshold for internally + * measured temperature monitor + */ + u8 temperature_high_warn[2]; /* Reg 132-133: Threshold for internally + * measured temperature monitor + */ + u8 temperature_low_warn[2]; /* Reg 134-135: Threshold for internally + * measured temperature monitor + */ + u8 volt_high_alarm[2]; /* + * Reg 136-137: Thresholds for internally measured 3.3 volt + * input supply voltage: in 100 µV increments. + */ + u8 volt_low_alarm[2]; /* + * Reg 138-139:Thresholds for internally measured 3.3 volt + * input supply voltage: in 100 µV increments. + */ + u8 volt_high_warn[2]; /* + * Reg 140-141: Thresholds for internally measured 3.3 volt + * input supply voltage: in 100 µV increments. + */ + u8 volt_low_warn[2]; /* + * Reg 142-143: Thresholds for internally measured 3.3 volt + * input supply voltage: in 100 µV increments. 
+ */ + u8 resv0[32]; /* Reg 144-175: Upper Memory: Page 02H */ + u8 tx_power_high_alarm[2]; /* Reg 176-177: Threshold for Tx optical power monitor */ + u8 tx_power_low_alarm[2]; /* Reg 178-179: Threshold for Tx optical power monitor */ + u8 tx_power_high_warn[2]; /* Reg 180-181: Threshold for Tx optical power monitor */ + u8 tx_power_low_warn[2]; /* Reg 182-183: Threshold for Tx optical power monitor */ + u8 tx_bias_high_alarm[2]; /* + * Reg 184-185: Threshold for Tx bias monitor: + * unsigned integer in 2uA increments, + * times the multiplier from Table 8-33. + */ + u8 tx_bias_low_alarm[2]; /* + * Reg 186-187: Threshold for Tx bias monitor: + * unsigned integer in 2uA increments, + * times the multiplier from Table 8-33. + */ + u8 tx_bias_high_warn[2]; /* + * Reg 188-189: Threshold for Tx bias monitor: + * unsigned integer in 2uA increments, + * times the multiplier from Table 8-33. + */ + u8 tx_bias_low_warn[2]; /* + * Reg 190-191: Threshold for Tx bias monitor: + * unsigned integer in 2uA increments, + * times the multiplier from Table 8-33. 
+ */ + u8 rx_power_high_alarm[2]; /* Reg 192-193: Threshold for Rx optical power monitor */ + u8 rx_power_low_alarm[2]; /* Reg 194-195: Threshold for Rx optical power monitor */ + u8 rx_power_high_warn[2]; /* Reg 196-197: Threshold for Rx optical power monitor */ + u8 rx_power_low_warn[2]; /* Reg 198-199: Threshold for Rx optical power monitor */ + u8 resv1[56]; /* Reg 200-255 */ +} qsfp_cmis_upper_page_02_s; + +typedef struct { + u8 resv0[QSFP_CMIS_PAGE_SIZE]; /* Reg 128-255: Upper Memory: Page 03H */ +} qsfp_cmis_upper_page_03_s; + +typedef struct { + u8 resv0[2]; /* Reg 128-129: Upper Memory: Page 10H */ + u8 tx_disable; /* Reg 130: Tx disable, 0b=enabled, 1b=disabled */ + u8 resv1[125]; /* Reg 131-255 */ +} qsfp_cmis_upper_page_10_s; + +typedef struct { + u8 resv0[7]; /* Reg 128-134: Upper Memory: Page 11H */ + u8 tx_fault; /* Reg 135: Latched Tx Fault flag, media lane 1 ~ 8 */ + u8 tx_los; /* Reg 136: Latched Tx LOS flag, lane 1 ~ 8 */ + u8 tx_cdr_lol; /* Reg 137: Latched Tx CDR LOL flag, lane 1 ~ 8 */ + u8 resv1[9]; /* Reg 138-146 */ + u8 rx_los; /* Reg 147: Latched Rx LOS flag, media lane 1 ~ 8. Clear on Read */ + u8 rx_cdr_lol; /* Reg 148: Latched Rx CDR LOL flag, media lane 1 ~ 8. + * Clear on Read + */ + u8 resv2[5]; /* Reg 149-153 */ + u8 tx_power[16]; /* Reg 154-169: Internally measured Tx output optical power */ + u8 tx_bias[16]; /* Reg 170-185: Internally measured Tx bias current monitor: + * unsigned integer in 2uA increments, + * times the multiplier from Table 8-33(Page 01H: Reg 160). 
+ */ + u8 rx_power[16]; /* Reg 186-201: Internally measured Rx input optical power */ + u8 resv3[54]; /* Reg 202-255 */ +} qsfp_cmis_upper_page_11_s; + +typedef struct { + u8 resv0[2]; /* Reg 128-129: Upper Memory: Page 13H */ + u8 diagnostic_reporting_capabilities; /* Reg 130: The diagnostic reporting + * capabilities of the module are advertised + */ + u8 resv1[125]; /* Reg 131-255 */ +} qsfp_cmis_upper_page_13_s; + +#define CMIS_LANE_NUM 8 +typedef struct ber_lane_s { + u8 ber[CMIS_LANE_NUM][2]; +} ber_lane; + +typedef struct { + u8 diagnostics_selector; /* Reg 128: This selects the content of the data + * in bytes 192-255: Page 14H + */ + u8 resv0; /* Reg 129 */ + u8 custom[2]; /* Reg 130-131 */ + u8 latched_diagnostics_flags[8]; /* Reg 132-139 */ + u8 resv1[52]; /* Reg 140-191 */ + union { + u8 contents[64]; + struct { + u8 host_peak_detect_lane1[2]; /* Reg 192-193 */ + u8 host_peak_detect_lane2[2]; + u8 host_peak_detect_lane3[2]; + u8 host_peak_detect_lane4[2]; + u8 host_peak_detect_lane5[2]; + u8 host_peak_detect_lane6[2]; + u8 host_peak_detect_lane7[2]; + u8 host_peak_detect_lane8[2]; + u8 host_snr_lane1[2]; /* Reg 208-209 */ + u8 host_snr_lane2[2]; + u8 host_snr_lane3[2]; + u8 host_snr_lane4[2]; + u8 host_snr_lane5[2]; + u8 host_snr_lane6[2]; + u8 host_snr_lane7[2]; + u8 host_snr_lane8[2]; + u8 media_peak_detect_lane1[2]; /* Reg 224-225 */ + u8 media_peak_detect_lane2[2]; + u8 media_peak_detect_lane3[2]; + u8 media_peak_detect_lane4[2]; + u8 media_peak_detect_lane5[2]; + u8 media_peak_detect_lane6[2]; + u8 media_peak_detect_lane7[2]; + u8 media_peak_detect_lane8[2]; + u8 media_snr_lane1[2]; /* Reg 240-241 */ + u8 media_snr_lane2[2]; + u8 media_snr_lane3[2]; + u8 media_snr_lane4[2]; + u8 media_snr_lane5[2]; + u8 media_snr_lane6[2]; + u8 media_snr_lane7[2]; + u8 media_snr_lane8[2]; + } snr_and_peak_detect; + + struct { + ber_lane host_ber; /* Reg 192-193 */ + ber_lane media_ber; /* Reg 208-209 */ + } host_and_media_ber; + } err_info; /* Reg 192-255: Contents 
defined by Diagnostics Selector */ +} qsfp_cmis_upper_page_14_s; + +typedef struct { + u8 resv0[16]; /* Reg 128-143: Upper Memory: Page B7H */ + u8 ret_loss_status[8]; /* Reg 144-151: Optical link return + * loss detection status on lane 1~8 + */ + u8 ret_loss_overview; /* Reg 152: Optical link return loss detection overview */ + u8 resv1[103]; /* Reg 153-255 */ +} qsfp_cmis_upper_page_b7_s; + +typedef struct { + u8 minimum_value[2]; /* LPL PM data 2 bytes – minimum value */ + u8 average_value[2]; /* LPL PM data 2 bytes – average (mean) value */ + u8 maximum_value[2]; /* LPL PM data 2 bytes – maximum value */ +} cdb_pam4_ltp_6bytes; + +typedef struct { + u8 minimum_value[2]; /* LPL PM data 2 bytes – minimum value */ + u8 average_value[2]; /* LPL PM data 2 bytes – average (mean) value */ + u8 maximum_value[2]; /* LPL PM data 2 bytes – maximum value */ + u8 current_value[2]; /* LPL PM data 2 bytes – an optional current value */ +} cdb_pam4_ltp_8bytes; + +typedef struct { + u8 rsv[2]; /* 136~137 保留 */ + u8 max_ref_pot; /* 138 通道1-8最大反射强度点是否有回损异常告警, + * bit0-7:标识通道1-8, + * 0:无异常告警,1代表有告警(建议清理链路) + */ + u8 sec_ref_pot; /* 139 通道1-8第二大反射强度点是否有回损异常告警, + * bit0-7:标识通道1-8, + * 0:无异常告警,1代表有告警(建议清理链路) + */ + u8 max_ref_val[8]; /* 140~147 通道1-8最大反射强度,无符号数据, + * 单位:-0.2 dB + * 例如:0x0A表示回损强度为-2db + */ + u8 max_ref_pos[8][3]; /* 148~171 通道1-8最大反射强度点的位置,无符号数据, + * 单位:0.1m, + * 上报0xFFFFFFh表示无效值(未检测到最大反射点) + */ + u8 sec_ref_val[8]; /* 172~179 通道1-8第二大反射强度,无符号数据, + * 单位:-0.2 dB + * 例如:0x0A表示回损强度为-2db + */ + u8 sec_ref_pos[8][3]; /* 180~203 通道1-8第二大反射强度点的位置, + * 无符号数据,单位:0.1m,上报0xFFFFFFh表示无效值 + *(未检测到最大反射点) + */ + u8 far_ref_val[8]; /* 204~211 通道1-8最远反射强度,无符号数据, + * 单位:-0.2 dB + * 例如:0x0A表示回损强度为-2db + */ + u8 far_ref_pos[8][3]; /* 212~235 通道1-8最远反射强度点的位置, + * 无符号数据,单位:0.1m, + * 上报0xFFFFFFh表示无效值(未检测到最大反射点) + */ + u8 peer_tx_alarm; /* 236 对端TX光口是否有告警,bit0-7:TX光口1-8, + * 0:无异常告警,1代表有告警(建议清理链路) + */ + u8 peer_tx_val[8]; /* 237~244 对端TX光口1-8告警程度,无符号数据 */ + u8 local_rx_alarm; /* 245 
本端RX光口是否有告警,bit0-7:RX光口1-8, + * 0:无异常告警,1代表有告警(建议清理链路) + */ + u8 local_rx_val[8]; /* 246~253 本端RX光口1-8告警程度,无符号数据 */ + u8 rsv1[2]; /* 254~255 保留 */ +} cdb_dirt_detection; + +typedef struct { + u8 resv0[8]; /* Reg 128-135: Upper Memory: Page 9FH */ + union { + u8 val[120]; /* Reg 136-255: LPL PM data 120 bytes */ + cdb_pam4_ltp_6bytes ltp6[8]; /* Reg 136-183: LPL PM data: lane1~8 PAM4 LTP, + * Each LTP contains 6 bytes of data. + */ + cdb_pam4_ltp_8bytes ltp8[8]; /* Reg 136-199: LPL PM data: lane1~8 PAM4 LTP, + * Each LTP contains 8 bytes of data. + */ + cdb_dirt_detection dirt0; /* Reg 136-255: 上报最大反射、 + * 第二大发射、最远反射以及TX光口检测信息. + */ + } lpl_pm_data; +} qsfp_cmis_upper_page_9f_s; + +/* 数值数据类型的默认字节序(存储顺序)在第8.1.3.5节中定义(大端序)。在非默认存储顺序(小端序)的情况下,必须明确指定非默认字节序。 */ +typedef struct { + qsfp_cmis_lower_page_00_s lower_page_00; /* QSFP-DD-CMIS lower page 00 128-byte data */ + qsfp_cmis_upper_page_00_s upper_page_00; /* QSFP-DD-CMIS upper page 00 128-byte data */ + qsfp_cmis_upper_page_01_s upper_page_01; /* QSFP-DD-CMIS upper page 01 128-byte data */ + qsfp_cmis_upper_page_02_s upper_page_02; /* QSFP-DD-CMIS upper page 02 128-byte data */ + qsfp_cmis_upper_page_10_s upper_page_10; /* QSFP-DD-CMIS upper page 10 128-byte data */ + qsfp_cmis_upper_page_11_s upper_page_11; /* QSFP-DD-CMIS upper page 11 128-byte data */ + qsfp_cmis_upper_page_13_s upper_page_13; /* QSFP-DD-CMIS upper page 13 128-byte data */ + qsfp_cmis_upper_page_14_s upper_page_14; /* QSFP-DD-CMIS upper page 14 128-byte data */ + qsfp_cmis_upper_page_9f_s upper_page_9f; /* QSFP-DD-CMIS upper page 9f 128-byte data */ + qsfp_cmis_upper_page_b7_s upper_page_b7; /* QSFP-DD-CMIS upper page b7 128-byte data */ +} qsfp_cmis_info_s; + +/* optical_speed */ +#define XSFP_MAC_SPEED_UNKNOWN 0 /* unknown */ +#define XSFP_MAC_SPEED_10M 10 /* 10 Mbps */ +#define XSFP_MAC_SPEED_100M 100 /* 100 Mbps */ +#define XSFP_MAC_SPEED_1G 1000 /* 1000 Mbps = 1 Gbps */ +#define XSFP_MAC_SPEED_10G 10000 /* 10000 Mbps = 10 Gbps */ +#define 
XSFP_MAC_SPEED_25G 25000 /* 25000 Mbps = 25 Gbps */ +#define XSFP_MAC_SPEED_40G 40000 /* 40000 Mbps = 40 Gbps */ +#define XSFP_MAC_SPEED_50G 50000 /* 50000 Mbps = 50 Gbps */ +#define XSFP_MAC_SPEED_100G 100000 /* 100000 Mbps = 100 Gbps */ +#define XSFP_MAC_SPEED_200G 200000 /* 200000 Mbps = 200 Gbps */ +#define XSFP_MAC_SPEED_400G 400000 /* 400000 Mbps = 400 Gbps */ +#define XSFP_MAC_SPEED_800G 800000 /* 800000 Mbps = 800 Gbps */ + +struct mag_bios_cfg { + u8 speed; /* enum of port speed */ + u8 auto_neg; /* 自协商开关 0 - 字段无效 1 - 开2 - 关 */ + u8 lanes; /* lane num */ + u8 fec; /* FEC模式, 参考 enum mag_cmd_port_fec */ + u8 auto_adapt; /* 自适应模式配置0 - 无效配置 1 - 开启 2 - 关闭 */ +}; + +#define VL_NUM 16 +#define LANE_NUM 8 + +typedef struct { + u32 ubmac_nl_id; + u32 ubmac_port_id; + + u32 ubmac_port_info; // PORT_INFO + u32 ubmac_port_link_sta; // PORT_LINK_STA + u32 ubmac_phy_link_status_1; // PHY_LINK_STA_1 + u32 ubmac_phy_link_status_2; // PHY_LINK_STA_2 + u32 ubmac_phy_link_status_3; // PHY_LINK_STA_3 + u32 ubmac_phy_link_err_status; // PHY_LINK_ERR_STA + u32 ubmac_fec_err_bit_num_low; // ST_MAC_FEC_ERR_BIT_NUM_LOW + u32 ubmac_fec_err_bit_num_high; // ST_MAC_FEC_ERR_BIT_NUM_HIGH + u32 ubmac_fec_decoding_fail_num_low; // ST_MAC_FEC_DECODING_FAIL_NUM_LOW + u32 ubmac_fec_decoding_fail_num_high; // ST_MAC_FEC_DECODING_FAIL_NUM_HIGH + + u32 ubmac_dl_vl_enable; // CFG_LINK_CAP_11 + u32 ubmac_dl_rx_flit_cnt[VL_NUM]; // RX_FLIT_CNT_0 ~ 15 + u32 ubmac_dl_tx_flit_cnt[VL_NUM]; // TX_FLIT_CNT_0 ~ 15 + u32 ubmac_dl_crd_exist_cnt[VL_NUM]; // ST_CRD_0 ~ 15 + u32 ubmac_dl_crd_compensate_cnt[VL_NUM]; // ST_CRD_COMPENSATE_0 ~ 15 + u32 ubmac_dl_crd_vn_return_limit[VL_NUM]; // DATA_LINK_PFM_OPT_CFG_0 ~ 7 + u32 ubmac_dl_crd_vn_l1crd_limit[VL_NUM]; // DATA_LINK_PFM_OPT_CFG_8 ~ 15 + + u32 ubmac_pfa_tx_fast_err_pkt_int_cnt; // PFA_TX_FAST_ERR_PKT_IN_CNT + u32 ubmac_pfa_tx_fast_pkt_int_cnt; // PFA_TX_FAST_PKT_IN_CNT + u32 ubmac_pfa_tx_norm_pkt_int_cnt; // PFA_TX_NORM_PKT_IN_CNT + u32 
ubmac_pfa_rx_pkt_int_cnt; // PFA_RX_PKT_IN_CNT + u32 ubmac_pfa_tx_norm_err_pkt_int_cnt; // PFA_TX_NORM_ERR_PKT_IN_CNT + u32 ubmac_pfa_rx_err_pkt_int_cnt; // PFA_RX_ERR_PKT_IN_CNT + u32 ubmac_pfa_tx_fast_short_pkt_cnt; // PFA_TX_FAST_SHORT_PKT_CNT + u32 ubmac_pfa_tx_fast_long_pkt_cnt; // PFA_TX_FAST_LONG_PKT_CNT + u32 ubmac_pfa_tx_fast_mode_err_pkt_cnt; // PFA_TX_FAST_MODE_ERR_PKT_CNT + u32 ubmac_pfa_tx_norm_short_pkt_cnt; // PFA_TX_NORM_SHORT_PKT_CNT + u32 ubmac_pfa_tx_norm_long_pkt_cnt; // PFA_TX_NORM_LONG_PKT_CNT + u32 ubmac_pfa_tx_norm_mode_err_pkt_cnt; // PFA_TX_NORM_MODE_ERR_PKT_CNT + u32 ubmac_pfa_rx_short_pkt_cnt; // PFA_RX_SHORT_PKT_CNT + u32 ubmac_pfa_rx_mode_err_pkt_cnt; // PFA_RX_MODE_ERR_PKT_CNT + u32 ubmac_pfa_dfx_tx_pkt_len_chk_en; // PFA_DFX_TX_PKT_LEN_CHK_EN + u32 ubmac_pfa_dfx_rx_pkt_len_chk_en; // PFA_DFX_RX_PKT_LEN_CHK_EN + u32 ubmac_pfa_dfx_pkt_info_chk_en; // PFA_DFX_PKT_INFO_CHK_EN + u32 ubmac_pfa_dfx_tx_fifo_state; // PFA_DFX_TX_FIFO_STATE + u32 ubmac_pfa_dfx_rx_fifo_state; // PFA_DFX_RX_FIFO_STATE + u32 ubmac_pfa_dfx_work_mode; // PFA_DFX_WORK_MODE + u32 ubmac_pfa_dfx_tx_adp_fifo_state; // PFA_DFX_TX_ADP_FIFO_STATE + u32 ubmac_pfa_tx_fast_lng_pkt_in_cnt; // PFA_TX_FAST_LNG_PKT_IN_CNT + u32 ubmac_pfa_tx_fast_lng_err_pkt_in_cnt; // PFA_TX_FAST_LNG_ERR_PKT_IN_CNT + u32 ubmac_pfa_tx_fast_lng_short_pkt_cnt; // PFA_TX_FAST_LNG_SHORT_PKT_CNT + u32 ubmac_pfa_tx_fast_lng_long_pkt_cnt; // PFA_TX_FAST_LNG_LONG_PKT_CNT + u32 ubmac_pfa_tx_fast_lng_mode_err_pkt_cnt; // PFA_TX_FAST_LNG_MODE_ERR_PKT_CNT + + u32 ubmac_pcs_lane_tsb_crc_cnt[LANE_NUM]; // ST_LANE0_TSB_CRC_CNT ~ 7 +} ubmac_counter_info_s; + +typedef struct { + struct mgmt_msg_head head; /* 8B */ + u8 nl_id; /* nl id 0~2 */ + u8 port_id; /* port id 0~7 */ + u8 rsvd; +} ubmac_counter_cmd_req; + +typedef struct { + struct mgmt_msg_head head; /* 8B */ + ubmac_counter_info_s ubmac_cnt_info; +} ubmac_counter_cmd_resp; + +/* 因为固件返回速率的字段为u8不能很好的承载200Ge以上数据,200Ge以上数据需要转换后显示 + 
可参考chip_attr_get_port_speed做转换 + 上述内容参考工具侧修改 +*/ +typedef enum { + PORT_SPEED_MODE_START = 200, + PORT_SPEED_MODE_400G = 201, + PORT_SPEED_MODE_800G = 202, + PORT_SPEED_MODE_END = 203, +} port_speed_mode_e; + +typedef enum { + PORT_SPEED_UNKOWN = 0, + PORT_SPEED_1G = 1, + PORT_SPEED_10G = 10, + PORT_SPEED_25G = 25, + PORT_SPEED_40G = 40, + PORT_SPEED_50G = 50, + PORT_SPEED_100G = 100, + PORT_SPEED_200G = 200, + PORT_SPEED_400G = 400, + PORT_SPEED_800G = 800 +} port_speed_e; + +struct speed_mode_map_s { + u8 speed_mode; + u32 real_speed; +}; + +#endif diff --git a/hinic5/src/dpu_develop_interface/fw_msg_intf/mpu/mpu_board_defs.h b/hinic5/src/dpu_develop_interface/fw_msg_intf/mpu/mpu_board_defs.h new file mode 100644 index 00000000..f9e56ee8 --- /dev/null +++ b/hinic5/src/dpu_develop_interface/fw_msg_intf/mpu/mpu_board_defs.h @@ -0,0 +1,144 @@ +/* ***************************************************************************** + * Copyright (c) Huawei Technologies Co., Ltd. 2022. All rights reserved. 
+ ****************************************************************************** + File Name : mpu_board_defs.h + Version : Initial Draft + Created : 2022/3/30 + Last Modified : + Description : COMM board info between Driver and MPU + Function List : +***************************************************************************** */ + +#ifndef COMM_BOARD_INFO_H +#define COMM_BOARD_INFO_H + +#define BOARD_TYPE_TEST_RANGE_START 1 /* 测试单板起始值(包含) */ +#define BOARD_TYPE_TEST_RANGE_END 29 /* 测试单板结束值(包含) */ +#define BOARD_TYPE_STRG_RANGE_START 30 /* 存储单板起始值(包含) */ +#define BOARD_TYPE_STRG_RANGE_END 99 /* 存储单板结束值(包含) */ +#define BOARD_TYPE_CAL_RANGE_START 100 /* 计算单板起始值(包含) */ +#define BOARD_TYPE_CAL_RANGE_END 169 /* 计算单板结束值(包含) */ +#define BOARD_TYPE_CLD_RANGE_START 170 /* 云单板起始值(包含) */ +#define BOARD_TYPE_CLD_RANGE_END 239 /* 云单板结束值(包含) */ +#define BOARD_TYPE_RSVD_RANGE_START 240 /* 预留的单板起始值(包含) */ +#define BOARD_TYPE_RSVD_RANGE_END 255 /* 预留的单板结束值(包含) */ + +typedef enum { + BOARD_TYPE_MPU_DEFAULT = 0, /* Default config */ + BOARD_TYPE_TEST_EVB_4X25G = 1, /* EVB Board */ + BOARD_TYPE_TEST_CEM_2X100G = 2, /* 2X100G CEM Card */ + BOARD_TYPE_TEST_EVB1_DDIE_2X200G = 3, /* 2X200G EVB1 DDIE Card */ + BOARD_TYPE_TEST_EVB1_SMDIE_1X200G = 4, /* 1X200G EVB1 SMDIE Card */ + BOARD_TYPE_TEST_EVB1_SSDIE_1X200G = 5, /* 1X200G EVB1 SSDIE Card */ + BOARD_TYPE_TEST_EVB2_DDIE_4X100G = 6, /* 4X100G EVB2 DDIE Card */ + BOARD_TYPE_TEST_EVB2_SMDIE_2X100G = 7, /* 2X100G EVB2 SMDIE Card */ + BOARD_TYPE_TEST_EVB2_SSDIE_2X100G = 8, /* 2X100G EVB2 SSDIE Card */ + BOARD_TYPE_TEST_EVB3_DDIE_4X25G = 9, /* 4X25G EVB3 DDIE Card */ + BOARD_TYPE_TEST_EVB3_SMDIE_2X25G = 10, /* 2X25G EVB3 SMDIE Card */ + BOARD_TYPE_TEST_EVB3_SSDIE_2X25G = 11, /* 2X25G EVB3 SSDIE Card */ + BOARD_TYPE_TEST_EVBS_SDIE_4X25G = 12, /* 4X25G EVBS SDIE Card */ + BOARD_TYPE_TEST_EVBS_SDIE_2X100G = 13, /* 2X100G EVBS SDIE Card */ + BOARD_TYPE_TEST_EVB1_2X400G = 14, /* 2X400G EVB1 Card */ + BOARD_TYPE_TEST_EVB2_1X800G = 15, /* 1X800G 
EVB2 Card */ + BOARD_TYPE_TEST_EVB3_2X200G_4x50G = 16, /* 2X200G_4X50G EVB3 Card */ + + BOARD_TYPE_STRG_SMARTIO_4X32G_FC = 30, /* 4X32G SmartIO FC Card */ + BOARD_TYPE_STRG_SMARTIO_4X25G_TIOE = 31, /* 4X25GE SmartIO TIOE Card */ + BOARD_TYPE_STRG_SMARTIO_4X25G_ROCE = 32, /* 4X25GE SmartIO ROCE Card */ + BOARD_TYPE_STRG_SMARTIO_4X25G_ROCE_AA = 33, /* 4X25GE SmartIO ROCE_AA Card */ + BOARD_TYPE_STRG_SMARTIO_4X25G_SRIOV = 34, /* 4X25GE SmartIO container Card */ + BOARD_TYPE_STRG_SMARTIO_4X25G_SRIOV_SW = 35, /* 4X25GE SmartIO container + * switch Card + */ + BOARD_TYPE_STRG_4X25G_COMSTORAGE = 36, /* 4X25GE compute storage + * Onboard Card + */ + BOARD_TYPE_STRG_SMARTIO_4X25G_OVS = 37, /* 4x25GE SmartIO卡OVS */ + BOARD_TYPE_STRG_2X100G_TIOE = 40, /* 2X100G SmartIO TIOE Card */ + BOARD_TYPE_STRG_2X100G_ROCE = 41, /* 2X100G SmartIO ROCE Card */ + BOARD_TYPE_STRG_2X100G_ROCE_AA = 42, /* 2X100G SmartIO ROCE_AA Card */ + BOARD_TYPE_STRG_2X100G_OVS = 43, /* 2x100GE SmartIO卡OVS */ + BOARD_TYPE_STRG_2X100G_TIOE_ATLANTIC = 44, /* 2X100GE SmartIO卡TIOE */ + BOARD_TYPE_STRG_4X25G_TIOE_ATLANTIC = 45, /* 4X25GE SmartIO卡TIOE */ + BOARD_TYPE_STRG_2X100G_TIOE_SMARTNIC = 46, /* 2X100GE ETH标卡TIOE */ + BOARD_TYPE_STRG_2X25G_TIOE_SMARTNIC = 47, /* 2X25GE ETH标卡TIOE */ + BOARD_TYPE_STRG_2X25G_OVS_SMARTNIC = 48, /* 2x25GE ETH标卡OVS */ + BOARD_TYPE_STRG_2X100G_ROCE_SMARTNIC = 49, /* 2X100GE ETH标卡ROCE */ + BOARD_TYPE_STRG_SMARTIO_2X25G_COMPUTE = 50, /* 2x25GE SmartIO卡ROCE */ + BOARD_TYPE_STRG_2X200G_ROCE_ATLANTIC = 51, /* 2X200G SmartIO卡ROCE */ + BOARD_TYPE_STRG_4X25G_OVS_LITE = 52, /* 4x25GE SmartIO卡OVS LITE */ + BOARD_TYPE_STRG_2X200G_ROCE = 53, /* 2X200G SmartIO卡ROCE */ + BOARD_TYPE_STRG_SMARTIO_4X64G_FC = 54, /* 4X64G FC SmartIO卡 */ + BOARD_TYPE_STRG_2X100G_ROCE_SRIOV = 55, /* 2X100GE SmartIO卡前端容器卡 */ + BOARD_TYPE_STRG_2X100G_ROCE_SRIOV_SW = 56, /* 2X100GE SmartIO卡环回容器卡 */ + BOARD_TYPE_STRG_2X200G_ROCE_SRIOV = 57, /* 2X200GE SmartIO卡前端容器卡 */ + BOARD_TYPE_STRG_2X200G_ROCE_SRIOV_SW = 58, /* 
2X200GE SmartIO卡环回容器卡 */ + BOARD_TYPE_STRG_2X200G_TIOE = 59, /* 2X200G SmartIO卡TIOE */ + BOARD_TYPE_STRG_DPU_A_SECURE = 60, /* 存储安全卡DPU-A */ + BOARD_TYPE_STRG_2X25G_DPU_A_FUNCTION = 61, /* 2X25G存储功能卡DPU-A */ + BOARD_TYPE_STRG_2X25G_DPU_TIOE = 62, /* 2X25G DPU卡TIOE */ + BOARD_TYPE_STRG_2X200G_DPU_ROCE = 63, /* 2X200G DPU卡ROCE */ + BOARD_TYPE_STRG_4X25G_ROCE_SRIOV = 76, /* 4X25GE SmartIO卡前端容器卡 */ + BOARD_TYPE_STRG_2X200G_ROCE_AA = 77, /* 2X200G SmartIO卡ROCE_AA */ + BOARD_TYPE_STRG_8X25G_TIOE = 78, /* 8X25GE SmartIO卡TIOE */ + BOARD_TYPE_STRG_2X200G_OVS_LITE = 79, /* 2x200GE SmartIO卡OVS LITE */ + BOARD_TYPE_STRG_2X25G_ROCE_SMARTNIC = 80, /* 2X25GE ETH标卡ROCE */ + BOARD_TYPE_STRG_2X200G_ROCE_SMARTNIC = 81, /* 2X200GE ETH标卡ROCE */ + BOARD_TYPE_STRG_2X200G_TIOE_SMARTNIC = 82, /* 2X200GE ETH标卡TIOE */ + BOARD_TYPE_STRG_2X100G_OVS_SMARTNIC = 83, /* 2x100GE ETH标卡OVS */ + BOARD_TYPE_STRG_2X100G_ROCE_SMARTNIC_LIFT = 84, /* 2X100GE ETH标卡ROCE */ + BOARD_TYPE_STRG_2X100G_TIOE_SMARTNIC_LIFT = 85, /* 2X100GE ETH标卡TIOE */ + + BOARD_TYPE_CAL_2X25G_NIC_75MPPS = 100, /* 2X25G ETH Standard card 75MPPS */ + BOARD_TYPE_CAL_2X25G_NIC_40MPPS = 101, /* 2X25G ETH Standard card 40MPPS */ + BOARD_TYPE_CAL_2X100G_DPU = 102, /* 2X100G DPU card */ + BOARD_TYPE_CAL_4X25G_NIC_120MPPS = 105, /* 4X25G ETH Standard card 120MPPS */ + BOARD_TYPE_CAL_4X25G_COMSTORAGE = 106, /* 4X25GE compute storage + * Onboard Card + */ + BOARD_TYPE_CAL_2X32G_FC_HBA = 110, /* 2X32G FC HBA card */ + BOARD_TYPE_CAL_2X16G_FC_HBA = 111, /* 2X16G FC HBA card */ + BOARD_TYPE_CAL_2X100G_NIC_120MPPS = 115, /* 2X100G ETH Standard card 120MPPS */ + BOARD_TYPE_CAL_2X25G_DPU = 116, /* 2x25G DPU Card */ + BOARD_TYPE_CAL_2X100G_TCE = 117, /* 2X100G TCE板载卡 */ + BOARD_TYPE_CAL_4X25G_DPU = 118, /* 4x25G DPU Card */ + BOARD_TYPE_CAL_4X25G_SMARTNIC = 119, /* 4X25G SmartIO卡 */ + BOARD_TYPE_CAL_2X100G_SMARTNIC = 120, /* 2X100G SmartIO卡 */ + BOARD_TYPE_CAL_6X25G_DPU = 121, /* 6X25G DPU */ + BOARD_TYPE_CAL_4X25G_DPU_BD = 122, /* 4*25G DPU大数据卡 */ 
+ BOARD_TYPE_CAL_2X25G_NIC_4HOST = 123, /* 2*25GE天工4HOST标卡 */ + BOARD_TYPE_CAL_2X200G_NIC_120MPPS = 124, /* 2X200G ETH标卡 120MPPS */ + BOARD_TYPE_CAL_2X10G_NIC_LOW = 125, /* 2X10G ETH标卡低功耗 */ + BOARD_TYPE_CAL_2X200G_SMARTNIC = 126, /* 2*200G SmartIO卡 */ + BOARD_TYPE_CAL_2X200G_NIC_INTERNET = 127, /* 2*200G 互联网卡 */ + BOARD_TYPE_CAL_2X100G_NIC_INTERNET = 128, /* 2*100G 互联网卡 */ + BOARD_TYPE_CAL_1X100GR2_OCP = 129, /* 1*100GR2 DSFP56接口OCP卡 */ + BOARD_TYPE_CAL_2X200G_DPU = 130, /* 2X200G DPU卡 */ + BOARD_TYPE_CAL_2X100_OCP = 131, /* 2*100G DSFP56/QSFP56接口OCP卡 */ + /* 2*400G UBC 1*8芯片半宽卡/2*8芯片半宽大卡 */ + BOARD_TYPE_CAL_2X400G_POD = 132, + BOARD_TYPE_CAL_2X400G_UB_EXP = 133, /* 2*400G A5 Sever卡 */ + BOARD_TYPE_CAL_2X200G_V2 = 134, /* 2*200G PCIE标卡 */ + BOARD_TYPE_CAL_1X400G = 135, /* 1X400G PCIE标卡 */ + + BOARD_TYPE_CAL_SP23X_2X10G = 136, /* 1872 2*10G PCIE标卡 */ + BOARD_TYPE_CAL_SP23X_2X25G = 137, /* 1872 2*25G PCIE标卡 */ + BOARD_TYPE_CAL_SP23X_2X100G = 138, /* 1872 2*100G PCIE标卡 */ + BOARD_TYPE_CAL_SP23X_2X100G_OCP = 139, /* 1872 2*100G OCP卡 */ + BOARD_TYPE_CAL_SP23X_1X200G = 140, /* 1872 1*200G PCIE标卡 */ + + BOARD_TYPE_CAL_2X400G_UBX_BOARD = 141, /* 1825 2*400G UBX BOARD */ + BOARD_TYPE_CAL_2X400G_UB_EXP_V1 = 142, /* 1825 2*400G UB EXP定制卡 */ + + BOARD_TYPE_CLD_2X100G_SDI5_1 = 170, /* 2X100G SDI 5.1 Card */ + BOARD_TYPE_CLD_2X25G_SDI5_0_LITE = 171, /* 2x25G SDI5.0 Lite Card */ + BOARD_TYPE_CLD_2X100G_SDI5_0 = 172, /* 2x100G SDI5.0 Card */ + BOARD_TYPE_CLD_2X200G_SDI6_0 = 173, /* 2x200G SDI6.0 Card */ + BOARD_TYPE_CLD_2X200G_UNIC = 174, /* 2x200G UNIC Card */ + BOARD_TYPE_CLD_4X25G_SDI5_0_C = 175, /* 4*25G SDI5.0.C卡 */ + BOARD_TYPE_CLD_2X200G_SDI6_1 = 176, /* 2x200G SDI6.1卡 */ + BOARD_TYPE_CLD_2X400G_SDI_BOX = 177, /* 2*400G 云智算SDI-BOX卡 */ + + BOARD_TYPE_MAX_INDEX = 0xFF, /* 卡的最大值 */ +} board_type_define_e; +#endif diff --git a/hinic5/src/dpu_develop_interface/fw_msg_intf/mpu/mpu_cmd_base_defs.h b/hinic5/src/dpu_develop_interface/fw_msg_intf/mpu/mpu_cmd_base_defs.h new file 
mode 100644 index 00000000..c5fae97e --- /dev/null +++ b/hinic5/src/dpu_develop_interface/fw_msg_intf/mpu/mpu_cmd_base_defs.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. + * Filename : mpu_cmd_base_defs.h + * Version : Initial Draft + * Creation time : 2023/09/22 + * Last Modified : + * Description : MPU common definitions + */ +#ifndef MPU_CMD_BASE_DEFS_H +#define MPU_CMD_BASE_DEFS_H + +#include "base_type.h" + +typedef enum { + RES_TYPE_FLUSH_BIT = 0, /* flush的function reset标志位 */ + RES_TYPE_MQM, /* mqm的function reset标志位 */ + RES_TYPE_SMF, /* smf的function reset标志位 */ + RES_TYPE_PF_BW_CFG, /* bw 配置的function reset标志位 */ + + RES_TYPE_COMM = 10, /* 公共的function reset标志位 */ + RES_TYPE_COMM_MGMT_CH, /* bw 配置的function reset标志位, + * clear mbox and aeq, The RES_TYPE_COMM bit must be set + */ + RES_TYPE_COMM_CMD_CH, /* bw 配置的function reset标志位, + * clear cmdq and ceq, The RES_TYPE_COMM bit must be set + */ + RES_TYPE_NIC, /* nic的function reset标志位 */ + RES_TYPE_OVS, /* ovs的function reset标志位 */ + RES_TYPE_VBS, /* vbsfunction reset标志位 */ + RES_TYPE_ROCE, /* roce的function reset标志位 */ + RES_TYPE_FC, /* fc的function reset标志位 */ + RES_TYPE_TOE, /* toe的function reset标志位 */ + RES_TYPE_IPSEC, /* ipsec的function reset标志位 */ + RES_TYPE_SMF_CACHE_INVALID, /* smf cache 无效的function reset标志位 */ + RES_TYPE_MAX, /*function reset标志位的最大值 */ +} func_reset_flag_e; /* func reset的flag ,用于指示清理哪种资源 */ + +#define DEVICE_TYPE_L2NIC 0 /* L2NIC 设备 */ +#define DEVICE_TYPE_NVME 1 /* nvme 设备 */ +#define DEVICE_TYPE_VIRTIO_NET 2 /* virtio net 设备 */ +#define DEVICE_TYPE_VIRTIO_BLK 3 /* virtio blk 设备 */ +#define DEVICE_TYPE_VIRTIO_VSOCK 4 /* virtio vsock 设备 */ +#define DEVICE_TYPE_VIRTIO_NET_TRANSITION 5 /* virtio net transition 设备 */ +#define DEVICE_TYPE_VIRTIO_BLK_TRANSITION 6 /* virtio blk transition 设备 */ +#define DEVICE_TYPE_VIRTIO_SCSI_TRANSITION 7 /* virtio scsi transition 设备 */ +#define DEVICE_TYPE_VIRTIO_HPC 8 /* virtio nhpc 设备 */ +#define 
DEVICE_TYPE_VIRTIO_FS 9 /* virtio fs 设备 */ + +/** + * @brief 判断是否为virtio net 设备 + * @param device:device type + * @return true or false + */ +#define MPU_DEVICE_IS_VIRTIO_NET(device) \ + (((device) == DEVICE_TYPE_VIRTIO_NET) || ((device) == DEVICE_TYPE_VIRTIO_NET_TRANSITION)) + +/** + * @brief 判断是否为virtio blk 设备 + * @param device:device type + * @return true or false + */ +#define MPU_DEVICE_IS_VIRTIO_BLK(device) \ + (((device) == DEVICE_TYPE_VIRTIO_BLK) || ((device) == DEVICE_TYPE_VIRTIO_BLK_TRANSITION)) + +/** + * @brief 判断是否为virtio scsi 设备 + * @param device:device type + * @return true or false + */ +#define MPU_DEVICE_IS_VIRTIO_SCSI(device) \ + ((device) == DEVICE_TYPE_VIRTIO_SCSI_TRANSITION) + +/** + * @brief 判断是否为virtio 存储 设备 + * @param device:device type + * @return true or false + */ +#define MPU_DEVICE_IS_VIRTIO_STORAGE(device) \ + (MPU_DEVICE_IS_VIRTIO_BLK(device) || MPU_DEVICE_IS_VIRTIO_SCSI(device)) + +/** + * @brief 判断是否为virtio 设备 + * @param device:device type + * @return true or false + */ +#define MPU_DEVICE_IS_VIRTIO(device) \ + (MPU_DEVICE_IS_VIRTIO_NET(device) || MPU_DEVICE_IS_VIRTIO_BLK(device) || \ + MPU_DEVICE_IS_VIRTIO_SCSI(device)) + +enum hinic5_svc_type { + SVC_T_COMM = 0, + SVC_T_NIC, + SVC_T_OVS, + SVC_T_ROCE, + SVC_T_TOE, + SVC_T_IOE, + SVC_T_FC, + SVC_T_VBS, + SVC_T_IPSEC, + SVC_T_VIRTIO, + SVC_T_MIGRATE, + SVC_T_PPA, + SVC_T_MAX, +}; + +#define MGMT_MSG_CMD_OP_SET 1 /* 设置命令 */ +#define MGMT_MSG_CMD_OP_GET 0 /* 获取命令 */ +#define MGMT_MSG_CMD_OP_START 1 /* 开始命令 */ +#define MGMT_MSG_CMD_OP_STOP 0 /* 停止命令 */ + +/* 获取die id,入参为mgmt_msg_head结构体 */ +#define MGMT_GET_DIE_ID(msg_head) \ + (((msg_head)->die_id_valid != 0) ? 
(msg_head)->die_id : 0) + +struct mgmt_msg_head { + u8 status; /* 响应消息的返回值 */ + u8 version; /* 消息的版本号 */ + u8 rep_aeq_num; /* response aeq number, unused for now */ + u8 rsvd0; /* 保留字段 */ + u8 die_id_valid : 1; /* 双die选择有效位(工具命令选择主/从die) */ + u8 die_id : 1; /* die_id_valid有效的前提下,die_id:0, 主die die_id:1, 从die */ + u8 rsvd1 : 6; /* 保留字段 */ + u8 rsvd2[3]; /* 保留字段 */ +}; + +enum hinic5_fw_ver_type { + HINIC5_FW_VER_TYPE_BOOT, /* BOOT固件 */ + HINIC5_FW_VER_TYPE_MPU, /* MPU固件 */ + HINIC5_FW_VER_TYPE_NPU, /* NPU固件 */ + HINIC5_FW_VER_TYPE_SMU_L0, /* SMU L0固件 */ + HINIC5_FW_VER_TYPE_SMU_L1, /* SMU L1固件 */ + HINIC5_FW_VER_TYPE_CFG, /* SMU 配置固件 */ + HINIC5_FW_VER_TYPE_PLATFORM, /* 基础平台 */ + HINIC5_FW_VER_TYPE_ROCE_SCC, /* roce scc固件 */ + HINIC5_FW_VER_TYPE_ROCE_SCC_CS, /* roce scc客户固件 */ + HINIC5_FW_VER_TYPE_ROCE_IMP, /* roce imp固件 */ + HINIC5_FW_VER_TYPE_UBC_IMP, /* ubc imp固件 */ + HINIC5_FW_VER_TYPE_IMP, /* imp固件 */ + HINIC5_FW_VER_TYPE_PSM, /* PSM固件 */ + HINIC5_FW_VER_TYPE_UBG_IMP, /* ubg imp固件 */ + HINIC5_FW_VER_TYPE_UB_SCC, /* ub scc固件 */ + HINIC5_FW_VER_TYPE_GRAY_NPU = 100, /* 灰卡 NPU固件 */ +}; + +#define PCIE_MODE_PORT_NUM 32 +#ifdef HI1825V100 +#define PCIE_MODE_HOST_NUM 6 +#else +#define PCIE_MODE_HOST_NUM 4 +#endif +#define PCIE_MODE_PF_NUM 32 +#define PCIE_MODE1_VF_NUM 128 +#define PCIE_MODE2_VF_NUM 256 +#define PCIE_MODE_HOST2PORT_MAP 4 +#define FLR_CUR_REG_NUM 128 +#define MPU_FLR_INTR_BIT_NUM 32 +#define FLR_STAT_CUR_REG_OFFSET 4 +#define PCIE_MODE_FLR_STAT_REG_NUM 8 +#define PCIE_FLR_MODE2_REG_OFFSET 64 + +#define PCIE_MODE_ALL_PORT_MAP 0x1111 + +#endif \ No newline at end of file diff --git a/hinic5/src/dpu_develop_interface/fw_msg_intf/mpu/mpu_inband_cmd.h b/hinic5/src/dpu_develop_interface/fw_msg_intf/mpu/mpu_inband_cmd.h new file mode 100644 index 00000000..7fb1655c --- /dev/null +++ b/hinic5/src/dpu_develop_interface/fw_msg_intf/mpu/mpu_inband_cmd.h @@ -0,0 +1,200 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. 
+ * Filename : mpu_inband_cmd.h + * Version : Initial Draft + * Creation time : 2023/09/22 + * Last Modified : + * Description : In-band commands between the driver and the MPU + */ + +#ifndef MPU_INBAND_CMD_H +#define MPU_INBAND_CMD_H + +/**< COMM Commands between Driver to MPU */ +enum hinic5_mgmt_cmd { + /**< flr及资源清理相关命令 */ + COMM_MGMT_CMD_FUNC_RESET = 0, /**< 驱动加卸载function reset清理资源 */ + COMM_MGMT_CMD_FEATURE_NEGO, /**< fw与驱动兼容性属性协商 */ + COMM_MGMT_CMD_FLUSH_DOORBELL, /**< 驱动卸载flush流程,清理资源 */ + COMM_MGMT_CMD_START_FLUSH, /**< 驱动加卸载flush握手 */ + COMM_MGMT_CMD_SET_FUNC_FLR, /**< 驱动主动通过mbox流程触发flr */ + COMM_MGMT_CMD_GET_GLOBAL_ATTR, /**< 获取sm全局表 */ + COMM_MGMT_CMD_SET_PPF_FLR_TYPE, /**< 设置ppf flr执行范围 */ + COMM_MGMT_CMD_SET_FUNC_SVC_USED_STATE, /**< 设置对应function驱动是否加载/使用 */ + COMM_MGMT_CMD_GET_FUNC_FLR_INFO, /**< 获取flr执行dfx信息 */ + + /**< 驱动中断资源 */ + COMM_MGMT_CMD_CFG_MSIX_NUM = 10, /**< 获取驱动msix中断信息 */ + + /**< 驱动相关配置命令 */ + COMM_MGMT_CMD_SET_CMDQ_CTXT = 20, /**< 设置cmdq cxt */ + COMM_MGMT_CMD_SET_VAT, /**< 设置vat表 */ + COMM_MGMT_CMD_CFG_PAGESIZE, /**< 配置root cxt 页面大小 */ + COMM_MGMT_CMD_CFG_MSIX_CTRL_REG, /**< 配置中断msix */ + COMM_MGMT_CMD_SET_CEQ_CTRL_REG, /**< 配置ceq */ + COMM_MGMT_CMD_SET_DMA_ATTR, /**< 配置驱动dma属性 */ + COMM_MGMT_CMD_SET_ENHANCE_CMDQ_CTXT, /**< 配置增强型cmdq */ + COMM_MGMT_CMD_GET_FUNC_SECURE_MEM, /**< 获取安全内存标识 */ + COMM_MGMT_CMD_SET_FUNC_PLUG_SRV, /**< 设置热拔插bmp */ + COMM_MGMT_CMD_GET_FUNC_PLUG_SRV, /**< 获取热拔插bmp */ + COMM_MGMT_CMD_SET_PPF_TBL_HTR_FLG, /**< 设置热替换标识 */ + COMM_MGMT_CMD_GET_FAST_MSG_CAP, /**< 获取fast msg能力 */ + COMM_MGMT_CMD_SET_FAST_MSG_RQ_ADDR, /**< 设置fast msg地址 */ + COMM_MGMT_CMD_CLEAR_FAST_MSG_SML, /**< 清除fast msg表项 */ + + /**< INFRA配置相关命令字 */ + COMM_MGMT_CMD_GET_MQM_FIX_INFO = 40, /**< mqm获取chunk num */ + COMM_MGMT_CMD_SET_MQM_CFG_INFO, /**< 从驱动接收ppf和page_size */ + COMM_MGMT_CMD_SET_MQM_SRCH_GPA, /**< 从驱动接收search gpa地址 */ + COMM_MGMT_CMD_SET_PPF_TMR, /**< 配置smf timer */ + COMM_MGMT_CMD_SET_HT_GPA, /**< 设置 ht gpa (bank gpa) 地址 */ + 
COMM_MGMT_CMD_SET_FUNC_TMR_BITMAT, /**< 使能smf timer */ + COMM_MGMT_CMD_SET_MBX_CRDT, /**< 设置mbox信用,dfx性能调优用 */ + COMM_MGMT_CMD_CFG_TEMPLATE, /**< 设置/获取温度告警阈值 */ + COMM_MGMT_CMD_SET_MQM_LIMIT, /**< 设置/获取mqm限速配置,1823V100盘古驱动使用. */ + COMM_MGMT_CMD_SET_BAT_INFO, /**< 设置bat信息 */ + COMM_MGMT_CMD_SET_VIO_EN, /**< 设置cpi MSI使能*/ + COMM_MGMT_CMD_CFG_DATA, /**< 配置模板中的Function相关参数 */ + + /**< 信息获取相关命令字 */ + COMM_MGMT_CMD_GET_FW_VERSION = 60, /**< 获取fw版本信息 */ + COMM_MGMT_CMD_GET_BOARD_INFO, /**< 获取单板信息 */ + COMM_MGMT_CMD_SYNC_TIME, /**< 同步驱动时间戳 */ + COMM_MGMT_CMD_GET_HW_PF_INFOS, /**< 获取硬件pf信息 */ + COMM_MGMT_CMD_SEND_BDF_INFO, /**< 接收驱动获取到的dbf信息 */ + COMM_MGMT_CMD_GET_VIRTIO_BDF_INFO, /**< virtio场景获取dbf信息 */ + COMM_MGMT_CMD_GET_SML_TABLE_INFO, /**< 获取smlb表项信息 */ + COMM_MGMT_CMD_GET_SDI_INFO, /**< 获取sdI信息(裸机/虚机) */ + COMM_MGMT_CMD_ROOT_CTX_LOAD, /**< 获取root cxt信息 */ + COMM_MGMT_CMD_GET_HW_BOND = 69, /* 1823V100 */ + COMM_MGMT_CMD_MPU_AND_NPU_VER = 70, /* 1823V100 */ + COMM_MGMT_CMD_GET_PF_BY_FUNC = 71, /* 1823V100 */ + COMM_MGMT_CMD_GET_PF_BUS_BY_DEV = 72, /* 1823V100 */ + + /**< 升级相关命令字 */ + COMM_MGMT_CMD_UPDATE_FW = 80, /**< fw升级 */ + COMM_MGMT_CMD_ACTIVE_FW, /**< fw冷激活 */ + COMM_MGMT_CMD_HOT_ACTIVE_FW, /**< fw热激活 */ + COMM_MGMT_CMD_HOT_ACTIVE_DONE_NOTICE, /**< fw热激活完成(当前未使用) */ + COMM_MGMT_CMD_SWITCH_CFG, /**< 配置文件切换(当前未使用) */ + COMM_MGMT_CMD_CHECK_FLASH, /**< 存储场景flash静默检测 */ + COMM_MGMT_CMD_CHECK_FLASH_RW, /**< 存储场景升级前 */ + COMM_MGMT_CMD_RESOURCE_CFG, /**< 原配置模板(当前未使用) */ + COMM_MGMT_CMD_UPDATE_BIOS, /**< bios升级命令(已废弃,bios升级和网卡升级合并) */ + COMM_MGMT_CMD_MPU_GIT_CODE, /**< 获取版本git号 */ + COMM_MGMT_QUERY_MODULE_IMAGES, /**< 镜像握手,获取固件信息 */ + COMM_MGMT_CMD_UPDATE_CUSTOM_FW = 98, /* 升级CUSTOM_FW固件,不可修改该值 */ + COMM_MGMT_CMD_ACTIVE_CUSTOM_FW = 99, /* 激活CUSTOM_FW固件 */ + + /**< chip reset相关 */ + COMM_MGMT_CMD_FAULT_REPORT = 100, /**< mpu发往驱动信息,异常告警 */ + COMM_MGMT_CMD_WATCHDOG_INFO, /**< mpu发往驱动信息,看门狗告警 */ + COMM_MGMT_CMD_MGMT_RESET, /**< mpu发往驱动信息,mpu LastWord */ + 
COMM_MGMT_CMD_FFM_SET, /**< mpu发往驱动消息,异常中断显示信息 */ + + /**< chip info/log 相关 */ + COMM_MGMT_CMD_GET_LOG = 120, /**< 获取固件log */ + COMM_MGMT_CMD_TEMP_OP, /**< 获取芯片温度 */ + COMM_MGMT_CMD_EN_AUTO_RST_CHIP, /**< 使能芯片跟随perst复位自动复位 */ + COMM_MGMT_CMD_CFG_REG, /**< 配置芯片寄存器(当前未使用) */ + COMM_MGMT_CMD_GET_CHIP_ID, /**< 获取芯片id(多芯片场景) */ + COMM_MGMT_CMD_SYSINFO_DFX, /**< 获取芯片软件系统dfx信息 */ + COMM_MGMT_CMD_PCIE_DFX_NTC, /**< 通知驱动收集pcie dfx信息 */ + COMM_MGMT_CMD_DICT_LOG_STATUS, /**< 获取log收集状态 */ + COMM_MGMT_CMD_MSIX_INFO, /**< 配置msix信息 */ + COMM_MGMT_CMD_CHANNEL_DETECT, /**< mbox通道探测 */ + COMM_MGMT_CMD_DICT_COUNTER_STATUS, /**< 获取flash counter计数 */ + COMM_MGMT_CMD_UCODE_SM_COUNTER, /**< 获取sm counter计数 */ + COMM_MGMT_CMD_CLEAR_LOG = 132, /* 1823V100 */ + COMM_MGMT_CMD_UCODE_SM_COUNTER_PER = 133, /* 1823V100 */ + + /**< switch workmode 相关 */ + COMM_MGMT_CMD_CHECK_IF_SWITCH_WORKMODE = 140, /**< 配置切换(卡内多份配置文件,已废弃) */ + COMM_MGMT_CMD_SWITCH_WORKMODE, /**< 切换工作模式(已废弃) */ + + /**< mpu 相关 */ + COMM_MGMT_CMD_MIGRATE_DFX_HPA = 150, /**< 热迁移hpa dfx */ + COMM_MGMT_CMD_BDF_INFO, /**< 获取pcie bdf号 */ + COMM_MGMT_CMD_NCSI_CFG_INFO_GET_PROC, /**< 获取ncsi配置信息 */ + COMM_MGMT_CMD_CPI_TCAM_DBG, /**< cpi tcam信息调试(当前暂未使用) */ + COMM_MGMT_CMD_LLDP_TX_FUNC_SET_PROC, /**< lldp使能 */ + COMM_MGMT_CMD_FUNC_ENABLE_INFO = 155, /* 1823V100 */ + COMM_MGMT_CMD_FUNC_VIRTIO_INFO = 156, /* 1823V100 */ + COMM_MGMT_CMD_NCSI_LOW_POWER_PROC, /* 1872V100 NCSI 低功耗使能 */ + + /**< rsvd0 section */ + COMM_MGMT_CMD_SECTION_RSVD_0 = 160, + COMM_MGMT_CMD_SWITCH_RESET_CFG, + + /**< rsvd1 section */ + COMM_MGMT_CMD_SECTION_RSVD_1 = 170, + + /**< rsvd2 section */ + COMM_MGMT_CMD_SECTION_RSVD_2 = 180, + + /**< rsvd3 section */ + COMM_MGMT_CMD_SECTION_RSVD_3 = 190, + COMM_MGMT_CMD_GET_INDIR_TABLE, /**< tool read indirect table */ + COMM_MGMT_CMD_SET_INDIR_TABLE, /**< tool write indirect table */ + + /**< move to DFT mode */ + COMM_MGMT_CMD_GET_TDIE_ID = 199, /**< 获取totem die id */ + COMM_MGMT_CMD_GET_UDIE_ID = 200, /**< 获取unic die id */ + 
COMM_MGMT_CMD_GET_EFUSE_TEST, /**< efuse测试(当前暂未使用) */ + COMM_MGMT_CMD_EFUSE_INFO_CFG, /**< 烧写efuse信息 */ + COMM_MGMT_CMD_GPIO_CTL, /**< gpio测试(当前暂未使用) */ + COMM_MGMT_CMD_HI30_SERLOOP_START, /**< hi30开始环回 */ + COMM_MGMT_CMD_HI30_SERLOOP_STOP, /**< hi30停止环回 */ + COMM_MGMT_CMD_HI30_MBIST_SET_FLAG, /**< hi30 bist测试(当前暂未使用) */ + COMM_MGMT_CMD_HI30_MBIST_GET_RESULT, /**< hi30 获取bist测试结果(当前暂未使用) */ + COMM_MGMT_CMD_ECC_TEST, /**< 芯片ecc测试 */ + COMM_MGMT_CMD_FUNC_BIST_TEST, /**< 芯片function bist测试 */ + + COMM_MGMT_CMD_VPD_SET = 210, /**< 写vpd信息到flash */ + COMM_MGMT_CMD_VPD_GET, /**< 工具读取flash vpd信息 */ + + COMM_MGMT_CMD_ERASE_FLASH, /**< 擦除flash(dfx版本开启) */ + COMM_MGMT_CMD_QUERY_FW_INFO, /**< 查询固件状态机信息 */ + COMM_MGMT_CMD_GET_CFG_INFO, /**< 获取配置信息(功能暂未开启) */ + COMM_MGMT_CMD_GET_UART_LOG, /**< 串口重定向输出 */ + COMM_MGMT_CMD_SET_UART_CMD, /**< 串口重定向输入 */ + COMM_MGMT_CMD_SPI_TEST, /**< spi测试(dfx版本开启) */ + + COMM_MGMT_CMD_HEART_EVENT, /**< 心跳检测(当前暂未使用) */ + COMM_MGMT_CMD_NCSI_OEM_GET_DRV_INFO, /**< 发往驱动的消息,获取bdf号(方案已替换) */ + COMM_MGMT_CMD_LASTWORD_GET, /**< 发往驱动的消息,mpu临终遗言 */ + COMM_MGMT_CMD_READ_BIN_DATA, /**< 获取固件bin数据(已废弃) */ + COMM_MGMT_CMD_GET_REG_VAL, /**< 读取寄存器(dfx版本开启) */ + COMM_MGMT_CMD_SET_REG_VAL, /**< 写寄存器(dfx版本开启) */ + + /**< COMM_MGMT_CMD_WWPN_GET, TBD: move to FC? */ + /**< COMM_MGMT_CMD_WWPN_SET, TBD: move to FC? 
229 */ + + /**< check if needed */ + COMM_MGMT_CMD_SET_VIRTIO_DEV = 230, /**< 设置virtio设备类型 */ + COMM_MGMT_CMD_SET_MAC, /**< 设置固化mac */ + COMM_MGMT_CMD_LOAD_PATCH, /**< load mpu patch(已废弃) */ + COMM_MGMT_CMD_REMOVE_PATCH, /**< 移除mpu patch(已废弃) */ + COMM_MGMT_CMD_PATCH_ACTIVE, /**< 激活mpu patch(已废弃) */ + COMM_MGMT_CMD_PATCH_DEACTIVE, /**< 去激活mpu patch(已废弃) */ + COMM_MGMT_CMD_PATCH_SRAM_OPTIMIZE, /**< patch空间刷新(已废弃) */ + COMM_MGMT_CMD_CONTAINER_HOST_PROC, /**< 存储容器场景,设置主host */ + COMM_MGMT_CMD_NCSI_COUNTER_PROC, /**< 获取ncsi counter信息 */ + COMM_MGMT_CMD_CHANNEL_STATUS_CHECK, /**< 存储场景,通道探测 */ + + /**< 热补丁保留命令字 */ + COMM_MGMT_CMD_RSVD_0 = 240, /**< 热补丁保留命令0 */ + COMM_MGMT_CMD_RSVD_1, /**< 热补丁保留命令1 */ + COMM_MGMT_CMD_RSVD_2, /**< 热补丁保留命令2 */ + COMM_MGMT_CMD_RSVD_3, /**< 热补丁保留命令3 */ + COMM_MGMT_CMD_SECTION_INTEGRITY, /**< 客户固件完整性验证 */ + + COMM_MGMT_CMD_SEND_API_ACK_BY_UP, /**< 无效字段,版本收编删除,编译使用 */ + + COMM_MGMT_CMD_GET_VER_COMPATIBLE_INFO = 254, /**< for tool ver compatible info */ + + /**< 注:添加cmd,不能修改已有命令字的值,请在前方rsv section中添加;原则上所有分支cmd表完全一致 */ + COMM_MGMT_CMD_MAX = 255, /**< */ +}; + +#endif diff --git a/hinic5/src/dpu_develop_interface/fw_msg_intf/mpu/mpu_inband_cmd_defs.h b/hinic5/src/dpu_develop_interface/fw_msg_intf/mpu/mpu_inband_cmd_defs.h new file mode 100644 index 00000000..e8232786 --- /dev/null +++ b/hinic5/src/dpu_develop_interface/fw_msg_intf/mpu/mpu_inband_cmd_defs.h @@ -0,0 +1,1840 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. 
+ * Filename : mpu_inband_cmd_defs.h + * Version : Initial Draft + * Creation time : 2023/09/22 + * Last Modified : + * Description : In-band command-related structures between the driver and the MPU + */ + +#ifndef MPU_INBAND_CMD_DEFS_H +#define MPU_INBAND_CMD_DEFS_H + +#include "mpu_cmd_base_defs.h" + +#define HARDWARE_ID_1XX3V200_TAG 32 /* 1xx3v200 tag */ +#define DUMP_16B_PER_LINE 16 /* dump 16byte对齐 */ +#define DUMP_4_VAR_PER_LINE 4 /* dump 单位4byte */ +#define FW_UPDATE_MGMT_TIMEOUT 3000000U /** mbox消息升级命令超时时间 */ + +#define FUNC_RESET_FLAG_MAX_VALUE ((1U << (RES_TYPE_MAX + 1)) - 1) /* func_reset_flag的边界值 */ +struct comm_cmd_func_reset { /* 驱动加卸载场景,function reset清理资源 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + u16 func_id; /* 需要reset的function id */ + u16 rsvd1[3]; /* 保留字段 */ + u64 reset_flag; /* 具体reset资源的bitmap */ +}; + +struct comm_cmd_ppf_flr_type_set { /* flr场景,设定ppf执行flr的范围 */ + struct mgmt_msg_head head; /* */ + + u16 func_id; /* flr的function id */ + u8 rsvd1[2]; /* 保留字段 */ + u32 ppf_flr_type; /* ppf执行flr的范围类型,0: 仅ffp, 1: 改ppf下的所有function */ +}; + +enum { + COMM_F_API_CHAIN = 1U << 0, /* 属性协商,cpi chain */ + COMM_F_CLP = 1U << 1, /* 属性协商,clp */ + COMM_F_CHANNEL_DETECT = 1U << 2, /* 属性协商,通道探测 */ + COMM_F_MBOX_SEGMENT = 1U << 3, /* 属性协商,mbox */ + COMM_F_CMDQ_NUM = 1U << 4, /* 属性协商,cmdq */ + COMM_F_VIRTIO_VQ_SIZE = 1U << 5, /* 属性协商,vio vq size */ + COMM_F_EXTEND_CAP = 1U << 6, /* 属性协商,能力集扩展 */ + COMM_F_SMF_CACHE_INVALID = 1U << 7, /* 属性协商,cache invalid */ + COMM_F_ONLY_ENHANCE_CMDQ = 1U << 8, /* 属性协商,增强型cmdq */ + COMM_F_USE_REAL_RX_BUF_SIZE = 1U << 9, /* 属性协商,use real rx buf */ + COMM_F_CMD_BUF_SIZE = 1U << 10, /* 属性协商,cmd buf size */ + COMM_F_HTN_CMD = 1U << 11, /* 属性协商,Hard Tile - NIC (硬件nic) */ + COMM_F_MBOX_MSG_HEAD_SUPP_VER1 = 1U << 12, /* 属性协商,mode扩展 */ + COMM_F_FAST_MSG = 1U << 13, /* 属性协商,fast msg */ + COMM_F_UFHD = 1U << 14, /* 属性协商,Update Firmware from Host DDR + * - 支持 DDR 微码热升级 + */ + COMM_F_VIRTIO_FC_CACHE_MODE = 1U << 15, /* 属性协商, + * 
驱动支持Virtio function context cache mode */ + COMM_F_NON_PTP_SYNC = 1U << 16, /* 属性协商,非ptp同步 */ + COMM_F_HT_GPA = 1U << 17, /* 属性协商,HT GPA (Bank GPA) */ + COMM_F_UFHD_FLEX_SEG = 1U << 18, /* 属性协商,UFHD 增加支持 segment size 协商能力。 + * 该特性不能与 COMM_F_UFHD 特性同时开启。 + * 为支持 segment size 协商, + * 需要开启 COMM_F_EXTEND_CAP。 + */ +}; + +/* mode扩展version */ +#define CHECK_COMM_F_SUPP_MBOX_MSG_HEAD_VER1(feature) \ + (((feature) & COMM_F_MBOX_MSG_HEAD_SUPP_VER1) > 0) + +enum { + COMM_PLUG_SRV_NIC = 0, /* plug nic */ + COMM_PLUG_SRV_VROCE, /* plug vroce */ + COMM_PLUG_SRV_UB, /* plug ub */ + COMM_PLUG_SRV_BUTT, +}; + +#define COMM_MAX_FEATURE_QWORD 4 +struct comm_cmd_feature_nego { /* 属性协商 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + u16 func_id; /* 指定function id */ + u8 opcode; /* 1: set, 0: get */ + u8 rsvd[5]; /* 保留域段 */ + u64 s_feature[COMM_MAX_FEATURE_QWORD]; /* 协商信息 */ +}; + +struct comm_cmd_clear_doorbell { /* 驱动卸载flush db */ + struct mgmt_msg_head head; /* mbox消息头 */ + + u16 func_id; /* 指定function id */ + u16 rsvd1[3]; /* 保留域段 */ +}; + +struct comm_cmd_clear_resource { /* 驱动卸载flush流程,清理资源 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + u16 func_id; /* 指定function id */ + u16 rsvd1[3]; /* 保留域段 */ +}; + +struct comm_global_attr { /* 获取芯片全局属性信息 */ + u8 max_host_num; /* 最大host个数 */ + u8 max_pf_num; /* 最大pf个数 */ + u16 vf_id_start; /* 起始vf id */ + + u8 mgmt_host_node_id; /* 管理host节点id */ + u8 cmdq_num; /* cmdq个数 */ + u16 cmd_buf_size; /* cmd buff size */ + + u32 rsvd2[8]; /* 保留域段 */ +}; + +struct comm_cmd_heart_event { /* mpu与驱动之间的mbox心跳事件 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + u8 init_sta; /* 0: mpu init ok, 1: mpu init error */ + u8 rsvd1[3]; /* 保留域段 */ + u32 heart; /* 心跳标识 */ + u32 heart_handshake; /* should be always: 0x5A5A5A5A */ +}; + +struct comm_cmd_channel_detect { /* 通道探测 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + u16 func_id; /* 指定function id */ + u16 rsvd1[3]; /* 保留域段1 */ + u32 rsvd2[2]; /* 保留域段2 */ +}; + +struct comm_cmd_func_svc_used_state { /* 
function使用状态 */ + struct mgmt_msg_head head; /* mbox消息头 */ + u16 func_id; /* 指定function id */ + u16 svc_type; /* service类型(暂未使用) */ + u8 used_state; /* 使用状态 */ + u8 rsvd[35]; /* */ +}; + +struct comm_cmd_get_flr_info { /* flrdx信息 */ + struct mgmt_msg_head head; /* mbox消息头 */ + u16 func_id; /* 指定function id */ + u8 flr_valid; /* flr有效位 */ + u8 flr_step; /* flr状态机 */ + u32 flr_used_time_ms; /* flr耗时时间 */ + u16 max_flr_time_func_id; /* 耗时最长function */ + u32 max_flr_used_time_ms; /* flr最长耗时 */ + u8 rsvd[30]; /* 保留域段 */ +}; + +struct sml_table_id_info { /* sml表信息 */ + u8 node_id; /* 节点id */ + u8 instance_id; /* instance id */ +}; + +struct comm_cmd_get_sml_tbl_data { /* sml表内容 */ + struct mgmt_msg_head head; /*mbox消息头 */ + u8 tbl_data[512]; /* sml payload */ +}; + +struct comm_cmd_get_glb_attr { /* 获取芯片全局信息 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + struct comm_global_attr attr; /* 全局信息 */ +}; + +#define HINIC5_FW_VERSION_LEN 16 /* version长度 */ +#define HINIC5_FW_COMPILE_TIME_LEN 20 /* 时间长度 */ + +struct comm_cmd_get_fw_version { /* 获取固件版本号 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + u16 fw_type; /* 固件类型 */ + u16 fw_dfx_vld : 1; /* 版本类型,0: release, 1: debug */ + u16 rsvd1 : 15; /* 保留域段 */ + char ver[HINIC5_FW_VERSION_LEN]; /* 版本 */ + char time[HINIC5_FW_COMPILE_TIME_LEN]; /* 时间 */ +}; + +struct cmdq_ctxt_info { /* hardware define: cmdq context */ + u64 curr_wqe_page_pfn; /* wqe页面信息 */ + u64 wq_block_pfn; /* wqe地址 */ +}; + +struct comm_cmd_cmdq_ctxt { /* 配置cmdq context */ + struct mgmt_msg_head head; /* mbox消息头 */ + + u16 func_id; /* 指定function id */ + u8 cmdq_id; /* cmdq id */ + u8 rsvd1[5]; /* 保留域段 */ + + struct cmdq_ctxt_info ctxt; /* ctx信息 */ +}; + +struct enhance_cmdq_ctxt_info { /* hardware define: enhance cmdq context */ + u64 eq_cfg; /* eq cfg */ + u64 dfx_pi_ci; /* 指针pi ci */ + + u64 pft_thd; /* pft thd */ + u64 pft_ci; /* pft ci */ + + u64 rsv; /* 保留域段 */ + u64 ci_cla_addr; /* cla地址 */ +}; + +struct comm_cmd_enhance_cmdq_ctxt { /* 增强型cmdq ctx配置 */ + 
struct mgmt_msg_head head; /* mbox消息头 */ + + u16 func_id; /* 指定function id */ + u8 cmdq_id; /* cmdq id */ + u8 rsvd1[5]; /* 保留域段 */ + + struct enhance_cmdq_ctxt_info ctxt; /* ctx信息 */ +}; + +struct comm_cmd_virtio_en { /* virtio 配置 到加载sdk之后 */ + struct mgmt_msg_head head; /* mbox消息头 */ + u8 msien_snap_2_virtio_en; /* msien virtio使能 */ + u8 rsv[3]; +}; + +struct hinic5_cqm_cmd_func_secure_mem { /* 安全内存信息获取 */ + struct mgmt_msg_head head; /* mbox消息头 */ + u16 func_id; /* 指定function id */ + u16 rsvd0; /* 保留域段 */ + u32 gpa_hi; /* gpa高位地址 */ + u32 gpa_lo; /* gpa低位地址 */ + u32 len; /* 长度 */ + u8 gpa_mode; /* gpa模式 */ + u8 valid; /* 有效位 */ + u8 rsvd1[2]; /* 保留域段 */ +}; + +struct nic_plug_cap { /* 热拔插能力 */ + u16 max_sqs; /* 最大sq size */ + u16 max_rqs; /* 最大rq size */ +}; + +struct comm_cmd_plug_srv { /* 热拔插服务 */ + struct mgmt_msg_head head; /* mbox消息头 */ + u16 func_id; /* 指定function id */ + u8 srv_type; /* service类型 */ + u8 attach_en; /* 使能标识 */ + struct nic_plug_cap nic_cap; /* nic热拔插能力 */ + u32 rsvd; /* 保留域段 */ +}; + +struct comm_cmd_fast_msg_cap { /* fast msg能力 */ + struct mgmt_msg_head head; /* mbox消息头 */ + u32 func_id; /* 指定function id */ + u32 fast_msg_depth; /* PF:2048, VF:512 */ + u32 fast_msg_page_size; /* 消息页面大小256(单位K) */ + u32 rsvd[9]; /* 保留域段 */ +}; + +#define FAST_MSG_MAX_PAGE_NUM 32 /* fast msg页面数量 */ +struct comm_cmd_fast_msg_rq_addr { + struct mgmt_msg_head head; + u32 func_id; + u32 page_num; + u32 rsvd[2]; + u64 page_addr[FAST_MSG_MAX_PAGE_NUM]; +}; + +struct fast_msg_rq_addr { /* fast msg rq地址 */ + u64 rq_page_addr; /* rq页面地址 */ +}; + +struct comm_cmd_set_fast_msg_rq_addr { /* fast msg rq地址设置 */ + struct mgmt_msg_head head; /* mbox消息头 */ + u32 func_id; /* 指定function id */ + u32 page_num; /* 页面数量 */ + u32 rsvd[2]; /* 保留域段 */ + struct fast_msg_rq_addr page_addr[32]; /* 页面地址 */ +}; + +struct comm_cmd_clear_fast_msg_sml_table { /* fast msg 清除表项 */ + struct mgmt_msg_head head; /* mbox消息头 */ + u32 func_id; /* 指定function id */ + u32 rsvd[5]; /* 保留域段, 32 Bytes 
total */ +}; + +struct comm_cmd_root_ctxt { /* root ctx配置 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + u16 func_id; /* 指定function id */ + u8 set_cmdq_depth; /* cmdq深度设置标识 */ + u8 cmdq_depth; /* cmdq深度 */ + u16 rx_buf_sz; /* rx buff size */ + u8 lro_en; /* lro使能标识 */ + u8 cmdq_mode; /* cmdq模式 */ + u16 sq_depth; /* sq深度 */ + u16 rq_depth; /* rq深度 */ + u32 rsvd1; /* 保留域段 */ + u64 rsvd2; /* 保留域段 */ +}; + +struct comm_cmd_wq_page_size { /* root ctx wqe配置 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + u16 func_id; /* 指定function id */ + u8 opcode; /* 操作标识,0:get,1:set */ + u8 page_size; /* real_size=4KB*2^page_size, range(0~20) must be checked by driver */ + u32 rsvd1; /* 保留域段 */ +}; + +struct comm_cmd_msix_config { /* msix中断配置 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + u16 func_id; /* 指定function id */ + u8 opcode; /* 操作标识,0:get,1:set */ + u8 rsvd1; /* 保留域段 */ + u16 msix_index; /* 中断idx */ + u8 pending_cnt; /* It specifies the maximum wait time for resending period. */ + u8 coalesce_timer_cnt; /* 聚合配置 */ + u8 resend_timer_cnt; /* 重传计数 */ + u8 lli_timer_cnt; /* 信用补偿配置 */ + u8 lli_credit_cnt; /* 信用补充门限 */ + u8 rsvd2[5]; /* 保留域段 */ +}; + +struct comm_cmd_cfg_msix_num { /* msix中断数量配置 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + u16 func_id; /* 指定function id */ + u8 op_code; /* 1: alloc 0: free */ + u8 rsvd0; /* 保留域段 */ + + u16 msix_num; /* msix数量 */ + u16 rsvd1; /* 保留域段 */ +}; + +struct comm_cmd_dma_attr_config { /* dma属性配置(当前暂未使用) */ + struct mgmt_msg_head head; /* mbox消息头 */ + + u16 func_id; /* 指定function id */ + u8 entry_idx; + u8 st; + u8 at; + u8 ph; + u8 no_snooping; + u8 tph_en; + u32 resv1; +}; + +struct comm_cmd_ppf_tbl_htrp_config { /* ppf热替换配置 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + u32 hotreplace_flag; /* 热替换标识 */ +}; + +struct comm_cmd_ceq_ctrl_reg { /* ceq配置 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + u16 func_id; /* 指定function id */ + u16 q_id; /* q id */ + u32 ctrl0; /* ceq ctrl0 */ + u32 ctrl1; /* ceq ctrl1 */ + u32 rsvd1; 
/* 保留域段 */ +}; + +struct comm_cmd_func_tmr_bitmap_op { /* 使能 smf timr bitmap操作 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + u16 func_id; /* 指定function id */ + u8 opcode; /* 1: start, 0: stop */ + u8 rsvd1[5]; /* 保留域段 */ +}; + +struct comm_cmd_ppf_tmr_op { /* smf timr配置 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + u8 ppf_id; /* ppf id */ + u8 opcode; /* 1: start, 0: stop */ + u8 rsvd1[6]; /* 保留域段 */ +}; + +#define HT_GPA_CLEAR 0 /* gpa clr */ +#define HT_GPA_SET 1 /* gpa set */ +struct comm_cmd_ht_gpa { /* gpa操作 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + u8 host_id; /* 指定host */ + u8 opcode; /* 1:set, 0: clear */ + u8 rsvd0[2]; /* 保留域段 */ + u32 rsvd1[7]; /* 保留域段 */ + u64 page_pa0; /* gpa地址0 */ + u64 page_pa1; /* gpa地址1 */ +}; + +struct comm_cmd_get_eqm_num { /* mqm eqm配置 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + u8 host_id; /* 指定host */ + u8 rsvd1[3]; /* 保留域段 */ + u32 chunk_num; /* chunk num */ + u32 search_gpa_num; /* search gpa num */ +}; + +struct comm_cmd_eqm_cfg { /* mqm溢出配置 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + u8 host_id; /* 指定host */ + u8 valid; /* 有效位 */ + u16 rsvd1; /* 保留域段 */ + u32 page_size; /* 页面size */ + u32 rsvd2; /* 保留域段 */ +}; + +struct comm_cmd_eqm_search_gpa { /* mqm search gpa配置 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + u8 host_id; /* 指定host */ + u8 rsvd1[3]; /* 保留域段 */ + u32 start_idx; /* 起始idx */ + u32 num; /* 数量 */ + u32 rsvd2; /* 保留域段 */ + u64 gpa_hi52[0]; /* gpa */ +}; + +struct comm_cmd_set_bat_info { + struct mgmt_msg_head head; + + u16 func_id; + u8 smf_id; + u8 rsvd1; + u32 bat_offset; + u32 data_size; + u8 data[256]; +}; + +struct hinic5_board_info { /* 获取板卡信息 */ + u8 board_type; /* 板卡类型 */ + u8 port_num; /* 网口数量 */ + u8 port_speed; /* 板卡速率 */ + u8 host_width; /* 网口带宽 */ + u8 host_num; /* 支持host数量 */ + u8 pf_num; /* 支持pf数量 */ + u16 vf_total_num; /* 支持vf数量 */ + u8 tile_num; /* 支持tile数量 */ + u8 qcm_num; /* 支持qcm数量 */ + u8 core_num; /* 支持tile core数量 */ + u8 work_mode; /* 板卡工作模式 */ + u8 
service_mode; /* 板卡支持service模式 */ + u8 board_mode; /* 板卡模式 */ + u8 boot_sel; /* 启动模式 */ + u8 board_id; /* 板卡id */ + u32 cfg_addr; /* 配置文件地址 */ + u32 service_en_bitmap; /* service使能特性 */ + u8 scenes_id; /* 场景id */ + u8 cfg_template_id; /* 配置模板id */ + u8 hardware_id; /* 硬件id */ + u8 spu_en; /* spu使能标识 */ + u16 pf_vendor_id; /* 设备厂商id */ + u8 tile_bitmap; /* tile使能bitmap */ + u8 sm_bitmap; /* sm使能bitmap */ + u8 smf_bitmap_hi; /* 保存高4bit的smf */ + u8 board_type_hi; /* 保存高8bit的board_type */ + u8 host_type : 2; /* 0 : pcie, 1 : ubc */ + u8 pg_grade : 2; /* 取值参考PARTIAL_GOOD_GRADE_MODE,0 : fg, 1 : pg */ + u8 rsvd0 : 4; + u8 rsvd; + u32 service_en_bitmap2; /* service使能特性扩展 */ +}; + +struct comm_cmd_board_info { /* 获取板卡信息 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + struct hinic5_board_info info; /* 板卡信息 */ + u32 rsvd[20]; /* 保留域段 */ +}; + +struct comm_cmd_sync_time { /* 驱动时间同步 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + u64 mstime; /* 时间戳 */ + u64 sync_time; +}; + +struct comm_cmd_sdi_info { /* 获取sdi信息 */ + struct mgmt_msg_head head; /* mbox消息头 */ + u32 cfg_sdi_mode; /* 配置sdi模式 */ +}; + +enum tool_run_env { + TOOL_RUN_ENV_HOST, /* host侧 */ + TOOL_RUN_ENV_SPU, /* spu侧 */ + TOOL_RUN_ENV_INVALID = 0xFF +}; +typedef u8 tool_run_env_u8; + +enum chip_ver { + CHIP_VER_HI1823V100, /* 芯片1823v100 */ + CHIP_VER_HI1823EV100, /* 芯片1823v100e */ + CHIP_VER_HI1823V200, /* 芯片1823v200 */ + CHIP_VER_HI1872V100, /* 芯片1872v100 */ + CHIP_VER_HI1825V100, /* 芯片1825v100 */ + CHIP_VER_INVALID = 0xFF +}; +typedef u8 chip_ver_u8; /* 芯片版本 */ + +enum chip_type { + CHIP_TYPE_FPGA, /* 芯片fpga */ + CHIP_TYPE_ASIC, /* 芯片asic */ + CHIP_TYPE_EMU, /* 芯片emu */ + CHIP_TYPE_EDA, /* 芯片eda */ + CHIP_TYPE_INVALID = 0xFF +}; +typedef u8 chip_type_u8; /* 芯片平台 */ + +struct comm_cmd_compatible_info { /* huoq环境信息 */ + struct mgmt_msg_head head; /* mbox消息头 */ + chip_ver_u8 chip_ver; /* 芯片版本 */ + tool_run_env_u8 host_env; /* host类型 */ + chip_type_u8 chip_type; /* 芯片类型/平台 */ + u8 dual_die_flag; /* 双die使能标识 
0:否(单die),1:是(双die) */ + u32 mpu_ver; /* mpu版本 */ + u32 npu_ver; /* 微码版本 */ + u32 rsv1[31]; /* 保留域段 */ +}; + +/* func flr set */ +struct comm_cmd_func_flr_set { /* 设置function flr类型 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + u16 func_id; /* 指定function id */ + u8 type; /* 1: close 置flush */ + u8 isall; /* 是否操作对应pf下的所有vf 1: all vf */ + u32 rsvd; +}; + +struct comm_cmd_bdf_info { /* 获取bdf号 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + u16 function_idx; /* 指定function id */ + u8 rsvd1[2]; /* 保留域段 */ + u8 bus; /* 总线号 */ + u8 device; /* 设备号 */ + u8 function; /* function号 */ + u8 rsvd2[5]; /* 保留域段 */ +}; + +struct hw_pf_info { /* 硬件pf信息 */ + u16 glb_func_idx; /* 全局function id */ + u16 glb_pf_vf_offset; /* 该pf的起始vf id */ + u8 p2p_idx; /* p2p idx */ + u8 itf_idx; /* host id */ + u16 max_vfs; /* vf数量 */ + u16 max_queue_num; /* 队列数量 */ + u16 vf_max_queue_num; /* vf支持的队列数量 */ + u16 port_id; /* 网口id */ + u16 rsvd0; /* 保留域段 */ + u32 pf_service_en_bitmap; /* pf的service en */ + u32 vf_service_en_bitmap; /* vf的service en */ + u16 rsvd1[2]; /* 保留域段 */ + + u8 device_type; /* 设备类型 */ + u8 bus_num; /* 总线号 */ + u16 vf_stride; /* vf步长 */ + u16 vf_offset; /* vf相对偏移 */ + u8 func_valid_map : 2; /* 0:呈现所有function, 1: 呈现奇数function, + * 2:呈现偶数function, 3:无效值 + */ + u8 rsvd2 : 6; /* 保留域段 */ + u8 rsvd; /* 保留域段 */ +}; + +#define CMD_MAX_MAX_PF_NUM 32 /* 最大pf数量 */ +struct hinic5_hw_pf_infos { /* 硬件pf信息 */ + u8 num_pfs; /* pf个数 */ + u8 rsvd1[3]; /* 保留域段 */ + + struct hw_pf_info infos[CMD_MAX_MAX_PF_NUM]; /* 硬件pf信息 */ +}; + +struct comm_cmd_hw_pf_infos { /* 硬件pf信息 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + struct hinic5_hw_pf_infos infos; /* pf信息 */ +}; + +#define DD_CFG_TEMPLATE_MAX_IDX 12 /* 支持的配置模板数量 */ +#define DD_CFG_TEMPLATE_MAX_TXT_LEN 64 /* 支持的配置模板大小 */ +#define CFG_TEMPLATE_OP_QUERY 0 /* 查询配置模板 */ +#define CFG_TEMPLATE_OP_SET 1 /* 设置配置模板 */ +#define CFG_TEMPLATE_SET_MODE_BY_IDX 0 +#define CFG_TEMPLATE_SET_MODE_BY_NAME 1 + +struct comm_cmd_cfg_template { /* 配置版本配置 */ + struct 
mgmt_msg_head head; /* mbox消息头 */ + u8 opt_type; /* 0: query 1: set */ + u8 set_mode; /* 0-index mode. 1-name mode. */ + u8 tp_err; /* 模板错误标识 */ + u8 rsvd0; /* 保留域段 */ + + u8 cur_index; /* Current cfg template index. */ + u8 cur_max_index; /* Max support cfg template index. */ + u8 rsvd1[2]; /* 保留域段 */ + u8 cur_name[DD_CFG_TEMPLATE_MAX_TXT_LEN]; /* 当前模板名称 */ + /* 当前模板信息 */ + u8 cur_cfg_temp_info[DD_CFG_TEMPLATE_MAX_IDX][DD_CFG_TEMPLATE_MAX_TXT_LEN]; + + u8 next_index; /* Next reset cfg template index. */ + u8 next_max_index; /* Max support cfg template index. */ + u8 rsvd2[2]; /* 保留域段 */ + u8 next_name[DD_CFG_TEMPLATE_MAX_TXT_LEN]; /* 下一个模板名称 */ + /* 下一个模板信息 */ + u8 next_cfg_temp_info[DD_CFG_TEMPLATE_MAX_IDX][DD_CFG_TEMPLATE_MAX_TXT_LEN]; +}; + +#define MQM_SUPPORT_COS_NUM 8 /* cos数量 */ +#define MQM_INVALID_WEIGHT 256 /* mqm表大小 */ +#define MQM_LIMIT_SET_FLAG_READ 0 /* read */ +#define MQM_LIMIT_SET_FLAG_WRITE 1 /* write */ +struct comm_cmd_set_mqm_limit { /* mqm限速配置 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + u16 set_flag; /* 置位该标记位表示设置 */ + u16 func_id; /* 指定function id */ + u16 cos_weight[MQM_SUPPORT_COS_NUM]; /* 对应cos_id所占的权重, + * 0-255, 0为SP调度. 
+ */ + u32 host_min_rate; /* 本host支持的最低限速 */ + u32 func_min_rate; /* 本function支持的最低限速,单位Mbps */ + u32 func_max_rate; /* 本function支持的最高限速,单位Mbps */ + u8 rsvd[64]; /* 保留域段 */ +}; + +enum core_type_e { + CORE_TYPE_ARM = 0, + CORE_TYPE_LINX = 1 +}; + +struct arm_core_reg_info { + u64 elr; /* 通用寄存器 */ + u64 spsr; /* 通用寄存器 */ + u64 far; /* 通用寄存器 */ + u64 esr; /* 通用寄存器 */ + u64 xzr; /* 通用寄存器 */ + u64 x30; /* 通用寄存器 */ + u64 x29; /* 通用寄存器 */ + u64 x28; /* 通用寄存器 */ + u64 x27; /* 通用寄存器 */ + u64 x26; /* 通用寄存器 */ + u64 x25; /* 通用寄存器 */ + u64 x24; /* 通用寄存器 */ + u64 x23; /* 通用寄存器 */ + u64 x22; /* 通用寄存器 */ + u64 x21; /* 通用寄存器 */ + u64 x20; /* 通用寄存器 */ + u64 x19; /* 通用寄存器 */ + u64 x18; /* 通用寄存器 */ + u64 x17; /* 通用寄存器 */ + u64 x16; /* 通用寄存器 */ + u64 x15; /* 通用寄存器 */ + u64 x14; /* 通用寄存器 */ + u64 x13; /* 通用寄存器 */ + u64 x12; /* 通用寄存器 */ + u64 x11; /* 通用寄存器 */ + u64 x10; /* 通用寄存器 */ + u64 x09; /* 通用寄存器 */ + u64 x08; /* 通用寄存器 */ + u64 x07; /* 通用寄存器 */ + u64 x06; /* 通用寄存器 */ + u64 x05; /* 通用寄存器 */ + u64 x04; /* 通用寄存器 */ + u64 x03; /* 通用寄存器 */ + u64 x02; /* 通用寄存器 */ + u64 x01; /* 通用寄存器 */ + u64 x00; /* 通用寄存器 */ +}; + +struct linx_core_reg_info { + u32 s0; + u32 s1; + u32 s2; + u32 s3; + u32 s4; + u32 s5; + u32 s6; + u32 s7; + u32 s8; + u32 s9; + u32 s10; + u32 s11; + u32 ra; + u32 gp; + u32 tp; + u32 t0; + u32 t1; + u32 t2; + u32 t3; + u32 t4; + u32 t5; + u32 t6; + u32 a0; + u32 a1; + u32 a2; + u32 a3; + u32 a4; + u32 a5; + u32 a6; + u32 a7; + u32 mepc; + u32 mstatus; + u32 mcause; + u32 rsv[39]; /* 该结构体总大小与struct arm_core_reg_info保持一致 */ +}; + +#define DATA_LEN_1K 1024 +struct comm_info_sw_watchdog { /* 软狗超时信息上报接口 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + /* 全局信息 */ + u32 curr_time_h; /* 发生死循环的时间,cycle */ + u32 curr_time_l; /* 发生死循环的时间,cycle */ + u32 task_id; /* 发生死循环的任务 */ + u8 core_type; /* 参考core_type_e定义 */ + u8 rsv[3]; /* 保留字段,用于扩展 */ + + /* 寄存器信息,TSK_CONTEXT_S */ + u64 pc; /* 通用寄存器 */ + + union core_reg { + struct arm_core_reg_info arm_reg; + struct linx_core_reg_info 
linx_reg; + } reg_info; + + /* 堆栈控制信息,STACK_INFO_S */ + u64 stack_top; /* 栈顶 */ + u64 stack_bottom; /* 栈底 */ + u64 sp; /* 栈当前SP指针值 */ + u32 curr_used; /* 栈当前使用的大小 */ + u32 peak_used; /* 栈使用的历史峰值 */ + u32 is_overflow; /* 栈是否溢出 */ + + /* 堆栈具体内容 */ + u32 stack_actlen; /* 实际的堆栈长度(<=1024) */ + u8 stack_data[DATA_LEN_1K]; /* 超过1024部分,会被截断 */ +}; + +/* 临终遗言信息 */ +#define XREGS_NUM 31 /* 寄存器数量 */ +typedef struct tag_cpu_tick { /* 时间 */ + u32 cnt_hi; /* cycle计数高32位 */ + u32 cnt_lo; /* cycle计数低32位 */ +} CPU_TICK; + +typedef struct tag_ax_exc_reg_info { /* 通用寄存器 */ + u64 ttbr0; /* 通用寄存器 */ + u64 ttbr1; /* 通用寄存器 */ + u64 tcr; /* 通用寄存器 */ + u64 mair; /* 通用寄存器 */ + u64 sctlr; /* 通用寄存器 */ + u64 vbar; /* 通用寄存器 */ + u64 current_el; /* 通用寄存器 */ + u64 sp; /* 通用寄存器 */ + /* 以下字段的内存布局与TskContext保持一致 */ + u64 elr; /* 通用寄存器 */ + u64 spsr; /* 通用寄存器 */ + u64 far_r; /* 通用寄存器 */ + u64 esr; /* 通用寄存器 */ + u64 xzr; /* 通用寄存器 */ + u64 xregs[XREGS_NUM]; /* 寄存器0~30: x30~x0 */ +} EXC_REGS_S; + +typedef struct exc_call_stack_info { + u32 depth; /* 调用栈深度 */ + u64 addrList[10]; /* 调用栈地址列表 */ + char nameList[10][64]; /* 调用栈函数名列表 */ +} exc_call_stack_info_s; + +typedef struct tag_exc_info { + char os_ver[48]; /* OS版本号 */ + char app_ver[64]; /* 产品版本号 */ + u32 exc_cause; /* 异常原因 */ + u32 thread_type; /* 异常前的线程类型 */ + u32 thread_id; /* 异常前线程PID */ + u16 byte_order; /* 字节序 */ + u16 cpu_type; /* CPU类型 */ + u32 cpu_id; /* CPU ID */ + CPU_TICK cpu_tick; /* CPU Tick */ + u32 nest_cnt; /* 异常嵌套计数 */ + u32 fatal_errno; /* 致命错误码,发生致命错误时有效 */ + u64 uw_sp; /* 异常前栈指针 */ + u64 stack_bottom; /* 异常前栈底 */ + /* 异常发生时的核内寄存器上下文信息,82\57必须位于152字节处, + * 若改动,需更新sre_platform.eh中的OS_EXC_REGINFO_OFFSET宏 + */ + EXC_REGS_S reg_info; +} EXC_INFO_S; + +typedef struct tag_exc_info_all { + char os_ver[48]; /* OS版本号 */ + char app_ver[64]; /* 产品版本号 */ + u32 exc_cause; /* 异常原因 */ + u32 thread_type; /* 异常前的线程类型 */ + u32 thread_id; /* 异常前线程PID */ + u16 byte_order; /* 字节序 */ + u16 cpu_type; /* CPU类型 */ + u32 cpu_id; /* CPU ID */ + CPU_TICK 
cpu_tick; /* CPU Tick */ + u32 nest_cnt; /* 异常嵌套计数 */ + u32 fatal_errno; /* 致命错误码,发生致命错误时有效 */ + u64 uw_sp; /* 异常前栈指针 */ + u64 stack_bottom; /* 异常前栈底 */ + /* 异常发生时的核内寄存器上下文信息,82\57必须位于152字节处, + * 若改动,需更新sre_platform.eh中的OS_EXC_REGINFO_OFFSET宏 + */ + EXC_REGS_S reg_info; + exc_call_stack_info_s call_stack_info; +} EXC_INFO_ALL_S; + +#define MPU_LASTWORD_SIZE 1024 /* 临终遗言单次数据长度 */ +typedef struct tag_comm_info_up_lastword { /* 上报给驱动的up lastword模块接口 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + EXC_INFO_S stack_info; /* 堆栈信息 */ + + /* 堆栈具体内容 */ + u32 stack_actlen; /*实际的堆栈长度(<=1024) */ + u8 stack_data[MPU_LASTWORD_SIZE]; /* payload 超过1024部分,会被截断 */ +} comm_info_up_lastword_s; + +typedef struct { + u32 magic; + u32 symbol_num; /* 符号个数 */ + u32 code_size; /* 补丁代码大小 */ + u32 rsvd0[5]; /* 保留字段 */ + char git_tag[64]; /* 冷基线的git标签 */ + char compile_time[20]; /* 编译时间 */ + u32 rsvd1; /* 用于8字节对齐的保留字段 */ +} patch_head_info_s; + +struct hinic5_cmd_update_firmware { /* 固件升级 */ + struct mgmt_msg_head msg_head; /* mbox消息头 */ + + struct { + u32 sl : 1; /* 尾片 */ + u32 sf : 1; /* 首片 */ + u32 flag : 1; /* 分区标识 */ + u32 bit_signed : 1; /* 签名标识 */ + u32 reserved : 12; /* 保留域段 */ + u32 fragment_len : 16; /* 分片长度 */ + } ctl_info; /* 控制信息 */ + + struct { + u32 section_crc; /* 子固件crc */ + u32 section_type; /* 子固件类型 */ + } section_info; /* 子固件信息 */ + + u32 total_len; /* 镜像长度 */ + u32 section_len; /* 子固件长度 */ + u32 section_version; /* 子固件版本 */ + u32 section_offset; /* 子固件偏移 */ + u32 data[384]; /* 镜像数据 */ +}; + +struct hinic5_cmd_activate_firmware { /* 镜像激活 */ + struct mgmt_msg_head msg_head; /* mbox消息头 */ + u8 index; /* 配置文件激活idx(默认使用0) */ + u8 data[7]; /* payload */ +}; + +struct hinic5_cmd_switch_config { /* 配置文件切换 */ + struct mgmt_msg_head msg_head; /* mbox消息头 */ + u8 index; /* 配置文件idx0~1 */ + u8 data[7]; /* payload */ +}; + +/* start 为适配ub访问寄存器,定义的id,用于接口中识别UB模块, 不能和INTERNAL_RING_NODE_ID_E冲突 */ +enum hinic5_ub_mod_id { + HINIC5_UB_D2H = 64, + HINIC5_UB_LQ_TP = HINIC5_UB_D2H, + 
HINIC5_UB_MISC, + HINIC5_UB_LQ_MISC, + HINIC5_UBC0_LQ_NL_DL, + HINIC5_UBC1_LQ_NL_DL, + HINIC5_UBC2_LQ_NL_DL, + /* 1872 */ + HINIC5_UBG_MISC, + HINIC5_UBG_BA, + HINIC5_UBG_TM, + HINIC5_UBG_DLPHY, + HINIC5_UBG_NL, + HINIC5_UBG_TA, + HINIC5_UBG_TP, + HINIC5_UBG_IMMU, + HINIC5_UBG_OMMU, + HINIC5_UBC_D2H, + HINIC5_UBC_TP, + HINIC5_UBC_MISC, + HINIC5_UBC_NL, + HINIC5_UBC_DLPHY, + HINIC5_UBC_TA, + /* 1872 */ + HINIC5_UB_END_IDX, +}; +/* end 为适配ub访问寄存器,定义的node id,用于接口中识别UB模块, 不能和INTERNAL_RING_NODE_ID_E冲突 */ + +#define MAX_DATA_NUM (240) +struct csr_msg { /* csr寄存器读信息 */ + struct { + u32 node_id : 5; /* 节点id */ + u32 data_width : 10; /* 访问宽度 */ + u32 module_id : 8; /* 模块id,用于识别地址属于哪个模块(部分模块没有node_id) */ + u32 rsvd : 9; /* 保留域段 */ + } bits; + u32 addr; /* 地址 */ +}; + +struct comm_cmd_mbox_csr_rd_req { /* csr寄存器读请求 */ + struct mgmt_msg_head head; /* mbox消息头 */ + struct csr_msg csr_info[MAX_DATA_NUM]; /* payload */ + u32 data_num; /* 寄存器个数 */ +}; + +struct comm_cmd_mbox_csr_rd_ret { /* csr寄存器读 */ + struct mgmt_msg_head head; /* mbox消息头 */ + u64 value[MAX_DATA_NUM]; /* 寄存器读取结果 */ +}; + +struct comm_cmd_mbox_csr_rd_req_ex { /* csr寄存器读请求 */ + struct mgmt_msg_head head; /* mbox消息头 */ + u32 data_num; /* 寄存器个数 */ + struct csr_msg csr_info[0]; /* 根据实际读取个数填充 */ +}; + +struct comm_cmd_mbox_csr_rd_ret_ex { /* csr寄存器读 */ + struct mgmt_msg_head head; /* mbox消息头 */ + u64 value[0]; /* 根据实际读取个数填充 */ +}; + +struct comm_cmd_mbox_csr_wt_req { /* csr寄存器写 */ + struct mgmt_msg_head head; /* mbox消息头 */ + struct csr_msg csr_info; /* csr控制寄存器信息 */ + u64 value; /* 值 */ +}; + +struct comm_cmd_mbox_csr_wt_ret { /* csr寄存器写 */ + struct mgmt_msg_head head; /* mbox消息头 */ +}; + +#define INDIR_MAX_INDEX_NUM 480 +#define INDIR_MAX_WT_INDEX_NUM 32 + +struct comm_cmd_mbox_indir_addr { /* 间接表操作信息 */ + u32 indir_ctrl_addr; /* 控制寄存器 */ + u32 indir_timeout_addr; /* 超时寄存器 */ + u32 indir_data_addr; /* 数据寄存器 */ +}; + +struct comm_cmd_mbox_indir_tab_rd_req { /* 间接表读请求 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + 
struct comm_cmd_mbox_indir_addr indir_addr; /* 间接表控制信息 */ + u32 tab_width; /* 表项宽度 */ + u32 index_num; /* offset idx */ + u32 index[INDIR_MAX_INDEX_NUM]; /* payload */ +}; + +struct comm_cmd_mbox_indir_tab_rd_ret { /* 间接表读请求 */ + struct mgmt_msg_head head; /* mbox消息头 */ + u32 data[INDIR_MAX_INDEX_NUM]; /* payload */ +}; + +struct comm_cmd_mbox_indir_tab_wt_req { /* 间接表写请求 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + struct comm_cmd_mbox_indir_addr indir_addr; /* 间接表控制信息 */ + u32 tab_width; /* 表项宽度 */ + u32 index; /* offset idx */ + u32 data[INDIR_MAX_WT_INDEX_NUM]; /* payload */ +}; + +struct comm_cmd_mbox_indir_tab_wt_ret { /* 间接表写请求 */ + struct mgmt_msg_head head; /* mbox消息头 */ +}; + +enum { + MPU_LOG_CLEAR = 0, /* 清除mpu日志 */ + SMU_LOG_CLEAR, /* 清除smu日志 */ + NPU_LOG_CLEAR, /* 清除npu日志 */ + SPU_LOG_CLEAR, /* 清除spu日志 */ + MPU_LASTWORD_CLEAR, /* 清除mpu临终遗言 */ + NPU_LASTWORD_CLEAR, /* 清除微码临终遗言 */ + ALL_LOG_CLEAR, /* 清除全量日志&临终遗言 */ + UBC_IMP_LOG_CLEAR, /* 清除ubc imp日志 */ + UBC_IMP_LASTWORD_CLEAR, /* 清除ubc imp临终遗言 */ + ROCE_IMP_LOG_CLEAR, /* 清除roce imp日志 */ + ROCE_IMP_LASTWORD_CLEAR, /* 清除roce imp临终遗言 */ + ROCE_SCC_LOG_CLEAR, /* 清除roce scc日志 */ + CLEAR_TYPE_BUTT, +}; + +struct comm_cmd_clear_log { /* 清除log */ + struct mgmt_msg_head head; /* mbox消息头 */ + u32 type; /* 清除日志类型 */ +}; + +struct cmd_sector_info { /* 擦除flash */ + struct mgmt_msg_head head; /* mbox消息头 */ + u32 offset; /* flash地址 */ + u32 len; /* flash擦除长度 */ +}; + +enum flash_counter_info_req_type { + FLASH_COUNTER_TYPE_GET_MPU_SIZE, /* mpu counter大小获取类型 */ + FLASH_COUNTER_TYPE_GET_MPU_DATA, /* mpu counter数据获取类型 */ + FLASH_COUNTER_TYPE_GET_NPU_SIZE, /* npu counter大小获取类型 */ + FLASH_COUNTER_TYPE_GET_NPU_DATA, /* npu counter数据获取类型 */ + FLASH_COUNTER_TYPE_INVALID /* counter获取类型无效值 */ +}; + +struct flash_counter_info_req { /* 获取固件counter信息 */ + struct mgmt_msg_head head; /* mbox消息头 */ + u8 type; /* flash_counter_info_req_type */ + u8 rsv[3]; /* 保留域段 */ + u32 offset; /* 地址偏移 */ + u32 length; /* 数据长度 */ +}; + 
+#define FLASH_COUNTER_TYPE_GET_DATA_MAX_SIZE 1024 +struct flash_counter_info_resp { /* 获取固件counter信息 */ + struct mgmt_msg_head head; /* mbox消息头 */ + u32 length; /* 数据长度 */ + u8 data[FLASH_COUNTER_TYPE_GET_DATA_MAX_SIZE]; /* payload */ +}; + +typedef struct { + u64 smu_images; /* smu镜像 */ + u64 mpu_images; /* mpu镜像 */ + u64 npu_images; /* npu镜像 */ + u64 ppe_images; /* 微码ppe镜像 */ + u64 cfg_images; /* 配置文件镜像 */ + u64 patch_images; /* mpu补丁镜像 */ + u64 rsvd[4]; /* 保留域段 */ +} module_images; /* 固件镜像类型 */ + +typedef struct { + struct mgmt_msg_head head; /* mbox消息头 */ + u32 rsvd[4]; /* 保留域段 */ +} comm_cmd_query_module_images_req; /* 查询镜像类型 */ + +typedef struct { + struct mgmt_msg_head head; /* mbox消息头 */ + module_images img; /* 固件镜像 */ +} comm_cmd_query_module_images_rsp; /* 查询镜像类型 */ + +typedef struct tag_mpu_ncsi_counter_info_s { + u32 ncsi_rx_octets_total_ok; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_octets_bad; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_uc_pkts; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_mc_pkts; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_bc_pkts; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_pkts_64octets; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_pkts_65to127octets; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_pkts_128to255octets; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_pkts_255to511octets; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_pkts_512to1023octets; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_pkts_1024to1518octets; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_pkts_1519tomaxoctets; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_fcs_errs; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_tagged; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_data_errs; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_align_errs; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_long_errs; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_jabber_errs; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_pause_maccontrol_framcounter; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_unknow_maccontrol_framcounter; /* ncsi寄存器,定义详见nmanager */ + u32 
ncsi_rx_very_long_err_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_runt_err_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_short_err_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_filt_pkt_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_octets_total_filt; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_octets_transmitte_ok; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_octets_transmitte_bad; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_uc_pkts; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_mc_pkts; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_bc_pkts; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_pkts_64octets; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_pkts_65to127octets; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_pkts_128to255octets; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_pkts_255to511octets; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_pkts_512to1023octets; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_pkts_1024to1518octets; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_pkts_1519tomaxoctets; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_underrun; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_tagged; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_crc_err; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_pause_frams; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_overrun_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_lengthfield_err_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_fail_comma_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_frm_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_frm_err_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_xon_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_xoff_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_xon_err_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_empty_err_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_app_bd_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_add_bd_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_txbd_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_txbd_empty_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_txbd_code_err_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_txbd_min_frame_cnt; /* 
ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_txbd_max_frame_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rls_bd_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_pt_pkt_cnt_low; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_pt_pkt_cnt_high; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_pt_pkt_disc_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_pt_ch_err_cnt0; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_pt_ch_err_cnt1; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_pt_ch_err_cnt2; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_pt_ch_err_cnt3; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_pt_pkt_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_ctrl_ok_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_ctrl_disc_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_ctrl_chksum_err_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_ctrl_pkt_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_ctrl_len_mismatch_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_tx_ctrl_len_short; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_pt_ch_ok_cnt0; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_pt_ch_ok_cnt1; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_pt_ch_ok_cnt2; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_pt_ch_ok_cnt3; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_rx_ctrl_pavload_len_err_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 ncsi_ipsurx_hit_count; /* ncsi寄存器,定义详见nmanager */ + u32 pie_to_mpu_bd_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 pie_to_ipsu_bd_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 pie_to_ncsi_bd_cnt; /* ncsi寄存器,定义详见nmanager */ + u32 rsv[10]; +} mpu_ncsi_counter_info_s; /* ncsi寄存器,定义详见nmanager */ + +#define NCSI_COUNT_OPT_TYPE_READ 0 /* ncsi counter读 */ +#define NCSI_COUNT_OPT_TYPE_CLEAR 1 /* nsci counter清除 */ + +struct comm_cmd_ncsi_counter_req { /* 获取ncsi counter */ + struct mgmt_msg_head head; /* mbox消息头 */ + u8 opt_type; /* 0:read counter 1:counter clear */ + u8 rsvd[3]; +}; + +struct comm_cmd_ncsi_counter_resp { /* 获取ncsi counter */ + struct mgmt_msg_head head; /* mbox消息头 */ + + mpu_ncsi_counter_info_s ncsi_cnt_info; /* counter信息 */ +}; +#define SINGLE_EFUSE_BIN_SIZE 
512 +#define SEND_EFUSE_DATA_SIZE (SINGLE_EFUSE_BIN_SIZE * 3) +struct send_efuse_data_s { /* efuse信息烧写 */ + struct mgmt_msg_head head; /* mbox消息头 */ + u8 opt_type; /* efuse操作类型:1: burn efuse bin, + * 2: hw rotpk switch to guest rotpk + */ + u8 rsvd0[3]; /* 保留域段 */ + u32 total_len; /* entire package leng value */ + u32 data_csum; /* entire package data count sum value */ + u8 data[SEND_EFUSE_DATA_SIZE]; /* payload 1024B*/ +}; + +typedef enum { + BURN_EFUSE_BIN = 1, + REVOKE_SEC_VER_NUM, + BURN_HISS_EFUSE0_BIN, + BURN_HISS_EFUSE1_BIN, + BURN_ALL_EFUSE_BIN, + NONE_BURN_EFUSE_BIN, +} eufse_option_type_e; + +#define DFX_MAG_MAX_REG_NUM (32) +struct comm_info_dfx_mag_reg { + struct mgmt_msg_head head; + u16 sel; /* 方向: 0 - tx 1 - rx */ + u16 write; /* 读写标志: 0 - 读 1 - 写 */ + u32 reg_addr; /* 寄存器地址 */ + u32 reg_cnt; /* 以reg_addr为BASE_ADDR,读取多少个连续的寄存器个数(不是字节数,是寄存器个数), + * 最大32个 + */ + u32 clear; /* 读清标志: 0 - 不读清 1 - 读清(写操作时此参数无效) */ + u32 data[DFX_MAG_MAX_REG_NUM]; /* 返回的数据,最多DFX_MAG_MAX_REG_NUM个, + * 根据reg_cnt来标明实际有效数据个数 + */ +}; + +#define UPDATE_CMD_HEAD_NEW_VERSION 0x1 // 工具下发的重复包命令版本 + +/* 固件升级错误码定义 */ +enum hinic5_update_fw_err_code { + MPU_FW_UPDATE_OK = 0x00, + MPU_FW_UPDATE_START = 0x01, + MPU_FW_UPDATE_READ_FLASH_ERR = 0x02, + MPU_FW_UPDATE_WRITE_FLASH_ERR = 0x03, + MPU_FW_UPDATE_OTHER_FAIL = 0x04, + MPU_FW_UPDATE_BUSY = 0x05, + MPU_FW_UPDATE_OTHER_OPERAT = 0x06, + MPU_FW_UPDATE_PARA_CHECK_ERR = 0x07, + MPU_FW_UPDATE_DUPLICATE_SUBFW = 0x08, + MPU_FW_UPDATE_FW_CRC_ERR = 0x09, + MPU_FW_UPDATE_FW_VERIFY_ERR = 0x0a, + MPU_FW_UPDATE_SUBFW_PARTIAL = 0x0b, + MPU_FW_UPDATE_REFRESH_STATE_MACHINE_FAIL = 0x0c, + MPU_FW_UPDATE_BOARD_TYPE_CHECK_FAIL = 0x0d, + MPU_FW_UPDATE_PERMISSION_DENINED = 0x0e, + MPU_FW_UPDATE_ALREADY_ACTIVED = 0x0f, + MPU_FW_UPDATE_ALLOC_MEM_FAIL = 0x10, + MPU_FW_UPDATE_CHECK_VERSION_FAIL = 0x11, + MPU_FW_UPDATE_STATE_MACHINE_INVALID = 0x12, + MPU_FW_UPDATE_INSTALL_PATCH_FAIL = 0x13, + MPU_FW_UPDATE_UNINSTALL_PATCH_FAIL = 0x14, + 
MPU_FW_UPDATE_ACTIVE_PATCH_FAIL = 0x15, + MPU_FW_UPDATE_DEACTIVE_PATCH_FAIL = 0x16, + MPU_FW_UPDATE_GIT_TAG_MISMATCH = 0x17, + MPU_FW_UPDATE_ADD_SYMBOL_FAIL = 0x18, + MPU_FW_UPDATE_PATCH_REPLACE_EXCLUSIVE = 0x19, + MPU_FW_UPDATE_PAUSE_TASKS_FAIL = 0x1a, + MPU_FW_UPDATE_FLR_IS_RUNNING = 0x1b, + MPU_FW_UPDATE_RESET_CORE_FAIL = 0x1c, + MPU_FW_UPDATE_INIT_FAIL = 0x1d, + MPU_FW_UPDATE_HOT_UPDATE_NOT_SUPPORT = 0x1e, + MPU_FW_UPDATE_FLUSH_FLASH_REPEAT = 0xfb, + MPU_FW_UPDATE_OTHER_HOST_RST_SPI_BUSY = 0xfc, + MPU_FW_UPDATE_HOT_ACTIVE_INVALID = 0xfd, + MPU_FW_UPDATE_HOT_ACTIVE_FAIL = 0xfe, +}; + +struct fw_update_msg_st { /* */ + struct mgmt_msg_head msg_head; /* mbox消息头 */ + struct { + u32 SL : 1; /* 尾片 */ + u32 SF : 1; /* 首片 */ + u32 Flag : 1; /* 分区标识 */ + u32 Signed : 1; /* 签名标识 */ + u32 Repeat : 1; /* 分片操作flash */ + u32 updatefw_main_area_flag : 1; /* 主区标识 */ + u32 Reserved : 10; /* 保留域段 */ + u32 Fragment_Len : 16; /* 分片长度 */ + } ctl_info; /* */ + + struct { + u32 FW_section_CRC; /* 分片操作flash */ + u32 FW_section_type; /* 子固件类型 */ + } section_info; /* 子固件信息 */ + + u32 total_len; /* 镜像长度 */ + u32 setion_total_len; /* 子固件长度 */ + u32 fw_section_version; /* 子固件版本 */ + u32 section_offset; /* 子固件偏移 */ + u32 data[384]; /* 镜像数据 */ +}; + +/* 热激活类型 */ +typedef enum { + HOT_ACTIVE_NONE = 0, + HOT_ACTIVE_MPU = 1, + HOT_ACTIVE_NPU = 2, + HOT_ACTIVE_MNPU = 3, + HOT_ACTIVE_SCC = 4, +} hot_active_type_e; + +/* MPU热激活类型 */ +typedef enum { + MPU_HOT_ACTIVE_NONE, /* 还没有进行热升级 */ + MPU_HOT_ACTIVE_PATCH, /* 热补丁 */ + MPU_HOT_ACTIVE_REPLACE, /* 热替换 */ +} mpu_hot_active_type_e; + +struct cmd_hot_active_fw { /* 热升级激活 */ + struct mgmt_msg_head head; /* mbox消息头 */ + u8 type; /* 激活子固件类型,1: mpu; 2: ucode; 3: mpu & npu */ + u8 mpu_hot_active_type; /* MPU热激活类型,当type为MPU或MNPU时有效 */ + u8 data[6]; /* 保留域段 */ +}; + +struct cmd_bat_set_info { /* 热升级bat表项操作过渡信息 */ + struct mgmt_msg_head head; /* mbox消息头 */ + + u16 func_id; /* 指定function id */ + u8 smf_id; /* smf idx */ + u8 rsvd1; /* 保留域段 */ + u32 
bat_offset; /* bat偏移 */ + u32 data_size; /* 数据大小 */ + u8 data[256]; /* payload */ +}; + +/* 读取dbf信息 */ +typedef struct { + u32 device_id; + u32 vendor_id; +} mpu_pcie_pf_info_s; + +typedef struct { + struct mgmt_msg_head head; + + mpu_pcie_pf_info_s pf_info[32]; + u32 bus_id; + u32 pf_num; +} mpu_pcie_dev_bdf_info_s; + +typedef struct { + struct mgmt_msg_head head; + u8 valid; /* 1: valid */ + u8 host_id; /* 容器归宿host,范围0 ~ 3 */ + u8 rsvd[2]; +} comm_cmd_con_sel_sta; + +typedef struct pf_bdf_info { + u8 itf_idx; + u16 bdf; + u8 pf_bdf_info_vld; +} comm_pf_bdf_info_s; + +typedef struct vf_bdf_info { + u16 glb_pf_vf_offset; /* global_func_id offset of 1st vf in pf */ + u16 max_vfs; /* vf number */ + u16 vf_stride; /* VF_RID_SETTING.vf_stride */ + u16 vf_offset; /* VF_RID_SETTING.vf_offset */ + u8 bus_num; /* tl_cfg_bus_num */ + u8 rsv[3]; +} comm_vf_bdf_info_s; + +struct comm_cmd_get_bdf_info_s { + struct mgmt_msg_head head; + comm_pf_bdf_info_s pf_bdf_info[PCIE_MODE_PF_NUM]; + comm_vf_bdf_info_s vf_bdf_info[PCIE_MODE_PF_NUM]; + u32 vf_num; /* vf num */ +}; + +typedef struct comm_virtio_dev_cmd { + u16 device_type; + u16 device_id; + u32 devid_switch; + u32 sub_vendor_id; + u32 sub_class_code; + u32 flash_en; +} comm_virtio_dev_cmd_s; + +typedef struct comm_virtio_dev_ctl { + u32 device_type_mark; + u32 devid_switch_mark; + u32 sub_vendor_id_mark; + u32 sub_class_code_mark; + u32 flash_en_mark; +} comm_virtio_dev_ctl_s; + +struct comm_cmd_set_virtio_dev { + struct mgmt_msg_head head; + comm_virtio_dev_cmd_s virtio_dev_cmd; + comm_virtio_dev_ctl_s virtio_dev_ctl; +}; + +#define PSM_GIT_CHAR_NUM (20) +struct cmd_get_mpu_git_code { + struct mgmt_msg_head head; /* 8B */ + u8 rsvd[3]; /* 保留 */ + u8 psm_en; + char mpu_git_code[64]; /* git 号和编译时间长60个字符 */ + char psm_git_code[PSM_GIT_CHAR_NUM + 1]; /* psm git 号和编译时间长20个字符 */ + u8 rsvd1[3]; /* 保留 */ +}; + +/* 关闭芯片自复位 */ +struct comm_cmd_enable_auto_rst_chip { + struct mgmt_msg_head head; + + u8 op_code; /* 0: get 1: set */ + 
u8 enable; /* 1: 使能自动复位芯片; 0: 禁止自动复位芯片 */ + u8 rsvd[2]; +}; + +/* 芯片核温结构体定义 */ +struct comm_temp_in_info { + struct mgmt_msg_head head; /* 8B */ + u8 opt_type; /* 0:read operation 1:cfg operation */ + u8 rsv[3]; + s32 max_temp; /* 芯片核温阈值 */ + s32 min_temp; /* 芯片核温阈值 */ +}; + +struct comm_temp_out_info { + struct mgmt_msg_head head; /* 8B */ + s32 temp_data; /* 读出的温度 */ + s32 max_temp_threshold; /* 芯片核温阈值 */ + s32 min_temp_threshold; /* 芯片核温阈值 */ + s32 max_temp; /* 芯片核温历史最大值 */ + s32 min_temp; /* 芯片核温历史最小值 */ +}; + +/* chip id信息 */ +struct comm_chip_id_info { + struct mgmt_msg_head head; + + u8 chip_id; + u8 rsvd[3]; +}; + +/* die id模块接口 */ +struct comm_cmd_get_die_id { + struct mgmt_msg_head head; + + u32 die_id_data[8]; +}; + +typedef struct { + struct mgmt_msg_head head; + + u8 lldp_tx_enable; + u8 port; + u8 rsv[2]; +} comm_cmd_lldp_tx_set_s; + +#define MSIX_INFO_LEN 0x200 +struct comm_cmd_msix_info { + struct mgmt_msg_head head; + + u8 rsvd1; + u8 flag; /* 0-second map, 1-actual map, 2-first map entry */ + u16 function_id; +}; + +enum log_status_operation_type { + READ_TYPE = 0, + WRITE_TYPE, +}; + +enum log_status_type { + LOG_NORMAL = 0, + LOG_BUSY, +}; +struct mpu_log_status_info { + struct mgmt_msg_head head; + u8 type; /* 0: 读 1:写 */ + u8 log_status; /* 0:空闲 1:忙碌 */ + u8 rsvd[2]; +}; + +#define RQ_CXT_SIZE 64 +#define SQ_CXT_SIZE 64 +#define CMDQ_COUNT 2 +#define CMDQ_CXT_SIZE 16 +#define ENHANCE_CMDQ_CXT_SIZE 48 +#define ENHANCE_CMDQ_CXT_SIZE_FRIST 16 +#define ENHANCE_CMDQ_CXT_SIZE_SECOND 32 + +typedef struct { + struct mgmt_msg_head head; + u32 func_id; + u32 smf_id; + u32 queue_id; + u32 smf_id_valid; // 传入smf_id是否有效 +} comm_cmd_root_ctx_load_req_s; + +typedef struct { + struct mgmt_msg_head head; + u8 rq_ctx[RQ_CXT_SIZE]; + u8 sq_ctx[SQ_CXT_SIZE]; + u8 cmdq_ctx[CMDQ_CXT_SIZE * CMDQ_COUNT]; + u8 enhance_cmdq_ctx[ENHANCE_CMDQ_CXT_SIZE * CMDQ_COUNT]; +} comm_cmd_root_ctx_load_ret_s; + +struct cmd_query_fw { + struct mgmt_msg_head head; // 8B + u32 offset; 
// 偏移,因为返回的信息比较大,因此,需要多次返回 + u32 len; // 要读的数据长度 +}; + +#define MAX_CMD_DATA_LEN (1024 + 512) +struct cmd_fw_info { + struct mgmt_msg_head head; // 8B + u32 len; // 返回实际读到的数据长度 + u8 data[MAX_CMD_DATA_LEN]; // 一次最多读1536字节数据 +}; + +typedef struct { + u32 tgt_speed; + u32 cur_speed; + u32 tgt_width; + u32 cur_width; +} pcie_link_info_s; + +typedef struct { + u32 pf_start; + u32 pf_end; + u32 pf_num; + u32 vf_start; + u32 vf_end; + u32 vf_num; +} pcie_pf_vf_info_s; + +typedef struct { + u32 p_tx_left_tag; + u32 np_tx_left_tag; + u32 cpl_tx_left_tag; + u32 p_rx_left_tag; + u32 np_rx_left_tag; + u32 cpl_rx_left_tag; +} pcie_dfx_info_s; + +typedef struct { + u32 host_idx; + u32 core_id; + u32 port_id; + pcie_link_info_s link_info; + pcie_pf_vf_info_s pf_vf_info; + pcie_dfx_info_s dfx_info; +} pcie_topo_item_s; + +#define CMD_PCIE_MAX_HOST_IDX 0xD +typedef struct { + struct mgmt_msg_head head; + u32 cur_host; + u32 host_cnt; + pcie_topo_item_s item[CMD_PCIE_MAX_HOST_IDX + 1]; +} comm_cmd_get_pcie_topo_s; + +typedef struct { + struct mgmt_msg_head head; + u32 type; + u32 condition1; + u32 condition2; + u32 opc; + u64 data; + u32 ret; +} comm_cmd_pcie_option_s; + +#define MAX_TYPE_NAME_LEN 8 + +/* VF mapping flags for mqm vf_map type */ +#define VF_MAP_FLAG_FUNC_ID_SET 0x01 +#define VF_MAP_FLAG_VNIC_ID_SET 0x02 +#define VF_MAP_FLAG_VNIC_GRP_ID_SET 0x04 + +struct cmd_mpu_set_shaper { + struct mgmt_msg_head head; /* 8B */ + char option[MAX_TYPE_NAME_LEN]; + char module_name[MAX_TYPE_NAME_LEN]; + char shaper_mod[MAX_TYPE_NAME_LEN]; + char type_name[MAX_TYPE_NAME_LEN]; + u8 pqm_mod; + u8 port_id; + u8 tc_id; + u8 cos_id; + u8 mqm_type; + u8 rsvd[3]; + u32 mqm_shaper_id; + u32 vnic_vnic_grp_id; + u32 cir; + u32 cbs; + u32 pir; + u32 pbs; + u32 xir; + u32 xbs; + u32 func_id; + u32 vnic_id; + u32 vnic_group_id; + u32 vf_map_flags; + u32 rsvds[16]; +}; + +#define VF_SQ_RQ_MAX_NUM 128 +#define PF_SQ_RQ_MAX_NUM 256 +#define CFG_VF_MAX_NUM 63 +#define CFG_VF_TOTAL_NUM 126 +#define 
CFG_MSIX_MAX_NUM 3072 /* NIC QP + AEQ = 3K, NOT CONTAIN RoCE */ +#define CFG_PF_MAX_NUM 16 +#define CFG_INVALID_VALUE 0xFFFF +#define PF_VF_TOTAL_QUEUE_MAX_NUM 1744 + +#define CFG_BAR_INDEX0 0 +#define CFG_BAR_INDEX1 1 +#define CFG_BAR_INDEX2 2 +#define CFG_BAR_INDEX3 3 +#define CFG_BAR_INDEX4 4 +#define CFG_BAR_INDEX5 5 + +#define PF_TYPE 0 +#define VF_TYPE 1 +#define CFG_BAR_INDEX_NUM 6 +#define CFG_BAR_SIZE_MIN_NUM 4 +#define CFG_PF_BAR_SIZE_MAX_NUM 64 +#define CFG_PF_BAR3_SIZE_MAX_NUM 128 +#define CFG_VF_BAR_SIZE_MAX_NUM 64 +#define CFG_VF_BAR4_SIZE_MAX_NUM 4096 +#define CFG_BAR_SIZE_INVALID_VALUE 0xFFFFFFFF +#define CFG_BAR_TRANSLATE_KB_TO_BYTE(bar_size) (((bar_size) * 1024) - 1) +#define CFG_BAR_TRANSLATE_BYTE_TO_KB(bar_size) (((bar_size) + 1) / 1024) + +#define CFG_DATA_OP_GET 0 +#define CFG_DATA_OP_SET 1 +#define CFG_DATA_OP_CLEAR 2 +#define CFG_DATA_OP_BAR_GET 3 +#define CFG_DATA_OP_BAR_SET 4 +#define CFG_DATA_OP_BAR_CLEAR 5 +#define QUEUE_BIT_PF_SQ_RQ 0 +#define QUEUE_BIT_VF_SQ_RQ 1 +#define QUEUE_BIT_VF_NUM 2 + +#define CFG_BAR_MODE_TEMP 0 +#define FLASH_BAR_MODE_TEMP 0xFFFFFFFF +#define CLEAN_BAR_REBOOT_TWICE (FLASH_BAR_MODE_TEMP - 2) +#define CFG_BAR_MODE_PERM 1 +#define FLASH_BAR_MODE_PERM 0x1 + +#define CFG_BIT(x) (0x1U << (x)) +#define CFG_GET_BIT(val, bit) (((val) >> (bit)) & 0x1) +#define CFG_SET_BIT(val, bit) ((val) |= CFG_BIT(bit)) +#define CFG_CLEAR_BIT(val, bit) ((val) &= ~CFG_BIT(bit)) + +typedef struct mpu_nic_func_queue_s { + u32 magic_func_sq_rq_queue; + u16 pf_sq_rq; + u16 vf_sq_rq; + u16 vf_num; + u16 rsvd0; +} mpu_nic_func_queue; + +typedef struct mpu_nic_bar_s { + u32 magic_bar_set; + u8 pf_bar_index; /* 0-3 */ + u8 vf_bar_index; /* 0,2,4 */ + u8 pf_bar_set_flag; + u8 vf_bar_set_flag; + u32 bar_mode; /* temp: 0 in tool, 0xFFFFFFFF in flash ; + * permanently: 1 in tool, 0x1 in flash + */ + u32 pf_bar_size[CFG_BAR_INDEX_NUM]; + u32 vf_bar_size[CFG_BAR_INDEX_NUM]; +} mpu_nic_bar; + +struct comm_cmd_cfg_data { + struct mgmt_msg_head head; 
+ u8 opt_type; /* operation type 0: query 1: set 2: clear 4: bar set 5: bar clear */ + u8 pf_index; + u8 queue_bitmap; /* 0: pf_sq_rq 1: vf_sq_rq 2: vf_num */ + u8 pf_num; + u16 pf_sq_rq; + u16 vf_sq_rq; + u16 vf_num; + u16 total_queue_num; + u8 is_set_diff_template; + u8 rsvd0[3]; + u32 rsvd1[210]; + mpu_nic_bar bar_info_current; + mpu_nic_bar bar_info_default; + mpu_nic_bar bar_info; + mpu_nic_func_queue cur_func_queue[CFG_PF_MAX_NUM]; + mpu_nic_func_queue next_func_queue[CFG_PF_MAX_NUM]; +}; + +enum voltage_type_e { + VOLTAGE_TYPE_VRD, + VOLTAGE_TYPE_VSENSOR, +}; +typedef struct comm_cmd_voltage_info_s { + struct mgmt_msg_head head; + u8 type; // 参考voltage_type_e + u8 rsv[3]; + u16 vol_integer; + u16 vol_decimal; +} comm_cmd_voltage_info; + +typedef enum { + RTOS_INFO_TYPE_TASK_INFO, + RTOS_INFO_TYPE_CPU_PER, + RTOS_INFO_TYPE_VER_INFO, + RTOS_INFO_TYPE_HWI_INFO, + RTOS_INFO_TYPE_SEM_INFO, + RTOS_INFO_TYPE_BUTT, +} rtos_info_type_e; + +typedef struct { + struct mgmt_msg_head head; + u8 type; + u8 rsv[3]; + u32 para; +} cmd_query_rtos_info; + +#define MAX_RTOS_INFO_LEN (2000) +typedef struct { + struct mgmt_msg_head head; + u8 data[MAX_RTOS_INFO_LEN]; +} cmd_rtos_info; + +#define MAX_RTOS_ID_NUM 200 +typedef struct { + u16 num; + u16 rsv; + u32 id[MAX_RTOS_ID_NUM]; +} rtos_list_info; + +#define RTOS_TASK_NAME_LEN 16 +typedef struct { + u32 task_pid; + char name[RTOS_TASK_NAME_LEN]; + u16 status; + u16 prio; + u64 pc; + u64 sp; + u32 sem_id; +} rtos_task_info; + +typedef struct { + u32 pid; + u16 usage; + u16 rsv; + char name[RTOS_TASK_NAME_LEN]; +} thread_cpup_info; + +#define RTOS_MAX_THREAD_CPUP_NUM 50 +typedef struct { + u32 core_id; + u32 cpup; +} core_cpup_info; + +#define RTOS_MAX_CORE_NUM 4 +typedef struct { + u32 core_num; + u32 thread_num; + core_cpup_info cpup_info[RTOS_MAX_CORE_NUM]; + thread_cpup_info thread_info[RTOS_MAX_THREAD_CPUP_NUM]; +} rtos_cpup_info; + +#define RTOS_VER_INFO_LEN 1024 +typedef struct { + char version[RTOS_VER_INFO_LEN]; +} 
rtos_ver_info; + +typedef struct { + u32 no; + u16 type; + u16 prio; + u64 para; +} hwi_info; + +#define RTOS_MAX_HWI_NUM 100 +typedef struct { + u32 num; + hwi_info info[RTOS_MAX_HWI_NUM]; +} rtos_hwi_info; + +/* + * @ingroup OS_sem + * 信号量类型。 + */ +typedef enum { + RTOS_SEM_TYPE_COUNT, /* 计数型信号量 */ + RTOS_SEM_TYPE_BIN, /* 二进制信号量 */ + RTOS_SEM_TYPE_BUTT +} rtos_sem_type_e; + +/* + * @ingroup OS_sem + * 信号量模块被阻塞线程唤醒方式。 + */ +typedef enum { + RTOS_SEM_MODE_FIFO, // 信号量FIFO唤醒模式 + RTOS_SEM_MODE_PRIOR, // 信号量优先级唤醒模式 + RTOS_SEM_MODE_BUTT // 信号量非法唤醒方式 +} rtos_sem_mode_e; + +typedef struct { + u32 count; + u32 owner; + u16 sem_id; + u8 mode; // 信号量唤醒模式 + u8 type; +} sem_info; + +#define RTOS_MAX_SEM_NUM 128 +typedef struct { + u32 num; + sem_info info[RTOS_MAX_SEM_NUM]; +} rtos_sem_info; + +#define FW_RESTORE_ENABLE 1 +#define FW_RESTORE_DISABLE 0 +#define FW_RESTORE_SET_MAX_NUM (200) +#define FW_RESTORE_INSTALL_SDK_MAX_TIMEOUT 30 +#define FW_RESTORE_INSTALL_SDK_MIN_TIMEOUT 1 +#define FW_RESTORE_MAX_FAIL_COUNT 20 +#define FW_RESTORE_MIN_FAIL_COUNT 1 + +#define SWITCH_RESET_OPT_READ 0 +#define SWITCH_RESET_OPT_WRITE 1 + +typedef enum { + RESTORE_SET_TYPE_SWITCH = 0, // 启动切区标志位 + RESTORE_SET_TYPE_RESET, // 切区后自动复位标志位 + RESTORE_SET_TYPE_FAIL_COUNT, // 设置检测SDK加载失败次数 + RESTORE_SET_TYPE_TIMEOUT, // 设置SDK加载超时时间 + RESTORE_SET_TYPE_BUTT +} fw_restore_set_e; + +struct cmd_chip_switch_reset { + struct mgmt_msg_head head; + u8 op_code; /* 0: set 1: get */ + u8 type; /* fw_restore_set_e, 读时复用作为fail_count返回值 */ + u8 value; /* 待设置的值, 读时复用作为switch&reset返回值 */ + u8 read_value; /* 只在读时使用,为timeout返回值 */ +}; + +/* integrity */ +#define GRAY_INFO_MAGIC_NUM 0xc380f8dd +#define HASH_SIG_SIZE 512 +#define KEY_HASH_SIZE 32 +#define PUBKEY_SIZE 1024 +#define INTEGERITY_VERIFY_ENABLE 0x5A /* 使用魔法数字标识完整性验证开启 */ +typedef enum { + INTEGERITY_CMD_ENABLE = 0, + INTEGERITY_CMD_UPDATE, + INTEGERITY_CMD_DISABLE, + INTEGERITY_CMD_MAX, +} integrity_cmd_type; + +typedef struct { + u8 integrity_type; /* 
用户固件完整性保护开关 */ + u8 rsvd[3]; /* 预留字段*/ + u8 key_hash[KEY_HASH_SIZE]; /* 客户根公钥哈希值 */ + u8 pubkey[PUBKEY_SIZE]; + u8 keysig[HASH_SIG_SIZE]; + u8 newpubkey[PUBKEY_SIZE]; + u8 newkeysig[HASH_SIG_SIZE]; /* 更新操作用到的签名文件 */ +} cskey_status; + +typedef struct { + u32 magic_num; + cskey_status key_hash_sign; + u32 crc; +} gray_card_info_s; +/* integrity */ + +/* update err code */ +typedef enum MPU_INTEGRITY_STATUS { + MPU_INTEGRITY_OK = 0, + MPU_INTEGRITY_NOT_ENABLE = 101, + MPU_INTEGRITY_IS_ENABLE, + MPU_INTEGRITY_MEMCPY_FAIL, + MPU_INTEGRITY_WRITE_FLASH_FAIL, + MPU_INTEGRITY_SMU_VERIFY_FAIL, +} MPU_INTEGRITY_STATUS_ENUM; + +#endif \ No newline at end of file diff --git a/hinic5/src/dpu_develop_interface/fw_msg_intf/mpu/mpu_mailbox_msg_header.h b/hinic5/src/dpu_develop_interface/fw_msg_intf/mpu/mpu_mailbox_msg_header.h new file mode 100644 index 00000000..93fd8893 --- /dev/null +++ b/hinic5/src/dpu_develop_interface/fw_msg_intf/mpu/mpu_mailbox_msg_header.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. + * Filename : mpu_mailbox_msg_header.h + * Creation time : 2024/08/13 + * Description : The message header if in-band commands between the driver and the MPU + */ +#ifndef MPU_MAILBOX_MSG_HEADER_H +#define MPU_MAILBOX_MSG_HEADER_H + +#include "base_type.h" + +#define MPU_MAILBOX_HEADER_VER_0 0 /**< mailbox 消息version 0 */ +#define MPU_MAILBOX_HEADER_VER_1 1 /**< mailbox 消息version 1 */ + +#define MPU_MAILBOX_HEADER_VER1_LEN_UNIT 4 /**< mailbox 消息长度的单位4 bytes */ + +typedef union { + struct { + u32 func_id : 13; /**< indicates function id of this message */ + u32 status : 1; /**< indicates the status of this message */ + u32 rsvd : 1; /**< 保留字段 */ + u32 source : 1; /* indicates the source of this message. 
+ * 0--mailbox 1--api cmd + */ + u32 aeq_id : 2; /**< indicates message response aeq id */ + u32 msg_id : 4; /**< indicates the ID of this message */ + u32 cmd : 10; /**< user defined command */ + u32 msg_len : 11; /**< total message length, maximum (2 << 11) - 1 */ + u32 module : 5; /**< module id */ + u32 seg_len : 6; /**< current segment length */ + u32 no_ack : 1; /**< no_ack 标志 */ + u32 tlp : 1; /**< this bit indicates the message is tlp格式 */ + u32 seg_id : 6; /**< segment sequence id */ + u32 last_flg : 1; /**< the last segment flag */ + u32 direction : 1; /**< 0 send, 1 receive */ + } v0; + + struct { + u32 func_id : 13; /**< indicates function id of this message */ + u32 status : 1; /**< indicates the status of this message */ + u32 version : 1; /**< indicates the version of this message */ + u32 source : 1; /* indicates the source of this message. + * 0--mailbox 1--api cmd + */ + u32 aeq_id : 2; /**< indicates message response aeq id */ + u32 msg_id : 4; /**< indicates the ID of this message */ + u32 cmd : 10; /**< user defined command */ + u32 msg_len : 9; /**< total message length, maximum (2 << 11) - 1 */ + u32 rsvd0 : 2; /**< 保留字段 */ + u32 module : 6; /**< module id */ + u32 rsvd1 : 1; /**< 保留字段 */ + u32 seg_len : 4; /**< current segment length */ + u32 no_ack : 1; /**< no_ack 标志 */ + u32 tlp : 1; /**< this bit indicates the message is tlp格式 */ + u32 seg_id : 6; /**< segment sequence id */ + u32 last_flg : 1; /**< the last segment flag */ + u32 direction : 1; /**< 0 send, 1 receive */ + } v1; +} mpu_mbx_header; + +#endif \ No newline at end of file diff --git a/hinic5/src/dpu_develop_interface/fw_msg_intf/nic/nic_cfg_comm.h b/hinic5/src/dpu_develop_interface/fw_msg_intf/nic/nic_cfg_comm.h new file mode 100644 index 00000000..f4e83cee --- /dev/null +++ b/hinic5/src/dpu_develop_interface/fw_msg_intf/nic/nic_cfg_comm.h @@ -0,0 +1,807 @@ +/****************************************************************************** + + Copyright (C), 2001-2021, Huawei 
Tech. Co., Ltd. + + ****************************************************************************** + File Name : nic_cfg_comm.h + Version : Initial Draft + Description : nic config common header file + Function List : + History : + Modification: Created file + +******************************************************************************/ + +#ifndef NIC_CFG_COMM_H +#define NIC_CFG_COMM_H + +#if defined(__LINUX__) || defined(__VMWARE__) +#include <linux/types.h> +#endif + +#include "nic_mpu_cmd_structs.h" +#include "nic_mpu_cmd_structs_extend.h" +/* rss */ +#define HINIC5_RSS_TYPE_VALID_SHIFT 23 /**< 定义RSS(Receive Side Scaling)类型有效位的偏移量 */ +#define HINIC5_RSS_TYPE_TCP_IPV6_EXT_SHIFT 24 /**< 定义RSS类型TCP IPv6扩展的偏移量 */ +#define HINIC5_RSS_TYPE_IPV6_EXT_SHIFT 25 /**< 定义RSS类型IPv6扩展头的偏移量 */ +#define HINIC5_RSS_TYPE_TCP_IPV6_SHIFT 26 /**< 定义RSS类型TCP IPv6的偏移量 */ +#define HINIC5_RSS_TYPE_IPV6_SHIFT 27 /**< 定义RSS类型IPv6的偏移量 */ +#define HINIC5_RSS_TYPE_TCP_IPV4_SHIFT 28 /**< 定义RSS类型TCP IPv4的偏移量 */ +#define HINIC5_RSS_TYPE_IPV4_SHIFT 29 /**< 定义RSS类型IPv4的偏移量 */ +#define HINIC5_RSS_TYPE_UDP_IPV6_SHIFT 30 /**< 定义RSS类型UDP IPv6的偏移量 */ +#define HINIC5_RSS_TYPE_UDP_IPV4_SHIFT 31 /**< 定义RSS类型UDP IPv4的偏移量 */ + +/* vlan */ +#define NIC_CVLAN_INSERT_ENABLE 0x1 +#define NIC_QINQ_INSERT_ENABLE 0x3 +#define NIC_CONFIG_ALL_QUEUE_VLAN_CTX 0xffff + +/** + * @brief 定义一个宏,用于设置RSS类型 + * @param val 要设置的值 + * @param member 要设置的成员 + * @return 返回设置后的结果 + */ +#define HINIC5_RSS_TYPE_SET(val, member) (((u32)(val) & 0x1) << HINIC5_RSS_TYPE_##member##_SHIFT) +/** + * @brief 定义一个宏,用于获取RSS类型 + * @param val 要获取的值 + * @param member 要获取的成员 + * @return 返回获取到的结果 + */ +#define HINIC5_RSS_TYPE_GET(val, member) (((u32)(val) >> HINIC5_RSS_TYPE_##member##_SHIFT) & 0x1) + +/** + * @brief 定义RSS哈希类型枚举 + * @details 此枚举用于表示网络接口卡(NIC)的接收方硬件分布(RSS)哈希类型 + */ +enum nic_rss_hash_type { + NIC_RSS_HASH_TYPE_XOR = 0, /**< XOR哈希类型 */ + NIC_RSS_HASH_TYPE_TOEP, /**< TOEP哈希类型 */ + + NIC_RSS_HASH_TYPE_MAX /**< MUST BE THE LAST ONE */ 
+}; + +/** + * @brief 定义CQE合一场景下微码上报的csum_err类型枚举 + * @details CQE合一场景下, CQE中csum_err由原来的9bit压缩到2bit, 与驱动保持以下配合 + * NIC_RX_CSUM_IPSU_OTHER_ERR时, 即非CQE合一场景csum_err bit[8]不为0 ---> CQE合一场景csum_err修改成2 + * NIC_RX_CSUM_HW_BYPASS_ERR时, 即非CQE合一场景csum_err bit[7]不为0 ---> CQE合一场景csum_err修改成3 + * l3或l4层报文csum错误时, 非CQE合一场景csum_err bit[0]~bit[6]不为0 ---> CQE合一场景csum_err修改成1 + */ +enum nic_compact_cqe_csum_err_type { + NIC_RX_COMPACT_CSUM_NO_ERROR = 0, + NIC_RX_COMPACT_L3_L4_CSUM_ERROR, + NIC_RX_COMPACT_CSUM_OTHER_ERROR, + NIC_RX_COMPACT_HW_BYPASS_ERROR +}; + +#define NIC_RSS_INDIR_SIZE 256 /**< 定义RSS间接表的大小为256 */ +#define NIC_RSS_KEY_SIZE 40 /**< 定义RSS key大小为40 */ + +/* * + * Definition of the NIC receiving mode + */ +#define NIC_RX_MODE_UC 0x01 /**< 单播模式 */ +#define NIC_RX_MODE_MC 0x02 /**< 组播模式 */ +#define NIC_RX_MODE_BC 0x04 /**< 广播模式 */ +#define NIC_RX_MODE_MC_ALL 0x08 /**< 全组播模式 */ +#define NIC_RX_MODE_PROMISC 0x10 /**< 混杂模式,接收所有的数据包 */ + +/* IEEE 802.1Qaz std */ +#define NIC_DCB_DSCP_NUM 0x8 /**< 定义网络接口控制数据包(NIC DCB)的最大DSCP值 */ +#define NIC_DCB_IP_PRI_MAX 0x40 /**< 定义网络接口控制数据包(NIC DCB)的最大IP优先级 */ + +#define NIC_DCB_PRIO_DWRR 0x0 /**< 定义一个宏,表示优先级的分配方式为带宽的严格分配 */ +#define NIC_DCB_PRIO_STRICT 0x1 /**< 定义一个宏,表示优先级的分配方式为严格的优先级 */ + +#define NIC_DCB_MAX_PFC_NUM 0x4 /**< 定义一个宏,表示最大优先级流控制(PFC)的数量 */ + +#ifndef ETH_ALEN +#define ETH_ALEN 6 /**< 定义单mac地址长度为6B */ +#endif + +#ifndef BIT +/** + * @brief 定义一个宏,用于将一个数的二进制形式左移n位 + * @param n 要左移的位数 + * @return 返回左移n位后的结果 + */ +#define BIT(n) (1UL << (n)) +#endif + +/** + * @brief 网络接口卡功能能力枚举定义 + * + * @details 枚举类型 nic_feature_cap 定义了网络接口卡的各种功能能力。 + * 每个枚举值都是一个位掩码的BIT位,可以通过位操作来设置或者检查某个功能是否被支持。 + */ + +enum nic_feature_cap { + NIC_F_CSUM_BIT = 0, /**< 校验和计算 */ + NIC_F_SCTP_CRC_BIT = 1, /**< SCTP CRC校验 */ + NIC_F_TSO_BIT = 2, /**< TCP Segmentation Offload */ + NIC_F_LRO_BIT = 3, /**< Large Receive Offload */ + NIC_F_UFO_BIT = 4, /**< UDP Fragmentation Offload */ + NIC_F_RSS_BIT = 5, /**< Receive Side Scaling */ + NIC_F_RX_VLAN_FILTER_BIT = 
6, /**< 接收VLAN过滤 */ + NIC_F_RX_VLAN_STRIP_BIT = 7, /**< 接收VLAN去除 */ + NIC_F_TX_VLAN_INSERT_BIT = 8, /**< 发送VLAN插入 */ + NIC_F_VXLAN_OFFLOAD_BIT = 9, /**< VXLAN Offload */ + NIC_F_IPSEC_OFFLOAD_BIT = 10, /**< IPsec Offload */ + NIC_F_FDIR_BIT = 11, /**< Flow Director */ + NIC_F_PROMISC_BIT = 12, /**< 混杂模式 */ + NIC_F_ALLMULTI_BIT = 13, /**< 接收所有组播 */ + NIC_F_XSFP_REPORT_BIT = 14, /**< XSFP状态报告 */ + NIC_F_VF_MAC_BIT = 15, /**< 虚拟函数MAC地址 */ + NIC_F_RATE_LIMIT_BIT = 16, /**< 速率限制 */ + NIC_F_RXQ_RECOVERY_BIT = 17, /**< 接收队列恢复 */ + NIC_F_PTP_1588_V2_BIT = 18, /**< PTP 1588v2 */ + NIC_F_TX_WQE_COMPACT_TASK_BIT = 19, /**< 发送WQE压缩 */ + NIC_F_RX_HW_COMPACT_CQE_BIT = 20, /**< HTN合一CQE */ + NIC_F_HTN_CMDQ_BIT = 21, /**< HTN命令队列 */ + NIC_F_GENEVE_OFFLOAD_BIT = 22, /**< Geneve Offload */ + NIC_F_IPXIP_OFFLOAD_BIT = 23, /**< IPXIP Offload */ + NIC_F_TC_FLOWER_OFFLOAD_BIT = 24, /**< TCAM流控卸载 */ + NIC_F_HTN_FDIR_BIT = 25, /**< HTN FDIR功能 */ + NIC_F_SQ_RQ_CI_COALESCE_BIT = 26, /**< SQ RQ CI共用 */ + NIC_F_RX_SW_COMPACT_CQE_BIT = 27, /**< ucode合一CQE */ + NIC_F_HALF_BOND_OFFLOAD_BIT = 28, /**< 半Bond卸载 */ + NIC_F_MACSEC_OFFLOAD_BIT = 29, /**< MACSec卸载 */ + NIC_F_VEB_OFFLOAD_BIT = 30, /**< VEB卸载 */ + NIC_F_GET_COUNTER_BY_CMDQ_BIT = 31, /**< 支持通过CMDQ读取vport计数 */ + NIC_F_HTN_CMDQ_CAR_BIT = 32, /**< 支持通过CMDQ设置CAR限速 */ + NIC_F_ARP_DUAL_BIT = 33, /**< 支持arp双发 */ +}; + +#define NIC_F_BIT(bit) ((u64)1 << (bit)) +#define NIC_F(name) NIC_F_BIT(NIC_F_##name##_BIT) + +#define NIC_F_CSUM NIC_F(CSUM) +#define NIC_F_SCTP_CRC NIC_F(SCTP_CRC) +#define NIC_F_TSO NIC_F(TSO) +#define NIC_F_LRO NIC_F(LRO) +#define NIC_F_UFO NIC_F(UFO) +#define NIC_F_RSS NIC_F(RSS) +#define NIC_F_RX_VLAN_FILTER NIC_F(RX_VLAN_FILTER) +#define NIC_F_RX_VLAN_STRIP NIC_F(RX_VLAN_STRIP) +#define NIC_F_TX_VLAN_INSERT NIC_F(TX_VLAN_INSERT) +#define NIC_F_VXLAN_OFFLOAD NIC_F(VXLAN_OFFLOAD) +#define NIC_F_IPSEC_OFFLOAD NIC_F(IPSEC_OFFLOAD) +#define NIC_F_FDIR NIC_F(FDIR) +#define NIC_F_PROMISC NIC_F(PROMISC) +#define NIC_F_ALLMULTI 
NIC_F(ALLMULTI) +#define NIC_F_XSFP_REPORT NIC_F(XSFP_REPORT) +#define NIC_F_VF_MAC NIC_F(VF_MAC) +#define NIC_F_RATE_LIMIT NIC_F(RATE_LIMIT) +#define NIC_F_RXQ_RECOVERY NIC_F(RXQ_RECOVERY) +#define NIC_F_PTP_1588_V2 NIC_F(PTP_1588_V2) +#define NIC_F_TX_WQE_COMPACT_TASK NIC_F(TX_WQE_COMPACT_TASK) +#define NIC_F_RX_HW_COMPACT_CQE NIC_F(RX_HW_COMPACT_CQE) +#define NIC_F_HTN_CMDQ NIC_F(HTN_CMDQ) +#define NIC_F_GENEVE_OFFLOAD NIC_F(GENEVE_OFFLOAD) +#define NIC_F_IPXIP_OFFLOAD NIC_F(IPXIP_OFFLOAD) +#define NIC_F_TC_FLOWER_OFFLOAD NIC_F(TC_FLOWER_OFFLOAD) +#define NIC_F_HTN_FDIR NIC_F(HTN_FDIR) +#define NIC_F_SQ_RQ_CI_COALESCE NIC_F(SQ_RQ_CI_COALESCE) +#define NIC_F_RX_SW_COMPACT_CQE NIC_F(RX_SW_COMPACT_CQE) +#define NIC_F_HALF_BOND_OFFLOAD NIC_F(HALF_BOND_OFFLOAD) +#define NIC_F_MACSEC_OFFLOAD NIC_F(MACSEC_OFFLOAD) +#define NIC_F_VEB_OFFLOAD NIC_F(VEB_OFFLOAD) +#define NIC_F_GET_COUNTER_BY_CMDQ NIC_F(GET_COUNTER_BY_CMDQ) +#define NIC_F_HTN_CMDQ_CAR NIC_F(HTN_CMDQ_CAR) +#define NIC_F_ARP_DUAL NIC_F(ARP_DUAL) + +#define NIC_F_1823_MASK 0x1FFEF /**< 1823的所有属性 */ +#define NIC_F_1825_MASK (NIC_F_CSUM | NIC_F_SCTP_CRC | NIC_F_TSO | NIC_F_LRO | NIC_F_RSS | \ + NIC_F_RX_VLAN_FILTER | NIC_F_RX_VLAN_STRIP | NIC_F_TX_VLAN_INSERT | \ + NIC_F_VXLAN_OFFLOAD | NIC_F_IPSEC_OFFLOAD | NIC_F_FDIR | NIC_F_PROMISC | \ + NIC_F_ALLMULTI | NIC_F_XSFP_REPORT | NIC_F_VF_MAC | NIC_F_RATE_LIMIT | \ + NIC_F_TX_WQE_COMPACT_TASK | NIC_F_RX_SW_COMPACT_CQE | \ + NIC_F_HALF_BOND_OFFLOAD | NIC_F_GET_COUNTER_BY_CMDQ) + +#define NIC_F_182X_MASK (NIC_F_1823_MASK | NIC_F_1825_MASK) /**< 182x的所有属性 */ + +#define NIC_F_1872_PF_MASK (NIC_F_CSUM | NIC_F_SCTP_CRC | NIC_F_TSO | NIC_F_TX_WQE_COMPACT_TASK | \ + NIC_F_RX_HW_COMPACT_CQE | NIC_F_HTN_CMDQ | NIC_F_PROMISC | \ + NIC_F_ALLMULTI | NIC_F_VXLAN_OFFLOAD | NIC_F_GENEVE_OFFLOAD | \ + NIC_F_IPXIP_OFFLOAD | NIC_F_RX_VLAN_STRIP | NIC_F_TX_VLAN_INSERT | \ + NIC_F_RSS | NIC_F_RX_VLAN_FILTER | NIC_F_LRO | NIC_F_FDIR | \ + NIC_F_HTN_FDIR | NIC_F_SQ_RQ_CI_COALESCE | 
NIC_F_PTP_1588_V2 | \ + NIC_F_TC_FLOWER_OFFLOAD | NIC_F_MACSEC_OFFLOAD | NIC_F_VEB_OFFLOAD | \ + NIC_F_RATE_LIMIT | NIC_F_HTN_CMDQ_CAR | NIC_F_ARP_DUAL | \ + NIC_F_XSFP_REPORT) /**< 187x PF的所有属性 */ + +#define NIC_F_1872_VF_MASK (NIC_F_CSUM | NIC_F_SCTP_CRC | NIC_F_TSO | NIC_F_TX_WQE_COMPACT_TASK | \ + NIC_F_RX_HW_COMPACT_CQE | NIC_F_HTN_CMDQ | NIC_F_ALLMULTI | \ + NIC_F_VXLAN_OFFLOAD | NIC_F_GENEVE_OFFLOAD | NIC_F_IPXIP_OFFLOAD | \ + NIC_F_RX_VLAN_STRIP | NIC_F_TX_VLAN_INSERT | NIC_F_RSS | \ + NIC_F_RX_VLAN_FILTER | NIC_F_LRO | NIC_F_FDIR | NIC_F_HTN_FDIR | \ + NIC_F_SQ_RQ_CI_COALESCE | NIC_F_TC_FLOWER_OFFLOAD | \ + NIC_F_MACSEC_OFFLOAD | NIC_F_VEB_OFFLOAD | NIC_F_RATE_LIMIT | \ + NIC_F_HTN_CMDQ_CAR | NIC_F_ARP_DUAL) /**< 187x VF的所有属性 */ + +#define NIC_F_1872_MASK (NIC_F_1872_PF_MASK | NIC_F_1872_VF_MASK) +#define NIC_F_ALL_MASK (NIC_F_182X_MASK | NIC_F_1872_MASK) + +#define HINIC5_TCAM_BLOCK_ENABLE 1 /**< TCAM块启用 */ +#define HINIC5_TCAM_BLOCK_DISABLE 0 /**< TCAM块禁用 */ +#define HINIC5_MAX_TCAM_RULES_NUM 4096 /**< TCAM的最大规则数量 */ + +/** + * @brief 定义一个枚举类型,用于表示NIC TCAM块的类型。 + * @details 这个枚举类型包含了两种类型:NIC_TCAM_BLOCK_TYPE_LARGE和NIC_TCAM_BLOCK_TYPE_SMALL。 + * NIC_TCAM_BLOCK_TYPE_LARGE表示块大小为16,NIC_TCAM_BLOCK_TYPE_SMALL表示块大小为0。 + */ +enum { + NIC_TCAM_BLOCK_TYPE_LARGE = 0, /**< block_size: 16 */ + NIC_TCAM_BLOCK_TYPE_SMALL, /**< block_size: 0 */ + NIC_TCAM_BLOCK_TYPE_MAX +}; + +/** + * @struct hinic5_tcam_key_ipv4_mem + * @brief 定义了一个用于存储IPv4 TCAM键的结构体 + * @details 该结构体包含了IPv4 TCAM键的各个字段,包括保留位、隧道类型、IP协议类型、IP类型、 + * Func ID、源IPv4地址、目的IPv4地址、目的端口号、源端口号、外部源IPv4地址、外部目的IPv4地址、VNI等信息。 + */ +struct hinic5_tcam_key_ipv4_mem { + u32 rsvd1 : 4; /**< 保留位1 */ + u32 tunnel_type : 4; /**< 隧道类型 */ + u32 ip_proto : 8; /**< IP协议类型 */ + u32 rsvd0 : 16; /**< 保留位0 */ + u32 sipv4_h : 16; /**< 源IPv4地址的高16位 */ + u32 ip_type : 1; /**< IP类型 */ + u32 function_id : 15; /**< Func ID */ + u32 dipv4_h : 16; /**< 目的IPv4地址的高16位 */ + u32 sipv4_l : 16; /**< 源IPv4地址的低16位 */ + u32 rsvd2 : 16; /**< 保留位2 */ + 
u32 dipv4_l : 16; /**< 目的IPv4地址的低16位 */ + u32 rsvd3; /**< 保留位3 */ + u32 dport : 16; /**< 目的端口号 */ + u32 rsvd4 : 16; /**< 保留位4 */ + u32 rsvd5 : 16; /**< 保留位5 */ + u32 sport : 16; /**< 源端口号 */ + u32 outer_sipv4_h : 16; /**< 外部源IPv4地址的高16位 */ + u32 rsvd6 : 16; /**< 保留位6 */ + u32 outer_dipv4_h : 16; /**< 外部目的IPv4地址的高16位 */ + u32 outer_sipv4_l : 16; /**< 外部源IPv4地址的低16位 */ + u32 vni_h : 16; /**< VNI的高16位 */ + u32 outer_dipv4_l : 16; /**< 外部目的IPv4地址的低16位 */ + u32 rsvd7 : 16; /**< 保留位7 */ + u32 vni_l : 16; /**< VNI的低16位 */ +}; + +/** + * @struct hinic5_tcam_key_ipv6_mem + * @brief 定义了一个用于存储IPv6 TCAM键的结构体 + * @details 该结构体用于存储IPv6 TCAM键,包含了源IPv6地址和目标IPv6地址的各个部分, + * 以及相关的协议类型、端口号等信息。 + */ +struct hinic5_tcam_key_ipv6_mem { + u32 rsvd1 : 3; /**< 保留位1 */ + u32 outer_ip_type : 1; /**< 外部IP类型 */ + u32 tunnel_type : 4; /**< 隧道类型 */ + u32 ip_proto : 8; /**< IP协议类型 */ + u32 rsvd0 : 16; /**< 保留位0 */ + u32 sipv6_key0 : 16; /**< 源IPv6地址的低16位 */ + u32 ip_type : 1; /**< IP类型 */ + u32 function_id : 15; /**< 功能ID */ + u32 sipv6_key2 : 16; /**< 源IPv6地址的第二部分 */ + u32 sipv6_key1 : 16; /**< 源IPv6地址的第一部分 */ + u32 sipv6_key4 : 16; /**< 源IPv6地址的第四部分 */ + u32 sipv6_key3 : 16; /**< 源IPv6地址的第三部分 */ + u32 sipv6_key6 : 16; /**< 源IPv6地址的第六部分 */ + u32 sipv6_key5 : 16; /**< 源IPv6地址的第五部分 */ + u32 dport : 16; /**< 目标端口 */ + u32 sipv6_key7 : 16; /**< 源IPv6地址的第七部分 */ + u32 dipv6_key0 : 16; /**< 目标IPv6地址的低16位 */ + u32 sport : 16; /**< 源端口 */ + u32 dipv6_key2 : 16; /**< 目标IPv6地址的第二部分 */ + u32 dipv6_key1 : 16; /**< 目标IPv6地址的第一部分 */ + u32 dipv6_key4 : 16; /**< 目标IPv6地址的第四部分 */ + u32 dipv6_key3 : 16; /**< 目标IPv6地址的第三部分 */ + u32 dipv6_key6 : 16; /**< 目标IPv6地址的第六部分 */ + u32 dipv6_key5 : 16; /**< 目标IPv6地址的第五部分 */ + u32 rsvd2 : 16; /**< 保留位2 */ + u32 dipv6_key7 : 16; /**< 目标IPv6地址的第七部分 */ +}; + +/** + * @struct hinic5_tcam_key_vxlan_ipv6_mem + * @brief 定义了一个用于存储VXLAN IPv6 TCAM键的结构体 + * @details 该结构体包含了VXLAN IPv6 TCAM键的各个字段,包括保留位、隧道类型、IP协议类型等。 + */ +struct hinic5_tcam_key_vxlan_ipv6_mem { + u32 rsvd1 : 4; /**< 保留位1 
*/ + u32 tunnel_type : 4; /**< 隧道类型 */ + u32 ip_proto : 8; /**< IP协议类型 */ + u32 rsvd0 : 16; /**< 保留位0 */ + + u32 dipv6_key0 : 16; /**< IPv6目标地址的低16位 */ + u32 ip_type : 1; /**< IP类型 */ + u32 function_id : 15; /**< 功能ID */ + + u32 dipv6_key2 : 16; /**< IPv6目标地址的第二部分 */ + u32 dipv6_key1 : 16; /**< IPv6目标地址的第一部分 */ + + u32 dipv6_key4 : 16; /**< IPv6目标地址的第四部分 */ + u32 dipv6_key3 : 16; /**< IPv6目标地址的第三部分 */ + + u32 dipv6_key6 : 16; /**< IPv6目标地址的第六部分 */ + u32 dipv6_key5 : 16; /**< IPv6目标地址的第五部分 */ + + u32 dport : 16; /**< 目标端口 */ + u32 dipv6_key7 : 16; /**< IPv6目标地址的第七部分 */ + + u32 rsvd2 : 16; /**< 保留位2 */ + u32 sport : 16; /**< 源端口 */ + + u32 outer_sipv4_h : 16; /**< 外部源IPv4地址的高16位 */ + u32 rsvd3 : 16; /**< 保留位3 */ + + u32 outer_dipv4_h : 16; /**< 外部目标IPv4地址的高16位 */ + u32 outer_sipv4_l : 16; /**< 外部源IPv4地址的低16位 */ + + u32 vni_h : 16; /**< VXLAN网络标识的高16位 */ + u32 outer_dipv4_l : 16; /**< 外部目标IPv4地址的低16位 */ + + u32 rsvd4 : 16; /**< 保留位4 */ + u32 vni_l : 16; /**< VXLAN网络标识的低16位 */ +}; + +/** + * @struct hinic5_tcam_key_mem_htn + * @brief 定义了一个用于存储htn TCAM键的结构体 + * @details 该结构体包含了htn TCAM键的各个字段,包括保留位、隧道类型、IP协议类型等。 + */ +struct hinic5_tcam_key_mem_htn { + u32 function_id_h : 5; /**< function id高5位 */ + u32 tunnel_type : 3; /**< 存储隧道类型 */ + u32 ip_proto : 8; /**< 存储IP协议类型 */ + u32 rsvd0 : 16; /**< 保留16位 */ + + u32 outer_sipv4_h : 16; /**< 外部源IPv4的高16位 */ + u32 rsvd1 : 8; /**< 保留8位 */ + u32 outer_ip_type : 1; /**< 存储外部IP类型 */ + u32 ip_type : 2; /**< 存储IP类型 */ + u32 function_id_l : 5; /**< function id低5位 */ + + u32 outer_dipv4_h : 16; /**< 外部目的IPv4的高16位 */ + u32 outer_sipv4_l : 16; /**< 外部源IPv4的低16位 */ + + u32 vni_h : 8; /**< 高8位存储虚拟网络标识符 */ + u32 rsvd2 : 8; /**< 保留8位 */ + u32 outer_dipv4_l : 16; /**< 外部目的IPv4的低16位 */ + + u32 sipv4_h : 16; /**< 源IPv4的高16位 */ + u32 vni_l : 16; /**< 低8位存储虚拟网络标识符 */ + + u32 rsvd5 : 16; /**< 保留16位 */ + u32 sipv4_l : 16; /**< 源IPv4的低16位 */ + + u32 rsvd6; /**< 保留 */ + u32 rsvd7; /**< 保留 */ + + u32 dipv4_h : 16; /**< 目的IPv4的高16位 */ + u32 rsvd8 : 16; 
/**< 保留16位 */ + + u32 sport : 16; /**< 源端口 */ + u32 dipv4_l : 16; /**< 目的IPv4的低16位 */ + + u32 rsvd9 : 16; /**< 保留16位 */ + u32 dport : 16; /**< 目的端口 */ +}; + +/** + * @struct hinic5_tcam_key_ipv6_mem_htn + * @brief 定义了一个用于存储htn ipv6 TCAM键的结构体 + * @details 该结构体包含了htn ipv6 TCAM键的各个字段,包括保留位、隧道类型、IP协议类型等。 + */ +struct hinic5_tcam_key_ipv6_mem_htn { + u32 function_id_h : 5; /**< function id高5位 */ + u32 tunnel_type : 3; /**< 存储隧道类型 */ + u32 ip_proto : 8; /**< 存储IP协议类型 */ + u32 rsvd0 : 16; /**< 保留16位 */ + + u32 sipv6_key0 : 16; /**< 源IPv6的第一部分 */ + u32 rsvd1 : 8; /**< 保留字段1,未使用 */ + u32 outer_ip_type : 1; /**< 外部IP类型,1位 */ + u32 ip_type : 2; /**< IP类型,2位 */ + u32 function_id_l : 5; /**< function id低5位 */ + + u32 sipv6_key2 : 16; /**< 源IPv6的第二部分 */ + u32 sipv6_key1 : 16; /**< 源IPv6的第一部分 */ + + u32 sipv6_key4 : 16; /**< 源IPv6的第四部分 */ + u32 sipv6_key3 : 16; /**< 源IPv6的第三部分 */ + + u32 sipv6_key6 : 16; /**< 源IPv6的第六部分 */ + u32 sipv6_key5 : 16; /**< 源IPv6的第五部分 */ + + u32 dipv6_key0 : 16; /**< 目的IPv6的第一部分 */ + u32 sipv6_key7 : 16; /**< 源IPv6的第七部分 */ + + u32 dipv6_key2 : 16; /**< 目的IPv6的第二部分 */ + u32 dipv6_key1 : 16; /**< 目的IPv6的第一部分 */ + + u32 dipv6_key4 : 16; /**< 目的IPv6的第四部分 */ + u32 dipv6_key3 : 16; /**< 目的IPv6的第三部分 */ + + u32 dipv6_key6 : 16; /**< 目的IPv6的第六部分 */ + u32 dipv6_key5 : 16; /**< 目的IPv6的第五部分 */ + + u32 sport : 16; /**< 源端口 */ + u32 dipv6_key7 : 16; /**< 目的IPv6的第七部分 */ + + u32 rsvd2 : 16; /**< 保留字段2,未使用 */ + u32 dport : 16; /**< 目的端口 */ +}; + +/** + * @struct hinic5_tcam_key_vxlan_ipv6_mem_htn + * @brief 定义了一个用于存储htn vxlan ipv6 TCAM键的结构体 + * @details 该结构体包含了htn vxlan ipv6 TCAM键的各个字段,包括保留位、隧道类型、IP协议类型等。 + */ +struct hinic5_tcam_key_vxlan_ipv6_mem_htn { + u32 function_id_h : 5; /**< function id高5位 */ + u32 tunnel_type : 3; /**< 隧道类型 */ + u32 ip_proto : 8; /**< IP协议 */ + u32 rsvd0 : 16; /**< 保留16位 */ + + u32 outer_sipv4_h : 16; /**< 高16位外部源IPv4地址 */ + u32 rsvd1 : 8; /**< 保留8位 */ + u32 outer_ip_type : 1; /**< 外部IP类型 */ + u32 ip_type : 2; /**< IP类型 */ + u32 function_id_l 
: 5; /**< function id低5位 */ + + u32 outer_dipv4_h : 16; /**< 高16位外部目的IPv4地址 */ + u32 outer_sipv4_l : 16; /**< 低16位外部源IPv4地址 */ + + u32 vni_h : 8; /**< 高8位虚拟网络标识符 */ + u32 rsvd2 : 8; /**< 保留8位 */ + u32 outer_dipv4_l : 16; /**< 低16位外部目的IPv4地址 */ + + u32 rsvd3 : 16; /**< 保留16位 */ + u32 vni_l : 16; /**< 低16位虚拟网络标识符 */ + + u32 dipv6_key0 : 16; /**< 第0部分的目的IPv6地址 */ + u32 rsvd4 : 16; /**< 保留16位 */ + + u32 dipv6_key2 : 16; /**< 第2部分的目的IPv6地址 */ + u32 dipv6_key1 : 16; /**< 第1部分的目的IPv6地址 */ + + u32 dipv6_key4 : 16; /**< 第4部分的目的IPv6地址 */ + u32 dipv6_key3 : 16; /**< 第3部分的目的IPv6地址 */ + + u32 dipv6_key6 : 16; /**< 第6部分的目的IPv6地址 */ + u32 dipv6_key5 : 16; /**< 第5部分的目的IPv6地址 */ + + u32 sport : 16; /**< 源端口 */ + u32 dipv6_key7 : 16; /**< 第7部分的目的IPv6地址 */ + + u32 rsvd5 : 16; /**< 保留16位 */ + u32 dport : 16; /**< 目的端口 */ +}; + +/** + * @struct tcam_key_ctrl_mem + * @brief 定义了一个用于存储htn 控制报文 TCAM键的结构体 + * @details 该结构体包含了htn vxlan ipv6 TCAM键的各个字段,包括保留位、隧道类型、IP协议类型等。 + */ +struct tcam_key_ctrl_mem { + u32 function_id1 : 5; /**< function id高5位 */ + u32 pkt_fmt : 3; /**< 报文格式 */ + u32 packet_type : 8; /**< 报文类型 */ + u32 rsvd0 : 16; /**< 保留字段0 */ + + u32 rsvd2 : 16; /**< 保留字段2 */ + u32 rsvd1 : 8; /**< 保留字段1 */ + u32 outer_type : 1; /**< 外层类型 */ + u32 inner_type : 2; /**< 内层类型 */ + u32 function_id2 : 5; /**< function id低5位 */ + + u32 rsvd3; /**< 保留字段3 */ + u32 rsvd4; /**< 保留字段4 */ + u32 rsvd5; /**< 保留字段5 */ + u32 rsvd6; /**< 保留字段6 */ + u32 rsvd7; /**< 保留字段7 */ + u32 rsvd8; /**< 保留字段8 */ + u32 rsvd9; /**< 保留字段9 */ + u32 rsvd10; /**< 保留字段10 */ + u32 rsvd11; /**< 保留字段11 */ +}; + +/** + * @struct tag_tcam_key + * @brief TCAM键结构体,用于存储TCAM键的信息和掩码 + * @details 该结构体包含两个联合体,分别用于存储IPv4键的信息和掩码。 + */ +struct tag_tcam_key { + /** + * @union + * @brief TCAM键信息 + */ + union { + struct hinic5_tcam_key_ipv4_mem key_info; + struct tcam_key_ctrl_mem key_info_ctrl; + struct hinic5_tcam_key_ipv6_mem key_info_ipv6; + struct hinic5_tcam_key_vxlan_ipv6_mem key_info_vxlan_ipv6; + struct hinic5_tcam_key_mem_htn 
key_info_htn; + struct hinic5_tcam_key_ipv6_mem_htn key_info_ipv6_htn; + struct hinic5_tcam_key_vxlan_ipv6_mem_htn key_info_vxlan_ipv6_htn; + }; + + /** + * @union + * @brief TCAM键掩码 + */ + union { + struct hinic5_tcam_key_ipv4_mem key_mask; + struct tcam_key_ctrl_mem key_mask_ctrl; + struct hinic5_tcam_key_ipv6_mem key_mask_ipv6; + struct hinic5_tcam_key_vxlan_ipv6_mem key_mask_vxlan_ipv6; + struct hinic5_tcam_key_mem_htn key_mask_htn; + struct hinic5_tcam_key_ipv6_mem_htn key_mask_ipv6_htn; + struct hinic5_tcam_key_vxlan_ipv6_mem_htn key_mask_vxlan_ipv6_htn; + }; +}; + +#define TCAM_RULE_FDIR_TYPE 0 /**< 定义TCAM规则的类型,FDIR类型对应值为0 */ +#define TCAM_RULE_PPA_TYPE 1 /**< 定义TCAM规则的类型,PPA类型对应值为1 */ +#define TCAM_RULE_BIFURCATION_TYPE 2 /**< 定义TCAM规则的类型,BIFURCATION类型对应值为2 */ + +/** + * @struct hinic5_phy_fpga_port_stats + * @brief 定义了一个结构体,用于存储PHY FPGA端口的统计信息 + * @details 这里是结构体的详细描述 + */ +struct hinic5_phy_fpga_port_stats { + u64 mac_rx_total_octs_port; /**< 接收的总字节数 */ + u64 mac_tx_total_octs_port; /**< 发送的总字节数 */ + u64 mac_rx_under_frame_pkts_port; /**< 接收的帧长度小于64字节的数据包数 */ + u64 mac_rx_frag_pkts_port; /**< 接收的碎片数据包数 */ + u64 mac_rx_64_oct_pkts_port; /**< 接收的64字节数据包数 */ + u64 mac_rx_127_oct_pkts_port; /**< 接收的127字节数据包数 */ + u64 mac_rx_255_oct_pkts_port; /**< 接收的255字节数据包数 */ + u64 mac_rx_511_oct_pkts_port; /**< 接收的511字节数据包数 */ + u64 mac_rx_1023_oct_pkts_port; /**< 接收的1023字节数据包数 */ + u64 mac_rx_max_oct_pkts_port; /**< 接收的最大长度数据包数 */ + u64 mac_rx_over_oct_pkts_port; /**< 接收的超长数据包数 */ + u64 mac_tx_64_oct_pkts_port; /**< 发送的64字节数据包数 */ + u64 mac_tx_127_oct_pkts_port; /**< 发送的127字节数据包数 */ + u64 mac_tx_255_oct_pkts_port; /**< 发送的255字节数据包数 */ + u64 mac_tx_511_oct_pkts_port; /**< 发送的511字节数据包数 */ + u64 mac_tx_1023_oct_pkts_port; /**< 发送的1023字节数据包数 */ + u64 mac_tx_max_oct_pkts_port; /**< 发送的最大长度数据包数 */ + u64 mac_tx_over_oct_pkts_port; /**< 发送的超长数据包数 */ + u64 mac_rx_good_pkts_port; /**< 接收的无错误数据包数 */ + u64 mac_rx_crc_error_pkts_port; /**< 接收的CRC错误数据包数 */ + u64 
mac_rx_broadcast_ok_port; /**< 接收的广播数据包数 */ + u64 mac_rx_multicast_ok_port; /**< 接收的多播数据包数 */ + u64 mac_rx_mac_frame_ok_port; /**< 接收的MAC帧数据包数 */ + u64 mac_rx_length_err_pkts_port; /**< 接收的长度错误数据包数 */ + u64 mac_rx_vlan_pkts_port; /**< 接收的VLAN数据包数 */ + u64 mac_rx_pause_pkts_port; /**< 接收的暂停数据包数 */ + u64 mac_rx_unknown_mac_frame_port; /**< 接收的未知MAC帧数据包数 */ + u64 mac_tx_good_pkts_port; /**< 发送的无错误数据包数 */ + u64 mac_tx_broadcast_ok_port; /**< 发送的广播数据包数 */ + u64 mac_tx_multicast_ok_port; /**< 发送的多播数据包数 */ + u64 mac_tx_underrun_pkts_port; /**< 发送的缓冲区不足数据包数 */ + u64 mac_tx_mac_frame_ok_port; /**< 发送的MAC帧数据包数 */ + u64 mac_tx_vlan_pkts_port; /**< 发送的VLAN数据包数 */ + u64 mac_tx_pause_pkts_port; /**< 发送的暂停数据包数 */ +}; + +/** + * @struct hinic5_port_stats + * @brief 定义了hinic5端口统计信息的结构体 + * @details 该结构体包含了管理消息头部信息和物理端口统计信息 + */ +struct hinic5_port_stats { + struct hinic5_mgmt_msg_head msg_head; /**< 管理消息头部信息 */ + + struct hinic5_phy_fpga_port_stats stats; /**< 物理端口统计信息 */ +}; + +/** + * @struct hinic5_rss_indir_table + * @brief 定义了一个RSS(Receive Side Scaling,接收端缩放)的间接查找表结构体 + * @details 该结构体用于存储RSS的相关信息,包括管理消息头部、函数ID、保留字段和间接查找表。 + */ +struct hinic5_rss_indir_table { + struct hinic5_mgmt_msg_head msg_head; /**< 管理消息头部 */ + + u16 func_id; /**< func id */ + u16 rsvd1; /** 保留字段1 */ + u8 indir[NIC_RSS_INDIR_SIZE]; /**< 间接查找表 */ +}; + +#define NIC_RSS_CMD_TEMP_ALLOC 0x01 /**< 用于临时分配rss资源 */ +#define NIC_RSS_CMD_TEMP_FREE 0x02 /**< 用于临时释放rss资源 */ + +/** + * @struct hinic5_func_tbl_cfg_bitmap + * @brief 函数表配置位图结构体 + * @details 该结构体用于表示函数表配置位图,包含了初始化配置、接收缓冲区大小配置和最大传输单元配置。 + */ +enum hinic5_func_tbl_cfg_bitmap { + FUNC_CFG_INIT, /**< 初始化配置 */ + FUNC_CFG_RX_BUF_SIZE, /**< 接收缓冲区大小配置 */ + FUNC_CFG_MTU, /**< 最大传输单元配置 */ + FUNC_CFG_ISOLATION_VF_MAC, /**< 集群模式VF MAC配置 */ + FUNC_CFG_ISOLATION_VF_SVLAN, /**< 集群模式VF SVLAN配置 */ +}; + +typedef struct mac_table_cnt { + u32 valid_table_cnt; /**< 有效的MAC表数量 */ + u32 mac_table_cnt; /**< 最大支持的MAC表数量 */ + u16 uc_mac_cnt; /**< 单播计数 */ + u16 mc_mac_cnt; /**< 
组播计数 */ +} mac_table_cnt_s; + +#define NIC_FUNC_MAX_NUM 4096 +typedef struct mac_table_res_stat { + u16 uc_mac_cnt; /**< 单播表资源使用统计 */ + u16 mc_mac_cnt; /**< 组播表资源使用统计 */ + u16 share_mac_res_cur_cnt; /**< 当前共享资源池使用统计 */ + u16 share_mac_res_total; /**< 共享资源池总大小 */ + u16 func_uc_mac_cnt[NIC_FUNC_MAX_NUM]; /**< Func粒度单播表资源使用统计 */ +} mac_table_res_stat_s; + +#define HINIC5_CMD_OP_SET 1 /**< cmd操作类型为set */ +#define HINIC5_CMD_OP_GET 0 /**< cmd操作类型为get */ + +#define HINIC5_CMD_OP_ADD 1 /**< cmd操作类型为add */ +#define HINIC5_CMD_OP_DEL 0 /**< cmd操作类型为del */ + +/** + * @brief 定义枚举类型,用于表示不同的命令类型 + * @details 这个枚举类型定义了一系列的命令类型,用于在不同的上下文中表示不同的操作。 + */ +enum { + PPA_TABLE_ID_CLEAN_CMD = 0, /**< 清理PPA表的命令*/ + PPA_TABLE_ID_ADD_CMD, /**< 添加PPA表的命令*/ + PPA_TABLE_ID_DEL_CMD, /**< 删除PPA表的命令*/ + FDIR_TABLE_ID_ADD_CMD, /**< 添加FDIR表的命令*/ + FDIR_TABLE_ID_DEL_CMD, /**< 删除FDIR表的命令*/ + PPA_TABEL_ID_MAX /**< PPA表的最大值*/ +}; + +/** + * @brief 定义一个枚举类型,用于表示网卡NVM数据的类型 + * @details 这个枚举类型包含了多个标志位,每个标志位代表一种网卡NVM数据的类型。 + */ +enum { + NIC_NVM_DATA_SET = BIT(0), /**< 1-save, 0-read */ + NIC_NVM_DATA_PXE = BIT(1), /**< PXE */ + NIC_NVM_DATA_VLAN = BIT(2), /**< VLAN */ + NIC_NVM_DATA_VLAN_PRI = BIT(3), /**< VLAN PRI */ + NIC_NVM_DATA_VLAN_ID = BIT(4), /**< VLAN ID */ + NIC_NVM_DATA_WORK_MODE = BIT(5), /**< 任务类型 */ + NIC_NVM_DATA_PF_SPEED_LIMIT = BIT(6), /**< PPF限速 */ + NIC_NVM_DATA_GE_MODE = BIT(7), /**< GE模式 */ + NIC_NVM_DATA_AUTO_NEG = BIT(8), /**< AUTO NEG */ + NIC_NVM_DATA_LINK_FEC = BIT(9), /**< LINK FEC */ + NIC_NVM_DATA_PF_ADAPTIVE_LINK = BIT(10), /**< PF自适应link */ + NIC_NVM_DATA_SRIOV_CONTROL = BIT(11), /**< SRIOV CONTROL */ + NIC_NVM_DATA_EXTEND_MODE = BIT(12), /**< 拓展模式 */ + NIC_NVM_DATA_RESET = BIT(31), /**< RESET */ +}; + +#define BIOS_CFG_SIGNATURE 0x1923E518 /**< 定义BIOS配置签名 */ +#define BIOS_OP_CFG_ALL(op_code_val) ((((op_code_val) >> 1) & (0xFFFFFFFF)) != 0) /**< 定义BIOS操作码全部配置的宏 */ +#define BIOS_OP_CFG_WRITE(op_code_val) ((((op_code_val) & NIC_NVM_DATA_SET)) != 0) /**< 定义BIOS操作码写入配置的宏 */ 
+#define BIOS_OP_CFG_PXE_EN(op_code_val) (((op_code_val) & NIC_NVM_DATA_PXE) != 0) /**< 定义BIOS操作码PXE启用的宏 */ +#define BIOS_OP_CFG_VLAN_EN(op_code_val) (((op_code_val) & NIC_NVM_DATA_VLAN) != 0) /**< 定义BIOS操作码VLAN启用的宏 */ +#define BIOS_OP_CFG_VLAN_PRI(op_code_val) (((op_code_val) & NIC_NVM_DATA_VLAN_PRI) != 0) /**< 定义BIOS操作码VLAN优先级的宏 */ +#define BIOS_OP_CFG_VLAN_ID(op_code_val) (((op_code_val) & NIC_NVM_DATA_VLAN_ID) != 0) /**< 定义BIOS操作码VLAN ID的宏 */ +#define BIOS_OP_CFG_WORK_MODE(op_code_val) (((op_code_val) & NIC_NVM_DATA_WORK_MODE) != 0) /**< 定义BIOS操作码工作模式的宏 */ +#define BIOS_OP_CFG_PF_BW(op_code_val) (((op_code_val) & NIC_NVM_DATA_PF_SPEED_LIMIT) != 0) /**< 定义BIOS操作码PF带宽的宏 */ +#define BIOS_OP_CFG_GE_SPEED(op_code_val) (((op_code_val) & NIC_NVM_DATA_GE_MODE) != 0) /**< 定义BIOS操作码GE速度的宏 */ +#define BIOS_OP_CFG_AUTO_NEG(op_code_val) (((op_code_val) & NIC_NVM_DATA_AUTO_NEG) != 0) /**< 定义BIOS操作码自动协商的宏 */ +#define BIOS_OP_CFG_LINK_FEC(op_code_val) (((op_code_val) & NIC_NVM_DATA_LINK_FEC) != 0) /**< 定义BIOS操作码链路FEC的宏 */ +#define BIOS_OP_CFG_AUTO_ADPAT(op_code_val) (((op_code_val) & NIC_NVM_DATA_PF_ADAPTIVE_LINK) != 0) /**< 定义BIOS操作码自动适应的宏 */ +#define BIOS_OP_CFG_SRIOV_ENABLE(op_code_val) (((op_code_val) & NIC_NVM_DATA_SRIOV_CONTROL) != 0) /**< 定义BIOS操作码SR-IOV启用的宏 */ +#define BIOS_OP_CFG_EXTEND_MODE(op_code_val) (((op_code_val) & NIC_NVM_DATA_EXTEND_MODE) != 0) /**< 定义BIOS操作码扩展模式的宏 */ +#define BIOS_OP_CFG_RST_DEF_SET(op_code_val) (((op_code_val) & (u32)NIC_NVM_DATA_RESET) != 0) /**< 定义BIOS操作码重置默认设置的宏 */ + +#define ENHANCED_CMDQ_CTX_SIZE 0x30 /**< 定义增强型cmdq context size为48 */ + +#define FLOW_BIFURCATE_BIT (1U << 10) /* 1872流分叉bit位为10 */ + +#define HINIC5_LRO_DEFAULT_COAL_PKT_SIZE 32 +#define HINIC5_LRO_DEFAULT_TIME_LIMIT 16 + +#define HINIC5_SET_PORT_CAR_PROFILE 0 +#define HINIC5_SET_PORT_CAR_STATE 1 +#define HINIC5_GET_PORT_CAR_LIMIT_SPEED 2 + +#define HINIC5_SET_CAR_PROFILE 0 +#define HINIC5_GET_CAR_PROFILE 1 + +#define HINIC5_FUNC_CAR_ID_OFFSET 16 + +#define 
HINIC5_HTN_CMD_SET_CAR 0x26 +#define HINIC5_HTN_CMD_GET_CAR 0x27 + +#define CAR_PROFILE_SIZE 32 +#define CAR_INDEX_UNIT 16 + +#define NIC_MPU_LT_RD_NOT_SUPPORT_ERROR 253 +#define NIC_MPU_LT_OPERA_RANGE_ERROR 254 + +#define CMD_QOS_ETS_COS_TC BIT(0) +#define CMD_QOS_ETS_TC_BW BIT(1) +#define CMD_QOS_ETS_COS_PRIO BIT(2) +#define CMD_QOS_ETS_COS_BW BIT(3) +#define CMD_QOS_ETS_TC_PRIO BIT(4) + +#define CMD_QOS_PORT_TRUST BIT(0) +#define CMD_QOS_PORT_DFT_COS BIT(1) + +#define CMD_QOS_MAP_PCP2COS BIT(0) +#define CMD_QOS_MAP_DSCP2COS BIT(1) + +#define STD_SFP_INFO_MAX_SIZE 640 + +#define HINIC5_PF_SET_VF_ALREADY 0x4 + +typedef enum { + HINIC5_GET_CNT = 0, + HINIC5_GET_CNT_RES, + HINIC5_ADD_CNT, + HINIC5_DEL_CNT, + HINIC5_DEL_ALL_CNT, + HINIC5_RESET_CNT, + HINIC5_RESET_ALL_CNT, + HINIC5_NIC_OP_MAX, +} nic_cnt_op_e; + +#define VEB_OFFLOAD_QUERY 0 +#define VEB_OFFLOAD_SET 1 +#define VEB_OFFLOAD_STATUS_OFF 0 +#define VEB_OFFLOAD_STATUS_ON 1 +#define VEB_OFFLOAD_STATUS_INVALID 2 + +enum hinic5_port_car_type { + HINIC5_PORT_CAR_TYPE_PORT = 0, + HINIC5_PORT_CAR_TYPE_FUNC, + HINIC5_PORT_CAR_TYPE_VNIC_GROUP, +}; + +enum hinic5_port_car_pkt_type { + HINIC5_PORT_CAR_PKT_TYPE_TCP = 0, + HINIC5_PORT_CAR_PKT_TYPE_UDP, + HINIC5_PORT_CAR_PKT_TYPE_ARP, + HINIC5_PORT_CAR_PKT_TYPE_ICMP, + HINIC5_PORT_CAR_PKT_TYPE_MAX, +}; + +enum hinic5_port_car_level { + HINIC5_PORT_CAR_LEVEL_256M = 0, + HINIC5_PORT_CAR_LEVEL_500M, + HINIC5_PORT_CAR_LEVEL_1G, + HINIC5_PORT_CAR_LEVEL_2G, + HINIC5_PORT_CAR_LEVEL_INVALID_NUM = 0xFF, +}; + +typedef enum { + NIC_SOFT_LRO_EN_OPERATE = 0, /* 软件LRO使能操作 */ + NIC_HW_LRO_LEN_OPERATE, /* 硬件LRO聚合长度操作 */ + NIC_HW_LRO_NUM_OPERATE, /* 硬件LRO聚合数量操作 */ + NIC_HW_LRO_TIMER_OPERATE, /* 硬件LRO聚合时间操作 */ + NIC_LRO_CFG_OPERATE_MAX +} lro_cfg_operate_type_u; + +struct cmd_mac_info_set_s { + struct mgmt_msg_head head; + + u16 is_valid; + u16 rsvd0; + u8 mac_addr[ETH_ALEN]; + u8 rsvd1[2]; +}; + +#endif diff --git a/hinic5/src/dpu_develop_interface/fw_msg_intf/nic/nic_mpu_cmd.h 
b/hinic5/src/dpu_develop_interface/fw_msg_intf/nic/nic_mpu_cmd.h new file mode 100644 index 00000000..1fb906dd --- /dev/null +++ b/hinic5/src/dpu_develop_interface/fw_msg_intf/nic/nic_mpu_cmd.h @@ -0,0 +1,257 @@ +/* + * 头文件通过MAILBOX基线表格中的脚本自动生成。如有增删,请先修改基线表格,然后自动生成该头文件 + * 修改流程见:https://wiki.huawei.com/domains/1574/wiki/8/WIKI202509298479069 + * File Name : nic_mpu_cmd.h + * Description : NIC Commands between Driver and MPU + */ + +#ifndef HINIC5_NIC_CMD_H +#define HINIC5_NIC_CMD_H + +/** + * @brief enum hinic5_nic_cmd + * @details nic hinic commands + */ +enum hinic5_nic_cmd { + HINIC5_NIC_CMD_VF_REGISTER = 0, /** < @see hinic5_cmd_register_vf */ + HINIC5_NIC_CMD_SET_FUNC_TBL = 5, /* 设置FUNC表初始化、mtu值等 + * @see hinic5_cmd_set_func_tbl + */ + HINIC5_NIC_CMD_SET_VPORT_ENABLE = 6, /* func使能/去使能标记(OVS使用PF代发) + * @see hinic5_vport_state + */ + HINIC5_NIC_CMD_SET_RX_MODE = 7, /* 设置单播、组播混杂等标记 + * @see hinic5_rx_mode_config + */ + HINIC5_NIC_CMD_SQ_CI_ATTR_SET = 8, /* 设置func的CI属性表 + * @see hinic5_cmd_cons_idx_attr + */ + HINIC5_NIC_CMD_GET_VPORT_STAT = 9, /* 获取func 单播、丢包等统计 + * @see hinic5_port_stats_info/hinic5_cmd_vport_stats + */ + HINIC5_NIC_CMD_CLEAN_VPORT_STAT = 10, /* 清理func 单播、丢包等统计 + * @see hinic5_port_stats_info + */ + HINIC5_NIC_CMD_CLEAR_QP_RESOURCE = 11, /* 清0 func队列的CPI CI值 + * @see hinic5_cmd_clear_qp_resource + */ + HINIC5_NIC_CMD_CFG_FLEX_QUEUE = 12, /** < rsvd */ + HINIC5_NIC_CMD_CFG_RX_LRO = 13, /** < 配置LRO的使能等 @see hinic5_cmd_lro_config */ + HINIC5_NIC_CMD_CFG_LRO_TIMER = 14, /** < 使能LRO的Timer @see hinic5_cmd_lro_timer */ + HINIC5_NIC_CMD_FEATURE_NEGO = 15, /** < func的属性协商 @see hinic5_cmd_feature_nego */ + HINIC5_NIC_CMD_CFG_LOCAL_LRO_STATE = 16, /* 设置func的local_switch_lro_en + * @see hinic5_cmd_local_lro_state + */ + HINIC5_NIC_CMD_CACHE_OUT_QP_RES = 17, /* 清理L2NIC/SMMC_CLA Cache资源 + * @see hinic5_cmd_cache_out_qp_resource + */ + HINIC5_NIC_CMD_SET_FUNC_ER_FWD_ID = 18, /* 设置NIC ER 转发ID,流分叉使用(计算使用,rsvd) */ + HINIC5_NIC_CMD_GET_MAC = 20, /** < func 
获取MAC地址 @see hinic5_port_mac_set */ + HINIC5_NIC_CMD_SET_MAC = 21, /** < func 设置MAC地址 @see hinic5_port_mac_set */ + HINIC5_NIC_CMD_DEL_MAC = 22, /** < func 删除MAC地址 @see hinic5_port_mac_set */ + HINIC5_NIC_CMD_UPDATE_MAC = 23, /** < func 更新MAC地址 @see hinic5_port_mac_update */ + HINIC5_NIC_CMD_GET_ALL_DEFAULT_MAC = 24, /* 获取所有默认MAC地址 + * @see nic_cmd_mac_info + */ + HINIC5_NIC_CMD_CFG_FUNC_VLAN = 25, /* 添加/删除func的vlan设备 + * @see hinic5_cmd_vlan_config + */ + HINIC5_NIC_CMD_SET_VLAN_FILTER_EN = 26, /* 设置func的VLAN过滤功能 + * @see hinic5_cmd_set_vlan_filter + */ + HINIC5_NIC_CMD_SET_RX_VLAN_OFFLOAD = 27, /* 设置func表rx_vlan_offload_en + * @see hinic5_cmd_vlan_offload + */ + HINIC5_NIC_CMD_SMAC_CHECK_STATE = 28, /* IPSUTX源MAC检查开关 + * @see hinic5_smac_check_state + */ + HINIC5_NIC_CMD_OUTBAND_SET_FUNC_VLAN = 29, /* 带外设置func的vlan(计算使用,rsvd) */ + HINIC5_NIC_CMD_CFG_VXLAN_PORT = 30, /** < 设置vxlan_dprot(计算使用,rsvd) */ + HINIC5_NIC_CMD_RX_RATE_CFG = 31, /** < rx限速bios设置(计算使用,rsvd) */ + HINIC5_NIC_CMD_WR_ORDERING_CFG = 32, /* 设置PCIe的读写是强保序还是乱序(计算使用,rsvd) */ + HINIC5_NIC_CMD_MAC_SYNC = 33, /** < 设置mac同步(计算使用,rsvd) */ + HINIC5_NIC_CMD_SET_RQ_CI_CTX = 34, /** < @see hinic5_rq_cqe_ctx */ + HINIC5_NIC_CMD_SET_RQ_ENABLE = 35, /** < @see hinic5_rq_enable */ + HINIC5_NIC_CMD_CFG_VF_VLAN = 40, /* 添加/删除func的vlan设备(QinQ) + * @see hinic5_cmd_vf_vlan_config + */ + HINIC5_NIC_CMD_SET_SPOOFCHK_STATE = 41, /** < @see hinic5_cmd_spoofchk_set */ + HINIC5_NIC_CMD_SET_MAX_MIN_RATE = 42, /** < 设置func的限速 @see hinic5_cmd_rate_cfg */ + HINIC5_NIC_CMD_CFG_CQE_COALESCE_OFFLOAD = 43, /* 设置cqe聚合卸载功能(计算使用,rsvd) */ + HINIC5_NIC_CMD_CFG_CQE_COALESCE_OFFLOAD_TIMER = 44, /* 设置cqe聚合卸载功能计时 + *(计算使用,rsvd) + */ + HINIC5_NIC_CMD_CFG_VF_TRUST = 46, /** < 设置vf trust使能 @see hinic5_set_vf_trust */ + HINIC5_NIC_CMD_RSS_CFG = 60, /* 设置func的rss使能、rq_pri_num等 + * @see hinic5_cmd_rss_config + */ + HINIC5_NIC_CMD_RSS_TEMP_MGR = 61, /* 分配/释放func的rss模版表 + * @see hinic5_rss_template_mgmt + */ + HINIC5_NIC_CMD_GET_RSS_CTX_TBL = 62, /* 
设置func的rss context + * @see hinic5_rss_context_table + */ + HINIC5_NIC_CMD_CFG_RSS_HASH_KEY = 63, /* 设置func的rss key + * @see hinic5_cmd_rss_hash_key + */ + HINIC5_NIC_CMD_CFG_RSS_HASH_ENGINE = 64, /* 设置func的rss engine + * @see hinic5_cmd_rss_engine_type + */ + HINIC5_NIC_CMD_SET_RSS_CTX_TBL_INTO_FUNC = 65, /* 设置func的rss type和rss type en + * @see hinic5_rss_context_table + */ + HINIC5_NIC_CMD_IPCS_ERR_RSS_ENABLE_OP = 66, /* 设置global表的ipcs_err_rss_en + * @see hinic5_ipcs_err_rss_enable_operation_s + */ + HINIC5_NIC_CMD_GTP_INNER_PARSE_STATUS = 67, /* 控制gtp内层解析状态(计算使用,rsvd) */ + HINIC5_NIC_CMD_ADD_TC_FLOW = 80, /** < 添加TCAM规则 @see nic_cmd_fdir_add_rule */ + HINIC5_NIC_CMD_DEL_TC_FLOW = 81, /** < 删除TCAM规则 @see nic_cmd_fdir_del_rules */ + HINIC5_NIC_CMD_GET_TC_FLOW = 82, /** < 获取TCAM规则 @see nic_cmd_fdir_get_rule */ + HINIC5_NIC_CMD_FLUSH_TCAM = 83, /* 清除某func所有TCAM规则和block + * @see nic_cmd_flush_tcam_rules + */ + HINIC5_NIC_CMD_CFG_TCAM_BLOCK = 84, /* 分配/释放func的block资源 + * @see nic_cmd_ctrl_tcam_block_in/ + * @see nic_cmd_ctrl_tcam_block_out + */ + HINIC5_NIC_CMD_ENABLE_TCAM = 85, /* 设置func的fdir_tcam_enable + * @see nic_cmd_set_tcam_enable + */ + HINIC5_NIC_CMD_GET_TCAM_BLOCK = 86, /* 获取某个block被哪个func使用 + * @see nic_cmd_dfx_fdir_tcam_block_table + */ + HINIC5_NIC_CMD_CFG_PPA_TABLE_ID = 87, /** < @see hinic5_ppa_cfg_table_id_cmd */ + HINIC5_NIC_CMD_SET_PPA_EN = 88, /* 设置func表的ppa使能 @see hinic5_ppa_cfg_ppa_en_cmd */ + HINIC5_NIC_CMD_CFG_PPA_MODE = 89, /** < @see hinic5_ppa_cfg_mode_cmd */ + HINIC5_NIC_CMD_CFG_PPA_FLUSH = 90, /* 设置global表的ppa_flow_flush_en + * @see hinic5_ppa_flush_en_cmd + */ + HINIC5_NIC_CMD_SET_FDIR_STATUS = 91, /* 设置SML FDIR线性表 @see nic_cmd_set_fdir_status */ + HINIC5_NIC_CMD_GET_PPA_COUNTER = 92, /** < @see hinic5_ppa_fdir_query_cmd */ + HINIC5_NIC_CMD_SET_FUNC_FLOW_BIFUR_ENABLE = 93, /* 设置/查询func表的flow_bifur_en + *(计算使用,rsvd) + */ + HINIC5_NIC_CMD_SET_BOND_MASK = 94, /** < 设置bond掩码(计算使用,rsvd) */ + HINIC5_NIC_CMD_GET_BLOCK_TC_FLOWS = 95, /* 
获取某个block下的所有规则? + * @see nic_cmd_fdir_get_block_rules + */ + HINIC5_NIC_CMD_GET_BOND_MASK = 96, /** < 读取bond掩码(计算使用,rsvd) */ + HINIC5_NIC_CMD_SET_PORT_ENABLE = 100, /** < @see hinic5_port_state */ + HINIC5_NIC_CMD_CFG_PAUSE_INFO = 101, /* 设置/查询port的pause状态 + * @see hinic5_cmd_pause_config + */ + HINIC5_NIC_CMD_CFG_PORT_CAR = 102, /* 设置/查询/使能 arp/icmp的car限速 + * @see 182x:hinic5_cmd_set_port_car\ + * @see 1872:hinic5_cmd_set_car/hinic5_car_profile + */ + HINIC5_NIC_CMD_SET_ER_DROP_PKT = 103, /** < rsvd */ + HINIC5_NIC_CMD_VF_COS = 104, /** < @see hinic5_cmd_vf_dcb_state */ + HINIC5_NIC_CMD_SETUP_COS_MAPPING = 105, /** < rsvd */ + HINIC5_NIC_CMD_SET_ETS = 106, /** < rsvd */ + HINIC5_NIC_CMD_SET_PFC = 107, /** < rsvd */ + HINIC5_NIC_CMD_QOS_ETS = 108, /* QoS ETS调度权重配置/查询 @see hinic5_cmd_ets_cfg */ + HINIC5_NIC_CMD_QOS_PFC = 109, /** < QoS PFC配置/查询 @see hinic5_cmd_set_pfc */ + HINIC5_NIC_CMD_QOS_DCB_STATE = 110, /* QoS 配置func及其所属Port的DCB状态 + * @see hinic5_cmd_set_dcb_state + */ + HINIC5_NIC_CMD_QOS_PORT_CFG = 111, /* QoS Port的trust信息配置/查询 + * @see hinic5_cmd_qos_port_cfg + */ + HINIC5_NIC_CMD_QOS_MAP_CFG = 112, /* QoS PCP/DSCP映射配置/查询 + * @see hinic5_cmd_qos_map_cfg + */ + HINIC5_NIC_CMD_FORCE_PKT_DROP = 113, /* QoS Port的PFC/Pause强制丢包 + * @see hinic5_force_pkt_drop + */ + HINIC5_NIC_CMD_CFG_TX_PROMISC_SKIP = 114, /* 设置混杂PF是否接收TX未知单播报文 + * @see hinic5_tx_promisc_cfg + */ + HINIC5_NIC_CMD_GET_CIR_DROP = 115, /** < 读取cir(计算使用,rsvd) */ + HINIC5_NIC_CMD_SET_PORT_FLOW_BIFUR_ENABLE = 117, /* 设置/查询port表的flow_bifur_en + *(计算使用,rsvd) + */ + HINIC5_NIC_CMD_TX_PAUSE_EXCP_NOTICE = 118, /** < @see nic_cmd_tx_pause_notice */ + HINIC5_NIC_CMD_INQUIRT_PAUSE_CFG = 119, /* QoS PFC风暴检测参数配置 + * @see nic_cmd_pause_inquiry_cfg + */ + HINIC5_NIC_CMD_BIOS_CFG = 120, /** < 固化BIOS配置/查询 @see nic_cmd_bios_cfg */ + HINIC5_NIC_CMD_SET_FIRMWARE_CUSTOM_PACKETS_MSG = 121, /* 接收快速故障通知内容 + * @see fault_msg_s + */ + HINIC5_NIC_CMD_QOS_EXTEND_CFG = 122, /** < Qos 扩展配置,支持TC限速配置/查询 */ + 
HINIC5_NIC_CMD_BOND_LINK_INFO_GET = 130, /** < @see hinic5_bond_link_info */ + HINIC5_NIC_CMD_MACSEC_PN_EXPIRED_NOTICE = 131, /* @see tag_macsec_pn_expired_report_cmd_s */ + HINIC5_NIC_CMD_PASS_ARP_PKT = 132, /** < @see hinic5_arp_pkt_info */ + HINIC5_NIC_CMD_BOND_DEV_CFG = 133, /** < @see hinic5_cmd_cfg_bond */ + HINIC5_NIC_CMD_BOND_DEV_CREATE = 134, /** < @see hinic5_cmd_create_bond */ + HINIC5_NIC_CMD_BOND_DEV_DELETE = 135, /** < @see hinic5_cmd_delete_bond */ + HINIC5_NIC_CMD_BOND_DEV_OPEN_CLOSE = 136, /** < @see hinic5_cmd_open_close_bond */ + HINIC5_NIC_CMD_BOND_INFO_GET = 137, /** < @see hinic5_bond_status_info */ + HINIC5_NIC_CMD_BOND_ACTIVE_INFO_GET = 138, /** < @see hinic5_bond_active_report_info */ + HINIC5_NIC_CMD_BOND_ACTIVE_NOTICE = 139, /** < @see nic_cmd_bond_active_report_info */ + HINIC5_NIC_CMD_GET_SM_TABLE = 140, /* 获取mac表等一些信息 @see nic_cmd_dfx_sm_table */ + HINIC5_NIC_CMD_RD_LINE_TBL = 141, /* 获取SM线性表的一些信息 @see nic_mpu_lt_opera */ + HINIC5_NIC_CMD_SET_VEB = 143, /** < 查询或配置veb卸载模式 @see hinic5_veb_set */ + HINIC5_NIC_CMD_NIC_VPORT_CNT = 144, /* 查询或配置VF的vport计数 + * @see hinic5_nic_vport_cnt_info + */ + HINIC5_NIC_CMD_SET_UCAPTURE_OPT = 160, /* ROCE抓包功能开关 @see nic_cmd_capture_info */ + HINIC5_NIC_CMD_SET_VHD_CFG = 161, /* 设置func表的vhd_type相关参数 @see nic_cmd_vhd_config */ + HINIC5_NIC_CMD_GET_UCAPTURE_INFO = 162, /* 获取PF抓取报文功使能情况,遍历PF, + * 返回使能抓包功能的位图(计算使用,rsvd) + */ + HINIC5_NIC_CMD_GET_OUTBAND_CFG = 170, /** < 获取带外设置(计算使用,rsvd) */ + HINIC5_NIC_CMD_OUTBAND_CFG_NOTICE = 171, /* 带外信息获取(计算使用,rsvd) */ + HINIC5_NIC_CMD_FLUSH_TC_FLOW = 176, /** < @see hinic5_tc_flush_info */ + HINIC5_NIC_CMD_CFG_VXLAN_TBL = 177, /** < @see hinic5_tc_vxlan_tbl_cfg_info */ + HINIC5_NIC_CMD_MOVE_TC_TBL = 178, /** < @see hinic5_tc_move_info */ + HINIC5_NIC_CMD_CFG_TC_AGING_TBL = 179, /** < @see hinic5_tc_aging_info */ + HINIC5_NIC_CMD_PFE_CNT = 180, /** < @see hinic5_tc_pfe_cnt_info */ + HINIC5_NIC_CMD_GET_PFE_CFG = 181, /** < @see hinic5_tc_pfe_cfg_reg_info */ + 
HINIC5_NIC_CMD_CFG_PFE_TCAM = 182, /** < @see hinic5_tc_tcam_info */ + HINIC5_NIC_CMD_CFG_PFE_VTEP_IP = 183, /** < @see hinic5_tc_pfe_vtep_ip_cmd */ + HINIC5_NIC_CMD_SET_PFE_DEFAULT_ACTION = 184, /** < @see hinic5_tc_default_action_info */ + HINIC5_NIC_CMD_PFE_TCAM_FREQ = 185, /** < @see hinic5_tc_pfe_tcam_freq_info */ + HINIC5_NIC_CMD_CFG_TCAM_CLOCK_GATING = 186, /* @see hinic5_tc_tcam_clock_gating_cfg_info */ + HINIC5_NIC_CMD_CFG_TC_FLOW_RULE = 187, /** < @see hinic5_tc_cfg_info */ + HINIC5_NIC_CMD_SET_PFE_CFG_PROFILE = 188, /** < @see hinic5_tc_pfe_cfg_profile_info */ + HINIC5_NIC_CMD_GET_PORT_STAT = 200, /** < rsvd */ + HINIC5_NIC_CMD_CLEAN_PORT_STAT = 201, /** < rsvd */ + HINIC5_NIC_CMD_CFG_LOOPBACK_MODE = 202, /** < rsvd */ + HINIC5_NIC_CMD_GET_SFP_QSFP_INFO = 203, /** < rsvd */ + HINIC5_NIC_CMD_SET_SFP_STATUS = 204, /** < rsvd */ + HINIC5_NIC_CMD_GET_LIGHT_MODULE_ABS = 205, /** < rsvd */ + HINIC5_NIC_CMD_GET_LINK_INFO = 206, /** < rsvd */ + HINIC5_NIC_CMD_CFG_AN_TYPE = 207, /** < rsvd */ + HINIC5_NIC_CMD_GET_PORT_INFO = 208, /** < @see hinic5_cmd_port_info */ + HINIC5_NIC_CMD_SET_LINK_SETTINGS = 209, /** < rsvd */ + HINIC5_NIC_CMD_ACTIVATE_BIOS_LINK_CFG = 210, /** < rsvd */ + HINIC5_NIC_CMD_RESTORE_LINK_CFG = 211, /** < rsvd */ + HINIC5_NIC_CMD_SET_LINK_FOLLOW = 212, /** < rsvd */ + HINIC5_NIC_CMD_GET_LINK_STATE = 213, /** < rsvd */ + HINIC5_NIC_CMD_LINK_STATUS_REPORT = 214, /** < rsvd */ + HINIC5_NIC_CMD_CABLE_PLUG_EVENT = 215, /** < rsvd */ + HINIC5_NIC_CMD_LINK_ERR_EVENT = 216, /** < rsvd */ + HINIC5_NIC_CMD_SET_LED_STATUS = 217, /** < rsvd */ + HINIC5_NIC_CMD_MIG_SET_CEQ_CTRL = 230, /** < @see mig_nic_set_ceq_ctrl */ + HINIC5_NIC_CMD_MIG_CFG_MSIX_INFO = 231, /** < @see mig_nic_msix_info_rw */ + HINIC5_NIC_CMD_MIG_CFG_FUNC_VAT_TBL = 232, /** < @see mig_nic_func_vat_tbl */ + HINIC5_NIC_CMD_MIG_GET_VF_INFO = 233, /** < @see mig_nic_func_cfg */ + HINIC5_NIC_CMD_MIG_CHK_MBX_EMPTY = 234, /** < @see mig_nic_chk_mbx_empty */ + HINIC5_NIC_CMD_MIG_SET_VPORT_ENABLE 
= 235, /** < @see mig_nic_vport_state */ + HINIC5_NIC_CMD_MIG_CFG_SQ_CI = 236, /** < @see mig_nic_sq_ci */ + HINIC5_NIC_CMD_MIG_CFG_RSS_TBL = 237, /** < @see mig_nic_cfg_rss_tbl */ + HINIC5_NIC_CMD_MIG_TMP_SET_CMDQ_CTX = 238, /** < @see mig_nic_tmp_cfg_cmdq_ctx */ + HINIC5_NIC_CMD_MIG_STOP_SQ = 239, /** < @see nic_mig_sq_stop */ + HINIC5_NIC_CMD_MIG_CFG_FAST_MSG_ADDR = 240, /** < @see mig_nic_fast_msg_addr */ + HINIC5_NIC_CMD_MIG_SET_FUNC_FLR_MGMT = 241, /** < @see hinic5_cmd_set_pcie_flr_mgmt */ + HINIC5_NIC_CMD_LRO_CFG = 242, /** < @see hinic5_cmd_lro_cfg */ + HINIC5_NIC_CMD_CFG_VF_LAG = 243, /** < 设置vf_lag(计算使用,rsvd) */ + HINIC5_NIC_CMD_VF_LAG_SYNC_BOND_STATE = 244, /* vf_lag同步bond状态(计算使用,rsvd) */ + HINIC5_NIC_CMD_EXTEND_SECTION1_START = 257, /* NIC Mbox保留命令字起始,计算产品线使用 */ + HINIC5_NIC_CMD_EXTEND_SECTION1_END = 384, /* NIC Mbox保留命令字结束,计算产品线使用 */ + HINIC5_NIC_CMD_EXTEND_SECTION2_START = 385, /* NIC Mbox保留命令字起始,数存产品线使用 */ + HINIC5_NIC_CMD_EXTEND_SECTION2_END = 512 /* NIC Mbox保留命令字结束,数存产品线使用 */ +}; + +#endif /* HINIC5_NIC_CMD_H */ diff --git a/hinic5/src/dpu_develop_interface/fw_msg_intf/nic/nic_mpu_cmd_extend.h b/hinic5/src/dpu_develop_interface/fw_msg_intf/nic/nic_mpu_cmd_extend.h new file mode 100644 index 00000000..e5547c2b --- /dev/null +++ b/hinic5/src/dpu_develop_interface/fw_msg_intf/nic/nic_mpu_cmd_extend.h @@ -0,0 +1,12 @@ +/** + * 该文件用于收编其他产品线回合到应龙仓的命令字 + */ + +#ifndef HINIC5_NIC_CMD_EXTEND_H +#define HINIC5_NIC_CMD_EXTEND_H + +#ifndef HINIC5_NIC_CMD_CLEAR_ASSIGN_QP_RES +#define HINIC5_NIC_CMD_CLEAR_ASSIGN_QP_RES 261 /** < clear resources of assigned qp > hinic5_cmd_clear_assign_qp_res */ +#endif + +#endif /* HINIC5_NIC_CMD_EXTEND_H */ diff --git a/hinic5/src/dpu_develop_interface/fw_msg_intf/nic/nic_mpu_cmd_structs.h b/hinic5/src/dpu_develop_interface/fw_msg_intf/nic/nic_mpu_cmd_structs.h new file mode 100644 index 00000000..25ae2ba4 --- /dev/null +++ b/hinic5/src/dpu_develop_interface/fw_msg_intf/nic/nic_mpu_cmd_structs.h @@ -0,0 +1,1868 @@ +/* + * 
mailbox命令字结构体定义文件 + * 头文件通过MAILBOX基线表格中的脚本自动生成。如有增删,请先修改基线表格,然后自动生成该头文件 + * 修改流程见:https://wiki.huawei.com/domains/1574/wiki/8/WIKI202509298479069 + */ + +#ifndef HINIC5_NIC_CMD_STRUCTS_H +#define HINIC5_NIC_CMD_STRUCTS_H + +#if defined(__LINUX__) || defined(__VMWARE__) +#include <linux/types.h> +#endif + +#include "mpu_cmd_base_defs.h" + +/** + * @brief 管理消息头部结构体 + * @details 该结构体用于存储管理消息的头部信息,包括状态、版本和保留字段。 + */ +struct hinic5_mgmt_msg_head { + u8 status; /**< 状态字段 */ + u8 version; /**< 版本字段 */ + u8 rsvd0[6]; /**< 保留域段 */ +}; + +/** + * @brief struct hinic5_cmd_register_vf + * @details nic vf register + */ +struct hinic5_cmd_register_vf { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u8 op_register; /**< 0 - unregister, 1 - register */ + u8 rsvd1[3]; /**< 保留域段 */ + u32 support_extra_feature; /**< support extra feature */ + u8 rsvd2[32]; /**< 保留域段 */ +}; + +/** + * @brief 存储函数表的配置信息 + * @details 该结构体包含了接收队列的WQE缓冲区大小、最大传输单元(MTU)以及保留字段。 + */ +struct hinic5_func_tbl_cfg { + u16 rx_wqe_buf_size; /**< 接收队列的WQE缓冲区大小 */ + u16 mtu; /**< 最大传输单元(MTU) */ + u8 rx_compact_wqe_en; /**< rx 8Byte wqe(合一cqe)使能 */ + u8 rsvd0[3]; /**< 保留域段 */ + u8 mac[6]; + u16 vlan_id; + u32 rsvd1[6]; /**< 保留域段 */ +}; + +/** + * @brief 用于设置函数表的结构体 + * @details 该结构体用于设置函数表,包含了管理消息头部、函数ID、保留字段、配置位图和函数表配置等信息。 + */ +struct hinic5_cmd_set_func_tbl { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< Func ID */ + u16 rsvd; /**< 保留域段 */ + u32 cfg_bitmap; /**< 设置位图,由hinic5_func_tbl_cfg_bitmap定义 */ + struct hinic5_func_tbl_cfg tbl_cfg; /**< 配置表 */ +}; + +/** + * @brief 可配置的func级的属性 + * @details nic vport state info + */ +struct hinic5_vport_state { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< Func ID */ + u16 rsvd1; /**< 保留域段 */ + u8 state; /**< 0--disable, 1--enable */ + u8 num_qps; /**< queue pairs number */ + u8 rx_compact_wqe_en; + u8 rsvd2; /**< 保留域段 */ +}; + +/** + * @brief 可配置的func级的属性 + * @details nic 配置rx_mode命令结构体 + */ +struct 
hinic5_rx_mode_config { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< Func ID */ + u16 rsvd1; /**< 保留域段 */ + u32 rx_mode; /**< rx mode */ +}; + +/** + * @brief 可配置的func级的属性 + * @details nic 设置 cons idx attr + */ +struct hinic5_cmd_cons_idx_attr { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_idx; /**< Func ID */ + u8 dma_attr_off; /**< dma attribute offset */ + u8 pending_limit; /**< pending limit */ + u8 coalescing_time; /**< coalescing time */ + u8 intr_en; /**< interrupt enable */ + u16 intr_idx; /**< interrupt index */ + u32 l2nic_sqn; /**< l2nic sequence number */ + u32 rsvd1; /**< 保留域段 */ + u64 ci_addr; /**< ci address */ +}; + +/** + * @brief 定义了一个用于存储端口统计信息的结构体 + * @details 该结构体包含了管理消息头部和两个字段,用于存储端口统计信息。 + */ +struct hinic5_port_stats_info { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< Func ID */ + u16 rsvd1; /**< 保留域段 */ +}; + +/** + * @brief 定义了一个结构体,用于存储vport的统计信息 + * @details vport的发送和接收统计信息,包括单播、多播、广播包的数量和字节数,以及发送和接收被丢弃和出现错误的包数。 + */ +struct hinic5_vport_stats { + u64 tx_unicast_pkts_vport; /**< 发送的单播包数 */ + u64 tx_unicast_bytes_vport; /**< 发送的单播字节数 */ + u64 tx_multicast_pkts_vport; /**< 发送的多播包数 */ + u64 tx_multicast_bytes_vport; /**< 发送的多播字节数 */ + u64 tx_broadcast_pkts_vport; /**< 发送的广播包数 */ + u64 tx_broadcast_bytes_vport; /**< 发送的广播字节数 */ + u64 rx_unicast_pkts_vport; /**< 接收的单播包数 */ + u64 rx_unicast_bytes_vport; /**< 接收的单播字节数 */ + u64 rx_multicast_pkts_vport; /**< 接收的多播包数 */ + u64 rx_multicast_bytes_vport; /**< 接收的多播字节数 */ + u64 rx_broadcast_pkts_vport; /**< 接收的广播包数 */ + u64 rx_broadcast_bytes_vport; /**< 接收的广播字节数 */ + u64 tx_discard_vport; /**< 发送被丢弃的包数 */ + u64 rx_discard_vport; /**< 接收被丢弃的包数 */ + u64 tx_err_vport; /**< 发送出现错误的包数 */ + u64 rx_err_vport; /**< 接收出现错误的包数 */ +}; + +/** + * @brief 定义了一个用于存储vport统计信息的结构体 + * @details 该结构体包含了vport统计信息的相关数据,包括管理消息头部、统计信息的大小、保留字段、vport统计信息以及额外的保留字段。 + */ +struct hinic5_cmd_vport_stats { + struct hinic5_mgmt_msg_head msg_head; /**< 
命令字消息头 */ + u32 stats_size; /**< 统计信息的大小 */ + u32 rsvd1; /**< 保留域段 */ + struct hinic5_vport_stats stats; /**< vport统计信息 */ + u64 rsvd2[6]; /**< 保留域段 */ +}; + +/** + * @brief hinic5_cmd_clear_qp_resource + * @details nic 清除qp资源命令结构体 + */ +struct hinic5_cmd_clear_qp_resource { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< Func ID */ + u16 rsvd1; /**< 保留域段 */ +}; + +/** + * @brief hinic5_cmd_lro_config + * @details nic 配置lro结构体 + */ +struct hinic5_cmd_lro_config { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< Func ID */ + u8 opcode; /**< operation code */ + u8 rsvd1; /**< 保留域段 */ + u8 lro_ipv4_en; /**< LRO for IPv4 */ + u8 lro_ipv6_en; /**< LRO for IPv6 */ + u8 lro_max_pkt_len; /**< unit is 1K */ + u8 resv2[13]; /**< 保留域段 */ +}; + +/** + * @brief hinic5_cmd_lro_timer + * @details nic 配置lro timer + */ +struct hinic5_cmd_lro_timer { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u8 opcode; /**< 1: set timer value, 0: get timer value */ + u8 rsvd1; /**< 保留域段 */ + u16 rsvd2; /**< 保留域段 */ + u32 timer; /**< timer value */ +}; + +#define NIC_MAX_FEATURE_QWORD 4 + +/** + * @brief hinic5_cmd_feature_nego + * @details nic自协商特性命令结构体 + */ +struct hinic5_cmd_feature_nego { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< Func ID */ + u8 opcode; /**< opeartion code 1: set, 0: get */ + u8 rsvd; /**< 保留域段 */ + u64 s_feature[NIC_MAX_FEATURE_QWORD]; /**< feature */ +}; + +/** + * @brief hinic5_cmd_local_lro_state + * @details nic 设置lro state + */ +struct hinic5_cmd_local_lro_state { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< 1: set timer value, 0: get timer value */ + u8 opcode; /**< 0: get state, 1: set state */ + u8 state; /**< 0: disable, 1: enable */ +}; + +/** + * @brief hinic5_cmd_cache_out_qp_resource + * @details nic 设置 cache_out_qp_resource + */ +struct hinic5_cmd_cache_out_qp_resource { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 
func_id; /**< Func ID */ + u16 rsvd1; /**< 保留域段 */ +}; + +/** + * @brief 定义了一个接收队列完成队列上下文结构体 + * @details 该结构体用于描述接收队列完成队列的上下文信息 + */ +struct hinic5_rq_cqe_ctx { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u8 cqe_type; /**< 完成队列类型 */ + u8 rq_id; /**< 接收队列ID */ + u8 threshold_cqe_num; /**< 完成队列阈值 */ + u8 rsvd1; /**< 保留域段 */ + u16 msix_entry_idx; /**< MSI-X入口索引 */ + u16 rsvd2; /**< 保留域段 */ + u32 ci_addr_hi; /**< 完成队列地址的高位 */ + u32 ci_addr_lo; /**< 完成队列地址的低位 */ + u16 timer_loop; /**< 定时器循环次数 */ + u16 rsvd3; /**< 保留域段 */ +}; + +struct hinic5_rq_enable { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u32 rq_id; + u8 rq_enable; + u8 rsvd1[3]; /**< 保留域段 */ +}; + +#define ETH_ALEN 6 /**< Ethernet address length */ + +/** + * @brief hinic5_port_mac_set + * @details nic port mac设置命令结构体 + */ +struct hinic5_port_mac_set { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< Func ID */ + u16 vlan_id; /**< vlan id */ + u8 vf_lag_en; /**< vf_lag使能标志位(计算使用,保留) */ + u8 rsvd1; /**< 保留域段 */ + u8 mac[ETH_ALEN]; /**< mac address */ +}; + +/** + * @brief hinic5_port_mac_update + * @details nic port mac修改命令结构体 + */ +struct hinic5_port_mac_update { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< Func ID */ + u16 vlan_id; /**< vlan id */ + u16 rsvd1; /**< 保留域段 */ + u8 old_mac[ETH_ALEN]; /**< mac address */ + u16 rsvd2; /**< 保留域段 */ + u8 new_mac[ETH_ALEN]; /**< mac address */ +}; + +#define CHIP_ATTR_MAC_MAX_SIZE 192 + +/** + * @brief nic_cmd_mac_info + * @details MAC模块接口 + */ +struct nic_cmd_mac_info { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u32 valid_bitmap; /**< valid bitmap, 未使用 */ + u16 rsvd1; /**< 保留域段 */ + u8 host_id[32]; /**< host id, 未使用 */ + u8 port_id[32]; /**< port id, 未使用 */ + u8 mac_addr[CHIP_ATTR_MAC_MAX_SIZE]; /**< mac addr */ +}; + +/** + * @brief hinic5_cmd_vlan_config + * @details nic 配置vlan信息 + */ +struct hinic5_cmd_vlan_config { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + 
u16 func_id; /**< Func ID */ + u8 opcode; /**< opration code */ + u8 rsvd1; /**< 保留域段 */ + u16 vlan_id; /**< vlan id */ + u8 blacklist_flag; /**< 黑名单标志位(计算使用,保留) */ + u8 rsvd2; /**< 保留域段 */ +}; + +struct hinic5_cmd_vxlan_port_info { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< Func ID */ + u8 opcode; /**< opration code */ + u8 cfg_mode; /**< 优先级标志位 */ + u16 vxlan_port; /**< 目标port */ + u8 pkt_fmt; /**< 0:vxlan报文 1:vxlan_gpe报文 */ + u8 rsvd2; /**< 保留域段 */ +}; + +/** + * @brief hinic5_cmd_set_vlan_filter + * @details nic set vlan filter + */ +struct hinic5_cmd_set_vlan_filter { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< Func ID */ + u8 rsvd1[2]; /**< 保留域段 */ + u32 vlan_filter_ctrl; /**< bit0:vlan filter en; bit1:broadcast_filter_en */ +}; + +/** + * @brief hinic5_cmd_vlan_offload + * @details nic 设置 vlan offload + */ +struct hinic5_cmd_vlan_offload { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< Func ID */ + u8 vlan_offload; /**< vlan offload */ + u8 rsvd1[5]; /**< 保留域段 */ +}; + +struct hinic5_smac_check_state { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u8 smac_check_en; /**< 1: enable 0: disable */ + u8 op_code; /**< 1: set 0: get */ + u8 flash_en; /**< flash使能标志位 */ + u8 rsvd; /**< 保留域段 */ +}; + +/** + * @brief hinic5_cmd_vf_vlan_config + * @details nic 配置vf的vlan信息 + */ +struct hinic5_cmd_vf_vlan_config { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< Func ID */ + u8 opcode; /**< opration code */ + u8 rsvd1; /**< 保留域段 */ + u16 vlan_id; /**< vlan id */ + u8 qos; /**< qos */ + u8 rsvd2[5]; /**< 保留域段 */ +}; + +/** + * @brief hinic5_cmd_vf_trust_config + * @details nic 配置vf的trust信息 + */ +struct hinic5_cmd_vf_trust_config { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< Func ID */ + u16 vlan_id; /**< vlan id */ + u8 trust; /* vf_trust: 0-disable; 1-enable */ + u8 rsvd2[67]; /**< 保留域段 */ +}; + +/** + * @brief 
hinic5_cmd_spoofchk_set + * @details nic 配置 spoofchk + */ +struct hinic5_cmd_spoofchk_set { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< Func ID */ + u8 state; /**< state */ + u8 rsvd1; /**< 保留域段 */ +}; + +#define NIC_RATE_DIRECT_RX_BW 0 /**< RX带宽限速 */ +#define NIC_RATE_DIRECT_TX_BW 1 /**< TX带宽限速 */ +#define NIC_RATE_DIRECT_RX_PERCENT 2 /**< RX百分比限速 (设置命令1872实现存在差异) 其他部分完全一样 */ +#define NIC_RATE_DIRECT_TX_PERCENT 3 /**< TX百分比限速 */ +#define NIC_RATE_OP_SET 0 /**< 限速设置 */ +#define NIC_RATE_OP_GET 1 /**< 限速查询 */ +#define NIC_RATE_OP_FLASH 2 /**< FLASH固化 */ +#define NIC_RATE_OP_UNUSE 4 /**< 用于兼容性适配,针对1823(由于不会去读该域段,因此实际会做限速配置的操作),1872/1825等后面代次则实际不做任何操作直接返回 */ + +/** + * @brief hinic5_cmd_rate_cfg + * @details nic rate 速率配置 + */ +struct hinic5_cmd_rate_cfg { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< Func ID */ + u8 cfg_mode; /* BIT[0]:设置和查询, 0-设置, 1代表查询; + * BIT[1]:是否支持固化到Flash 目前仅支持百分比限速的固化; + * BIT[2]:若为1表示什么都不操作直接返回 + */ + u8 direct; /* 方向: 0-RX带宽限速 1-TX带宽限速 2-RX_RATE百分比 3-TX_RATE百分比 */ + u32 cir; /**< C桶速率 Mbps 旧域段名称min_rate */ + u32 pir; /* P桶速率 Mbps 若为带宽限速,则代表C桶速率(Mbps), + * 若direct为百分比限速,则代表百分比,旧域段名称max_rate + */ + u32 cbs; /**< C桶桶深 Mbit 0则使用默认桶深 */ + u32 pbs; /**< P桶桶深 Mbit 0则使用默认桶深 */ +}; + +#define NIC_RATE_MODE_PERCENT 0 /**< 按照百分比限速生效. */ +#define NIC_RATE_MODE_BANDWIDTH 1 /**< 按照带宽限速生效. 
*/ + +/** + * @brief hinic5_cmd_rate_cfg_ret + * @details nic rate 速率配置查询 + */ +struct hinic5_cmd_rate_cfg_ret { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 car_id; /**< RX方向限速需要返回CAR_ID */ + u8 rate_mode; /**< 当前限速生效的模式:0-百分比限速 1-带宽限速 */ + u8 rsvd; /**< 保留域段 */ + u32 cir; /**< C桶速率 Mbps 旧域段名称min_rate */ + u32 pir; /* P桶速率 Mbps 若为带宽限速,则代表C桶速率(Mbps), + * 若direct为百分比限速,则代表百分比,旧域段名称max_rate + */ + u32 cbs; /**< C桶桶深 Mbit 0则使用默认桶深 */ + u32 pbs; /**< P桶桶深 Mbit 0则使用默认桶深 */ +}; + +#define NIC_DCB_COS_MAX 0x8 /**< 定义网络接口控制数据包(NIC DCB)的最大优先级(COS) */ + +/** + * @brief 定义了一个用于RSS配置的结构体 + * @details 该结构体包含了RSS配置所需的各种参数,如函数ID、RSS使能标志、优先级队列数量、优先级与流量类别的映射关系、队列对数量等。 + */ +struct hinic5_cmd_rss_config { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< 函数ID,用于标识当前配置的是哪个函数的RSS */ + u8 rss_en; /* RSS使能标志,如果为1则表示RSS功能开启,0则表示关闭 */ + u8 rq_priority_number; /* 优先级队列的数量,决定了RSS的具体实现方式 */ + u8 prio_tc[NIC_DCB_COS_MAX]; /* 优先级与流量类别的映射关系,数组长度为NIC_DCB_COS_MAX */ + u16 num_qps; /**< 队列对的数量,决定了RSS的具体实现方式 */ + u16 rsvd1; /**< 保留域段 */ +}; + +/** + * @brief 定义了一个RSS(Receive Side Scaling,接收端缩放)模板管理结构体 + * @details 该结构体用于管理RSS模板,包含了管理消息头部、函数ID、命令、模板ID等信息 + */ +struct hinic5_rss_template_mgmt { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< 函数ID,用于标识当前消息对应的功能模块 */ + u8 cmd; /**< 命令,用于标识需要执行的操作 */ + u8 template_id; /**< 模板ID,用于标识当前操作对应的模板 */ + u8 rsvd1[4]; /**< 保留域段 */ +}; + +/** + * @brief 定义了一个RSS(Receive Side Scaling,接收端缩放)上下文表结构体 + * @details 该结构体包含了rss context表内的相关数据,包含管理消息头部、func id、上下文信息以及保留字段。 + */ +struct hinic5_rss_context_table { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< 函数ID,用于标识当前消息对应的功能模块 */ + u16 rsvd1; /**< 保留域段 */ + u32 context; /**< RSS上下文 */ +}; + +#define NIC_RSS_KEY_SIZE 40 /**< 定义RSS ket大小为40 */ + +/** + * @brief 定义了一个用于RSS哈希键的结构体 + * @details 该结构体用于存储RSS哈希键相关信息 + */ +struct hinic5_cmd_rss_hash_key { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< 函数ID 
*/ + u8 opcode; /**< 操作码 */ + u8 rsvd1; /**< 保留域段 */ + u8 key[NIC_RSS_KEY_SIZE]; /**< RSS哈希键 */ +}; + +/** + * @brief 定义了一个用于RSS引擎类型的结构体 + * @details 该结构体用于描述RSS引擎类型相关的信息 + */ +struct hinic5_cmd_rss_engine_type { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< 函数ID */ + u8 opcode; /**< 操作码 */ + u8 hash_engine; /**< 哈希引擎 */ + u8 rsvd1[4]; /**< 保留域段 */ +}; + +/** + * @brief hinic5_ipcs_err_rss_enable_operation_s + * @details IP checksum error packets, enable rss quadruple hash + */ +struct hinic5_ipcs_err_rss_enable_operation_s { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u8 en_tag; + u8 type; /**< 1: set 0: get */ + u8 rsvd[2]; /**< 保留域段 */ +}; + +/** + * @brief 网络接口命令过滤删除规则的结构体 + * @details 该结构体用于存储网络接口命令过滤删除规则的相关信息。 + */ +struct nic_cmd_fdir_del_rules { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u16 func_id; /**< 函数ID */ + u8 type; /**< 类型 */ + u8 rsvusrdata; /**< 用户数据 */ + u32 index_start; /**< 索引开始位置 */ + u32 index_num; /**< 索引数量 */ +}; + +/** + * @brief 定义了一个用于刷新TCAM规则的结构体 + * @details 该结构体包含了管理消息头部和函数ID两个成员,用于刷新TCAM规则。 + */ +struct nic_cmd_flush_tcam_rules { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u16 func_id; /**< 函数ID */ + u16 rsvd; /**< 保留域段 */ +}; + +/** + * @brief 分配TCAM块的输入结构体 + * @details 该结构体用于分配TCAM块的输入参数,包含头部信息、函数ID、分配标志、TCAM类型、TCAM块索引和分配块数量等信息。 + */ +struct nic_cmd_ctrl_tcam_block_in { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u16 func_id; /**< 函数ID */ + u8 alloc_en; /**< 0: 释放分配的tcam block, 1: 申请新的tcam block */ + u8 tcam_type; /* 0: 分配16 size 的tcam block, + * 1: 分配0 size的tcam block, 其他预留 + */ + u16 tcam_block_index; /**< index */ + u16 alloc_block_num; /* 驱动发给uP表示驱动希望分配的block大小 + * uP返回给驱动的接口,表示uP 支持的分配的tcam block大小 + */ +}; + +/** + * @brief 分配TCAM块输出结构体 + * @details 该结构体用于存储分配TCAM块的输出信息。 + */ +struct nic_cmd_ctrl_tcam_block_out { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u16 func_id; /**< 函数ID */ + u8 alloc_en; /**< 0: 释放分配的tcam block, 1: 申请新的tcam block */ + u8 
tcam_type; /* 0: 分配16 size 的tcam block, + * 1: 分配0 size的tcam block, 其他预留 + */ + u16 tcam_block_index; /**< index */ + u16 mpu_alloc_block_size; /* 驱动发给uP表示驱动希望分配的block大小, + * uP返回给驱动的接口,表示uP 支持的分配的tcam block大小 + */ +}; + +/** + * @brief nic_cmd_set_tcam_enable + * @details enable tcam + */ +struct nic_cmd_set_tcam_enable { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u16 func_id; /**< 函数ID */ + u8 tcam_enable; /**< tcam enable */ + u8 rsvd1; /**< 保留域段 */ + u32 rsvd2; /**< 保留域段 */ +}; + +/** + * @brief 分配TCAM块的输入结构体 + * @details 该结构体用于分配TCAM块的输入参数,包含头部信息、函数ID、分配标志、TCAM类型、TCAM块索引和分配块数量等信息。 + */ +struct nic_cmd_dfx_fdir_tcam_block_table { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u8 tcam_type; /**< TCAM类型 */ + u8 valid; /**< 有效标志位 */ + u16 tcam_block_index; /**< TCAM块索引 */ + u16 use_function_id; /**< 使用的函数ID */ + u16 rsvd; /**< 保留域段 */ +}; + +struct hinic5_ppa_cfg_table_id_cmd { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 rsvd0; /**< 保留域段 */ + u16 cmd; /**< command */ + u16 table_id; /**< table id */ + u16 rsvd1; /**< 保留域段 */ +}; + +struct hinic5_ppa_cfg_ppa_en_cmd { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< function id */ + u8 ppa_en; /**< ppa enable */ + u8 rsvd; /**< 保留域段 */ +}; + +struct hinic5_ppa_cfg_mode_cmd { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 rsvd0; /**< function id */ + u8 ppa_mode; /**< ppa mode */ + u8 qpc_func_nums; /**< qpc function numbers */ + u16 base_qpc_func_id; /**< base qpc function id */ + u16 rsvd1; /**< 保留域段 */ +}; + +struct hinic5_ppa_flush_en_cmd { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 rsvd0; /**< 保留域段 */ + u8 flush_en; /**< 0 flush done, 1 in flush operation */ + u8 rsvd1; /**< 保留域段 */ +}; + +struct nic_cmd_set_fdir_status { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u16 func_id; /**< function id */ + u16 index; /**< index */ + u8 pkt_type_en; /**< packet type enable */ + u8 pkt_type; /**< packet type */ + u8 qid; 
/**< queue id */ + u8 flag; /**< packet drop flag */ +}; + +struct hinic5_ppa_fdir_query_cmd { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u32 index; /**< index */ + u32 rsvd; /**< 保留域段 */ + u64 pkt_nums; /**< packet type */ + u64 pkt_bytes; /**< packet bytes */ +}; + +struct hinic5_port_state { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< function id */ + u16 rsvd1; /**< 保留域段 */ + u8 state; /**< 0--disable, 1--enable */ + u8 rsvd2[3]; /**< 保留域段 */ +}; + +struct hinic5_cmd_pause_config { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u8 port_id; /**< port id */ + u8 opcode; /**< operation code */ + u16 rsvd1; /**< 保留域段 */ + u8 auto_neg; /**< auto negotiation */ + u8 rx_pause; /**< receive pause */ + u8 tx_pause; /**< send pause */ + u8 rsvd2[5]; /**< 保留域段 */ +}; + +struct hinic5_port_car_info { + u32 cir; /**< unit: kbps, range:[1,400*1000*1000], i.e. 1Kbps~400Gbps(400M*kbps) */ + u32 xir; /**< unit: kbps, range:[1,400*1000*1000], i.e. 1Kbps~400Gbps(400M*kbps) */ + u32 cbs; /**< unit: Byte, range:[1,320*1000*1000], i.e. 1byte~2560Mbit */ + u32 xbs; /**< unit: Byte, range:[1,320*1000*1000], i.e. 
1byte~2560Mbit */ +}; + +/** + * @brief hinic5_cmd_set_port_car + * @details nic 设置car限速命令结构体(1825) + */ +struct hinic5_cmd_set_port_car { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u8 port_id; /**< port id */ + u8 opcode; /**< 0--set car profile, 1--set car state, 2--get car profile */ + u8 state; /**< 0--disable, 1--enable */ + u8 level; /**< limit level */ + struct hinic5_port_car_info car; /**< car info */ +}; + +struct hinic5_cmd_set_car_option { + u8 type; /**< 0:port, 1:func, 2:vnic group*/ + u8 port_id; + u16 func_id; + u8 car_enable; + u8 opcode; /**< 0--set car profile, 1--get car profile */ + u8 pkt_type; + u8 car_alg_type; +}; + +/** + * @brief hinic5_cmd_set_car + * @details nic 设置car限速命令结构体(1872) + */ +struct hinic5_cmd_set_car { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + struct hinic5_cmd_set_car_option option; + struct hinic5_port_car_info car; /**< car info */ +}; + +/** + * @brief hinic5_car_profile + * @details nic 设置car限速命令结构体(1872) + */ +struct hinic5_car_profile { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 car_id; /**< car id */ + u8 level; /**< limit level */ + u8 rsvd; /**< 保留域段 */ + u32 profile[4]; /**< car profile */ +}; + +#define NIC_DCB_TC_MAX 0x8 /**< 定义网络接口控制数据包(NIC DCB)的最大流量类别(TC) */ + +/** + * @brief hinic5_cmd_ets_cfg + * @details nic 配置ets命令结构体 + */ +struct hinic5_cmd_ets_cfg { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u8 port_id; /**< port id */ + u8 op_code; /**< 1 - set, 0 - get */ + u8 cfg_bitmap; /* bit0 - cos_tc, bit1 - tc_bw, bit2 - cos_prio, + * bit3 - cos_bw, bit4 - tc_prio + */ + u8 rsvd; /**< 保留域段 */ + u8 cos_tc[NIC_DCB_COS_MAX]; + u8 tc_bw[NIC_DCB_TC_MAX]; + u8 cos_prio[NIC_DCB_COS_MAX]; /**< 0 - DWRR, 1 - STRICT */ + u8 cos_bw[NIC_DCB_COS_MAX]; + u8 tc_prio[NIC_DCB_TC_MAX]; /**< 0 - DWRR, 1 - STRICT */ +}; + +struct hinic5_cmd_set_pfc { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u8 port_id; /**< port id */ + u8 op_code; /**< 0:get 1: set pfc_en 2: set 
pfc_bitmap 3: set all */ + u8 pfc_en; /**< pfc_en 和 pfc_bitmap 必须同时设置 */ + u8 pfc_bitmap; /**< pfc bitmap */ + u8 rsvd[4]; /**< 保留域段 */ +}; + +/** + * @brief hinic5_cmd_set_dcb_state + * @details nic 设置dcb state命令结构体 + */ +struct hinic5_cmd_set_dcb_state { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u16 func_id; /**< port id */ + u8 op_code; /**< 0 - get dcb state, 1 - set dcb state */ + u8 state; /**< 0 - disable, 1 - enable dcb */ + u8 port_state; /**< 0 - disable, 1 - enable dcb */ + u8 rsvd[7]; /**< 保留域段 */ +}; + +struct hinic5_cmd_qos_port_cfg { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u8 port_id; /**< port id */ + u8 op_code; /**< 0 - get, 1 - set */ + u8 cfg_bitmap; /**< bit0 - trust, bit1 - dft_cos */ + u8 rsvd0; /**< 保留域段 */ + u8 trust; /**< trust state */ + u8 dft_cos; /**< dft cos */ + u8 rsvd1[18]; /**< 保留域段 */ +}; + +struct hinic5_cmd_qos_map_cfg { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u8 op_code; /**< operation code */ + u8 cfg_bitmap; /**< bit0 - pcp2cos, bit1 - dscp2cos */ + u16 rsvd0; /**< 保留域段 */ + u8 pcp2cos[8]; /**< 必须8个一起配置 */ + u8 dscp2cos[64]; /* 配置dscp2cos时,若cos值设置为0xFF,MPU则忽略此dscp优先级的配置 + * 允许一次性配置多个dscp跟cos的映射关系 + */ + u32 rsvd1[4]; /**< 保留域段 */ +}; + +/** + * @brief hinic5_cmd_qos_extend_cfg + * @details nic 配置qos命令扩展结构体 + */ +struct hinic5_cmd_qos_extend_cfg { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u8 port_id; /**< port id */ + u8 op_code; /**< bit0: 1-set tc rate limit, 0-get tc rate limit */ + u16 rsvd0; /**< 保留域段 */ + u16 port_speed; /**< 端口速率, 单位Gbps */ + u16 rsvd1; /**< 保留域段 */ + u32 rate_limit[NIC_DCB_COS_MAX]; /**< tc限速速率值, 单位Mbps */ + u32 port_cir; /**< port的cir值, 单位Mbps */ + u32 rsvd2[115]; /**< 保留域段 */ +}; + +/** + * @brief hinic5_force_pkt_drop + * @details nic force pkt drop命令结构体 + */ +struct hinic5_force_pkt_drop { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u8 port; /**< port id */ + u8 rsvd1[3]; /**< 保留域段 */ +}; + +/** + * @brief nic_cmd_pause_inquiry_cfg + * 
@details pfc风暴检测配置 + */ +struct nic_cmd_pause_inquiry_cfg { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u32 valid; /**< valid */ + u32 type; /**< 1: set, 2: get */ + u32 rx_inquiry_pause_drop_pkts_en; /**< rx 卸包使能 */ + u32 rx_inquiry_pause_period_ms; /**< rx pause 检测周期 默认 200ms */ + u32 rx_inquiry_pause_times; /**< rx pause 检测次数 默认1次 */ + u32 rx_inquiry_pause_frame_thd; /* rx pause 检测阈值 + * 默认 PAUSE_FRAME_THD_10G/25G/40G/100 + */ + u32 rx_inquiry_tx_total_pkts; /**< rx pause 检测tx收包总数 */ + u32 tx_inquiry_pause_en; /**< tx pause 检测使能 */ + u32 tx_inquiry_pause_period_ms; /**< tx pause 检测周期 默认 200ms */ + u32 tx_inquiry_pause_times; /**< tx pause 检测次数 默认 5次 */ + u32 tx_inquiry_pause_frame_thd; /**< tx pause 检测阈值 */ + u32 tx_inquiry_rx_total_pkts; /**< tx pause 检测rx收包总数 */ + u32 rsvd[4]; /**< 保留域段 */ +}; + +struct nic_bios_cfg { + u32 signature; /**< 签名,用于判断FLASH的内容合法性 */ + u8 pxe_en; /**< PXE enable: 0 - disable 1 - enable */ + u8 extend_mode; + u8 rsvd0[2]; /**< 保留域段 */ + u8 pxe_vlan_en; /**< PXE VLAN enable: 0 - disable 1 - enable */ + u8 pxe_vlan_pri; /**< PXE VLAN priority: 0-7 */ + u16 pxe_vlan_id; /**< PXE VLAN ID 1-4094 */ + u32 service_mode; /**< 参考CHIPIF_SERVICE_MODE_x 宏 */ + u32 pf_bw; /**< PF速率,百分比 0-100 */ + u8 speed; /**< enum of port speed */ + u8 auto_neg; /**< 自协商开关 0 - 字段无效 1 - 开2 - 关 */ + u8 lanes; /**< lane num */ + u8 fec; /**< FEC模式, 参考 enum mag_cmd_port_fec */ + u8 auto_adapt; /**< 自适应模式配置0 - 无效配置 1 - 开启 2 - 关闭 */ + u8 func_valid; /**< 指示func_id是否有效; 0 - 无效,other - 有效 */ + u8 func_id; /**< 当func_valid不为0时,该成员才有意义 */ + u8 sriov_en; /**< SRIOV-EN: 0 - 无效配置, 1 - 开启, 2 - 关闭 */ +}; + +struct nic_cmd_bios_cfg { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u32 op_code; /**< Operation Code: Bit0[0: read 1:write, BIT1-6: cfg_mask */ + struct nic_bios_cfg bios_cfg; /**< BIOS configuration */ +}; + +/** + * @brief 定义了一个用于存储半卸载bond func link状态信息的结构体 + * @details 半卸载bond创建/更新/删除会给非slave的func发送link消息 + */ +struct hinic5_bond_link_info { + struct 
hinic5_mgmt_msg_head head; /**< 管理消息头部信息 */ + u8 bond_en; /**< bond 是否开启 0:关闭 1:开启 */ + u8 port_id; /**< port id */ + u8 link_status; /**< bond的非slave设备link状态 0:down 1:up */ + u8 rsvd[13]; /**< 保留域段 */ +}; + +/** + * @brief 发生PN超阈值中断时, 内部解析信息结构体 + * @details 包含阈值、SC索引、AN等信息 + */ +struct macsec_pn_expired_report_info { + u64 sci[32]; /**< 发生超阈值事件对应SC的SCI */ + u8 an[32]; /**< 发生超阈值事件对应SA的AN号 */ + u8 pn_expired_size; /**< SA阈值 */ + u8 reserved[7]; /**< 保留域段 */ +}; + +/** + * @brief macsec pn阈值上报消息结构体 + * @details 发生PN超阈值中断时, 向驱动上报消息结构体定义 + */ +struct macsec_pn_expired_report_cmd { + struct hinic5_mgmt_msg_head head; /**< 管理消息头部信息 */ + struct macsec_pn_expired_report_info info; /**< 超阈值相关结构体信息 */ + u64 reserved; /**< 保留域段 */ +}; + +/** + * @brief 定义了一个用于删除bond的结构体 + * @details 该结构体包含了删除bond所需的所有信息 + */ +struct hinic5_cmd_delete_bond { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u32 bond_id; /**< bond的ID */ + u32 rsvd[2]; /**< 保留域段 */ +}; + +/** + * @brief 定义一个结构体,用于存储bond设备的开启/关闭信息 + * @details 该结构体包含了bond设备的开启/关闭信息,包括bond设备号、开启/关闭标识和保留字段。 + */ +struct hinic5_open_close_bond_info { + u32 bond_id; /**< bond设备号 */ + u32 open_close_flag; /**< 开启/关闭bond标识:1为open, 0为close */ + u32 rsvd[2]; /**< 保留域段 */ +}; + +/** + * @brief 定义了一个结构体,用于存储MPU bond的消息接口的相关信息 + * @details 该结构体包含了管理消息头部的信息和MPU bond的开启或关闭的相关信息 + */ +struct hinic5_cmd_open_close_bond { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + struct hinic5_open_close_bond_info open_close_bond_info; /* 存储MPU bond的开启或关闭的 + * 相关信息 + */ +}; + +/** + * @brief LACPDU的port相关字段 + * @details 该结构体用于描述LACPDU(链接聚合控制协议数据单元)中的port相关参数。 + */ +struct lacp_port_params { + u16 port_number; /**< 端口号 */ + u16 port_priority; /**< 端口优先级 */ + u16 key; /**< 密钥 */ + u16 system_priority; /**< 系统优先级 */ + u8 system[ETH_ALEN]; /**< 系统MAC地址 */ + u8 port_state; /**< 端口状态 */ + u8 rsvd; /**< 保留域段 */ +}; + +/** + * @brief 定义了一个结构体,用于存储LACP(链接聚合控制协议)端口的信息 + * @details 该结构体包含了多个成员,每个成员都有其特定的含义和用途。 + */ +struct lacp_port_info { + u32 selected; 
/**< 表示该端口是否被选中 */ + u32 aggregator_port_id; /**< 使用的 aggregator port ID */ + struct lacp_port_params actor; /**< actor port参数 */ + struct lacp_port_params partner; /**< partner port参数 */ + u64 tx_lacp_pkts; /**< 发送的LACP数据包数量 */ + u64 rx_lacp_pkts; /**< 接收的LACP数据包数量 */ + u64 rx_8023ad_drop; /**< 丢弃的802.3ad数据包数量 */ + u64 tx_8023ad_drop; /**< 发送的802.3ad数据包数量 */ + u64 unknown_pkt_drop; /**< 丢弃的未知数据包数量 */ + u64 rx_marker_pkts; /**< 接收的marker数据包数量 */ + u64 tx_marker_pkts; /**< 发送的marker数据包数量 */ +}; + +#define BOND_MAX_PORT_NUM 4 /**< bond 支持最大port数量 */ +#define BOND_MAX_HOST_NUM 4 /**< bond 支持最大host数量 */ + +/** + * @brief 定义了一个用于存储bond状态信息的结构体 + * @details 该结构体包含了bond状态的各种信息,如bond_id、链路状态、slave port状态、port个数等,并且还包含了每个port的lacp信息,以及每个host成功和失败上报lacp协商结果的次数等。 + */ +struct hinic5_bond_status_info { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u32 bond_id; /**< bond id */ + u32 bon_mmi_status; /**< 该bond子设备的链路状态 */ + u32 active_bitmap; /**< 该bond子设备的slave port状态 */ + u32 port_count; /**< 该bond子设备个数 */ + struct lacp_port_info port_info[BOND_MAX_PORT_NUM]; /**< 每个port的lacp信息 */ + u64 success_report_cnt[BOND_MAX_HOST_NUM]; /* 每个host成功上报lacp协商结果次数 */ + u64 fail_report_cnt[BOND_MAX_HOST_NUM]; /* 每个host上报lacp协商结果失败次数 */ + u64 poll_timeout; /**< 轮询超时时间 */ + u64 fast_periodic_timeout; /**< 快速周期性超时时间 */ + u64 slow_periodic_timeout; /**< 慢速周期性超时时间 */ + u64 short_timeout; /**< 短超时时间 */ + u64 long_timeout; /**< 长超时时间 */ + u64 aggregate_wait_timeout; /**< 聚合等待超时时间 */ + u64 tx_period_timeout; /**< 发送周期超时时间 */ + u64 rx_marker_timer; /**< RX标记定时器 */ + u8 bond_mode; /**< bond模式 */ + u8 arp_dual_en; /**< arp双发使能标记 */ + u8 rsvd[6]; /**< 保留域段 */ +}; + +/** + * @brief 定义了一个用于存储bond活动报告信息的结构体 + * @details 该结构体包含了bond活动报告的相关信息,如管理消息头部信息、bond设备的ID、bond子设备的链路状态、bond子设备的slave port状态等。 + */ +struct hinic5_bond_active_report_info { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u32 bond_id; /**< bond的ID */ + u32 bon_mmi_status; /**< bond子设备的链路状态 */ + u32 active_bitmap; /**< bond子设备的slave 
port状态 */ + u8 rsvd[16]; /**< 保留域段 */ +}; + +#define DFX_SM_TBL_BUF_MAX 768 /**< 定义DFX分配的SM表最多为768B */ +#define MAC_TBL_RD_TYPE_MAC_INFO 0 /**< 标识获取数据类型为MAC表内容 */ +#define MAC_TBL_RD_TYPE_MAC_RES_STAT 1 /**< 标识获取数据类型为MAC资源统计 */ + +/** + * @brief MAC表参数 + * @details 该结构体用于存储MAC表参数,包括tbl_index、cnt和total_cnt。 + */ +struct mac_table_arg { + u32 tbl_index; /**< MAC表索引 */ + u32 cnt; /**< 计数 */ + u32 total_cnt; /**< 总计数 */ + u8 mac_tbl_rd_type; /**< 0-读取MAC内容, 1-读取MAC表资源统计 */ + u8 rsvd[3]; /**< 保留字段 */ +}; + +/** + * @brief VLAN电路表参数 + * @details 该结构体用于存储VLAN电路表参数,包括er_id和vlan_id。 + */ +struct vlan_elb_table_arg { + u32 er_id; /**< er ID */ + u32 vlan_id; /**< VLAN ID */ +}; + +/** + * @brief VLAN过滤参数(VLAN Filter1 Table) + * @details 该结构体用于存储VLAN过滤参数,包括tbl_index和func_id。 + */ +struct vlan_filter_arg { + u32 tbl_index; /**< VLAN Filter1表索引 */ + u32 func_id; /**< 功能ID */ +}; + +/** + * @brief VLAN过滤参数(VLAN Filter2 Table) + * @details 该结构体用于存储VLAN过滤参数,包括vlan_id。 + */ +struct vlan_filter2_arg { + u32 vlan_id; /**< VLAN ID */ +}; + +/** + * @brief 多播电路表参数 + * @details 该结构体用于存储多播电路表参数,包括mc_id。 + */ +struct mc_elb_arg { + u32 mc_id; /**< 多播ID */ +}; + +/** + * @brief 功能表参数 + * @details 该结构体用于存储功能表参数,包括func_id。 + */ +struct func_tbl_arg { + u32 func_id; /**< 功能ID */ +}; + +/** + * @brief 端口表参数 + * @details 该结构体用于存储端口表参数,包括port_id。 + */ +struct port_tbl_arg { + u32 port_id; /**< 端口ID */ +}; + +/** + * @brief 该结构体用于存储FDIR IO表参数,包括tbl_index、cnt和total_cnt。 + * @details 该结构体用于存储端口表参数,包括port_id。 + */ +struct fdir_io_table_arg { + u32 tbl_index; /**< FDIR IO表索引 */ + u32 cnt; /**< 计数 */ + u32 total_cnt; /**< 总计数 */ +}; + +/** + * @brief FlexQ表参数 + * @details 该结构体用于存储FlexQ表参数,包括tbl_index、cnt、total_cnt、left_cnt_die0和left_cnt_die1。 + */ +struct flexq_table_arg { + u32 tbl_index; /**< FlexQ表索引 */ + u32 cnt; /**< 计数 */ + u32 total_cnt; /**< 总计数 */ + u16 left_cnt_die0; /**< 剩余计数DIE0 */ + u16 left_cnt_die1; /**< 剩余计数DIE1 */ +}; + +/** + * @brief 定义一个联合体,用于存储各种类型的参数 + * @details 
该联合体包含了多个结构体,每个结构体代表不同的参数类型。这些参数类型包括MAC表参数、VLAN电路表参数、VLAN过滤参数、多播电路表参数、功能表参数、端口表参数、FDIR IO表参数和FlexQ表参数。每个参数类型都有其特定的成员变量,如tbl_index、cnt、total_cnt等。 + */ +typedef union { + struct mac_table_arg mac_table_arg; + struct vlan_elb_table_arg vlan_elb_table_arg; + struct vlan_filter_arg vlan_filter_arg; + struct vlan_filter2_arg vlan_filter2_arg; + struct mc_elb_arg mc_elb_arg; + struct func_tbl_arg func_tbl_arg; + struct port_tbl_arg port_tbl_arg; + struct fdir_io_table_arg fdir_io_table_arg; + struct flexq_table_arg flexq_table_arg; + u32 args[4]; /**< 参数数组 */ +} sm_tbl_args; + +/** + * @brief 定义了一个网络接口控制器命令的数据分析表结构体 + * @details 该结构体用于存储网络接口控制器命令的数据分析表相关信息 + */ +struct nic_cmd_dfx_sm_table { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u32 tbl_type; /**< 表类型,用于标识表的用途 */ + sm_tbl_args args; /**< 表参数,用于传递表的相关参数 */ + u8 tbl_buf[DFX_SM_TBL_BUF_MAX]; /**< 表缓冲区,用于存储表的数据 */ +}; + +#define MAC_TBL_UC_CNT_FUNC_RD_NUM 256 /**< 单次命令最多读取256个Function的单播统计 */ + +/** + * @brief 定义了一个MAC表项共享数据资源的结构体(用于转换nic_cmd_dfx_sm_table结构体的tbl_buf数据) + * @details 该结构体用于存储单播、组播、共享资源,单播独占资源的使用情况 + */ +typedef struct nic_cmd_dfx_mac_res_stats_info { + u16 uc_mac_cnt; /**< 单播表资源使用统计 */ + u16 mc_mac_cnt; /**< 组播表资源使用统计 */ + u16 share_mac_res_cur_cnt; /* 当前共享资源池使用统计 */ + u16 share_mac_res_total; /**< 共享资源池总大小 */ + u16 func_uc_mac_cnt[MAC_TBL_UC_CNT_FUNC_RD_NUM]; /* Func粒度单播表资源使用统计 */ + u8 rsvd[248]; /**< 保留字段 */ +} nic_cmd_dfx_mac_res_stats_info_s; + +/** + * @brief mpu_lt_info + * @details nic liner tabel info + */ +struct mpu_lt_info { + u8 node; /**< node id */ + u8 inst; /**< instance id */ + u8 entry_size; /**< entry size */ + u8 sml_table_id; /**< sml table id */ + u32 lt_index; /**< liner tabel index */ + u32 offset; /**< offset */ + u32 len; /**< length */ +}; + +struct nic_mpu_lt_opera { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + struct mpu_lt_info net_lt_cmd; + u8 data[100]; /**< data */ +}; + +struct hinic5_veb_set { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 
*/ + + u16 opcode; /**< 操作类型: 0=查询, 1=配置 */ + u16 set_status; /**< 配置请求值: 0=off, 1=on */ + u16 cur_status; /**< 当前状态: 0=off, 1=on, 2=error */ + u16 rsvd0; /**< 保留字段 */ + u32 rsvd[30]; /**< 保留字段 */ +}; + +/** + * @brief nic_cmd_capture_info + * @details ucode capture cfg info + */ +struct nic_cmd_capture_info { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u32 op_type; /**< operation type */ + u32 func_port; /**< function port */ + u32 is_en_trx; /**< 也作为tx_rx */ + u32 offset_cos; /**< 也作为cos */ + u32 data_vlan; /**< 也作为vlan */ +}; + +struct nic_cmd_vhd_config { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u16 func_id; /**< function id */ + u8 vhd_type; /**< vhd type */ + u8 virtio_small_enable; /**< 0: mergeable mode, 1: small mode */ +}; + +#define TC_ACL_KEY_BYTE 44 /**< 320 bits + 2 byte pad align to 4 byte */ + +/** + * @brief struct hinic5_tc_action_info + * @details info about tc action + */ +struct hinic5_tc_action_info { + u16 action_flag; /**< action flag */ + u16 output; /**< output */ + u8 flow_queue; /**< flow queue */ + u8 vxlan_tbl_index; /**< vxlan table index */ + u16 vlan_tag; /**< vlan tag */ + u32 flow_mark; /**< flow mark */ + u16 vlan_sel; /**< type of vlan frame */ + u16 count_id; /**< count id */ +}; + +/** + * @brief struct hinic5_tc_cfg_info + * @details info about add/del tc flower rule + */ +struct hinic5_tc_cfg_info { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u8 key_tcam_mem[TC_ACL_KEY_BYTE]; /**< tcam key mem */ + u8 mask_tcam_mem[TC_ACL_KEY_BYTE]; /**< tcam mask mem */ + struct hinic5_tc_action_info action; /**< action info */ + u16 opcode; /**< 0:del, 1:add */ + u16 index; /**< index id */ + u8 group_vld; /**< tcam group vld */ + u8 group_id; /**< tcam group id */ + u16 rsvd; /**< 保留域段 */ +}; + +#define ACL_LCAM_BITMAP_LEN 32 /**< 320 bits + 2 byte pad align to 4 byte */ + +/** + * @brief struct hinic5_tc_flush_info + * @details info about add/del tc flower rule + */ +struct hinic5_tc_flush_info { + 
struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u64 active_bitmap[ACL_LCAM_BITMAP_LEN]; /**< active bitmap */ +}; + +/** + * @brief struct hinic5_tc_vxlan_hdr_info + * @details info about tc vxlan header + */ +struct hinic5_tc_vxlan_hdr_info { + u8 dmac[6]; /**< destination mac */ + u8 smac[6]; /**< source mac */ + u16 vlan; /**< vlan tag */ + u8 tos; /**< type of service */ + u8 rsvd0; /**< 保留域段 */ + u8 sip[4]; /**< source ip */ + u8 dip[4]; /**< destination ip */ + u16 sport; /**< source port */ + u16 rsvd1; /**< 保留域段 */ + u32 vni; /**< bit[0:23]: vni, bit[24:31]: rsvd */ +}; + +/** + * @brief struct hinic5_tc_vxlan_tbl_cfg_info + * @details info about add/get/del tunnel encap vxlan table entry + */ +struct hinic5_tc_vxlan_tbl_cfg_info { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + struct hinic5_tc_vxlan_hdr_info vxlan_hdr; /**< vxlan header */ + u16 index; /**< index id */ + u16 opcode; /**< 0:del, 1:add, 2:get */ + u32 rsvd; /**< 保留域段 */ +}; + +/** + * @brief struct hinic5_tc_move_info + * @details pfe tcam rule 删除命令结构体 + */ +struct hinic5_tc_move_info { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u32 old_index; /**< old index */ + u32 new_index; /**< new index */ + u32 len; /**< length */ +}; + +/** + * @brief struct hinic5_tc_pfe_cfg_profile_info + * @details pfe profile config from register + */ +struct hinic5_tc_pfe_cfg_profile_info { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u32 reg_value; /**< register value */ + u32 opcode; /**< 0: select profile 1: 3-2 shift 2: 3-1-2 shift */ + u32 rsvd; /**< 保留域段 */ +}; + +#define PFE_ACL_AGING_BLOCK_NUM 128 + +/** + * @brief struct hinic5_tc_aging_info + * @details info about pfe tc aging table + */ +struct hinic5_tc_aging_info { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u16 addr; /**< address */ + u16 opcode; /**< 0:get, 1:set, 2:status */ + u32 entry_h[PFE_ACL_AGING_BLOCK_NUM]; /**< high 32 entry */ + u32 entry_l[PFE_ACL_AGING_BLOCK_NUM]; /**< lower 32 entry */ + u16 status; 
/**< pfe aging table enable status, 0:disable; 1:enable */ + u16 rsvd; /**< 保留域段 */ +}; + +#define HTN_CNT_SIZE 8 + +/** + * @brief struct hinic5_tc_pfe_cnt_info + * @details pfe count info + */ +struct hinic5_tc_pfe_cnt_info { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u32 htn_cnt[HTN_CNT_SIZE]; /**< HTN count */ + u16 mode; /**< 0:tx, 1:rx */ + u16 opcode; /**< 0:get, 1:reset */ + u32 count_id; /**< count id */ +}; + +/* VF 可用的vport计数资源 */ +#define MAX_NIC_COUNT_ID 24 + +/** + * @brief struct hinic5_nic_vport_cnt_info + * @details nic vport count info + */ +struct hinic5_nic_vport_cnt_info { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + struct hinic5_vport_stats stats; /**< vport统计 */ + u32 cnt_res[MAX_NIC_COUNT_ID]; /**< 分配vport计数资源的func id */ + u32 cur_cnt; /**< 当前被占用的资源数 */ + u32 func_id; /**< 配置或查询的func id */ + u32 index; /**< 配置或查询的func id对应的芯片index */ + u32 opcode; /**< 参考 nic_vport_cnt_op_e */ + u32 rsv1; /**< 保留域段 */ + u64 rsv2[192]; /**< 保留域段 */ +}; + +/** + * @brief struct hinic5_tc_pfe_cfg_reg_info + * @details pfe config from register + */ +struct hinic5_tc_pfe_cfg_reg_info { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u32 reg_value; /**< register value */ + u32 reg_value2; /**< register value */ + u32 rsvd; /**< 保留域段 */ +}; + +#define TC_XY_KEY_SIZE 11 + +/** + * @brief struct hinic5_tc_tcam_info + * @details info about tcam info + */ +struct hinic5_tc_tcam_info { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u32 key_x[TC_XY_KEY_SIZE]; /**< tcam key_x info */ + u32 key_y[TC_XY_KEY_SIZE]; /**< tcam key_y info */ + struct hinic5_tc_action_info action; /**< action info */ + u16 opcode; /**< 0:del, 1:add, 2:group, 3:get */ + u16 index; /**< index id */ + u8 group_cnt; /**< group cnt */ + u8 rsvd[3]; /**< 保留域段 */ +}; + +#define PFE_VTEP_TBL_IP_SIZE 4 +#define PFE_VTEP_TBL_IP_NUM 8 + +struct pfe_vtep_ip { + u32 is_ipv6; /**< 0: ipv4, 1: ipv6 */ + u32 ip_addr[PFE_VTEP_TBL_IP_SIZE]; /* ip_addr[0]: IPv6 DIP[127:96] or IPv4 
DIP[31:0] + * ip_addr[1]: IPv6 DIP[95:64] or IPv4 need set to 0 + * ip_addr[2]: IPv6 DIP[63:32] or IPv4 need set to 0 + * ip_addr[3]: IPv6 DIP[31:0] or IPv4 need set to 0 + */ +}; + +/** + * @brief struct hinic5_tc_pfe_vtep_ip_cmd + * @details pfe vtep表配置 + */ +struct hinic5_tc_pfe_vtep_ip_cmd { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + struct pfe_vtep_ip dip[PFE_VTEP_TBL_IP_NUM]; /**< destination ip */ + u16 num; /**< num of ip in dip table */ + u16 opcode; /**< 0: del, 1: add, 2: query */ + u32 rsvd; /**< 保留域段 */ +}; + +#define DEFAULT_ACTION_REG_NUM 9 /**< 默认ACTION(TX2个,RX1个,每个ACTION3个寄存器)涉及的寄存器个数 */ + +/** + * @brief struct hinic5_tc_default_action_info + * @details info about pfe default action + */ +struct hinic5_tc_default_action_info { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u8 opcode; /**< action type: 0:drop; 1:upcall; 2:show; 3:output to port */ + u8 addr; /* default action reg addr: + * 0:tx default action1; 1:tx default action2; 2:rx default action + */ + u16 index; /**< func index */ + u32 action[DEFAULT_ACTION_REG_NUM]; /**< default action info */ +}; + +/** + * @brief struct hinic5_tc_pfe_tcam_freq_info + * @details pfe tcam_freq info + */ +struct hinic5_tc_pfe_tcam_freq_info { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u32 tcam_sel; /**< TCAM_CLK_SEL */ + u8 mode; /**< 1:500MHz, 2:250MHz, 3:125MHz */ + u8 opcode; /**< 0:get, 1:set */ + u16 rsvd; /**< 保留域段 */ +}; + +/** + * @brief struct hinic5_tc_tcam_clock_gating_cfg_info + * @details info about tcam clock gating + */ +struct hinic5_tc_tcam_clock_gating_cfg_info { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u8 opcode; /**< 0: get, 1: set */ + u8 status; /**< 0: 时钟关断, 1: 时钟打开 */ + u16 rsvd; /**< 保留域段 */ +}; + +#define MAX_CEQ_PER_FUNC 0x20 + +/** + * @brief 定义了一个用于设置NIC中断控制的结构体 + * @details 该结构体包含了设置NIC中断控制所需的各种信息 + */ +struct mig_nic_set_ceq_ctrl { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u16 func_id; /**< func id */ + u8 ceq_num; /**< 中断控制的数量 
*/ + u8 rsvd; /**< 保留域段 */ + u32 ceq_ctrl0[MAX_CEQ_PER_FUNC]; /* 中断控制器0的数组 + * 每个元素对应一个中断控制器 + */ + u32 ceq_ctrl1[MAX_CEQ_PER_FUNC]; /* 中断控制器1的数组 + * 每个元素对应一个中断控制器 + */ +}; + +#define MAX_INTR_NUM 0x80 /**< 最大中断数量128 */ + +/** + * @brief 定义了一个用于迁移NIC中断信息的结构体 + * @details 该结构体包含了管理消息头部、函数ID、中断号、MSI-X控制寄存器操作以及MSI-X控制寄存器0和4的值。 + */ +struct mig_nic_msix_info_rw { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u16 func_id; /**< func id */ + u8 intr_num; /**< 中断号 */ + u8 msi_ctl_csr_op; /* MSI-X控制寄存器操作 + * 用于标识对MSI-X控制寄存器的操作类型 + */ + u32 msix_ctrl0[MAX_INTR_NUM]; /* MSI-X控制寄存器0的值 + * 每个中断对应一个值 + */ + u32 msix_ctrl4[MAX_INTR_NUM]; /* MSI-X控制寄存器1的值 + * 每个中断对应一个值 + */ +}; + +#define MIG_FUNC_TBL_SIZE 0x50 /**< TBL size = (64 + 16(rsvd)) */ +#define MIG_VAT_TBL_SIZE 0x10 /**< vat size: 0x10 */ + +/** + * @brief 定义了一个结构体,用于存储网络接口卡的功能和虚拟地址表信息 + * @details 该结构体包含了网络接口卡的管理消息头部、功能ID、操作码、功能使用标志、功能表和虚拟地址表等信息。 + */ +struct mig_nic_func_vat_tbl { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u16 func_id; /**< func id */ + u8 opcode; /**< 操作码 */ + u8 func_used; /**< function使用标志 */ + u8 func_tbl[MIG_FUNC_TBL_SIZE]; /**< func表 */ + u8 vat_tbl[MIG_VAT_TBL_SIZE]; /**< 虚拟地址表 */ + u8 rsvd[4]; /**< 保留域段 */ +}; + +#define MIG_FAST_MSG_VF_PAGE_NUM 4 + +/** + * @brief 定义了一个结构体,用于存储网络接口的虚拟功能信息 + * @details 结构体中包含了网络接口的虚拟功能信息,如发送队列数量、接收队列数量、命令队列数量等。 + */ +struct mig_nic_vf_info { + u8 sq_num; /**< 发送队列数量 */ + u8 rq_num; /**< 接收队列数量 */ + u8 cmdq_num; /**< 命令队列数量 */ + u8 cmdq_depth; /**< 命令队列深度 */ + u32 rq_depth; /**< 接收队列深度 */ + u32 sq_depth; /**< 发送队列深度 */ + u32 sq_ci_base_addr_h; /**< 发送队列完成指针基地址的高位 */ + u32 sq_ci_base_addr_l; /**< 发送队列完成指针基地址的低位 */ + u16 bat_size; /**< 批处理大小 */ + u8 valid; /**< 有效标志 */ + u8 fast_msg_en; + u8 fast_msg_page_num; + u8 rsvd[3]; + u64 fast_msg_page_addr[MIG_FAST_MSG_VF_PAGE_NUM]; +}; + +/** + * @brief 定义了一个结构体,用于存储网卡功能配置信息 + * @details 该结构体包含了网卡管理消息头部信息、功能ID、保留字段和虚拟功能信息。 + */ +struct mig_nic_func_cfg { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ 
+ u16 func_id; /**< func id */ + u16 rsvd; /**< 保留域段 */ + struct mig_nic_vf_info vf_info; /**< VF信息 */ +}; + +/** + * @brief 此结构体用于检查消息盒是否为空 + * @details 此结构体主要用于检查网络接口卡的消息盒是否为空,以便于进行后续的操作。 + */ +struct mig_nic_chk_mbx_empty { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u16 func_id; /**< func id */ + u8 bme; /**< 表示消息盒是否为空的标志 */ + u8 is_func_used; /**< 表示func是否被使用的标志 */ + u8 cmdq_num; /**< get cmdq num for stop cmdq */ + u8 rsvd[3]; /**< 保留域段 */ +}; + +/** + * @brief 定义了一个结构体,用于表示迁移网络接口虚拟端口的状态 + * @details 该结构体包含了网络接口虚拟端口的状态信息,如管理消息头部、功能ID、状态等。 + */ +struct mig_nic_vport_state { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< func id */ + u16 rsvd1; /**< 保留域段 */ + u8 state; /**< 0--disable, 1--enable */ + u8 rsvd2[3]; /**< 保留域段 */ +}; + +#define SQ_CI_ATTR_SIZE 0x14 /**< SQ CI地址长度 */ +#define SQ_CI_INDIR_TBL_SIZE 0x20 /**< SQ CI间接表大小 */ +#define MAX_CI_TBL_NUM 0x20 /**< mailbox max 2kb, one sq ci(attr + tbl) 52B, 32 *52 = 1664B < 2kb */ + +/** + * @brief 定义一个结构体,用于存储单个NIC的发送队列控制信息 + * @details 该结构体包含两个成员,sq_ci_tbl和sq_ci_attr,分别用于存储发送队列控制表和发送队列控制属性。 + */ +struct mig_nic_single_sq_ci { + u8 sq_ci_tbl[SQ_CI_INDIR_TBL_SIZE]; /**< sq ci表 */ + u8 sq_ci_attr[SQ_CI_ATTR_SIZE]; /**< sq ci地址表 */ +}; + +/** + * @brief 网络接口结构体,用于存储网络接口的信息 + * @details 该结构体包含了网络接口的基本信息和服务队列的信息 + */ +struct mig_nic_sq_ci { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< 功能ID,用于标识网络接口的功能 */ + u8 opcode; /**< 操作码,用于标识网络接口的操作类型 */ + u8 curr_sq_id; /* 当前的服务队列ID + * 用于标识当前正在使用的服务队列 + */ + u8 round_queue_num; /* 循环队列数量 + * 用于标识网络接口的循环队列数 + */ + u8 rsvd[3]; /**< 保留域段 */ + struct mig_nic_single_sq_ci sq_ci[MAX_CI_TBL_NUM]; /**< 单个sq的ci信息 */ +}; + +#define RSS_INDIR_TBL_SIZE 0x200 /**< rss间接表的大小 */ +#define RSS_KEY_SIZE 0x28 /**< rss key的大小 */ + +/** + * @brief 定义了一个用于网络接口的接收方缩放(RSS)的结构体 + * @details 该结构体包含了RSS相关的各种参数,用于配置网络接口的RSS功能。 + */ +struct mig_nic_rss_tbl { + u8 rss_enable; /**< 启用或禁用RSS的标志 */ + u8 rss_hash_engine; /**< RSS哈希引擎的类型 */ + 
u16 rsvd; /**< 保留域段 */ + u32 rss_ctx; /**< RSS上下文 */ + u8 rss_indri_tbl[RSS_INDIR_TBL_SIZE]; /**< RSS间接表 */ + u8 rss_key[RSS_KEY_SIZE]; /**< RSS密钥 */ +}; + +/** + * @brief 定义了一个用于网络接口配置的结构体,包含了管理消息头、功能ID、操作码、预留字段和RSS表的信息 + * @details 该结构体用于网络接口的配置,包含了管理消息头、功能ID、操作码、预留字段和RSS表的信息,可以用于实现网络接口的各种配置操作。 + */ +struct mig_nic_cfg_rss_tbl { + struct mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< func id */ + u8 opcode; /**< 操作码 */ + u8 rsvd; /**< 保留域段 */ + struct mig_nic_rss_tbl rss_tbl; /**< rss表 */ +}; + +#define MAX_CMDQ_NUM 0x4 /**< 最大cmdq数量 */ +#define ENHANCED_CMDQ_CTX_SIZE 0x30 /**< 定义增强型cmdq context size为48 */ + +/** + * @brief 定义了一个结构体,用于存储网卡临时配置命令队列的上下文信息 + * @details 该结构体包含了管理消息头部、函数ID、命令队列数量、保留字段和命令队列上下文等信息 + */ +struct mig_nic_tmp_cfg_cmdq_ctx { + struct mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< func id */ + u8 cmdq_num; /**< cmdq数量 */ + u8 rsvd; /**< 保留域段 */ + u8 cmdq_ctx[MAX_CMDQ_NUM * ENHANCED_CMDQ_CTX_SIZE]; /**< cmdq上下文 */ +}; + +/** + * @brief 网卡迁移队列停止信息结构体 + * @details 该结构体用于描述网卡迁移队列停止的相关信息。 + */ +struct nic_mig_sq_stop { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< func id */ + u8 is_stop; /**< 是否停止 */ + u8 sq_num; /**< sq队列号 */ + u8 rsvd[4]; /**< 保留域段 */ +}; + +struct mig_nic_fast_msg_addr { + struct mgmt_msg_head head; /**< 命令字消息头 */ + u16 func_id; /**< func id */ + u8 opcode; /**< 操作码 */ + u8 page_num; /**< page num */ + u32 rsvd; /**< 保留域段 */ + u64 page_addr[MIG_FAST_MSG_VF_PAGE_NUM]; /**< page addr */ +}; + +/** + * @brief struct hinic5_cmd_set_pcie_flr_mgmt + * @details nic 设置pcie flr命令结构体 + */ +struct hinic5_cmd_set_pcie_flr_mgmt { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< func id */ + u16 rsvd1; /**< 保留域段 */ +}; + +struct hinic5_cmd_lro_cfg { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< func id */ + u8 data; /**< data to be set or get */ + u8 data_type; /**< data type: 参考 lro_cfg_operate_type 枚举定义 */ + u8 opcode; /**< 0: get, 
1: set */ + u8 rsvd1[3]; /**< 保留域段 */ +}; + +#define NIC_DCB_UP_MAX 0x8 /**< 定义网络接口控制数据包(NIC DCB)的最大用户优先级(UP) */ + +/** + * @brief hinic5_dcb_state + * @details nic dcb state命令结构体 + */ +struct hinic5_dcb_state { + u8 dcb_on; /**< dcb on or off */ + u8 default_cos; /**< default cos */ + u8 trust; /**< trust state */ + u8 rsvd1; /**< 保留域段 */ + u8 pcp2cos[NIC_DCB_UP_MAX]; /**< pcp to cos */ + u8 dscp2cos[64]; /**< dscp to cos */ + u32 rsvd2[7]; /**< 保留域段 */ +}; + +/** + * @brief hinic5_cmd_vf_dcb_state + * @details nic vf dcb state命令结构体 + */ +struct hinic5_cmd_vf_dcb_state { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + struct hinic5_dcb_state state; /**< dcb state */ +}; + +struct hinic5_cmd_port_info { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u8 port_id; + u8 rsvd1[3]; /**< 保留域段 */ + u8 port_type; + u8 autoneg_cap; + u8 autoneg_state; + u8 duplex; + u8 speed; + u8 fec; + u16 rsvd2; /**< 保留域段 */ + u32 rsvd3[4]; /**< 保留域段 */ +}; + +/** + * @brief nic_cmd_bond_active_report_info + * @details lacp协商结果更新之后向主机侧发送异步消息通知结构体 + */ +struct nic_cmd_bond_active_report_info { + struct mgmt_msg_head head; /**< 命令字消息头 */ + u32 bond_id; + u32 bon_mmi_status; /**< 该bond子设备的链路状态 */ + u32 active_bitmap; /**< 该bond子设备的slave port状态 */ + u8 rsvd[16]; /**< 保留域段 */ +}; + +/** + * @brief nic_cmd_tx_pause_notice + * @details pfc/pause风暴tx异常上报 + */ +struct nic_cmd_tx_pause_notice { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u32 tx_pause_except; /**< 1: 异常,0: 正常 */ + u32 except_level; /**< 异常等级 */ + u32 rsvd; /**< 保留域段 */ +}; + +#pragma pack(4) +typedef struct fault_msg_s { + struct mgmt_msg_head msg_head; /**< 命令字消息头 */ + u8 num; /**< 发送的自定义报文数量 */ + u8 rsvd[3]; /**< 保留域段 */ + u16 real_size; + u8 mode; /**< 0: read, 1: write */ + u8 type; /**< 0:不立即发送通知, 1: 立即发送通知 */ + u8 data[0]; /**< 大小由real_size指定 */ +} fault_msg_t; +#pragma pack() + +#define TCAM_FLOW_KEY_SIZE 44 /**< 定义tcam key的大小为44B */ + +typedef struct { + u32 qid : 10; /* 如果flag==1,则fdir_qid表示group 
id; + * 如果flag==0则fdir_qid表示qid + */ + u32 flag : 1; + u32 rsvd : 21; /**< 保留域段 */ +} qid_htn_s; + +typedef union { + qid_htn_s qid_htn; + u32 qid; +} qid_u; + +/** + * @brief 定义一个结构体,用于存储TCAM查找的结果 + * @details 包含Flow Director的复合目标信息(qid/group + flag)和保留字段。 + */ +struct tcam_result { + qid_u fdir_info; + u32 rsvd; /**< 保留域段 */ +}; + +/** + * @brief 定义一个结构体,用于存储TCAM流键的x和y值 + * @details 结构体用于存储TCAM流键的x和y值,TCAM流键是一种用于流表查找的数据结构,其中x和y值是流键的两个组成部分。 + */ +struct tcam_key_x_y { + u8 x[TCAM_FLOW_KEY_SIZE]; /**< x值,大小为TCAM_FLOW_KEY_SIZE */ + u8 y[TCAM_FLOW_KEY_SIZE]; /**< y值,大小为TCAM_FLOW_KEY_SIZE */ +}; + +/** + * @brief 用于存储网络接口控制器(NIC)的TCAM配置规则 + * @details 包含了TCAM配置规则的索引、数据和键,用于在NIC中存储和管理TCAM配置规则。 + */ +struct nic_tcam_cfg_rule { + u32 index; /**< 规则的索引 */ + struct tcam_result data; /**< 规则的数据 */ + struct tcam_key_x_y key; /**< 规则的键 */ +}; + +/** + * @brief TCAM配置规则 + * @details 包含了TCAM规则的各种配置信息,如规则的键值、掩码、动作等。 + */ +struct nic_cmd_fdir_add_rule { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u16 func_id; /**< Func ID */ + u8 type; /**< 规则类型 */ + u8 usrdata; /**< 用户数据 */ + struct nic_tcam_cfg_rule rule; /**< 配置表 */ +}; + +/** + * @brief 网络接口命令过滤器获取规则的结构体 + * @details 该结构体用于存储网络接口命令过滤器获取规则的相关信息。 + */ +struct nic_cmd_fdir_get_rule { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u32 index; /**< 索引字段 */ + u8 valid; /**< 有效性字段 */ + u8 type; /**< 类型字段 */ + u16 rsvd; /**< 保留域段 */ + struct tcam_key_x_y key; /**< TCAM键字段 */ + struct tcam_result data; /**< TCAM结果字段 */ + u64 packet_count; /**< 包计数字段 */ + u64 byte_count; /**< 字节计数字段 */ +}; + +#define NIC_TCAM_BLOCK_LARGE_NUM 256 /**< 定义TCAM BLOCK最大值为256 */ +#define NIC_TCAM_BLOCK_LARGE_SIZE 16 /**< 定义一个block最大size为16 */ +#define TCAM_RULE_FDIR_TYPE 0 /**< 定义TCAM规则的类型,FDIR类型对应值为0 */ +#define TCAM_RULE_PPA_TYPE 1 /**< 定义TCAM规则的类型,PPA类型对应值为1 */ + +/** + * @brief 获取TCAM块规则的结构体 + * @details 该结构体用于获取TCAM块规则,包括TCAM块类型、TCAM表类型、TCAM块索引、有效性数组、TCAM键数组和TCAM结果数组。 + */ +struct nic_cmd_fdir_get_block_rules { + struct 
hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u8 tcam_block_type; /**< TCAM块类型,目前仅有 NIC_TCAM_BLOCK_TYPE_LARGE */ + u8 tcam_table_type; /* TCAM规则类型 + * TCAM_RULE_PPA_TYPE 或 TCAM_RULE_FDIR_TYPE + */ + u16 tcam_block_index; /**< TCAM块索引 */ + u8 valid[NIC_TCAM_BLOCK_LARGE_SIZE]; /**< 有效性数组 */ + struct tcam_key_x_y key[NIC_TCAM_BLOCK_LARGE_SIZE]; /**< TCAM键数组 */ + struct tcam_result data[NIC_TCAM_BLOCK_LARGE_SIZE]; /**< TCAM结果数组 */ +}; + +struct hinic5_rate_xir_xbs { + u32 cir; /**< unit: kbps */ + u32 pir; /**< unit: kbps */ + u32 cbs; /**< unit: Byte */ + u32 pbs; /**< unit: Byte */ +}; + +/** + * @brief nic tx MQM速率配置 + * @details 用于设置和获取func的MQM限速参数以及设置fun映射表。 + */ +struct hinic5_cmd_tx_limit_rate { + struct hinic5_mgmt_msg_head msg_head; /**< 命令字消息头 */ + u16 func_id; /**< func ID */ + u8 op_code; /**< 操作类型: 0 - 默认; 1 - get; 2 - set; 3 - map */ + u8 vnicgrp_flag; /**< vnicgrp操作标识:0 - vnic;1 - vnic group */ + u32 vnicgrp_id; /**< vnic group ID */ + u32 vnic_id; /**< vnic ID */ + u8 limit_type; /**< 限速类型:0 - pps; 1-bps */ + u8 rsvd1[3]; /**< 保留域段 */ + struct hinic5_rate_xir_xbs rate_para; /**< rate_cfg */ + u32 rsvd2[64]; /**< 保留域段 */ +}; + +#define HINIC5_TX_SET_PROMISC_SKIP 0 /**< 设置混杂接收未知单播报文 */ +#define HINIC5_TX_GET_PROMISC_SKIP 1 /**< 读取混杂接收未知单播报文 */ + +/** + * @brief 设置混杂是否接收未知单播报文 + * @details 该结构体用于开关是否上送单播未知报文到开混杂PF,包括 + */ +struct hinic5_tx_promisc_cfg { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u8 port_id; /**< 网络侧端口id */ + u8 promisc_skip_en; /**< 0:关闭上送 1:打开上送 */ + u8 opcode; /**< 0:设置 1:读取 */ + u8 rsvd; /**< 保留域段 */ +}; + +#define HINIC5_ARP_PKT_MAX_LEN 512 + +/** + * @brief 向MPU传递ARP相关报文内容结构体 + * @details 该结构体用于向MPU传递需要代发的ARP或ND报文 + */ +struct hinic5_arp_pkt_info { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u16 func_id; /**< func ID */ + u16 pkt_length; /**< 报文长度 */ + u16 origin_queue_id; /**< 驱动发送报文的队列id */ + u8 rsvd[2]; /**< 保留域段 */ + u32 rsvd1[4]; /**< 保留域段 */ + u8 pkt_buf[HINIC5_ARP_PKT_MAX_LEN]; /**< 报文缓冲区 */ +}; + +/** + * 
@brief 向MPU传递Bond配置信息结构体 + * @details 该结构体用于向MPU传递卸载Bond配置信息 + */ +struct hinic5_cmd_cfg_bond { + struct hinic5_mgmt_msg_head head; /**< 命令字消息头 */ + u16 func_id; + u8 opcode; /**< operation code: 1 set arp status, 0 get arp status */ + u8 arp_en; /**< ARP双发使能 */ + u32 rsvd; /**< 保留域段 */ +}; + +#endif /* HINIC5_NIC_CMD_STRUCTS_H */ + diff --git a/hinic5/src/dpu_develop_interface/fw_msg_intf/nic/nic_mpu_cmd_structs_extend.h b/hinic5/src/dpu_develop_interface/fw_msg_intf/nic/nic_mpu_cmd_structs_extend.h new file mode 100644 index 00000000..184ba40f --- /dev/null +++ b/hinic5/src/dpu_develop_interface/fw_msg_intf/nic/nic_mpu_cmd_structs_extend.h @@ -0,0 +1,26 @@ +#ifndef HINIC5_NIC_CMD_STRUCTS_EXTEND_H +#define HINIC5_NIC_CMD_STRUCTS_EXTEND_H + +#if defined(__LINUX__) || defined(__VMWARE__) +#include <linux/types.h> +#endif + +#include "mpu_cmd_base_defs.h" +#include "nic_mpu_cmd_structs.h" + +#ifndef FUNC_MAX_CLEAR_QP_NUM +#define FUNC_MAX_CLEAR_QP_NUM 256 +/** + * @brief 定义了一个按队列级别清理qp res结构体 + * @details qp_num表示要清理的队列个数,qp表示要清理队列的在func下的local_id + */ +struct hinic5_cmd_clear_assign_qp_res { + struct hinic5_mgmt_msg_head msg_head; + + u16 func_id; + u16 qp_num; + u32 rsvd[4]; + u16 qp[FUNC_MAX_CLEAR_QP_NUM]; +}; +#endif +#endif /* HINIC5_NIC_CMD_STRUCTS_EXTEND_H */ \ No newline at end of file diff --git a/hinic5/src/dpu_develop_interface/fw_msg_intf/nic/nic_mpu_tc_cmd_defs.h b/hinic5/src/dpu_develop_interface/fw_msg_intf/nic/nic_mpu_tc_cmd_defs.h new file mode 100644 index 00000000..865536f5 --- /dev/null +++ b/hinic5/src/dpu_develop_interface/fw_msg_intf/nic/nic_mpu_tc_cmd_defs.h @@ -0,0 +1,137 @@ +/* + * Copyright (c) Huawei Technologies Co. Ltd. 2024-2024. All rights reserved. 
+ */ + +#ifndef NIC_MPU_TC_CMD_DEFS_H +#define NIC_MPU_TC_CMD_DEFS_H + +#if defined(__LINUX__) || defined(__VMWARE__) +#include <linux/types.h> +#endif + +#include "nic_cfg_comm.h" + +#define TCAM_KEY_MEM_PAD_2BYTE 2 +#define TCAM_KEY_MEM_PAD_ALIGN_4BYTE 4 +#define SHIFT_16 16 + +#define TCAM_INVLD_INDEX 0xFFFF + +enum tcam_clock_gating_ops { + TCAM_CLOCK_GATING_OPS_GET, + TCAM_CLOCK_GATING_OPS_SET, + TCAM_CLOCK_GATING_OPS_MAX +}; + +enum tcam_clock_gating_status { + TCAM_CLOCK_GATING_DISABLED, + TCAM_CLOCK_GATING_ENABLED +}; + +/* struct hinic5_tc_default_action_info中action地址 */ +#define ACTION_REG_TX1_1 0 +#define ACTION_REG_TX1_2 1 +#define ACTION_REG_TX1_3 2 +#define ACTION_REG_TX2_1 3 +#define ACTION_REG_TX2_2 4 +#define ACTION_REG_TX2_3 5 +#define ACTION_REG_RX1 6 +#define ACTION_REG_RX2 7 +#define ACTION_REG_RX3 8 + +/* 对应寄存器中默认ACTION地址 */ +enum hinic5_tc_default_action_addr { + HINIC5_TC_DEFAULT_ACTION_ADDR_TX1 = 0, /**< tx default action1 */ + HINIC5_TC_DEFAULT_ACTION_ADDR_TX2, /**< tx default action2 */ + HINIC5_TC_DEFAULT_ACTION_ADDR_RX, /**< rx default action */ + HINIC5_TC_DEFAULT_ACTION_ADDR_MAX, +}; + +enum hinic5_tc_default_action_ops { + HINIC5_TC_DEFAULT_ACTION_OPS_DROP = 0, + HINIC5_TC_DEFAULT_ACTION_OPS_UPCALL, + HINIC5_TC_DEFAULT_ACTION_OPS_SHOW, + HINIC5_TC_DEFAULT_ACTION_OPS_PORT, + HINIC5_TC_DEFAULT_ACTION_OPS_MAX, +}; + +enum hinic5_tc_cfg_rule_ops { + HINIC5_TC_CFG_RULE_OPS_DEL, + HINIC5_TC_CFG_RULE_OPS_ADD +}; + +enum pfe_cnt_ops { + PFE_CNT_OPS_GET, + PFE_CNT_OPS_RESET, + PFE_CNT_OPS_MAX +}; + +/* TCAM 3种频率:1:500MHz, 2:250MHz, 3:125MHz */ +#define MAX_PFE_TCAM_FREQ_MODE_NUM 3 + +enum pfe_tcam_freq_ops { + PFE_TCAM_FREQ_OPS_GET, + PFE_TCAM_FREQ_OPS_SET, + PFE_TCAM_FREQ_OPS_MAX +}; + +enum hinic5_tc_vxlan_tbl_cfg_ops { + HINIC5_TC_VXLAN_TBL_CFG_OPS_DEL, + HINIC5_TC_VXLAN_TBL_CFG_OPS_ADD, + HINIC5_TC_VXLAN_TBL_CFG_OPS_GET +}; + +enum pfe_tcam_cfg_ops { + PFE_TCAM_CFG_OPS_DEL, + PFE_TCAM_CFG_OPS_ADD, + PFE_TCAM_CFG_OPS_GROUP, + 
PFE_TCAM_CFG_OPS_GET +}; + +#define PFE_VTEP_TBL_IP_SIZE 4 +#define PFE_VTEP_TBL_IP_NUM 8 + +enum pfe_vtep_ops { + PFE_VTEP_OPS_DEL, + PFE_VTEP_OPS_ADD, + PFE_VTEP_OPS_GET, + PFE_VTEP_OPS_MAX, +}; + +enum pfe_aging_tbl_ops { + PFE_AGING_TBL_GET, + PFE_AGING_TBL_SET, + PFE_AGING_TBL_STATUS, + PFE_AGING_TBL_CHECK, + PFE_AGING_TBL_MAX +}; + +/* action flag[9 : 0], queue bit 9, decap bit 0 */ +enum hinic5_tc_action_flag { + HINIC5_TC_ACTION_VXLAN_DECAP = 0, + HINIC5_TC_ACTION_VXLAN_ENCAP, + HINIC5_TC_ACTION_FLOW_UPCALL, + HINIC5_TC_ACTION_FLOW_VLAN_POP, + HINIC5_TC_ACTION_FLOW_VLAN_PUSH, + HINIC5_TC_ACTION_FLOW_OUTPUT, + HINIC5_TC_ACTION_FLOW_COUNT, + HINIC5_TC_ACTION_FLOW_DROP, + HINIC5_TC_ACTION_FLOW_MARK, + HINIC5_TC_ACTION_FLOW_QUEUE, + HINIC5_TC_ACTION_TYPE_MAX +}; + +enum hinic5_tc_pfe_cfg_flag { + HINIC5_TC_PFE_GLB_MODE_CFG_REG = 0, + HINIC5_TC_PFE_GLB_MODE_CFG_REG2, + HINIC5_TC_PFE_CFG_MAX +}; + +enum pfe_cfg_ops_profile { + PFE_CFG_OPS_PROFILE_OPTION = 0, + PFE_CFG_OPS_PROFILE_SHIFT, + PFE_CFG_OPS_PROFILE_SHIFT2, + PFE_CFG_OPS_PROFILE_MAX +}; + +#endif /* NIC_MPU_TC_CMD_DEFS_H */ \ No newline at end of file diff --git a/hinic5/src/dpu_develop_interface/fw_msg_intf/nic/nic_npu_cmd.h b/hinic5/src/dpu_develop_interface/fw_msg_intf/nic/nic_npu_cmd.h new file mode 100644 index 00000000..f5b25d88 --- /dev/null +++ b/hinic5/src/dpu_develop_interface/fw_msg_intf/nic/nic_npu_cmd.h @@ -0,0 +1,82 @@ +/* + * Copyright (C), 2001-2011, Huawei Tech. Co., Ltd. 
+ * File Name : nic_npu_cmd.h + * Version : Initial Draft + * Created : 2019/4/25 + * Last Modified : + * Description : NIC Commands between Driver and NPU + * Function List : + */ + +#ifndef NIC_NPU_CMD_H +#define NIC_NPU_CMD_H + +/** + * @enum hinic5_ucode_cmd + * @brief 定义了各种与NIC ucode相关的命令 + * + * @details 这个结构体定义了一系列与NIC ucode相关的命令,这些命令用于控制NIC ucode的行为。 + */ +enum hinic5_ucode_cmd { + HINIC5_UCODE_CMD_MODIFY_QUEUE_CTX = 0, /* 修改队列上下文。 + * @see > hinic5_sq_ctxt_block + */ + HINIC5_UCODE_CMD_CLEAN_QUEUE_CONTEXT = 1, /* 清理队列上下文。 + * @see > hinic5_clean_queue_ctxt + */ + HINIC5_UCODE_CMD_ARM_SQ = 2, /* 未使用 */ + HINIC5_UCODE_CMD_ARM_RQ = 3, /* 未使用 */ + HINIC5_UCODE_CMD_SET_RSS_INDIR_TABLE = 4, /* 设置RSS间接表。 + * @see > nic_rss_indirect_tbl + */ + HINIC5_UCODE_CMD_SET_RSS_CONTEXT_TABLE = 5, /* 设置RSS上下文表。 + * @see > nic_rss_context_tbl + */ + HINIC5_UCODE_CMD_GET_RSS_INDIR_TABLE = 6, /* 获取RSS间接表。 + * @see > l2nic_cmdq_rss_indir_get + */ + HINIC5_UCODE_CMD_GET_RSS_CONTEXT_TABLE = 7, /* 未使用 */ + HINIC5_UCODE_CMD_SET_IQ_ENABLE = 8, /* 未使用 */ + HINIC5_UCODE_CMD_SET_RQ_FLUSH = 10, /* 设置RQ刷新。 + * @see > hinic5_cmd_set_rq_flush + */ + HINIC5_UCODE_CMD_MODIFY_VLAN_CTX = 11, /* 获取rxq信息。 + * @see > nic_vlan_ctx + */ + HINIC5_UCODE_CMD_PPA_HASH_TABLE = 12, /* 未使用*/ + HINIC5_UCODE_CMD_RXQ_INFO_GET = 13, /* 获取rxq信息。 + * @see > hinic5_rxq_hw, < rxq_check_info + */ + HINIC5_UCODE_MIG_CFG_Q_CTX = 16, /* 热迁移操作上下文表。 + * @see > l2nic_migrate_op_ctx + */ + HINIC5_UCODE_MIG_CHK_SQ_STOP = 17, /* 未使用 */ + HINIC5_UCODE_MIG_CHK_RQ_STOP = 18, /* 热迁移检查rq启停状态。 + * @see > l2nic_cmdq_migrate_check_rq_stop + */ + HINIC5_UCODE_MIG_CHK_CMDQ_STOP = 19, /* 热迁移检查cmdq启停状态。 + * @see > l2nic_cmdq_migrate_check_cmdq_stop + */ + HINIC5_UCODE_MIG_CFG_BAT_INFO = 20, /* 热迁移读写bat表。 + * @see > l2nic_migrate_cfg_bat + */ + HINIC5_UCODE_MIG_COMPENSATE_INTR = 21, /* 热迁移中断补偿。 + * @see > l2nic_cmdq_migrate_compensate_intr + */ + HINIC5_UCODE_MIG_CFG_FAST_MSG = 22, + HINIC5_UCODE_CMD_CLEAR_VPORT_STATS = 23, /* 
清除counter统计。
 * @see > l2nic_cmdq_clear_vport_stats
 */
 + HINIC5_UCODE_CMD_GET_VPORT_STATS = 24, /* 获取counter统计。
 * @see > l2nic_cmdq_get_vport_stats
 */
HINIC5_TC_PROFILE_OUTER_IP_INNER_IP = 14, /* eth/ipv4(6)/udp/vxlan/eth/ipv4 */ + HINIC5_TC_PROFILE_OUTER_IP_INNER_IP_TCPORUDP = 15, /* eth/ipv4(6)/udp/vxlan/eth/ipv4/udp_or_tcp */ + + HINIC5_TC_PROFILE_MAX = 16 +}; + +#define HINIC5_TC_PROFILE_ADM_MAX 14 + +struct hinic5_tc_rule_tun_eth { + u32 vni_h : 16; + u32 padding : 16; + + u32 smac_2 : 8; + u32 smac_1 : 8; + u32 smac_0 : 8; + u32 vni_l : 8; + + u32 dmac_0 : 8; + u32 smac_5 : 8; + u32 smac_4 : 8; + u32 smac_3 : 8; + + u32 dmac_4 : 8; + u32 dmac_3 : 8; + u32 dmac_2 : 8; + u32 dmac_1 : 8; + + u32 rsvd : 8; + u32 ether_type : 16; + u32 dmac_5 : 8; +}; + +struct hinic5_tc_rule_tun_eth_vlan { + u32 vni_h : 16; + u32 padding : 16; + + u32 smac_2 : 8; + u32 smac_1 : 8; + u32 smac_0 : 8; + u32 vni_l : 8; + + u32 dmac_0 : 8; + u32 smac_5 : 8; + u32 smac_4 : 8; + u32 smac_3 : 8; + + u32 dmac_4 : 8; + u32 dmac_3 : 8; + u32 dmac_2 : 8; + u32 dmac_1 : 8; + + u32 vlan_tag_h : 8; + u32 ether_type : 16; + u32 dmac_5 : 8; + + u32 rsvd : 24; + u32 vlan_tag_l : 8; +}; + +struct hinic5_tc_rule_tun_eth_qinq { + u32 vni_h : 16; + u32 padding : 16; + + u32 smac_2 : 8; + u32 smac_1 : 8; + u32 smac_0 : 8; + u32 vni_l : 8; + + u32 dmac_0 : 8; + u32 smac_5 : 8; + u32 smac_4 : 8; + u32 smac_3 : 8; + + u32 dmac_4 : 8; + u32 dmac_3 : 8; + u32 dmac_2 : 8; + u32 dmac_1 : 8; + + u32 vlan_tag_h : 8; + u32 ether_type : 16; + u32 dmac_5 : 8; + + u32 rsvd : 8; + u32 cvlan_tag : 16; + u32 vlan_tag_l : 8; +}; + +struct hinic5_tc_rule_eth { + u32 smac_1 : 8; + u32 smac_0 : 8; + u32 padding : 16; + + u32 smac_5 : 8; + u32 smac_4 : 8; + u32 smac_3 : 8; + u32 smac_2 : 8; + + u32 dmac_3 : 8; + u32 dmac_2 : 8; + u32 dmac_1 : 8; + u32 dmac_0 : 8; + + u32 ether_type : 16; + u32 dmac_5 : 8; + u32 dmac_4 : 8; +}; + +struct hinic5_tc_rule_eth_vlan { + u32 smac_1 : 8; + u32 smac_0 : 8; + u32 padding : 16; + + u32 smac_5 : 8; + u32 smac_4 : 8; + u32 smac_3 : 8; + u32 smac_2 : 8; + + u32 dmac_3 : 8; + u32 dmac_2 : 8; + u32 dmac_1 : 8; + u32 dmac_0 : 8; + + u32 
ether_type : 16; + u32 dmac_5 : 8; + u32 dmac_4 : 8; + + u32 rsvd : 16; + u32 vlan_tag : 16; +}; + +struct hinic5_tc_rule_eth_qinq { + u32 smac_1 : 8; + u32 smac_0 : 8; + u32 padding : 16; + + u32 smac_5 : 8; + u32 smac_4 : 8; + u32 smac_3 : 8; + u32 smac_2 : 8; + + u32 dmac_3 : 8; + u32 dmac_2 : 8; + u32 dmac_1 : 8; + u32 dmac_0 : 8; + + u32 ether_type : 16; + u32 dmac_5 : 8; + u32 dmac_4 : 8; + + u32 cvlan_tag : 16; + u32 vlan_tag : 16; +}; + +struct hinic5_tc_rule_tun_eth_ip4 { + u32 vni_h : 16; + u32 padding : 16; + + u32 dmac_2 : 8; + u32 dmac_1 : 8; + u32 dmac_0 : 8; + u32 vni_l : 8; + + u32 ether_type_h : 8; + u32 dmac_5 : 8; + u32 dmac_4 : 8; + u32 dmac_3 : 8; + + u32 sip_2 : 8; + u32 sip_1 : 8; + u32 sip_0 : 8; + u32 ether_type_l : 8; + + u32 dip_2 : 8; + u32 dip_1 : 8; + u32 dip_0 : 8; + u32 sip_3 : 8; + + u32 rsvd : 16; + u32 proto : 8; + u32 dip_3 : 8; +}; + +struct hinic5_tc_rule_tun_eth_ip4_tcporudp { + u32 vni_h : 16; + u32 padding : 16; + + u32 dmac_2 : 8; + u32 dmac_1 : 8; + u32 dmac_0 : 8; + u32 vni_l : 8; + + u32 ether_type_h : 8; + u32 dmac_5 : 8; + u32 dmac_4 : 8; + u32 dmac_3 : 8; + + u32 sip_2 : 8; + u32 sip_1 : 8; + u32 sip_0 : 8; + u32 ether_type_l : 8; + + u32 dip_2 : 8; + u32 dip_1 : 8; + u32 dip_0 : 8; + u32 sip_3 : 8; + + u32 sport : 16; + u32 proto : 8; + u32 dip_3 : 8; + + u32 rsvd : 16; + u32 dport : 16; +}; + +struct hinic5_tc_rule_eth_ip4 { + u32 vlan_tag : 16; + u32 padding : 16; + + u32 dmac_3 : 8; + u32 dmac_2 : 8; + u32 dmac_1 : 8; + u32 dmac_0 : 8; + + u32 sip_1 : 8; + u32 sip_0 : 8; + u32 dmac_5 : 8; + u32 dmac_4 : 8; + + u32 dip_1 : 8; + u32 dip_0 : 8; + u32 sip_3 : 8; + u32 sip_2 : 8; + + u32 rsvd : 8; + u32 proto : 8; + u32 dip_3 : 8; + u32 dip_2 : 8; +}; + +struct hinic5_tc_rule_eth_ip4_tcporudp { + u32 vlan_tag : 16; + u32 padding : 16; + + u32 dmac_3 : 8; + u32 dmac_2 : 8; + u32 dmac_1 : 8; + u32 dmac_0 : 8; + + u32 sip_1 : 8; + u32 sip_0 : 8; + u32 dmac_5 : 8; + u32 dmac_4 : 8; + + u32 dip_1 : 8; + u32 dip_0 : 8; + u32 
sip_3 : 8; + u32 sip_2 : 8; + + u32 sport_h : 8; + u32 proto : 8; + u32 dip_3 : 8; + u32 dip_2 : 8; + + u32 rsvd : 8; + u32 dport : 16; + u32 sport_l : 8; +}; + +struct hinic5_tc_rule_tun_eth_ip6_off { + u32 vni_h : 16; + u32 padding : 16; + + u32 sip6_1_h : 8; + u32 sip6_0 : 16; + u32 vni_l : 8; + + u32 sip6_3_h : 8; + u32 sip6_2 : 16; + u32 sip6_1_l : 8; + + u32 sip6_5_h : 8; + u32 sip6_4 : 16; + u32 sip6_3_l : 8; + + u32 sip6_7_h : 8; + u32 sip6_6 : 16; + u32 sip6_5_l : 8; + + u32 dip6_1_h : 8; + u32 dip6_0 : 16; + u32 sip6_7_l : 8; + + u32 dip6_3_h : 8; + u32 dip6_2 : 16; + u32 dip6_1_l : 8; + + u32 dip6_5_h : 8; + u32 dip6_4 : 16; + u32 dip6_3_l : 8; + + u32 dip6_7_h : 8; + u32 dip6_6 : 16; + u32 dip6_5_l : 8; + + u32 rsvd : 16; + u32 proto : 8; + u32 dip6_7_l : 8; +}; + +struct hinic5_tc_rule_tun_eth_ip6_tcporudp_off { + u32 vni_h : 16; + u32 padding : 16; + + u32 sip6_1_h : 8; + u32 sip6_0 : 16; + u32 vni_l : 8; + + u32 sip6_3_h : 8; + u32 sip6_2 : 16; + u32 sip6_1_l : 8; + + u32 sip6_5_h : 8; + u32 sip6_4 : 16; + u32 sip6_3_l : 8; + + u32 sip6_7_h : 8; + u32 sip6_6 : 16; + u32 sip6_5_l : 8; + + u32 dip6_1_h : 8; + u32 dip6_0 : 16; + u32 sip6_7_l : 8; + + u32 dip6_3_h : 8; + u32 dip6_2 : 16; + u32 dip6_1_l : 8; + + u32 dip6_5_h : 8; + u32 dip6_4 : 16; + u32 dip6_3_l : 8; + + u32 dip6_7_h : 8; + u32 dip6_6 : 16; + u32 dip6_5_l : 8; + + u32 sport : 16; + u32 proto : 8; + u32 dip6_7_l : 8; + + u32 rsvd : 16; + u32 dport : 16; +}; + +struct hinic5_tc_rule_tun_eth_ip6_on { + u32 vni_h : 16; + u32 padding : 16; + + u32 smac_2 : 8; + u32 smac_1 : 8; + u32 smac_0 : 8; + u32 vni_l : 8; + + u32 dmac_0 : 8; + u32 smac_5 : 8; + u32 smac_4 : 8; + u32 smac_3 : 8; + + u32 dmac_4 : 8; + u32 dmac_3 : 8; + u32 dmac_2 : 8; + u32 dmac_1 : 8; + + u32 sip6_0_h : 8; + u32 ether_type : 16; + u32 dmac_5 : 8; + + u32 sip6_2_h : 8; + u32 sip6_1 : 16; + u32 sip6_0_l : 8; + + u32 sip6_4_h : 8; + u32 sip6_3 : 16; + u32 sip6_2_l : 8; + + u32 dip6_1 : 16; + u32 dip6_0 : 16; + + u32 dip6_3 
: 16; + u32 dip6_2 : 16; + + u32 rsvd : 16; + u32 proto : 8; + u32 dip6_4_h : 8; +}; + +struct hinic5_tc_rule_tun_eth_ip6_tcporudp_on { + u32 vni_h : 16; + u32 padding : 16; + + u32 smac_2 : 8; + u32 smac_1 : 8; + u32 smac_0 : 8; + u32 vni_l : 8; + + u32 dmac_0 : 8; + u32 smac_5 : 8; + u32 smac_4 : 8; + u32 smac_3 : 8; + + u32 dmac_4 : 8; + u32 dmac_3 : 8; + u32 dmac_2 : 8; + u32 dmac_1 : 8; + + u32 sip6_0_h : 8; + u32 ether_type : 16; + u32 dmac_5 : 8; + + u32 sip6_2_h : 8; + u32 sip6_1 : 16; + u32 sip6_0_l : 8; + + u32 sip6_4_h : 8; + u32 sip6_3 : 16; + u32 sip6_2_l : 8; + + u32 dip6_1 : 16; + u32 dip6_0 : 16; + + u32 dip6_3 : 16; + u32 dip6_2 : 16; + + u32 sport : 16; + u32 proto : 8; + u32 dip6_4_h : 8; + + u32 rsvd : 16; + u32 dport : 16; +}; + +struct hinic5_tc_rule_eth_ip6 { + u32 dmac_1 : 8; + u32 dmac_0 : 8; + u32 padding : 16; + + u32 dmac_5 : 8; + u32 dmac_4 : 8; + u32 dmac_3 : 8; + u32 dmac_2 : 8; + + u32 sip6_1 : 16; + u32 sip6_0 : 16; + + u32 sip6_3 : 16; + u32 sip6_2 : 16; + + u32 sip6_5 : 16; + u32 sip6_4 : 16; + + u32 sip6_7 : 16; + u32 sip6_6 : 16; + + u32 dip6_1 : 16; + u32 dip6_0 : 16; + + u32 dip6_3 : 16; + u32 dip6_2 : 16; + + u32 dip6_5 : 16; + u32 dip6_4 : 16; + + u32 dip6_7 : 16; + u32 dip6_6 : 16; + + u32 rsvd : 24; + u32 proto : 8; +}; + +struct hinic5_tc_rule_eth_ip6_tcporudp { + u32 dmac_1 : 8; + u32 dmac_0 : 8; + u32 padding : 16; + + u32 dmac_5 : 8; + u32 dmac_4 : 8; + u32 dmac_3 : 8; + u32 dmac_2 : 8; + + u32 sip6_1 : 16; + u32 sip6_0 : 16; + + u32 sip6_3 : 16; + u32 sip6_2 : 16; + + u32 sip6_5 : 16; + u32 sip6_4 : 16; + + u32 dip6_1 : 16; + u32 dip6_0 : 16; + + u32 dip6_3 : 16; + u32 dip6_2 : 16; + + u32 dip6_5 : 16; + u32 dip6_4 : 16; + + u32 dip6_7 : 16; + u32 dip6_6 : 16; + + u32 dport_h : 8; + u32 sport : 16; + u32 proto : 8; + + u32 rsvd : 24; + u32 dport_l : 8; +}; + +struct hinic5_tc_rule_outer_ip_inner_ip { + u32 outer_dip_0 : 16; + u32 padding : 16; + + u32 outer_dip_2 : 16; + u32 outer_dip_1 : 16; + + u32 outer_dip_4 : 16; 
+ u32 outer_dip_3 : 16; + + u32 outer_dip_6 : 16; /* ipv4_0 */ + u32 outer_dip_5 : 16; + + u32 vni_h : 16; + u32 outer_dip_7 : 16; /* ipv4_1 */ + + u32 sip_2 : 8; + u32 sip_1 : 8; + u32 sip_0 : 8; + u32 vni_l : 8; + + u32 dip_2 : 8; + u32 dip_1 : 8; + u32 dip_0 : 8; + u32 sip_3 : 8; + + u32 rsvd : 24; + u32 dip_3 : 8; +}; + +struct hinic5_tc_rule_outer_ip_inner_ip_tcporudp { + u32 outer_dip_0 : 16; + u32 padding : 16; + + u32 outer_dip_2 : 16; + u32 outer_dip_1 : 16; + + u32 outer_dip_4 : 16; + u32 outer_dip_3 : 16; + + u32 outer_dip_6 : 16; /* ipv4_0 */ + u32 outer_dip_5 : 16; + + u32 vni_h : 16; + u32 outer_dip_7 : 16; /* ipv4_1 */ + + u32 sip_2 : 8; + u32 sip_1 : 8; + u32 sip_0 : 8; + u32 vni_l : 8; + + u32 dip_2 : 8; + u32 dip_1 : 8; + u32 dip_0 : 8; + u32 sip_3 : 8; + + u32 dport_h : 8; + u32 sport : 16; + u32 dip_3 : 8; + + u32 rsvd : 24; + u32 dport_l : 8; +}; + +#endif \ No newline at end of file diff --git a/hinic5/src/dpu_develop_interface/fw_msg_intf/public/comm_defs.h b/hinic5/src/dpu_develop_interface/fw_msg_intf/public/comm_defs.h new file mode 100644 index 00000000..8ac8dafa --- /dev/null +++ b/hinic5/src/dpu_develop_interface/fw_msg_intf/public/comm_defs.h @@ -0,0 +1,57 @@ +/* ***************************************************************************** + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. 
+ ****************************************************************************** + File Name : comm_defs.h + Version : Initial Draft + Created : 2023/10/8 + Last Modified : + Description : driver/mpu/npu/smu COMM defines + Function List : +***************************************************************************** */ + +#ifndef COMM_DEFS_H +#define COMM_DEFS_H + +/* CMDQ MODULE_TYPE */ +typedef enum hinic5_mod_type { + HINIC5_MOD_DEPRECATED +} hinic5_mod_type_e; + +/** + * @brief 注意:以下模块宏必须使用直接数字定义,不能存在运算, 否则CMDQ REGISTER功能将失效 + */ +#define HINIC5_MOD_COMM 0 /* HW communication module */ +#define HINIC5_MOD_L2NIC 1 // (HINIC5_MOD_COMM + 1) /* L2NIC module */ +#define HINIC5_MOD_ROCE 2 // (HINIC5_MOD_L2NIC + 1) +#define HINIC5_MOD_PLOG 3 // (HINIC5_MOD_ROCE + 1) +#define HINIC5_MOD_TOE 4 // (HINIC5_MOD_PLOG + 1) +#define HINIC5_MOD_UB 5 // (HINIC5_MOD_TOE + 1) +#define HINIC5_MOD_VROCE 6 // (HINIC5_MOD_UB + 1) +#define HINIC5_MOD_CFGM 7 // (HINIC5_MOD_VROCE + 1) /* Configuration module */ +#define HINIC5_MOD_HINIC5_CQM 8 // (HINIC5_MOD_CFGM + 1) +#define HINIC5_MOD_VMSEC 9 // (HINIC5_MOD_HINIC5_CQM + 1) +#define COMM_MOD_FC 10 // (HINIC5_MOD_VMSEC + 1) +#define HINIC5_MOD_OVS 11 // (COMM_MOD_FC + 1) +#define HINIC5_MOD_VBS 12 // (HINIC5_MOD_OVS + 1) +#define HINIC5_MOD_MIGRATE 13 // (HINIC5_MOD_VBS + 1) +#define HINIC5_MOD_HILINK 14 // (HINIC5_MOD_MIGRATE + 1) +#define HINIC5_MOD_CRYPT 15 // (HINIC5_MOD_HILINK + 1) /* secure crypto module */ +#define HINIC5_MOD_VIO 16 // (HINIC5_MOD_CRYPT + 1) +#define HINIC5_MOD_IMU 17 // (HINIC5_MOD_VIO + 1) +#define HINIC5_MOD_DFT 18 // (HINIC5_MOD_IMU + 1) /* DFT */ +#define HINIC5_MOD_MACSEC 19 // (HINIC5_MOD_DFT + 1) +#define HINIC5_MOD_SW_FUNC 20 // (HINIC5_MOD_MACSEC + 1) /* Software module id, for PF/VF and multi-host */ +#define HINIC5_MOD_NST 21 // (HINIC5_MOD_SW_FUNC + 1) +#define HINIC5_MOD_HTN 22 // (HINIC5_MOD_NST + 1) +#define HINIC5_MOD_JBOF 23 // (HINIC5_MOD_HTN + 1) +#define HINIC5_MOD_FAKE_FMSG 24 // 
(HINIC5_MOD_JBOF + 1) +#define HINIC5_MOD_UBCNET 25 // (HINIC5_MOD_FAKE_FMSG + 1) +#define HINIC5_MOD_CFM 26 // (HINIC5_MOD_UBCNET + 1) +#define HINIC5_MOD_HIHTR 27 // (HINIC5_MOD_CFM + 1) +#define HINIC5_MOD_MAX 28 // (HINIC5_MOD_HIHTR + 1) +#define HINIC5_MOD_HW_MAX HINIC5_MOD_MAX + +#define MODULE_ID(module) #module +#define HINIC5_MOD(module) MODULE_ID(module) + +#endif diff --git a/hinic5/src/dpu_develop_interface/fw_msg_intf/public/hinic5_comm_cmd.h b/hinic5/src/dpu_develop_interface/fw_msg_intf/public/hinic5_comm_cmd.h new file mode 100644 index 00000000..a1cb58ef --- /dev/null +++ b/hinic5/src/dpu_develop_interface/fw_msg_intf/public/hinic5_comm_cmd.h @@ -0,0 +1,105 @@ +/* ***************************************************************************** + * Copyright (c) Huawei Technologies Co., Ltd. 2019-2022. All rights reserved. + ****************************************************************************** + File Name : hinic5_comm_cmd.h + Version : Initial Draft + Created : 2019/4/25 + Last Modified : + Description : COMM Commands between Driver and MPU + Function List : +***************************************************************************** */ + +#ifndef HINIC5_COMMON_CMD_H +#define HINIC5_COMMON_CMD_H + +#include "base_type.h" + +#define DFX_LOG_PRINT_MAX_PARA 8 + +/** + * @brief comm_cmdq_cmd - CmdQ Common subtype + * + * @details COMM Commands between Driver and MPU + */ +enum comm_cmdq_cmd { + COMM_CMD_UCODE_ARM_BIT_SET = 2, /**< 设置UCODE_ARM位的命令 */ + COMM_CMD_SEND_NPU_DFT_CMD, /**< 发送NPU的调试命令 */ + COMM_CMD_MICROLOG_PRINT_CNT_CLEAR, /**< 清除微日志打印计数的命令 */ + COMM_CMD_UCODE_FAST_MSG_CMD, /**< 发送快速消息的命令 */ + COMM_CMD_UCODE_FAST_MSG_CLEAR, /**< 清除快速消息的命令 */ + COMM_CMD_MICROLOG_GPA_SET, /**< 将缓存日志的主机地址保存到sml表 */ + COMM_CMD_MICROLOG_CTRL_INFO_SET, /**< 更新日志控制信息到sml表 */ +}; + +typedef struct tag_cmdq_microlog_gpa_set { + u32 wr_init_pc_h32; + u32 wr_init_pc_l32; + u32 lt_index; +} cmdq_microlog_gpa_set_s; + +typedef struct 
tag_cmdq_microlog_ctrl_info_set { + struct { +#if (BYTE_ORDER == BIG_ENDIAN) + u32 state : 8; + u32 microlog_en : 1; + u32 microlog_init_flag : 1; + u32 rsvd : 11; + u32 max_num : 11; /* Maximum data block size */ +#else + u32 max_num : 11; /* Maximum data block size */ + u32 rsvd : 11; + u32 microlog_init_flag : 1; + u32 microlog_en : 1; + u32 state : 8; +#endif + }; + u32 ci_index; +} cmdq_microlog_ctrl_info_set_s; + +typedef struct tag_micro_log_item { + union { + struct { +#if (BYTE_ORDER == BIG_ENDIAN) + u32 action : 1; /* 1:记录日志 0:打印 */ + u32 type : 2; /* 0:err 1:trace 2:info */ + u32 feature : 5; /* 0:l2nic 1:roce 2:toe 3:ioe 4:feoe */ + u32 core_id : 6; + u32 thread_id : 2; + u32 valid_param_num : 8; /* 有效的参数数量 */ + u32 tile_id : 3; + u32 ctrl_flag : 1; + u32 rsv : 4; +#else + u32 rsv : 4; + u32 ctrl_flag : 1; + u32 tile_id : 3; + u32 valid_param_num : 8; /* 有效的参数数量 */ + u32 thread_id : 2; + u32 core_id : 6; + u32 feature : 5; /* 0:l2nic 1:roce 2:toe 3:ioe 4:feoe */ + u32 type : 2; /* 0:err 1:trace 2:info */ + u32 action : 1; /* 1:记录日志 0:打印 */ +#endif + } bs; + u32 value; + } ctrl_info; + + u32 string_addr; + u32 data[DFX_LOG_PRINT_MAX_PARA]; + u32 func_name_addr; /* 文件名不超过26个字符 */ + + union { + struct { +#if (BYTE_ORDER == BIG_ENDIAN) + u32 line : 16; /* 对标mpu中log_items,其中的line只需要16bit */ + u32 log_seq : 16; /* 等价于log_pi,范围:0~65535。作为日志seq, 检测是否丢微码日志 */ +#else + u32 log_seq : 16; + u32 line : 16; +#endif + } bs; + u32 value; + } line_and_pi; +} micro_log_item_s; + +#endif /* HINIC5_COMMON_CMD_H */ diff --git a/hinic5/src/dpu_develop_interface/fw_msg_intf/public/npu_cmdq_base_defs.h b/hinic5/src/dpu_develop_interface/fw_msg_intf/public/npu_cmdq_base_defs.h new file mode 100644 index 00000000..6e5d6ac6 --- /dev/null +++ b/hinic5/src/dpu_develop_interface/fw_msg_intf/public/npu_cmdq_base_defs.h @@ -0,0 +1,220 @@ +/****************************************************************************** + * Copyright (c) Huawei Technologies Co., Ltd. 2022. 
All rights reserved. + ****************************************************************************** + File Name : npu_cmdq_base_defs.h + Version : Initial Draft + Description : common command queue interface + Function List : + History : + Modification: Created file + +******************************************************************************/ + +#ifndef NPU_CMDQ_BASE_DEFS_H +#define NPU_CMDQ_BASE_DEFS_H + +#include "base_type.h" + +/* Cmdq ack type */ +enum hinic5_ack_type { + HINIC5_ACK_TYPE_CMDQ, + HINIC5_ACK_TYPE_SHARE_CQN, + HINIC5_ACK_TYPE_APP_CQN, + + HINIC5_MOD_ACK_MAX = 15, +}; + +/* Defines the queue type of the set arm bit. */ +enum { + SET_ARM_BIT_FOR_CMDQ = 0, + SET_ARM_BIT_FOR_L2NIC_SQ, + SET_ARM_BIT_FOR_L2NIC_RQ, + SET_ARM_BIT_TYPE_NUM +}; + +/* Defines the type. Each function supports a maximum of eight CMDQ types. */ +enum { + CMDQ_0 = 0, + CMDQ_1 = 1, /* dedicated and non-blocking queues */ + CMDQ_NUM +}; + +/* *******************cmd common command data structure ************************ */ +// Func->ucode, which is used to set arm bit data, +// The microcode needs to perform big-endian conversion. +struct comm_info_ucode_set_arm_bit { + u32 q_type; + u32 q_id; +}; + +/* *******************WQE data structure ************************ */ +union cmdq_wqe_cs_dw0 { + struct { + u32 err_status : 29; + u32 error_code : 2; + u32 rsvd : 1; + } bs; + u32 val; +}; + +union cmdq_wqe_cs_dw1 { + struct { + u32 token : 16; // [15:0] + u32 cmd : 8; // [23:16] + u32 mod : 5; // [28:24] + u32 ack_type : 2; // [30:29] + u32 obit : 1; // [31] + } drv_wr; // This structure is used when the driver writes the wqe. + + struct { + u32 mod : 5; // [4:0] + u32 ack_type : 3; // [7:5] + u32 cmd : 8; // [15:8] + u32 arm : 1; // [16] + u32 rsvd : 14; // [30:17] + u32 obit : 1; // [31] + } wb; // The uCode writes back the structure of the CS_DW1. + // The driver reads and uses the structure. 
+ u32 val; +}; + +/* CmdQ BD information or write back buffer information */ +struct cmdq_sge { + u32 pa_h; // Upper 32 bits of the physical address + u32 pa_l; // Upper 32 bits of the physical address + u32 len; // Invalid bit[31]. + u32 resv; +}; + +/* Ctrls section definition of WQE */ +struct cmdq_wqe_ctrls { + union { + struct { + u32 bdsl : 8; // [7:0] + u32 drvsl : 2; // [9:8] + u32 rsv : 4; // [13:10] + u32 wf : 1; // [14] + u32 cf : 1; // [15] + u32 tsl : 5; // [20:16] + u32 va : 1; // [21] + u32 df : 1; // [22] + u32 cr : 1; // [23] + u32 difsl : 3; // [26:24] + u32 csl : 2; // [28:27] + u32 ctrlsl : 2; // [30:29] + u32 obit : 1; // [31] + } bs; + u32 val; + } header; + u32 qsf; +}; + +/* Complete section definition of WQE */ +struct cmdq_wqe_cs { + union cmdq_wqe_cs_dw0 dw0; + union cmdq_wqe_cs_dw1 dw1; + union { + struct cmdq_sge sge; + u32 dw2_5[4]; + } ack; +}; + +/* Inline header in WQE inline, describing the length of inline data */ +union cmdq_wqe_inline_header { + struct { + u32 buf_len : 11; // [10:0] inline data len + u32 rsv : 21; // [31:11] + } bs; + u32 val; +}; + +/* Definition of buffer descriptor section in WQE */ +union cmdq_wqe_bds { + struct { + struct cmdq_sge bds_sge; + u32 rsvd[4]; /* Zwy is used to transfer the virtual address of the buffer. */ + } lcmd; /* Long command, non-inline, and SGE describe the buffer information. 
*/ +}; + +/* Definition of CMDQ WQE */ +/* (long cmd, 64B) + * +----------------------------------------+ + * | ctrl section(8B) | + * +----------------------------------------+ + * | | + * | complete section(24B) | + * | | + * +----------------------------------------+ + * | | + * | buffer descriptor section(16B) | + * | | + * +----------------------------------------+ + * | driver section(16B) | + * +----------------------------------------+ + * + * + * (middle cmd, 128B) + * +----------------------------------------+ + * | ctrl section(8B) | + * +----------------------------------------+ + * | | + * | complete section(24B) | + * | | + * +----------------------------------------+ + * | | + * | buffer descriptor section(88B) | + * | | + * +----------------------------------------+ + * | driver section(8B) | + * +----------------------------------------+ + * + * + * (short cmd, 64B) + * +----------------------------------------+ + * | ctrl section(8B) | + * +----------------------------------------+ + * | | + * | complete section(24B) | + * | | + * +----------------------------------------+ + * | | + * | buffer descriptor section(24B) | + * | | + * +----------------------------------------+ + * | driver section(8B) | + * +----------------------------------------+ + */ +struct cmdq_wqe { + struct cmdq_wqe_ctrls ctrls; + struct cmdq_wqe_cs cs; + union cmdq_wqe_bds bds; +}; + +/* Definition of ctrls section in inline WQE */ +struct cmdq_wqe_ctrls_inline { + struct cmdq_wqe_ctrls wqe_ctrls; + u64 db; +}; + +/* Buffer descriptor section definition of WQE */ +union cmdq_wqe_bds_inline { + struct { + union cmdq_wqe_inline_header header; + u32 rsvd; + u8 data_inline[80]; + } mcmd; /* Middle command, inline mode */ + + struct { + union cmdq_wqe_inline_header header; + u32 rsvd; + u8 data_inline[16]; + } scmd; /* Short command, inline mode */ +}; + +struct cmdq_wqe_inline { + struct cmdq_wqe_ctrls_inline ctrls; + struct cmdq_wqe_cs cs; + union cmdq_wqe_bds_inline bds; +}; + 
+#endif diff --git a/hinic5/src/dpu_develop_interface/fw_msg_intf/public/sml_table_struct_dict_def.h b/hinic5/src/dpu_develop_interface/fw_msg_intf/public/sml_table_struct_dict_def.h new file mode 100644 index 00000000..2dd617b0 --- /dev/null +++ b/hinic5/src/dpu_develop_interface/fw_msg_intf/public/sml_table_struct_dict_def.h @@ -0,0 +1,24 @@ +/* + * @file sml_table_struct_dict_def.h + * Copyright (c) Huawei Technologies Co., Ltd. 2026-2026. All rights reserved. + * @brief SML Table 结构体元数据注册宏定义 + * + * 本文件定义了用于将结构体信息注入二进制符号表的宏接口。 + * 开启 DEFINE_TABLE_STRUCT_ENABLE 宏后,调用 DEFINE_TABLE_STRUCT_VAR + * 将在链接后的二进制文件中生成特定前缀的符号,供 sml_table_struct_dict_gen.py 脚本解析。 + */ +#ifndef SML_TABLE_STRUCT_DICT_DEF_H +#define SML_TABLE_STRUCT_DICT_DEF_H + +#ifdef DEFINE_TABLE_STRUCT_ENABLE +#define DEFINE_TABLE_STRUCT_VAR_DIRECT(struct_name, table_name, entry_id, entry_num) \ + volatile struct_name sml_table_struct_var_prefix__##struct_name##__##entry_id##__##entry_num; \ + volatile const char sml_table_struct_table_name_var_prefix__##struct_name[] = #table_name + +#define DEFINE_TABLE_STRUCT_VAR(struct_name, table_name, entry_id, entry_num) \ + DEFINE_TABLE_STRUCT_VAR_DIRECT(struct_name, table_name, entry_id, entry_num) +#else +#define DEFINE_TABLE_STRUCT_VAR(struct_name, table_name, entry_id, entry_num) +#endif + +#endif diff --git a/hinic5/src/dpu_platform_library/host/cfm/bond/hinic5_bond.c b/hinic5/src/dpu_platform_library/host/cfm/bond/hinic5_bond.c new file mode 100644 index 00000000..8f889a84 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/cfm/bond/hinic5_bond.c @@ -0,0 +1,1287 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2026-2026. All rights reserved. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BOND]" fmt + +#include <net/sock.h> +#include <net/bonding.h> +#include <net/netlink.h> +#include <linux/rtnetlink.h> +#include <linux/net.h> +#include <linux/mutex.h> +#include <linux/netdevice.h> +#include <linux/list.h> + +#include "comm_defs.h" +#include "cfg_mgmt_mpu_cmd_defs.h" +#include "hinic5_lld.h" +#include "hinic5_vram_common.h" +#include "hinic5_srv_nic.h" +#include "hinic5_hw.h" +#include "bond_mpu_cmd_defs.h" +#include "bond_cfm_cmd.h" +#include "cfm_cmd.h" +#include "hinic5_bond_inner.h" +#include "ossl_knl.h" +#include "hinic5_bond.h" + +enum bond_service_proc_pos { + BOND_BEFORE_ACTIVE, + BOND_AFTER_ACTIVE, + BOND_BEFORE_MODIFY, + BOND_AFTER_MODIFY, + BOND_BEFORE_DEACTIVE, + BOND_AFTER_DEACTIVE, + BOND_POS_MAX +}; + +#define PCI_DBDF(dom, bus, dev, func) \ + (((dom) << 16) | ((bus) << 8) | ((dev) << 3) | ((func) & 0x7)) + +struct hinic5_bond_mngr { + u32 cnt; + struct hinic5_bond_dev __rcu **bond_dev; + struct socket *rtnl_sock; + struct list_head bond_chip_list; +}; + +enum bond_event_cmd { + BOND_CREATE_CMD = 0, + BOND_DELETE_CMD, + BOND_SET_CMD, + BOND_EVENT_CMD_NUM, +}; + +static u16 g_cfm_cmd_covert[BOND_EVENT_CMD_NUM] = { + CFM_MPU_CMD_BOND_CREATE, + CFM_MPU_CMD_BOND_DELETE, + CFM_MPU_CMD_BOND_SET, +}; + +static u16 g_cmd_covert[BOND_EVENT_CMD_NUM] = { + MPU_CMD_BOND_CREATE, + MPU_CMD_BOND_DELETE, + MPU_CMD_BOND_SET_ATTR, +}; + +static DEFINE_MUTEX(g_bond_event_func_mutex); +static event_func g_bond_event_func[HINIC5_BOND_USER_NUM][BOND_POS_MAX]; + +static DEFINE_MUTEX(g_bond_attach_func_mutex); +static attach_func g_bond_attach_func[HINIC5_BOND_USER_NUM]; + +static DEFINE_MUTEX(g_bond_mutex); +static struct hinic5_bond_mngr bond_mngr = { + .cnt = 0, + .rtnl_sock = NULL, + .bond_dev = NULL, +}; + +struct srcu_struct bdev_srcu; /* cfm bond global SRCU lock */ + +#define BDEV_IS_VALID(id) (bond_mngr.bond_dev && bond_mngr.bond_dev[(id)]) + +struct socket *hinic5_get_bond_mngr_sock(void) +{ + return 
bond_mngr.rtnl_sock; +} + +struct socket **hinic5_get_bond_mngr_sock_addr(void) +{ + return &bond_mngr.rtnl_sock; +} + +bool bond_call_srv_attach_func(enum hinic5_bond_user user, struct bonding *bond) +{ + bool need_attach = false; + + mutex_lock(&g_bond_attach_func_mutex); + if (g_bond_attach_func[user]) + need_attach = g_bond_attach_func[user](bond); + mutex_unlock(&g_bond_attach_func_mutex); + + return need_attach; +} + +static bool bond_dev_is_activated(struct hinic5_bond_dev *bdev) +{ + bool is_activated = false; + + spin_lock(&bdev->lock); + is_activated = (bdev->status == BOND_DEV_STATUS_ACTIVATED); + spin_unlock(&bdev->lock); + return is_activated; +} + +static u32 bond_gen_uplink_id(struct hinic5_bond_dev *bdev) +{ + struct hinic5_lld_dev *lld_dev = NULL; + struct pci_dev *pdev = NULL; + u32 domain, bus, dev, func; + u32 uplink_id = 0; + u8 i; + + spin_lock(&bdev->lock); + for (i = 0; i < BOND_PORT_MAX_NUM; i++) { + if (BITMAP_JUDGE(bdev->bond_attr.slaves, i) != 0) { + if (bdev->tracker.ndev[i] == NULL) { + continue; + } + lld_dev = hinic5_get_lld_dev_by_netdev(bdev->tracker.ndev[i]); + if (lld_dev == NULL) { + continue; + } + /* TODO: 等待SDK提供接口 */ + pdev = to_pci_dev(lld_dev->dev); + domain = (u32)pci_domain_nr(pdev->bus); + bus = pdev->bus->number; + dev = PCI_SLOT(pdev->devfn); + func = PCI_FUNC(pdev->devfn); + uplink_id = PCI_DBDF(domain, bus, dev, func); + break; + } + } + spin_unlock(&bdev->lock); + + return uplink_id; +} + +void bond_dev_free_chip_bond_id(struct hinic5_bond_dev *bdev) +{ + struct hinic5_bond_chip *node_tmp = NULL; + struct hinic5_bond_chip *bond_chip = NULL; + u32 chip_bid = HINIC5_BOND_START_ID; + + mutex_lock(&g_bond_mutex); + list_for_each_entry(node_tmp, &bond_mngr.bond_chip_list, node) { + if (strncmp(node_tmp->chip_name, bdev->chip_name, IFNAMSIZ) == 0) { + bond_chip = node_tmp; + break; + } + } + + if (bond_chip) { + /* find chip_bond_id is exit */ + for (; chip_bid < HINIC5_MAX_BOND_ID_PER_CARD; chip_bid++) { + if 
(bond_chip->chip_bond_id[chip_bid] == bdev->bond_attr.bond_id) { + bond_chip->chip_bond_id[chip_bid] = HINIC5_INVALID_BOND_ID; + bond_chip->bond_num--; + bond_master_info(bdev->bond->dev, + "Bond chip %s bond id %u free success\n", + bdev->chip_name, chip_bid); + break; + } + } + + if (bond_chip->bond_num == 0) { + list_del(&bond_chip->node); + kfree(bond_chip); + bond_master_info(bdev->bond->dev, "Bond chip node %s free success\n", + bdev->chip_name); + } + } + mutex_unlock(&g_bond_mutex); +} + +/* Record bdev's chip name and allocate a per-chip bond id slot; returns 0 or -errno. */ +static int bond_dev_alloc_chip_bond_id(struct hinic5_bond_dev *bdev, char *chip_name, const u32 chip_name_len) +{ + struct hinic5_bond_chip *node_tmp = NULL; + struct hinic5_bond_chip *bond_chip = NULL; + u32 chip_bid = HINIC5_BOND_START_ID; + int err = 0; + + mutex_lock(&g_bond_mutex); + /* Initialize bdev's chip name. Use the caller-supplied length: + * sizeof(chip_name) here is sizeof(char *) and would copy only 8 bytes. + */ + memcpy(bdev->chip_name, chip_name, chip_name_len); + + list_for_each_entry(node_tmp, &bond_mngr.bond_chip_list, node) { + if (strncmp(node_tmp->chip_name, chip_name, chip_name_len) == 0) { + bond_chip = node_tmp; + break; + } + } + + if (bond_chip) { + /* check whether this bond id already exists on the chip */ + for (; chip_bid < HINIC5_MAX_BOND_ID_PER_CARD; chip_bid++) { + if (bond_chip->chip_bond_id[chip_bid] == bdev->bond_attr.bond_id) + goto exit; + } + + /* find new chip_bond_id */ + for (chip_bid = HINIC5_BOND_START_ID; chip_bid < HINIC5_MAX_BOND_ID_PER_CARD; chip_bid++) { + if (bond_chip->chip_bond_id[chip_bid] == HINIC5_INVALID_BOND_ID) { + bdev->chip_bond_id = chip_bid; + bond_chip->chip_bond_id[chip_bid] = (u8)bdev->bond_attr.bond_id; + bond_chip->bond_num++; + bond_master_info(bdev->bond->dev, + "Bond chip %s bond id %u alloc success\n", + bdev->chip_name, chip_bid); + break; + } + } + + if (chip_bid >= HINIC5_MAX_BOND_ID_PER_CARD) { + bond_master_err(bdev->bond->dev, "bond_dev_alloc_chip_bond_id: chip_bond_id is full\n"); + err = -EINVAL; + } + } else { + bond_chip = kzalloc(sizeof(struct hinic5_bond_chip), GFP_KERNEL); + if (!bond_chip) { + bond_master_err(bdev->bond->dev,
"Bond chip %s node alloc failed\n", chip_name); + err = -ENOMEM; + goto exit; + } + bond_master_info(bdev->bond->dev, "bond chip %s node alloc success\n", chip_name); + + bdev->chip_bond_id = chip_bid; + memcpy(bond_chip->chip_name, chip_name, chip_name_len); + bond_master_info(bdev->bond->dev, "Bond chip %s bond id %u alloc success\n", bdev->chip_name, chip_bid); + bond_chip->chip_bond_id[chip_bid++] = (u8)bdev->bond_attr.bond_id; + bond_chip->bond_num++; + + for (; chip_bid < HINIC5_MAX_BOND_ID_PER_CARD; chip_bid++) { + bond_chip->chip_bond_id[chip_bid] = HINIC5_INVALID_BOND_ID; + } + + list_add_tail(&bond_chip->node, &bond_mngr.bond_chip_list); + } +exit: + if (err != 0) + memset(bdev->chip_name, 0, sizeof(bdev->chip_name)); /* 清除bdev的chip name */ + mutex_unlock(&g_bond_mutex); + return err; +} + +/* get index of physical port and initialize the tracker of bdev */ +u8 bond_dev_track_port(struct hinic5_bond_dev *bdev, struct net_device *ndev) +{ + struct hinic5_lld_dev *lld_dev = NULL; + char chip_name[IFNAMSIZ] = {0}; + char ndev_name[IFNAMSIZ] = {0}; + bool is_replaced = false; + u32 tracker_cnt = 0; + u8 port_id = 0; + int err = 0; + + lld_dev = hinic5_get_lld_dev_by_netdev(ndev); + if (lld_dev == NULL || hinic5_func_type(lld_dev->hwdev) == TYPE_VF) { + bond_slave_err(bdev->bond->dev, ndev, "invalid slave: %s\n", ndev->name); + return PORT_INVALID_ID; + } + + bond_slave_info(bdev->bond->dev, ndev, "track ndev name: %s\n", ndev->name); + port_id = hinic5_physical_port_id(lld_dev->hwdev); + + err = hinic5_get_chip_name(lld_dev, chip_name, sizeof(chip_name)); + if (err != 0) { + bond_slave_err(bdev->bond->dev, ndev, "Bond Slave get chip name err %d\n", err); + return PORT_INVALID_ID; + } + + spin_lock(&bdev->lock); + /* check chip name consistency, must be same card */ + if (bdev->tracker.cnt > 0 && strcmp(bdev->chip_name, chip_name) != 0) { + /* Only support bond for same card */ + spin_unlock(&bdev->lock); + bond_slave_err(bdev->bond->dev, ndev, + "Bond track 
err, slave not for same card, bond dev chip_name %s, slave chip name %s\n", + bdev->chip_name, chip_name); + return PORT_INVALID_ID; + } + tracker_cnt = bdev->tracker.cnt; + spin_unlock(&bdev->lock); + + /* first slave device , save chip name into bdev */ + if (tracker_cnt == 0) { + err = bond_dev_alloc_chip_bond_id(bdev, chip_name, IFNAMSIZ); + if (err != 0) { + bond_slave_err(bdev->bond->dev, ndev, "Alloc bond chip id err %d\n", err); + return PORT_INVALID_ID; + } + } + + spin_lock(&bdev->lock); + /* attach netdev to the port position associated with it */ + if (bdev->tracker.ndev[port_id]) { + is_replaced = true; + memcpy(ndev_name, bdev->tracker.ndev[port_id]->name, + sizeof(bdev->tracker.ndev[port_id]->name)); + } else { + bdev->tracker.cnt++; + } + tracker_cnt = bdev->tracker.cnt; + bdev->tracker.ndev[port_id] = ndev; + bdev->tracker.netdev_state[port_id].link_up = 0; + bdev->tracker.netdev_state[port_id].tx_enabled = 0; + spin_unlock(&bdev->lock); + if (is_replaced) + bond_slave_warn(bdev->bond->dev, ndev, "Old ndev: %s is replaced\n", ndev_name); + bond_slave_info(bdev->bond->dev, ndev, "TRACK cnt: %u, slave ndev name: %s\n", + tracker_cnt, ndev->name); + + return port_id; +} + +struct hinic5_bond_dev *bond_get_bdev(const struct bonding *bond) +{ + struct hinic5_bond_dev *bdev = NULL; + int bid; + + mutex_lock(&g_bond_mutex); + for (bid = HINIC5_BOND_START_ID; bid < HINIC5_MAX_BODN_ID_NUM; bid++) { + bdev = bond_mngr.bond_dev[bid]; + if (bdev == NULL) + continue; + + if (bond == bdev->bond) { + mutex_unlock(&g_bond_mutex); + return bdev; + } + } + mutex_unlock(&g_bond_mutex); + return NULL; +} + +static int bond_get_service_en_bitmap(struct hinic5_bond_dev *bdev) +{ + int err; + struct hinic5_board_info info = {0}; + struct hinic5_lld_dev *lld_dev = hinic5_get_lld_dev_by_chip_name(bdev->chip_name); + + if (!lld_dev) { + bond_master_err(bdev->bond->dev, "no available hinic5 lld device, chip_name: %s\n", + bdev->chip_name); + return -ENXIO; + } + err = 
hinic5_get_board_info(lld_dev->hwdev, &info, HINIC5_CHANNEL_NIC); + if (err != 0) { + bond_master_err(bdev->bond->dev, "bond get board info failed\n"); + return err; + } + + bond_master_info(bdev->bond->dev, "get service_en_bitmap success: %d", info.service_en_bitmap); + bdev->service_en_bitmap = info.service_en_bitmap; + + return 0; +} + +static int bond_send_mpu_cfm_msg(struct hinic5_bond_dev *bdev, struct hinic5_bond_cmd *cmd_info, u8 cmd_type) +{ + int err = 0; + u16 msg_cmd_type = g_cfm_cmd_covert[cmd_type]; + u16 out_size = sizeof(cfm_bond_cmd_s); + cfm_bond_cmd_s cfm_bond_cmd_info = {0}; + struct hinic5_lld_dev *lld_dev = hinic5_get_lld_dev_by_chip_name(bdev->chip_name); + + if (!lld_dev) { + bond_master_err(bdev->bond->dev, + "no available hinic5 lld device(cfm), chip_name: %s\n", + bdev->chip_name); + return -ENXIO; + } + + memcpy(&cfm_bond_cmd_info, (const void *)cmd_info, sizeof(struct hinic5_bond_cmd)); + err = hinic5_msg_to_mgmt_sync(lld_dev->hwdev, HINIC5_MOD_CFM, msg_cmd_type, + &cfm_bond_cmd_info, sizeof(cfm_bond_cmd_s), + &cfm_bond_cmd_info, &out_size, + HINIC5_BOND_MSG_TIMEOUT_MS, HINIC5_CHANNEL_NIC); + if (err != 0 || out_size == 0 || cfm_bond_cmd_info.comm_head.status != 0) { + bond_master_err(bdev->bond->dev, + "bond msg cmd type: %u failed, err: %d, " \ + "cfm bond sts: %u, out size: %u\n", + msg_cmd_type, err, cfm_bond_cmd_info.comm_head.status, out_size); + err = -EIO; + } + return err; +} + +static int bond_send_mpu_ovs_msg(struct hinic5_bond_dev *bdev, struct hinic5_bond_cmd *cmd_info, u8 cmd_type) +{ + int err = 0; + u16 msg_cmd_type = g_cmd_covert[cmd_type]; + u16 out_size = sizeof(struct hinic5_bond_cmd); + struct hinic5_lld_dev *lld_dev = hinic5_get_lld_dev_by_chip_name(bdev->chip_name); + + if (!lld_dev) { + bond_master_err(bdev->bond->dev, + "no available hinic5 lld device(ovs), chip_name: %s\n", + bdev->chip_name); + return -ENXIO; + } + + err = hinic5_msg_to_mgmt_sync(lld_dev->hwdev, HINIC5_MOD_OVS, msg_cmd_type, cmd_info, + 
sizeof(struct hinic5_bond_cmd), cmd_info, &out_size, 0, + HINIC5_CHANNEL_NIC); + if (err != 0 || out_size == 0 || cmd_info->comm_head.status != 0) { + bond_master_err(bdev->bond->dev, + "bond msg cmd type: %u failed, err: %d, sts: %u, out size: %u\n", + msg_cmd_type, err, cmd_info->comm_head.status, out_size); + err = -EIO; + } + return err; +} + +static int bond_send_mpu_msg(struct hinic5_bond_dev *bdev, struct hinic5_bond_cmd *cmd_info, u8 cmd_type) +{ + int err = 0; + + if (bdev->service_en_bitmap == 0) { + err = bond_get_service_en_bitmap(bdev); + if (err != 0) + return err; + } + if (BITMAP_JUDGE(bdev->service_en_bitmap, SERVICE_BIT_CFM) != 0) + return bond_send_mpu_cfm_msg(bdev, cmd_info, cmd_type); + return bond_send_mpu_ovs_msg(bdev, cmd_info, cmd_type); +} + +static int bond_send_upcmd(struct hinic5_bond_dev *bdev, struct bond_attr *attr, u8 cmd_type) +{ + struct hinic5_bond_cmd cmd_info = {{0}, 0}; + + cmd_info.sub_cmd = 0; + cmd_info.comm_head.status = 0; + + if (attr) + memcpy((void *)&cmd_info.attr, attr, sizeof(*attr)); + else + cmd_info.attr.slaves = bdev->bond_attr.slaves; + + /* cmd_info bond_id is chip bond id */ + cmd_info.attr.bond_id = (u16)bdev->chip_bond_id; + + if (cmd_type == BOND_CREATE_CMD) { + strncpy((char *)cmd_info.attr.bond_name, bdev->name, sizeof(cmd_info.attr.bond_name)); + cmd_info.attr.bond_name[sizeof(cmd_info.attr.bond_name) - 1] = '\0'; + } + + return bond_send_mpu_msg(bdev, &cmd_info, cmd_type); +} + +static int bond_upcmd_deactivate(struct hinic5_bond_dev *bdev) +{ + int err; + u16 id_tmp; + enum bond_dev_status status; + + spin_lock(&bdev->lock); + status = bdev->status; + spin_unlock(&bdev->lock); + if (status == BOND_DEV_STATUS_IDLE) + return 0; + + bond_master_info(bdev->bond->dev, "hinic5_bond: deactivate bond: %u\n", bdev->bond_attr.bond_id); + + err = bond_send_upcmd(bdev, NULL, BOND_DELETE_CMD); + if (err == 0) { + spin_lock(&bdev->lock); + id_tmp = bdev->bond_attr.bond_id; + memset(&bdev->bond_attr, 0, 
sizeof(bdev->bond_attr)); + bdev->status = BOND_DEV_STATUS_IDLE; + bdev->bond_attr.bond_id = id_tmp; + spin_unlock(&bdev->lock); + } + + return err; +} + +static void bond_update_slave_info(struct hinic5_bond_dev *bdev, struct bond_attr *attr) +{ + struct net_device *ndev = NULL; + u8 port_id; + + /* if bond dev down(ifconfig down), slave dev is up, should not set active slaves */ + if (!netif_running(bdev->bond->dev)) + return; + + if (attr->bond_mode == BOND_MODE_ACTIVEBACKUP) { + rcu_read_lock(); + ndev = bond_option_active_slave_get_rcu(bdev->bond); + rcu_read_unlock(); + } + + for (port_id = 0; port_id < BOND_PORT_MAX_NUM; port_id++) { + if (bdev->tracker.netdev_state[port_id].tx_enabled == 0) + continue; + + if (attr->bond_mode == BOND_MODE_8023AD) { + BITMAP_SET(attr->active_slaves, port_id); + BITMAP_SET(attr->lacp_collect_slaves, port_id); + } else if (attr->bond_mode == BOND_MODE_XOR) { + BITMAP_SET(attr->active_slaves, port_id); + } else if (ndev && (ndev == bdev->tracker.ndev[port_id])) { + /* BOND_MODE_ACTIVEBACKUP */ + BITMAP_SET(attr->active_slaves, port_id); + } + } +} + +void bond_print_bdev_attr(struct hinic5_bond_dev *bdev, struct bond_attr *attr) +{ + bond_master_info(bdev->bond->dev, + "mode: %u, up_delay: %u, down_delay: %u, hash: %u, lacp_collect_slaves: %u, tracker cnt: %u\n", + attr->bond_mode, + attr->up_delay, + attr->down_delay, + attr->xmit_hash_policy, + attr->lacp_collect_slaves, + bdev->tracker.cnt); + bond_master_info(bdev->bond->dev, "slave ports bitmap: 0x%x\n", attr->slaves); + bond_master_info(bdev->bond->dev, "active slave ports bitmap: 0x%x\n", attr->active_slaves); + bond_master_info(bdev->bond->dev, "slave pf bitmap: 0x%x\n", attr->bond_pf_bitmap); + bond_master_info(bdev->bond->dev, "user bitmap: 0x%x\n", attr->user_bitmap); +} + +static int bond_upcmd_config(struct hinic5_bond_dev *bdev, struct bond_attr *attr) +{ + int err; + + bond_update_slave_info(bdev, attr); + + if (memcmp(&bdev->bond_attr, attr, sizeof(struct 
bond_attr)) == 0) + return 0; + + bond_master_info(bdev->bond->dev, "Config bond id: %u\n", attr->bond_id); + bond_print_bdev_attr(bdev, attr); + + err = bond_send_upcmd(bdev, attr, BOND_SET_CMD); + if (err == 0) + memcpy(&bdev->bond_attr, attr, sizeof(struct bond_attr)); + + return err; +} + +static int bond_upcmd_activate(struct hinic5_bond_dev *bdev, struct bond_attr *attr) +{ + int err; + + if (bond_dev_is_activated(bdev)) + return 0; + + bond_update_slave_info(bdev, attr); + bond_master_info(bdev->bond->dev, "Active bond id: %u\n", bdev->bond_attr.bond_id); + bond_print_bdev_attr(bdev, attr); + + err = bond_send_upcmd(bdev, attr, BOND_CREATE_CMD); + if (err == 0) { + spin_lock(&bdev->lock); + bdev->status = BOND_DEV_STATUS_ACTIVATED; + spin_unlock(&bdev->lock); + err = bond_upcmd_config(bdev, attr); /* 先create再set是为了兼容老固件mpu处理流程 */ + } + + return err; +} + +static void bond_call_service_func(struct hinic5_bond_dev *bdev, struct bond_attr *attr, + enum bond_service_proc_pos pos, int err) +{ + u32 user; + + mutex_lock(&g_bond_event_func_mutex); + for (user = HINIC5_BOND_USER_OVS; user < HINIC5_BOND_USER_NUM; user++) { + if (g_bond_event_func[user][pos]) + g_bond_event_func[user][pos](bdev->name, attr, err); + } + mutex_unlock(&g_bond_event_func_mutex); +} + +static u32 bond_get_user_bitmap(struct hinic5_bond_dev *bdev) +{ + u32 user_bitmap = 0; + u32 user; + + for (user = HINIC5_BOND_USER_OVS; user < HINIC5_BOND_USER_NUM; user++) { + if (bdev->slot_used[user] == 1) { + BITMAP_SET(user_bitmap, user); + } + } + return user_bitmap; +} + +static void bond_do_work(struct work_struct *work) +{ + bool is_bonded = 0; + struct bond_attr attr; + int is_in_kexec; + int err = 0; + struct delayed_work *delayed_work = to_delayed_work(work); + struct hinic5_bond_dev *bdev = container_of(delayed_work, struct hinic5_bond_dev, bond_work); + + is_in_kexec = hinic5_vram_get_kexec_flag(); + if (is_in_kexec != 0) { + bond_master_info(bdev->bond->dev, "Skip changing bond status during 
os replace\n"); + return; + } + + spin_lock(&bdev->lock); + is_bonded = bdev->tracker.is_bonded; + attr = bdev->new_attr; + spin_unlock(&bdev->lock); + attr.user_bitmap = bond_get_user_bitmap(bdev); + + bond_master_info(bdev->bond->dev, + "bond_do_work is_bonded: %d, bond_dev_is_activated(bdev): %d\n", + is_bonded, bond_dev_is_activated(bdev)); + + /* is_bonded indicates whether bond should be activated. */ + if (is_bonded && !bond_dev_is_activated(bdev)) { + bond_call_service_func(bdev, &attr, BOND_BEFORE_ACTIVE, 0); + err = bond_upcmd_activate(bdev, &attr); + bond_call_service_func(bdev, &attr, BOND_AFTER_ACTIVE, err); + } else if (is_bonded && bond_dev_is_activated(bdev)) { + bond_call_service_func(bdev, &attr, BOND_BEFORE_MODIFY, 0); + err = bond_upcmd_config(bdev, &attr); + bond_call_service_func(bdev, &attr, BOND_AFTER_MODIFY, err); + } else if (!is_bonded && bond_dev_is_activated(bdev)) { + bond_call_service_func(bdev, &attr, BOND_BEFORE_DEACTIVE, 0); + err = bond_upcmd_deactivate(bdev); + bond_call_service_func(bdev, &attr, BOND_AFTER_DEACTIVE, err); + } + + if (err != 0) + bond_master_err(bdev->bond->dev, "hinic5_bond: Do bond failed, err: %d\n", err); +} + +static void bond_put_knl_bonding(struct bonding *bond) +{ + dev_put(bond->dev); +} + +static void bond_dev_deinit(struct hinic5_bond_dev *bdev) +{ + spin_lock(&bdev->lock); + WRITE_ONCE(bdev->dead, true); + spin_unlock(&bdev->lock); + + /* 阻塞等待bond_work任务结束 */ + cancel_delayed_work_sync(&bdev->bond_work); + /* 阻塞等待所有srcu读操作结束 */ + synchronize_srcu(&bdev_srcu); + if (bdev->wq) { + destroy_workqueue(bdev->wq); + } + if (bdev->bond != NULL) { + bond_put_knl_bonding(bdev->bond); + bdev->bond = NULL; + } + kfree(bdev); +} + +static struct hinic5_bond_dev *bond_dev_init(struct bonding *bond, const char *name) +{ + struct hinic5_bond_dev *bdev = NULL; + + bdev = kzalloc(sizeof(*bdev), GFP_KERNEL); + if (bdev == NULL) + return NULL; + + bdev->wq = create_singlethread_workqueue("hinic5_bond_wq"); + if 
(!bdev->wq) { + pr_err("hinic5_bond: Failed to create workqueue\n"); + goto bdev_wq_err; + } + + if (strlen(name) >= sizeof(bdev->name)) { + pr_err("hinic5_bond: bond name too long: %s (max %zu)\n", + name, sizeof(bdev->name) - 1); + goto bdev_name_err; + } + strncpy(bdev->name, name, sizeof(bdev->name)); + + INIT_DELAYED_WORK(&bdev->bond_work, bond_do_work); + bdev->status = BOND_DEV_STATUS_IDLE; + + spin_lock_init(&bdev->lock); + + dev_hold(bond->dev); + bdev->bond = bond; + + return bdev; + +bdev_name_err: + destroy_workqueue(bdev->wq); +bdev_wq_err: + kfree(bdev); + return NULL; +} + +static struct bonding *bond_get_knl_bonding(const char *name) +{ + struct net_device *ndev_tmp = NULL; + + rtnl_lock(); + for_each_netdev(&init_net, ndev_tmp) { + if (netif_is_bond_master(ndev_tmp) && (strcmp(ndev_tmp->name, name) == 0)) { + dev_hold(ndev_tmp); + rtnl_unlock(); + return netdev_priv(ndev_tmp); + } + } + rtnl_unlock(); + return NULL; +} + +static int bond_dev_release(struct hinic5_bond_dev *bdev) +{ + int err; + u8 i; + + err = bond_upcmd_deactivate(bdev); + if (err != 0) { + mutex_unlock(&g_bond_mutex); + bond_master_err(bdev->bond->dev, "Failed to deactivate dev\n"); + return err; + } + + for (i = HINIC5_BOND_START_ID; i < HINIC5_MAX_BODN_ID_NUM; i++) { + if (bond_mngr.bond_dev[i] == bdev) { + bond_mngr.bond_dev[i] = NULL; + bond_mngr.cnt--; + bond_master_info(bdev->bond->dev, "Free bond, id: %u mngr_cnt:%u\n", i, bond_mngr.cnt); + break; + } + } + + mutex_unlock(&g_bond_mutex); + bond_dev_free_chip_bond_id(bdev); + bond_dev_deinit(bdev); + + return err; +} + +static void bond_dev_free(struct kref *ref) +{ + struct hinic5_bond_dev *bdev = NULL; + + bdev = container_of(ref, struct hinic5_bond_dev, ref); + bond_dev_release(bdev); +} + +static struct hinic5_bond_dev *bond_dev_alloc(const char *name, struct bonding *bond) +{ + struct hinic5_bond_dev *bdev = NULL; + u16 i; + + bdev = bond_dev_init(bond, name); + if (bdev == NULL) { + return NULL; + } + + for (i = 
HINIC5_BOND_START_ID; i < HINIC5_MAX_BODN_ID_NUM; i++) { + if ((bond_mngr.bond_dev != NULL) && (bond_mngr.bond_dev[i] == NULL)) { + bdev->bond_attr.bond_id = i; + bond_mngr.bond_dev[i] = bdev; + bond_mngr.cnt++; + bond_master_info(bond->dev, + "Create bond dev: %s, bond id: %u, bond cnt: %u\n", + name, i, bond_mngr.cnt); + break; + } + } + + if (i >= HINIC5_MAX_BODN_ID_NUM) { + bond_dev_deinit(bdev); + bdev = NULL; + pr_err("Bond dev: %s: Failed to get free bond id\n", name); + } + + return bdev; +} + +static void bond_init_all_slave(struct hinic5_bond_dev *bdev, struct bonding *bond) +{ + int i = 0, cnt = 0; + struct slave *slave = NULL; + struct list_head *iter = NULL; + struct net_device *slave_ndev[BOND_PORT_MAX_NUM]; + + rcu_read_lock(); + bond_for_each_slave_rcu(bond, slave, iter) { + if (cnt >= BOND_PORT_MAX_NUM) + break; + slave_ndev[cnt] = slave->dev; + dev_hold(slave_ndev[cnt++]); + (void)iter; + } + rcu_read_unlock(); + + /* TODO: 该流程是否冗余,后续进一步确定 */ + for (i = 0; i < cnt; ++i) { + if (bond_dev_track_port(bdev, slave_ndev[i]) == PORT_INVALID_ID) + continue; + } + for (i = 0; i < cnt; ++i) + bond_handle_rtnl_event(slave_ndev[i]); + bond_handle_rtnl_event(bond->dev); + + while (cnt != 0) + dev_put(slave_ndev[--cnt]); +} + +static struct hinic5_bond_dev *bond_dev_by_name(const char *name) +{ + struct hinic5_bond_dev *bdev = NULL; + int i; + + for (i = HINIC5_BOND_START_ID; i < HINIC5_MAX_BODN_ID_NUM; i++) { + if (BDEV_IS_VALID(i) && (strcmp(bond_mngr.bond_dev[i]->name, name) == 0)) { + bdev = bond_mngr.bond_dev[i]; + break; + } + } + + return bdev; +} + +static void bond_dev_user_attach(struct hinic5_bond_dev *bdev, enum hinic5_bond_user user) +{ + u32 user_bitmap; + if (user < 0 || user >= HINIC5_BOND_USER_NUM || bdev->slot_used[user] != 0) { + return; + } + + bdev->slot_used[user] = 1; + if (kref_get_unless_zero(&bdev->ref) == 0) { + kref_init(&bdev->ref); + } else { + user_bitmap = bond_get_user_bitmap(bdev); + bond_master_info(bdev->bond->dev, "Bond user 
%u attach bond %s, user_bitmap %#x\n", + user, bdev->name, user_bitmap); + queue_delayed_work(bdev->wq, &bdev->bond_work, 0); + } +} + +static void bond_dev_user_detach(struct hinic5_bond_dev *bdev, + enum hinic5_bond_user user, bool *freed) +{ + u32 user_bitmap; + if (user < 0 || user >= HINIC5_BOND_USER_NUM) { + return; + } + + if (bdev->slot_used[user] != 0) { + bdev->slot_used[user] = 0; + if (kref_read(&bdev->ref) == 1) + *freed = true; + if (kref_put(&bdev->ref, bond_dev_free) == 0) { + user_bitmap = bond_get_user_bitmap(bdev); + bond_master_info(bdev->bond->dev, "Bond: user %u detach bond %s, " \ + "user_bitmap %#x\n", user, bdev->name, user_bitmap); + queue_delayed_work(bdev->wq, &bdev->bond_work, 0); + } + } +} + +/* bond事件上报时绑定bond */ +int hinic5_bond_event_attach(struct bonding *bond, enum hinic5_bond_user user) +{ + struct hinic5_bond_dev *bdev = NULL; + + if (bond->params.mode != BOND_MODE_8023AD && + bond->params.mode != BOND_MODE_XOR && + bond->params.mode != BOND_MODE_ACTIVEBACKUP) { + bond_master_err(bond->dev, "bond mode:%d is not supported\n", bond->params.mode); + return -EINVAL; + } + + mutex_lock(&g_bond_mutex); + bdev = bond_dev_by_name(bond->dev->name); + if (bdev == NULL) { + bdev = bond_dev_alloc(bond->dev->name, bond); + if (bdev == NULL) { + mutex_unlock(&g_bond_mutex); + return -ENODEV; + } + } else { + bond_master_info(bdev->bond->dev, + "Bond event attach %s already exist\n", bond->dev->name); + } + + bond_dev_user_attach(bdev, user); + mutex_unlock(&g_bond_mutex); + + return 0; +} + +bool hinic5_bond_slave_is_match(struct bonding *bond) +{ + struct hinic5_lld_dev *lld_dev = NULL; + struct list_head *iter = NULL; + struct slave *slave = NULL; + char chip_name[IFNAMSIZ] = {0}; + char tmp_name[IFNAMSIZ] = {0}; + int err = 0; + + if (!bond_has_slaves(bond)) { + bond_master_info(bond->dev, "Have no slaves"); + return true; + } + + rcu_read_lock(); + bond_for_each_slave_rcu(bond, slave, iter) { + lld_dev = 
hinic5_get_lld_dev_by_netdev(slave->dev); + if (lld_dev == NULL) { + bond_slave_warn(bond->dev, slave->dev, "Bond Slave device mismatch, is not hinic5 netdev\n"); + goto out; + } + + /* Only warn (do not fail the match) when a bonded function is a VF */ + if (hinic5_func_type(lld_dev->hwdev) == TYPE_VF) { + bond_slave_warn(bond->dev, slave->dev, "Bond Slave device is VF\n"); + continue; + } + + err = hinic5_get_chip_name(lld_dev, tmp_name, sizeof(chip_name)); + if (err != 0) { + bond_slave_err(bond->dev, slave->dev, + "Bond Slave get chip name err %d\n", err); + goto out; + } + + if (strlen(chip_name) == 0) { + memcpy(chip_name, tmp_name, sizeof(tmp_name)); + continue; + } + + /* Only support bond for same card */ + if (strcmp(tmp_name, chip_name) != 0) { + bond_slave_err(bond->dev, slave->dev, + "Bond Slave not match err, bond dev chip_name %s, " \ + "slave chip name %s\n", + chip_name, tmp_name); + goto out; + } + } + rcu_read_unlock(); + return true; + +out: + rcu_read_unlock(); + return false; +} + +/* Look up the logical bond id for a bond netdev name; returns 0 on hit, -EINVAL otherwise. + * NOTE(review): reads bond_mngr without taking g_bond_mutex, unlike the other + * accessors - confirm every caller already holds the lock. + */ +int hinic5_bond_get_id_by_name(u8 *bond_name, u16 *bond_id) +{ + u16 i; + + if ((bond_name == NULL) || (bond_id == NULL)) { + pr_err("hinic5_bond: invalid input param\n"); + return -EINVAL; + } + + for (i = HINIC5_BOND_START_ID; i < HINIC5_MAX_BODN_ID_NUM; i++) { + if (!BDEV_IS_VALID(i)) { + continue; + } + + /* cast: bond_name is u8 * but strcmp() takes const char * */ + if (strcmp((const char *)bond_name, bond_mngr.bond_dev[i]->name) == 0) { + *bond_id = bond_mngr.bond_dev[i]->bond_attr.bond_id; + return 0; + } + } + + return -EINVAL; +} + +int hinic5_bond_attach(const char *name, enum hinic5_bond_user user, u16 *bond_id) +{ + struct hinic5_bond_dev *bdev = NULL; + struct bonding *bond = NULL; + bool is_new_dev = false; + + if (user >= HINIC5_BOND_USER_NUM) + return -EINVAL; + + if (!name || !bond_id) + return -EINVAL; + + bond = bond_get_knl_bonding(name); + if (bond == NULL) { + pr_warn("hinic5_bond: Kernel bond %s not exist.\n", name); + return -ENODEV; + } + + if (bond->params.mode != BOND_MODE_8023AD && + bond->params.mode != BOND_MODE_XOR && + bond->params.mode !=
BOND_MODE_ACTIVEBACKUP) { + bond_master_warn(bond->dev, "bond mode:%d is not supported\n", bond->params.mode); + } + + /* Need to return bond id, so only print warning log */ + if (!hinic5_bond_slave_is_match(bond)) { + bond_master_warn(bond->dev, "Bond attach slaves invalid or not exist\n"); + } + + mutex_lock(&g_bond_mutex); + bdev = bond_dev_by_name(name); + if (bdev == NULL) { + /* if bond_dev_alloc return success, will increment the bond netdev reference count. */ + bdev = bond_dev_alloc(name, bond); + if (bdev == NULL) { + mutex_unlock(&g_bond_mutex); + bond_put_knl_bonding(bond); + return -ENODEV; + } + is_new_dev = true; + } else { + bond_master_info(bdev->bond->dev, "Attach %s already exist\n", name); + } + + bond_dev_user_attach(bdev, user); + mutex_unlock(&g_bond_mutex); + + if (is_new_dev) { + bond_init_all_slave(bdev, bond); + flush_delayed_work(&bdev->bond_work); + } + + bond_put_knl_bonding(bond); + + *bond_id = bdev->bond_attr.bond_id; + return 0; +} +EXPORT_SYMBOL(hinic5_bond_attach); + +int hinic5_bond_detach(u16 bond_id, enum hinic5_bond_user user) +{ + int err = 0; + bool lock_freed = false; + if (user >= HINIC5_BOND_USER_NUM) { + pr_err("Bond attach user num error: %u\n", user); + return -EINVAL; + } + + if (!HINIC5_BOND_ID_IS_VALID(bond_id)) { + pr_warn("hinic5_bond: user:%u Invalid bond id:%u to delete\n", user, bond_id); + return -EINVAL; + } + + mutex_lock(&g_bond_mutex); + if (!BDEV_IS_VALID(bond_id)) + err = -ENODEV; + else + bond_dev_user_detach(bond_mngr.bond_dev[bond_id], user, &lock_freed); + + if (!lock_freed) + mutex_unlock(&g_bond_mutex); + return err; +} +EXPORT_SYMBOL(hinic5_bond_detach); + +void hinic5_bond_clean_user(enum hinic5_bond_user user) +{ + int i = 0; + bool lock_freed = false; + + if (user >= HINIC5_BOND_USER_NUM) { + pr_err("Bond clean user num error: %u\n", user); + return; + } + + mutex_lock(&g_bond_mutex); + for (i = HINIC5_BOND_START_ID; i < HINIC5_MAX_BODN_ID_NUM; i++) { + if (BDEV_IS_VALID(i)) { + 
bond_dev_user_detach(bond_mngr.bond_dev[i], user, &lock_freed); + if (lock_freed) { + mutex_lock(&g_bond_mutex); + lock_freed = false; + } + } + } + if (!lock_freed) + mutex_unlock(&g_bond_mutex); +} +EXPORT_SYMBOL(hinic5_bond_clean_user); + +int hinic5_bond_get_uplink_id(u16 bond_id, u32 *uplink_id) +{ + if (!HINIC5_BOND_ID_IS_VALID(bond_id) || !uplink_id) { + pr_warn("hinic5_bond: Invalid args, bond id: %u, uplink: %d\n", + bond_id, !!uplink_id); + return -EINVAL; + } + + mutex_lock(&g_bond_mutex); + if (BDEV_IS_VALID(bond_id)) + *uplink_id = bond_gen_uplink_id(bond_mngr.bond_dev[bond_id]); + mutex_unlock(&g_bond_mutex); + + return 0; +} +EXPORT_SYMBOL(hinic5_bond_get_uplink_id); + +int hinic5_bond_register_service_func(enum hinic5_bond_user user, struct bond_srv_func *func) +{ + if (user >= HINIC5_BOND_USER_NUM || func == NULL) + return -EINVAL; + + mutex_lock(&g_bond_event_func_mutex); + g_bond_event_func[user][BOND_BEFORE_ACTIVE] = func->before_active; + g_bond_event_func[user][BOND_AFTER_ACTIVE] = func->after_active; + g_bond_event_func[user][BOND_BEFORE_MODIFY] = func->before_modify; + g_bond_event_func[user][BOND_AFTER_MODIFY] = func->after_modify; + g_bond_event_func[user][BOND_BEFORE_DEACTIVE] = func->before_deactive; + g_bond_event_func[user][BOND_AFTER_DEACTIVE] = func->after_deactive; + mutex_unlock(&g_bond_event_func_mutex); + + mutex_lock(&g_bond_attach_func_mutex); + g_bond_attach_func[user] = func->can_attach; + mutex_unlock(&g_bond_attach_func_mutex); + + return 0; +} +EXPORT_SYMBOL(hinic5_bond_register_service_func); + +int hinic5_bond_unregister_service_func(enum hinic5_bond_user user) +{ + if (user >= HINIC5_BOND_USER_NUM) { + return -EINVAL; + } + + mutex_lock(&g_bond_event_func_mutex); + g_bond_event_func[user][BOND_BEFORE_ACTIVE] = NULL; + g_bond_event_func[user][BOND_AFTER_ACTIVE] = NULL; + g_bond_event_func[user][BOND_BEFORE_MODIFY] = NULL; + g_bond_event_func[user][BOND_AFTER_MODIFY] = NULL; + g_bond_event_func[user][BOND_BEFORE_DEACTIVE] 
= NULL; + g_bond_event_func[user][BOND_AFTER_DEACTIVE] = NULL; + mutex_unlock(&g_bond_event_func_mutex); + + mutex_lock(&g_bond_attach_func_mutex); + g_bond_attach_func[user] = NULL; + mutex_unlock(&g_bond_attach_func_mutex); + + return 0; +} +EXPORT_SYMBOL(hinic5_bond_unregister_service_func); + +int hinic5_bond_get_slaves(u16 bond_id, struct hinic5_bond_info_s *info) +{ + struct bond_tracker *tracker = NULL; + int size; + int i; + + if (!info || !HINIC5_BOND_ID_IS_VALID(bond_id)) { + pr_warn("hinic5_bond: Invalid args, info: %d, bond id: %u\n", + !!info, bond_id); + return -EINVAL; + } + + size = ARRAY_LEN(info->slaves_name); + if (size < BOND_PORT_MAX_NUM) { + pr_warn("hinic5_bond: Invalid args, size: %u\n", + size); + return -EINVAL; + } + + mutex_lock(&g_bond_mutex); + if (!BDEV_IS_VALID(bond_id)) { + mutex_unlock(&g_bond_mutex); + return 0; + } + info->slaves = bond_mngr.bond_dev[bond_id]->bond_attr.slaves; + tracker = &bond_mngr.bond_dev[bond_id]->tracker; + info->cnt = 0; + for (i = 0; i < BOND_PORT_MAX_NUM; i++) { + if ((BITMAP_JUDGE(info->slaves, i) != 0) && tracker->ndev[i]) { + if (strlen(tracker->ndev[i]->name) >= sizeof(info->slaves_name[0])) { + bond_master_err(bond_mngr.bond_dev[bond_id]->bond->dev, + "hinic5_bond: port name too long: %s (max %zu)\n", + tracker->ndev[i]->name, sizeof(info->slaves_name[0]) - 1); + mutex_unlock(&g_bond_mutex); + return -EINVAL; + } + strncpy(info->slaves_name[info->cnt], tracker->ndev[i]->name, sizeof(info->slaves_name[0])); + info->cnt++; + } + } + mutex_unlock(&g_bond_mutex); + return 0; +} +EXPORT_SYMBOL(hinic5_bond_get_slaves); + +struct net_device *hinic5_bond_get_netdev_by_portid(const char *bond_name, u8 port_id) +{ + struct hinic5_bond_dev *bdev = NULL; + + if (!bond_name || port_id >= BOND_PORT_MAX_NUM) { + return NULL; + } + + mutex_lock(&g_bond_mutex); + bdev = bond_dev_by_name(bond_name); + if (bdev == NULL) { + mutex_unlock(&g_bond_mutex); + return NULL; + } + mutex_unlock(&g_bond_mutex); + return 
bdev->tracker.ndev[port_id]; /* NOTE(review): read after g_bond_mutex was dropped — bdev may be freed concurrently; confirm lifetime */
+}
+EXPORT_SYMBOL(hinic5_bond_get_netdev_by_portid);
+
+/* Snapshot a bond's tracker (under its spinlock) by bond netdev name */
+int hinic5_get_bond_tracker_by_name(const char *name, struct bond_tracker *tracker)
+{
+	struct hinic5_bond_dev *bdev = NULL;
+	int i;
+
+	if (!name || !tracker)
+		return -EINVAL;
+
+	mutex_lock(&g_bond_mutex);
+	for (i = HINIC5_BOND_START_ID; i < HINIC5_MAX_BODN_ID_NUM; i++) {
+		if (BDEV_IS_VALID(i) && (strcmp(bond_mngr.bond_dev[i]->name, name) == 0)) {
+			bdev = bond_mngr.bond_dev[i];
+			spin_lock(&bdev->lock);
+			*tracker = bdev->tracker;
+			spin_unlock(&bdev->lock);
+			mutex_unlock(&g_bond_mutex);
+			return 0;
+		}
+	}
+	mutex_unlock(&g_bond_mutex);
+	return -ENODEV;
+}
+EXPORT_SYMBOL(hinic5_get_bond_tracker_by_name);
+
+int hinic5_bond_init(void)
+{
+	int ret = init_srcu_struct(&bdev_srcu);
+	if (ret != 0) {
+		pr_err("Failed to initialize bdev_srcu\n");
+		return ret;	/* propagate the real error, not a fixed -ENOMEM */
+	}
+
+	/* kcalloc: zeroed, overflow-checked array allocation */
+	bond_mngr.bond_dev = kcalloc(HINIC5_MAX_BODN_ID_NUM,
+				     sizeof(struct hinic5_bond_dev *), GFP_KERNEL);
+	if (bond_mngr.bond_dev == NULL) {
+		pr_err("Bond dev kzalloc failed\n");
+		ret = -ENOMEM;
+		goto srcu_err;
+	}
+	INIT_LIST_HEAD(&bond_mngr.bond_chip_list);
+
+	ret = bond_enable_netdev_event();
+	if (ret != 0) {
+		pr_err("Bond enable netdev event err: %d\n", ret);
+		goto bond_dev_err;
+	}
+
+	return 0;
+
+bond_dev_err:
+	kfree(bond_mngr.bond_dev);
+	bond_mngr.bond_dev = NULL;
+srcu_err:
+	/* undo init_srcu_struct() on every failure path (was leaked before) */
+	cleanup_srcu_struct(&bdev_srcu);
+	return ret;
+}
+
+void hinic5_bond_deinit(void)
+{
+	bond_disable_netdev_event();
+	if (bond_mngr.bond_dev) {
+		kfree(bond_mngr.bond_dev);
+		bond_mngr.bond_dev = NULL;
+	}
+	cleanup_srcu_struct(&bdev_srcu);
+}
\ No newline at end of file
diff --git a/hinic5/src/dpu_platform_library/host/cfm/bond/hinic5_bond_event.c b/hinic5/src/dpu_platform_library/host/cfm/bond/hinic5_bond_event.c
new file mode 100644
index 00000000..0b865e7d
--- /dev/null
+++ b/hinic5/src/dpu_platform_library/host/cfm/bond/hinic5_bond_event.c
@@ -0,0 +1,422 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2026-2026. All rights reserved. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [BOND]" fmt + +#include <net/sock.h> +#include <net/bonding.h> +#include <net/netlink.h> +#include <linux/mutex.h> +#include <linux/rtnetlink.h> +#include <linux/net.h> +#include <linux/netdevice.h> + +#include "hinic5_hw.h" +#include "hinic5_lld.h" +#include "cfg_mgmt_mpu_cmd_defs.h" +#include "hinic5_srv_nic.h" +#include "hinic5_nic_dev.h" +#include "bond_common_defs.h" +#include "hinic5_bond.h" +#include "hinic5_bond_inner.h" + +static u8 bond_get_slaves_bitmap(struct hinic5_bond_dev *bdev, struct bonding *bond) +{ + struct slave *slave = NULL; + struct list_head *iter = NULL; + struct hinic5_lld_dev *lld_dev = NULL; + u8 bitmap = 0; + u8 port_id; + + rcu_read_lock(); + bond_for_each_slave_rcu(bond, slave, iter) { + lld_dev = hinic5_get_lld_dev_by_netdev(slave->dev); + if (lld_dev == NULL || hinic5_func_type(lld_dev->hwdev) == TYPE_VF) + continue; + + port_id = hinic5_physical_port_id(lld_dev->hwdev); + BITMAP_SET(bitmap, port_id); + (void)iter; + } + rcu_read_unlock(); + + return bitmap; +} + +static void bond_update_attr(struct hinic5_bond_dev *bdev, struct bonding *bond) +{ + spin_lock(&bdev->lock); + bdev->new_attr.bond_mode = (u16)bond->params.mode; + bdev->new_attr.bond_id = bdev->bond_attr.bond_id; + bdev->new_attr.up_delay = (u16)bond->params.updelay; + bdev->new_attr.down_delay = (u16)bond->params.downdelay; + bdev->new_attr.slaves = 0; + bdev->new_attr.active_slaves = 0; + bdev->new_attr.lacp_collect_slaves = 0; + bdev->new_attr.first_roce_func = BOND_DEFAULT_ROCE_FUNC; + + /* Only support L2/L34/L23 three policy */ + if (bond->params.xmit_policy <= BOND_XMIT_POLICY_LAYER23) { + bdev->new_attr.xmit_hash_policy = (u8)bond->params.xmit_policy; + } else { + bond_master_warn(bdev->bond->dev, "Invalid hash policy %u (not layer2/34/23), defaulting to layer2\n", + (u8)bond->params.xmit_policy); + bdev->new_attr.xmit_hash_policy = BOND_XMIT_POLICY_LAYER2; + } + + bdev->new_attr.slaves = bond_get_slaves_bitmap(bdev, 
bond); + spin_unlock(&bdev->lock); +} + +static u8 bond_get_netdev_idx(const struct hinic5_bond_dev *bdev, + const struct net_device *ndev) +{ + u8 i; + + for (i = 0; i < BOND_PORT_MAX_NUM; i++) { + if (bdev->tracker.ndev[i] == ndev) + return i; + } + + return PORT_INVALID_ID; +} + +static void bond_pf_bitmap_set(struct hinic5_bond_dev *bdev, struct bond_attr *attr, u8 port_id) +{ + struct hinic5_lld_dev *lld_dev = NULL; + u8 pf_id; + + lld_dev = hinic5_get_lld_dev_by_netdev(bdev->tracker.ndev[port_id]); + if (!lld_dev) { + pr_err("hinic5_bond: Failed to get lld dev by netdev\n"); + return; + } + + pf_id = hinic5_pf_id_of_vf(lld_dev->hwdev); + BITMAP_SET(attr->bond_pf_bitmap, pf_id); +} + +static void bond_dev_untrack_port(struct hinic5_bond_dev *bdev, u8 port_id) +{ + u32 track_cnt = 0; + const struct net_device *untrack_ndev = NULL; + + spin_lock(&bdev->lock); + if (bdev->tracker.ndev[port_id] != NULL) { + untrack_ndev = bdev->tracker.ndev[port_id]; + track_cnt = --bdev->tracker.cnt; + bdev->tracker.ndev[port_id] = NULL; + } + spin_unlock(&bdev->lock); + if (track_cnt == 0) + bond_dev_free_chip_bond_id(bdev); + if (untrack_ndev) + bond_master_info(bdev->bond->dev, "untrack port:%u, untrack ndev: %s, tracker cnt: %u\n", + port_id, untrack_ndev->name, track_cnt); +} + +static void bond_slave_event(struct hinic5_bond_dev *bdev, struct slave *slave) +{ + /* 兼容低版本内核socket监听事件动态添加slave PF */ + u8 port_id = bond_get_netdev_idx(bdev, slave->dev); + if (port_id == PORT_INVALID_ID) + port_id = bond_dev_track_port(bdev, slave->dev); + if (port_id == PORT_INVALID_ID) + return; + + spin_lock(&bdev->lock); + bdev->tracker.netdev_state[port_id].link_up = bond_slave_is_up(slave); + bdev->tracker.netdev_state[port_id].tx_enabled = bond_slave_is_up(slave) && bond_is_active_slave(slave); + spin_unlock(&bdev->lock); + /* 如果bdev dead则终止流程 */ + if (unlikely(READ_ONCE(bdev->dead))) + return; + queue_delayed_work(bdev->wq, &bdev->bond_work, 0); +} + +static bool 
bond_eval_bonding_stats(const struct hinic5_bond_dev *bdev, struct bonding *bond) +{ + return bdev->tracker.cnt > 0; +} + +static void bond_master_event(struct hinic5_bond_dev *bdev, struct bonding *bond) +{ + u8 port_id = 0; + int i = 0, cnt = 0; + struct slave *slave = NULL; + struct list_head *iter = NULL; + + bool slave_is_up[BOND_PORT_MAX_NUM] = {false}; + bool slave_is_active[BOND_PORT_MAX_NUM] = {false}; + struct net_device *slave_ndev[BOND_PORT_MAX_NUM]; /* 暂存网卡设备指针*/ + + /* rcu锁内不能有mutex */ + rcu_read_lock(); + bond_for_each_slave_rcu(bond, slave, iter) { + if (cnt >= BOND_PORT_MAX_NUM) + break; + slave_is_up[cnt] = bond_slave_is_up(slave); + slave_is_active[cnt] = bond_is_active_slave(slave); + slave_ndev[cnt] = slave->dev; + dev_hold(slave_ndev[cnt++]); + (void)iter; + } + rcu_read_unlock(); + + /* 动态添加 Slave PF 场景 */ + for (i = 0; i < cnt; ++i) { + port_id = bond_get_netdev_idx(bdev, slave_ndev[i]); + if (port_id == PORT_INVALID_ID) { + port_id = bond_dev_track_port(bdev, slave_ndev[i]); + if (port_id == PORT_INVALID_ID) + continue; + + spin_lock(&bdev->lock); + bdev->tracker.netdev_state[port_id].link_up = slave_is_up[i]; + bdev->tracker.netdev_state[port_id].tx_enabled = + slave_is_up[i] && slave_is_active[i]; + spin_unlock(&bdev->lock); + } + } + while (cnt != 0) + dev_put(slave_ndev[--cnt]); + /* TODO: 为了逻辑完整性,自旋锁修改bdev需要判断bdev->dead状态, + 该问题在attach/detach新方案修改时统一修改 */ + spin_lock(&bdev->lock); + bdev->tracker.is_bonded = bond_eval_bonding_stats(bdev, bond); + spin_unlock(&bdev->lock); + + /* 动态删除 Slave PF 场景 */ + for (port_id = 0; port_id < BOND_PORT_MAX_NUM; port_id++) { + /* 新 bond_attr 没有 slave pf 但是旧 bond_attr 有 slave pf, + 需要删除旧 bond_attr 的 slave PF */ + if (BITMAP_JUDGE(bdev->new_attr.slaves, port_id) == 0) { + if (BITMAP_JUDGE(bdev->bond_attr.slaves, port_id) != 0) { + bond_dev_untrack_port(bdev, port_id); + } + continue; + } + bond_pf_bitmap_set(bdev, &bdev->new_attr, port_id); + } + /* 如果bdev dead则终止流程 */ + if 
(unlikely(READ_ONCE(bdev->dead))) + return; + queue_delayed_work(bdev->wq, &bdev->bond_work, 0); +} + +void bond_handle_rtnl_event(struct net_device *ndev) +{ + struct hinic5_bond_dev *bdev = NULL; + struct bonding *bond = NULL; + struct slave *slave = NULL; + struct hinic5_lld_dev *lld_dev = NULL; + int srcu_idx = 0; + + if (netif_is_bond_master(ndev)) { + bond = netdev_priv(ndev); + bdev = bond_get_bdev(bond); + } else if (netif_is_bond_slave(ndev)) { + lld_dev = hinic5_get_lld_dev_by_netdev(ndev); + if (!lld_dev || hinic5_func_type(lld_dev->hwdev) == TYPE_VF) + return; + slave = bond_slave_get_rtnl(ndev); + if (slave) { + bond = bond_get_bond_by_slave(slave); + bdev = bond_get_bdev(bond); + } + } + if (bond == NULL || bdev == NULL) + return; + + /* TODO: 临时解决 bdev 异步时序问题 */ + srcu_idx = srcu_read_lock(&bdev_srcu); + if (!bdev || unlikely(READ_ONCE(bdev->dead))) { + srcu_read_unlock(&bdev_srcu, srcu_idx); + + return; + } + + bond_update_attr(bdev, bond); + if (slave) + bond_slave_event(bdev, slave); + else + bond_master_event(bdev, bond); + + srcu_read_unlock(&bdev_srcu, srcu_idx); +} + +/* 如果服务注册了attach_func会尝试绑定bond */ +void bond_try_attach_user(struct net_device *ndev) +{ + u32 user; + struct bonding *bond = NULL; + struct hinic5_bond_dev *bdev = NULL; + + if (!netif_is_bond_master(ndev)) + return; + + bond = netdev_priv(ndev); + /* if slave invalid or not exist, don't alloc bdev */ + if (!hinic5_bond_slave_is_match(bond)) { + bond_master_warn(ndev, "Bond try attach user slaves invalid or not exist\n"); + return; + } + + if (bond->params.mode != BOND_MODE_ACTIVEBACKUP && + bond->params.mode != BOND_MODE_XOR && + bond->params.mode != BOND_MODE_8023AD) { + return; + } + bdev = bond_get_bdev(bond); + for (user = HINIC5_BOND_USER_OVS; user < HINIC5_BOND_USER_NUM; user++) { + if (bdev && bdev->slot_used[user] != 0) + continue; + if (bond_call_srv_attach_func(user, bond)) { + bond_master_info(bond->dev, "bond try attach user:%d name %s\n", + user, bond->dev->name); 
+ hinic5_bond_event_attach(bond, user); + } + } +} + +#if defined(HAVE_NETDEV_CHANGEUPPER) + +int bond_notifier_netdev_event(struct notifier_block *self, unsigned long event, void *ptr) +{ + struct net_device *ndev = NULL; + struct netdev_notifier_changeupper_info *info = NULL; + struct net_device *upper_dev = NULL; + + switch ((u32)event) { + case NETDEV_CHANGEUPPER: + info = (struct netdev_notifier_changeupper_info *)ptr; + + upper_dev = info->upper_dev; + if (!virt_addr_valid((void *)upper_dev)) /* 低内核版本注册回调但dev可能未完成注册的场景 */ + break; + + bond_try_attach_user(upper_dev); + bond_handle_rtnl_event(upper_dev); + break; + case NETDEV_UP: + case NETDEV_DOWN: + case NETDEV_CHANGEINFODATA: + case NETDEV_CHANGELOWERSTATE: + ndev = netdev_notifier_info_to_dev(ptr); + if (!virt_addr_valid((void *)ndev)) + break; + + bond_handle_rtnl_event(ndev); + break; + default: + return NOTIFY_DONE; + } + + return NOTIFY_DONE; +} + +struct notifier_block g_bond_nb = { + .notifier_call = bond_notifier_netdev_event, +}; + +int bond_enable_netdev_event(void) +{ + int ret; + + ret = register_netdevice_notifier(&g_bond_nb); + if (ret != 0) { + pr_err("bond register_netdevice_notifier failed\n"); + return ret; + } + + return 0; +} + +void bond_disable_netdev_event(void) +{ + (void)unregister_netdevice_notifier(&g_bond_nb); +} + +#else + +#if defined(HAVE_SK_DATE_READY_BYTES) +void bond_rtnl_data_ready(struct sock *sk, int bytes) +#else +void bond_rtnl_data_ready(struct sock *sk) +#endif +{ + struct net_device *ndev = NULL; + struct ifinfomsg *ifinfo = NULL; + struct nlmsghdr *hdr = NULL; + struct sk_buff *skb = NULL; + int err = 0; + + skb = skb_recv_datagram(sk, 0, 0, &err); + if (err != 0 || !skb) + return; + + hdr = (struct nlmsghdr *)skb->data; + if (!hdr || !((skb->len > (sizeof(struct nlmsghdr) + sizeof(struct ifinfomsg))) && + hdr->nlmsg_len >= sizeof(struct nlmsghdr) && hdr->nlmsg_len <= skb->len) || + hdr->nlmsg_type != RTM_NEWLINK || rtnl_is_locked() == 0) { + goto free_skb; + } + 
+ ifinfo = nlmsg_data(hdr); + ndev = dev_get_by_index(&init_net, ifinfo->ifi_index); + if (ndev) { + bond_try_attach_user(ndev); + bond_handle_rtnl_event(ndev); + dev_put(ndev); + } + +free_skb: + kfree_skb(skb); +} + +int bond_enable_netdev_event(void) +{ + struct sockaddr_nl addr = { + .nl_family = AF_NETLINK, + .nl_groups = RTNLGRP_LINK, + }; + int err; + struct socket **rtnl_sock = hinic5_get_bond_mngr_sock_addr(); + +#if defined (HAVE_SOCK_CREATE_KERN_NET) + err = sock_create_kern(&init_net, AF_NETLINK, SOCK_DGRAM, NETLINK_ROUTE, + rtnl_sock); +#else + err = sock_create_kern(AF_NETLINK, SOCK_DGRAM, NETLINK_ROUTE, + rtnl_sock); +#endif + if (err != 0) { + pr_err("hinic5_bond: Couldn't create rtnl socket.\n"); + *rtnl_sock = NULL; + return err; + } + + (*rtnl_sock)->sk->sk_data_ready = bond_rtnl_data_ready; + (*rtnl_sock)->sk->sk_allocation = GFP_KERNEL; + + err = kernel_bind(*rtnl_sock, (struct sockaddr *)(u8 *)&addr, sizeof(addr)); + if (err != 0) { + pr_err("hinic5_bond: Couldn't bind rtnl socket.\n"); + sock_release(*rtnl_sock); + *rtnl_sock = NULL; + } + + return err; +} + +void bond_disable_netdev_event(void) +{ + struct socket *rtnl_sock = NULL; + + rtnl_sock = hinic5_get_bond_mngr_sock(); + if (rtnl_sock != NULL) + sock_release(rtnl_sock); +} + +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) */ \ No newline at end of file diff --git a/hinic5/src/dpu_platform_library/host/cfm/bond/hinic5_bond_inner.h b/hinic5/src/dpu_platform_library/host/cfm/bond/hinic5_bond_inner.h new file mode 100644 index 00000000..6454a642 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/cfm/bond/hinic5_bond_inner.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ */ + +#ifndef HINIC5_BOND_INNER_H +#define HINIC5_BOND_INNER_H + +#include <net/bonding.h> +#include <linux/list.h> +#include <linux/srcu.h> +#include "hinic5_bond.h" + +extern struct srcu_struct bdev_srcu; + +#define bond_slave_info(bond_dev, slave_dev, fmt, ...) \ + netdev_info(bond_dev, "[BOND] (slave %s): " fmt, (slave_dev)->name, ##__VA_ARGS__) +#define bond_slave_warn(bond_dev, slave_dev, fmt, ...) \ + netdev_warn(bond_dev, "[BOND] (slave %s): " fmt, (slave_dev)->name, ##__VA_ARGS__) +#define bond_slave_dbg(bond_dev, slave_dev, fmt, ...) \ + netdev_dbg(bond_dev, "[BOND] (slave %s): " fmt, (slave_dev)->name, ##__VA_ARGS__) +#define bond_slave_err(bond_dev, slave_dev, fmt, ...) \ + netdev_err(bond_dev, "[BOND] (slave %s): " fmt, (slave_dev)->name, ##__VA_ARGS__) + +#define bond_master_info(bond_dev, fmt, ...) \ + netdev_info(bond_dev, "[BOND]" fmt, ##__VA_ARGS__) +#define bond_master_warn(bond_dev, fmt, ...) \ + netdev_warn(bond_dev, "[BOND]" fmt, ##__VA_ARGS__) +#define bond_master_dbg(bond_dev, fmt, ...) \ + netdev_dbg(bond_dev, "[BOND]" fmt, ##__VA_ARGS__) +#define bond_master_err(bond_dev, fmt, ...) 
\ + netdev_err(bond_dev, "[BOND]" fmt, ##__VA_ARGS__) + +#define PORT_INVALID_ID 0xFF + +#define BITMAP_SET(bm, bit) ((bm) |= (typeof(bm))(1U << (bit))) +#define BITMAP_CLR(bm, bit) ((bm) &= ~((typeof(bm))(1U << (bit)))) +#define BITMAP_JUDGE(bm, bit) ((bm) & (typeof(bm))(1U << (bit))) + +enum bond_dev_status { + BOND_DEV_STATUS_IDLE, + BOND_DEV_STATUS_ACTIVATED, +}; + +#ifndef IFNAMSIZ +#define IFNAMSIZ 16UL +#endif +#define HINIC5_BOND_START_ID 1 +#define HINIC5_MAX_BODN_ID_NUM 64 /* MAX BOND ID for driver */ +#define HINIC5_INVALID_BOND_ID 0xFF + +#define HINIC5_BOND_ID_IS_VALID(_id) (((_id) >= HINIC5_BOND_START_ID) && ((_id) < HINIC5_MAX_BODN_ID_NUM)) +#define HINIC5_BOND_ID_IS_INVALID(_id) (!(HINIC5_BOND_ID_IS_VALID(_id))) + +struct hinic5_bond_dev { + char name[BOND_NAME_MAX_LEN]; + char chip_name[IFNAMSIZ]; + struct bond_attr bond_attr; + struct bond_attr new_attr; + struct bonding *bond; + struct kref ref; + enum bond_dev_status status; + u8 slot_used[HINIC5_BOND_USER_NUM]; + struct workqueue_struct *wq; + struct delayed_work bond_work; + struct bond_tracker tracker; + spinlock_t lock; /* lock for change status */ + u32 service_en_bitmap; + u32 chip_bond_id; + bool dead; /* check bdev liveness under SRCU */ +}; + +#define HINIC5_MAX_BOND_ID_PER_CARD 5 /* MAX BOND ID for per chip */ +struct hinic5_bond_chip { + u8 chip_bond_id[HINIC5_MAX_BOND_ID_PER_CARD]; + char chip_name[IFNAMSIZ]; + struct list_head node; + u32 bond_num; +}; + +void bond_disable_netdev_event(void); +int bond_enable_netdev_event(void); + +struct socket **hinic5_get_bond_mngr_sock_addr(void); +struct socket *hinic5_get_bond_mngr_sock(void); + +int hinic5_bond_event_attach(struct bonding *bond, enum hinic5_bond_user user); +bool bond_call_srv_attach_func(enum hinic5_bond_user user, struct bonding *bond); +void bond_handle_rtnl_event(struct net_device *ndev); +u8 bond_dev_track_port(struct hinic5_bond_dev *bdev, struct net_device *ndev); +struct hinic5_bond_dev *bond_get_bdev(const struct 
bonding *bond); +bool hinic5_bond_slave_is_match(struct bonding *bond); +void bond_dev_free_chip_bond_id(struct hinic5_bond_dev *bdev); + +#endif \ No newline at end of file diff --git a/hinic5/src/dpu_platform_library/host/cfm/fast_msg/hinic5_fast_msg.c b/hinic5/src/dpu_platform_library/host/cfm/fast_msg/hinic5_fast_msg.c new file mode 100644 index 00000000..36685308 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/cfm/fast_msg/hinic5_fast_msg.c @@ -0,0 +1,253 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. + * Description: hisdk5_fast_msg.c + * Author: - + * Create: + */ + +#include <linux/module.h> +#include <linux/moduleparam.h> + +#include "ossl_knl.h" +#include "fast_msg_common_define.h" +#include "hinic5_comm_cmd.h" +#include "hinic5_hwdev.h" +#include "hinic5_cmdq.h" + +#include "hinic5_fast_msg.h" + +int hinic5_fast_msg_register_cb(void *hwdev, u8 mod, hinic5_fast_msg_rq_cb callback, void *pri_data) +{ + struct hinic5_hwdev *dev = hwdev; + struct hisdk5_fast_msg_to_func *fast_msg_to_func = NULL; + + if (mod >= HINIC5_MOD_MAX || !hwdev) + return -EINVAL; + + if (!dev->fast_msg_to_func) + return -EINVAL; + + fast_msg_to_func = dev->fast_msg_to_func; + + if (!fast_msg_to_func->fast_msg_rq_cb[mod]) { + fast_msg_to_func->fast_msg_rq_cb[mod] = callback; + fast_msg_to_func->fast_msg_rq_data[mod] = pri_data; + } + return 0; +} +EXPORT_SYMBOL(hinic5_fast_msg_register_cb); + +void hinic5_fast_msg_unregister_cb(void *hwdev, u8 mod) +{ + struct hinic5_hwdev *dev = hwdev; + struct hisdk5_fast_msg_to_func *fast_msg_to_func = NULL; + + if (mod >= HINIC5_MOD_MAX || !hwdev) + return; + + if (!dev->fast_msg_to_func) + return; + + fast_msg_to_func = dev->fast_msg_to_func; + + if (fast_msg_to_func->fast_msg_rq_cb[mod]) { + fast_msg_to_func->fast_msg_rq_cb[mod] = NULL; + fast_msg_to_func->fast_msg_rq_data[mod] = NULL; + } +} +EXPORT_SYMBOL(hinic5_fast_msg_unregister_cb); + +void hinic5_fast_msg_clear_bitmap(void *hwdev, u32 
rq_offset) +{ + struct hinic5_hwdev *dev = hwdev; + struct hinic5_cmd_buf *cmd_buf = NULL; + hisdk5_fast_msg_buf *fast_msg_buf = NULL; + u64 out_parm; + int err; + + if (!hwdev) + return; + + cmd_buf = hinic5_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + sdk_err(dev->dev_hdl, "Allocate clear bit map cmd buf failed\n"); + return; + } + fast_msg_buf = (struct hisdk5_fast_msg_buf *)cmd_buf->buf; + fast_msg_buf->rq_offset = rq_offset; + + hinic5_cpu_to_be32((void *)fast_msg_buf, sizeof(u32)); + + cmd_buf->size = sizeof(u32); + + err = hinic5_send_fast_msg_need_resp(hwdev, HINIC5_MOD_COMM, + COMM_CMD_UCODE_FAST_MSG_CLEAR, cmd_buf, &out_parm); + if (err != 0 || out_parm != 0) { + sdk_err(dev->dev_hdl, "Failed to get fast msg cap, err = 0x%x, out_parm = 0x%llx\n", + err, out_parm); + } + + hinic5_free_cmd_buf(hwdev, cmd_buf); +} + +static struct hisdk5_fast_msg_buf *hinic5_get_rq_msg(struct hisdk5_fast_msg_to_func *fast_msg, u32 rq_offset) +{ + u32 msg_num_per_page; + u32 page_index; + u32 page_offset; + + msg_num_per_page = fast_msg->fast_msg_rq_page_size / FAST_MSG_ENTRY_SIZE; + page_index = rq_offset / msg_num_per_page; + page_offset = rq_offset % msg_num_per_page; + return (struct hisdk5_fast_msg_buf *) + ((u8 *)fast_msg->rq_mem[page_index] + page_offset * FAST_MSG_ENTRY_SIZE_B); +} + +static void hinic5_fast_msg_recv_msg(struct hisdk5_fast_msg_to_func *fast_msg, u32 rq_offset) +{ + struct hisdk5_fast_msg_buf *rq_msg = hinic5_get_rq_msg(fast_msg, rq_offset); + struct hinic5_hwdev *hwdev = fast_msg->hwdev; + u8 mod = rq_msg->fast_msg_header.mod; + + /* head已经在上半部转换过大小端,这里仅转换data */ + hinic5_be32_to_cpu(rq_msg->fast_msg_data, rq_msg->fast_msg_header.data_len); + + if (fast_msg->fast_msg_rq_cb[mod]) { + fast_msg->fast_msg_rq_cb[mod](rq_msg, fast_msg->fast_msg_rq_data[mod]); + } else { + sdk_err(hwdev->dev_hdl, + "fast_msg_rq_cb is NULL, src_func: 0x%x, mod: %u, cmd: %u, " \ + "data_len: %u, data: 0x%llx\n", + rq_msg->fast_msg_header.src_func_id, mod, + 
rq_msg->fast_msg_header.cmd, rq_msg->fast_msg_header.data_len, + *(u64 *)rq_msg->fast_msg_data); + } + + hinic5_fast_msg_clear_bitmap(fast_msg->hwdev, rq_offset); +} + +void hinic5_fast_msg_recv_handler(struct work_struct *work) +{ + struct hisdk5_fast_msg_recv_work *recv_work = + container_of(work, struct hisdk5_fast_msg_recv_work, work); + struct hisdk5_fast_msg_recv_entry *entry = NULL; + struct hisdk5_fast_msg_recv_entry *temp = NULL; + struct hinic5_hwdev *hwdev = recv_work->fast_msg_to_func->hwdev; + + spin_lock_bh(&recv_work->lock); + list_for_each_entry_safe(entry, temp, &recv_work->msg_head, entry) { + list_del_init(&entry->entry); + spin_unlock_bh(&recv_work->lock); + + if (entry->type == MSG_WORK_ENTRY_RECV_FAST_MSG) { + hinic5_fast_msg_recv_msg(recv_work->fast_msg_to_func, entry->rq_offset); + } else if (entry->type == MSG_WORK_ENTRY_FORWARDING) { + entry->forward_cb(entry->forward_data); + kfree(entry); + } else { + sdk_err(hwdev->dev_hdl, "Invalid msg type: 0x%x\n", entry->type); + } + + spin_lock_bh(&recv_work->lock); + } + spin_unlock_bh(&recv_work->lock); +} + +void hinic5_fast_msg_rq_handler(void *pri_handle, u32 ceqe_data) +{ + struct hisdk5_fast_msg_to_func *fast_msg = pri_handle; + struct hisdk5_fast_msg_recv_work *recv_work = NULL; + struct hisdk5_fast_msg_buf *rq_msg = NULL; + struct hinic5_hwdev *hwdev = NULL; + u32 rq_offset, work_id; + + if (!pri_handle) + return; + + hwdev = fast_msg->hwdev; + rq_offset = ceqe_data & FAST_MSG_RQ_OFFSET_MASK; + if (rq_offset >= fast_msg->fast_msg_rq_depth) { + sdk_err(hwdev->dev_hdl, "rq offset is invalid, rq_offset: 0x%x, depth: 0x%x\n", + rq_offset, fast_msg->fast_msg_rq_depth); + return; + } + + rq_msg = hinic5_get_rq_msg(fast_msg, rq_offset); + /* 需使用src_func_id,先转换header */ + hinic5_be32_to_cpu(rq_msg, sizeof(hisdk5_fast_msg_header)); + + work_id = rq_msg->fast_msg_header.src_func_id % fast_msg->num_concurrent_work; + recv_work = &fast_msg->recv_concurrent_work[work_id]; + + 
spin_lock(&recv_work->lock); + if (list_empty(&fast_msg->recv_entries[rq_offset].entry) != 0) { + list_add_tail(&fast_msg->recv_entries[rq_offset].entry, &recv_work->msg_head); + spin_unlock(&recv_work->lock); + + queue_work(fast_msg->workq, &recv_work->work); + } else { + spin_unlock(&recv_work->lock); + sdk_err(hwdev->dev_hdl, "rq offset 0x%x, already in process\n", rq_offset); + } +} + +int hinic5_fast_msg_send(void *hwdev, struct hinic5_cmd_buf *cmd_buf, u64 *out_parm) +{ + struct hinic5_hwdev *dev = hwdev; + int err; + + if (!hwdev || !cmd_buf || !out_parm) + return -EINVAL; + + err = hinic5_send_fast_msg_need_resp(hwdev, HINIC5_MOD_COMM, + COMM_CMD_UCODE_FAST_MSG_CMD, cmd_buf, out_parm); + if (err != 0) + sdk_err(dev->dev_hdl, "Failed to send fast msg, ret = 0x%x\n", err); + + return err; +} +EXPORT_SYMBOL(hinic5_fast_msg_send); + +int hinic5_fast_msg_forward(void *hwdev, u16 src_func_id, void *data, hinic5_fast_msg_forward_cb callback) +{ + struct hisdk5_fast_msg_to_func *fast_msg = NULL; + struct hisdk5_fast_msg_recv_entry *recv_entry = NULL; + struct hisdk5_fast_msg_recv_work *recv_work = NULL; + struct hinic5_hwdev *dev = hwdev; + u32 work_id; + + if (!hwdev || !callback) + return -EINVAL; + + recv_entry = kzalloc(sizeof(*recv_entry), GFP_KERNEL); + if (!recv_entry) + return -ENOMEM; + + INIT_LIST_HEAD(&recv_entry->entry); + recv_entry->type = MSG_WORK_ENTRY_FORWARDING; + recv_entry->forward_data = data; + recv_entry->forward_cb = callback; + + fast_msg = dev->fast_msg_to_func; + work_id = src_func_id % fast_msg->num_concurrent_work; + recv_work = &fast_msg->recv_concurrent_work[work_id]; + spin_lock_bh(&recv_work->lock); + list_add_tail(&recv_entry->entry, &recv_work->msg_head); + spin_unlock_bh(&recv_work->lock); + + queue_work(fast_msg->workq, &recv_work->work); + + return 0; +} +EXPORT_SYMBOL(hinic5_fast_msg_forward); + +bool hinic5_support_fast_msg(void *hwdev) +{ + if (!hwdev) { + pr_err("Hwdev pointer is NULL for getting fast msg support 
capability\n"); + return false; + } + + return COMM_SUPPORT_FAST_MSG((struct hinic5_hwdev *)hwdev); +} +EXPORT_SYMBOL(hinic5_support_fast_msg); diff --git a/hinic5/src/dpu_platform_library/host/cfm/fast_msg/hinic5_fast_msg_init.c b/hinic5/src/dpu_platform_library/host/cfm/fast_msg/hinic5_fast_msg_init.c new file mode 100644 index 00000000..705ae85b --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/cfm/fast_msg/hinic5_fast_msg_init.c @@ -0,0 +1,294 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. + * Description: hinic5_fast_msg_init.c + * Author: - + * Create: + */ + +#include "ossl_knl.h" +#include "hinic5_hw.h" +#include "hinic5_hwdev.h" + +#include "mpu_inband_cmd.h" +#include "hinic5_fast_msg.h" +#include "hinic5_fast_msg_init.h" + +int hinic5_fast_msg_cap_get(struct hinic5_hwdev *hwdev, hisdk5_fast_msg_caps *caps) +{ + int err; + struct comm_cmd_fast_msg_cap fast_msg_cap; + u16 out_size; + + memset(&fast_msg_cap, 0, sizeof(fast_msg_cap)); + + fast_msg_cap.func_id = hinic5_global_func_id(hwdev); + out_size = sizeof(struct comm_cmd_fast_msg_cap); + + err = hinic5_msg_to_mgmt_sync(hwdev, HINIC5_MOD_COMM, COMM_MGMT_CMD_GET_FAST_MSG_CAP, + &fast_msg_cap, out_size, + &fast_msg_cap, &out_size, 0, HINIC5_CHANNEL_COMM); + if (err != 0 || out_size == 0 || fast_msg_cap.head.status != 0) { + sdk_err(hwdev->dev_hdl, + "Failed to get fast msg cap, ret = %d, status: 0x%x, out size: 0x%x\n", + err, fast_msg_cap.head.status, out_size); + return -EINVAL; + } + + caps->page_size = fast_msg_cap.fast_msg_page_size; + caps->depth = fast_msg_cap.fast_msg_depth; + hwdev->fast_msg_to_func->fast_msg_rq_depth = caps->depth; + + return err; +} + +void hinic5_fast_msg_rq_buf_deinit(struct hinic5_hwdev *hwdev) +{ + struct hisdk5_fast_msg_to_func *fast_msg_to_func = NULL; + u32 page_idx; + + if (!hwdev) + return; + + if (!hwdev->fast_msg_to_func) + return; + + fast_msg_to_func = hwdev->fast_msg_to_func; + + for (page_idx = 0; page_idx < 
fast_msg_to_func->fast_msg_rq_page_num; page_idx++) { + if (fast_msg_to_func->rq_mem[page_idx]) + dma_free_coherent( + hwdev->dev_hdl, + fast_msg_to_func->fast_msg_rq_page_size * FAST_MSG_ENTRY_UNIT, + fast_msg_to_func->rq_mem[page_idx], + fast_msg_to_func->rq_mem_paddr[page_idx] + ); + } +} + +void hinic5_fast_msg_clear_sml_table(struct hinic5_hwdev *hwdev) +{ + int err; + struct comm_cmd_clear_fast_msg_sml_table clear_info = {0}; + u16 out_size = sizeof(struct comm_cmd_clear_fast_msg_sml_table); + + clear_info.func_id = hinic5_global_func_id(hwdev); + err = hinic5_msg_to_mgmt_sync(hwdev, HINIC5_MOD_COMM, COMM_MGMT_CMD_CLEAR_FAST_MSG_SML, + &clear_info, sizeof(clear_info), + &clear_info, &out_size, 0, HINIC5_CHANNEL_COMM); + if (clear_info.head.status == HINIC5_MGMT_CMD_UNSUPPORTED) { + sdk_warn(hwdev->dev_hdl, "not support clear fastmsg sml table\n"); + return; + } + + if (err != 0 || out_size == 0 || clear_info.head.status != 0) { + sdk_err(hwdev->dev_hdl, + "Failed to clear fast msg sml table, ret = %d, status: 0x%x, " \ + "out size: 0x%x\n", + err, clear_info.head.status, out_size); + } +} + +int hinic5_fast_msg_rq_buf_init(struct hinic5_hwdev *hwdev, hisdk5_fast_msg_caps *caps) +{ + int err; + struct comm_cmd_set_fast_msg_rq_addr rq_addr; + u16 out_size; + u32 page_idx; + void *page_vaddr = NULL; + dma_addr_t page_paddr; + gfp_t gfp_hinic5_vram; + + memset(&rq_addr, 0, sizeof(rq_addr)); + + rq_addr.func_id = hinic5_global_func_id(hwdev); + rq_addr.page_num = (caps->depth * FAST_MSG_ENTRY_SIZE) / caps->page_size; + hwdev->fast_msg_to_func->fast_msg_rq_page_num = rq_addr.page_num; + hwdev->fast_msg_to_func->fast_msg_rq_page_size = caps->page_size; + out_size = sizeof(struct comm_cmd_set_fast_msg_rq_addr); + + gfp_hinic5_vram = hinic5_hinic5_vram_get_gfp_hinic5_vram(); + + for (page_idx = 0; page_idx < rq_addr.page_num; page_idx++) { + page_vaddr = dma_zalloc_coherent(hwdev->dev_hdl, + caps->page_size * FAST_MSG_ENTRY_UNIT, + &page_paddr, GFP_KERNEL | 
gfp_hinic5_vram); + if (!page_vaddr) { + sdk_err(hwdev->dev_hdl, + "alloc fast msg rq mem failed, page_idx = 0x%x\n", page_idx); + err = -ENOMEM; + goto err_handler; + } + + hwdev->fast_msg_to_func->rq_mem_paddr[page_idx] = page_paddr; + hwdev->fast_msg_to_func->rq_mem[page_idx] = page_vaddr; + rq_addr.page_addr[page_idx].rq_page_addr = (u64)page_paddr; + } + + err = hinic5_msg_to_mgmt_sync(hwdev, HINIC5_MOD_COMM, COMM_MGMT_CMD_SET_FAST_MSG_RQ_ADDR, + &rq_addr, out_size, + &rq_addr, &out_size, 0, HINIC5_CHANNEL_COMM); + if (err != 0 || out_size == 0 || rq_addr.head.status != 0) { + sdk_err(hwdev->dev_hdl, + "Failed to get fast msg cap, ret = %d, status: 0x%x, out size: 0x%x\n", + err, rq_addr.head.status, out_size); + err = -EINVAL; + goto err_handler; + } + + return err; +err_handler: + hinic5_fast_msg_rq_buf_deinit(hwdev); + return err; +} + +static void recv_concurrent_work_init(struct hisdk5_fast_msg_to_func *fast_msg) +{ + struct hisdk5_fast_msg_recv_work *recv_work = NULL; + u32 i; + + for (i = 0; i < fast_msg->num_concurrent_work; i++) { + recv_work = &fast_msg->recv_concurrent_work[i]; + recv_work->fast_msg_to_func = fast_msg; + INIT_WORK(&recv_work->work, hinic5_fast_msg_recv_handler); + INIT_LIST_HEAD(&recv_work->msg_head); + spin_lock_init(&recv_work->lock); + } +} + +int hinic5_fast_msg_recv_init(struct hinic5_hwdev *hwdev, struct hisdk5_fast_msg_to_func *fast_msg) +{ + u32 i; + int err; + + err = hinic5_ceq_register_cb(hwdev, fast_msg, HINIC5_FAST_MSG_RQ, + hinic5_fast_msg_rq_handler); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Fail to register fast_msg_rq callback\n"); + return err; + } + + fast_msg->workq = alloc_workqueue(HINIC5_FAST_MSG_WQ_NAME, WQ_MEM_RECLAIM, 0); + if (!fast_msg->workq) { + sdk_err(hwdev->dev_hdl, "Fail to alloc fast_msg workq\n"); + err = -EINVAL; + goto alloc_workq_err; + } + + fast_msg->recv_entries = kcalloc(fast_msg->fast_msg_rq_depth, + sizeof(struct hisdk5_fast_msg_recv_entry), GFP_KERNEL); + if (!fast_msg->recv_entries) 
{ + err = -ENOMEM; + goto alloc_recv_node_err; + } + + for (i = 0; i < fast_msg->fast_msg_rq_depth; i++) { + INIT_LIST_HEAD(&fast_msg->recv_entries[i].entry); + fast_msg->recv_entries[i].type = MSG_WORK_ENTRY_RECV_FAST_MSG; + fast_msg->recv_entries[i].rq_offset = i; + } + + fast_msg->num_concurrent_work = FAST_MSG_RECV_MAX_CONCURRENT; + fast_msg->recv_concurrent_work = kcalloc( + fast_msg->num_concurrent_work, + sizeof(struct hisdk5_fast_msg_recv_work), + GFP_KERNEL + ); + if (!fast_msg->recv_concurrent_work) { + err = -ENOMEM; + goto alloc_recv_work_err; + } + + recv_concurrent_work_init(fast_msg); + + return 0; + +alloc_recv_work_err: + kfree(fast_msg->recv_entries); + +alloc_recv_node_err: + destroy_workqueue(fast_msg->workq); + +alloc_workq_err: + hinic5_ceq_unregister_cb(hwdev, HINIC5_FAST_MSG_RQ); + + return err; +} + +void hinic5_fast_msg_recv_deinit(struct hinic5_hwdev *hwdev, struct hisdk5_fast_msg_to_func *fast_msg) +{ + struct hisdk5_fast_msg_recv_work *recv_work = NULL; + u32 i; + + hinic5_ceq_unregister_cb(hwdev, HINIC5_FAST_MSG_RQ); + destroy_workqueue(fast_msg->workq); + + for (i = 0; i < fast_msg->num_concurrent_work; i++) { + recv_work = &fast_msg->recv_concurrent_work[i]; + spin_lock_deinit(&recv_work->lock); + destroy_work(&recv_work->work); + } + + kfree(fast_msg->recv_concurrent_work); + fast_msg->recv_concurrent_work = NULL; + kfree(fast_msg->recv_entries); + fast_msg->recv_entries = NULL; +} + +int hinic5_fast_msg_init(void *hwdev) +{ + int err; + struct hinic5_hwdev *dev = hwdev; + hisdk5_fast_msg_caps caps; + struct hisdk5_fast_msg_to_func *fast_msg_to_func = NULL; + + memset(&caps, 0, sizeof(caps)); + + fast_msg_to_func = kzalloc(sizeof(struct hisdk5_fast_msg_to_func), GFP_KERNEL); + if (!fast_msg_to_func) { + return -ENOMEM; + } + + dev->fast_msg_to_func = fast_msg_to_func; + fast_msg_to_func->hwdev = hwdev; + + err = hinic5_fast_msg_cap_get(dev, &caps); + if (err != 0) { + sdk_err(dev->dev_hdl, "Failed to get fast_msg cap, err: %d\n", 
err); + goto err_get_cap; + } + + err = hinic5_fast_msg_recv_init(dev, fast_msg_to_func); + if (err != 0) { + sdk_err(dev->dev_hdl, "Failed to init fast_msg recv msg, err: %d\n", err); + goto err_init_recv_msg; + } + + err = hinic5_fast_msg_rq_buf_init(hwdev, &caps); + if (err != 0) { + sdk_err(dev->dev_hdl, "Failed to init fast_msg rq, err: %d\n", err); + goto err_rq_init; + } + + return 0; + +err_rq_init: + hinic5_fast_msg_recv_deinit(dev, fast_msg_to_func); + +err_init_recv_msg: +err_get_cap: + kfree(fast_msg_to_func); + dev->fast_msg_to_func = NULL; + return err; +} + +void hinic5_fast_msg_deinit(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + + hinic5_fast_msg_recv_deinit(dev, dev->fast_msg_to_func); + hinic5_fast_msg_clear_sml_table(hwdev); + hinic5_fast_msg_rq_buf_deinit(hwdev); + + kfree(dev->fast_msg_to_func); + dev->fast_msg_to_func = NULL; +} diff --git a/hinic5/src/dpu_platform_library/host/include/cfm/bond/hinic5_bond.h b/hinic5/src/dpu_platform_library/host/include/cfm/bond/hinic5_bond.h new file mode 100644 index 00000000..d4746c5b --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/include/cfm/bond/hinic5_bond.h @@ -0,0 +1,38 @@ +/**< SPDX-License-Identifier: GPL-2.0 */ +/**< Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_BOND_H +#define HINIC5_BOND_H + +#include "nic_mpu_cmd_structs.h" +#include "drv_bond_api.h" + +/** + * @brief bond初始化接口 + * @param[in] void + * @details nic初始化时调用,用于bond模块的初始化 + * @attention N/A + * @return 返回初始化结果,0为成功,非0失败 + **/ +int hinic5_bond_init(void); + +/** + * @brief bond去初始化接口 + * @param[in] void + * @details nic驱动卸载时调用,用于bond模块去初始化 + * @attention N/A + * @return void + **/ +void hinic5_bond_deinit(void); + +/** + * @brief 通过bond名获取bond id + * @param[in] bond_name bond名 + * @param[out] bond_id 找到的bondid + * @details 返回的bondid为驱动侧维护的id + * @attention N/A + * @return 返回获取结果,0为找到,非0未找到 + **/ +int hinic5_bond_get_id_by_name(u8 *bond_name, u16 *bond_id); + +#endif /**< HINIC5_BOND_H */ diff 
--git a/hinic5/src/dpu_platform_library/host/include/cfm/fast_msg/hinic5_fast_msg.h b/hinic5/src/dpu_platform_library/host/include/cfm/fast_msg/hinic5_fast_msg.h new file mode 100644 index 00000000..9101faf5 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/include/cfm/fast_msg/hinic5_fast_msg.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2024 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_FAST_MSG_H +#define HINIC5_FAST_MSG_H + +#include <linux/types.h> +#include <linux/workqueue.h> +#include "fast_msg_common_define.h" +#include "comm_defs.h" +#include "hinic5_hw.h" + +#define FAST_MSG_ENTRY_SIZE 2 +#define FAST_MSG_ENTRY_UNIT 1024 +#define FAST_MSG_ENTRY_SIZE_B (FAST_MSG_ENTRY_UNIT * FAST_MSG_ENTRY_SIZE) +#define HISDK5_FAST_MSG_MAX_PAGE_NUM 32 +#define MAX_FAST_MSG_RQ_DEPTH 2048 +#define FAST_MSG_RQ_OFFSET_MASK 0xFFF +#define FAST_MSG_RECV_MAX_CONCURRENT 10 + +/* 标识fast msg上层承载消息格式类型 */ +enum hisdk5_fast_msg_ulp_format { + HISDK5_FAST_MSG_ULP_FROMAT_NONE = 0, + HISDK5_FAST_MSG_ULP_FROMAT_MIG = 1, + /* 4bit位宽,类型需小于16 */ + HISDK5_FAST_MSG_ULP_FROMAT_MAX = 16 +}; + +typedef void (*hinic5_fast_msg_rq_cb)(hisdk5_fast_msg_buf *rq_msg, void *fast_msg_rq_data); +typedef void (*hinic5_fast_msg_forward_cb)(void *data); + +enum hisdk5_fast_msg_work_entry_type { + MSG_WORK_ENTRY_RECV_FAST_MSG, + MSG_WORK_ENTRY_FORWARDING +}; + +struct hisdk5_fast_msg_recv_entry { + struct list_head entry; + u32 type; + union { + u32 rq_offset; + struct { + void *forward_data; + hinic5_fast_msg_forward_cb forward_cb; + }; + }; +}; + +struct hisdk5_fast_msg_recv_work { + struct work_struct work; + struct hisdk5_fast_msg_to_func *fast_msg_to_func; + struct list_head msg_head; + spinlock_t lock;/* spinlock protecting the msg_head and work queue data */ +}; + +struct hisdk5_fast_msg_to_func { + void *hwdev; + + u32 fast_msg_rq_depth; + u32 fast_msg_rq_page_size; + u32 fast_msg_rq_page_num; + + void *rq_mem[HISDK5_FAST_MSG_MAX_PAGE_NUM]; + dma_addr_t 
rq_mem_paddr[HISDK5_FAST_MSG_MAX_PAGE_NUM]; + + struct workqueue_struct *workq; + + hinic5_fast_msg_rq_cb fast_msg_rq_cb[HINIC5_MOD_MAX]; + void *fast_msg_rq_data[HINIC5_MOD_MAX]; + + u32 num_concurrent_work; + struct hisdk5_fast_msg_recv_work *recv_concurrent_work; + struct hisdk5_fast_msg_recv_entry *recv_entries; +}; + +void hinic5_fast_msg_rq_handler(void *pri_handle, u32 ceqe_data); +void hinic5_fast_msg_recv_handler(struct work_struct *work); + +int hinic5_fast_msg_register_cb(void *hwdev, u8 mod, hinic5_fast_msg_rq_cb callback, void *pri_data); +void hinic5_fast_msg_unregister_cb(void *hwdev, u8 mod); +int hinic5_fast_msg_send(void *hwdev, struct hinic5_cmd_buf *cmd_buf, u64 *out_parm); + +/** + * @brief fast msg消息重新保序执行 + * + * @param hwdev SDK句柄 + * @param src_func_id fastmsg消息的源function id + * @param data 回调函数的私有数据 + * @param callback 回调函数指针 + * + * @details 某个function接收到fastmsg消息之后,若消息需要等另一个function前面的消息都处理完, + * 则fastmsg需要重新挂链确保消息在另一个function的任务里串行处理 + * + * @return: 0 - 消息入链表成功, other - 消息入链失败 + */ +int hinic5_fast_msg_forward(void *hwdev, u16 src_func_id, void *data, hinic5_fast_msg_forward_cb callback); + +/** + * @brief 获取本function是否支持fast msg + * + * @param hwdev SDK句柄 + * + * @return: true - 支持, false - 不支持. 
+ */ +bool hinic5_support_fast_msg(void *hwdev); + +#endif \ No newline at end of file diff --git a/hinic5/src/dpu_platform_library/host/include/cfm/fast_msg/hinic5_fast_msg_init.h b/hinic5/src/dpu_platform_library/host/include/cfm/fast_msg/hinic5_fast_msg_init.h new file mode 100644 index 00000000..222d1870 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/include/cfm/fast_msg/hinic5_fast_msg_init.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2024 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_FAST_MSG_INIT +#define HINIC5_FAST_MSG_INIT + +#include <linux/types.h> + +typedef struct hisdk5_fast_msg_caps { + u32 depth; + u32 page_size; +} hisdk5_fast_msg_caps; + +#define HINIC5_FAST_MSG_WQ_NAME "hinic5_fast_msg" + +int hinic5_fast_msg_init(void *hwdev); +void hinic5_fast_msg_deinit(void *hwdev); + +#endif \ No newline at end of file diff --git a/hinic5/src/dpu_platform_library/host/include/sdk/knldk/hinic5_hinic5_vram.h b/hinic5/src/dpu_platform_library/host/include/sdk/knldk/hinic5_hinic5_vram.h new file mode 100644 index 00000000..463299c7 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/include/sdk/knldk/hinic5_hinic5_vram.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_HINIC5_VRAM +#define HINIC5_HINIC5_VRAM + +#include <linux/pci.h> +#include <linux/pm.h> + +#include "mpu_inband_cmd_defs.h" + +typedef int (*hiudk_flush_fn)(void *priv_data); +typedef struct hiudk_dev_flush_infos { + void *lld_dev; + hiudk_flush_fn flush_ops; + + /* private: Internal use */ + int ret; +} hiudk_dev_flush_infos; + +typedef struct hiudk_async_ctrl { + spinlock_t lock; /* spinlock protecting the hiudk_async_ctrl data structure */ + + hiudk_dev_flush_infos flush_infos[CMD_MAX_MAX_PF_NUM]; +} hiudk_async_ctrl; + +int hinic5_wait_for_devices_flush(struct notifier_block *nb, unsigned long action, void *data); +int hiudk5_register_flush_fn(void *lld_dev, 
hiudk_flush_fn fn); +int hiudk5_unregister_flush_fn(void *lld_dev); +int hisdk5_hinic5_vram_init(void); +void hisdk5_hinic5_vram_deinit(void); + +#endif \ No newline at end of file diff --git a/hinic5/src/dpu_platform_library/host/include/sdk/knldk/hinic5_vram_common.h b/hinic5/src/dpu_platform_library/host/include/sdk/knldk/hinic5_vram_common.h new file mode 100644 index 00000000..e1431919 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/include/sdk/knldk/hinic5_vram_common.h @@ -0,0 +1,182 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2022. All rights reserved. + * Description: Header File, hinic5_vram common + * Create: 2023/7/19 + */ +#ifndef HINIC5_VRAM_COMMON_H +#define HINIC5_VRAM_COMMON_H + +#if !defined(__UEFI__) && !defined(__WIN__) +#include <linux/notifier.h> +#include <linux/numa.h> +#endif + +#define HINIC5_VRAM_BLOCK_SIZE_2M 0x200000UL +#define KEXEC_SIGN "hinic-in-kexec" +// now hinic5_vram_name max len is 14, when add other hinic5_vram, attention this value +#define HINIC5_VRAM_NAME_SIZE 15 +#define HINIC5_VRAM_NAME_MAX_LEN 16 + +#define HINIC5_VRAM_HINIC5_CQM_GLB_FUNC_BASE "F" +#define HINIC5_VRAM_HINIC5_CQM_FAKE_MEM_BASE "FK" +#define HINIC5_VRAM_HINIC5_CQM_CLA_BASE "C" +#define HINIC5_VRAM_HINIC5_CQM_CLA_TYPE_BASE "T" +#define HINIC5_VRAM_HINIC5_CQM_CLA_SMF_BASE "M" +#define HINIC5_VRAM_HINIC5_CQM_CLA_COORD_X "X" +#define HINIC5_VRAM_HINIC5_CQM_CLA_COORD_Y "Y" +#define HINIC5_VRAM_HINIC5_CQM_CLA_COORD_Z "Z" +#define HINIC5_VRAM_HINIC5_CQM_BITMAP_BASE "B" + +#define HINIC5_VRAM_NIC_DCB "DCB" +#define HINIC5_VRAM_NIC_MHOST_MGMT "MHOST_MGMT" +#define HINIC5_VRAM_NIC_HINIC5_VRAM "NIC_HINIC5_VRAM" +#define HINIC5_VRAM_NIC_FUNC_BASE "NIC_F" + +#define HINIC5_VRAM_NIC_MQM "NM" + +#define HINIC5_VRAM_VBS_IOCB "IOCB" +#define HINIC5_VRAM_VBS_RXQS_CQE "RCQE" +#define HINIC5_VRAM_VBS_NAME_BASE "VBS_" +#define HINIC5_VRAM_VBS_VOLQ_MTT "VOLQMTT" +#define HINIC5_VRAM_VBS_VOLQ_MTT_PAGE "MTT_PAGE" + +#define HINIC5_VRAM_OVS_PORT_CONF 
"OVS_PORT_CONF" +#define HINIC5_VRAM_OVS_DFX_MGR "OVS_DFX_MGR" + +#define HINIC5_VRAM_VROCE_ENTRY_POOL "VROCE_ENTRY" +#define HINIC5_VRAM_VROCE_GROUP_POOL "VROCE_GROUP" +#define HINIC5_VRAM_VROCE_UUID "VROCE_UUID" +#define HINIC5_VRAM_VROCE_VID "VROCE_VID" +#define HINIC5_VRAM_VROCE_BASE "VROCE_BASE" +#define HINIC5_VRAM_VROCE_DSCP "VROCE_DSCP" +#define HINIC5_VRAM_VROCE_QOS "VROCE_QOS" +#define HINIC5_VRAM_VROCE_DEV "VROCE_DEV" +#define HINIC5_VRAM_VROCE_RGROUP_HT_CNT "RGROUP_CNT" +#define HINIC5_VRAM_VROCE_RACL_HT_CNT "RACL_CNT" +#define HINIC5_VRAM_VROCE_MQM_ENQC "VROCE_MQM_ENQC" + +#define HINIC5_VRAM_DTOE_NUMA_MEM "DTOE_NUMA" +#define HINIC5_VRAM_DTOE_CARD_MEM "DTOE_CARD" +#define HINIC5_VRAM_DTOE_CONN_MEM "DTOE_CONN" +#define HINIC5_VRAM_DTOE_SUB_LEN 10 + +#define HINIC5_VRAM_VROCE_MIG_ENTRY_POOL "VROCE_MIG_ENTRY" +#define HINIC5_VRAM_VROCE_MIG_ENTRY_HT_CNT "MIG_ENTRY_CNT" + +#define MPU_OS_HOTREPLACE_FLAG 0x1 + +#define USE_HINIC5_VRAM 1 +#define NO_USE_HINIC5_VRAM 0 + +#define OS_HOT_REPLACE_DOING 1 +#define OS_HOT_REPLACE_DONE 0 + +#define HINIC5_VRAM_NUMA_NODE_NUM 2 + +/* 从运行时所在的CPU申请 */ +#define HINIC5_VRAM_AFFINITY_NUMA 0xfe + +/* 不指定NUMA, 从空闲NUMA申请 */ +#define HINIC5_VRAM_NO_NUMA 0xff + +enum KUP_HOOK_POINT { + PRE_FREEZE, + FREEZE_TO_KILL, + PRE_UPDATE_KERNEL, + FLUSH_DURING_KUP, + POST_UPDATE_KERNEL, + UNFREEZE_TO_RUN, + POST_RUN, + KUP_HOOK_MAX, +}; + +#if defined(__UEFI__) || defined(__WIN__) || defined(__VMWARE__) +#define hinic5_hinic5_vram_kalloc(name, size) 0 +#define hinic5_vram_get_kexec_flag() 0 +#define hinic5_hinic5_vram_get_gfp_hinic5_vram() 0 +#else + +typedef int (*register_nvwa_notifier_t)(int hook, struct notifier_block *nb); +typedef int (*unregister_nvwa_notifier_t)(int hook, struct notifier_block *nb); +typedef int (*register_euleros_reboot_notifier_t)(struct notifier_block *nb); +typedef int (*unregister_euleros_reboot_notifier_t)(struct notifier_block *nb); +typedef void __iomem *(*hinic5_vram_kalloc_t)(char *name, u64 size); 
+typedef void __iomem *(*vpmem_kalloc_node_t)(char *name, u64 size, u8 numa); +typedef void (*hinic5_vram_kfree_t)(void __iomem *vaddr, char *name, u64 size); +typedef gfp_t (*hinic5_vram_get_gfp_hinic5_vram_t)(void); + +/** + * @brief init hinic5_vram related symbols + **/ +void lookup_hinic5_vram_related_symbols(void); +/** + * @brief register nvwa notifier + * @param hook @ref enum KUP_HOOK_POINT + * @param nb pointer of notifier block + * @return + * - Zero if successful. Non-zero otherwise. + **/ +int hi_register_nvwa_notifier(int hook, struct notifier_block *nb); +/** + * @brief unregister nvwa notifier + * @param hook @ref enum KUP_HOOK_POINT + * @param nb pointer of notifier block + * @return + * - Zero if successful. Non-zero otherwise. + **/ +int hi_unregister_nvwa_notifier(int hook, struct notifier_block *nb); +/** + * @brief register machine-shutdown notifier + * @param nb pointer of notifier block + * @return + * - Zero if successful. Non-zero otherwise. + **/ +int hi_register_euleros_reboot_notifier(struct notifier_block *nb); +/** + * @brief unregister machine-shutdown notifier + * @param nb pointer of notifier block + * @return + * - Zero if successful. Non-zero otherwise. + **/ +int hi_unregister_euleros_reboot_notifier(struct notifier_block *nb); +/** + * @brief alloc hinic5_vram memory + * @param name name of hinic5_vram memory + * @param size size of hinic5_vram memory + **/ +void __iomem *hinic5_hinic5_vram_kalloc(char *name, u64 size); +/** + * @brief get gfp of hinic5_vram for dma + * @return + * - gfp_t from sdi_nanoos + **/ +gfp_t hinic5_hinic5_vram_get_gfp_hinic5_vram(void); +/** + * @brief set kexec status + * @param status 1 : doing kexec, 0 : done kexec + * @return + * - Zero if successful. Non-zero otherwise. + **/ +int hinic5_set_kexec_status(int status); +/** + * @brief get kexec status + * @return + * - Zero if successful. Non-zero otherwise. 
+ **/ +int hinic5_get_kexec_status(void); +/** + * @brief set use-hinic5_vram flag + * @param flag: true : use hinic5_vram, false : don't use hinic5_vram + **/ +void set_use_hinic5_vram_flag(bool flag); +/** + * @brief get kexec flag + * @return + * - 0: done kexec + * - 1: doing kexec + **/ +int hinic5_vram_get_kexec_flag(void); + +#endif + +#endif /* HINIC5_VRAM_COMMON_H */ \ No newline at end of file diff --git a/hinic5/src/dpu_platform_library/host/include/typedef.h b/hinic5/src/dpu_platform_library/host/include/typedef.h new file mode 100644 index 00000000..1532e591 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/include/typedef.h @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef TYPEDEF_H +#define TYPEDEF_H + +#include "base_type.h" + +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/crm/hinic5_hw_cfg.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/crm/hinic5_hw_cfg.c new file mode 100644 index 00000000..ea58480e --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/crm/hinic5_hw_cfg.c @@ -0,0 +1,2286 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/mutex.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/semaphore.h> + +#include "ossl_knl.h" +#include "hinic5_typedef_inner.h" +#include "hinic5_crm.h" +#include "hinic5_hw.h" +#include "hinic5_hwdev.h" +#include "hinic5_hwif_inner.h" +#include "comm_defs.h" +#include "cfg_mgmt_mpu_cmd.h" +#include "cfg_mgmt_mpu_cmd_defs.h" +#include "hinic5_bus.h" +#include "mag_mpu_cmd.h" +#include "hinic5_hw_cfg.h" + +#define SVC_CAP_EN_OFFSET_BIT 16 + +static void parse_pub_res_cap_dfx(struct hinic5_hwdev *hwdev, + const struct service_cap *cap) +{ + sdk_info(hwdev->dev_hdl, "Get public resource capbility: svc_cap_en: 
0x%x\n", + cap->svc_type); + sdk_info(hwdev->dev_hdl, "Host_id: 0x%x, ep_id: 0x%x, er_id: 0x%x, port_id: 0x%x\n", + cap->host_id, cap->ep_id, cap->er_id, cap->port_id); + sdk_info(hwdev->dev_hdl, "cos_bitmap: 0x%x, flexq: 0x%x, virtio_vq_size: 0x%x\n", + cap->cos_valid_bitmap, cap->flexq_en, cap->virtio_vq_size); + sdk_info(hwdev->dev_hdl, "Host_total_function: 0x%x, host_oq_id_mask_val: 0x%x, max_vf: 0x%x\n", + cap->host_total_function, cap->host_oq_id_mask_val, + cap->max_vf); + sdk_info(hwdev->dev_hdl, "Host_pf_num: 0x%x, pf_id_start: 0x%x, host_vf_num: 0x%x, vf_id_start: 0x%x\n", + cap->pf_num, cap->pf_id_start, cap->vf_num, cap->vf_id_start); + sdk_info(hwdev->dev_hdl, "host_valid_bitmap: 0x%x, master_host_id: 0x%x, srv_multi_host_mode: 0x%x\n", + cap->host_valid_bitmap, cap->master_host_id, cap->srv_multi_host_mode); + sdk_info(hwdev->dev_hdl, + "fake_vf_start_id: 0x%x, fake_vf_num: 0x%x, fake_vf_max_pctx: 0x%x\n", + cap->fake_vf_start_id, cap->fake_vf_num, cap->fake_vf_max_pctx); + sdk_info(hwdev->dev_hdl, "fake_vf_bfilter_start_addr: 0x%x, fake_vf_bfilter_len: 0x%x\n", + cap->fake_vf_bfilter_start_addr, cap->fake_vf_bfilter_len); +} + +static void parse_hinic5_cqm_res_cap(const struct hinic5_hwdev *hwdev, struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap) +{ + struct dev_sf_svc_attr *attr = &cap->sf_svc_attr; + + cap->fake_vf_start_id = dev_cap->fake_vf_start_id; + cap->fake_vf_num = dev_cap->fake_vf_num; + cap->fake_vf_num_cfg = dev_cap->fake_vf_num; + + cap->fake_vf_max_pctx = dev_cap->fake_vf_max_pctx; + /* other fake_vf_max_XXX are parsed from ext_dev_cap(extent devcie capability) */ + + cap->fake_vf_bfilter_start_addr = dev_cap->fake_vf_bfilter_start_addr; + cap->fake_vf_bfilter_len = dev_cap->fake_vf_bfilter_len; + + if (COMM_SUPPORT_VIRTIO_VQ_SIZE(hwdev)) + cap->virtio_vq_size = (u16)(VIRTIO_BASE_VQ_SIZE << dev_cap->virtio_vq_size); + else + cap->virtio_vq_size = VIRTIO_DEFAULT_VQ_SIZE; + cap->virtio_vq_num = dev_cap->virtio_vq_num; + 
cap->vio_func_num = dev_cap->vio_func_num; + cap->nvme_qp_num = dev_cap->nvme_qp_num; + + if ((dev_cap->sf_svc_attr & SF_SVC_FT_BIT) != 0) + attr->ft_en = true; + else + attr->ft_en = false; + + if ((dev_cap->sf_svc_attr & SF_SVC_RDMA_BIT) != 0) + attr->rdma_en = true; + else + attr->rdma_en = false; + + /* PPF will overwrite it when parse dynamic resource */ + if (dev_cap->func_sf_en != 0) + cap->sf_en = true; + else + cap->sf_en = false; + + cap->lb_mode = dev_cap->lb_mode; + cap->smf_pg = dev_cap->smf_pg; + +#ifndef __UEFI__ + cap->timer_en = dev_cap->timer_en; +#else + cap->timer_en = 0; +#endif + cap->host_oq_id_mask_val = dev_cap->host_oq_id_mask_val; + cap->max_connect_num = dev_cap->max_conn_num; + cap->max_stick2cache_num = dev_cap->max_stick2cache_num; + cap->bfilter_start_addr = dev_cap->max_bfilter_start_addr; + cap->bfilter_len = dev_cap->bfilter_len; + cap->hash_bucket_num = dev_cap->hash_bucket_num; +} + +static void parse_pub_res_cap(struct hinic5_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum func_type type) +{ + cap->host_id = dev_cap->host_id; + cap->ep_id = dev_cap->ep_id; + cap->er_id = dev_cap->er_id; + cap->port_id = dev_cap->port_id; + + cap->svc_type = ((dev_cap->svc_cap_en) | (dev_cap->svc_cap_en_h << SVC_CAP_EN_OFFSET_BIT)); + cap->chip_svc_type = cap->svc_type; + + cap->cos_valid_bitmap = (dev_cap->dev_cos_valid_bitmap == 0) ? + dev_cap->valid_cos_bitmap : dev_cap->dev_cos_valid_bitmap; + cap->cos_mask_mode = (dev_cap->cos_mask_mode == 0) ? 
+ COS_DEFAULT_MASK_MODE : dev_cap->cos_mask_mode; + cap->dcb_state.default_cos = dev_cap->dev_default_cos; + cap->port_cos_valid_bitmap = dev_cap->port_cos_valid_bitmap; + cap->func_gpa_spu_en = dev_cap->func_gpa_spu_en; + cap->flexq_en = dev_cap->flexq_en; + + cap->host_total_function = dev_cap->host_total_func; + cap->host_valid_bitmap = dev_cap->host_valid_bitmap; + cap->master_host_id = dev_cap->master_host_id; + cap->srv_multi_host_mode = dev_cap->srv_multi_host_mode; + + if (type != TYPE_VF) { + cap->max_vf = dev_cap->max_vf; + cap->pf_num = dev_cap->host_pf_num; + cap->pf_id_start = dev_cap->pf_id_start; + cap->vf_num = dev_cap->host_vf_num; + cap->vf_id_start = dev_cap->vf_id_start; + } else { + cap->max_vf = 0; + } + + parse_hinic5_cqm_res_cap(hwdev, cap, dev_cap); + parse_pub_res_cap_dfx(hwdev, cap); +} + +static void parse_dynamic_share_res_cap(struct service_cap *cap, + const struct cfg_cmd_dev_cap *dev_cap) +{ + if (dev_cap->host_sf_en != 0) + cap->sf_en = true; + else + cap->sf_en = false; +} + +static void parse_l2nic_res_cap(struct hinic5_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum func_type type) +{ + struct nic_service_cap *nic_cap = &cap->nic_cap; + + nic_cap->max_sqs = dev_cap->nic_max_sq_id + 1; + nic_cap->max_rqs = dev_cap->nic_max_rq_id + 1; + nic_cap->default_num_queues = dev_cap->nic_default_num_queues; + + sdk_info(hwdev->dev_hdl, "L2nic resource capbility, max_sqs: 0x%x, max_rqs: 0x%x\n", + nic_cap->max_sqs, nic_cap->max_rqs); + + /* Check parameters from firmware */ + if (nic_cap->max_sqs > HINIC5_CFG_MAX_QP || + nic_cap->max_rqs > HINIC5_CFG_MAX_QP) { + sdk_info(hwdev->dev_hdl, "Number of qp exceed limit[1-%d]: sq: %u, rq: %u\n", + HINIC5_CFG_MAX_QP, nic_cap->max_sqs, nic_cap->max_rqs); + nic_cap->max_sqs = HINIC5_CFG_MAX_QP; + nic_cap->max_rqs = HINIC5_CFG_MAX_QP; + } +} + +static void parse_fc_res_cap(struct hinic5_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum 
func_type type) +{ + struct dev_fc_svc_cap *fc_cap = &cap->fc_cap.dev_fc_cap; + + fc_cap->max_parent_qpc_num = dev_cap->fc_max_pctx; + fc_cap->scq_num = dev_cap->fc_max_scq; + fc_cap->srq_num = dev_cap->fc_max_srq; + fc_cap->max_child_qpc_num = dev_cap->fc_max_cctx; + fc_cap->child_qpc_id_start = dev_cap->fc_cctx_id_start; + fc_cap->vp_id_start = dev_cap->fc_vp_id_start; + fc_cap->vp_id_end = dev_cap->fc_vp_id_end; + + sdk_info(hwdev->dev_hdl, "Get fc resource capbility\n"); + sdk_info(hwdev->dev_hdl, + "Max_parent_qpc_num: 0x%x, scq_num: 0x%x, srq_num: 0x%x, max_child_qpc_num: 0x%x, child_qpc_id_start: 0x%x\n", + fc_cap->max_parent_qpc_num, fc_cap->scq_num, fc_cap->srq_num, + fc_cap->max_child_qpc_num, fc_cap->child_qpc_id_start); + sdk_info(hwdev->dev_hdl, "Vp_id_start: 0x%x, vp_id_end: 0x%x\n", + fc_cap->vp_id_start, fc_cap->vp_id_end); +} + +static void parse_roce_res_cap(struct hinic5_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum func_type type) +{ + struct dev_roce_svc_own_cap *roce_cap = + &cap->rdma_cap.dev_rdma_cap.roce_own_cap; + + roce_cap->max_qps = dev_cap->roce_max_qp; + roce_cap->max_cqs = dev_cap->roce_max_cq; + roce_cap->max_srqs = dev_cap->roce_max_srq; + roce_cap->max_mpts = dev_cap->roce_max_mpt; + roce_cap->max_drc_qps = dev_cap->roce_max_drc_qp; + + roce_cap->wqe_cl_start = dev_cap->roce_wqe_cl_start; + roce_cap->wqe_cl_end = dev_cap->roce_wqe_cl_end; + roce_cap->wqe_cl_sz = dev_cap->roce_wqe_cl_size; + roce_cap->qpc_entry_sz = (dev_cap->hyper_qpc_entry_size_en == 0) ? + ROCE_QPC_ENTRY_SZ : HYPER_ROCE_QPC_ENTRY_SZ; + + sdk_info(hwdev->dev_hdl, "Get roce resource capbility, type: 0x%x\n", + type); + sdk_info(hwdev->dev_hdl, "Max_qps: 0x%x, max_cqs: 0x%x, max_srqs: 0x%x, max_mpts: 0x%x, max_drcts: 0x%x\n", + roce_cap->max_qps, roce_cap->max_cqs, roce_cap->max_srqs, + roce_cap->max_mpts, roce_cap->max_drc_qps); + + sdk_info(hwdev->dev_hdl, "Wqe_start: 0x%x, wqe_end: 0x%x, wqe_sz: 0x%x. 
qpc_entry_sz:0x%x\n", + roce_cap->wqe_cl_start, roce_cap->wqe_cl_end, + roce_cap->wqe_cl_sz, roce_cap->qpc_entry_sz); + + if (roce_cap->max_qps == 0) { + if (type == TYPE_PF || type == TYPE_PPF) { + roce_cap->max_qps = 0x400; + roce_cap->max_cqs = 0x800; + roce_cap->max_srqs = 0x400; + roce_cap->max_mpts = 0x400; + roce_cap->max_drc_qps = 0x40; + } else { + roce_cap->max_qps = 0x200; + roce_cap->max_cqs = 0x400; + roce_cap->max_srqs = 0x200; + roce_cap->max_mpts = 0x200; + roce_cap->max_drc_qps = 0x40; + } + } + + roce_cap->max_child_ctx_num = dev_cap->roce_max_child_ctx_num; +} + +static void parse_roce_ext_res_cap(struct hinic5_hwdev *hwdev, + struct cfg_cmd_ext_dev_cap *ext_dev_cap, + struct service_cap *cap, u32 index) +{ + struct dev_roce_svc_own_cap *roce_cap = &cap->rdma_cap.dev_rdma_cap.roce_own_cap; + struct cfg_roce_ext_caps *roce_ext_caps = NULL; + + roce_ext_caps = (struct cfg_roce_ext_caps *)(&ext_dev_cap->ext_cap[index]); + + roce_cap->reserved_qps = roce_ext_caps->rsvd_qp; + roce_cap->reserved_qps_back = roce_ext_caps->rsvd_qp_back; + roce_cap->reserved_cqs = roce_ext_caps->rsvd_cq; + roce_cap->reserved_cqs_back = roce_ext_caps->rsvd_cq_back; + roce_cap->reserved_srqs = roce_ext_caps->rsvd_srq; + roce_cap->reserved_srqs_back = roce_ext_caps->rsvd_srq_back; + roce_cap->max_pd = roce_ext_caps->max_pd; + roce_cap->max_xrcd = roce_ext_caps->max_xrcd; + roce_cap->max_gid = roce_ext_caps->max_gid; + + sdk_info(hwdev->dev_hdl, "reserved_qps: 0x%x, reserved_qps_back: 0x%x, reserved_cqs: 0x%x\n", + roce_cap->reserved_qps, roce_cap->reserved_qps_back, roce_cap->reserved_cqs); + sdk_info(hwdev->dev_hdl, "reserved_cqs_back: 0x%x, reserved_srqs: 0x%x, reserved_srqs_back: 0x%x\n", + roce_cap->reserved_cqs_back, roce_cap->reserved_srqs, roce_cap->reserved_srqs_back); + sdk_info(hwdev->dev_hdl, "max_pd: 0x%x, max_xrcd: 0x%x, max_gid: 0x%x\n", + roce_cap->max_pd, roce_cap->max_xrcd, roce_cap->max_gid); +} + +static void parse_rdma_res_cap(struct hinic5_hwdev *hwdev, 
+ struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum func_type type) +{ + struct dev_roce_svc_own_cap *roce_cap = + &cap->rdma_cap.dev_rdma_cap.roce_own_cap; + + roce_cap->cmtt_cl_start = dev_cap->roce_cmtt_cl_start; + roce_cap->cmtt_cl_end = dev_cap->roce_cmtt_cl_end; + roce_cap->cmtt_cl_sz = dev_cap->roce_cmtt_cl_size; + + roce_cap->dmtt_cl_start = dev_cap->roce_dmtt_cl_start; + roce_cap->dmtt_cl_end = dev_cap->roce_dmtt_cl_end; + roce_cap->dmtt_cl_sz = dev_cap->roce_dmtt_cl_size; + + sdk_info(hwdev->dev_hdl, "Get rdma resource capbility, Cmtt_start: 0x%x, cmtt_end: 0x%x, cmtt_sz: 0x%x\n", + roce_cap->cmtt_cl_start, roce_cap->cmtt_cl_end, + roce_cap->cmtt_cl_sz); + + sdk_info(hwdev->dev_hdl, "Dmtt_start: 0x%x, dmtt_end: 0x%x, dmtt_sz: 0x%x\n", + roce_cap->dmtt_cl_start, roce_cap->dmtt_cl_end, + roce_cap->dmtt_cl_sz); +} + +static void parse_ovs_res_cap(struct hinic5_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum func_type type) +{ + struct ovs_service_cap *ovs_cap = &cap->ovs_cap; + + ovs_cap->dev_ovs_cap.max_pctxs = dev_cap->ovs_max_qpc; + ovs_cap->dev_ovs_cap.fake_vf_max_pctx = dev_cap->fake_vf_max_pctx; + ovs_cap->dev_ovs_cap.fake_vf_start_id = dev_cap->fake_vf_start_id; + ovs_cap->dev_ovs_cap.fake_vf_num = dev_cap->fake_vf_num; + ovs_cap->dev_ovs_cap.dynamic_qp_en = dev_cap->flexq_en; + + sdk_info(hwdev->dev_hdl, + "Get ovs resource capbility, max_qpc: 0x%x, fake_vf_start_id: 0x%x, fake_vf_num: 0x%x\n", + ovs_cap->dev_ovs_cap.max_pctxs, + ovs_cap->dev_ovs_cap.fake_vf_start_id, + ovs_cap->dev_ovs_cap.fake_vf_num); + sdk_info(hwdev->dev_hdl, + "fake_vf_max_qpc: 0x%x, dynamic_qp_en: 0x%x\n", + ovs_cap->dev_ovs_cap.fake_vf_max_pctx, + ovs_cap->dev_ovs_cap.dynamic_qp_en); +} + +static void parse_ppa_res_cap(struct hinic5_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum func_type type) +{ + struct ppa_service_cap *dip_cap = &cap->ppa_cap; + + dip_cap->qpc_fake_vf_ctx_num = 
dev_cap->fake_vf_max_pctx; + dip_cap->qpc_fake_vf_start = dev_cap->fake_vf_start_id; + dip_cap->qpc_fake_vf_num = dev_cap->fake_vf_num; + dip_cap->bloomfilter_en = (dev_cap->fake_vf_bfilter_len != 0) ? 1 : 0; + dip_cap->bloomfilter_length = dev_cap->fake_vf_bfilter_len; + sdk_info(hwdev->dev_hdl, + "Get ppa resource capbility, fake_vf_start_id: 0x%x, fake_vf_num: 0x%x, fake_vf_max_qpc: 0x%x\n", + dip_cap->qpc_fake_vf_start, + dip_cap->qpc_fake_vf_num, + dip_cap->qpc_fake_vf_ctx_num); +} + +static void parse_toe_res_cap(struct hinic5_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum func_type type) +{ + struct dev_toe_svc_cap *toe_cap = &cap->toe_cap.dev_toe_cap; + + toe_cap->max_pctxs = dev_cap->toe_max_pctx; + toe_cap->max_cqs = dev_cap->toe_max_cq; + toe_cap->max_srqs = dev_cap->toe_max_srq; + toe_cap->srq_id_start = dev_cap->toe_srq_id_start; + toe_cap->max_mpts = dev_cap->toe_max_mpt; + toe_cap->max_cctxt = dev_cap->toe_max_cctxt; + + sdk_info(hwdev->dev_hdl, + "Get toe resource capbility, max_pctxs: 0x%x, max_cqs: 0x%x, max_srqs: 0x%x, srq_id_start: 0x%x, max_mpts: 0x%x\n", + toe_cap->max_pctxs, toe_cap->max_cqs, toe_cap->max_srqs, + toe_cap->srq_id_start, toe_cap->max_mpts); +} + +static void parse_ipsec_res_cap(struct hinic5_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum func_type type) +{ + struct ipsec_service_cap *ipsec_cap = &cap->ipsec_cap; + + ipsec_cap->dev_ipsec_cap.max_sactxs = dev_cap->ipsec_max_sactxs; + ipsec_cap->dev_ipsec_cap.max_spctxs = dev_cap->ipsec_max_spctxs; + ipsec_cap->dev_ipsec_cap.max_cqs = dev_cap->ipsec_max_cq; + ipsec_cap->dev_ipsec_cap.sa_hash_bucket_num = dev_cap->ipsec_sa_hash_bucket_num; + ipsec_cap->dev_ipsec_cap.sp_hash_bucket_num = dev_cap->ipsec_sp_hash_bucket_num; + + sdk_info(hwdev->dev_hdl, + "Get IPsec resource capbility, max_sactxs: 0x%x, sa hash bucket num: 0x%x\n", + dev_cap->ipsec_max_sactxs, dev_cap->ipsec_sa_hash_bucket_num); + 
sdk_info(hwdev->dev_hdl, + "Get IPsec resource capbility, max_spctxs: 0x%x, " \ + "sp hash bucket num: 0x%x, max cq: 0x%x\n", + dev_cap->ipsec_max_spctxs, dev_cap->ipsec_sp_hash_bucket_num, + dev_cap->ipsec_max_cq); +} + +static void parse_vbs_res_cap(struct hinic5_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap *dev_cap, + enum func_type type) +{ + struct vbs_service_cap *vbs_cap = &cap->vbs_cap; + + vbs_cap->vbs_max_volq = dev_cap->vbs_max_volq; + vbs_cap->vbs_host_dma_data_cos = dev_cap->vbs_host_dma_data_cos; + vbs_cap->vbs_volq_cos = dev_cap->vbs_volq_cos; + vbs_cap->vbs_main_pf_enable = dev_cap->vbs_main_pf_enable; + vbs_cap->vbs_vsock_pf_enable = dev_cap->vbs_vsock_pf_enable; + vbs_cap->vbs_fushion_queue_pf_enable = dev_cap->vbs_fushion_queue_pf_enable; + vbs_cap->vbs_child_ctx_num = dev_cap->vbs_child_ctx_num; + vbs_cap->vbs_hash_bucket_num = dev_cap->vbs_hash_bucket_num; + + sdk_info(hwdev->dev_hdl, "Get VBS resource capbility, vbs_max_volq: 0x%x, vbs_child_ctx_num: 0x%x, vbs_hash_bucket_num: 0x%x\n", + dev_cap->vbs_max_volq, dev_cap->vbs_child_ctx_num, dev_cap->vbs_hash_bucket_num); +} + +static void parse_jbof_res_cap(struct hinic5_hwdev *hwdev, + struct cfg_cmd_ext_dev_cap *ext_dev_cap, + struct service_cap *cap, u32 index) +{ + struct jbof_service_cap *jbof_cap = &cap->jbof_cap; + struct cfg_jbof_ext_caps *jbof_ext_caps = NULL; + + jbof_ext_caps = (struct cfg_jbof_ext_caps *)(&ext_dev_cap->ext_cap[index]); + + jbof_cap->max_parent_qpc_num = jbof_ext_caps->jbof_max_pctx; + jbof_cap->max_child_qpc_num = jbof_ext_caps->jbof_max_cctx; + jbof_cap->hash_bucket_num = jbof_ext_caps->jbof_hash_bucket_num; + + sdk_info(hwdev->dev_hdl, + "Get jbof resource capbility, max_parent_qpc_num: 0x%x max_child_qpc_num:0x%x, hash_bucket_num: 0x%x\n", + jbof_cap->max_parent_qpc_num, jbof_cap->max_child_qpc_num, + jbof_cap->hash_bucket_num); +} + +static void parse_dmmu_res_cap(struct hinic5_hwdev *hwdev, + struct service_cap *cap, + struct cfg_cmd_dev_cap 
*dev_cap, + enum func_type type) +{ + struct dmmu_service_cap *dmmu_cap = &cap->dmmu_cap; + + dmmu_cap->pasid_min = dev_cap->min_fake_pasid; + dmmu_cap->pasid_max = dev_cap->max_fake_pasid; + dmmu_cap->cl_start = dev_cap->dmmu_cl_start; + dmmu_cap->cl_end = dev_cap->dmmu_cl_end; + + sdk_info(hwdev->dev_hdl, "Get DMMU resource capbility, pasid_max: 0x%x\n", + dmmu_cap->pasid_max); +} + +static void parse_ub_res_cap(struct hinic5_hwdev *dev, + struct cfg_cmd_ext_dev_cap *ext_dev_cap, + struct service_cap *cap, u32 index) +{ + struct ub_service_cap *ub_caps = &cap->ub_cap; + struct ub_firmware_caps *ub_ext_caps = NULL; + + ub_ext_caps = (struct ub_firmware_caps *)(&ext_dev_cap->ext_cap[index]); + + ub_caps->sdk_res.max_jfc = ub_ext_caps->max_jfc; + ub_caps->sdk_res.max_jetty = ub_ext_caps->max_jetty; + ub_caps->sdk_res.max_jetty_grp = ub_ext_caps->max_jetty_grp; + ub_caps->sdk_res.max_jfr = ub_ext_caps->max_jfr; + ub_caps->sdk_res.max_mpts = ub_ext_caps->max_mpts; + ub_caps->sdk_res.max_tp = ub_ext_caps->max_tp; + ub_caps->sdk_res.max_tpg = ub_ext_caps->max_tpg; + ub_caps->sdk_res.max_vtp = ub_ext_caps->max_vtp; + ub_caps->sdk_res.max_gid = ub_ext_caps->max_gid; + ub_caps->sdk_res.max_utp = ub_ext_caps->max_utp; + ub_caps->sdk_res.max_jfrc = ub_ext_caps->max_jfrc; + ub_caps->sdk_res.cqc_entry_sz = ub_ext_caps->cqc_entry_sz; // 128 + ub_caps->sdk_res.srqc_entry_sz = ub_ext_caps->srqc_entry_sz; // 64 + ub_caps->sdk_res.qpc_entry_sz = ub_ext_caps->qpc_entry_sz; // 1024 + + ub_caps->net_dev_cap.is_tpf = ub_ext_caps->is_tpf; + ub_caps->net_dev_cap.max_mtu = ub_ext_caps->max_mtu; // 8192 + ub_caps->net_dev_cap.vf_cnt = ub_ext_caps->vf_cnt; + + sdk_info(dev->dev_hdl, "Max_jfc: 0x%x, max_jfr: 0x%x, max_jetty: 0x%x, max_mpts: 0x%x, max_tp: 0x%x\n", + ub_caps->sdk_res.max_jfc, ub_caps->sdk_res.max_jfr, + ub_caps->sdk_res.max_jetty, ub_caps->sdk_res.max_mpts, ub_caps->sdk_res.max_tp); + + sdk_info(dev->dev_hdl, "cqc_entry_sz: 0x%x, srqc_entry_sz: 0x%x, qpc_entry_sz: 0x%x\n", + 
ub_caps->sdk_res.cqc_entry_sz, ub_caps->sdk_res.srqc_entry_sz, + ub_caps->sdk_res.qpc_entry_sz); +} + +static void parse_fake_vf_ext_cap(struct hinic5_hwdev *hwdev, + struct cfg_cmd_ext_dev_cap *ext_dev_cap, + struct service_cap *cap, u32 index) +{ + struct cfg_fake_vf_ext_caps *ext_cap = + (struct cfg_fake_vf_ext_caps *)(&ext_dev_cap->ext_cap[index]); + + cap->fake_vf_parent_func_id = ext_cap->fake_vf_parent_func_id; + cap->fake_vf_lazy_init = ext_cap->fake_vf_lazy_init != 0; + + cap->fake_vf_max_scqc_ctx = ext_cap->scqc_fake_vf_ctx_num; + cap->fake_vf_max_srqc_ctx = ext_cap->srqc_fake_vf_ctx_num; + cap->fake_vf_max_gid_ctx = ext_cap->gid_fake_vf_ctx_num; + cap->fake_vf_max_mpt_ctx = ext_cap->mpt_fake_vf_ctx_num; + cap->fake_vf_max_childc_ctx = ext_cap->childc_fake_vf_ctx_num; + cap->fake_vf_qpc_ctx_size_en = ext_cap->qpc_fake_vf_ctx_size_order_en != 0; + cap->fake_vf_qpc_ctx_size_order = ext_cap->qpc_fake_vf_ctx_size_order; + + sdk_info(hwdev->dev_hdl, + "Get fake vf capbility, parent func id 0x%x, lazy init %u\n", + cap->fake_vf_parent_func_id, ext_cap->fake_vf_lazy_init); + sdk_info(hwdev->dev_hdl, + "Get fake vf ctx max capbility, scqc 0x%x, srqc 0x%x, gid 0x%x, mpt 0x%x, childc 0x%x\n", + cap->fake_vf_max_scqc_ctx, cap->fake_vf_max_srqc_ctx, cap->fake_vf_max_gid_ctx, + cap->fake_vf_max_mpt_ctx, cap->fake_vf_max_childc_ctx); + sdk_info(hwdev->dev_hdl, + "Get fake vf ctx size capbility. 
qpc ctx size en %u, order %u\n", + cap->fake_vf_qpc_ctx_size_en, cap->fake_vf_qpc_ctx_size_order); +} + +static void parse_fw_update_ext_cap(struct hinic5_hwdev *hwdev, + struct cfg_cmd_ext_dev_cap *ext_dev_cap, + struct service_cap *cap, u32 index) +{ + cfg_fw_update_ext_caps *ext_caps = (cfg_fw_update_ext_caps *)(&ext_dev_cap->ext_cap[index]); + + *(&cap->fw_update_cap) = *ext_caps; + sdk_info(hwdev->dev_hdl, + "Get fw udpate capbility, fw_img_hdr_size 0x%x, fw_tile_text_size 0x%x\n", + cap->fw_update_cap.fw_img_hdr_size, cap->fw_update_cap.fw_tile_text_size); +} + +static void parse_comm_info_ext_cap(struct hinic5_hwdev *hwdev, + struct cfg_cmd_ext_dev_cap *ext_dev_cap, + struct service_cap *cap, u32 index) +{ + struct comm_info_ext_cap *ext_caps; + struct cfm_service_cap *cfm_cap = &cap->cfm_cap; + + ext_caps = (struct comm_info_ext_cap *)(&ext_dev_cap->ext_cap[index]); + + /* BAT capabilities */ + cap->bat_cid_index_bit_width = ext_caps->bat_cid_index_bit_width; + + /* SMF capabilities */ + cap->smf_max_num = ext_caps->max_smf_num; + + /* SRIOV ext capabilities */ + cap->vf_isolation = (ext_caps->vf_isolation != 0); + + sdk_info(hwdev->dev_hdl, + "Get common ext cap: bat cid index bits %u, smf_max_num %u, vf iso %d\n", + cap->bat_cid_index_bit_width, cap->smf_max_num, + cap->vf_isolation); + + /* CFM CCP capabilities */ + cfm_cap->ccp_child_ctx_sz = ext_caps->ccp_child_ctx_sz; + cfm_cap->ccp_max_child_ctx = ext_caps->ccp_max_child_ctx; + + sdk_info(hwdev->dev_hdl, "Get CFM CCP cap: childc basic size %u, max %u\n", + cfm_cap->ccp_child_ctx_sz, cfm_cap->ccp_max_child_ctx); +} + +static void parse_dev_cap(struct hinic5_hwdev *dev, + struct cfg_cmd_dev_cap *dev_cap, enum func_type type) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + + /* Public resource */ + parse_pub_res_cap(dev, cap, dev_cap, type); + + /* PPF managed dynamic resource */ + if (type == TYPE_PPF) + parse_dynamic_share_res_cap(cap, dev_cap); + + /* L2 NIC resource */ + if 
(IS_NIC_TYPE(dev) != 0) + parse_l2nic_res_cap(dev, cap, dev_cap, type); + + /* FC without virtulization */ + if (type == TYPE_PF || type == TYPE_PPF) { + if (IS_FC_TYPE(dev) != 0) + parse_fc_res_cap(dev, cap, dev_cap, type); + } + + /* toe resource */ + if (IS_TOE_TYPE(dev) != 0) + parse_toe_res_cap(dev, cap, dev_cap, type); + + /* mtt cache line */ + if (IS_RDMA_ENABLE(dev)) + parse_rdma_res_cap(dev, cap, dev_cap, type); + + /* RoCE resource */ + if (IS_ROCE_TYPE(dev) != 0) + parse_roce_res_cap(dev, cap, dev_cap, type); + + if (IS_OVS_TYPE(dev) != 0) + parse_ovs_res_cap(dev, cap, dev_cap, type); + + if (IS_IPSEC_TYPE(dev) != 0) + parse_ipsec_res_cap(dev, cap, dev_cap, type); + + if (IS_PPA_TYPE(dev) != 0) + parse_ppa_res_cap(dev, cap, dev_cap, type); + + if (IS_VBS_TYPE(dev) != 0) + parse_vbs_res_cap(dev, cap, dev_cap, type); + + if (IS_DMMU_TYPE(dev) != 0) + parse_dmmu_res_cap(dev, cap, dev_cap, type); +} + +static void parse_all_res_cap(struct hinic5_hwdev *dev, + struct cfg_cmd_ext_dev_cap *ext_dev_cap, + struct service_cap *cap, u32 type, u32 index) +{ + switch (type) { + // SERVICE_BIT_UB和EXT_CAP_FAKE_VF类型不同, 消除告警 + case (u32)SERVICE_BIT_UB: + parse_ub_res_cap(dev, ext_dev_cap, cap, index); + break; + case (u32)SERVICE_BIT_ROCE: + parse_roce_ext_res_cap(dev, ext_dev_cap, cap, index); + break; + case (u32)SERVICE_BIT_JBOF: + parse_jbof_res_cap(dev, ext_dev_cap, cap, index); + break; + case (u32)EXT_CAP_FAKE_VF: + parse_fake_vf_ext_cap(dev, ext_dev_cap, cap, index); + break; + case (u32)EXT_CAP_FW_UPDATE: + parse_fw_update_ext_cap(dev, ext_dev_cap, cap, index); + break; + case (u32)EXT_CAP_COMM_INFO: + parse_comm_info_ext_cap(dev, ext_dev_cap, cap, index); + break; + default: + break; + } +} + +static void parse_ext_dev_cap(struct hinic5_hwdev *dev, + struct cfg_cmd_ext_dev_cap *ext_dev_cap, enum func_type type) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + u32 index = 0; + struct cfg_cmd_tlv_hdr *tlv_hdr = NULL; + + do { + tlv_hdr = (struct 
cfg_cmd_tlv_hdr *)&ext_dev_cap->ext_cap[index]; + if (tlv_hdr->len == 0x0 || tlv_hdr->len % 0x4 != 0x0) + return; + + parse_all_res_cap(dev, ext_dev_cap, cap, tlv_hdr->type, index + sizeof(*tlv_hdr)); + + index += (tlv_hdr->len + sizeof(*tlv_hdr)); + } while (index < MAX_CAP_LEN_QWORD); +} + +static int get_legacy_dev_cap(struct hinic5_hwdev *hwdev, enum func_type type) +{ + struct cfg_cmd_dev_cap dev_cap; + u16 out_len = sizeof(dev_cap); + int err; + + memset(&dev_cap, 0, sizeof(dev_cap)); + dev_cap.func_id = hinic5_global_func_id(hwdev); + sdk_info(hwdev->dev_hdl, "Get cap from fw, func_idx: %u\n", + dev_cap.func_id); + + err = hinic5_msg_to_mgmt_sync(hwdev, HINIC5_MOD_CFGM, CFG_CMD_GET_DEV_CAP, + &dev_cap, sizeof(dev_cap), + &dev_cap, &out_len, 0, + HINIC5_CHANNEL_COMM); + if (err != 0 || dev_cap.head.status != 0 || out_len == 0) { + sdk_err(hwdev->dev_hdl, + "Failed to get capability from FW, err: %d, status: 0x%x, out size: 0x%x\n", + err, dev_cap.head.status, out_len); + return -EIO; + } + + parse_dev_cap(hwdev, &dev_cap, type); + + return 0; +} + +static int get_extend_dev_cap(struct hinic5_hwdev *hwdev, enum func_type type) +{ + struct cfg_cmd_ext_dev_cap *ext_dev_cap = NULL; + u16 out_len = sizeof(struct cfg_cmd_ext_dev_cap); + int err; + + if (!COMM_SUPPORT_EXTEND_CAPBILITY(hwdev)) + return 0; + + ext_dev_cap = (struct cfg_cmd_ext_dev_cap *)kzalloc(sizeof(*ext_dev_cap), GFP_KERNEL); + if (!ext_dev_cap) + return -ENOMEM; + ext_dev_cap->func_id = hinic5_global_func_id(hwdev); + + err = hinic5_msg_to_mgmt_sync(hwdev, HINIC5_MOD_CFGM, CFG_CMD_GET_EXTEND_DEV_CAP, + ext_dev_cap, sizeof(*ext_dev_cap), + ext_dev_cap, &out_len, 0, + HINIC5_CHANNEL_COMM); + if (err != 0 || ext_dev_cap->head.status != 0 || out_len == 0) { + sdk_err(hwdev->dev_hdl, + "Failed to get extern capability from FW, err: %d, status: 0x%x, out size: 0x%x\n", + err, ext_dev_cap->head.status, out_len); + kfree(ext_dev_cap); + return -EIO; + } + + parse_ext_dev_cap(hwdev, ext_dev_cap, type); + 
kfree(ext_dev_cap); + + return 0; +} + +STATIC int valid_smf_cap(struct service_cap *cap) +{ + const u8 smf_num_whitelist[] = {0x2, 0x4, 0x8}; + const u8 smf_max_num = cap->smf_max_num; + u32 i; + + for (i = 0; i < ARRAY_SIZE(smf_num_whitelist); i++) { + if (smf_num_whitelist[i] == smf_max_num) + return 0; + } + + return -EINVAL; +} + +static int get_smf_max_and_enabled_num(struct hinic5_hwdev *hwdev) +{ + struct service_cap *cap = &hwdev->cfg_mgmt->svc_cap; + u8 smf_id, smf_enabled_num = 0; + int err; + + if (cap->smf_max_num == 0) + cap->smf_max_num = CHIP_SMF_NUM_MIN; + + err = valid_smf_cap(cap); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Not supported max number of SMFs: %u\n", cap->smf_max_num); + return err; + } + + /* count smf_enabled_num */ + for (smf_id = 0; smf_id < cap->smf_max_num; ++smf_id) { + if ((cap->smf_pg & (1U << smf_id)) != 0) + smf_enabled_num++; + } + cap->smf_enabled_num = smf_enabled_num; + + sdk_info(hwdev->dev_hdl, "SMF cap: max %u, enabled %u\n", cap->smf_max_num, + cap->smf_enabled_num); + + return 0; +} + +static int get_cap_from_fw(struct hinic5_hwdev *dev, enum func_type type) +{ + int err; + + err = get_legacy_dev_cap(dev, type); + if (err != 0) + return err; + + err = get_extend_dev_cap(dev, type); + if (err != 0) + return err; + + return get_smf_max_and_enabled_num(dev); +} + +int hinic5_get_dev_cap(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + enum func_type type; + int err; + + if (!hwdev) + return -EINVAL; + + type = HINIC5_FUNC_TYPE(dev); + + switch (type) { + case TYPE_PF: + case TYPE_PPF: + case TYPE_VF: + err = get_cap_from_fw(dev, type); + if (err != 0) { + sdk_err(dev->dev_hdl, "Failed to get PF/PPF capability\n"); + return err; + } + break; + default: + sdk_err(dev->dev_hdl, "Unsupported PCI Function type: %d\n", + type); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(hinic5_get_dev_cap); + +STATIC int parse_host_timer_cfg(struct service_cap *cap, + struct cfg_cmd_host_timer *cfg) +{ + struct 
timer_vf_info_seg *segs = cap->timer_vf_segs; + struct timer_vf_info_fake *fake_info = &cfg->timer_vf_info.fake; + + cap->timer_pf_id_start = cfg->timer_pf_id_start; + cap->timer_pf_num = cfg->timer_pf_num; + cap->timer_vf_id_start = cfg->timer_vf_id_start; + cap->timer_vf_num = cfg->timer_vf_num; + + memset(segs, 0, sizeof(cap->timer_vf_segs)); + + if (cfg->timer_vf_info_mode_segs != 0) { + memcpy(segs, + &cfg->timer_vf_info.segs, + sizeof(cfg->timer_vf_info.segs)); + } + + if (cfg->timer_vf_info_mode_fake != 0 && + fake_info->timer_normal_vf_num != 0) { + segs[0].start = cfg->timer_vf_id_start; + segs[0].num = fake_info->timer_normal_vf_num; + segs[0x1].start = fake_info->timer_fake_vf_id_start; + segs[0x1].num = fake_info->timer_fake_vf_num; + } + + return 0; +} + +STATIC int check_host_timer_segments(struct service_cap *cap) +{ + struct timer_vf_info_seg *segs = cap->timer_vf_segs; + u16 vf_start = cap->timer_vf_id_start; + u16 vf_end = cap->timer_vf_id_start + cap->timer_vf_num; + u16 vf_last = vf_start; + int i, err = 0; + + for (i = 0; i < TIMER_VF_SEGS_NUM; i++) { + if (segs[i].start == 0) + break; + if (segs[i].num == 0) { + pr_err("seg %d start is %u, but num is zero\n", + i, segs[i].start); + err = -EINVAL; + goto fail; + } + + if (segs[i].start < vf_last) { + pr_err("seg %d conflict with last, seg start %u, last end %u\n", + i, segs[i].start, vf_last); + err = -EINVAL; + goto fail; + } + vf_last = segs[i].start + segs[i].num; + if (vf_last > vf_end) { + pr_err("seg %d end %u > vf end %u", i, vf_last, vf_end); + err = -ERANGE; + goto fail; + } + } + + return 0; + +fail: + pr_err("vf timer segs: %u-%u %u-%u %u-%u %u-%u %u-%u %u-%u %u-%u\n", + segs[0x0].start, segs[0x0].start + segs[0x0].num, + segs[0x1].start, segs[0x1].start + segs[0x1].num, + segs[0x2].start, segs[0x2].start + segs[0x2].num, + segs[0x3].start, segs[0x3].start + segs[0x3].num, + segs[0x4].start, segs[0x4].start + segs[0x4].num, + segs[0x5].start, segs[0x5].start + segs[0x5].num, + 
segs[0x6].start, segs[0x6].start + segs[0x6].num); + return err; +} + +int hinic5_get_ppf_timer_cfg(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + struct cfg_cmd_host_timer cfg_host_timer; + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + u16 out_len = sizeof(cfg_host_timer); + int err; + + memset(&cfg_host_timer, 0, sizeof(cfg_host_timer)); + cfg_host_timer.host_id = dev->cfg_mgmt->svc_cap.host_id; + + err = hinic5_msg_to_mgmt_sync(dev, HINIC5_MOD_CFGM, CFG_CMD_GET_HOST_TIMER, + &cfg_host_timer, sizeof(cfg_host_timer), + &cfg_host_timer, &out_len, 0, + HINIC5_CHANNEL_COMM); + if (err != 0 || cfg_host_timer.head.status != 0 || out_len == 0) { + sdk_err(dev->dev_hdl, + "Failed to get host timer cfg from FW, err: %d, status: 0x%x, out size: 0x%x\n", + err, cfg_host_timer.head.status, out_len); + return -EIO; + } + + err = parse_host_timer_cfg(cap, &cfg_host_timer); + if (err != 0) { + sdk_err(dev->dev_hdl, + "Failed to parse host timer config, err %d\n", err); + return err; + } + + sdk_info(dev->dev_hdl, "Get host timer cfg with vf info mode: segs %u, fake %u\n", + cfg_host_timer.timer_vf_info_mode_segs, + cfg_host_timer.timer_vf_info_mode_fake); + + err = check_host_timer_segments(cap); + if (err != 0) { + sdk_err(dev->dev_hdl, + "Failed to check host timer config, err %d\n", err); + return err; + } + + return 0; +} + +static void nic_param_fix(struct hinic5_hwdev *dev) +{ +} + +static void rdma_mtt_fix(struct hinic5_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct rdma_service_cap *rdma_cap = &cap->rdma_cap; + + rdma_cap->log_mtt = LOG_MTT_SEG; + rdma_cap->log_mtt_seg = LOG_MTT_SEG; + rdma_cap->mtt_entry_sz = MTT_ENTRY_SZ; + rdma_cap->mpt_entry_sz = RDMA_MPT_ENTRY_SZ; + rdma_cap->num_mtts = RDMA_NUM_MTTS; +} + +static void rdma_param_fix_part(struct rdma_service_cap *rdma_cap) +{ + rdma_cap->max_fmr_maps = RDMA_FRMR_MAP_NUM; + rdma_cap->num_mtts = RDMA_NUM_MTTS; + rdma_cap->log_mtt_seg = LOG_MTT_SEG; + rdma_cap->mtt_entry_sz 
= MTT_ENTRY_SZ; + rdma_cap->log_rdmarc_seg = LOG_RDMARC_SEG; + rdma_cap->local_ca_ack_delay = LOCAL_ACK_DELAY; + rdma_cap->num_ports = RDMA_NUM_PORTS; + rdma_cap->db_page_size = DB_PAGE_SZ; + rdma_cap->direct_wqe_size = DWQE_SZ; + rdma_cap->num_pds = NUM_PD; + rdma_cap->reserved_pds = RSVD_PD; + rdma_cap->max_xrcds = MAX_XRCDS; + rdma_cap->reserved_xrcds = RSVD_XRCDS; + rdma_cap->max_gid_per_port = MAX_GID_PER_PORT; + rdma_cap->gid_entry_sz = GID_ENTRY_SZ; + rdma_cap->reserved_lkey = RSVD_LKEY; +} + +static void rdma_param_fix(struct hinic5_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct rdma_service_cap *rdma_cap = &cap->rdma_cap; + struct dev_roce_svc_own_cap *roce_cap = + &rdma_cap->dev_rdma_cap.roce_own_cap; + + rdma_cap->log_mtt = LOG_MTT_SEG; + rdma_cap->log_rdmarc = LOG_RDMARC_SEG; + rdma_cap->reserved_qps = RDMA_RSVD_QPS; + rdma_cap->max_sq_sg = RDMA_MAX_SQ_SGE; + + /* RoCE */ + if (IS_ROCE_TYPE(dev) != 0) { + roce_cap->max_wqes = ROCE_MAX_WQES; + roce_cap->max_rq_sg = ROCE_MAX_RQ_SGE; + roce_cap->max_sq_inline_data_sz = ROCE_MAX_SQ_INLINE_DATA_SZ; + roce_cap->max_rq_desc_sz = ROCE_MAX_RQ_DESC_SZ; + roce_cap->rdmarc_entry_sz = ROCE_RDMARC_ENTRY_SZ; + roce_cap->max_qp_init_rdma = ROCE_MAX_QP_INIT_RDMA; + roce_cap->max_qp_dest_rdma = ROCE_MAX_QP_DEST_RDMA; + roce_cap->max_srq_wqes = ROCE_MAX_SRQ_WQES; + roce_cap->max_srq_sge = ROCE_MAX_SRQ_SGE; + roce_cap->srqc_entry_sz = ROCE_SRQC_ENTERY_SZ; + roce_cap->max_msg_sz = ROCE_MAX_MSG_SZ; + } + + rdma_cap->max_sq_desc_sz = RDMA_MAX_SQ_DESC_SZ; + rdma_cap->wqebb_size = WQEBB_SZ; + rdma_cap->max_cqes = RDMA_MAX_CQES; + rdma_cap->reserved_cqs = RDMA_RSVD_CQS; + rdma_cap->cqc_entry_sz = RDMA_CQC_ENTRY_SZ; + rdma_cap->cqe_size = RDMA_CQE_SZ; + rdma_cap->reserved_mrws = RDMA_RSVD_MRWS; + rdma_cap->mpt_entry_sz = RDMA_MPT_ENTRY_SZ; + + /* 2^8 - 1 + * +------------------------+-----------+ + * | 4B | 1M(20b) | Key(8b) | + * +------------------------+-----------+ + * key = 8bit key + 24bit 
index, + * now Lkey of SGE uses 2bit(bit31 and bit30), so key only have 10bit, + * we use original 8bits directly for simpilification + */ + rdma_param_fix_part(rdma_cap); + rdma_cap->num_comp_vectors = (u32)dev->cfg_mgmt->eq_info.num_ceq; + rdma_cap->page_size_cap = PAGE_SZ_CAP; + rdma_cap->flags = (RDMA_BMME_FLAG_LOCAL_INV | + RDMA_BMME_FLAG_REMOTE_INV | + RDMA_BMME_FLAG_FAST_REG_WR | + RDMA_DEV_CAP_FLAG_XRC | + RDMA_DEV_CAP_FLAG_MEM_WINDOW | + RDMA_BMME_FLAG_TYPE_2_WIN | + RDMA_BMME_FLAG_WIN_TYPE_2B | + RDMA_DEV_CAP_FLAG_ATOMIC); + rdma_cap->max_frpl_len = MAX_FRPL_LEN; + rdma_cap->max_pkeys = MAX_PKEYS; +} + +static void toe_param_fix(struct hinic5_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct toe_service_cap *toe_cap = &cap->toe_cap; + + toe_cap->pctx_sz = TOE_PCTX_SZ; + toe_cap->scqc_sz = TOE_CQC_SZ; +} + +static void ovs_param_fix(struct hinic5_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct ovs_service_cap *ovs_cap = &cap->ovs_cap; + + ovs_cap->pctx_sz = OVS_PCTX_SZ; +} + +static void ppa_param_fix(struct hinic5_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct ppa_service_cap *ppa_cap = &cap->ppa_cap; + + ppa_cap->pctx_sz = PPA_PCTX_SZ; +} + +static void fc_param_fix(struct hinic5_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct fc_service_cap *fc_cap = &cap->fc_cap; + + fc_cap->parent_qpc_size = FC_PCTX_SZ; + fc_cap->child_qpc_size = FC_CCTX_SZ; + fc_cap->sqe_size = FC_SQE_SZ; + + fc_cap->scqc_size = FC_SCQC_SZ; + fc_cap->scqe_size = FC_SCQE_SZ; + + fc_cap->srqc_size = FC_SRQC_SZ; + fc_cap->srqe_size = FC_SRQE_SZ; +} + +static void ipsec_param_fix(struct hinic5_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct ipsec_service_cap *ipsec_cap = &cap->ipsec_cap; + + ipsec_cap->sactx_sz = IPSEC_SACTX_SZ; +} + +static void ub_param_fix(struct hinic5_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + 
struct ub_service_cap *ub_cap = &cap->ub_cap; + + ub_cap->sdk_res.mpt_entry_sz = UB_MPT_ENTRY_SZ; + + ub_cap->sdk_res.cmtt_cl_start = UB_CMTT_CL_START; + ub_cap->sdk_res.cmtt_cl_end = UB_CMTT_CL_END; + ub_cap->sdk_res.cmtt_cl_sz = UB_CMTT_CL_SIZE; + + ub_cap->sdk_res.wqe_cl_start = UB_WQE_CL_START; + ub_cap->sdk_res.wqe_cl_end = UB_WQE_CL_END; + ub_cap->sdk_res.wqe_cl_sz = UB_WQE_CL_SIZE; + + ub_cap->sdk_res.dmtt_cl_start = UB_DMTT_CL_START; + ub_cap->sdk_res.dmtt_cl_end = UB_DMTT_CL_END; + ub_cap->sdk_res.dmtt_cl_sz = UB_DMTT_CL_SIZE; + + ub_cap->net_dev_cap.comp_vector_cnt = (u32)dev->cfg_mgmt->eq_info.num_ceq; + ub_cap->net_dev_cap.port_cnt = 1; +} + +static void jbof_param_fix(struct hinic5_hwdev *dev) +{ + struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct jbof_service_cap *jbof_cap = &cap->jbof_cap; + + jbof_cap->parent_qpc_size = JBOF_PCTX_SZ; + jbof_cap->child_qpc_size = JBOF_CCTX_SZ; +} + +static void init_service_param(struct hinic5_hwdev *dev) +{ + if (IS_NIC_TYPE(dev) != 0) + nic_param_fix(dev); + if (IS_RDMA_ENABLE(dev)) + rdma_mtt_fix(dev); + if (IS_ROCE_TYPE(dev) != 0) + rdma_param_fix(dev); + if (IS_FC_TYPE(dev) != 0) + fc_param_fix(dev); + if (IS_TOE_TYPE(dev) != 0) + toe_param_fix(dev); + if (IS_OVS_TYPE(dev) != 0) + ovs_param_fix(dev); + if (IS_IPSEC_TYPE(dev) != 0) + ipsec_param_fix(dev); + if (IS_PPA_TYPE(dev) != 0) + ppa_param_fix(dev); + if (IS_UB_TYPE(dev) != 0) + ub_param_fix(dev); + if (IS_JBOF_TYPE(dev) != 0) + jbof_param_fix(dev); +} + +static void cfg_get_eq_num(struct hinic5_hwdev *dev) +{ + struct cfg_eq_info *eq_info = &dev->cfg_mgmt->eq_info; + + eq_info->num_ceq = dev->hwif->attr.num_ceqs; + eq_info->num_ceq_remain = eq_info->num_ceq; +} + +static int cfg_init_eq(struct hinic5_hwdev *dev) +{ + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + struct cfg_eq *eq = NULL; + u8 num_ceq, i = 0; + + cfg_get_eq_num(dev); + num_ceq = cfg_mgmt->eq_info.num_ceq; + + sdk_info(dev->dev_hdl, "Cfg mgmt: ceqs=0x%x, remain=0x%x\n", + 
cfg_mgmt->eq_info.num_ceq, cfg_mgmt->eq_info.num_ceq_remain); + + if (num_ceq == 0) + return 0; + + eq = kcalloc(num_ceq, sizeof(*eq), GFP_KERNEL); + if (!eq) + return -ENOMEM; + + for (i = 0; i < num_ceq; ++i) { + eq[i].eqn = i; + eq[i].free = CFG_FREE; + eq[i].type = SERVICE_T_MAX; + } + + cfg_mgmt->eq_info.eq = eq; + + mutex_init(&cfg_mgmt->eq_info.eq_mutex); + + return 0; +} + +int hinic5_vector_to_eqn(void *hwdev, enum hinic5_service_type type, int vector) +{ + struct hinic5_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt = NULL; + struct cfg_eq *eq = NULL; + int eqn = -EINVAL; + int vector_num = vector; + + if (!hwdev || vector < 0) + return -EINVAL; + + if (type != SERVICE_T_ROCE) { + sdk_err(dev->dev_hdl, + "Service type :%d, only RDMA service could get eqn by vector.\n", + type); + return -EINVAL; + } + + cfg_mgmt = dev->cfg_mgmt; + if (!cfg_mgmt) { + sdk_err(dev->dev_hdl, "Service type :%d, cfg_mgmt is null.\n", type); + return -EINVAL; + } + + vector_num = (vector_num % cfg_mgmt->eq_info.num_ceq) + CFG_RDMA_CEQ_BASE; + + eq = cfg_mgmt->eq_info.eq; + if (eq[vector_num].type == SERVICE_T_ROCE && eq[vector_num].free == CFG_BUSY) + eqn = eq[vector_num].eqn; + + return eqn; +} +EXPORT_SYMBOL(hinic5_vector_to_eqn); + +static int cfg_init_interrupt(struct hinic5_hwdev *dev) +{ + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + struct cfg_irq_info *irq_info = &cfg_mgmt->irq_param_info; + u16 intr_num = dev->hwif->attr.num_irqs; + u16 intr_needed = (dev->hwif->attr.msix_flex_en != 0) ? 
(dev->hwif->attr.num_aeqs + + dev->hwif->attr.num_ceqs + dev->hwif->attr.num_sq) : intr_num; + + if (intr_num == 0) { + sdk_err(dev->dev_hdl, "Irq num cfg in fw is zero, msix_flex_en %d\n", + dev->hwif->attr.msix_flex_en); + return -EFAULT; + } + + if (intr_needed > intr_num) { + sdk_warn(dev->dev_hdl, "Irq num cfg(%u) is less than the needed irq num(%u) msix_flex_en %u\n", + intr_num, intr_needed, dev->hwif->attr.msix_flex_en); + intr_needed = intr_num; + } + + irq_info->alloc_info = kcalloc(intr_num, sizeof(*irq_info->alloc_info), + GFP_KERNEL); + if (!irq_info->alloc_info) + return -ENOMEM; + + irq_info->num_irq_hw = intr_needed; + /* Production requires only surppots MSI-X */ + cfg_mgmt->svc_cap.interrupt_type = INTR_TYPE_MSIX; + + mutex_init(&irq_info->irq_mutex); + + return 0; +} + +static int cfg_enable_interrupt(struct hinic5_hwdev *dev) +{ + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + u16 nreq = cfg_mgmt->irq_param_info.num_irq_hw; + struct irq_alloc_info_st *irq_info = NULL; + struct msix_entry *entry = NULL; + u16 i = 0; + int actual_irq, irq_id; + + irq_info = cfg_mgmt->irq_param_info.alloc_info; + + sdk_info(dev->dev_hdl, "Interrupt type: %d, irq num: %u.\n", + cfg_mgmt->svc_cap.interrupt_type, nreq); + + switch (cfg_mgmt->svc_cap.interrupt_type) { + case INTR_TYPE_MSIX: + if (nreq == 0) { + sdk_err(dev->dev_hdl, "Interrupt number cannot be zero\n"); + return -EINVAL; + } + entry = kcalloc(nreq, sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + for (i = 0; i < nreq; i++) + entry[i].entry = i; + + actual_irq = hinic5_adev_irq_vectors_alloc(dev->adapter_hdl, entry, + VECTOR_THRESHOLD, nreq); + if (actual_irq < 0) { + sdk_err(dev->dev_hdl, "Alloc msix entries with threshold 2 failed. 
actual_irq: %d\n", + actual_irq); + kfree(entry); + return -ENOMEM; + } + + nreq = (u16)actual_irq; + cfg_mgmt->irq_param_info.num_total = nreq; + cfg_mgmt->irq_param_info.num_irq_remain = nreq; + sdk_info(dev->dev_hdl, "Request %u msix vector success.\n", + nreq); + + for (i = 0; i < nreq; ++i) { + /* u16 driver uses to specify entry, OS writes */ + irq_info[i].info.msix_entry_idx = i; + /* u32 kernel uses to write allocated vector */ + irq_id = hinic5_adev_irq_vector(dev->adapter_hdl, i); + if (irq_id < 0) { + sdk_err(dev->dev_hdl, "Unable to get idx %d, irq %d\n", i, + irq_id); + hinic5_adev_irq_vectors_free(dev->adapter_hdl); + kfree(entry); + return -ENOMEM; + } + irq_info[i].info.irq_id = (u32)irq_id; + irq_info[i].type = SERVICE_T_MAX; + irq_info[i].free = CFG_FREE; + } + + kfree(entry); + + break; + + default: + sdk_err(dev->dev_hdl, "Unsupport interrupt type %d\n", + cfg_mgmt->svc_cap.interrupt_type); + break; + } + + return 0; +} + +int hinic5_alloc_irqs(void *hwdev, enum hinic5_service_type type, u16 num, + struct irq_info *irq_info_array, u16 *act_num) +{ + struct hinic5_hwdev *dev = hwdev; + struct cfg_irq_info *irq_info = NULL; + struct irq_alloc_info_st *alloc_info = NULL; + int max_num_irq, i, j; + u16 free_num_irq; + u16 alloc_num = num; + + if (!hwdev || !dev->cfg_mgmt || num == 0 || !irq_info_array || !act_num) + return -EINVAL; + + if (type > SERVICE_T_HINIC5_CQM) { + pr_err("type is out of bounds\n"); + return -EINVAL; + } + + irq_info = &dev->cfg_mgmt->irq_param_info; + mutex_lock(&irq_info->irq_mutex); + + free_num_irq = irq_info->num_irq_remain; + if (free_num_irq == 0) { + sdk_err(dev->dev_hdl, "no free irq resource in cfg mgmt.\n"); + mutex_unlock(&irq_info->irq_mutex); + return -ENOMEM; + } + if (alloc_num > free_num_irq) { + sdk_warn(dev->dev_hdl, "only %u irq resource in cfg mgmt.\n", free_num_irq); + alloc_num = free_num_irq; + } + + alloc_info = irq_info->alloc_info; + max_num_irq = irq_info->num_total; + *act_num = 0; + + for (i = 0; 
i < alloc_num; i++) { + for (j = 0; j < max_num_irq; j++) { + if (alloc_info[j].free != CFG_FREE) + continue; + + if (irq_info->num_irq_remain == 0) { + /* irq_info->num_irq_remain is not updated correctly */ + sdk_err(dev->dev_hdl, "No free irq resource in cfg mgmt\n"); + mutex_unlock(&irq_info->irq_mutex); + return -EINVAL; + } + alloc_info[j].type = type; + alloc_info[j].free = CFG_BUSY; + + irq_info_array[i].msix_entry_idx = + alloc_info[j].info.msix_entry_idx; + irq_info_array[i].irq_id = alloc_info[j].info.irq_id; + (*act_num)++; + irq_info->num_irq_remain--; + + break; + } + } + + mutex_unlock(&irq_info->irq_mutex); + return 0; +} +EXPORT_SYMBOL(hinic5_alloc_irqs); + +void hinic5_free_irq(void *hwdev, enum hinic5_service_type type, u32 irq_id) +{ + struct hinic5_hwdev *dev = hwdev; + struct cfg_irq_info *irq_info = NULL; + struct irq_alloc_info_st *alloc_info = NULL; + int max_num_irq; + int i; + + if (!hwdev || !dev->cfg_mgmt) + return; + + irq_info = &dev->cfg_mgmt->irq_param_info; + alloc_info = irq_info->alloc_info; + max_num_irq = irq_info->num_total; + + if (max_num_irq > irq_info->num_irq_hw) { + sdk_err(dev->dev_hdl, "alloc_info over range\n"); + return; + } + + mutex_lock(&irq_info->irq_mutex); + + for (i = 0; i < max_num_irq; i++) { + if (irq_id == alloc_info[i].info.irq_id && + type == alloc_info[i].type) { + if (alloc_info[i].free != CFG_BUSY) + continue; + + alloc_info[i].free = CFG_FREE; + irq_info->num_irq_remain++; + if (irq_info->num_irq_remain > max_num_irq) { + sdk_err(dev->dev_hdl, "Find target,but over range\n"); + mutex_unlock(&irq_info->irq_mutex); + return; + } + break; + } + } + + if (i >= max_num_irq) + sdk_warn(dev->dev_hdl, "Irq %u don`t need to free\n", irq_id); + + mutex_unlock(&irq_info->irq_mutex); +} +EXPORT_SYMBOL(hinic5_free_irq); + +int hinic5_alloc_ceqs(void *hwdev, enum hinic5_service_type type, int num, + int *ceq_id_array, int *act_num) +{ + struct hinic5_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt = NULL; + 
struct cfg_eq_info *eq = NULL; + int free_ceq; + int i, j; + int num_new = num; + + if (!hwdev || !dev->cfg_mgmt || !ceq_id_array || !act_num) + return -EINVAL; + + cfg_mgmt = dev->cfg_mgmt; + eq = &cfg_mgmt->eq_info; + free_ceq = eq->num_ceq_remain; + + mutex_lock(&eq->eq_mutex); + + if (num > free_ceq) { + if (free_ceq <= 0) { + sdk_err(dev->dev_hdl, "No free ceq resource in cfg mgmt\n"); + mutex_unlock(&eq->eq_mutex); + return -ENOMEM; + } + + sdk_warn(dev->dev_hdl, "Only %d ceq resource in cfg mgmt\n", + free_ceq); + } + + *act_num = 0; + + num_new = min(num_new, eq->num_ceq - CFG_RDMA_CEQ_BASE); + for (i = 0; i < num_new; i++) { + if (eq->num_ceq_remain == 0) { + sdk_warn(dev->dev_hdl, "Alloc %d ceqs, less than required %d ceqs\n", + *act_num, num_new); + mutex_unlock(&eq->eq_mutex); + return 0; + } + + for (j = CFG_RDMA_CEQ_BASE; j < eq->num_ceq; j++) { + if (eq->eq[j].free == CFG_FREE) { + eq->eq[j].type = type; + eq->eq[j].free = CFG_BUSY; + eq->num_ceq_remain--; + ceq_id_array[i] = eq->eq[j].eqn; + (*act_num)++; + break; + } + } + } + + mutex_unlock(&eq->eq_mutex); + return 0; +} +EXPORT_SYMBOL(hinic5_alloc_ceqs); + +void hinic5_free_ceq(void *hwdev, enum hinic5_service_type type, int ceq_id) +{ + struct hinic5_hwdev *dev = hwdev; + struct cfg_mgmt_info *cfg_mgmt = NULL; + struct cfg_eq_info *eq = NULL; + u8 num_ceq; + u8 i = 0; + + if (!hwdev || !dev->cfg_mgmt) + return; + + cfg_mgmt = dev->cfg_mgmt; + eq = &cfg_mgmt->eq_info; + num_ceq = eq->num_ceq; + + mutex_lock(&eq->eq_mutex); + + for (i = 0; i < num_ceq; i++) { + if (ceq_id == eq->eq[i].eqn && type == cfg_mgmt->eq_info.eq[i].type) { + if (eq->eq[i].free != CFG_BUSY) + continue; + + eq->eq[i].free = CFG_FREE; + eq->num_ceq_remain++; + if (eq->num_ceq_remain > num_ceq) + eq->num_ceq_remain %= num_ceq; + + mutex_unlock(&eq->eq_mutex); + return; + } + } + + if (i >= num_ceq) + sdk_warn(dev->dev_hdl, "ceq %d don`t need to free.\n", ceq_id); + + mutex_unlock(&eq->eq_mutex); +} 
+EXPORT_SYMBOL(hinic5_free_ceq); + +int init_cfg_mgmt(struct hinic5_hwdev *dev) +{ + int err; + struct cfg_mgmt_info *cfg_mgmt = NULL; + + cfg_mgmt = kzalloc(sizeof(*cfg_mgmt), GFP_KERNEL); + if (!cfg_mgmt) + return -ENOMEM; + + dev->cfg_mgmt = cfg_mgmt; + cfg_mgmt->hwdev = dev; + + err = cfg_init_eq(dev); + if (err != 0) { + sdk_err(dev->dev_hdl, "Failed to init cfg event queue, err: %d\n", + err); + goto free_mgmt_mem; + } + + err = cfg_init_interrupt(dev); + if (err != 0) { + sdk_err(dev->dev_hdl, "Failed to init cfg interrupt, err: %d\n", + err); + goto free_eq_mem; + } + + err = cfg_enable_interrupt(dev); + if (err != 0) { + sdk_err(dev->dev_hdl, "Failed to enable cfg interrupt, err: %d\n", + err); + goto free_interrupt_mem; + } + + return 0; + +free_interrupt_mem: + kfree(cfg_mgmt->irq_param_info.alloc_info); + mutex_deinit(&((cfg_mgmt->irq_param_info).irq_mutex)); + cfg_mgmt->irq_param_info.alloc_info = NULL; + +free_eq_mem: + kfree(cfg_mgmt->eq_info.eq); + mutex_deinit(&cfg_mgmt->eq_info.eq_mutex); + cfg_mgmt->eq_info.eq = NULL; + +free_mgmt_mem: + kfree(cfg_mgmt); + dev->cfg_mgmt = NULL; + return err; +} + +void free_cfg_mgmt(struct hinic5_hwdev *dev) +{ + struct cfg_mgmt_info *cfg_mgmt = dev->cfg_mgmt; + + /* if the allocated resource were recycled */ + if (cfg_mgmt->irq_param_info.num_irq_remain != + cfg_mgmt->irq_param_info.num_total || + cfg_mgmt->eq_info.num_ceq_remain != cfg_mgmt->eq_info.num_ceq) + sdk_err(dev->dev_hdl, "Can't reclaim all irq and event queue, please check\n"); + + hinic5_adev_irq_vectors_free(dev->adapter_hdl); + + kfree(cfg_mgmt->irq_param_info.alloc_info); + cfg_mgmt->irq_param_info.alloc_info = NULL; + mutex_deinit(&((cfg_mgmt->irq_param_info).irq_mutex)); + + if (cfg_mgmt->eq_info.num_ceq != 0) { + kfree(cfg_mgmt->eq_info.eq); + cfg_mgmt->eq_info.eq = NULL; + mutex_deinit(&cfg_mgmt->eq_info.eq_mutex); + } + + kfree(cfg_mgmt); +} + +int init_capability(struct hinic5_hwdev *dev) +{ + int err; + struct cfg_mgmt_info *cfg_mgmt = 
dev->cfg_mgmt; + + cfg_mgmt->svc_cap.sf_svc_attr.ft_pf_en = false; + cfg_mgmt->svc_cap.sf_svc_attr.rdma_pf_en = false; + + err = hinic5_get_dev_cap(dev); + if (err != 0) + return err; + + init_service_param(dev); + + sdk_info(dev->dev_hdl, "Init capability success\n"); + return 0; +} + +void free_capability(struct hinic5_hwdev *dev) +{ + sdk_info(dev->dev_hdl, "Free capability success"); +} + +bool hinic5_support_nic(void *hwdev, struct nic_service_cap *cap) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (IS_NIC_TYPE(dev) == 0) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.nic_cap, + sizeof(struct nic_service_cap)); + + return true; +} +EXPORT_SYMBOL(hinic5_support_nic); + +bool hinic5_support_ppa(void *hwdev, struct ppa_service_cap *cap) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (IS_PPA_TYPE(dev) == 0) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.ppa_cap, + sizeof(struct ppa_service_cap)); + + return true; +} +EXPORT_SYMBOL(hinic5_support_ppa); + +bool hinic5_support_migr(void *hwdev, struct migr_service_cap *cap) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (IS_MIGR_TYPE(dev) == 0) + return false; + + if (cap) + cap->master_host_id = dev->cfg_mgmt->svc_cap.master_host_id; + + return true; +} +EXPORT_SYMBOL(hinic5_support_migr); + +bool hinic5_support_ipsec(void *hwdev, struct ipsec_service_cap *cap) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (IS_IPSEC_TYPE(dev) == 0) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.ipsec_cap, + sizeof(struct ipsec_service_cap)); + + return true; +} +EXPORT_SYMBOL(hinic5_support_ipsec); + +bool hinic5_support_macsec(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (IS_MACSEC_TYPE(dev) == 0) + return false; + + return true; +} +EXPORT_SYMBOL(hinic5_support_macsec); + +bool hinic5_support_roce(void 
*hwdev, struct rdma_service_cap *cap) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (IS_ROCE_TYPE(dev) == 0) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.rdma_cap, + sizeof(struct rdma_service_cap)); + + return true; +} +EXPORT_SYMBOL(hinic5_support_roce); + +bool hinic5_support_fc(void *hwdev, struct fc_service_cap *cap) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (IS_FC_TYPE(dev) == 0) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.fc_cap, + sizeof(struct fc_service_cap)); + + return true; +} +EXPORT_SYMBOL(hinic5_support_fc); + +bool hinic5_support_rdma(void *hwdev, struct rdma_service_cap *cap) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (IS_RDMA_TYPE(dev) == 0) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.rdma_cap, + sizeof(struct rdma_service_cap)); + + return true; +} +EXPORT_SYMBOL(hinic5_support_rdma); + +bool hinic5_is_rdma_en(void *hwdev, struct rdma_service_cap *cap) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_RDMA_ENABLE(dev)) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.rdma_cap, sizeof(struct rdma_service_cap)); + + return true; +} +EXPORT_SYMBOL(hinic5_is_rdma_en); + +bool hinic5_support_ovs(void *hwdev, struct ovs_service_cap *cap) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (IS_OVS_TYPE(dev) == 0) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.ovs_cap, + sizeof(struct ovs_service_cap)); + + return true; +} +EXPORT_SYMBOL(hinic5_support_ovs); + +bool hinic5_support_vbs(void *hwdev, struct vbs_service_cap *cap) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (IS_VBS_TYPE(dev) == 0) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.vbs_cap, + sizeof(struct vbs_service_cap)); + + return true; +} +EXPORT_SYMBOL(hinic5_support_vbs); 
+ +/* Only PPF support it, PF is not */ +bool hinic5_support_toe(void *hwdev, struct toe_service_cap *cap) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (IS_TOE_TYPE(dev) == 0) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.toe_cap, + sizeof(struct toe_service_cap)); + + return true; +} +EXPORT_SYMBOL(hinic5_support_toe); + +bool hinic5_support_ub(void *hwdev, struct ub_service_cap *cap) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (IS_UB_TYPE(dev) == 0) { + sdk_info(dev->dev_hdl, "dev not ub type\n"); + return false; + } + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.ub_cap, + sizeof(struct ub_service_cap)); + + return true; +} +EXPORT_SYMBOL(hinic5_support_ub); + +bool hinic5_support_jbof(void *hwdev, struct jbof_service_cap *cap) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (IS_JBOF_TYPE(dev) == 0) { + sdk_err(dev->dev_hdl, "dev not jbof type\n"); + return false; + } + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.jbof_cap, + sizeof(struct jbof_service_cap)); + + return true; +} +EXPORT_SYMBOL(hinic5_support_jbof); + +bool hinic5_support_vroce(void *hwdev, struct rdma_service_cap *cap) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (IS_VROCE_TYPE(dev) == 0) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.rdma_cap, + sizeof(struct rdma_service_cap)); + + return true; +} +EXPORT_SYMBOL(hinic5_support_vroce); + +bool hinic5_support_dmmu(void *hwdev, struct dmmu_service_cap *cap) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (IS_DMMU_TYPE(dev) == 0) + return false; + + if (cap) + memcpy(cap, &dev->cfg_mgmt->svc_cap.dmmu_cap, + sizeof(struct dmmu_service_cap)); + + return true; +} +EXPORT_SYMBOL(hinic5_support_dmmu); + +bool hinic5_support_bifur(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + return IS_BIFUR_TYPE(dev) != 0; 
+} +EXPORT_SYMBOL(hinic5_support_bifur); + +bool hinic5_func_for_mgmt(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (dev->cfg_mgmt->svc_cap.chip_svc_type != 0) + return false; + else + return true; +} + +bool hinic5_support_hihtr(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (IS_HIHTR_TYPE(dev) == 0) + return false; + + return true; +} +EXPORT_SYMBOL(hinic5_support_hihtr); + +bool hinic5_get_stateful_enable(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + return dev->cfg_mgmt->svc_cap.sf_en; +} +EXPORT_SYMBOL(hinic5_get_stateful_enable); + +u8 hinic5_host_oq_id_mask(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting host oq id mask\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.host_oq_id_mask_val; +} +EXPORT_SYMBOL(hinic5_host_oq_id_mask); + +u8 hinic5_host_id(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting host id\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.host_id; +} +EXPORT_SYMBOL(hinic5_host_id); + +u16 hinic5_host_total_func(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting host total function number\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.host_total_function; +} +EXPORT_SYMBOL(hinic5_host_total_func); + +u16 hinic5_func_max_qnum(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting function max queue number\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.nic_cap.max_sqs; +} +EXPORT_SYMBOL(hinic5_func_max_qnum); + +u16 hinic5_func_max_nic_qnum(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting function max queue number\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.nic_cap.max_sqs; +} 
+EXPORT_SYMBOL(hinic5_func_max_nic_qnum); + +u8 hinic5_func_cos_mask_mode(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting function cos mask mode\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.cos_mask_mode; +} +EXPORT_SYMBOL(hinic5_func_cos_mask_mode); + +u8 hinic5_func_dev_default_cos(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting function default cos\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.dcb_state.default_cos; +} +EXPORT_SYMBOL(hinic5_func_dev_default_cos); + +u8 hinic5_ep_id(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting ep id\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.ep_id; +} +EXPORT_SYMBOL(hinic5_ep_id); + +u8 hinic5_er_id(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting er id\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.er_id; +} +EXPORT_SYMBOL(hinic5_er_id); + +u8 hinic5_physical_port_id(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting physical port id\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.port_id; +} +EXPORT_SYMBOL(hinic5_physical_port_id); + +u16 hinic5_func_max_vf(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting max vf number\n"); + return 0; + } + return dev->cfg_mgmt->svc_cap.max_vf; +} +EXPORT_SYMBOL(hinic5_func_max_vf); + +int hinic5_cos_valid_bitmap(void *hwdev, u8 *func_dft_cos, u8 *port_cos_bitmap) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!dev || !dev->cfg_mgmt) { + pr_err("Hwdev pointer is NULL for getting cos valid bitmap\n"); + return 1; + } + *func_dft_cos = dev->cfg_mgmt->svc_cap.cos_valid_bitmap; + *port_cos_bitmap = dev->cfg_mgmt->svc_cap.port_cos_valid_bitmap; + + return 0; +} 
+EXPORT_SYMBOL(hinic5_cos_valid_bitmap); + +void hinic5_shutdown_hwdev(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!hwdev) + return; + + if (IS_SLAVE_HOST(dev) != 0) + set_slave_host_enable(hwdev, hinic5_pcie_itf_id(hwdev), false); +} + +u32 hinic5_host_pf_num(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting pf number capability\n"); + return 0; + } + + return dev->cfg_mgmt->svc_cap.pf_num; +} +EXPORT_SYMBOL(hinic5_host_pf_num); + +u32 hinic5_host_pf_id_start(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting pf id start capability\n"); + return 0; + } + + return dev->cfg_mgmt->svc_cap.pf_id_start; +} +EXPORT_SYMBOL(hinic5_host_pf_id_start); + +u8 hinic5_flexq_en(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!hwdev) + return 0; + + return dev->cfg_mgmt->svc_cap.flexq_en; +} +EXPORT_SYMBOL(hinic5_flexq_en); + +bool hinic5_support_htn(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!hwdev) { + pr_err("Hwdev pointer is NULL for getting HTN support capability\n"); + return false; + } + + return COMM_SUPPORT_HTN_CMD(dev); +} +EXPORT_SYMBOL(hinic5_support_htn); + +bool hinic5_is_vf_isolation(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + struct service_cap *cap = NULL; + + if (unlikely(!dev || !dev->cfg_mgmt)) { + pr_err("Hwdev pointer or cfg_mgmt is NULL\n"); + return false; + } + + cap = &dev->cfg_mgmt->svc_cap; + return cap->vf_isolation; +} +EXPORT_SYMBOL(hinic5_is_vf_isolation); + +/** + * Prototype : hinic5_bat_get_l3i_entry_config + * Description : Gets L3I entry config corresponding to BAT register file. 
+ * Input : const struct hinic5_hwdev *hwdev + * Output : struct hinic5_bat_entry_config *entry_config + * Return Value : int + * 1.Date : 2024/7/30 + * Modification : Created function + */ +int hinic5_bat_get_l3i_entry_config(const struct hinic5_hwdev *hwdev, + struct hinic5_bat_entry_config *entry_config) +{ + struct hinic5_func_attr *func_attr = NULL; + struct service_cap *svc_cap = NULL; + enum func_type func_type; + bool ft_enable; + bool rdma_enable; + + if (!hwdev || !hwdev->hwif || !hwdev->cfg_mgmt || !entry_config) + return -EINVAL; + + func_attr = &hwdev->hwif->attr; + svc_cap = &hwdev->cfg_mgmt->svc_cap; + func_type = func_attr->func_type; + ft_enable = svc_cap->sf_svc_attr.ft_en; + rdma_enable = svc_cap->sf_svc_attr.rdma_en; + + if (func_type != TYPE_PF && func_type != TYPE_PPF) { + entry_config->mapping = false; + entry_config->bat_entry_offset = 0; + entry_config->bat_entry_size = 0; + return 0; + } + + entry_config->mapping = true; + entry_config->bat_entry_size = HINIC5_BAT_ENTRY_SIZE; + + if (ft_enable && rdma_enable) + entry_config->bat_entry_offset = HINIC5_BAT_L3I_OFF_FT_RDMA_PF; + else if (ft_enable) + entry_config->bat_entry_offset = HINIC5_BAT_L3I_OFF_FT_PF; + else if (rdma_enable) + entry_config->bat_entry_offset = HINIC5_BAT_L3I_OFF_RDMA_PF; + else + entry_config->bat_entry_offset = HINIC5_BAT_L3I_OFF_PF; + + return 0; +} + +int hinic5_dcb_state_op(void *hwdev, enum hisdk5_dcb_state_op op, + struct hisdk5_dcb_state *dcb_state) +{ + struct hinic5_hwdev *dev = hwdev; + struct hisdk5_dcb_state *state = NULL; + + if (!dev || !dev->cfg_mgmt || !dcb_state) { + pr_err("Hwdev pointer or dcb_state pointer is NULL\n"); + return -EINVAL; + } + + state = &dev->cfg_mgmt->svc_cap.dcb_state; + if (op == HISDK5_DCB_STATE_GET) + memcpy(dcb_state, state, sizeof(struct hisdk5_dcb_state)); + else + memcpy(state, dcb_state, sizeof(struct hisdk5_dcb_state)); + + return 0; +} +EXPORT_SYMBOL(hinic5_dcb_state_op); + +int hinic5_get_port_info(void *hwdev, struct 
mag_port_info *port_info, u16 channel) +{ + struct mag_cmd_get_port_info port_msg = { 0 }; + u16 out_size = sizeof(port_msg); + int err; + struct hinic5_hwdev *dev = hwdev; + + if (unlikely(!hwdev || !port_info)) + return -EINVAL; + + port_msg.port_id = hinic5_physical_port_id(hwdev); + + err = hinic5_msg_to_mgmt_sync(hwdev, HINIC5_MOD_HILINK, MAG_CMD_GET_PORT_INFO, &port_msg, + sizeof(port_msg), &port_msg, &out_size, 0, channel); + if (err != 0 || out_size == 0 || port_msg.head.status != 0) { + sdk_err(dev->dev_hdl, + "Failed to get port info, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", + err, port_msg.head.status, out_size, channel); + return -EIO; + } + + port_info->autoneg_cap = port_msg.an_support; + port_info->autoneg_state = port_msg.an_en; + port_info->duplex = port_msg.duplex; + port_info->port_type = port_msg.wire_type; + port_info->speed = port_msg.speed; + port_info->fec = port_msg.fec; + port_info->supported_mode = port_msg.supported_mode; + port_info->advertised_mode = port_msg.advertised_mode; + port_info->supported_fec_mode = port_msg.supported_fec_mode; + + return 0; +} +EXPORT_SYMBOL(hinic5_get_port_info); + +int hinic5_get_speed(void *hwdev, enum mag_cmd_port_speed *speed, u16 channel) +{ + struct mag_port_info port_info = {0}; + int err; + + if (unlikely(!hwdev || !speed)) + return -EINVAL; + + err = hinic5_get_port_info(hwdev, &port_info, channel); + if (err != 0) + return err; + + *speed = port_info.speed; + + return 0; +} +EXPORT_SYMBOL(hinic5_get_speed); diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/crm/hinic5_hw_cfg.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/crm/hinic5_hw_cfg.h new file mode 100644 index 00000000..d42cd97c --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/crm/hinic5_hw_cfg.h @@ -0,0 +1,444 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_HW_CFG_H +#define HINIC5_HW_CFG_H + +#include <linux/types.h> +#include 
<linux/mutex.h> +#include "cfg_mgmt_mpu_cmd_defs.h" +#include "hinic5_hwdev.h" + +enum { + CFG_FREE = 0, + CFG_BUSY = 1 +}; + +/* start position for CEQs allocation, Max number of CEQs is 32 */ +enum { + CFG_RDMA_CEQ_BASE = 0 +}; + +/* Public resources */ +#define CHIP_SMF_NUM_MIN 4 /* Min number of SMFs supported */ +#define CHIP_SMF_NUM_MAX 8 /* Max number of SMFs supported */ + +/* RDMA resource */ +#define K_UNIT BIT(10) +#define M_UNIT BIT(20) +#define G_UNIT BIT(30) + +#define VIRTIO_BASE_VQ_SIZE 2048U +#define VIRTIO_DEFAULT_VQ_SIZE 8192U + +/* L2NIC */ +#define HINIC5_CFG_MAX_QP 256 + +/* RDMA */ +#define RDMA_RSVD_QPS 2 +#define ROCE_MAX_WQES (8 * K_UNIT - 1) +#define IWARP_MAX_WQES (8 * K_UNIT) + +#define RDMA_MAX_SQ_SGE 32 + +#define ROCE_MAX_RQ_SGE 16 + +/* value changed should change ROCE_MAX_WQE_BB_PER_WR synchronously */ +#define RDMA_MAX_SQ_DESC_SZ (256) + +/* (256B(cache_line_len) - 16B(ctrl_seg_len) - 48B(max_task_seg_len)) */ +#define ROCE_MAX_SQ_INLINE_DATA_SZ 912 + +#define ROCE_MAX_RQ_DESC_SZ 256 + +#define ROCE_QPC_ENTRY_SZ 512 +#define HYPER_ROCE_QPC_ENTRY_SZ 1024 + +#define WQEBB_SZ 64 + +#define ROCE_RDMARC_ENTRY_SZ 32 +#define ROCE_MAX_QP_INIT_RDMA 128 +#define ROCE_MAX_QP_DEST_RDMA 128 + +#define ROCE_MAX_SRQ_WQES (16 * K_UNIT - 1) +#define ROCE_MAX_SRQ_SGE 15 +#define ROCE_SRQC_ENTERY_SZ 64 + +#define RDMA_MAX_CQES (8 * M_UNIT - 1) +#define RDMA_RSVD_CQS 0 + +#define RDMA_CQC_ENTRY_SZ 128 + +#define RDMA_CQE_SZ 64 +#define RDMA_RSVD_MRWS 128 +#define RDMA_MPT_ENTRY_SZ 64 +#define RDMA_FRMR_MAP_NUM 0xff +#define RDMA_NUM_MTTS (1 * G_UNIT) +#define LOG_MTT_SEG 9 +#define MTT_ENTRY_SZ 8 +#define LOG_RDMARC_SEG 3 + +#define LOCAL_ACK_DELAY 15 +#define RDMA_NUM_PORTS 1 +#define ROCE_MAX_MSG_SZ (2 * G_UNIT) + +#define DB_PAGE_SZ (4 * K_UNIT) +#define DWQE_SZ 256 + +#define NUM_PD (256 * K_UNIT) +#define RSVD_PD 0 + +#define MAX_XRCDS (64 * K_UNIT) +#define RSVD_XRCDS 0 + +#define MAX_GID_PER_PORT 128 +#define GID_ENTRY_SZ 32 +#define 
RSVD_LKEY ((RDMA_RSVD_MRWS - 1) << 8) +#define NUM_COMP_VECTORS 32 +#define PAGE_SZ_CAP ((1UL << 12) | (1UL << 16) | (1UL << 21)) +#define ROCE_MODE 1 + +#define MAX_FRPL_LEN 511 +#define MAX_PKEYS 1 +#define COS_DEFAULT_MASK_MODE 0xFF + +/* ToE */ +#define TOE_PCTX_SZ 1024 +#define TOE_CQC_SZ 64 + +/* IoE */ +#define IOE_PCTX_SZ 512 + +/* FC */ +#define FC_PCTX_SZ 256 +#define FC_CCTX_SZ 256 +#define FC_SQE_SZ 128 +#define FC_SCQC_SZ 64 +#define FC_SCQE_SZ 64 +#define FC_SRQC_SZ 64 +#define FC_SRQE_SZ 32 + +/* OVS */ +#define OVS_PCTX_SZ 512 + +/* PPA */ +#define PPA_PCTX_SZ 512 + +/* IPsec */ +#define IPSEC_SACTX_SZ 512 + +/* UB */ +#define UB_CQC_ENTRY_SZ 128 +#define UB_QPC_ENTRY_SZ 1024 +#define UB_SRQC_ENTERY_SZ 64 +#define UB_DWQE_SZ 256 +#define UB_LOG_MTT_SEG 5 +#define UB_NUM_MTTS (1 * G_UNIT) +#define UB_MTT_ENTRY_SZ 8 +#define UB_MPT_ENTRY_SZ 64 +#define MAX_JETTY_DEST_UB 128 +#define UB_RC_ENTRY_SZ 32 +#define LOG_UBRC_SEG 3 +#define UB_MAX_JFS_INLINE_DATA_SZ 192 +#define UB_MAX_MSG_SZ (2 * G_UNIT) +#define UB_MAX_VTP (0x20000) + +#define UB_CMTT_CL_START 0x20 +#define UB_CMTT_CL_END 0x27 +#define UB_CMTT_CL_SIZE 0 // 0: 256B; 1: 512B; 2: 1024B +#define UB_WQE_CL_START 0x10 +#define UB_WQE_CL_END 0x1f +#define UB_WQE_CL_SIZE 0 // 0: 256B; 1: 512B; 2: 1024B + +#define UB_DMTT_CL_START 0x20 +#define UB_DMTT_CL_END 0x27 +#define UB_DMTT_CL_SIZE 0 // 0: 256B; 1: 512B; 2: 1024B + +/* JBOF */ +#define JBOF_PCTX_SZ 256 +#define JBOF_CCTX_SZ 256 +struct dev_sf_svc_attr { + bool ft_en; /* business enable flag (not include RDMA) */ + bool ft_pf_en; /* In FPGA Test VF resource is in PF or not, + * 0 - VF, 1 - PF, VF doesn't need this bit. + */ + bool rdma_en; + bool rdma_pf_en;/* In FPGA Test VF RDMA resource is in PF or not, + * 0 - VF, 1 - PF, VF doesn't need this bit. 
+ */ +}; + +enum intr_type { + INTR_TYPE_MSIX, + INTR_TYPE_MSI, + INTR_TYPE_INT, + INTR_TYPE_NONE, + /* PXE,OVS need single thread processing, + * synchronization messages must use poll wait mechanism interface + */ +}; + +/* device capability */ +struct service_cap { + struct dev_sf_svc_attr sf_svc_attr; + u32 svc_type; /* user input service type */ + u32 chip_svc_type; /* HW supported service type, reference to servic_bit_define_e */ + + u8 host_id; + u8 ep_id; + u8 er_id; /* PF/VF's ER */ + u8 port_id; /* PF/VF's physical port */ + + /* Host global resources */ + u16 host_total_function; + u8 pf_num; + u8 pf_id_start; + u16 vf_num; /* max numbers of vf in current host */ + u16 vf_id_start; + u8 host_oq_id_mask_val; + u8 host_valid_bitmap; + u8 master_host_id; + u8 srv_multi_host_mode; + u16 virtio_vq_size; + u16 nvme_qp_num; + u32 virtio_vq_num; + u16 vio_func_num; + + u8 timer_pf_num; + u8 timer_pf_id_start; + u16 timer_vf_num; + u16 timer_vf_id_start; + struct timer_vf_info_seg timer_vf_segs[TIMER_VF_SEGS_NUM]; + + u8 flexq_en; + u8 cos_valid_bitmap; + u8 port_cos_valid_bitmap; + u8 func_gpa_spu_en; + u16 max_vf; /* max VF number that PF supported */ + + /* Fake VF capabilities */ + u16 fake_vf_parent_func_id; /* Parent function id of the fake vf group */ + u16 fake_vf_start_id; + u16 fake_vf_num; /* fake vf number that PF supported */ + u16 fake_vf_num_cfg; /* fake vf number that PF configured */ + bool fake_vf_lazy_init; + + u32 fake_vf_max_pctx; + u32 fake_vf_max_scqc_ctx; + u32 fake_vf_max_srqc_ctx; + u32 fake_vf_max_gid_ctx; + u32 fake_vf_max_mpt_ctx; + u32 fake_vf_max_childc_ctx; + + bool fake_vf_qpc_ctx_size_en; + u8 fake_vf_qpc_ctx_size_order; + + u16 fake_vf_bfilter_start_addr; + u16 fake_vf_bfilter_len; + + /* DO NOT get interrupt_type from firmware */ + enum intr_type interrupt_type; + + bool sf_en; /* stateful business status */ + u8 timer_en; /* 0:disable, 1:enable */ + u8 bloomfilter_en; /* 0:disable, 1:enable */ + + /* SMF capabilities */ + u8 
lb_mode; + u8 smf_pg; /* A bitmap indicating which SMFs are enabled. + * The valid length of this bitmap is smf_max_num. + */ + u8 smf_max_num; /* The Number of SMFs in the chip */ + u8 smf_enabled_num; /* The Number of SMFs currently enabled */ + + /* SMF BAT capabilities */ + u8 bat_cid_index_bit_width; + + /* SRIOV capabilities */ + bool vf_isolation; /* The VF communicates directly with the Mgmt */ + + /* For test */ + u32 test_mode; + u32 test_qpc_num; + u32 test_qpc_resvd_num; + u32 test_page_size_reorder; + bool test_xid_alloc_mode; + bool test_gpa_check_enable; + u8 test_qpc_alloc_mode; + u8 test_scqc_alloc_mode; + + u32 test_max_conn_num; + u32 test_max_cache_conn_num; + u32 test_scqc_num; + u32 test_mpt_num; + u32 test_scq_resvd_num; + u32 test_mpt_recvd_num; + u32 test_hash_num; + u32 test_reorder_num; + + u32 max_connect_num; /* PF/VF maximum connection number(1M) */ + /* The maximum connections which can be stick to cache memory, max 1K */ + u16 max_stick2cache_num; + /* Starting address in cache memory for bloom filter, 64Bytes aligned */ + u16 bfilter_start_addr; + /* Length for bloom filter, aligned on 64Bytes. The size is length*64B. + * Bloom filter memory size + 1 must be power of 2. + * The maximum memory size of bloom filter is 4M + */ + u16 bfilter_len; + /* The size of hash bucket tables, align on 64 entries. + * Be used to AND (&) the hash value. Bucket Size +1 must be power of 2. 
+ * The maximum number of hash bucket is 4M + */ + u16 hash_bucket_num; + + u8 cos_mask_mode; + + struct hisdk5_dcb_state dcb_state; + + cfg_fw_update_ext_caps fw_update_cap; /* fw update capability */ + struct nic_service_cap nic_cap; /* NIC capability */ + struct rdma_service_cap rdma_cap; /* RDMA capability */ + struct fc_service_cap fc_cap; /* FC capability */ + struct toe_service_cap toe_cap; /* ToE capability */ + struct ovs_service_cap ovs_cap; /* OVS capability */ + struct ipsec_service_cap ipsec_cap; /* IPsec capability */ + struct ppa_service_cap ppa_cap; /* PPA capability */ + struct vbs_service_cap vbs_cap; /* VBS capability */ + struct ub_service_cap ub_cap; /* UB capability */ + struct jbof_service_cap jbof_cap; /* JBOF capability */ + struct dmmu_service_cap dmmu_cap; /* DMMU capability */ + struct cfm_service_cap cfm_cap; /* CFM capability */ +}; + +struct svc_cap_info { + u32 func_idx; + struct service_cap cap; +}; + +struct cfg_eq { + enum hinic5_service_type type; + int eqn; + int free; /* 1 - alocated, 0- freed */ +}; + +struct cfg_eq_info { + struct cfg_eq *eq; + + u8 num_ceq; + + u8 num_ceq_remain; + + /* mutex used for allocate EQs */ + struct mutex eq_mutex; +}; + +struct irq_alloc_info_st { + enum hinic5_service_type type; + int free; /* 1 - alocated, 0- freed */ + struct irq_info info; +}; + +struct cfg_irq_info { + struct irq_alloc_info_st *alloc_info; + u16 num_total; + u16 num_irq_remain; + u16 num_irq_hw; /* device max irq number */ + + /* mutex used for allocate EQs */ + struct mutex irq_mutex; +}; + +#define VECTOR_THRESHOLD 2 + +struct cfg_mgmt_info { + struct hinic5_hwdev *hwdev; + struct service_cap svc_cap; + struct cfg_eq_info eq_info; /* EQ */ + struct cfg_irq_info irq_param_info; /* IRQ */ + u32 func_seq_num; /* temporary */ +}; + +#define CFG_SERVICE_FT_EN (CFG_SERVICE_MASK_VBS | CFG_SERVICE_MASK_TOE | \ + CFG_SERVICE_MASK_IPSEC | CFG_SERVICE_MASK_FC | \ + CFG_SERVICE_MASK_VIRTIO | CFG_SERVICE_MASK_OVS) +#define 
CFG_SERVICE_RDMA_EN CFG_SERVICE_MASK_ROCE + +#define IS_NIC_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_NIC) +#define IS_ROCE_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_ROCE) +#define IS_VBS_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_VBS) +#define IS_TOE_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_TOE) +#define IS_IPSEC_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_IPSEC) +#define IS_MACSEC_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_MACSEC) +#define IS_FC_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_FC) +#define IS_OVS_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_OVS) +#define IS_FT_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_FT_EN) +#define IS_RDMA_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_RDMA_EN) +#define IS_RDMA_ENABLE(dev) \ + ((dev)->cfg_mgmt->svc_cap.sf_svc_attr.rdma_en) +#define IS_FT_ENABLE(dev) \ + ((dev)->cfg_mgmt->svc_cap.sf_svc_attr.ft_en) +#define IS_PPA_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_PPA) +#define IS_MIGR_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_MIGRATE) +#define IS_UB_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_UB) +#define IS_JBOF_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_JBOF) +#define IS_VROCE_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_VROCE) +#define IS_DMMU_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_DMMU) +#define IS_BIFUR_TYPE(dev) \ + (((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_BIFUR) +#define IS_HIHTR_TYPE(dev) \ + 
(((u32)(dev)->cfg_mgmt->svc_cap.chip_svc_type) & CFG_SERVICE_MASK_HIHTR) + +int init_cfg_mgmt(struct hinic5_hwdev *dev); + +void free_cfg_mgmt(struct hinic5_hwdev *dev); + +int init_capability(struct hinic5_hwdev *dev); + +void free_capability(struct hinic5_hwdev *dev); + +/* Reference: hwsdk/hinic5_cqm/hinic5_cqm_bat_cla.h#HINIC5_CQM_BAT_ENTRY_MAX */ +#define HINIC5_BAT_ENTRY_MAX 16 +/* Reference: hwsdk/hinic5_cqm/hinic5_cqm_bat_cla.h#HINIC5_CQM_BAT_ENTRY_SIZE */ +#define HINIC5_BAT_ENTRY_SIZE 16 +#define HINIC5_BAT_MAX (HINIC5_BAT_ENTRY_MAX * HINIC5_BAT_ENTRY_SIZE) + +#define HINIC5_BAT_L3I_OFF_FT_RDMA_PF (10 * HINIC5_BAT_ENTRY_SIZE) +#define HINIC5_BAT_L3I_OFF_FT_PF (7 * HINIC5_BAT_ENTRY_SIZE) +#define HINIC5_BAT_L3I_OFF_RDMA_PF (5 * HINIC5_BAT_ENTRY_SIZE) +#define HINIC5_BAT_L3I_OFF_PF 0 + +struct hinic5_bat_entry_config { + bool mapping; /* Whether this entry should be mapped into + * the function address space. + */ + u32 bat_entry_offset; /* Offset of the entry in the BAT register file. */ + u32 bat_entry_size; /* Size of the entry in the BAT register file. 
*/ +}; + +int hinic5_bat_get_l3i_entry_config(const struct hinic5_hwdev *hwdev, + struct hinic5_bat_entry_config *entry_config); + +static inline struct service_cap *get_device_capablity(const struct hinic5_hwdev *hwdev) +{ + return &hwdev->cfg_mgmt->svc_cap; +} + +#endif + diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/crm/hinic5_hw_comm.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/crm/hinic5_hw_comm.c new file mode 100644 index 00000000..228a96e8 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/crm/hinic5_hw_comm.c @@ -0,0 +1,1852 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include <linux/kernel.h> +#include <linux/msi.h> +#include <linux/types.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/semaphore.h> +#include <linux/interrupt.h> + +#include "ossl_knl.h" +#include "hinic5_crm.h" +#include "hinic5_hw.h" +#include "hinic5_common.h" +#include "hinic5_csr_inner.h" +#include "hinic5_hwdev.h" +#include "hinic5_hwif_inner.h" +#include "hinic5_mgmt.h" +#include "hinic5_hw_cfg.h" +#include "hinic5_cmdq.h" +#include "mpu_inband_cmd_defs.h" +#include "mpu_inband_cmd.h" +#include "hinic5_vram_common.h" +#include "hinic5_hinic5_vram_api.h" +#include "hinic5_hw_comm.h" + +unsigned char lowpower_mode; +module_param(lowpower_mode, byte, 0644); +MODULE_PARM_DESC(lowpower_mode, "Set lowpower test mode, 0-sml rd loop, 1-writeback ddr, 2-off"); + +/* 1872 FT B505临时修改方案,待MQM修复CMQ约束后删除 */ +static unsigned char cmdq_cos_offset; +module_param(cmdq_cos_offset, byte, 0444); +MODULE_PARM_DESC(cmdq_cos_offset, "Set cmdq cos start offset"); + +#define HINIC5_MSIX_CNT_LLI_TIMER_SHIFT 0 +#define HINIC5_MSIX_CNT_LLI_CREDIT_SHIFT 8 +#define HINIC5_MSIX_CNT_COALESC_TIMER_SHIFT 8 +#define HINIC5_MSIX_CNT_PENDING_SHIFT 8 +#define HINIC5_MSIX_CNT_RESEND_TIMER_SHIFT 29 + +#define HINIC5_MSIX_CNT_LLI_TIMER_MASK 0xFFU +#define HINIC5_MSIX_CNT_LLI_CREDIT_MASK 0xFFU +#define 
HINIC5_MSIX_CNT_COALESC_TIMER_MASK 0xFFU +#define HINIC5_MSIX_CNT_PENDING_MASK 0x1FU +#define HINIC5_MSIX_CNT_RESEND_TIMER_MASK 0x7U + +#define HINIC5_MSIX_CNT_SET(val, member) \ + (((val) & HINIC5_MSIX_CNT_##member##_MASK) << \ + HINIC5_MSIX_CNT_##member##_SHIFT) + +#define DEFAULT_RX_BUF_SIZE ((u16)0xB) +#define HINIC5_HT_GPA_PAGE_LEN 1024 + +enum hinic5_rx_buf_size { + HINIC5_RX_BUF_SIZE_32B = 0x20, + HINIC5_RX_BUF_SIZE_64B = 0x40, + HINIC5_RX_BUF_SIZE_96B = 0x60, + HINIC5_RX_BUF_SIZE_128B = 0x80, + HINIC5_RX_BUF_SIZE_192B = 0xC0, + HINIC5_RX_BUF_SIZE_256B = 0x100, + HINIC5_RX_BUF_SIZE_384B = 0x180, + HINIC5_RX_BUF_SIZE_512B = 0x200, + HINIC5_RX_BUF_SIZE_768B = 0x300, + HINIC5_RX_BUF_SIZE_1K = 0x400, + HINIC5_RX_BUF_SIZE_1_5K = 0x600, + HINIC5_RX_BUF_SIZE_2K = 0x800, + HINIC5_RX_BUF_SIZE_3K = 0xC00, + HINIC5_RX_BUF_SIZE_4K = 0x1000, + HINIC5_RX_BUF_SIZE_8K = 0x2000, + HINIC5_RX_BUF_SIZE_16K = 0x4000, +}; + +static inline int comm_msg_to_mgmt_sync(struct hinic5_hwdev *hwdev, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + return hinic5_msg_to_mgmt_sync(hwdev, HINIC5_MOD_COMM, cmd, buf_in, + in_size, buf_out, out_size, 0, + HINIC5_CHANNEL_COMM); +} + +static inline int comm_msg_to_mgmt_sync_ch(struct hinic5_hwdev *hwdev, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u16 channel) +{ + return hinic5_msg_to_mgmt_sync(hwdev, HINIC5_MOD_COMM, cmd, buf_in, + in_size, buf_out, out_size, 0, channel); +} + +int hinic5_get_interrupt_cfg(void *dev, struct interrupt_info *info, + u16 channel) +{ + struct hinic5_hwdev *hwdev = dev; + struct comm_cmd_msix_config msix_cfg; + u16 out_size = sizeof(msix_cfg); + int err; + + if (!hwdev || !info) + return -EINVAL; + + memset(&msix_cfg, 0, sizeof(msix_cfg)); + msix_cfg.func_id = hinic5_global_func_id(hwdev); + msix_cfg.msix_index = info->msix_index; + msix_cfg.opcode = MGMT_MSG_CMD_OP_GET; + + err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_CFG_MSIX_CTRL_REG, + &msix_cfg, 
sizeof(msix_cfg), &msix_cfg, + &out_size, channel); + if (err != 0 || out_size == 0 || msix_cfg.head.status != 0) { + sdk_err(hwdev->dev_hdl, "Failed to get interrupt config, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", + err, msix_cfg.head.status, out_size, channel); + return -EINVAL; + } + + info->lli_credit_limit = msix_cfg.lli_credit_cnt; + info->lli_timer_cfg = msix_cfg.lli_timer_cnt; + info->pending_limt = msix_cfg.pending_cnt; + info->coalesc_timer_cfg = msix_cfg.coalesce_timer_cnt; + info->resend_timer_cfg = msix_cfg.resend_timer_cnt; + + return 0; +} + +int hinic5_set_interrupt_cfg_direct(void *hwdev, struct interrupt_info *info, + u16 channel) +{ + struct comm_cmd_msix_config msix_cfg; + u16 out_size = sizeof(msix_cfg); + int err; + + if (!hwdev) + return -EINVAL; + + memset(&msix_cfg, 0, sizeof(msix_cfg)); + msix_cfg.func_id = hinic5_global_func_id(hwdev); + msix_cfg.msix_index = info->msix_index; + msix_cfg.opcode = MGMT_MSG_CMD_OP_SET; + + msix_cfg.lli_credit_cnt = info->lli_credit_limit; + msix_cfg.lli_timer_cnt = info->lli_timer_cfg; + msix_cfg.pending_cnt = info->pending_limt; + msix_cfg.coalesce_timer_cnt = info->coalesc_timer_cfg; + msix_cfg.resend_timer_cnt = info->resend_timer_cfg; + + err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_CFG_MSIX_CTRL_REG, + &msix_cfg, sizeof(msix_cfg), &msix_cfg, + &out_size, channel); + if (err != 0 || out_size == 0 || msix_cfg.head.status != 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "Failed to set interrupt config, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", + err, msix_cfg.head.status, out_size, channel); + return -EINVAL; + } + + return 0; +} + +int hinic5_set_interrupt_cfg(void *dev, struct interrupt_info info, u16 channel) +{ + struct interrupt_info temp_info; + struct hinic5_hwdev *hwdev = dev; + int err; + + if (!hwdev) + return -EINVAL; + + temp_info.msix_index = info.msix_index; + + err = hinic5_get_interrupt_cfg(hwdev, &temp_info, channel); + if (err != 0) + 
return -EINVAL; + + if (info.lli_set == 0) { + info.lli_credit_limit = temp_info.lli_credit_limit; + info.lli_timer_cfg = temp_info.lli_timer_cfg; + } + + if (info.interrupt_coalesc_set == 0) { + info.pending_limt = temp_info.pending_limt; + info.coalesc_timer_cfg = temp_info.coalesc_timer_cfg; + info.resend_timer_cfg = temp_info.resend_timer_cfg; + } + + return hinic5_set_interrupt_cfg_direct(hwdev, &info, channel); +} +EXPORT_SYMBOL(hinic5_set_interrupt_cfg); + +void hinic5_misx_intr_clear_resend_bit(void *hwdev, u16 msix_idx, + u8 clear_resend_en) +{ + struct hinic5_hwif *hwif = NULL; + u32 msix_ctrl = 0, addr; + + if (!hwdev) + return; + + hwif = ((struct hinic5_hwdev *)hwdev)->hwif; + + msix_ctrl = HINIC5_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX) | + HINIC5_MSI_CLR_INDIR_SET(clear_resend_en, RESEND_TIMER_CLR); + + addr = HINIC5_CSR_FUNC_MSI_CLR_WR_ADDR; + hinic5_hwif_write_reg(hwif, addr, msix_ctrl); +} +EXPORT_SYMBOL(hinic5_misx_intr_clear_resend_bit); + +int hinic5_set_wq_page_size(void *hwdev, u16 func_idx, u32 page_size, + u16 channel) +{ + struct comm_cmd_wq_page_size page_size_info; + u16 out_size = sizeof(page_size_info); + int err; + + memset(&page_size_info, 0, sizeof(page_size_info)); + page_size_info.func_id = func_idx; + page_size_info.page_size = HINIC5_PAGE_SIZE_HW(page_size); + page_size_info.opcode = MGMT_MSG_CMD_OP_SET; + + err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_CFG_PAGESIZE, + &page_size_info, sizeof(page_size_info), + &page_size_info, &out_size, channel); + if (err != 0 || out_size == 0 || page_size_info.head.status != 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "Failed to set wq page size, err: %d, status: 0x%x, out_size: 0x%x, channel: 0x%x\n", + err, page_size_info.head.status, out_size, channel); + return -EFAULT; + } + + return 0; +} + +int hinic5_func_reset(void *dev, u16 func_id, u64 reset_flag, u16 channel) +{ + struct comm_cmd_func_reset func_reset; + struct hinic5_hwdev *hwdev = dev; + u16 out_size = 
sizeof(func_reset); + int err = 0; + int is_in_kexec; + + if (!dev) { + pr_err("Invalid para: dev is null.\n"); + return -EINVAL; + } + + is_in_kexec = hinic5_vram_get_kexec_flag(); + if (is_in_kexec != 0) { + sdk_info(hwdev->dev_hdl, "Skip function reset!\n"); + return 0; + } + + sdk_info(hwdev->dev_hdl, "Function is reset, flag: 0x%llx, channel:0x%x\n", + reset_flag, channel); + + memset(&func_reset, 0, sizeof(func_reset)); + func_reset.func_id = func_id; + func_reset.reset_flag = reset_flag; + /* func reset时芯片会出现OUTBOUND_FLUSH_DISABLED, 不进行异常检测 */ + atomic_inc(&hwdev->check_ob_flush_bypass_ref_cnt); + err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_FUNC_RESET, + &func_reset, sizeof(func_reset), + &func_reset, &out_size, channel); + if (err != 0 || out_size == 0 || func_reset.head.status != 0) { + sdk_err(hwdev->dev_hdl, + "Failed to reset func resources, reset_flag 0x%llx, err: %d, " \ + "status: 0x%x, out_size: 0x%x\n", + reset_flag, err, func_reset.head.status, out_size); + err = -EIO; + } + + atomic_dec(&hwdev->check_ob_flush_bypass_ref_cnt); + return err; +} +EXPORT_SYMBOL(hinic5_func_reset); + +static u16 get_hw_rx_buf_size(const void *hwdev, u16 rx_buf_sz) +{ + const int hinic5_hw_rx_buf_size[] = { + HINIC5_RX_BUF_SIZE_32B, + HINIC5_RX_BUF_SIZE_64B, + HINIC5_RX_BUF_SIZE_96B, + HINIC5_RX_BUF_SIZE_128B, + HINIC5_RX_BUF_SIZE_192B, + HINIC5_RX_BUF_SIZE_256B, + HINIC5_RX_BUF_SIZE_384B, + HINIC5_RX_BUF_SIZE_512B, + HINIC5_RX_BUF_SIZE_768B, + HINIC5_RX_BUF_SIZE_1K, + HINIC5_RX_BUF_SIZE_1_5K, + HINIC5_RX_BUF_SIZE_2K, + HINIC5_RX_BUF_SIZE_3K, + HINIC5_RX_BUF_SIZE_4K, + HINIC5_RX_BUF_SIZE_8K, + HINIC5_RX_BUF_SIZE_16K, + }; + u16 num_hw_types; + u16 i; + + if (COMM_IS_USE_REAL_RX_BUF_SIZE((struct hinic5_hwdev *)hwdev)) + return rx_buf_sz; + + num_hw_types = ARRAY_SIZE(hinic5_hw_rx_buf_size); + for (i = 0; i < num_hw_types; i++) { + if (hinic5_hw_rx_buf_size[i] == rx_buf_sz) + return i; + } + + pr_err("Chip can't support rx buf size of %d\n", rx_buf_sz); + + 
return DEFAULT_RX_BUF_SIZE; /* default 2K */ +} + +int hinic5_set_root_ctxt(void *hwdev, u32 rq_depth, u32 sq_depth, u16 rx_buf_sz, + u16 channel) +{ + struct comm_cmd_root_ctxt root_ctxt; + u16 out_size = sizeof(root_ctxt); + int err; + + if (!hwdev) + return -EINVAL; + + memset(&root_ctxt, 0, sizeof(root_ctxt)); + root_ctxt.func_id = hinic5_global_func_id(hwdev); + + root_ctxt.set_cmdq_depth = 0; + root_ctxt.cmdq_depth = 0; + + root_ctxt.lro_en = 1; + + root_ctxt.rq_depth = (u16)ilog2(rq_depth); + root_ctxt.rx_buf_sz = get_hw_rx_buf_size(hwdev, rx_buf_sz); + root_ctxt.sq_depth = (u16)ilog2(sq_depth); + + err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_SET_VAT, + &root_ctxt, sizeof(root_ctxt), + &root_ctxt, &out_size, channel); + if (err != 0 || out_size == 0 || root_ctxt.head.status != 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "Failed to set root context, err: %d, status: 0x%x, out_size: 0x%x, channel: 0x%x\n", + err, root_ctxt.head.status, out_size, channel); + return -EFAULT; + } + return 0; +} +EXPORT_SYMBOL(hinic5_set_root_ctxt); + +int hinic5_clean_root_ctxt(void *hwdev, u16 channel) +{ + struct comm_cmd_root_ctxt root_ctxt; + u16 out_size = sizeof(root_ctxt); + int err; + + if (!hwdev) + return -EINVAL; + + memset(&root_ctxt, 0, sizeof(root_ctxt)); + root_ctxt.func_id = hinic5_global_func_id(hwdev); + + err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_SET_VAT, + &root_ctxt, sizeof(root_ctxt), + &root_ctxt, &out_size, channel); + if (err != 0 || out_size == 0 || root_ctxt.head.status != 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "Failed to set root context, err: %d, status: 0x%x, out_size: 0x%x, channel: 0x%x\n", + err, root_ctxt.head.status, out_size, channel); + return -EFAULT; + } + + return 0; +} +EXPORT_SYMBOL(hinic5_clean_root_ctxt); + +int hinic5_set_cmdq_depth(void *hwdev, u16 cmdq_depth) +{ + struct comm_cmd_root_ctxt root_ctxt; + u16 out_size = sizeof(root_ctxt); + int err; + + memset(&root_ctxt, 0, 
sizeof(root_ctxt)); + root_ctxt.func_id = hinic5_global_func_id(hwdev); + + root_ctxt.set_cmdq_depth = 1; + root_ctxt.cmdq_depth = (u8)ilog2(cmdq_depth); + root_ctxt.cmdq_mode = ((struct hinic5_hwdev *)hwdev)->cmdq_mode; + + if (((struct hinic5_hwdev *)hwdev)->cmdq_mode == HINIC5_ENHANCE_CMDQ) + root_ctxt.cmdq_depth--; + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_VAT, &root_ctxt, + sizeof(root_ctxt), &root_ctxt, &out_size); + if (err != 0 || out_size == 0 || root_ctxt.head.status != 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "Failed to set cmdq depth, err: %d, status: 0x%x, out_size: 0x%x\n", + err, root_ctxt.head.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hinic5_set_enhance_cmdq_ctxt(struct hinic5_hwdev *hwdev, u8 cmdq_id, + struct enhance_cmdq_ctxt_info *ctxt) +{ + struct comm_cmd_enhance_cmdq_ctxt cmdq_ctxt; + u16 out_size = sizeof(cmdq_ctxt); + int err; + + memset(&cmdq_ctxt, 0, sizeof(cmdq_ctxt)); + memcpy(&cmdq_ctxt.ctxt, ctxt, sizeof(*ctxt)); + cmdq_ctxt.func_id = hinic5_global_func_id(hwdev); + cmdq_ctxt.cmdq_id = cmdq_id | cmdq_cos_offset; + hwdev->cmdq_cos_offset = cmdq_cos_offset; + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_ENHANCE_CMDQ_CTXT, + &cmdq_ctxt, sizeof(cmdq_ctxt), + &cmdq_ctxt, &out_size); + if (err != 0 || out_size == 0 || cmdq_ctxt.head.status != 0) { + sdk_err(hwdev->dev_hdl, "Failed to set enhanced cmdq ctxt, err: %d, status: 0x%x, out_size: 0x%x\n", + err, cmdq_ctxt.head.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hinic5_set_cmdq_ctxt(struct hinic5_hwdev *hwdev, u8 cmdq_id, + struct cmdq_ctxt_info *ctxt) +{ + struct comm_cmd_cmdq_ctxt cmdq_ctxt; + u16 out_size = sizeof(cmdq_ctxt); + int err; + + memset(&cmdq_ctxt, 0, sizeof(cmdq_ctxt)); + memcpy(&cmdq_ctxt.ctxt, ctxt, sizeof(*ctxt)); + cmdq_ctxt.func_id = hinic5_global_func_id(hwdev); + cmdq_ctxt.cmdq_id = cmdq_id; + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_CMDQ_CTXT, + &cmdq_ctxt, 
sizeof(cmdq_ctxt), + &cmdq_ctxt, &out_size); + if (err != 0 || out_size == 0 || cmdq_ctxt.head.status != 0) { + sdk_err(hwdev->dev_hdl, "Failed to set cmdq ctxt, err: %d, status: 0x%x, out_size: 0x%x\n", + err, cmdq_ctxt.head.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hinic5_set_ceq_ctrl_reg(struct hinic5_hwdev *hwdev, u16 q_id, + u32 ctrl0, u32 ctrl1) +{ + struct comm_cmd_ceq_ctrl_reg ceq_ctrl; + u16 out_size = sizeof(ceq_ctrl); + int err; + + memset(&ceq_ctrl, 0, sizeof(ceq_ctrl)); + ceq_ctrl.func_id = hinic5_global_func_id(hwdev); + ceq_ctrl.q_id = q_id; + ceq_ctrl.ctrl0 = ctrl0; + ceq_ctrl.ctrl1 = ctrl1; + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_CEQ_CTRL_REG, + &ceq_ctrl, sizeof(ceq_ctrl), + &ceq_ctrl, &out_size); + if (err != 0 || out_size == 0 || ceq_ctrl.head.status != 0) { + sdk_err(hwdev->dev_hdl, "Failed to set ceq %u ctrl reg, err: %d status: 0x%x, out_size: 0x%x\n", + q_id, err, ceq_ctrl.head.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hinic5_set_dma_attr_tbl(struct hinic5_hwdev *hwdev, u8 entry_idx, u8 st, u8 at, u8 ph, + u8 no_snooping, u8 tph_en) +{ + struct comm_cmd_dma_attr_config dma_attr; + u16 out_size = sizeof(dma_attr); + int err; + + memset(&dma_attr, 0, sizeof(dma_attr)); + dma_attr.func_id = hinic5_global_func_id(hwdev); + dma_attr.entry_idx = entry_idx; + dma_attr.st = st; + dma_attr.at = at; + dma_attr.ph = ph; + dma_attr.no_snooping = no_snooping; + dma_attr.tph_en = tph_en; + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_DMA_ATTR, &dma_attr, sizeof(dma_attr), + &dma_attr, &out_size); + if (err != 0 || out_size == 0 || dma_attr.head.status != 0) { + sdk_err(hwdev->dev_hdl, "Failed to set dma attr, err: %d, status: 0x%x, out_size: 0x%x\n", + err, dma_attr.head.status, out_size); + return -EIO; + } + + return 0; +} + +int hinic5_set_bdf_ctxt(void *hwdev, u8 bus, u8 device, u8 function) +{ + struct comm_cmd_bdf_info bdf_info; + u16 out_size = sizeof(bdf_info); + int err; + + 
if (!hwdev) + return -EINVAL; + + memset(&bdf_info, 0, sizeof(bdf_info)); + bdf_info.function_idx = hinic5_global_func_id(hwdev); + bdf_info.bus = bus; + bdf_info.device = device; + bdf_info.function = function; + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SEND_BDF_INFO, + &bdf_info, sizeof(bdf_info), + &bdf_info, &out_size); + if (err != 0 || out_size == 0 || bdf_info.head.status != 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "Failed to set bdf info to MPU, err: %d, status: 0x%x, out_size: 0x%x\n", + err, bdf_info.head.status, out_size); + return -EIO; + } + + return 0; +} + +int hinic5_sync_time(void *hwdev, u64 time) +{ + struct comm_cmd_sync_time time_info; + u16 out_size = sizeof(time_info); + int err; + + memset(&time_info, 0, sizeof(time_info)); + time_info.mstime = time; + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SYNC_TIME, &time_info, + sizeof(time_info), &time_info, &out_size); + if (err != 0 || time_info.head.status != 0 || out_size == 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "Failed to sync time to mgmt, err: %d, status: 0x%x, out size: 0x%x\n", + err, time_info.head.status, out_size); + return -EIO; + } + + return 0; +} + +int hinic5_set_ppf_flr_type(void *hwdev, enum hinic5_ppf_flr_type flr_type) +{ + struct comm_cmd_ppf_flr_type_set flr_type_set; + u16 out_size = sizeof(struct comm_cmd_ppf_flr_type_set); + struct hinic5_hwdev *dev = hwdev; + int err; + + if (!hwdev) + return -EINVAL; + + memset(&flr_type_set, 0, sizeof(flr_type_set)); + flr_type_set.func_id = hinic5_global_func_id(hwdev); + flr_type_set.ppf_flr_type = flr_type; + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_PPF_FLR_TYPE, + &flr_type_set, sizeof(flr_type_set), + &flr_type_set, &out_size); + if (err != 0 || out_size == 0 || flr_type_set.head.status != 0) { + sdk_err(dev->dev_hdl, "Failed to set ppf flr type, err: %d, status: 0x%x, out size: 0x%x\n", + err, flr_type_set.head.status, out_size); + return -EIO; + } + + return 0; +} 
+EXPORT_SYMBOL(hinic5_set_ppf_flr_type); + +int hinic5_set_ppf_tbl_hotreplace_flag(void *hwdev, u8 flag) +{ + struct comm_cmd_ppf_tbl_htrp_config htr_info; + u16 out_size = sizeof(struct comm_cmd_ppf_tbl_htrp_config); + struct hinic5_hwdev *dev = hwdev; + int ret; + + if (!hwdev) { + pr_err("Sdk set ppf table hotreplace flag para is null"); + return -EINVAL; + } + + memset(&htr_info, 0, sizeof(htr_info)); + + htr_info.hotreplace_flag = flag; + ret = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_PPF_TBL_HTR_FLG, + &htr_info, sizeof(htr_info), &htr_info, &out_size); + if (ret != 0 || htr_info.head.status != 0) { + sdk_err(dev->dev_hdl, "Send mbox to mpu failed in sdk, ret:%d, status:%u", + ret, htr_info.head.status); + return -EIO; + } + + return 0; +} +EXPORT_SYMBOL(hinic5_set_ppf_tbl_hotreplace_flag); + +static int hinic5_get_fw_ver(struct hinic5_hwdev *hwdev, enum hinic5_fw_ver_type type, + u8 *mgmt_ver, u8 version_size, u16 channel) +{ + struct comm_cmd_get_fw_version fw_ver; + u16 out_size = sizeof(fw_ver); + int err; + + if (!hwdev || !mgmt_ver) + return -EINVAL; + + memset(&fw_ver, 0, sizeof(fw_ver)); + fw_ver.fw_type = type; + err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_GET_FW_VERSION, + &fw_ver, sizeof(fw_ver), &fw_ver, + &out_size, channel); + if (err != 0 || out_size == 0 || fw_ver.head.status != 0) { + sdk_err(hwdev->dev_hdl, + "Failed to get fw version, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", + err, fw_ver.head.status, out_size, channel); + return -EIO; + } + + if (version_size > HINIC5_MGMT_VERSION_MAX_LEN) { + sdk_err(hwdev->dev_hdl, "memcpy fw_ver.ver over range\n"); + return -ERANGE; + } + + memcpy(mgmt_ver, fw_ver.ver, version_size); + return 0; +} + +int hinic5_get_mgmt_version(void *hwdev, u8 *mgmt_ver, u8 version_size, + u16 channel) +{ + return hinic5_get_fw_ver(hwdev, HINIC5_FW_VER_TYPE_MPU, mgmt_ver, + version_size, channel); +} +EXPORT_SYMBOL(hinic5_get_mgmt_version); + +int hinic5_get_fw_version(void *hwdev, struct 
hinic5_fw_version *fw_ver, + u16 channel) +{ + int err; + + if (!hwdev || !fw_ver) + return -EINVAL; + + err = hinic5_get_fw_ver(hwdev, HINIC5_FW_VER_TYPE_MPU, + fw_ver->mgmt_ver, sizeof(fw_ver->mgmt_ver), + channel); + if (err != 0) + return err; + + err = hinic5_get_fw_ver(hwdev, HINIC5_FW_VER_TYPE_NPU, + fw_ver->microcode_ver, + sizeof(fw_ver->microcode_ver), channel); + if (err != 0) + return err; + + return hinic5_get_fw_ver(hwdev, HINIC5_FW_VER_TYPE_BOOT, + fw_ver->boot_ver, sizeof(fw_ver->boot_ver), + channel); +} +EXPORT_SYMBOL(hinic5_get_fw_version); + +static int hinic5_comm_features_nego(void *hwdev, u8 opcode, u64 *s_feature, + u16 size) +{ + struct comm_cmd_feature_nego feature_nego; + u16 out_size = sizeof(feature_nego); + struct hinic5_hwdev *dev = hwdev; + int err; + + if (!hwdev || !s_feature || size > COMM_MAX_FEATURE_QWORD) + return -EINVAL; + + memset(&feature_nego, 0, sizeof(feature_nego)); + feature_nego.func_id = hinic5_global_func_id(hwdev); + feature_nego.opcode = opcode; + if (opcode == MGMT_MSG_CMD_OP_SET) + memcpy(feature_nego.s_feature, s_feature, (size * sizeof(u64))); + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_FEATURE_NEGO, + &feature_nego, sizeof(feature_nego), + &feature_nego, &out_size); + if (err != 0 || out_size == 0 || feature_nego.head.status != 0) { + sdk_err(dev->dev_hdl, "Failed to negotiate feature, err: %d, status: 0x%x, out size: 0x%x\n", + err, feature_nego.head.status, out_size); + return -EINVAL; + } + + if (opcode == MGMT_MSG_CMD_OP_GET) + memcpy(s_feature, feature_nego.s_feature, (COMM_MAX_FEATURE_QWORD * sizeof(u64))); + + return 0; +} + +int hinic5_get_comm_features(void *hwdev, u64 *s_feature, u16 size) +{ + return hinic5_comm_features_nego(hwdev, MGMT_MSG_CMD_OP_GET, s_feature, + size); +} + +int hinic5_set_comm_features(void *hwdev, u64 *s_feature, u16 size) +{ + return hinic5_comm_features_nego(hwdev, MGMT_MSG_CMD_OP_SET, s_feature, + size); +} + +int hinic5_comm_channel_detect(struct hinic5_hwdev 
*hwdev) +{ + struct comm_cmd_channel_detect channel_detect_info; + u16 out_size = sizeof(channel_detect_info); + int err; + + if (!hwdev) + return -EINVAL; + + memset(&channel_detect_info, 0, sizeof(channel_detect_info)); + channel_detect_info.func_id = hinic5_global_func_id(hwdev); + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_CHANNEL_DETECT, + &channel_detect_info, sizeof(channel_detect_info), + &channel_detect_info, &out_size); + if ((channel_detect_info.head.status != HINIC5_MGMT_CMD_UNSUPPORTED && + channel_detect_info.head.status != 0) || err != 0 || out_size == 0) { + sdk_err(hwdev->dev_hdl, + "Failed to send channel detect, err: %d, status: 0x%x, out size: 0x%x\n", + err, channel_detect_info.head.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic5_func_tmr_bitmap_set(void *hwdev, u16 func_id, bool en) +{ +#ifdef __UEFI__ + return 0; +#endif + struct comm_cmd_func_tmr_bitmap_op bitmap_op; + u16 out_size = sizeof(bitmap_op); + int err; + + if (!hwdev) + return -EINVAL; + + memset(&bitmap_op, 0, sizeof(bitmap_op)); + bitmap_op.func_id = func_id; + bitmap_op.opcode = en ? FUNC_TMR_BITMAP_ENABLE : FUNC_TMR_BITMAP_DISABLE; + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_FUNC_TMR_BITMAT, + &bitmap_op, sizeof(bitmap_op), + &bitmap_op, &out_size); + if (err != 0 || out_size == 0 || bitmap_op.head.status != 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "Failed to set timer bitmap, err: %d, status: 0x%x, out_size: 0x%x\n", + err, bitmap_op.head.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hinic5_func_vio_en(void *hwdev, bool en) +{ + struct hinic5_hwdev *dev = (struct hinic5_hwdev *)hwdev; + struct comm_cmd_virtio_en cmd; + u16 out_size = sizeof(cmd); + int err; + + if (!hwdev) + return -EINVAL; + + memset(&cmd, 0, sizeof(cmd)); + cmd.msien_snap_2_virtio_en = en ? 
0x1 : 0x0; + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_VIO_EN, + &cmd, sizeof(cmd), &cmd, &out_size); + if (cmd.head.status == HINIC5_MGMT_CMD_UNSUPPORTED) { + sdk_warn(dev->dev_hdl, "not support vio en"); + return 0; + } + if (err != 0 || out_size == 0 || cmd.head.status != 0) { + sdk_err(dev->dev_hdl, + "Failed to set vio %s, err: %d, status: 0x%x, out_size: 0x%x\n", + (en ? "enable" : "disable"), + err, cmd.head.status, out_size); + return -EFAULT; + } + + return 0; +} + +static int alloc_bank_buf(struct hinic5_hwdev *hwdev, struct hinic5_page_addr *pg0, + struct hinic5_page_addr *pg1, struct comm_cmd_ht_gpa *ht_gpa_set) +{ + size_t page_len = HINIC5_HT_GPA_PAGE_SIZE * HINIC5_HT_GPA_PAGE_LEN; + + if (lowpower_mode != 0) { + pg0->virt_addr = dma_zalloc_coherent(hwdev->dev_hdl, page_len, + &pg0->phys_addr, GFP_KERNEL); + ht_gpa_set->rsvd0[0] = lowpower_mode; + sdk_info(hwdev->dev_hdl, "Alloc pg0 page addr len: 0x%lx, lowpower_mode=%u\n", + page_len, lowpower_mode); + } else { + pg0->virt_addr = dma_zalloc_coherent(hwdev->dev_hdl, HINIC5_HT_GPA_PAGE_SIZE, + &pg0->phys_addr, GFP_KERNEL); + } + + if (!pg0->virt_addr) { + sdk_err(hwdev->dev_hdl, "Alloc pg0 page addr failed\n"); + return -EFAULT; + } + + pg1->virt_addr = dma_zalloc_coherent(hwdev->dev_hdl, + HINIC5_HT_GPA_PAGE_SIZE, + &pg1->phys_addr, GFP_KERNEL); + if (!pg1->virt_addr) { + sdk_err(hwdev->dev_hdl, "Alloc pg1 page addr failed\n"); + return -EFAULT; + } + + return 0; +} + +static int ht_gpa_set(struct hinic5_hwdev *hwdev, struct hinic5_page_addr *pg0, + struct hinic5_page_addr *pg1) +{ + struct comm_cmd_ht_gpa ht_gpa_set; + u16 out_size = sizeof(ht_gpa_set); + int ret; + + memset(&ht_gpa_set, 0, sizeof(ht_gpa_set)); + ret = alloc_bank_buf(hwdev, pg0, pg1, &ht_gpa_set); + if (ret != 0) + return -EFAULT; + + ht_gpa_set.host_id = hinic5_host_id(hwdev); + ht_gpa_set.opcode = HT_GPA_SET; + ht_gpa_set.page_pa0 = pg0->phys_addr; + ht_gpa_set.page_pa1 = pg1->phys_addr; +#ifndef __VMWARE__ + 
sdk_info(hwdev->dev_hdl, "ht gpa set: page_addr0.pa=0x%llx, page_addr1.pa=0x%llx\n", + pg0->phys_addr, pg1->phys_addr); +#else + sdk_info(hwdev->dev_hdl, "ht gpa set: page_addr0.pa=0x%lx, page_addr1.pa=0x%lx\n", + pg0->phys_addr, pg1->phys_addr); +#endif + ret = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_HT_GPA, + &ht_gpa_set, sizeof(ht_gpa_set), + &ht_gpa_set, &out_size); + if (ret != 0 || out_size == 0 || ht_gpa_set.head.status != 0) { + sdk_warn(hwdev->dev_hdl, "ht gpa set failed, ret: %d, status: 0x%x, out_size: 0x%x\n", + ret, ht_gpa_set.head.status, out_size); + return -EFAULT; + } + + hwdev->page_pa0.phys_addr = pg0->phys_addr; + hwdev->page_pa0.virt_addr = pg0->virt_addr; + + hwdev->page_pa1.phys_addr = pg1->phys_addr; + hwdev->page_pa1.virt_addr = pg1->virt_addr; + + return 0; +} + +int hinic5_ht_gpa_init(struct hinic5_hwdev *hwdev) +{ + struct hinic5_page_addr page_addr0[HINIC5_HT_GPA_SET_RETRY_TIMES] = { 0 }; + struct hinic5_page_addr page_addr1[HINIC5_HT_GPA_SET_RETRY_TIMES] = { 0 }; + int ret, i, j; + + if (!hwdev) { + pr_err("hwdev is null.\n"); + return -EINVAL; + } + + if (hwdev->page_pa0.phys_addr != 0 || hwdev->page_pa1.phys_addr != 0) { + sdk_err(hwdev->dev_hdl, "ht gpa have be inited.\n"); + return 0; + } + + for (i = 0; i < HINIC5_HT_GPA_SET_RETRY_TIMES; i++) { + ret = ht_gpa_set(hwdev, &page_addr0[i], &page_addr1[i]); + if (ret == 0) + break; + } + + for (j = 0; j < i; j++) { + if (page_addr0[j].virt_addr) { + dma_free_coherent(hwdev->dev_hdl, + HINIC5_HT_GPA_PAGE_SIZE, + page_addr0[j].virt_addr, + (dma_addr_t)page_addr0[j].phys_addr); + page_addr0[j].virt_addr = NULL; + } + if (page_addr1[j].virt_addr) { + dma_free_coherent(hwdev->dev_hdl, + HINIC5_HT_GPA_PAGE_SIZE, + page_addr1[j].virt_addr, + (dma_addr_t)page_addr1[j].phys_addr); + page_addr1[j].virt_addr = NULL; + } + } + + if (i >= HINIC5_HT_GPA_SET_RETRY_TIMES) { + sdk_err(hwdev->dev_hdl, "ht gpa init failed, retry times: %d\n", + i); + return -EFAULT; + } + + return 0; +} + +static 
void ht_gpa_clear(struct hinic5_hwdev *hwdev) +{ + struct comm_cmd_ht_gpa ht_gpa_set; + u16 out_size = sizeof(ht_gpa_set); + int ret; + + memset(&ht_gpa_set, 0, sizeof(ht_gpa_set)); + + ht_gpa_set.host_id = hinic5_host_id(hwdev); + ht_gpa_set.opcode = HT_GPA_CLEAR; + + sdk_info(hwdev->dev_hdl, "ht gpa clear"); + + ret = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_HT_GPA, + &ht_gpa_set, sizeof(ht_gpa_set), + &ht_gpa_set, &out_size); + if (ret != 0 || out_size == 0 || ht_gpa_set.head.status != 0) { + sdk_warn(hwdev->dev_hdl, "ht gpa set failed, ret: %d, status: 0x%x, out_size: 0x%x\n", + ret, ht_gpa_set.head.status, out_size); + } +} + +void hinic5_ht_gpa_deinit(struct hinic5_hwdev *hwdev) +{ + if (!hwdev) { + pr_err("hwdev is null.\n"); + return; + } + + ht_gpa_clear(hwdev); + + if (hwdev->page_pa0.virt_addr && hwdev->page_pa0.phys_addr != 0) { + dma_free_coherent(hwdev->dev_hdl, HINIC5_HT_GPA_PAGE_SIZE, + hwdev->page_pa0.virt_addr, + (dma_addr_t)(hwdev->page_pa0.phys_addr)); + hwdev->page_pa0.virt_addr = NULL; + hwdev->page_pa0.phys_addr = 0; + } + + if (hwdev->page_pa1.virt_addr && hwdev->page_pa1.phys_addr != 0) { + dma_free_coherent(hwdev->dev_hdl, HINIC5_HT_GPA_PAGE_SIZE, + hwdev->page_pa1.virt_addr, + (dma_addr_t)hwdev->page_pa1.phys_addr); + hwdev->page_pa1.virt_addr = NULL; + hwdev->page_pa1.phys_addr = 0; + } +} + +static int set_ppf_tmr_status(struct hinic5_hwdev *hwdev, + enum ppf_tmr_status status) +{ + struct comm_cmd_ppf_tmr_op op; + u16 out_size = sizeof(op); + int err = 0; + + if (!hwdev) + return -EINVAL; + + memset(&op, 0, sizeof(op)); + + if (hinic5_func_type(hwdev) != TYPE_PPF) + return -EFAULT; + + op.opcode = status; + op.ppf_id = hinic5_ppf_idx(hwdev); + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_PPF_TMR, &op, + sizeof(op), &op, &out_size); + if (err != 0 || out_size == 0 || op.head.status != 0) { + sdk_err(hwdev->dev_hdl, "Failed to set ppf timer, err: %d, status: 0x%x, out_size: 0x%x\n", + err, op.head.status, out_size); + 
return -EFAULT; + } + + return 0; +} + +int hinic5_ppf_tmr_start(void *hwdev) +{ + int is_in_kexec; + + if (!hwdev) { + pr_err("Hwdev pointer is NULL for starting ppf timer\n"); + return -EINVAL; + } + + is_in_kexec = hinic5_vram_get_kexec_flag(); + if (is_in_kexec != 0) { + pr_info("Skip starting ppt timer during kexec"); + return 0; + } + + return set_ppf_tmr_status(hwdev, HINIC_PPF_TMR_FLAG_START); +} +EXPORT_SYMBOL(hinic5_ppf_tmr_start); + +int hinic5_ppf_tmr_stop(void *hwdev) +{ + if (!hwdev) { + pr_err("Hwdev pointer is NULL for stop ppf timer\n"); + return -EINVAL; + } + + return set_ppf_tmr_status(hwdev, HINIC_PPF_TMR_FLAG_STOP); +} +EXPORT_SYMBOL(hinic5_ppf_tmr_stop); + +static int hinic5_hinic5_vram_kalloc_align(struct hinic5_hwdev *hwdev, char *name, u32 page_size, + u32 page_num, struct hinic5_dma_addr_align *mem_align) +{ + void *vaddr = NULL, *align_vaddr = NULL; + dma_addr_t paddr, align_paddr; + u64 real_size = page_size; + u64 align = page_size; + + vaddr = (void *)hinic5_hinic5_vram_kalloc(name, real_size); + if (!vaddr) { + sdk_err(hwdev->dev_hdl, "hinic5_vram kalloc failed, name:%s.\n", name); + return -ENOMEM; + } + + paddr = (dma_addr_t)virt_to_phys(vaddr); + align_paddr = ALIGN(paddr, align); + /* align */ + if (align_paddr == paddr) { + align_vaddr = vaddr; + goto out; + } + + hinic5_hinic5_vram_kfree((void *)vaddr, name, real_size); + + /* realloc memory for align */ + real_size = page_size + align; + vaddr = (void *)hinic5_hinic5_vram_kalloc(name, real_size); + if (!vaddr) { + sdk_err(hwdev->dev_hdl, "hinic5_vram kalloc align failed, name:%s.\n", name); + return -ENOMEM; + } + + paddr = (dma_addr_t)virt_to_phys(vaddr); + align_paddr = ALIGN(paddr, align); + align_vaddr = (void *)(uintptr_t)((u64)(uintptr_t)vaddr + (align_paddr - paddr)); + +out: + mem_align->real_size = (u32)real_size; + mem_align->ori_vaddr = vaddr; + mem_align->ori_paddr = paddr; + mem_align->align_vaddr = align_vaddr; + mem_align->align_paddr = align_paddr; + + return 
0; +} + +static void mqm_eqm_free_page_mem(struct hinic5_hwdev *hwdev) +{ + struct hinic5_dma_addr_align *page_addr = NULL; + u32 i; + int is_use_hinic5_vram = get_use_hinic5_vram_flag(); + struct mqm_eqm_hinic5_vram_name_s *mqm_eqm_vram_name = hwdev->mqm_eqm_hinic5_vram_name; + + page_addr = hwdev->mqm_att.brm_srch_page_addr; + + for (i = 0; i < hwdev->mqm_att.page_num; i++) { + if (is_use_hinic5_vram != 0) { + hinic5_hinic5_vram_kfree(page_addr->ori_vaddr, + mqm_eqm_vram_name[i].hinic5_vram_name, + page_addr->real_size); + } else { + hinic5_dma_free_coherent_align(hwdev->dev_hdl, page_addr); + } + page_addr++; + } + kfree(mqm_eqm_vram_name); + hwdev->mqm_eqm_hinic5_vram_name = NULL; +} + +static int mqm_eqm_try_alloc_mem(struct hinic5_hwdev *hwdev, u32 page_size, + u32 page_num) +{ + struct hinic5_dma_addr_align *page_addr = hwdev->mqm_att.brm_srch_page_addr; + int is_use_hinic5_vram = get_use_hinic5_vram_flag(); + struct mqm_eqm_hinic5_vram_name_s *mqm_eqm_hinic5_vram_name = NULL; + u32 valid_num = 0; + u32 flag = 1; + u32 i = 0; + int err; + u16 func_id; + + mqm_eqm_hinic5_vram_name = kzalloc(sizeof(struct mqm_eqm_hinic5_vram_name_s) * page_num, GFP_KERNEL); + if (!mqm_eqm_hinic5_vram_name) + return -ENOMEM; + + hwdev->mqm_eqm_hinic5_vram_name = mqm_eqm_hinic5_vram_name; + func_id = hinic5_global_func_id(hwdev); + + for (i = 0; i < page_num; i++) { + if (is_use_hinic5_vram != 0) { + err = snprintf(mqm_eqm_hinic5_vram_name[i].hinic5_vram_name, + HINIC5_VRAM_NAME_MAX_LEN, "%s%hu%s%u", + HINIC5_VRAM_NIC_FUNC_BASE, func_id, + HINIC5_VRAM_NIC_MQM, i); + if (err < 0) { + sdk_err(hwdev->dev_hdl, + "mqm eqm snprintf name fail, err:%d, index:%u\n", err, i); + flag = 0; + break; + } + err = hinic5_hinic5_vram_kalloc_align(hwdev, + mqm_eqm_hinic5_vram_name[i].hinic5_vram_name, + page_size, page_num, page_addr); + } else { + err = hinic5_dma_zalloc_coherent_align(hwdev->dev_hdl, page_size, + page_size, GFP_KERNEL, page_addr); + } + if (err != 0) { + flag = 0; + break; + } 
+ valid_num++; + page_addr++; + } + + hwdev->mqm_att.page_num = valid_num; + if (flag == 1) { + hwdev->mqm_att.page_size = page_size; + } else { + mqm_eqm_free_page_mem(hwdev); + return -EFAULT; + } + + return 0; +} + +static int mqm_eqm_alloc_page_mem(struct hinic5_hwdev *hwdev) +{ + int ret = 0; + u32 page_num; + + /* apply for 2M page, page number is chunk_num/1024 */ + page_num = (hwdev->mqm_att.chunk_num + 0x3ff) >> 0xa; + ret = mqm_eqm_try_alloc_mem(hwdev, 0x2 * 0x400 * 0x400, page_num); + if (ret == 0) { + sdk_info(hwdev->dev_hdl, "[mqm_eqm_init] Alloc page_size 2M OK\n"); + return 0; + } + + /* apply for 64KB page, page number is chunk_num/32 */ + page_num = (hwdev->mqm_att.chunk_num + 0x1f) >> 0x5; + ret = mqm_eqm_try_alloc_mem(hwdev, 0x40 * 0x400, page_num); + if (ret == 0) { + sdk_info(hwdev->dev_hdl, "[mqm_eqm_init] Alloc page_size 64K OK\n"); + return 0; + } + + /* apply for 4KB page, page number is chunk_num/2 */ + page_num = (hwdev->mqm_att.chunk_num + 1) >> 1; + ret = mqm_eqm_try_alloc_mem(hwdev, 0x4 * 0x400, page_num); + if (ret == 0) { + sdk_info(hwdev->dev_hdl, "[mqm_eqm_init] Alloc page_size 4K OK\n"); + return 0; + } + + return ret; +} + +static int mqm_eqm_set_cfg_2_hw(struct hinic5_hwdev *hwdev, u8 valid) +{ + struct comm_cmd_eqm_cfg info_eqm_cfg; + u16 out_size = sizeof(info_eqm_cfg); + int err; + + memset(&info_eqm_cfg, 0, sizeof(info_eqm_cfg)); + + info_eqm_cfg.host_id = hinic5_host_id(hwdev); + info_eqm_cfg.page_size = hwdev->mqm_att.page_size; + info_eqm_cfg.valid = valid; + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_MQM_CFG_INFO, + &info_eqm_cfg, sizeof(info_eqm_cfg), + &info_eqm_cfg, &out_size); + if (err != 0 || out_size == 0 || info_eqm_cfg.head.status != 0) { + sdk_err(hwdev->dev_hdl, "Failed to init func table, err: %d, status: 0x%x, out_size: 0x%x\n", + err, info_eqm_cfg.head.status, out_size); + return -EFAULT; + } + + return 0; +} + +#define EQM_DATA_BUF_SIZE 1024 +#define MQM_ATT_PAGE_NUM 128 + +static int 
mqm_eqm_set_page_2_hw(struct hinic5_hwdev *hwdev) +{ + struct comm_cmd_eqm_search_gpa *info = NULL; + struct hinic5_dma_addr_align *page_addr = NULL; + void *send_buf = NULL; + u16 send_buf_size, out_size; + u64 *gpa_hi52 = NULL; + u64 gpa; + u32 num, start_idx, i; + int err = 0; + u8 cmd; + + send_buf_size = sizeof(struct comm_cmd_eqm_search_gpa) + + EQM_DATA_BUF_SIZE; + send_buf = kzalloc(send_buf_size, GFP_KERNEL); + if (!send_buf) { + sdk_err(hwdev->dev_hdl, "Alloc virtual mem failed\r\n"); + return -EFAULT; + } + + page_addr = hwdev->mqm_att.brm_srch_page_addr; + info = (struct comm_cmd_eqm_search_gpa *)send_buf; + + gpa_hi52 = info->gpa_hi52; + num = 0; + start_idx = 0; + cmd = COMM_MGMT_CMD_SET_MQM_SRCH_GPA; + for (i = 0; i < hwdev->mqm_att.page_num; i++) { + /* gpa align to 4K, save gpa[31:12] */ + gpa = page_addr->align_paddr >> 12; + gpa_hi52[num] = gpa; + num++; + if (num == MQM_ATT_PAGE_NUM) { + info->num = num; + info->start_idx = start_idx; + info->host_id = hinic5_host_id(hwdev); + out_size = send_buf_size; + err = comm_msg_to_mgmt_sync(hwdev, cmd, info, (u16)send_buf_size, + info, &out_size); + if (MSG_TO_MGMT_SYNC_RETURN_ERR(err, out_size, info->head.status)) { + sdk_err(hwdev->dev_hdl, "Set mqm srch gpa fail, err: %d, status: 0x%x, out_size: 0x%x\n", + err, info->head.status, out_size); + err = -EFAULT; + goto set_page_2_hw_end; + } + + gpa_hi52 = info->gpa_hi52; + num = 0; + start_idx = i + 1; + } + page_addr++; + } + + if (num != 0) { + info->num = num; + info->start_idx = start_idx; + info->host_id = hinic5_host_id(hwdev); + out_size = send_buf_size; + err = comm_msg_to_mgmt_sync(hwdev, cmd, info, (u16)send_buf_size, info, &out_size); + if (MSG_TO_MGMT_SYNC_RETURN_ERR(err, out_size, info->head.status)) { + sdk_err(hwdev->dev_hdl, "Set mqm srch gpa fail, err: %d, status: 0x%x, out_size: 0x%x\n", + err, info->head.status, out_size); + err = -EFAULT; + goto set_page_2_hw_end; + } + } + +set_page_2_hw_end: + kfree(send_buf); + return err; +} + 
+static int get_eqm_num(struct hinic5_hwdev *hwdev, struct comm_cmd_get_eqm_num *info_eqm_fix) +{ + int ret; + u16 len = sizeof(*info_eqm_fix); + + memset(info_eqm_fix, 0, sizeof(*info_eqm_fix)); + + ret = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_GET_MQM_FIX_INFO, + info_eqm_fix, sizeof(*info_eqm_fix), info_eqm_fix, &len); + if (ret != 0 || len == 0 || info_eqm_fix->head.status != 0) { + sdk_err(hwdev->dev_hdl, "Get mqm fix info fail,err: %d, status: 0x%x, out_size: 0x%x\n", + ret, info_eqm_fix->head.status, len); + return -EFAULT; + } + + sdk_info(hwdev->dev_hdl, "get chunk_num: 0x%x, search_gpa_num: 0x%08x\n", + info_eqm_fix->chunk_num, info_eqm_fix->search_gpa_num); + + return 0; +} + +static int mqm_eqm_init(struct hinic5_hwdev *hwdev) +{ + struct comm_cmd_get_eqm_num info_eqm_fix; + int ret; + + if (hwdev->hwif->attr.func_type != TYPE_PPF) + return 0; + + ret = get_eqm_num(hwdev, &info_eqm_fix); + if (ret != 0) + return ret; + + if (info_eqm_fix.chunk_num == 0) + return 0; + + hwdev->mqm_att.chunk_num = info_eqm_fix.chunk_num; + hwdev->mqm_att.search_gpa_num = info_eqm_fix.search_gpa_num; + hwdev->mqm_att.page_size = 0; + hwdev->mqm_att.page_num = 0; + + hwdev->mqm_att.brm_srch_page_addr = + kcalloc(hwdev->mqm_att.chunk_num, sizeof(struct hinic5_dma_addr_align), GFP_KERNEL); + if (!(hwdev->mqm_att.brm_srch_page_addr)) { + sdk_err(hwdev->dev_hdl, "Alloc virtual mem failed\r\n"); + return -EFAULT; + } + + ret = mqm_eqm_alloc_page_mem(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Alloc eqm page mem failed\r\n"); + goto err_page; + } + + ret = mqm_eqm_set_page_2_hw(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Set page to hw failed\r\n"); + goto err_ecmd; + } + + ret = mqm_eqm_set_cfg_2_hw(hwdev, 1); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Set page to hw failed\r\n"); + goto err_ecmd; + } + + sdk_info(hwdev->dev_hdl, "ppf_ext_db_init ok\r\n"); + + return 0; + +err_ecmd: + mqm_eqm_free_page_mem(hwdev); + +err_page: + 
kfree(hwdev->mqm_att.brm_srch_page_addr); + + return ret; +} + +static void mqm_eqm_deinit(struct hinic5_hwdev *hwdev) +{ + int ret; + + if (hwdev->hwif->attr.func_type != TYPE_PPF) + return; + + if (hwdev->mqm_att.chunk_num == 0) + return; + + mqm_eqm_free_page_mem(hwdev); + kfree(hwdev->mqm_att.brm_srch_page_addr); + + ret = mqm_eqm_set_cfg_2_hw(hwdev, 0); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "Set mqm eqm cfg to chip fail! err: %d\n", + ret); + return; + } + + hwdev->mqm_att.chunk_num = 0; + hwdev->mqm_att.search_gpa_num = 0; + hwdev->mqm_att.page_num = 0; + hwdev->mqm_att.page_size = 0; +} + +int hinic5_ppf_ext_db_init(struct hinic5_hwdev *hwdev) +{ + int ret; + + ret = mqm_eqm_init(hwdev); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "MQM eqm init fail!\n"); + return -EFAULT; + } + + return 0; +} + +int hinic5_ppf_ext_db_deinit(struct hinic5_hwdev *hwdev) +{ + if (!hwdev) + return -EINVAL; + + mqm_eqm_deinit(hwdev); + + return 0; +} + +static enum hinic5_wait_return check_flr_finish_handler(void *priv_data) +{ + struct hinic5_hwif *hwif = priv_data; + enum hinic5_pf_status status; + + status = hinic5_get_pf_status(hwif); + if (status == HINIC5_PF_STATUS_FLR_FINISH_FLAG) { +#if defined(__UEFI__) && !defined(__HIFC__) + hinic5_set_pf_status(hwif, HINIC5_PF_STATUS_INIT); +#else + hinic5_set_pf_status(hwif, HINIC5_PF_STATUS_ACTIVE_FLAG); +#endif + return WAIT_PROCESS_CPL; + } + + return WAIT_PROCESS_WAITING; +} + +#define HINIC5_FLR_TIMEOUT 40000 + +static int wait_for_flr_finish(struct hinic5_hwif *hwif, u32 flr_timeout_ms) +{ + u32 flr_timeout = (flr_timeout_ms == 0) ? 
HINIC5_FLR_TIMEOUT : flr_timeout_ms;
+
+	return hinic5_wait_for_timeout(hwif, check_flr_finish_handler,
+				       flr_timeout, 0xa * USEC_PER_MSEC);
+}
+
+#define HINIC5_WAIT_CMDQ_IDLE_TIMEOUT 5000
+
+/* Poll callback: done when every cmdq is idle, or when the chip is no
+ * longer present (nothing left to wait for).
+ */
+static enum hinic5_wait_return check_cmdq_stop_handler(void *priv_data)
+{
+	struct hinic5_hwdev *hwdev = priv_data;
+	struct hinic5_cmdqs *cmdqs = hwdev->cmdqs;
+	u32 cmdq_type;
+
+	/* Stop waiting when card unpresent */
+	if (!hinic5_is_chip_present(hwdev))
+		return WAIT_PROCESS_CPL;
+
+	cmdq_type = (u32)HINIC5_CMDQ_SYNC;
+	for (; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+		if (!hinic5_cmdq_idle(&cmdqs->cmdq[cmdq_type]))
+			return WAIT_PROCESS_WAITING;
+	}
+
+	return WAIT_PROCESS_CPL;
+}
+
+/* Disable new cmdq submissions and wait for in-flight commands to drain.
+ * On timeout the busy queues are logged and the ENABLE flag is restored.
+ * NOTE(review): restoring ENABLE after a drain timeout looks deliberate
+ * (keep the device usable for the caller) - confirm against the callers.
+ */
+static int wait_cmdq_stop(struct hinic5_hwdev *hwdev)
+{
+	u32 cmdq_type;
+	struct hinic5_cmdqs *cmdqs = hwdev->cmdqs;
+	int err;
+
+	if ((cmdqs->status & HINIC5_CMDQ_ENABLE) == 0)
+		return 0;
+
+	cmdqs->status &= ~HINIC5_CMDQ_ENABLE;
+
+	err = hinic5_wait_for_timeout(hwdev, check_cmdq_stop_handler,
+				      HINIC5_WAIT_CMDQ_IDLE_TIMEOUT,
+				      USEC_PER_MSEC);
+	if (err == 0)
+		return 0;
+
+	cmdq_type = (u32)HINIC5_CMDQ_SYNC;
+	for (; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+		if (!hinic5_cmdq_idle(&cmdqs->cmdq[cmdq_type]))
+			sdk_err(hwdev->dev_hdl, "Cmdq %u is busy\n", cmdq_type);
+	}
+
+	cmdqs->status |= HINIC5_CMDQ_ENABLE;
+
+	return err;
+}
+
+/* Flush all rx/tx resources of this function:
+ *   1. optionally give the microcode time to stop I/O (non-VF only)
+ *   2. drain the cmdqs and disable the doorbell area
+ *   3. ask the mgmt CPU to flush doorbells and start the resource flush
+ *   4. on non-VF, wait for firmware to signal FLR completion
+ *   5. re-enable doorbells and re-init the cmdq contexts
+ * Intermediate failures are only logged (the sequence keeps going); the
+ * value returned is that of the final cmdq context re-init.
+ */
+static int hinic5_rx_tx_flush(struct hinic5_hwdev *hwdev, u16 channel, bool wait_io,
+			      u32 flr_timeout_ms)
+{
+	struct hinic5_hwif *hwif = hwdev->hwif;
+	struct comm_cmd_clear_doorbell clear_db;
+	struct comm_cmd_clear_resource clr_res;
+	u16 out_size;
+	int err;
+
+	if (HINIC5_FUNC_TYPE(hwdev) != TYPE_VF && wait_io == true)
+		msleep(100); /* wait ucode 100 ms stop I/O */
+
+	err = wait_cmdq_stop(hwdev);
+	if (err != 0)
+		sdk_warn(hwdev->dev_hdl, "CMDQ is still working, please check CMDQ timeout value is reasonable\n");
+
+	hinic5_disable_doorbell(hwif);
+
+	out_size = sizeof(clear_db);
+	memset(&clear_db, 0, sizeof(clear_db));
+	clear_db.func_id = HINIC5_HWIF_GLOBAL_IDX(hwif);
+
+	err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_FLUSH_DOORBELL, &clear_db,
+				       sizeof(clear_db), &clear_db, &out_size, channel);
+	if (err != 0 || out_size == 0 || clear_db.head.status != 0) {
+		sdk_warn(hwdev->dev_hdl, "Failed to flush doorbell, err: %d, status: 0x%x, out_size: 0x%x, channel: 0x%x\n",
+			 err, clear_db.head.status, out_size, channel);
+	}
+
+	hinic5_set_pf_status(hwif, HINIC5_PF_STATUS_FLR_START_FLAG);
+
+	memset(&clr_res, 0, sizeof(clr_res));
+	clr_res.func_id = HINIC5_HWIF_GLOBAL_IDX(hwif);
+
+	err = hinic5_msg_to_mgmt_no_ack(hwdev, HINIC5_MOD_COMM, COMM_MGMT_CMD_START_FLUSH, &clr_res,
+					sizeof(clr_res), channel);
+	if (err != 0)
+		sdk_warn(hwdev->dev_hdl, "Failed to notice flush message, err: %d, channel: 0x%x\n",
+			 err, channel);
+
+	if (HINIC5_FUNC_TYPE(hwdev) != TYPE_VF) {
+		err = wait_for_flr_finish(hwif, flr_timeout_ms);
+		if (err != 0)
+			sdk_warn(hwdev->dev_hdl, "Wait firmware FLR timeout\n");
+	}
+
+	hinic5_enable_doorbell(hwif);
+
+	err = hinic5_reinit_cmdq_ctxts(hwdev);
+	if (err != 0)
+		sdk_warn(hwdev->dev_hdl, "Failed to reinit cmdq\n");
+
+	return err;
+}
+
+/* Exported entry: validate hwdev and skip the flush when the chip is gone. */
+int hinic5_func_rx_tx_flush(void *hwdev, u16 channel, bool wait_io, u32 flr_timeout_ms)
+{
+	struct hinic5_hwdev *dev = hwdev;
+
+	if (!hwdev)
+		return -EINVAL;
+
+	if (!hinic5_is_chip_present(dev))
+		return 0;
+
+	return hinic5_rx_tx_flush(dev, channel, wait_io, flr_timeout_ms);
+}
+EXPORT_SYMBOL(hinic5_func_rx_tx_flush);
+
+/* Query the board information from the mgmt CPU over @channel. */
+int hinic5_get_board_info(void *hwdev, struct hinic5_board_info *info,
+			  u16 channel)
+{
+	struct comm_cmd_board_info board_info;
+	u16 out_size = sizeof(board_info);
+	int err;
+
+	if (!hwdev || !info)
+		return -EINVAL;
+
+	memset(&board_info, 0, sizeof(board_info));
+	err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_GET_BOARD_INFO,
+				       &board_info, sizeof(board_info),
+				       &board_info, &out_size, channel);
+	if (err != 0 || out_size == 0 || board_info.head.status != 0) {
+		sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl,
+
"Failed to get board info, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", + err, board_info.head.status, out_size, channel); + return -EIO; + } + + memcpy(info, &board_info.info, sizeof(*info)); + + return 0; +} +EXPORT_SYMBOL(hinic5_get_board_info); + +int hinic5_get_hw_pf_infos(void *hwdev, struct hinic5_hw_pf_infos *infos, + u16 channel) +{ + struct comm_cmd_hw_pf_infos *pf_infos = NULL; + u16 out_size = sizeof(*pf_infos); + int err = 0; + + if (!hwdev || !infos) + return -EINVAL; + + pf_infos = kzalloc(sizeof(*pf_infos), GFP_KERNEL); + if (!pf_infos) + return -ENOMEM; + + err = comm_msg_to_mgmt_sync_ch(hwdev, COMM_MGMT_CMD_GET_HW_PF_INFOS, + pf_infos, sizeof(*pf_infos), + pf_infos, &out_size, channel); + if (err != 0 || out_size == 0 || pf_infos->head.status != 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "Failed to get hw pf information, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", + err, pf_infos->head.status, out_size, channel); + err = -EIO; + goto free_buf; + } + + memcpy(infos, &pf_infos->infos, sizeof(struct hinic5_hw_pf_infos)); + +free_buf: + kfree(pf_infos); + return err; +} +EXPORT_SYMBOL(hinic5_get_hw_pf_infos); + +int hinic5_get_global_attr(void *hwdev, struct comm_global_attr *attr) +{ + struct comm_cmd_get_glb_attr get_attr = { 0 }; + u16 out_size = sizeof(get_attr); + int err = 0; + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_GET_GLOBAL_ATTR, + &get_attr, sizeof(get_attr), &get_attr, + &out_size); + if (err != 0 || out_size == 0 || get_attr.head.status != 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "Failed to get global attribute, err: %d, status: 0x%x, out size: 0x%x\n", + err, get_attr.head.status, out_size); + return -EIO; + } + + memcpy(attr, &get_attr.attr, sizeof(struct comm_global_attr)); + + return 0; +} + +int hinic5_set_func_svc_used_state(void *hwdev, u16 svc_type, u8 state, + u16 channel) +{ + struct comm_cmd_func_svc_used_state used_state; + u16 out_size = sizeof(used_state); + 
int err; + + if (!hwdev) + return -EINVAL; + + memset(&used_state, 0, sizeof(used_state)); + used_state.func_id = hinic5_global_func_id(hwdev); + used_state.svc_type = svc_type; + used_state.used_state = state; + + err = comm_msg_to_mgmt_sync_ch(hwdev, + COMM_MGMT_CMD_SET_FUNC_SVC_USED_STATE, + &used_state, sizeof(used_state), + &used_state, &out_size, channel); + if (err != 0 || out_size == 0 || used_state.head.status != 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "Failed to set func service used state, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n\n", + err, used_state.head.status, out_size, channel); + return -EIO; + } + + return 0; +} +EXPORT_SYMBOL(hinic5_set_func_svc_used_state); + +int hinic5_get_sml_table_info(void *hwdev, u32 tbl_id, u8 *node_id, u8 *instance_id) +{ + struct sml_table_id_info sml_table = {0}; + struct comm_cmd_get_sml_tbl_data sml_tbl = {0}; + u16 out_size = sizeof(sml_tbl); + int err; + + if (!hwdev) + return -EINVAL; + + if (tbl_id != 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, "sml table index is not 0"); + return -EINVAL; + } + + err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_GET_SML_TABLE_INFO, + &sml_tbl, sizeof(sml_tbl), &sml_tbl, &out_size); + if (err != 0 || out_size == 0 || sml_tbl.head.status != 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "Failed to get sml table information, err: %d, status: 0x%x, out size: 0x%x\n", + err, sml_tbl.head.status, out_size); + return -EIO; + } + + memcpy(&sml_table, sml_tbl.tbl_data, sizeof(sml_table)); + + *node_id = sml_table.node_id; + *instance_id = sml_table.instance_id; + + return 0; +} + +int hinic5_activate_firmware(void *hwdev, u8 cfg_index) +{ + struct hinic5_cmd_activate_firmware activate_msg; + u16 out_size = sizeof(activate_msg); + int err; + + if (!hwdev) + return -EINVAL; + + if (hinic5_func_type(hwdev) == TYPE_VF) + return -EOPNOTSUPP; + + memset(&activate_msg, 0, sizeof(activate_msg)); + activate_msg.index = cfg_index; + + err = 
hinic5_msg_to_mgmt_sync(hwdev, HINIC5_MOD_COMM, COMM_MGMT_CMD_ACTIVE_FW, + &activate_msg, sizeof(activate_msg), + &activate_msg, &out_size, FW_UPDATE_MGMT_TIMEOUT, 0); + if (err != 0 || out_size == 0 || activate_msg.msg_head.status != 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "Failed to activate firmware, err: %d, status: 0x%x, out size: 0x%x\n", + err, activate_msg.msg_head.status, out_size); + err = (activate_msg.msg_head.status != 0) ? activate_msg.msg_head.status : -EIO; + return err; + } else { + return 0; + } +} + +int hinic5_switch_config(void *hwdev, u8 cfg_index) +{ + struct hinic5_cmd_switch_config switch_cfg; + u16 out_size = sizeof(switch_cfg); + int err; + + if (!hwdev) + return -EINVAL; + + if (hinic5_func_type(hwdev) != TYPE_PF) + return -EOPNOTSUPP; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic5_hwdev *)hwdev)) + return -EPERM; + + memset(&switch_cfg, 0, sizeof(switch_cfg)); + switch_cfg.index = cfg_index; + + err = hinic5_pf_to_mgmt_sync(hwdev, HINIC5_MOD_COMM, COMM_MGMT_CMD_SWITCH_CFG, + &switch_cfg, sizeof(switch_cfg), + &switch_cfg, &out_size, FW_UPDATE_MGMT_TIMEOUT); + if (err != 0 || out_size == 0 || switch_cfg.msg_head.status != 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "Failed to switch cfg, err: %d, status: 0x%x, out size: 0x%x\n", + err, switch_cfg.msg_head.status, out_size); + err = (switch_cfg.msg_head.status != 0) ? 
switch_cfg.msg_head.status : -EIO; + return err; + } + + return 0; +} + +int hinic5_get_secure_mem_cfg(struct hinic5_hwdev *hwdev, dma_addr_t *gpa, u32 *len) +{ + struct hinic5_cqm_cmd_func_secure_mem mem_info; + u16 out_size = sizeof(mem_info); + int ret; + + if (!hwdev || !gpa || !len) + return -EPERM; + + memset(&mem_info, 0, sizeof(mem_info)); + mem_info.func_id = hinic5_global_func_id((void *)hwdev); + + ret = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_GET_FUNC_SECURE_MEM, + &mem_info, sizeof(mem_info), + &mem_info, &out_size); + if (mem_info.head.status == HINIC5_MGMT_CMD_UNSUPPORTED) + return -EPERM; + + if (ret != 0 || out_size == 0 || mem_info.head.status != 0) { + sdk_err(hwdev->dev_hdl, + "Failed to get memsec info, ret: %d, status: 0x%x, out size: 0x%x\n", + ret, mem_info.head.status, out_size); + return -EINVAL; + } + + if (mem_info.valid == 0) + return -EPERM; + + *len = mem_info.len; + *gpa = (dma_addr_t)MAKE_64BITS(mem_info.gpa_hi, mem_info.gpa_lo); + + return 0; +} + +#define PLUG_SRV_GET 1 +#define PLUG_SRV_SET 0 + +int hisdk5_set_plug_srv_bitmap(void *hwdev, u8 srv_type, u16 func_id, u8 attach_en) +{ + struct comm_cmd_plug_srv plug_srv; + u16 out_size = sizeof(plug_srv); + int ret; + + if (!hwdev) + return -EPERM; + + memset(&plug_srv, 0, sizeof(plug_srv)); + plug_srv.func_id = func_id; + plug_srv.srv_type = srv_type; + plug_srv.attach_en = attach_en; + + ret = comm_msg_to_mgmt_sync((struct hinic5_hwdev *)hwdev, COMM_MGMT_CMD_SET_FUNC_PLUG_SRV, + &plug_srv, sizeof(plug_srv), + &plug_srv, &out_size); + if (ret != 0 || out_size == 0 || plug_srv.head.status != 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "Failed to set plug srv_bitmap, ret: %d, status: 0x%x, out size: 0x%x\n", + ret, plug_srv.head.status, out_size); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(hisdk5_set_plug_srv_bitmap); + +int hisdk5_get_plug_srv_bitmap(void *hwdev, u8 srv_type, u16 func_id, u8 *attach_en) +{ + struct comm_cmd_plug_srv plug_srv; + u16 
out_size = sizeof(plug_srv);
+	int ret;
+
+	if (!hwdev)
+		return -EPERM;
+
+	memset(&plug_srv, 0, sizeof(plug_srv));
+	plug_srv.func_id = func_id;
+	plug_srv.srv_type = srv_type;
+
+	ret = comm_msg_to_mgmt_sync((struct hinic5_hwdev *)hwdev, COMM_MGMT_CMD_GET_FUNC_PLUG_SRV,
+				    &plug_srv, sizeof(plug_srv),
+				    &plug_srv, &out_size);
+	if (ret != 0 || out_size == 0 || plug_srv.head.status != 0) {
+		sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl,
+			"Failed to get plug srv_bitmap, ret: %d, status: 0x%x, out size: 0x%x\n",
+			ret, plug_srv.head.status, out_size);
+		return -EINVAL;
+	}
+
+	*attach_en = plug_srv.attach_en;
+
+	return 0;
+}
+EXPORT_SYMBOL(hisdk5_get_plug_srv_bitmap);
diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/crm/hinic5_hw_comm.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/crm/hinic5_hw_comm.h
new file mode 100644
index 00000000..62c6210d
--- /dev/null
+++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/crm/hinic5_hw_comm.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2021 Huawei Technologies Co., Ltd */
+
+#ifndef HINIC5_COMM_H
+#define HINIC5_COMM_H
+
+#include <linux/types.h>
+
+#include "mpu_inband_cmd_defs.h"
+#include "hinic5_hwdev.h"
+
+/* Combined failure check for synchronous mgmt messages: transport error,
+ * non-zero firmware status, or an empty reply all count as failure.
+ */
+#define MSG_TO_MGMT_SYNC_RETURN_ERR(err, out_size, status) \
+	(((err) != 0) || ((status) != 0) || ((out_size) == 0))
+
+/* Encode a page size for hardware as log2(pg_size / 4K). */
+#define HINIC5_PAGE_SIZE_HW(pg_size) ((u8)ilog2((u32)((pg_size) >> 12)))
+
+enum func_tmr_bitmap_status {
+	FUNC_TMR_BITMAP_DISABLE,
+	FUNC_TMR_BITMAP_ENABLE,
+};
+
+enum ppf_tmr_status {
+	HINIC_PPF_TMR_FLAG_STOP,
+	HINIC_PPF_TMR_FLAG_START,
+};
+
+#define HINIC5_HT_GPA_PAGE_SIZE 4096UL
+#define HINIC5_HT_GPA_SET_RETRY_TIMES 10
+
+extern unsigned char lowpower_mode;
+
+int hinic5_set_cmdq_depth(void *hwdev, u16 cmdq_depth);
+
+int hinic5_set_enhance_cmdq_ctxt(struct hinic5_hwdev *hwdev, u8 cmdq_id,
+				 struct enhance_cmdq_ctxt_info *ctxt);
+
+int hinic5_set_cmdq_ctxt(struct hinic5_hwdev *hwdev, u8 cmdq_id,
+			 struct cmdq_ctxt_info *ctxt);
+
+int
hinic5_ppf_ext_db_init(struct hinic5_hwdev *hwdev);
+
+int hinic5_ppf_ext_db_deinit(struct hinic5_hwdev *hwdev);
+
+int hinic5_set_ceq_ctrl_reg(struct hinic5_hwdev *hwdev, u16 q_id,
+			    u32 ctrl0, u32 ctrl1);
+
+int hinic5_set_dma_attr_tbl(struct hinic5_hwdev *hwdev, u8 entry_idx, u8 st, u8 at, u8 ph,
+			    u8 no_snooping, u8 tph_en);
+
+int hinic5_get_comm_features(void *hwdev, u64 *s_feature, u16 size);
+int hinic5_set_comm_features(void *hwdev, u64 *s_feature, u16 size);
+
+int hinic5_comm_channel_detect(struct hinic5_hwdev *hwdev);
+
+int hinic5_get_global_attr(void *hwdev, struct comm_global_attr *attr);
+
+int hinic5_get_secure_mem_cfg(struct hinic5_hwdev *hwdev, dma_addr_t *gpa, u32 *len);
+
+int hisdk5_get_plug_srv_bitmap(void *hwdev, u8 srv_type, u16 func_id, u8 *attach_en);
+int hisdk5_set_plug_srv_bitmap(void *hwdev, u8 srv_type, u16 func_id, u8 attach_en);
+
+int hinic5_get_board_info(void *hwdev, struct hinic5_board_info *info, u16 channel);
+
+/**
+ * @brief Initialize the HT GPA
+ * @param hwdev device handle
+ *
+ * @return whether the operation succeeded
+ * @retval zero: success
+ * @retval non-zero: failure
+ */
+int hinic5_ht_gpa_init(struct hinic5_hwdev *hwdev);
+
+/**
+ * @brief Deinitialize the HT GPA
+ * @param hwdev device handle
+ */
+void hinic5_ht_gpa_deinit(struct hinic5_hwdev *hwdev);
+
+#endif
diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/crm/hinic5_hwdev.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/crm/hinic5_hwdev.c
new file mode 100644
index 00000000..447ff687
--- /dev/null
+++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/crm/hinic5_hwdev.c
@@ -0,0 +1,2767 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 Huawei Technologies Co., Ltd */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/rtc.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/semaphore.h>
+#include <linux/interrupt.h>
+#include <linux/vmalloc.h>
+
+#include "ossl_knl.h" +#include "hinic5_mt.h" +#include "hinic5_crm.h" +#include "hinic5_hw.h" +#include "hinic5_common.h" +#include "hinic5_csr_inner.h" +#include "hinic5_hwif_inner.h" +#include "hinic5_typedef_inner.h" +#include "hinic5_eqs.h" +#include "hinic5_api_cmd.h" +#include "hinic5_mgmt.h" +#include "hinic5_mbox.h" +#include "hinic5_cmdq.h" +#include "hinic5_hw_cfg.h" +#include "hinic5_hw_comm.h" +#include "hinic5_hinic5_cqm.h" +#include "sdk_pub_cmd.h" +#if !defined(__WIN__) +#include "hinic5_cqm_fast_msg.h" +#include "hinic5_devlink.h" +#endif +#include "mpu_inband_cmd.h" +#if defined(__UEFI__) && !defined(__HIFC__) +#include "mpu_board_defs.h" +#endif +#include "hinic5_prof_adap.h" +#include "hinic5_chip_info.h" +#if !defined(__UEFI__) && !defined(__VMWARE__) && !defined(__WIN__) +#include "hinic5_ubus_id_tbl.h" +#include "hinic5_bus.h" +#include "hinic5_lld.h" +#include "hinic5_dev_mgmt.h" +#include "hinic5_micro_log.h" +#include "hinic5_non_ptp.h" +#endif +#include "hinic5_hwdev.h" + +static unsigned int wq_page_order = HINIC5_MAX_WQ_PAGE_SIZE_ORDER; +module_param(wq_page_order, uint, 0444); +MODULE_PARM_DESC(wq_page_order, "Set wq page size order, wq page size is 4K * " \ + "(2 ^ wq_page_order) - default is 8"); + +static ulong perf_en_bitmap; +module_param(perf_en_bitmap, ulong, 0644); +MODULE_PARM_DESC(perf_en_bitmap, + "Set perf enable bitmap: 0-disable, 1-enable (bit(0)-cmdq, " \ + "bit(1)-mailbox) - default is 0"); + +#define HINIC5_DMA_ATTR_INDIR_IDX_SHIFT 0 +#define UNKNOWN_LEN 7 + +#define HINIC5_DMA_ATTR_INDIR_IDX_MASK 0x3FF + +#define HINIC5_DMA_ATTR_INDIR_IDX_SET(val, member) \ + (((u32)(val) & HINIC5_DMA_ATTR_INDIR_##member##_MASK) << \ + HINIC5_DMA_ATTR_INDIR_##member##_SHIFT) + +#define HINIC5_DMA_ATTR_INDIR_IDX_CLEAR(val, member) \ + ((val) & (~(HINIC5_DMA_ATTR_INDIR_##member##_MASK \ + << HINIC5_DMA_ATTR_INDIR_##member##_SHIFT))) + +#define HINIC5_DMA_ATTR_ENTRY_ST_SHIFT 0 +#define HINIC5_DMA_ATTR_ENTRY_AT_SHIFT 8 +#define 
HINIC5_DMA_ATTR_ENTRY_PH_SHIFT 10 +#define HINIC5_DMA_ATTR_ENTRY_NO_SNOOPING_SHIFT 12 +#define HINIC5_DMA_ATTR_ENTRY_TPH_EN_SHIFT 13 + +#define HINIC5_DMA_ATTR_ENTRY_ST_MASK 0xFF +#define HINIC5_DMA_ATTR_ENTRY_AT_MASK 0x3 +#define HINIC5_DMA_ATTR_ENTRY_PH_MASK 0x3 +#define HINIC5_DMA_ATTR_ENTRY_NO_SNOOPING_MASK 0x1 +#define HINIC5_DMA_ATTR_ENTRY_TPH_EN_MASK 0x1 + +#define HINIC5_DMA_ATTR_ENTRY_SET(val, member) \ + (((u32)(val) & HINIC5_DMA_ATTR_ENTRY_##member##_MASK) << \ + HINIC5_DMA_ATTR_ENTRY_##member##_SHIFT) + +#define HINIC5_DMA_ATTR_ENTRY_CLEAR(val, member) \ + ((val) & (~(HINIC5_DMA_ATTR_ENTRY_##member##_MASK \ + << HINIC5_DMA_ATTR_ENTRY_##member##_SHIFT))) + +#define HINIC5_PCIE_ST_DISABLE 0 +#define HINIC5_PCIE_AT_DISABLE 0 +#define HINIC5_PCIE_PH_DISABLE 0 + +#define PCIE_MSIX_ATTR_ENTRY 0 + +#define HINIC5_DEAULT_EQ_MSIX_PENDING_LIMIT 0 +#define HINIC5_DEAULT_EQ_MSIX_COALESC_TIMER_CFG 0xFF +#define HINIC5_DEAULT_EQ_MSIX_RESEND_TIMER_CFG 7 + +#define HINIC5_HWDEV_WQ_NAME "hinic5_hardware" +#define HINIC5_WQ_MAX_REQ 10 + +#define SLAVE_HOST_STATUS_CLEAR(host_id, val) ((val) & (~(1U << (host_id)))) +#define SLAVE_HOST_STATUS_SET(host_id, enable) (((u8)(enable) & 1U) << (host_id)) +#define SLAVE_HOST_STATUS_GET(host_id, val) (((val) & (1U << (host_id))) != 0) + +#define HINIC5_COMM_RES \ + ((BIT(RES_TYPE_COMM)) | (BIT(RES_TYPE_COMM_CMD_CH)) | \ + (BIT(RES_TYPE_FLUSH_BIT)) | (BIT(RES_TYPE_MQM)) | \ + (BIT(RES_TYPE_SMF)) | (BIT(RES_TYPE_PF_BW_CFG))) + +void set_slave_host_enable(void *hwdev, u8 host_id, bool enable) +{ + u32 reg_val; + struct hinic5_hwdev *dev = (struct hinic5_hwdev *)hwdev; + + if (HINIC5_FUNC_TYPE(dev) != TYPE_PPF) + return; + + reg_val = hinic5_hwif_read_reg(dev->hwif, HINIC5_MULT_HOST_SLAVE_STATUS_ADDR); + + reg_val = SLAVE_HOST_STATUS_CLEAR(host_id, reg_val); + reg_val |= SLAVE_HOST_STATUS_SET(host_id, enable); + hinic5_hwif_write_reg(dev->hwif, HINIC5_MULT_HOST_SLAVE_STATUS_ADDR, reg_val); + + sdk_info(dev->dev_hdl, "Set slave host %u 
status %d, reg value: 0x%x\n", + host_id, enable, reg_val); +} + +int hinic5_get_slave_host_enable(void *hwdev, u8 host_id, u8 *slave_en) +{ + struct hinic5_hwdev *dev = hwdev; + + u32 reg_val; + + if (!hwdev || !slave_en) + return -EINVAL; + + if (HINIC5_FUNC_TYPE(dev) != TYPE_PPF) { + sdk_warn(dev->dev_hdl, "hwdev should be ppf\n"); + return -EINVAL; + } + + reg_val = hinic5_hwif_read_reg(dev->hwif, HINIC5_MULT_HOST_SLAVE_STATUS_ADDR); + *slave_en = SLAVE_HOST_STATUS_GET(host_id, reg_val); + + return 0; +} +EXPORT_SYMBOL(hinic5_get_slave_host_enable); + +int hinic5_get_slave_bitmap(void *hwdev, u8 *slave_host_bitmap) +{ + struct hinic5_hwdev *dev = hwdev; + struct service_cap *cap = NULL; + + if (!dev || !dev->cfg_mgmt) + return -EINVAL; + cap = &dev->cfg_mgmt->svc_cap; + if (HINIC5_FUNC_TYPE(dev) != TYPE_PPF) { + sdk_warn(dev->dev_hdl, "hwdev should be ppf\n"); + return -EINVAL; + } + + *slave_host_bitmap = cap->host_valid_bitmap & (~(1U << cap->master_host_id)); + + return 0; +} +EXPORT_SYMBOL(hinic5_get_slave_bitmap); + +static void hinic5_init_host_mode_pre(struct hinic5_hwdev *hwdev) +{ + struct service_cap *cap = &hwdev->cfg_mgmt->svc_cap; + u8 host_id = hwdev->hwif->attr.pci_intf_idx; + + switch (cap->srv_multi_host_mode) { + case HINIC5_SDI_MODE_BM: + if (host_id == cap->master_host_id) + hwdev->func_mode = FUNC_MOD_MULTI_BM_MASTER; + else + hwdev->func_mode = FUNC_MOD_MULTI_BM_SLAVE; + break; + case HINIC5_SDI_MODE_VM: + if (host_id == cap->master_host_id) + hwdev->func_mode = FUNC_MOD_MULTI_VM_MASTER; + else + hwdev->func_mode = FUNC_MOD_MULTI_VM_SLAVE; + break; + default: + hwdev->func_mode = FUNC_MOD_NORMAL_HOST; + break; + } + sdk_info(hwdev->dev_hdl, "host mode init, host_mode:%d, func_mode:%d\n", + cap->srv_multi_host_mode, hwdev->func_mode); +} + +STATIC int hinic5_multi_host_enable(struct hinic5_hwdev *hwdev, bool enable) +{ + if (!IS_SLAVE_HOST(hwdev) || !HINIC5_IS_PPF(hwdev)) + return 0; + + set_slave_host_enable(hwdev, 
hinic5_pcie_itf_id(hwdev), enable); + + return 0; +} + +static void hinic5_init_heartbeat_detect(struct hinic5_hwdev *hwdev); +static void hinic5_destroy_heartbeat_detect(struct hinic5_hwdev *hwdev); + +typedef void (*mgmt_event_cb)(void *handle, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +struct mgmt_event_handle { + u16 cmd; + mgmt_event_cb proc; +}; + +static int pf_handle_vf_comm_mbox(void *pri_handle, + u16 vf_id, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hinic5_hwdev *hwdev = pri_handle; + + if (!hwdev) + return -EINVAL; + + sdk_warn(hwdev->dev_hdl, "Unsupported vf mbox event %u to process\n", + cmd); + + return 0; +} + +static int vf_handle_pf_comm_mbox(void *pri_handle, + u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hinic5_hwdev *hwdev = pri_handle; + + if (!hwdev) + return -EINVAL; + + sdk_warn(hwdev->dev_hdl, "Unsupported pf mbox event %u to process\n", + cmd); + return 0; +} + +static void chip_fault_show(struct hinic5_hwdev *hwdev, + struct hinic5_fault_event *event) +{ + char fault_level[FAULT_LEVEL_MAX][FAULT_SHOW_STR_LEN + 1] = { + "fatal", "reset", "host", "flr", "general", "suggestion"}; + char level_str[FAULT_SHOW_STR_LEN + 1]; + u8 level; + + memset(level_str, 0, FAULT_SHOW_STR_LEN + 1); + level = event->event.chip.err_level; + if (level < FAULT_LEVEL_MAX) + strscpy(level_str, fault_level[level], sizeof(level_str)); + else + strscpy(level_str, "Unknown", sizeof(level_str)); + + if (level == FAULT_LEVEL_SERIOUS_FLR) + dev_err(hwdev->dev_hdl, "err_level: %u [%s], flr func_id: %u\n", + level, level_str, event->event.chip.func_id); + + dev_err(hwdev->dev_hdl, + "Module_id: 0x%x, err_type: 0x%x, err_level: %u[%s], err_csr_addr: 0x%08x, err_csr_value: 0x%08x\n", + event->event.chip.node_id, + event->event.chip.err_type, level, level_str, + event->event.chip.err_csr_addr, + event->event.chip.err_csr_value); +} + +static void fault_report_show(struct hinic5_hwdev 
*hwdev, + struct hinic5_fault_event *event) +{ + char fault_type[FAULT_TYPE_MAX][FAULT_SHOW_STR_LEN + 1] = { + "chip", "ucode", "mem rd timeout", "mem wr timeout", + "reg rd timeout", "reg wr timeout", "phy fault", "tsensor fault" + }; + char type_str[FAULT_SHOW_STR_LEN + 1] = {0}; + struct fault_event_stats *fault = NULL; + + sdk_err(hwdev->dev_hdl, "Fault event report received, func_id: %u\n", + hinic5_global_func_id(hwdev)); + + fault = &hwdev->hw_stats.fault_event_stats; + + if (event->type < FAULT_TYPE_MAX) { + strscpy(type_str, fault_type[event->type], sizeof(type_str)); + atomic_inc(&fault->fault_type_stat[event->type]); + } else { + strscpy(type_str, "Unknown", sizeof(type_str)); + } + + sdk_err(hwdev->dev_hdl, "Fault type: %u [%s]\n", event->type, type_str); + /* 0, 1, 2 and 3 word Represents array event->event.val index */ + sdk_err(hwdev->dev_hdl, + "Fault val[0]: 0x%08x, val[1]: 0x%08x, val[2]: 0x%08x, val[3]: 0x%08x\n", + event->event.val[0x0], event->event.val[0x1], + event->event.val[0x2], event->event.val[0x3]); + + hinic5_show_chip_err_info(hwdev); + + switch (event->type) { + case FAULT_TYPE_CHIP: + chip_fault_show(hwdev, event); + break; + case FAULT_TYPE_UCODE: + sdk_err(hwdev->dev_hdl, "Cause_id: %u, core_id: %u, c_id: %u, epc: 0x%08x\n", + event->event.ucode.cause_id, event->event.ucode.core_id, + event->event.ucode.c_id, event->event.ucode.epc); + break; + case FAULT_TYPE_MEM_RD_TIMEOUT: + case FAULT_TYPE_MEM_WR_TIMEOUT: + sdk_err(hwdev->dev_hdl, + "Err_csr_ctrl: 0x%08x, err_csr_data: 0x%08x, ctrl_tab: 0x%08x, mem_index: 0x%08x\n", + event->event.mem_timeout.err_csr_ctrl, + event->event.mem_timeout.err_csr_data, + event->event.mem_timeout.ctrl_tab, event->event.mem_timeout.mem_index); + break; + case FAULT_TYPE_REG_RD_TIMEOUT: + case FAULT_TYPE_REG_WR_TIMEOUT: + sdk_err(hwdev->dev_hdl, "Err_csr: 0x%08x\n", event->event.reg_timeout.err_csr); + break; + case FAULT_TYPE_PHY_FAULT: + sdk_err(hwdev->dev_hdl, + "Op_type: %u, port_id: %u, dev_ad: 
%u, csr_addr: 0x%08x, op_data: 0x%08x\n", + event->event.phy_fault.op_type, + event->event.phy_fault.port_id, event->event.phy_fault.dev_ad, + event->event.phy_fault.csr_addr, event->event.phy_fault.op_data); + break; + default: + break; + } +} + +static void fault_event_handler(void *dev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic5_cmd_fault_event *fault_event = NULL; + struct hinic5_fault_event *fault = NULL; + struct hinic5_event_info event_info; + struct hinic5_hwdev *hwdev = dev; + struct card_node *chip_info = hwdev->chip_node; + u8 fault_src = HINIC5_FAULT_SRC_TYPE_MAX; + u8 fault_level; + + if (in_size != sizeof(*fault_event)) { + sdk_err(hwdev->dev_hdl, "Invalid fault event report, length: %u, should be %lu\n", + in_size, sizeof(*fault_event)); + return; + } + + fault_event = buf_in; + fault_report_show(hwdev, &fault_event->event); + + if (fault_event->event.type == FAULT_TYPE_CHIP) + fault_level = fault_event->event.event.chip.err_level; + else + fault_level = FAULT_LEVEL_FATAL; + + if (fault_event->event.type == FAULT_TYPE_CHIP && + fault_level <= (u8)FAULT_LEVEL_SERIOUS_RESET) { + chip_info->exception_flag = true; + sdk_err(hwdev->dev_hdl, "Set card error due to chip fault, lvl %u\n", + fault_level); + } + + if (hwdev->event_callback) { + event_info.service = EVENT_SRV_COMM; + event_info.type = EVENT_COMM_FAULT; + fault = (void *)event_info.event_data; + memcpy(fault, &fault_event->event, + sizeof(struct hinic5_fault_event)); + fault->fault_level = fault_level; + hwdev->event_callback(hwdev->event_pri_handle, &event_info); + } + + if (fault_event->event.type <= FAULT_TYPE_REG_WR_TIMEOUT) + fault_src = fault_event->event.type; + else if (fault_event->event.type == FAULT_TYPE_PHY_FAULT) + fault_src = HINIC5_FAULT_SRC_HW_PHY_FAULT; + + hisdk5_fault_post_process(hwdev, fault_src, fault_level); +} + +static void ffm_event_record(struct hinic5_hwdev *dev, struct dbgtool_k_glb_info *dbgtool_info, + struct ffm_intr_info *intr) 
+{ + struct rtc_time rctm; + struct timeval txc; + u32 ffm_idx; + u32 last_err_csr_addr; + u32 last_err_csr_value; + + ffm_idx = dbgtool_info->ffm->ffm_num; + last_err_csr_addr = dbgtool_info->ffm->last_err_csr_addr; + last_err_csr_value = dbgtool_info->ffm->last_err_csr_value; + if (ffm_idx < FFM_RECORD_NUM_MAX) { + if (intr->err_csr_addr == last_err_csr_addr && + intr->err_csr_value == last_err_csr_value) { + dbgtool_info->ffm->ffm[ffm_idx - 1].times++; + sdk_err(dev->dev_hdl, "Receive intr same, ffm_idx: %u\n", ffm_idx - 1); + return; + } + sdk_err(dev->dev_hdl, "Receive intr, ffm_idx: %u\n", ffm_idx); + + dbgtool_info->ffm->ffm[ffm_idx].intr_info.node_id = intr->node_id; + dbgtool_info->ffm->ffm[ffm_idx].intr_info.err_level = intr->err_level; + dbgtool_info->ffm->ffm[ffm_idx].intr_info.err_type = intr->err_type; + dbgtool_info->ffm->ffm[ffm_idx].intr_info.err_csr_addr = intr->err_csr_addr; + dbgtool_info->ffm->ffm[ffm_idx].intr_info.err_csr_value = intr->err_csr_value; + dbgtool_info->ffm->last_err_csr_addr = intr->err_csr_addr; + dbgtool_info->ffm->last_err_csr_value = intr->err_csr_value; + dbgtool_info->ffm->ffm[ffm_idx].times = 1; + + /* Obtain the current UTC time */ + do_gettimeofday(&txc); + + /* Calculate the time in date value to tm, i.e. 
GMT + 8, mutiplied by 60 * 60 */ + rtc_time_to_tm(txc.tv_sec + 60 * 60 * 8, &rctm); + + /* tm_year starts from 1900; 0->1900, 1->1901, and so on */ + dbgtool_info->ffm->ffm[ffm_idx].year = (u16)(rctm.tm_year + 1900); + /* tm_mon starts from 0, 0 indicates January, and so on */ + dbgtool_info->ffm->ffm[ffm_idx].mon = (u8)rctm.tm_mon + 1; + dbgtool_info->ffm->ffm[ffm_idx].mday = (u8)rctm.tm_mday; + dbgtool_info->ffm->ffm[ffm_idx].hour = (u8)rctm.tm_hour; + dbgtool_info->ffm->ffm[ffm_idx].min = (u8)rctm.tm_min; + dbgtool_info->ffm->ffm[ffm_idx].sec = (u8)rctm.tm_sec; + + dbgtool_info->ffm->ffm_num++; + } +} + +static void ffm_event_msg_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ +#if !defined(__VMWARE__) && !defined(__WIN__) + struct dbgtool_k_glb_info *dbgtool_info = NULL; + struct hinic5_hwdev *dev = hwdev; + struct card_node *card_info = NULL; + struct ffm_intr_info *intr = NULL; + spinlock_t *lock = NULL; + + if (in_size != sizeof(*intr)) { + sdk_err(dev->dev_hdl, "Invalid fault event report, length: %u, should be %ld.\n", + in_size, sizeof(*intr)); + return; + } + + intr = buf_in; + + sdk_err(dev->dev_hdl, "node_id: 0x%x, err_type: 0x%x, err_level: %u, err_csr_addr: 0x%08x, err_csr_value: 0x%08x\n", + intr->node_id, intr->err_type, intr->err_level, + intr->err_csr_addr, intr->err_csr_value); + + hinic5_show_chip_err_info(hwdev); + + card_info = dev->chip_node; + dbgtool_info = card_info->dbgtool_info; + + *out_size = sizeof(*intr); + + if (!dbgtool_info) + return; + + if (!dbgtool_info->ffm) + return; + + lock = &card_info->dbgtool_info_lock; + spin_lock(lock); + ffm_event_record(dev, dbgtool_info, intr); + spin_unlock(lock); +#endif +} + +#define X_CSR_INDEX 30 + +static void sw_watchdog_timeout_info_show(struct hinic5_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct comm_info_sw_watchdog *watchdog_info = buf_in; + u32 stack_len, i, j, tmp; + u32 *dump_addr = NULL; + u64 *reg = NULL; + 
+ if (in_size != sizeof(*watchdog_info)) { + sdk_err(hwdev->dev_hdl, "Invalid mgmt watchdog report, length: %u, should be %ld\n", + in_size, sizeof(*watchdog_info)); + return; + } + + sdk_err(hwdev->dev_hdl, "Mgmt deadloop time: 0x%x 0x%x, task id: 0x%x, sp: 0x%llx\n", + watchdog_info->curr_time_h, watchdog_info->curr_time_l, + watchdog_info->task_id, watchdog_info->sp); + sdk_err(hwdev->dev_hdl, + "Stack current used: 0x%x, peak used: 0x%x, overflow flag: 0x%x, top: 0x%llx, bottom: 0x%llx\n", + watchdog_info->curr_used, watchdog_info->peak_used, + watchdog_info->is_overflow, watchdog_info->stack_top, watchdog_info->stack_bottom); + + sdk_err(hwdev->dev_hdl, + "Mgmt pc: 0x%llx, elr: 0x%llx, spsr: 0x%llx, far: 0x%llx, esr: 0x%llx, xzr: 0x%llx\n", + watchdog_info->pc, watchdog_info->reg_info.arm_reg.elr, + watchdog_info->reg_info.arm_reg.spsr, watchdog_info->reg_info.arm_reg.far, + watchdog_info->reg_info.arm_reg.esr, watchdog_info->reg_info.arm_reg.xzr); + + sdk_err(hwdev->dev_hdl, "Mgmt register info\n"); + reg = &watchdog_info->reg_info.arm_reg.x30; + for (i = 0; i <= X_CSR_INDEX; i++) + sdk_err(hwdev->dev_hdl, "x%02u:0x%llx\n", + X_CSR_INDEX - i, reg[i]); + + if (watchdog_info->stack_actlen <= DATA_LEN_1K) { + stack_len = watchdog_info->stack_actlen; + } else { + sdk_err(hwdev->dev_hdl, "Oops stack length: 0x%x is wrong\n", + watchdog_info->stack_actlen); + stack_len = DATA_LEN_1K; + } + + sdk_err(hwdev->dev_hdl, "Mgmt dump stack, 16 bytes per line(start from sp)\n"); + for (i = 0; i < (stack_len / DUMP_16B_PER_LINE); i++) { + dump_addr = (u32 *)(watchdog_info->stack_data + (u32)(i * DUMP_16B_PER_LINE)); + sdk_err(hwdev->dev_hdl, "0x%08x 0x%08x 0x%08x 0x%08x\n", + *dump_addr, *(dump_addr + 0x1), *(dump_addr + 0x2), *(dump_addr + 0x3)); + } + + tmp = (stack_len % DUMP_16B_PER_LINE) / DUMP_4_VAR_PER_LINE; + for (j = 0; j < tmp; j++) { + dump_addr = (u32 *)(watchdog_info->stack_data + + (u32)(i * DUMP_16B_PER_LINE + j * DUMP_4_VAR_PER_LINE)); + 
sdk_err(hwdev->dev_hdl, "0x%08x ", *dump_addr); + } + + *out_size = sizeof(*watchdog_info); + watchdog_info = buf_out; + watchdog_info->head.status = 0; +} + +static void mgmt_watchdog_timeout_event_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic5_event_info event_info = { 0 }; + struct hinic5_hwdev *dev = hwdev; + + sw_watchdog_timeout_info_show(dev, buf_in, in_size, buf_out, out_size); + + if (dev->event_callback) { + event_info.type = EVENT_COMM_MGMT_WATCHDOG; + dev->event_callback(dev->event_pri_handle, &event_info); + } +} + +static void show_exc_info(struct hinic5_hwdev *hwdev, const EXC_INFO_S *exc_info) +{ + u32 i; + + /* key information */ + sdk_err(hwdev->dev_hdl, "==================== Exception Info Begin ====================\n"); + sdk_err(hwdev->dev_hdl, "Exception CpuTick : 0x%08x 0x%08x\n", + exc_info->cpu_tick.cnt_hi, exc_info->cpu_tick.cnt_lo); + sdk_err(hwdev->dev_hdl, "Exception Cause : %u\n", exc_info->exc_cause); + sdk_err(hwdev->dev_hdl, "Os Version : %s\n", exc_info->os_ver); + sdk_err(hwdev->dev_hdl, "App Version : %s\n", exc_info->app_ver); + sdk_err(hwdev->dev_hdl, "CPU Type : 0x%08x\n", exc_info->cpu_type); + sdk_err(hwdev->dev_hdl, "CPU ID : 0x%08x\n", exc_info->cpu_id); + sdk_err(hwdev->dev_hdl, "Thread Type : 0x%08x\n", exc_info->thread_type); + sdk_err(hwdev->dev_hdl, "Thread ID : 0x%08x\n", exc_info->thread_id); + sdk_err(hwdev->dev_hdl, "Byte Order : 0x%08x\n", exc_info->byte_order); + sdk_err(hwdev->dev_hdl, "Nest Count : 0x%08x\n", exc_info->nest_cnt); + sdk_err(hwdev->dev_hdl, "Fatal Error Num : 0x%08x\n", exc_info->fatal_errno); + sdk_err(hwdev->dev_hdl, "Current SP : 0x%016llx\n", exc_info->uw_sp); + sdk_err(hwdev->dev_hdl, "Stack Bottom : 0x%016llx\n", exc_info->stack_bottom); + + /* register field */ + sdk_err(hwdev->dev_hdl, "Register contents when exception occur.\n"); + sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "TTBR0", + exc_info->reg_info.ttbr0, 
"TTBR1", exc_info->reg_info.ttbr1); + sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "TCR", + exc_info->reg_info.tcr, "MAIR", exc_info->reg_info.mair); + sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "SCTLR", + exc_info->reg_info.sctlr, "VBAR", exc_info->reg_info.vbar); + sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "CURRENTE1", + exc_info->reg_info.current_el, "SP", exc_info->reg_info.sp); + sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "ELR", + exc_info->reg_info.elr, "SPSR", exc_info->reg_info.spsr); + sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx \t %-14s: 0x%016llx\n", "FAR", + exc_info->reg_info.far_r, "ESR", exc_info->reg_info.esr); + sdk_err(hwdev->dev_hdl, "%-14s: 0x%016llx\n", "XZR", exc_info->reg_info.xzr); + + for (i = 0; i < XREGS_NUM - 1; i += 0x2) + sdk_err(hwdev->dev_hdl, "XREGS[%02u]%-5s: 0x%016llx \t XREGS[%02u]%-5s: 0x%016llx", + i, " ", exc_info->reg_info.xregs[i], + (u32)(i + 0x1U), " ", exc_info->reg_info.xregs[(u32)(i + 0x1U)]); + + sdk_err(hwdev->dev_hdl, "XREGS[%02u]%-5s: 0x%016llx \t ", XREGS_NUM - 1, " ", + exc_info->reg_info.xregs[XREGS_NUM - 1]); +} + +#define FOUR_REG_LEN 16 + +static void mgmt_lastword_report_event_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + comm_info_up_lastword_s *lastword_info = buf_in; + EXC_INFO_S *exc_info = NULL; + struct hinic5_hwdev *dev = hwdev; + u32 *curr_reg = NULL; + u32 reg_i, cnt, stack_len; + + if (in_size != sizeof(*lastword_info)) { + sdk_err(dev->dev_hdl, "Invalid mgmt lastword, length: %u, should be %lu\n", + in_size, sizeof(*lastword_info)); + return; + } + exc_info = &lastword_info->stack_info; + stack_len = lastword_info->stack_actlen; + + if (stack_len > MPU_LASTWORD_SIZE) { + sdk_err(dev->dev_hdl, "Invalid mgmt lastword, length: stack_len: %u, should less than %u\n", + stack_len, MPU_LASTWORD_SIZE); + return; + } + + show_exc_info(dev, exc_info); + + /* call stack dump */ + 
sdk_err(dev->dev_hdl, "Dump stack when exceptioin occurs, 16Bytes per line.\n"); + + cnt = stack_len / FOUR_REG_LEN; + for (reg_i = 0; reg_i < cnt; reg_i++) { + curr_reg = (u32 *)(lastword_info->stack_data + ((u64)(u32)(reg_i * FOUR_REG_LEN))); + sdk_err(dev->dev_hdl, "0x%08x 0x%08x 0x%08x 0x%08x\n", + *curr_reg, *(curr_reg + 0x1), *(curr_reg + 0x2), *(curr_reg + 0x3)); + } + + sdk_err(dev->dev_hdl, "==================== Exception Info End ====================\n"); +} + +#if !defined(__UEFI__) && !defined(__WIN__) && !defined(__VMWARE__) +static int hisdk5_attach_vf_vroce(struct hinic5_lld_dev *lld_dev, u16 func_id) +{ + int err = 0; + struct hinic5_adev *src_adev = NULL; + struct hinic5_adev *dst_adev = NULL; + + if (!lld_dev) + return -EINVAL; + + src_adev = to_hinic5_adev(lld_dev); + dst_adev = hinic5_get_vf_adev_by_pf((void *)src_adev, func_id); + if (!dst_adev) + return -EINVAL; + + err = hinic5_attach_service(&dst_adev->lld_dev, SERVICE_T_ROCE); + return err; +} + +static void hisdk5_detach_vf_vroce(struct hinic5_lld_dev *lld_dev, u16 func_id) +{ + struct hinic5_adev *src_adev = NULL; + struct hinic5_adev *dst_adev = NULL; + + if (!lld_dev) + return; + + src_adev = to_hinic5_adev(lld_dev); + dst_adev = hinic5_get_vf_adev_by_pf((void *)src_adev, func_id); + if (!dst_adev) + return; + + hinic5_detach_service(&dst_adev->lld_dev, SERVICE_T_VROCE); +} + +static int hisdk5_attach_vf_ub(struct hinic5_lld_dev *lld_dev, u16 func_id) +{ + int err = 0; + struct hinic5_adev *src_adev = NULL; + struct hinic5_adev *dst_adev = NULL; + + if (!lld_dev) + return -EINVAL; + + src_adev = to_hinic5_adev(lld_dev); + dst_adev = hinic5_get_vf_adev_by_pf((void *)src_adev, func_id); + if (!dst_adev) + return -EINVAL; + + err = hinic5_attach_service(&dst_adev->lld_dev, SERVICE_T_UB); + return err; +} + +static void hisdk5_detach_vf_ub(struct hinic5_lld_dev *lld_dev, u16 func_id) +{ + struct hinic5_adev *src_adev = NULL; + struct hinic5_adev *dst_adev = NULL; + + if (!lld_dev) + return; 
+ + src_adev = to_hinic5_adev(lld_dev); + dst_adev = hinic5_get_vf_adev_by_pf((void *)src_adev, func_id); + if (!dst_adev) + return; + + hinic5_detach_service(&dst_adev->lld_dev, SERVICE_T_UB); +} + +static int hisdk5_attach_vf_nic(struct hinic5_lld_dev *lld_dev, u16 func_id) +{ + int err = 0; + struct hinic5_adev *src_adev = NULL; + struct hinic5_adev *dst_adev = NULL; + + if (!lld_dev) + return -EINVAL; + + src_adev = to_hinic5_adev(lld_dev); + + dst_adev = hinic5_get_vf_adev_by_pf((void *)src_adev, func_id); + if (!dst_adev) + return -EINVAL; + + err = hinic5_set_func_en(dst_adev, true, func_id); + return err; +} + +static void hisdk5_detach_vf_nic(struct hinic5_lld_dev *lld_dev, u16 func_id) +{ + struct hinic5_adev *src_adev = NULL; + struct hinic5_adev *dst_adev = NULL; + + if (!lld_dev) + return; + + src_adev = to_hinic5_adev(lld_dev); + dst_adev = hinic5_get_vf_adev_by_pf((void *)src_adev, func_id); + if (!dst_adev) + return; + + (void)hinic5_set_func_en(dst_adev, false, func_id); +} + +static void hisdk5_attach_plug_service(struct hinic5_lld_dev *lld_dev, u8 srv_type, + struct hinic5_hwdev *dev, u16 func_id) +{ + int err = 0; + + if (func_id < CMD_MAX_MAX_PF_NUM) { + switch (srv_type) { + case COMM_PLUG_SRV_NIC: + err = hinic5_attach_service(lld_dev, SERVICE_T_NIC); + break; + case COMM_PLUG_SRV_VROCE: + err = hinic5_attach_service(lld_dev, SERVICE_T_ROCE); + break; + case COMM_PLUG_SRV_UB: + err = hinic5_attach_service(lld_dev, SERVICE_T_UB); + break; + default: + sdk_err(dev->dev_hdl, "plug attach pf service type error.\n"); + } + } else { + switch (srv_type) { + case COMM_PLUG_SRV_NIC: + err = hisdk5_attach_vf_nic(lld_dev, func_id); + break; + case COMM_PLUG_SRV_VROCE: + err = hisdk5_attach_vf_vroce(lld_dev, func_id); + break; + case COMM_PLUG_SRV_UB: + err = hisdk5_attach_vf_ub(lld_dev, func_id); + break; + default: + sdk_err(dev->dev_hdl, "plug attach vf service type error.\n"); + } + } + + if (err != 0) + sdk_err(dev->dev_hdl, "plug attach service 
failed.\n"); +} + +static void hisdk5_detach_plug_service(struct hinic5_lld_dev *lld_dev, u8 srv_type, + struct hinic5_hwdev *dev, u16 func_id) +{ + if (func_id < CMD_MAX_MAX_PF_NUM) { + switch (srv_type) { + case COMM_PLUG_SRV_NIC: + hinic5_detach_service(lld_dev, SERVICE_T_NIC); + break; + case COMM_PLUG_SRV_VROCE: + hinic5_detach_service(lld_dev, SERVICE_T_VROCE); + break; + case COMM_PLUG_SRV_UB: + hinic5_detach_service(lld_dev, SERVICE_T_UB); + break; + default: + sdk_err(dev->dev_hdl, "plug attach pf service type error.\n"); + } + } else { + switch (srv_type) { + case COMM_PLUG_SRV_NIC: + hisdk5_detach_vf_nic(lld_dev, func_id); + break; + case COMM_PLUG_SRV_VROCE: + hisdk5_detach_vf_vroce(lld_dev, func_id); + break; + case COMM_PLUG_SRV_UB: + hisdk5_detach_vf_ub(lld_dev, func_id); + break; + default: + sdk_err(dev->dev_hdl, "plug detach vf service type error.\n"); + } + } +} + +static void hisdk5_plug_service_pre_handler(u8 srv_type, struct comm_cmd_plug_srv *plug_srv, + struct hinic5_hwdev *dev) +{ + if (srv_type == COMM_PLUG_SRV_NIC) { + dev->cfg_mgmt->svc_cap.nic_cap.max_sqs = plug_srv->nic_cap.max_sqs; + dev->cfg_mgmt->svc_cap.nic_cap.max_rqs = plug_srv->nic_cap.max_rqs; + } +} + +static void mgmt_plug_report_event_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct comm_cmd_plug_srv *plug_srv = buf_in; + struct hinic5_hwdev *dev = hwdev; + struct hinic5_adev *adev = dev->adapter_hdl; + struct hinic5_lld_dev *lld_dev = &adev->lld_dev; + u16 func_id; + u8 srv_type; + u8 attach_en; + + if (in_size != sizeof(*plug_srv)) { + sdk_err(dev->dev_hdl, "Invalid plug event report, length: %u, should be %ld.\n", + in_size, sizeof(*plug_srv)); + return; + } + + if (!IS_BMGW_SLAVE_HOST(dev)) { + sdk_warn(dev->dev_hdl, "Discard plug event from unexpected function (mode %u).\n", + dev->func_mode); + return; + } + + srv_type = plug_srv->srv_type; + attach_en = plug_srv->attach_en; + func_id = plug_srv->func_id; + 
hisdk5_plug_service_pre_handler(srv_type, plug_srv, dev); + + if (attach_en != 0) + hisdk5_attach_plug_service(lld_dev, srv_type, dev, func_id); + else + hisdk5_detach_plug_service(lld_dev, srv_type, dev, func_id); +} +#endif + +static void mgmt_reset_event_handler(void *dev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic5_hwdev *hwdev = dev; + + sdk_err(hwdev->dev_hdl, "Event COMM_MGMT_CMD_MGMT_RESET from MPU\n"); +} + +const struct mgmt_event_handle mgmt_event_proc[] = { + { + .cmd = COMM_MGMT_CMD_FAULT_REPORT, + .proc = fault_event_handler, + }, + + { + .cmd = COMM_MGMT_CMD_FFM_SET, + .proc = ffm_event_msg_handler, + }, + + { + .cmd = COMM_MGMT_CMD_WATCHDOG_INFO, + .proc = mgmt_watchdog_timeout_event_handler, + }, + + { + .cmd = COMM_MGMT_CMD_LASTWORD_GET, + .proc = mgmt_lastword_report_event_handler, + }, + + { + .cmd = COMM_MGMT_CMD_MGMT_RESET, + .proc = mgmt_reset_event_handler, + }, + +#if !defined(__UEFI__) && !defined(__WIN__) && !defined(__VMWARE__) + { + .cmd = COMM_MGMT_CMD_SET_FUNC_PLUG_SRV, + .proc = mgmt_plug_report_event_handler, + }, +#endif +}; + +static void pf_handle_mgmt_comm_event(void *handle, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size) +{ + struct hinic5_hwdev *hwdev = handle; + u32 i, event_num = (u32)ARRAY_LEN(mgmt_event_proc); + + if (!hwdev) + return; + + for (i = 0; i < event_num; i++) { + if (cmd == mgmt_event_proc[i].cmd) { + if (mgmt_event_proc[i].proc) + mgmt_event_proc[i].proc(handle, buf_in, in_size, + buf_out, out_size); + else + sdk_warn(hwdev->dev_hdl, + "Mgmt event proc is not registered, cmd %u\n", cmd); + return; + } + } + + sdk_warn(hwdev->dev_hdl, "Unsupported mgmt cpu event %u to process\n", + cmd); + *out_size = sizeof(struct mgmt_msg_head); + ((struct mgmt_msg_head *)buf_out)->status = HINIC5_MGMT_CMD_UNSUPPORTED; +} + +static inline void hinic5_set_chip_present(struct hinic5_hwdev *hwdev) +{ + hwdev->chip_present_flag = HINIC5_CHIP_PRESENT; +} + +static inline 
void hinic5_set_chip_absent(struct hinic5_hwdev *hwdev) +{ + hwdev->chip_present_flag = HINIC5_CHIP_ABSENT; +} + +bool hinic5_check_htn_device_id(void *hwdev) +{ +#if !defined(__UEFI__) && !defined(__WIN__) && !defined(__VMWARE__) + struct hinic5_hwdev *dev = hwdev; + struct hinic5_adev *adev = dev->adapter_hdl; + + if ((hinic5_adev_get_device_id(adev) != HINIC5_UDEV_DEVICE_ID_1872_PF && + hinic5_adev_get_device_id(adev) != HINIC5_UDEV_DEVICE_ID_1872_VF) || + adev->lld_dev.dev_type != HINIC5_DEVICE_T_UB) { + return false; + } +#endif + + return true; +} + +int hinic5_get_chip_present_flag(const void *hwdev) +{ + struct hinic5_hwdev *dev = (struct hinic5_hwdev *)hwdev; + + if (unlikely(!hwdev)) + return HINIC5_CHIP_ABSENT; + + if (unlikely(!get_handshake_state(dev))) + return HINIC5_CHIP_ABSENT; + + if (likely(hinic5_is_chip_present(dev))) + return HINIC5_CHIP_PRESENT; + return HINIC5_CHIP_ABSENT; +} +EXPORT_SYMBOL(hinic5_get_chip_present_flag); + +void hinic5_force_complete_all(void *dev) +{ + struct hinic5_recv_msg *recv_resp_msg = NULL; + struct hinic5_hwdev *hwdev = dev; + struct hinic5_mbox *func_to_func = NULL; + + if (!dev || !hwdev->pf_to_mgmt) + return; + + spin_lock_bh(&hwdev->channel_lock); + if (test_bit(HINIC5_HWDEV_MGMT_INITED, &hwdev->func_state)) { + recv_resp_msg = &hwdev->pf_to_mgmt->recv_resp_msg_from_mgmt; + spin_lock_bh(&hwdev->pf_to_mgmt->sync_event_lock); + if (hwdev->pf_to_mgmt->event_flag == SEND_EVENT_START) { + complete(&recv_resp_msg->recv_done); + hwdev->pf_to_mgmt->event_flag = SEND_EVENT_TIMEOUT; + } + spin_unlock_bh(&hwdev->pf_to_mgmt->sync_event_lock); + } + + if (test_bit(HINIC5_HWDEV_MBOX_INITED, &hwdev->func_state)) { + func_to_func = hwdev->func_to_func; + spin_lock(&func_to_func->mbox_lock); + if (func_to_func->event_flag == EVENT_START) + func_to_func->event_flag = EVENT_TIMEOUT; + spin_unlock(&func_to_func->mbox_lock); + } + + if (test_bit(HINIC5_HWDEV_CMDQ_INITED, &hwdev->func_state)) + hinic5_cmdq_flush_sync_cmd(hwdev); + + 
spin_unlock_bh(&hwdev->channel_lock); +} +EXPORT_SYMBOL(hinic5_force_complete_all); + +void hinic5_detect_hw_present(void *hwdev) +{ + struct hinic5_hwdev *dev = (struct hinic5_hwdev *)hwdev; + + if (!get_card_present_state(dev)) { + sdk_err(dev->dev_hdl, "Detect card absent.\n"); + hinic5_set_chip_absent(hwdev); + hinic5_force_complete_all(hwdev); + } +} + +/** + * dma_attr_table_init - initialize the default dma attributes + * @hwdev: the pointer to hw device + **/ +static int dma_attr_table_init(struct hinic5_hwdev *hwdev) +{ + u32 addr, val, dst_attr; + + /* Use indirect access should set entry_idx first */ + addr = HINIC5_CSR_DMA_ATTR_INDIR_IDX_ADDR; + val = hinic5_hwif_read_reg(hwdev->hwif, addr); + val = HINIC5_DMA_ATTR_INDIR_IDX_CLEAR(val, IDX); + + val |= HINIC5_DMA_ATTR_INDIR_IDX_SET(PCIE_MSIX_ATTR_ENTRY, IDX); + + hinic5_hwif_write_reg(hwdev->hwif, addr, val); + + wmb(); /* write index before config */ + + addr = HINIC5_CSR_DMA_ATTR_TBL_ADDR; + val = hinic5_hwif_read_reg(hwdev->hwif, addr); + + dst_attr = HINIC5_DMA_ATTR_ENTRY_SET(HINIC5_PCIE_ST_DISABLE, ST) | + HINIC5_DMA_ATTR_ENTRY_SET(HINIC5_PCIE_AT_DISABLE, AT) | + HINIC5_DMA_ATTR_ENTRY_SET(HINIC5_PCIE_PH_DISABLE, PH) | + HINIC5_DMA_ATTR_ENTRY_SET(HINIC5_PCIE_SNOOP, NO_SNOOPING) | + HINIC5_DMA_ATTR_ENTRY_SET(HINIC5_PCIE_TPH_DISABLE, TPH_EN); + + if (val == dst_attr) + return 0; + + return hinic5_set_dma_attr_tbl(hwdev, PCIE_MSIX_ATTR_ENTRY, HINIC5_PCIE_ST_DISABLE, + HINIC5_PCIE_AT_DISABLE, HINIC5_PCIE_PH_DISABLE, + HINIC5_PCIE_SNOOP, HINIC5_PCIE_TPH_DISABLE); +} + +static int init_aeqs_msix_attr(struct hinic5_hwdev *hwdev) +{ + struct hinic5_aeqs *aeqs = hwdev->aeqs; + struct interrupt_info info = {0}; + struct hinic5_eq *eq = NULL; + int q_id; + int err; + + info.lli_set = 0; + info.interrupt_coalesc_set = 1; + info.pending_limt = HINIC5_DEAULT_EQ_MSIX_PENDING_LIMIT; + info.coalesc_timer_cfg = HINIC5_DEAULT_EQ_MSIX_COALESC_TIMER_CFG; + info.resend_timer_cfg = HINIC5_DEAULT_EQ_MSIX_RESEND_TIMER_CFG; 
+ + for (q_id = aeqs->num_aeqs - 1; q_id >= 0; q_id--) { + eq = &aeqs->aeq[q_id]; + info.msix_index = eq->eq_irq.msix_entry_idx; + err = hinic5_set_interrupt_cfg_direct(hwdev, &info, + HINIC5_CHANNEL_COMM); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Set msix attr for aeq %d failed\n", + q_id); + return -EFAULT; + } + } + + return 0; +} + +static int init_ceqs_msix_attr(struct hinic5_hwdev *hwdev) +{ +#ifdef __UEFI__ + return 0; +#endif + struct hinic5_ceqs *ceqs = hwdev->ceqs; + struct interrupt_info info = {0}; + struct hinic5_eq *eq = NULL; + u16 q_id; + int err; + + if (!ceqs) + return 0; + + info.lli_set = 0; + info.interrupt_coalesc_set = 1; + info.pending_limt = HINIC5_DEAULT_EQ_MSIX_PENDING_LIMIT; + info.coalesc_timer_cfg = HINIC5_DEAULT_EQ_MSIX_COALESC_TIMER_CFG; + info.resend_timer_cfg = HINIC5_DEAULT_EQ_MSIX_RESEND_TIMER_CFG; + + for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) { + eq = &ceqs->ceq[q_id]; + info.msix_index = eq->eq_irq.msix_entry_idx; + err = hinic5_set_interrupt_cfg(hwdev, info, + HINIC5_CHANNEL_COMM); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Set msix attr for ceq %u failed\n", + q_id); + return -EFAULT; + } + } + + return 0; +} + +static int hinic5_comm_clp_to_mgmt_init(struct hinic5_hwdev *hwdev) +{ + int err; + + if (hinic5_func_type(hwdev) == TYPE_VF || !COMM_SUPPORT_CLP(hwdev)) + return 0; + + err = hinic5_clp_pf_to_mgmt_init(hwdev); + if (err != 0) + return err; + + return 0; +} + +static void hinic5_comm_clp_to_mgmt_free(struct hinic5_hwdev *hwdev) +{ + if (hinic5_func_type(hwdev) == TYPE_VF || !COMM_SUPPORT_CLP(hwdev)) + return; + + hinic5_clp_pf_to_mgmt_free(hwdev); +} + +static int hinic5_comm_aeqs_init(struct hinic5_hwdev *hwdev) +{ + struct irq_info aeq_irqs[HINIC5_MAX_AEQS] = {{0}}; + u16 num_aeqs, resp_num_irq = 0, i; + int err; + + num_aeqs = HINIC5_HWIF_NUM_AEQS(hwdev->hwif); + if (num_aeqs > HINIC5_MAX_AEQS) { + sdk_warn(hwdev->dev_hdl, "Adjust aeq num to %d\n", + HINIC5_MAX_AEQS); + num_aeqs = HINIC5_MAX_AEQS; + } + 
err = hinic5_alloc_irqs(hwdev, SERVICE_T_INTF, num_aeqs, aeq_irqs, + &resp_num_irq); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to alloc aeq irqs, num_aeqs: %u\n", + num_aeqs); + return err; + } + + if (resp_num_irq < num_aeqs) { + sdk_warn(hwdev->dev_hdl, "Adjust aeq num to %u\n", + resp_num_irq); + num_aeqs = resp_num_irq; + } + + err = hinic5_aeqs_init(hwdev, num_aeqs, aeq_irqs); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to init aeqs\n"); + goto aeqs_init_err; + } + + return 0; + +aeqs_init_err: + for (i = 0; i < num_aeqs; i++) + hinic5_free_irq(hwdev, SERVICE_T_INTF, aeq_irqs[i].irq_id); + + return err; +} + +static void hinic5_comm_aeqs_free(struct hinic5_hwdev *hwdev) +{ + struct irq_info aeq_irqs[HINIC5_MAX_AEQS] = {{0} }; + struct irq_info *aeq_irq = &aeq_irqs[0]; + u16 num_irqs, i; + + hinic5_get_aeq_irqs(hwdev, aeq_irq, &num_irqs); + + hinic5_aeqs_free(hwdev); + + for (i = 0; i < num_irqs; i++) + hinic5_free_irq(hwdev, SERVICE_T_INTF, aeq_irqs[i].irq_id); +} + +static int hinic5_comm_ceqs_init(struct hinic5_hwdev *hwdev) +{ +#ifdef __UEFI__ + return 0; +#endif + struct irq_info ceq_irqs[HINIC5_MAX_CEQS] = {{0}}; + u16 num_ceqs, resp_num_irq = 0, i; + int err; + + num_ceqs = HINIC5_HWIF_NUM_CEQS(hwdev->hwif); + if (num_ceqs == 0) + return 0; + + if (num_ceqs > HINIC5_MAX_CEQS) { + sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %d\n", + HINIC5_MAX_CEQS); + num_ceqs = HINIC5_MAX_CEQS; + } + + err = hinic5_alloc_irqs(hwdev, SERVICE_T_INTF, num_ceqs, ceq_irqs, + &resp_num_irq); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to alloc ceq irqs, num_ceqs: %u\n", + num_ceqs); + return err; + } + + if (resp_num_irq < num_ceqs) { + sdk_warn(hwdev->dev_hdl, "Adjust ceq num to %u\n", + resp_num_irq); + num_ceqs = resp_num_irq; + } + + err = hinic5_ceqs_init(hwdev, num_ceqs, ceq_irqs); + if (err != 0) { + sdk_err(hwdev->dev_hdl, + "Failed to init ceqs, err:%d\n", err); + goto ceqs_init_err; + } + + return 0; + +ceqs_init_err: + for (i = 0; i < 
num_ceqs; i++) + hinic5_free_irq(hwdev, SERVICE_T_INTF, ceq_irqs[i].irq_id); + + return err; +} + +static void hinic5_comm_ceqs_free(struct hinic5_hwdev *hwdev) +{ +#ifdef __UEFI__ + return; +#endif + struct irq_info ceq_irqs[HINIC5_MAX_CEQS] = {{0}}; + struct irq_info *ceq_irq = &ceq_irqs[0]; + u16 num_irqs; + int i; + + if (!hwdev->ceqs) + return; + + hinic5_get_ceq_irqs(hwdev, ceq_irq, &num_irqs); + + hinic5_ceqs_free(hwdev); + + for (i = 0; i < num_irqs; i++) + hinic5_free_irq(hwdev, SERVICE_T_INTF, ceq_irqs[i].irq_id); +} + +/** + * @brief 初始化function和function之间的通信功能 + * + * @param[in] hwdev 设备指针 + * + * @details + * 注意mpu是一个特殊的function + * 这里的通信功能通过mailbox实现 + * 1)初始化mailbox相关的寄存器和资源 + * 2)注册处理接收mailbox数据的回调 + * + * @return: + * @retval 0 成功 + * @retval 非0 失败 + */ +static int hinic5_comm_func_to_func_init(struct hinic5_hwdev *hwdev) +{ + int err; + + err = hinic5_func_to_func_init(hwdev); + if (err != 0) + return err; + + hinic5_aeq_register_hw_cb(hwdev, hwdev, HINIC5_MBX_FROM_FUNC, + hinic5_mbox_func_aeqe_handler); + hinic5_aeq_register_hw_cb(hwdev, hwdev, HINIC5_MSG_FROM_MGMT_CPU, + hinic5_mgmt_msg_aeqe_handler); + + if (!HINIC5_IS_VF(hwdev)) + hinic5_register_pf_mbox_cb(hwdev, HINIC5_MOD_COMM, + hwdev, + pf_handle_vf_comm_mbox); + else + hinic5_register_vf_mbox_cb(hwdev, HINIC5_MOD_COMM, + hwdev, + vf_handle_pf_comm_mbox); + + set_bit(HINIC5_HWDEV_MBOX_INITED, &hwdev->func_state); + + return 0; +} + +static void hinic5_comm_func_to_func_free(struct hinic5_hwdev *hwdev) +{ + spin_lock_bh(&hwdev->channel_lock); + clear_bit(HINIC5_HWDEV_MBOX_INITED, &hwdev->func_state); + spin_unlock_bh(&hwdev->channel_lock); + + hinic5_aeq_unregister_hw_cb(hwdev, HINIC5_MBX_FROM_FUNC); + hinic5_aeq_unregister_hw_cb(hwdev, HINIC5_MSG_FROM_MGMT_CPU); + + if (!HINIC5_IS_VF(hwdev)) + hinic5_unregister_pf_mbox_cb(hwdev, HINIC5_MOD_COMM); + else + hinic5_unregister_vf_mbox_cb(hwdev, HINIC5_MOD_COMM); + + hinic5_func_to_func_free(hwdev); +} + +static int 
hinic5_comm_pf_to_mgmt_init(struct hinic5_hwdev *hwdev) +{ + int err; + + err = hinic5_pf_to_mgmt_init(hwdev); + if (err != 0) + return err; + + hinic5_register_mgmt_msg_cb(hwdev, HINIC5_MOD_COMM, hwdev, + pf_handle_mgmt_comm_event); + + set_bit(HINIC5_HWDEV_MGMT_INITED, &hwdev->func_state); + + return 0; +} + +static void hinic5_comm_pf_to_mgmt_free(struct hinic5_hwdev *hwdev) +{ + spin_lock_bh(&hwdev->channel_lock); + clear_bit(HINIC5_HWDEV_MGMT_INITED, &hwdev->func_state); + spin_unlock_bh(&hwdev->channel_lock); + + hinic5_unregister_mgmt_msg_cb(hwdev, HINIC5_MOD_COMM); + + hinic5_pf_to_mgmt_free(hwdev); +} + +static int hinic5_comm_cmdqs_init(struct hinic5_hwdev *hwdev) +{ + int err; + + err = hinic5_cmdqs_init(hwdev); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to init cmd queues\n"); + return err; + } + + hinic5_ceq_register_cb(hwdev, hwdev, HINIC5_CMDQ, hinic5_cmdq_ceq_handler); + + err = hinic5_set_cmdq_depth(hwdev, HINIC5_CMDQ_DEPTH); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to set cmdq depth\n"); + goto set_cmdq_depth_err; + } + + set_bit(HINIC5_HWDEV_CMDQ_INITED, &hwdev->func_state); + + return 0; + +set_cmdq_depth_err: + hinic5_cmdqs_free(hwdev); + + return err; +} + +static void hinic5_comm_cmdqs_free(struct hinic5_hwdev *hwdev) +{ + spin_lock_bh(&hwdev->channel_lock); + clear_bit(HINIC5_HWDEV_CMDQ_INITED, &hwdev->func_state); + spin_unlock_bh(&hwdev->channel_lock); + + hinic5_ceq_unregister_cb(hwdev, HINIC5_CMDQ); + hinic5_cmdqs_free(hwdev); +} + +static void hinic5_sync_mgmt_func_state(struct hinic5_hwdev *hwdev) +{ +#if defined(__UEFI__) && !defined(__HIFC__) + hinic5_set_pf_status(hwdev->hwif, HINIC5_PF_STATUS_INIT); +#else + hinic5_set_pf_status(hwdev->hwif, HINIC5_PF_STATUS_ACTIVE_FLAG); +#endif +} + +static void hinic5_unsync_mgmt_func_state(struct hinic5_hwdev *hwdev) +{ + hinic5_set_pf_status(hwdev->hwif, HINIC5_PF_STATUS_INIT); +} + +static int init_basic_attributes(struct hinic5_hwdev *hwdev) +{ + u64 
drv_features[COMM_MAX_FEATURE_QWORD] = {HINIC5_DRV_FEATURE_QW0, 0, 0, 0}; + int err, i; + +#if !defined(__UEFI__) + if (hinic5_func_type(hwdev) == TYPE_PPF) + drv_features[0] |= COMM_F_CHANNEL_DETECT; +#endif + + err = hinic5_get_board_info(hwdev, &hwdev->board_info, + HINIC5_CHANNEL_COMM); + if (err != 0) + return err; + + err = hinic5_get_comm_features(hwdev, hwdev->features, + COMM_MAX_FEATURE_QWORD); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Get comm features failed\n"); + return err; + } + + sdk_info(hwdev->dev_hdl, "Comm hw features: 0x%llx, drv features: 0x%llx\n", + hwdev->features[0], drv_features[0]); + + for (i = 0; i < COMM_MAX_FEATURE_QWORD; i++) + hwdev->features[i] &= drv_features[i]; + + err = hinic5_get_global_attr(hwdev, &hwdev->glb_attr); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to get global attribute\n"); + return err; + } + + sdk_info(hwdev->dev_hdl, + "global attribute: max_host: 0x%x, max_pf: 0x%x, vf_id_start: 0x%x, mgmt node id: 0x%x, cmdq_num: 0x%x\n", + hwdev->glb_attr.max_host_num, hwdev->glb_attr.max_pf_num, + hwdev->glb_attr.vf_id_start, + hwdev->glb_attr.mgmt_host_node_id, + hwdev->glb_attr.cmdq_num); + + return 0; +} + +static int init_basic_mgmt_channel(struct hinic5_hwdev *hwdev) +{ + int err; + + err = hinic5_comm_aeqs_init(hwdev); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to init async event queues\n"); + return err; + } + + err = hinic5_comm_func_to_func_init(hwdev); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to init mailbox\n"); + goto func_to_func_init_err; + } + + err = init_aeqs_msix_attr(hwdev); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to init aeqs msix attr\n"); + goto aeqs_msix_attr_init_err; + } +#if !defined(__UEFI__) && !defined(__WIN__) && !defined(__VMWARE__) + err = hinic5_cqm_init_fast_msg(hwdev); + if (err != 0) + sdk_err(hwdev->dev_hdl, "Failed to init fast msg\n"); +#endif + return 0; + +aeqs_msix_attr_init_err: + hinic5_comm_func_to_func_free(hwdev); + 
+func_to_func_init_err: + hinic5_comm_aeqs_free(hwdev); + + return err; +} + +static void free_base_mgmt_channel(struct hinic5_hwdev *hwdev) +{ + hinic5_comm_func_to_func_free(hwdev); + hinic5_comm_aeqs_free(hwdev); +} + +static int init_pf_mgmt_channel(struct hinic5_hwdev *hwdev) +{ + int err; + + err = hinic5_comm_clp_to_mgmt_init(hwdev); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to init clp\n"); + return err; + } + + err = hinic5_comm_pf_to_mgmt_init(hwdev); + if (err != 0) { + hinic5_comm_clp_to_mgmt_free(hwdev); + sdk_err(hwdev->dev_hdl, "Failed to init pf to mgmt\n"); + return err; + } + + return 0; +} + +static void free_pf_mgmt_channel(struct hinic5_hwdev *hwdev) +{ + hinic5_comm_clp_to_mgmt_free(hwdev); + hinic5_comm_pf_to_mgmt_free(hwdev); +} + +static int init_mgmt_channel_post(struct hinic5_hwdev *hwdev) +{ + int err; + + /* mbox host channel resources will be freed in + * hinic5_func_to_func_free + */ + if (HINIC5_IS_PPF(hwdev)) { + err = hinic5_mbox_init_host_msg_channel(hwdev); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to init mbox host channel\n"); + return err; + } + } + + err = init_pf_mgmt_channel(hwdev); + if (err != 0) + return err; + + return 0; +} + +static void free_mgmt_msg_channel_post(struct hinic5_hwdev *hwdev) +{ + free_pf_mgmt_channel(hwdev); +} + +static int init_cmdqs_channel(struct hinic5_hwdev *hwdev) +{ + int err; + + err = dma_attr_table_init(hwdev); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to init dma attr table\n"); + goto dma_attr_init_err; + } + + err = hinic5_comm_ceqs_init(hwdev); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to init completion event queues\n"); + goto ceqs_init_err; + } + + err = init_ceqs_msix_attr(hwdev); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to init ceqs msix attr\n"); + goto init_ceq_msix_err; + } + + /* set default wq page_size */ + if (wq_page_order > HINIC5_MAX_WQ_PAGE_SIZE_ORDER) { + sdk_info(hwdev->dev_hdl, "wq_page_order exceed limit[0, %d], 
reset to %d\n", + HINIC5_MAX_WQ_PAGE_SIZE_ORDER, + HINIC5_MAX_WQ_PAGE_SIZE_ORDER); + wq_page_order = HINIC5_MAX_WQ_PAGE_SIZE_ORDER; + } + hwdev->wq_page_size = HINIC5_HW_WQ_PAGE_SIZE * (1U << wq_page_order); + sdk_info(hwdev->dev_hdl, "WQ page size: 0x%x\n", hwdev->wq_page_size); + err = hinic5_set_wq_page_size(hwdev, hinic5_global_func_id(hwdev), + hwdev->wq_page_size, HINIC5_CHANNEL_COMM); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to set wq page size\n"); + goto init_wq_pg_size_err; + } + + err = hinic5_comm_cmdqs_init(hwdev); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to init cmd queues\n"); + goto cmdq_init_err; + } + + return 0; + +cmdq_init_err: + hinic5_set_wq_page_size(hwdev, hinic5_global_func_id(hwdev), + HINIC5_HW_WQ_PAGE_SIZE, + HINIC5_CHANNEL_COMM); +init_wq_pg_size_err: +init_ceq_msix_err: + hinic5_comm_ceqs_free(hwdev); + +ceqs_init_err: +dma_attr_init_err: + + return err; +} + +static void hinic5_free_cmdqs_channel(struct hinic5_hwdev *hwdev) +{ + hinic5_comm_cmdqs_free(hwdev); + + hinic5_set_wq_page_size(hwdev, hinic5_global_func_id(hwdev), + HINIC5_HW_WQ_PAGE_SIZE, HINIC5_CHANNEL_COMM); + + hinic5_comm_ceqs_free(hwdev); +} + +static int hinic5_init_comm_ch(struct hinic5_hwdev *hwdev) +{ + int err; + + err = init_basic_mgmt_channel(hwdev); + if (err != 0) + return err; + + err = hinic5_func_reset(hwdev, hinic5_global_func_id(hwdev), + HINIC5_COMM_RES, HINIC5_CHANNEL_COMM); + if (err != 0) + goto func_reset_err; + + err = init_basic_attributes(hwdev); + if (err != 0) + goto init_basic_attr_err; + + err = init_mgmt_channel_post(hwdev); + if (err != 0) + goto init_mgmt_channel_post_err; + + err = init_cmdqs_channel(hwdev); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to init cmdq channel\n"); + goto init_cmdqs_channel_err; + } + + err = hinic5_set_func_svc_used_state(hwdev, SVC_T_COMM, 1, HINIC5_CHANNEL_COMM); + if (err != 0) + goto set_used_state_err; + + hinic5_sync_mgmt_func_state(hwdev); + + if 
(HISDK5_F_CHANNEL_LOCK_EN(hwdev)) { + hinic5_mbox_enable_channel_lock(hwdev, true); + hinic5_cmdq_enable_channel_lock(hwdev, true); + } + err = hinic5_init_stateless_aeqs(hwdev); + if (err != 0) { + sdk_err(hwdev->dev_hdl, + "Failed to init stateless aeqs\n"); + goto init_stateless_aeqs_err; + } + + err = hinic5_aeq_register_swe_cb(hwdev, hwdev, HINIC5_STATELESS_EVENT, + (hinic5_aeq_swe_cb)hinic5_nic_sw_aeqe_handler); + if (err != 0) { + sdk_err(hwdev->dev_hdl, + "Failed to register sw aeqe handler\n"); + goto register_ucode_aeqe_err; + } + + return 0; + +register_ucode_aeqe_err: + hinic5_stateless_aeqs_free(hwdev); +init_stateless_aeqs_err: + hinic5_unsync_mgmt_func_state(hwdev); + hinic5_set_func_svc_used_state(hwdev, SVC_T_COMM, 0, HINIC5_CHANNEL_COMM); +set_used_state_err: + hinic5_free_cmdqs_channel(hwdev); +init_cmdqs_channel_err: + free_mgmt_msg_channel_post(hwdev); +init_mgmt_channel_post_err: +init_basic_attr_err: +func_reset_err: + free_base_mgmt_channel(hwdev); + + return err; +} + +static void hinic5_uninit_comm_ch(struct hinic5_hwdev *hwdev) +{ + hinic5_aeq_unregister_swe_cb(hwdev, HINIC5_STATELESS_EVENT); + + hinic5_stateless_aeqs_free(hwdev); + + hinic5_unsync_mgmt_func_state(hwdev); + + hinic5_set_func_svc_used_state(hwdev, SVC_T_COMM, 0, HINIC5_CHANNEL_COMM); + + hinic5_free_cmdqs_channel(hwdev); + + free_mgmt_msg_channel_post(hwdev); +#if !defined(__UEFI__) && !defined(__WIN__) && !defined(__VMWARE__) + hinic5_cqm_deinit_fast_msg(hwdev); +#endif + free_base_mgmt_channel(hwdev); +} + +#if !defined(__UEFI__) && !defined(__VMWARE__) && !defined(__WIN__) +static void hinic5_auto_sync_time_work(struct work_struct *work) +{ + struct delayed_work *delay = to_delayed_work(work); + struct hinic5_hwdev *hwdev = container_of(delay, struct hinic5_hwdev, sync_time_task); + int err; + + err = hinic5_sync_time(hwdev, ossl_get_real_time()); + if (err != 0) + sdk_err(hwdev->dev_hdl, "Synchronize UTC time to firmware failed, errno:%d.\n", + err); + + 
queue_delayed_work(hwdev->workq, &hwdev->sync_time_task, + msecs_to_jiffies(HINIC5_SYNFW_TIME_PERIOD)); +} + +static void hinic5_auto_channel_detect_work(struct work_struct *work) +{ + struct delayed_work *delay = to_delayed_work(work); + struct hinic5_hwdev *hwdev = container_of(delay, struct hinic5_hwdev, channel_detect_task); + + if (!hinic5_is_chip_present(hwdev)) { + sdk_warn(hwdev->dev_hdl, "Detect card absent, stop channel detect.\n"); + return; + } + + (void)hinic5_comm_channel_detect(hwdev); + + /* reschedule self */ + if (!hinic5_channel_detect_should_stop(hwdev)) + queue_delayed_work(hwdev->workq, &hwdev->channel_detect_task, + msecs_to_jiffies(HINIC5_CHANNEL_DETECT_PERIOD)); +} + +void hinic5_kernel_sync_time_work(struct work_struct *work) +{ + struct delayed_work *delay = to_delayed_work(work); + struct hinic5_hwdev *hwdev = container_of(delay, struct hinic5_hwdev, + sync_kernel_time_task); + int err; + + struct card_node *chip_node = (struct card_node *)(hwdev->chip_node); + + if (!chip_node || !chip_node->non_ptp_info || + (chip_node->non_ptp_info->non_ptp_time_diff_enable == 0)) { + return; + } + + err = hinic5_sync_kernel_time(hwdev); + if (err != 0) + sdk_err(hwdev->dev_hdl, "Synchronize kernel time failed, errno:%d.\n", err); + + queue_delayed_work(hwdev->workq, &hwdev->sync_kernel_time_task, + msecs_to_jiffies(HINIC5_NON_PTP_SYNC_FW_TIME_PERIOD)); +} + +static int hinic5_init_ppf_work(struct hinic5_hwdev *hwdev) +{ + int err; + + if (hinic5_func_type(hwdev) != TYPE_PPF) + return 0; + + INIT_DELAYED_WORK(&hwdev->sync_time_task, hinic5_auto_sync_time_work); + queue_delayed_work(hwdev->workq, &hwdev->sync_time_task, + msecs_to_jiffies(HINIC5_SYNFW_TIME_PERIOD)); + + if (COMM_SUPPORT_CHANNEL_DETECT(hwdev) != 0) { + INIT_DELAYED_WORK(&hwdev->channel_detect_task, + hinic5_auto_channel_detect_work); + queue_delayed_work(hwdev->workq, &hwdev->channel_detect_task, + msecs_to_jiffies(HINIC5_CHANNEL_DETECT_PERIOD)); + } + + if 
(COMM_SUPPORT_NON_PTP_SYNC(hwdev) != 0) { + err = hinic5_non_ptp_cdev_init(hwdev); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to init non_ptp char dev\n"); + goto init_non_ptp_err; + } + /* Register the delayed work and initialize it to the disabled state */ + INIT_DELAYED_WORK(&hwdev->sync_kernel_time_task, hinic5_kernel_sync_time_work); + hinic5_set_non_ptp_time_diff_en(hwdev, false); + } + + if (!COMM_SUPPORT_HTN_CMD(hwdev)) { + err = hinic5_comm_micro_log_init(hwdev); + if (err != 0) + sdk_warn(hwdev->dev_hdl, "Failed to init micro log\n"); + } + + return 0; + +init_non_ptp_err: + if (COMM_SUPPORT_CHANNEL_DETECT(hwdev) != 0) { + hwdev->features[0] &= ~(COMM_F_CHANNEL_DETECT); + cancel_delayed_work_sync(&hwdev->channel_detect_task); + } + + cancel_delayed_work_sync(&hwdev->sync_time_task); + + return err; +} + +/* PPF only: cancel the periodic sync/detect delayed works and release the + * non-PTP char device and micro-log resources set up by hinic5_init_ppf_work. + */ +static void hinic5_free_ppf_work(struct hinic5_hwdev *hwdev) +{ + if (hinic5_func_type(hwdev) != TYPE_PPF) + return; + + if (!COMM_SUPPORT_HTN_CMD(hwdev)) + hinic5_micro_log_uninit(hwdev); + + if (COMM_SUPPORT_NON_PTP_SYNC(hwdev) != 0) { + hwdev->features[0] &= ~(COMM_F_NON_PTP_SYNC); + cancel_delayed_work_sync(&hwdev->sync_kernel_time_task); + hinic5_non_ptp_cdev_deinit(hwdev); + } + + if (COMM_SUPPORT_CHANNEL_DETECT(hwdev)) { + hwdev->features[0] &= ~(COMM_F_CHANNEL_DETECT); + cancel_delayed_work_sync(&hwdev->channel_detect_task); + } + + cancel_delayed_work_sync(&hwdev->sync_time_task); +} + +#else +static int hinic5_init_ppf_work(struct hinic5_hwdev *hwdev) +{ + return 0; +} + +static void hinic5_free_ppf_work(struct hinic5_hwdev *hwdev) +{ +} +#endif + +/* + * CMDQ timeout should be greater than SMEG1 runaway timeout(SMEG1_RUNAWAY_CFG) + */ +#define HINIC5_ASIC_CMDQ_TIMEOUT 10000 +#define HINIC5_FPGA_CMDQ_TIMEOUT 50000 +#define HINIC5_EMU_CMDQ_TIMEOUT 500000 +#define HINIC5_EDA_CMDQ_TIMEOUT 10000 + +#define HINIC5_ASIC_MBOX_TIMEOUT 40000 +#define HINIC5_FPGA_MBOX_TIMEOUT 40000 +#define HINIC5_EMU_MBOX_TIMEOUT 400000 +#define HINIC5_EDA_MBOX_TIMEOUT 40000 + +#define 
HINIC5_ASIC_MBOX_POLL_TIMEOUT 8000 +#define HINIC5_FPGA_MBOX_POLL_TIMEOUT 8000 +#define HINIC5_EMU_MBOX_POLL_TIMEOUT 80000 +#define HINIC5_EDA_MBOX_POLL_TIMEOUT 8000 + +static const struct hinic5_sdk_timeout_info g_sdk_timeout_info[] = { + { + .hw_type = HINIC5_HW_TYPE_FPGA, + .hw_type_desc = "FPGA", + .mbox_poll_timeout = HINIC5_FPGA_MBOX_POLL_TIMEOUT, + .mbox_timeout = HINIC5_FPGA_MBOX_TIMEOUT, + .cmdq_timeout = HINIC5_FPGA_CMDQ_TIMEOUT, + }, + { + .hw_type = HINIC5_HW_TYPE_ASIC, + .hw_type_desc = "ASIC", + .mbox_poll_timeout = HINIC5_ASIC_MBOX_POLL_TIMEOUT, + .mbox_timeout = HINIC5_ASIC_MBOX_TIMEOUT, + .cmdq_timeout = HINIC5_ASIC_CMDQ_TIMEOUT, + }, + { + .hw_type = HINIC5_HW_TYPE_EMU, + .hw_type_desc = "EMU", + .mbox_poll_timeout = HINIC5_EMU_MBOX_POLL_TIMEOUT, + .mbox_timeout = HINIC5_EMU_MBOX_TIMEOUT, + .cmdq_timeout = HINIC5_EMU_CMDQ_TIMEOUT, + }, + { + .hw_type = HINIC5_HW_TYPE_EDA, + .hw_type_desc = "EDA", + .mbox_poll_timeout = HINIC5_EDA_MBOX_POLL_TIMEOUT, + .mbox_timeout = HINIC5_EDA_MBOX_TIMEOUT, + .cmdq_timeout = HINIC5_EDA_CMDQ_TIMEOUT, + }, +}; + +STATIC void hinic5_hwdev_init_timeout(struct hinic5_hwdev *hwdev, u8 hw_type) +{ + u8 temp_hw_type = hw_type; + + if (temp_hw_type > HINIC5_HW_TYPE_EDA) { + sdk_warn(hwdev->dev_hdl, "No available timeout info for type(%d), use default type(%d)\n", + temp_hw_type, HINIC5_HW_TYPE_FPGA); + temp_hw_type = HINIC5_HW_TYPE_FPGA; + } + + hwdev->timeout_info = &g_sdk_timeout_info[temp_hw_type]; +} + +static int init_hwdev(struct hinic5_init_para *para) +{ + struct hinic5_hwdev *hwdev = NULL; + + hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL); + if (!hwdev) + return -ENOMEM; + + *para->hwdev = hwdev; + hwdev->adapter_hdl = para->adapter_hdl; +#ifdef __UEFI__ + hwdev->busdev_hdl = para->busdev_hdl; +#endif + hwdev->dev_hdl = para->dev_hdl; + hwdev->chip_node = para->chip_node; + hwdev->poll = para->poll; + atomic_set(&hwdev->check_ob_flush_bypass_ref_cnt, 0); + hwdev->probe_fault_level = para->probe_fault_level; + 
hwdev->func_state = 0; + + hwdev->chip_fault_stats = vzalloc(HINIC5_CHIP_FAULT_SIZE); + if (!hwdev->chip_fault_stats) + goto alloc_chip_fault_stats_err; + + hwdev->stateful_ref_cnt = 0; + memset(hwdev->features, 0, sizeof(hwdev->features)); + + hinic5_hwdev_init_timeout(hwdev, HINIC5_HW_TYPE_ASIC); + + spin_lock_init(&hwdev->channel_lock); + mutex_init(&hwdev->stateful_mutex); + + return 0; + +alloc_chip_fault_stats_err: + kfree(hwdev); + *para->hwdev = NULL; + return -EFAULT; +} + +static void deinit_hwdev(struct hinic5_hwdev *hwdev) +{ + mutex_deinit(&hwdev->stateful_mutex); + spin_lock_deinit(&hwdev->channel_lock); + vfree(hwdev->chip_fault_stats); + hwdev->chip_fault_stats = NULL; + kfree(hwdev); +} + +static inline void hinic5_init_max_aeq_busy_cnt(struct hinic5_hwdev *hwdev) +{ + hwdev->max_aeq_busy_cnt = hwdev->timeout_info->mbox_timeout / MSEC_PER_SEC; +} + +#define HINIC5_HEARTBEAT_PERIOD 1000 +#define DETECT_PCIE_LINK_DOWN_RETRY 2 + +int hinic5_init_hwdev(struct hinic5_init_para *para) +{ + struct hinic5_hwdev *hwdev = NULL; + u8 hw_type; + int err; + + err = init_hwdev(para); + if (err != 0) + return err; + + hwdev = *para->hwdev; + + err = hinic5_init_hwif(hwdev, para->fers2_reg_base, para->cfg_reg_base, para->intr_reg_base, + para->mgmt_reg_base, para->db_base_phy, para->db_base, + para->db_dwqe_len); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to init hwif\n"); + goto init_hwif_err; + } + + hw_type = hinic5_get_hw_type(hwdev); + hinic5_hwdev_init_timeout(hwdev, hw_type); + hinic5_init_max_aeq_busy_cnt(hwdev); + + hinic5_set_chip_present(hwdev); + + err = hisdk5_init_profile_adapter(hwdev); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to init profile adapter\n"); + goto init_prof_adapter_err; + } + + hwdev->workq = alloc_workqueue(HINIC5_HWDEV_WQ_NAME, WQ_MEM_RECLAIM, HINIC5_WQ_MAX_REQ); + if (!hwdev->workq) { + sdk_err(hwdev->dev_hdl, "Failed to alloc hardware workq\n"); + goto alloc_workq_err; + } + + 
(void)hinic5_set_heartbeat_period_and_linkdown_cnt((void *)hwdev, HINIC5_HEARTBEAT_PERIOD, + DETECT_PCIE_LINK_DOWN_RETRY); + hinic5_init_heartbeat_detect(hwdev); + + err = init_cfg_mgmt(hwdev); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to init config mgmt\n"); + goto init_cfg_mgmt_err; + } + + err = hinic5_init_comm_ch(hwdev); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to init communication channel\n"); + goto init_comm_ch_err; + } + +#ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS + err = hinic5_init_devlink(hwdev); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to init devlink\n"); + goto init_devlink_err; + } +#endif + + err = init_capability(hwdev); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to init capability\n"); + goto init_cap_err; + } + + hinic5_init_host_mode_pre(hwdev); + + err = hinic5_multi_host_enable(hwdev, true); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to init function mode\n"); + goto init_multi_host_fail; + } + + err = hinic5_init_ppf_work(hwdev); + if (err != 0) + goto init_ppf_work_fail; + + err = hinic5_set_comm_features(hwdev, hwdev->features, COMM_MAX_FEATURE_QWORD); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to set comm features\n"); + goto set_feature_err; + } + + return 0; + +set_feature_err: + hinic5_free_ppf_work(hwdev); + +init_ppf_work_fail: + hinic5_multi_host_enable(hwdev, false); + +init_multi_host_fail: + free_capability(hwdev); + +init_cap_err: +#ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS + hinic5_uninit_devlink(hwdev); + +init_devlink_err: +#endif + hinic5_uninit_comm_ch(hwdev); + +init_comm_ch_err: + free_cfg_mgmt(hwdev); + +init_cfg_mgmt_err: + hinic5_destroy_heartbeat_detect(hwdev); + destroy_workqueue(hwdev->workq); + +alloc_workq_err: + hisdk5_deinit_profile_adapter(hwdev); + +init_prof_adapter_err: + hinic5_free_hwif(hwdev); + +init_hwif_err: + deinit_hwdev(hwdev); + *para->hwdev = NULL; + + return -EFAULT; +} + +void hinic5_free_hwdev(void *hwdev) +{ + struct hinic5_hwdev *dev = 
hwdev; + u64 drv_features[COMM_MAX_FEATURE_QWORD]; + + memset(drv_features, 0, sizeof(drv_features)); + hinic5_set_comm_features(hwdev, drv_features, COMM_MAX_FEATURE_QWORD); + + hinic5_free_ppf_work(dev); + + hinic5_multi_host_enable(dev, false); + + hinic5_func_rx_tx_flush(hwdev, HINIC5_CHANNEL_COMM, true, 0); + + free_capability(dev); + +#ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS + hinic5_uninit_devlink(dev); +#endif + + hinic5_uninit_comm_ch(dev); + + free_cfg_mgmt(dev); + hinic5_destroy_heartbeat_detect(hwdev); + destroy_workqueue(dev->workq); + + hisdk5_deinit_profile_adapter(hwdev); + hinic5_free_hwif(dev); + + deinit_hwdev(dev); +} + +int hinic5_register_service_adapter(void *hwdev, void *service_adapter, + enum hinic5_service_type type) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!hwdev || !service_adapter || type >= SERVICE_T_MAX) + return -EINVAL; + + if (dev->service_adapter[type]) + return -EINVAL; + + dev->service_adapter[type] = service_adapter; + + return 0; +} +EXPORT_SYMBOL(hinic5_register_service_adapter); + +void hinic5_unregister_service_adapter(void *hwdev, + enum hinic5_service_type type) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!hwdev || type >= SERVICE_T_MAX) + return; + + dev->service_adapter[type] = NULL; +} +EXPORT_SYMBOL(hinic5_unregister_service_adapter); + +void *hinic5_get_service_adapter(void *hwdev, enum hinic5_service_type type) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!hwdev || type < SERVICE_T_NIC || type >= SERVICE_T_MAX) + return NULL; + + return dev->service_adapter[type]; +} +EXPORT_SYMBOL(hinic5_get_service_adapter); + +int hinic5_dbg_get_hw_stats(const void *hwdev, u8 *hw_stats, const u32 *out_size) +{ + struct hinic5_hw_stats *tmp_hw_stats = (struct hinic5_hw_stats *)hw_stats; + struct card_node *chip_node = NULL; + + if (!hwdev) + return -EINVAL; + + if (*out_size != sizeof(struct hinic5_hw_stats) || !hw_stats) { + pr_err("Unexpect out buf size from user :%u, expect: %lu\n", + *out_size, sizeof(struct 
hinic5_hw_stats)); + return -EFAULT; + } + + memcpy(hw_stats, &((struct hinic5_hwdev *)hwdev)->hw_stats, + sizeof(struct hinic5_hw_stats)); + + chip_node = ((struct hinic5_hwdev *)hwdev)->chip_node; + + atomic_set(&tmp_hw_stats->nic_ucode_event_stats[HINIC5_CHANNEL_BUSY], + atomic_read(&chip_node->channel_busy_cnt)); + + return 0; +} + +static int check_cmdq_args(struct hinic5_hwdev *hwdev, u16 cmdq_id) +{ + struct hinic5_cmdqs *cmdqs = hwdev->cmdqs; + + if (unlikely(!cmdqs)) + return -EAGAIN; /* cmdqs not ready */ + + if (unlikely(cmdq_id >= cmdqs->cmdq_num)) + return -E2BIG; + + return 0; +} + +int hinic5_dump_cmdq_wq(struct hinic5_hwdev *hwdev, u16 cmdq_id, struct hinic5_wq *wq) +{ + int err; + + if (unlikely(!hwdev || !wq)) + return -EINVAL; + + err = check_cmdq_args(hwdev, cmdq_id); + if (unlikely(err != 0)) + return err; + + memcpy(wq, &hwdev->cmdqs->cmdq[cmdq_id].wq, sizeof(struct hinic5_wq)); + + return 0; +} + +int hinic5_dump_cmdq_wqebb(struct hinic5_hwdev *hwdev, u16 cmdq_id, u16 wqe_idx, + struct sdk_cmdq_wqe_desc *wqe_desc) +{ + struct hinic5_wq *wq = NULL; + u16 wqebb_size, wqe_idx_masked; + void *wqebb = NULL; + int err; + + if (unlikely(!hwdev || !wqe_desc)) + return -EINVAL; + + err = check_cmdq_args(hwdev, cmdq_id); + if (unlikely(err != 0)) + return err; + + wq = &hwdev->cmdqs->cmdq[cmdq_id].wq; + wqebb_size = wq->wqebb_size; + if (unlikely(wqebb_size > sizeof(wqe_desc->data))) + dev_warn_once(hwdev->dev_hdl, "WARN: out wqebb size too small\n"); + + wqe_idx_masked = WQ_MASK_IDX(wq, wqe_idx); + wqebb = hinic5_wq_wqebb_addr(wq, wqe_idx_masked); + + memset((void *)wqe_desc->data, 0, sizeof(wqe_desc->data)); + memcpy((void *)wqe_desc->data, wqebb, wqebb_size); + + wqe_desc->wqebb_size = wqebb_size; + return 0; +} + +u16 hinic5_dbg_clear_hw_stats(void *hwdev) +{ + struct card_node *chip_node = NULL; + struct hinic5_hwdev *dev = hwdev; + + memset((void *)&dev->hw_stats, 0, sizeof(struct hinic5_hw_stats)); + memset((void *)dev->chip_fault_stats, 0, 
HINIC5_CHIP_FAULT_SIZE); + + chip_node = dev->chip_node; + if (COMM_SUPPORT_CHANNEL_DETECT(dev) && (atomic_read(&chip_node->channel_busy_cnt) != 0)) { + atomic_set(&chip_node->channel_busy_cnt, 0); + dev->aeq_busy_cnt = 0; +#if !defined(__UEFI__) && !defined(__VMWARE__) && !defined(__WIN__) + queue_delayed_work(dev->workq, &dev->channel_detect_task, + msecs_to_jiffies(HINIC5_CHANNEL_DETECT_PERIOD)); +#endif + } + + return sizeof(struct hinic5_hw_stats); +} + +void hinic5_get_chip_fault_stats(const void *hwdev, u8 *chip_fault_stats, + u32 offset) +{ + u32 chip_fault_len; + + if (offset >= HINIC5_CHIP_FAULT_SIZE) { + pr_err("Invalid chip offset value: %u\n", offset); + return; + } + + if (offset + MAX_DRV_BUF_SIZE <= HINIC5_CHIP_FAULT_SIZE) + chip_fault_len = MAX_DRV_BUF_SIZE; + else + chip_fault_len = HINIC5_CHIP_FAULT_SIZE - offset; + memcpy(chip_fault_stats, + ((struct hinic5_hwdev *)hwdev)->chip_fault_stats + offset, chip_fault_len); +} + +int hinic5_event_register(void *dev, void *pri_handle, hinic5_event_handler callback) +{ + struct hinic5_hwdev *hwdev = dev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for register event\n"); + return -EINVAL; + } + + if (hwdev->event_callback) { + pr_err("event_callback is already registered\n"); + return -EPERM; + } + + hwdev->event_callback = callback; + hwdev->event_pri_handle = pri_handle; + return 0; +} + +void hinic5_event_unregister(void *dev) +{ + struct hinic5_hwdev *hwdev = dev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for register event\n"); + return; + } + + hwdev->event_callback = NULL; + hwdev->event_pri_handle = NULL; +} + +void hinic5_event_callback(void *hwdev, struct hinic5_event_info *event) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!hwdev) { + pr_err("Hwdev pointer is NULL for event callback\n"); + return; + } + + if (!dev->event_callback) { + sdk_info(dev->dev_hdl, "Event callback function not register\n"); + return; + } + + dev->event_callback(dev->event_pri_handle, event); +} 
+EXPORT_SYMBOL(hinic5_event_callback); + +void hinic5_set_pcie_order_cfg(void *handle) +{ +} + +void hinic5_disable_mgmt_msg_report(void *hwdev) +{ + struct hinic5_hwdev *hw_dev = (struct hinic5_hwdev *)hwdev; + + hinic5_set_pf_status(hw_dev->hwif, HINIC5_PF_STATUS_INIT); +} + +void hinic5_record_pcie_error(void *hwdev) +{ + struct hinic5_hwdev *dev = (struct hinic5_hwdev *)hwdev; + + if (!hwdev) + return; + + atomic_inc(&dev->hw_stats.fault_event_stats.pcie_fault_stats); +} + +#if !defined(__VMWARE__) +bool hinic5_need_init_stateful_default(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + u16 chip_svc_type = (u16)dev->cfg_mgmt->svc_cap.svc_type; + +#if defined(__UEFI__) && !defined(__HIFC__) && !defined(VIRTIO_2X100G_NORMAL) + if (dev->board_info.board_type == BOARD_TYPE_CAL_2X100G_NIC_120MPPS || + dev->board_info.board_type == BOARD_TYPE_CAL_4X25G_NIC_120MPPS) + return false; +#endif + + if (lowpower_mode != 0) + return true; + + /* Current virtio net have to init hinic5_cqm in PPF. */ + if ((hinic5_func_type(hwdev) == TYPE_PPF) && + ((chip_svc_type & CFG_SERVICE_MASK_VIRTIO) != 0)) { + sdk_info(dev->dev_hdl, "sdk init ppf resource, chip_svc_type: 0x%x\n", + chip_svc_type); + return true; + } + + /* vroce have to init hinic5_cqm */ + if (IS_MASTER_HOST(dev) && (hinic5_func_type(hwdev) != TYPE_PPF) && + (((chip_svc_type & CFG_SERVICE_MASK_VROCE) != 0))) + return true; + + /* Other service type will init hinic5_cqm when uld call. 
*/ + return false; +} + +static bool hinic5_ext_db_en(struct hinic5_hwdev *dev) +{ + u32 stateful_en = IS_FT_TYPE(dev) | IS_RDMA_TYPE(dev); + + return (((stateful_en != 0) || IS_RDMA_ENABLE(dev) || + IS_FT_ENABLE(dev)) && HINIC5_IS_PPF(dev)); +} + +static inline void stateful_uninit(struct hinic5_hwdev *hwdev) +{ + hinic5_cqm_uninit(hwdev); + + if (hinic5_ext_db_en(hwdev)) + hinic5_ppf_ext_db_deinit(hwdev); +} + +int hinic5_stateful_init(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + int err; + bool ext_db_en; + + if (!dev) + return -EINVAL; + + if (!hinic5_get_stateful_enable(dev)) + return 0; + + mutex_lock(&dev->stateful_mutex); + if (dev->stateful_ref_cnt != 0) { + dev->stateful_ref_cnt++; + mutex_unlock(&dev->stateful_mutex); + return 0; + } + + dev->stateful_ref_cnt++; + ext_db_en = hinic5_ext_db_en(dev); + if (ext_db_en) { + err = hinic5_ppf_ext_db_init(dev); + if (err != 0) + goto out; + } + + err = hinic5_cqm_init(dev); + if (err != 0) { + sdk_err(dev->dev_hdl, "Failed to init hinic5_cqm, err: %d\n", err); + goto init_hinic5_cqm_err; + } + + mutex_unlock(&dev->stateful_mutex); + sdk_info(dev->dev_hdl, "Initialize stateful resource success\n"); + + return 0; + +init_hinic5_cqm_err: + if (ext_db_en) + hinic5_ppf_ext_db_deinit(dev); + +out: + dev->stateful_ref_cnt--; + mutex_unlock(&dev->stateful_mutex); + + return err; +} +EXPORT_SYMBOL(hinic5_stateful_init); + +void hinic5_stateful_deinit(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!dev || !hinic5_get_stateful_enable(dev)) + return; + + mutex_lock(&dev->stateful_mutex); + if (dev->stateful_ref_cnt == 0 || ((--dev->stateful_ref_cnt) != 0)) { + mutex_unlock(&dev->stateful_mutex); + return; + } + + stateful_uninit(hwdev); + mutex_unlock(&dev->stateful_mutex); + + sdk_info(dev->dev_hdl, "Clear stateful resource success\n"); +} +EXPORT_SYMBOL(hinic5_stateful_deinit); + +void hinic5_free_stateful(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!dev || 
!hinic5_get_stateful_enable(dev) || dev->stateful_ref_cnt == 0) + return; + + if (!hinic5_need_init_stateful_default(hwdev) || dev->stateful_ref_cnt > 1) + sdk_info(dev->dev_hdl, "Current stateful resource ref is incorrect, ref_cnt:%u\n", + dev->stateful_ref_cnt); + + stateful_uninit(hwdev); + + sdk_info(dev->dev_hdl, "Clear stateful resource success\n"); +} +#endif /* __VMWARE__ */ + +int hinic5_get_card_present_state(void *hwdev, bool *card_present_state) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!hwdev || !card_present_state) + return -EINVAL; + + *card_present_state = get_card_present_state(dev); + + return 0; +} +EXPORT_SYMBOL(hinic5_get_card_present_state); + +void hinic5_link_event_stats(void *dev, u8 link) +{ + struct hinic5_hwdev *hwdev = dev; + + if (!hwdev) { + pr_err("hwdev is null\n"); + return; + } + + if (link != 0) + atomic_inc(&hwdev->hw_stats.link_event_stats.link_up_stats); + else + atomic_inc(&hwdev->hw_stats.link_event_stats.link_down_stats); +} +EXPORT_SYMBOL(hinic5_link_event_stats); + +int hinic5_get_link_down_cnt(void *dev, int *link_down_cnt) +{ + struct hinic5_hwdev *hwdev = dev; + + if (!hwdev || !link_down_cnt) + return -EINVAL; + + *link_down_cnt = hwdev->hw_stats.link_event_stats.link_down_stats.counter; + + return 0; +} +EXPORT_SYMBOL(hinic5_get_link_down_cnt); + +u8 hinic5_max_pf_num(void *hwdev) +{ + if (!hwdev) + return 0; + + return HINIC5_MAX_PF_NUM((struct hinic5_hwdev *)hwdev); +} +EXPORT_SYMBOL(hinic5_max_pf_num); + +void hinic5_fault_event_report(void *hwdev, u16 src, u16 level) +{ + if (!hwdev) + return; + + sdk_info(((struct hinic5_hwdev *)hwdev)->dev_hdl, "Fault event report, src: %u, level: %u\n", + src, level); + + hisdk5_fault_post_process(hwdev, src, level); +} +EXPORT_SYMBOL(hinic5_fault_event_report); + +void hinic5_probe_success(void *hwdev) +{ + if (!hwdev) + return; + + hisdk5_probe_success(hwdev); +} + +static void hinic5_update_channel_status(struct hinic5_hwdev *hwdev) +{ + struct card_node *chip_node = 
hwdev->chip_node; + + if (!chip_node) + return; + + if ((hinic5_func_type(hwdev) != TYPE_PPF) || !COMM_SUPPORT_CHANNEL_DETECT(hwdev) || + hinic5_channel_detect_should_stop(hwdev)) + return; + + if (test_bit(HINIC5_HWDEV_MBOX_INITED, &hwdev->func_state) == 0) + return; + + if (hwdev->last_recv_aeq_cnt != hwdev->cur_recv_aeq_cnt) { + hwdev->aeq_busy_cnt = 0; + hwdev->last_recv_aeq_cnt = hwdev->cur_recv_aeq_cnt; + /* Intentionally keep channel_busy_cnt */ + return; + } + + hwdev->aeq_busy_cnt++; + if (hwdev->aeq_busy_cnt > hwdev->max_aeq_busy_cnt) { + atomic_inc(&chip_node->channel_busy_cnt); + hwdev->aeq_busy_cnt = 0; + sdk_err(hwdev->dev_hdl, "Detect channel busy\n"); + } +} + +static void hinic5_heartbeat_lost_handler(struct work_struct *work) +{ + struct hinic5_event_info event_info = { 0 }; + struct hinic5_hwdev *hwdev = container_of(work, struct hinic5_hwdev, + heartbeat_lost_work); + u16 src, level; + + atomic_inc(&hwdev->hw_stats.heart_lost_stats); + + if (hwdev->event_callback) { + event_info.service = EVENT_SRV_COMM; + event_info.type = + (atomic_read(&hwdev->bus_link_down) != 0) ? 
EVENT_COMM_PCIE_LINK_DOWN : + EVENT_COMM_HEART_LOST; + hwdev->event_callback(hwdev->event_pri_handle, &event_info); + } + + if (atomic_read(&hwdev->bus_link_down) != 0) { + src = HINIC5_FAULT_SRC_PCIE_LINK_DOWN; + level = FAULT_LEVEL_HOST; + sdk_err(hwdev->dev_hdl, "Detect bus is link down\n"); + } else { + src = HINIC5_FAULT_SRC_HOST_HEARTBEAT_LOST; + level = FAULT_LEVEL_FATAL; + sdk_err(hwdev->dev_hdl, "Heart lost report received, func_id: %u\n", + hinic5_global_func_id(hwdev)); + } + + hinic5_show_chip_err_info(hwdev); + + hisdk5_fault_post_process(hwdev, src, level); +} + +#define HINIC5_HEARTBEAT_START_EXPIRE 5000 + +/* Check whether the current function is in a usable state: neither a bus + * link-down nor a heartbeat loss has been detected. + */ +bool hinic5_is_function_active(struct hinic5_hwdev *hwdev) +{ + return (atomic_read(&hwdev->bus_link_down) == 0 && + atomic_read(&hwdev->heartbeat_lost) == 0); +} + +/* Poll the heartbeat status: after linkdown_threshold consecutive BAR read + * failures latch bus_link_down and mark the chip absent; any other non-zero + * status latches heartbeat_lost. Returns true when the hw is deemed abnormal. + */ +static bool hinic5_is_hw_abnormal(struct hinic5_hwdev *hwdev) +{ + struct card_node *chip_info = hwdev->chip_node; + u32 status; + + if (hinic5_get_chip_present_flag(hwdev) == 0) + return false; + + status = hinic5_get_heartbeat_status(hwdev); + if (status == HINIC5_BUS_LINK_DOWN) { + sdk_warn(hwdev->dev_hdl, "Detect BAR register read failed\n"); + hwdev->rd_bar_err_cnt++; + if (hwdev->rd_bar_err_cnt >= hwdev->linkdown_threshold) { + sdk_err(hwdev->dev_hdl, "Set card absent due to bus link down\n"); + hinic5_set_chip_absent(hwdev); + hinic5_force_complete_all(hwdev); + atomic_set(&hwdev->bus_link_down, true); + return true; + } + + return false; + } + + if (status != 0) { + atomic_set(&hwdev->heartbeat_lost, true); + chip_info->exception_flag = true; + sdk_err(hwdev->dev_hdl, "Set card error due to heartbeat lost\n"); + return true; + } + + hwdev->rd_bar_err_cnt = 0; + + return false; +} + +int hinic5_set_heartbeat_period_and_linkdown_cnt(void *hwdev, u32 heartbeat_period, + u32 linkdown_threshold) +{ + struct hinic5_hwdev *dev = (struct hinic5_hwdev *)hwdev; + + if (!hwdev) { + pr_err("Hwdev is NULL\n"); + return -EINVAL; + } + + if (heartbeat_period == 0 
&& linkdown_threshold == 0) { + sdk_err(dev->dev_hdl, "heartbeat_period and linkdown_threshold is 0\n"); + return -EINVAL; + } + + if (heartbeat_period != 0) { + dev->heartbeat_period = heartbeat_period; + sdk_info(dev->dev_hdl, "heartbeat_period modify to %d\n", dev->heartbeat_period); + } + + if (linkdown_threshold != 0) { + dev->linkdown_threshold = linkdown_threshold; + sdk_info(dev->dev_hdl, "linkdown_threshold modify to %d\n", + dev->linkdown_threshold); + } + + return 0; +} +EXPORT_SYMBOL(hinic5_set_heartbeat_period_and_linkdown_cnt); + +#ifdef HAVE_TIMER_SETUP +static void hinic5_heartbeat_timer_handler(struct timer_list *t) +#else +static void hinic5_heartbeat_timer_handler(ulong data) +#endif +{ +#ifdef HAVE_TIMER_SETUP + struct hinic5_hwdev *hwdev = from_timer(hwdev, t, heartbeat_timer); +#else + struct hinic5_hwdev *hwdev = (struct hinic5_hwdev *)data; +#endif + + if (hinic5_is_hw_abnormal(hwdev)) { + stop_timer(&hwdev->heartbeat_timer); + queue_work(hwdev->workq, &hwdev->heartbeat_lost_work); + } else { + mod_timer(&hwdev->heartbeat_timer, + jiffies + msecs_to_jiffies(hwdev->heartbeat_period)); + } + + hinic5_update_channel_status(hwdev); +} + +static void hinic5_init_heartbeat_detect(struct hinic5_hwdev *hwdev) +{ +#ifdef HAVE_TIMER_SETUP + timer_setup(&hwdev->heartbeat_timer, hinic5_heartbeat_timer_handler, 0); +#else + initialize_timer(hwdev->adapter_hdl, &hwdev->heartbeat_timer); + hwdev->heartbeat_timer.data = (u64)hwdev; + hwdev->heartbeat_timer.function = hinic5_heartbeat_timer_handler; +#endif + + hwdev->heartbeat_timer.expires = + jiffies + msecs_to_jiffies(HINIC5_HEARTBEAT_START_EXPIRE); + + INIT_WORK(&hwdev->heartbeat_lost_work, hinic5_heartbeat_lost_handler); + + add_to_timer(&hwdev->heartbeat_timer, hwdev->heartbeat_period); +} + +static void hinic5_destroy_heartbeat_detect(struct hinic5_hwdev *hwdev) +{ + destroy_work(&hwdev->heartbeat_lost_work); + stop_timer(&hwdev->heartbeat_timer); + delete_timer(&hwdev->heartbeat_timer); +} + +void 
hinic5_set_api_stop(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + + if (!hwdev) + return; + + sdk_info(dev->dev_hdl, "Set card absent\n"); + hinic5_set_chip_absent(dev); + hinic5_force_complete_all(dev); + sdk_info(dev->dev_hdl, "All messages interacting with the chip will stop\n"); +} + +bool hinic5_get_perf_en(enum hinic5_perf_bitmap perf_bit) +{ + return test_bit(perf_bit, (void *)&perf_en_bitmap); +} diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/crm/hinic5_prof_adap.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/crm/hinic5_prof_adap.c new file mode 100644 index 00000000..32983ca1 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/crm/hinic5_prof_adap.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [PROF]" fmt + +#include <linux/kernel.h> +#include <linux/semaphore.h> +#include <linux/workqueue.h> + +#include "ossl_knl.h" +#include "hinic5_hwdev.h" +#include "hinic5_profile.h" +#include "hinic5_prof_adap.h" + +__weak +const struct hinic5_prof_adapter *hinic5_get_prof_adapter(void *hwdev) +{ + return NULL; +} + +int hisdk5_init_profile_adapter(struct hinic5_hwdev *hwdev) +{ + const struct hinic5_prof_adapter *adapter = NULL; + + adapter = hinic5_get_prof_adapter((void *)hwdev); + if (!adapter) { + sdk_info(hwdev->dev_hdl, "No profile adapter\n"); + return 0; + } + sdk_info(hwdev->dev_hdl, "Find profile adapter type: %d\n", adapter->type); + + if (!hinic5_verify_prof_adapter(adapter)) { + sdk_err(hwdev->dev_hdl, "Invalid profile adapter\n"); + return -EINVAL; + } + + hwdev->prof_adap = adapter; + if (adapter->init) + hwdev->prof_attr = adapter->init((void *)hwdev); + + return 0; +} + +void hisdk5_deinit_profile_adapter(struct hinic5_hwdev *hwdev) +{ + if (!hwdev->prof_adap) + return; + + if (hwdev->prof_adap->deinit) { + hwdev->prof_adap->deinit(hwdev->prof_attr); + hwdev->prof_attr = NULL; + } + hwdev->prof_adap = NULL; 
+} + +struct hinic5_prof_attr *hinic5_get_prof_attr(void *hwdev) +{ + struct hinic5_hwdev *dev = (struct hinic5_hwdev *)hwdev; + + if (!hwdev) { + pr_err("hwdev is NULL\n"); + return NULL; + } + + return dev->prof_attr; +} diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/crm/hinic5_prof_adap.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/crm/hinic5_prof_adap.h new file mode 100644 index 00000000..19899bea --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/crm/hinic5_prof_adap.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_PROF_ADAP_H +#define HINIC5_PROF_ADAP_H + +#include <linux/workqueue.h> + +#include "hinic5_profile.h" +#include "hinic5_hwdev.h" + +enum cpu_affinity_work_type { + WORK_TYPE_AEQ, + WORK_TYPE_MBOX, + WORK_TYPE_MGMT_MSG, + WORK_TYPE_COMM, + WORK_TYPE_FAST_MSG, +}; + +enum hisdk5_sw_features { + HISDK5_SW_F_CHANNEL_LOCK = BIT(0), +}; + +#define GET_PROF_ATTR_OPS(hwdev) \ + ((hwdev)->prof_attr ? 
(hwdev)->prof_attr->ops : NULL) + +static inline int hisdk5_get_work_cpu_affinity(struct hinic5_hwdev *hwdev, + enum cpu_affinity_work_type type) +{ + struct hinic5_prof_ops *ops = GET_PROF_ATTR_OPS(hwdev); + + if (ops && ops->get_work_cpu_affinity) + return ops->get_work_cpu_affinity(hwdev->prof_attr->priv_data, type); + + return WORK_CPU_UNBOUND; +} + +static inline void hisdk5_fault_post_process(struct hinic5_hwdev *hwdev, + u16 src, u16 level) +{ + struct hinic5_prof_ops *ops = GET_PROF_ATTR_OPS(hwdev); + + if (ops && ops->fault_recover) + ops->fault_recover(hwdev->prof_attr->priv_data, src, level); +} + +static inline void hisdk5_probe_success(struct hinic5_hwdev *hwdev) +{ + struct hinic5_prof_ops *ops = GET_PROF_ATTR_OPS(hwdev); + + if (ops && ops->probe_success) + ops->probe_success(hwdev->prof_attr->priv_data); +} + +static inline bool hisdk5_sw_feature_en(const struct hinic5_hwdev *hwdev, + u64 feature_bit) +{ + if (!hwdev->prof_attr) + return false; + + return ((hwdev->prof_attr->sw_feature_cap & feature_bit) != 0) && + ((hwdev->prof_attr->dft_sw_feature & feature_bit) != 0); +} + +#ifdef CONFIG_MODULE_PROF +static inline void hisdk5_remove_pre_process(struct hinic5_hwdev *hwdev) +{ + struct hinic5_prof_ops *ops = NULL; + + if (!hwdev) + return; + + ops = GET_PROF_ATTR_OPS(hwdev); + + if (ops && ops->remove_pre_handle) + ops->remove_pre_handle(hwdev); +} +#else +static inline void hisdk5_remove_pre_process(struct hinic5_hwdev *hwdev) {}; +#endif + +#define SW_FEATURE_EN(hwdev, f_bit) \ + hisdk5_sw_feature_en(hwdev, HISDK5_SW_F_##f_bit) +#define HISDK5_F_CHANNEL_LOCK_EN(hwdev) SW_FEATURE_EN(hwdev, CHANNEL_LOCK) + +int hisdk5_init_profile_adapter(struct hinic5_hwdev *hwdev); +void hisdk5_deinit_profile_adapter(struct hinic5_hwdev *hwdev); + +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_182x_cmdq_adapt/hinic5_cqm_182x_cmdq_ops.c 
b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_182x_cmdq_adapt/hinic5_cqm_182x_cmdq_ops.c new file mode 100644 index 00000000..e5bb5db0 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_182x_cmdq_adapt/hinic5_cqm_182x_cmdq_ops.c @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include "ossl_knl.h" +#include "hinic5_hinic5_cqm.h" +#include "hinic5_cqm_npu_cmd.h" +#include "hinic5_cqm_cmdq.h" +#include "hinic5_cqm_main.h" +#include "hinic5_cqm_npu_cmd_defs.h" +#include "hinic5_cqm_182x_cmdq_ops.h" + +static s32 prepare_cmd_buf_bat_update(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cmd_buf *buf_in, + struct tag_hinic5_cqm_bat_update_param *param, + u8 *cmd) +{ + struct tag_hinic5_cqm_bat_table *bat_table = &hinic5_cqm_handle->bat_table; + struct hinic5_cqm_182x_bat_update_cmd *cmd_data = buf_in->buf; + u8 *bat = NULL; + + cmd_data->offset = param->bat_offset / HINIC5_CQM_BAT_ENTRY_SIZE; + cmd_data->byte_len = param->update_size; + cmd_data->smf_id = param->smf_id; + cmd_data->func_id = param->func_id; + + bat = bat_table->bat + param->bat_offset; + memcpy(cmd_data->data, bat, param->update_size); + + hinic5_cqm_swab32((u8 *)cmd_data, + sizeof(struct hinic5_cqm_182x_bat_update_cmd) >> HINIC5_CQM_DW_SHIFT); + *cmd = (u8)HINIC5_CQM_CMD_T_BAT_UPDATE; + + return HINIC5_CQM_SUCCESS; +} + +static void prepare_cmd_buf_cla_update(hinic5_cqm_cla_update_cmd_s *cmd_info, + struct tag_hinic5_cqm_cmd_buf *buf_in, + u8 *cmd) +{ + struct hinic5_cqm_182x_cla_update_cmd *cmd_data = buf_in->buf; + + cmd_data->gpa_h = cmd_info->gpa_h; + cmd_data->gpa_l = cmd_info->gpa_l; + cmd_data->value_h = cmd_info->value_h; + cmd_data->value_l = cmd_info->value_l; + cmd_data->smf_id = cmd_info->smf_id; + cmd_data->func_id = cmd_info->func_id; + + hinic5_cqm_swab32((u8 *)cmd_data, + (sizeof(struct hinic5_cqm_182x_cla_update_cmd) >> 
HINIC5_CQM_DW_SHIFT)); + *cmd = (u8)HINIC5_CQM_CMD_T_CLA_UPDATE; +} + +static void prepare_cmd_cache_invalidate(hinic5_cqm_cla_cache_invalid_cmd_s *cmd_info, + struct tag_hinic5_cqm_cmd_buf *buf_in, + u8 *cmd) +{ + struct hinic5_cqm_182x_cla_cache_invalid_cmd *cmd_data = buf_in->buf; + + cmd_data->gpa_h = cmd_info->gpa_h; + cmd_data->gpa_l = cmd_info->gpa_l; + cmd_data->cache_size = cmd_info->cache_size; + cmd_data->smf_id = cmd_info->smf_id; + cmd_data->func_id = cmd_info->func_id; + + hinic5_cqm_swab32((u8 *)cmd_data, + /* shift 2 bits by right to get length of dw(4B) */ + (sizeof(struct hinic5_cqm_182x_cla_cache_invalid_cmd) >> HINIC5_CQM_DW_SHIFT)); + *cmd = (u8)HINIC5_CQM_CMD_T_CLA_CACHE_INVALID; +} + +struct hinic5_cqm_cmdq_ops *hinic5_cqm_cmdq_get_182x_ops(void) +{ + static struct hinic5_cqm_cmdq_ops cmdq_182x_ops = { + .prepare_cmd_buf_bat_update = prepare_cmd_buf_bat_update, + .prepare_cmd_buf_cla_update = prepare_cmd_buf_cla_update, + .prepare_cmd_cache_invalidate = prepare_cmd_cache_invalidate, + }; + + return &cmdq_182x_ops; +}; diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_182x_cmdq_adapt/hinic5_cqm_182x_cmdq_ops.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_182x_cmdq_adapt/hinic5_cqm_182x_cmdq_ops.h new file mode 100644 index 00000000..2bacd9f5 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_182x_cmdq_adapt/hinic5_cqm_182x_cmdq_ops.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef _HINIC5_CQM_182X_CMDQ_PRIVATE_H_ +#define _HINIC5_CQM_182X_CMDQ_PRIVATE_H_ + +#include "ossl_knl.h" +#include "hinic5_cqm_npu_cmd_defs.h" + +struct hinic5_cqm_182x_bat_update_cmd { + u32 offset; /* byte offset,16Byte aligned */ + u32 byte_len; /* max size: 256byte */ + u8 data[HINIC5_CQM_BAT_MAX_SIZE]; + u32 smf_id; + u32 func_id; +}; + +struct hinic5_cqm_182x_cla_update_cmd { + /* Gpa address to be 
updated */ + u32 gpa_h; /* byte addr */ + u32 gpa_l; /* byte addr */ + + /* Updated Value */ + u32 value_h; + u32 value_l; + + u32 smf_id; + u32 func_id; +}; + +struct hinic5_cqm_182x_cla_cache_invalid_cmd { + u32 gpa_h; + u32 gpa_l; + + u32 cache_size; /* CLA cache size=4096B */ + + u32 smf_id; + u32 func_id; +}; + +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_187x_cmdq_adapt/hinic5_cqm_187x_cmdq_ops.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_187x_cmdq_adapt/hinic5_cqm_187x_cmdq_ops.c new file mode 100644 index 00000000..ff206b64 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_187x_cmdq_adapt/hinic5_cqm_187x_cmdq_ops.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include "ossl_knl.h" +#include "hinic5_hinic5_cqm.h" +#include "hinic5_cqm_cmdq.h" +#include "hinic5_cqm_main.h" +#include "hinic5_cqm_npu_cmd_defs.h" +#include "hinic5_cqm_187x_cmdq_ops.h" + +static s32 prepare_cmd_buf_bat_update(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cmd_buf *buf_in, + struct tag_hinic5_cqm_bat_update_param *param, + u8 *cmd) +{ + struct tag_hinic5_cqm_bat_table *bat_table = &hinic5_cqm_handle->bat_table; + struct hinic5_cqm_187x_bat_update_cmd *cmd_data = buf_in->buf; + u8 *bat = NULL; + + cmd_data->offset = param->bat_offset / HINIC5_CQM_BAT_ENTRY_SIZE; + cmd_data->byte_len = param->update_size; + cmd_data->smf_id = param->smf_id; + cmd_data->func_id = (u16)param->func_id; + + bat = bat_table->bat + param->bat_offset; + memcpy(cmd_data->data, bat, param->update_size); + + hinic5_cqm_swab32((u8 *)cmd_data, + sizeof(struct hinic5_cqm_187x_bat_update_cmd) >> HINIC5_CQM_DW_SHIFT); + *cmd = (u8)HINIC5_CQM_HTN_CMD_T_BAT_UPDATE; + + return HINIC5_CQM_SUCCESS; +} + +static void prepare_cmd_buf_cla_update(hinic5_cqm_cla_update_cmd_s *cmd_info, + struct tag_hinic5_cqm_cmd_buf 
*buf_in, + u8 *cmd) +{ + struct hinic5_cqm_187x_cla_update_cmd *cmd_data = buf_in->buf; + + cmd_data->gpa_h = cmd_info->gpa_h; + cmd_data->gpa_l = cmd_info->gpa_l; + cmd_data->value_h = cmd_info->value_h; + cmd_data->value_l = cmd_info->value_l; + cmd_data->smf_id = cmd_info->smf_id; + cmd_data->func_id = (u16)cmd_info->func_id; + + hinic5_cqm_swab32((u8 *)cmd_data, + (sizeof(struct hinic5_cqm_187x_cla_update_cmd) >> HINIC5_CQM_DW_SHIFT)); + *cmd = (u8)HINIC5_CQM_HTN_CMD_T_CLA_UPDATE; +} + +static void prepare_cmd_cache_invalidate(hinic5_cqm_cla_cache_invalid_cmd_s *cmd_info, + struct tag_hinic5_cqm_cmd_buf *buf_in, + u8 *cmd) +{ + struct hinic5_cqm_187x_cla_cache_invalid_cmd *cmd_data = buf_in->buf; + + cmd_data->gpa_h = cmd_info->gpa_h; + cmd_data->gpa_l = cmd_info->gpa_l; + cmd_data->cache_size = cmd_info->cache_size; + cmd_data->smf_id = cmd_info->smf_id; + cmd_data->func_id = (u16)cmd_info->func_id; + + hinic5_cqm_swab32((u8 *)cmd_data, + /* shift 2 bits by right to get length of dw(4B) */ + (sizeof(struct hinic5_cqm_187x_cla_cache_invalid_cmd) >> 2)); + *cmd = (u8)HINIC5_CQM_HTN_CMD_T_CLA_CACHE_INVALID; +} + +struct hinic5_cqm_cmdq_ops *hinic5_cqm_cmdq_get_187x_ops(void) +{ + static struct hinic5_cqm_cmdq_ops cmdq_187x_ops = { + .prepare_cmd_buf_bat_update = prepare_cmd_buf_bat_update, + .prepare_cmd_buf_cla_update = prepare_cmd_buf_cla_update, + .prepare_cmd_cache_invalidate = prepare_cmd_cache_invalidate, + }; + + return &cmdq_187x_ops; +}; diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_187x_cmdq_adapt/hinic5_cqm_187x_cmdq_ops.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_187x_cmdq_adapt/hinic5_cqm_187x_cmdq_ops.h new file mode 100644 index 00000000..24707cfd --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_187x_cmdq_adapt/hinic5_cqm_187x_cmdq_ops.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd 
*/ + +#ifndef _HINIC5_CQM_187X_CMDQ_PRIVATE_H_ +#define _HINIC5_CQM_187X_CMDQ_PRIVATE_H_ + +#include "ossl_knl.h" +#include "hinic5_cqm_npu_cmd_defs.h" + +struct hinic5_cqm_187x_bat_update_cmd { + u32 rsv[2]; + u32 smf_id : 4; /* set as 0xffff, HTN_CMDQ use func_id in metadata */ + u32 byte_len : 10; /* max size: 256byte, min size: 16byte, 16Byte aligned */ + u32 offset : 18; /* byte offset, 16Byte aligned */ + u16 rsv1; + u16 func_id; + u8 data[HINIC5_CQM_BAT_MAX_SIZE]; +}; + +struct hinic5_cqm_187x_cla_update_cmd { + u32 rsv[2]; + u32 smf_id : 4; + u32 rsv1 : 28; + u16 rsv2; + u16 func_id; /* set as 0xffff, HTN_CMDQ use func_id in metadata */ + + /* Gpa address to be updated */ + u32 gpa_h; /* byte addr */ + u32 gpa_l; + + /* Updated Value */ + u32 value_h; + u32 value_l; +}; + +struct hinic5_cqm_187x_cla_cache_invalid_cmd { + u32 gpa_h; + u32 gpa_l; + + u32 smf_id : 4; + u32 cache_size : 19; /* CLA cache size=4096B */ + u32 rsv : 9; + u16 rsv2; + u16 func_id; /* set as 0xffff, HTN_CMDQ use func_id in metadata */ +}; + +/* HINIC5_CQM HTN CMD */ +enum hinic5_cqm_htn_cmd { + HINIC5_CQM_HTN_CMD_T_CLA_CACHE_INVALID = 0x20, + HINIC5_CQM_HTN_CMD_T_BAT_UPDATE, + HINIC5_CQM_HTN_CMD_T_CLA_UPDATE +}; + +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_bat_cla.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_bat_cla.c new file mode 100644 index 00000000..e315bfe8 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_bat_cla.c @@ -0,0 +1,2859 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/mm.h> +#include <linux/device.h> +#include <linux/gfp.h> + +#ifdef __LINUX__ +#include <linux/mmzone.h> +#endif + +#include "ossl_knl.h" +#include "hinic5_crm.h" +#include "hinic5_hw.h" +#include "hinic5_hwdev.h" +#include 
"hinic5_hwif_inner.h" +#include "hinic5_hw_comm.h" +#include "hinic5_hw_cfg.h" +#include "hinic5_hinic5_vram_api.h" +#include "hinic5_typedef_inner.h" + +#include "hinic5_cqm_object.h" +#include "hinic5_cqm_bitmap_table.h" +#include "hinic5_cqm_cmd.h" +#include "hinic5_cqm_object_intern.h" +#include "hinic5_cqm_main.h" +#include "hinic5_cqm_bat_cla.h" + +#include "comm_defs.h" +#include "hinic5_cqm_npu_cmd.h" +#include "hinic5_cqm_npu_cmd_defs.h" +#include "hinic5_cqm_cmdq.h" + +#include "hinic5_vram_common.h" + +static unsigned char hinic5_cqm_ver = 8; +module_param(hinic5_cqm_ver, byte, 0444); +MODULE_PARM_DESC(hinic5_cqm_ver, "for hinic5_cqm version control (default=8)"); + +static bool hinic5_cqm_cla_hugepage_hint; +module_param(hinic5_cqm_cla_hugepage_hint, bool, 0444); +MODULE_PARM_DESC(hinic5_cqm_cla_hugepage_hint, + "Hint for hugepage alloc to improve TLB locality (default false). " \ + "This option only impacts QPC and Timer Spoke Lists."); + +#ifdef __HINIC5_CQM_DEBUG__ +bool hinic5_cqm_verbose; +module_param(hinic5_cqm_verbose, bool, 0644); +#endif + +bool secure_mem_en = true; + +static inline u32 get_cacheline_size(u32 entry_type) +{ + /* The cacheline of the timer is changed to 512. 
*/ + if (entry_type == HINIC5_CQM_BAT_ENTRY_T_TIMER && hinic5_cqm_ver == 0x8) + return HINIC5_CQM_CHIP_TIMER_CACHELINE; + + return HINIC5_CQM_CHIP_CACHELINE; +} + +static void hinic5_cqm_bat_fill_cla_common_gpa(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cla_table *cla_table, + struct tag_hinic5_cqm_bat_entry_standerd *bat_entry_standerd) +{ + u8 gpa_check_enable = hinic5_cqm_handle->func_capability.gpa_check_enable; + struct hinic5_func_attr *func_attr = NULL; + struct tag_hinic5_cqm_bat_entry_vf2pf gpa = {0}; + u32 cla_gpa_h = 0; + dma_addr_t pa; + + if (cla_table->cla_lvl == HINIC5_CQM_CLA_LVL_0) + pa = cla_table->cla_z_buf.buf_list[0].pa; + else if (cla_table->cla_lvl == HINIC5_CQM_CLA_LVL_1) + pa = cla_table->cla_y_buf.buf_list[0].pa; + else + pa = cla_table->cla_x_buf.buf_list[0].pa; + + gpa.cla_gpa_h = HINIC5_CQM_ADDR_HI(pa) & HINIC5_CQM_CHIP_GPA_HIMASK; + gpa.acs_spu_en = hinic5_cqm_get_acs_spu_en(hinic5_cqm_handle); + + /* In fake mode, fake_vf_en in the GPA address of the BAT + * must be set to 1. + */ + if (HINIC5_CQM_IS_FAKE_CHILD_AGENT(hinic5_cqm_handle)) { + gpa.fake_vf_en = 1; + func_attr = &hinic5_cqm_handle->parent_hinic5_cqm_handle->func_attribute; + gpa.pf_id = func_attr->func_global_idx; + } else { + gpa.fake_vf_en = 0; + } + + memcpy(&cla_gpa_h, &gpa, sizeof(u32)); + bat_entry_standerd->cla_gpa_h = cla_gpa_h; + + /* GPA is valid when gpa[0] = 1. + * HINIC5_CQM_BAT_ENTRY_T_REORDER does not support GPA validity check. 
+ */ + if (cla_table->type == HINIC5_CQM_BAT_ENTRY_T_REORDER) + bat_entry_standerd->cla_gpa_l = HINIC5_CQM_ADDR_LW(pa); + else + bat_entry_standerd->cla_gpa_l = HINIC5_CQM_ADDR_LW(pa) | + gpa_check_enable; + + hinic5_cqm_info(hinic5_cqm_handle->dev, "Bat fill: cla_type %u, pa 0x%llx, gpa 0x%x-0x%x, level %u\n", + cla_table->type, pa, bat_entry_standerd->cla_gpa_h, bat_entry_standerd->cla_gpa_l, + bat_entry_standerd->cla_level); +} + +static void hinic5_cqm_bat_fill_cla_common(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cla_table *cla_table, + u8 *entry_base_addr) +{ + struct tag_hinic5_cqm_bat_entry_standerd *bat_entry_standerd = NULL; + u32 cache_line = get_cacheline_size(cla_table->type); + + if (cla_table->obj_num == 0) { + hinic5_cqm_dbg(hinic5_cqm_handle->dev, + "Bat fill: cla_type %u, obj_num=0, don't init bat entry\n", + cla_table->type); + return; + } + + bat_entry_standerd = (struct tag_hinic5_cqm_bat_entry_standerd *)entry_base_addr; + + /* The QPC value is 256/512/1024 and the timer value is 512. + * The other cacheline value is 256B. + * The conversion operation is performed inside the chip. 
+ */ + if (cla_table->obj_size > cache_line) { + if (cla_table->obj_size == HINIC5_CQM_OBJECT_512) + bat_entry_standerd->entry_size = HINIC5_CQM_BAT_ENTRY_SIZE_512; + else + bat_entry_standerd->entry_size = + HINIC5_CQM_BAT_ENTRY_SIZE_1024; + bat_entry_standerd->max_number = cla_table->max_buffer_size / + cla_table->obj_size; + } else { + if (cache_line == HINIC5_CQM_CHIP_CACHELINE) { + bat_entry_standerd->entry_size = HINIC5_CQM_BAT_ENTRY_SIZE_256; + bat_entry_standerd->max_number = + cla_table->max_buffer_size / cache_line; + } else { + bat_entry_standerd->entry_size = HINIC5_CQM_BAT_ENTRY_SIZE_512; + bat_entry_standerd->max_number = + cla_table->max_buffer_size / cache_line; + } + } + + bat_entry_standerd->max_number = bat_entry_standerd->max_number - 1; + + bat_entry_standerd->bypass = HINIC5_CQM_BAT_NO_BYPASS_CACHE; + bat_entry_standerd->z = cla_table->cacheline_z; + bat_entry_standerd->y = cla_table->cacheline_y; + bat_entry_standerd->x = cla_table->cacheline_x; + bat_entry_standerd->cla_level = cla_table->cla_lvl; + + hinic5_cqm_bat_fill_cla_common_gpa(hinic5_cqm_handle, cla_table, bat_entry_standerd); +} + +static void hinic5_cqm_bat_fill_cla_cfg(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cla_table *cla_table, + u8 **entry_base_addr) +{ + struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability; + struct tag_hinic5_cqm_bat_entry_cfg *bat_entry_cfg = NULL; + + bat_entry_cfg = (struct tag_hinic5_cqm_bat_entry_cfg *)(*entry_base_addr); + bat_entry_cfg->cur_conn_cache = 0; + bat_entry_cfg->max_conn_cache = + func_cap->flow_table_based_conn_cache_number; + bat_entry_cfg->cur_conn_num_h_4 = 0; + bat_entry_cfg->cur_conn_num_l_16 = 0; + bat_entry_cfg->max_conn_num = func_cap->flow_table_based_conn_number; + + /* Aligns with 64 buckets and shifts rightward by 6 bits. + * The maximum value of this field is 16 bits. A maximum of 4M buckets + * can be supported. The value is subtracted by 1. 
It is used for &hash + * value. + */ + if ((func_cap->hash_number >> HINIC5_CQM_HASH_NUMBER_UNIT) != 0) { + bat_entry_cfg->bucket_num = ((func_cap->hash_number >> + HINIC5_CQM_HASH_NUMBER_UNIT) - 1); + } + if (func_cap->bloomfilter_length != 0) { + bat_entry_cfg->bloom_filter_len = func_cap->bloomfilter_length - + 1; + bat_entry_cfg->bloom_filter_addr = func_cap->bloomfilter_addr; + } + + (*entry_base_addr) += sizeof(struct tag_hinic5_cqm_bat_entry_cfg); +} + +static void hinic5_cqm_bat_fill_cla_other(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cla_table *cla_table, + u8 **entry_base_addr) +{ + hinic5_cqm_bat_fill_cla_common(hinic5_cqm_handle, cla_table, *entry_base_addr); + + (*entry_base_addr) += sizeof(struct tag_hinic5_cqm_bat_entry_standerd); +} + +static void hinic5_cqm_bat_fill_cla_taskmap(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + const struct tag_hinic5_cqm_cla_table *cla_table, + u8 **entry_base_addr) +{ + struct tag_hinic5_cqm_bat_entry_taskmap *bat_entry_taskmap = NULL; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + int i; + + if (hinic5_cqm_handle->func_capability.taskmap_number != 0) { + bat_entry_taskmap = + (struct tag_hinic5_cqm_bat_entry_taskmap *)(*entry_base_addr); + for (i = 0; i < HINIC5_CQM_BAT_ENTRY_TASKMAP_NUM; i++) { + bat_entry_taskmap->addr[i].gpa_h = + (u32)(cla_table->cla_z_buf.buf_list[i].pa >> + HINIC5_CQM_CHIP_GPA_HSHIFT); + bat_entry_taskmap->addr[i].gpa_l = + (u32)(cla_table->cla_z_buf.buf_list[i].pa & + HINIC5_CQM_CHIP_GPA_LOMASK); + hinic5_cqm_info(handle->dev_hdl, + "Cla alloc: taskmap bat entry: 0x%x 0x%x\n", + bat_entry_taskmap->addr[i].gpa_h, + bat_entry_taskmap->addr[i].gpa_l); + } + } + + (*entry_base_addr) += sizeof(struct tag_hinic5_cqm_bat_entry_taskmap); +} + +static void hinic5_cqm_bat_fill_cla_timer(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cla_table *cla_table, + u8 **entry_base_addr) +{ + /* Only the PPF allocates timer resources. 
*/ + if (!HINIC5_CQM_IS_PPF(hinic5_cqm_handle)) { + (*entry_base_addr) += HINIC5_CQM_BAT_ENTRY_SIZE; + } else { + hinic5_cqm_bat_fill_cla_common(hinic5_cqm_handle, cla_table, + *entry_base_addr); + + (*entry_base_addr) += sizeof(struct tag_hinic5_cqm_bat_entry_standerd); + } +} + +static void hinic5_cqm_bat_fill_cla_invalid(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cla_table *cla_table, + u8 **entry_base_addr) +{ + (*entry_base_addr) += HINIC5_CQM_BAT_ENTRY_SIZE; +} + +/** + * Prototype : hinic5_cqm_bat_fill_cla + * Description : Fill the base address of the CLA table into the BAT table. + * Input : struct tag_hinic5_cqm_handle *hinic5_cqm_handle + * Output : None + * Return Value : s32 + * 1.Date : 2015/5/15 + * Modification : Created function + */ +static void hinic5_cqm_bat_fill_cla(struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + struct hinic5_hwdev *hwdev = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_bat_table *bat_table = &hinic5_cqm_handle->bat_table; + struct tag_hinic5_cqm_cla_table *cla_table = NULL; + u32 entry_type = HINIC5_CQM_BAT_ENTRY_T_INVALID; + u8 *entry_base_addr = NULL; + u32 i = 0; + + /* Fills each item in the BAT table according to the BAT format. 
*/ + entry_base_addr = bat_table->bat; + for (i = 0; i < HINIC5_CQM_BAT_ENTRY_MAX; i++) { + hinic5_cqm_dbg_on(hinic5_cqm_verbose, hinic5_cqm_handle->dev, + "entry_base_addr = %p\n", entry_base_addr); + entry_type = bat_table->bat_entry_type[i]; + cla_table = &bat_table->entry[i]; + + if (entry_type == HINIC5_CQM_BAT_ENTRY_T_CFG) { + hinic5_cqm_bat_fill_cla_cfg(hinic5_cqm_handle, cla_table, &entry_base_addr); + } else if (entry_type == HINIC5_CQM_BAT_ENTRY_T_TASKMAP) { + hinic5_cqm_bat_fill_cla_taskmap(hinic5_cqm_handle, cla_table, &entry_base_addr); + } else if (entry_type == HINIC5_CQM_BAT_ENTRY_T_INVALID) { + hinic5_cqm_bat_fill_cla_invalid(hinic5_cqm_handle, cla_table, &entry_base_addr); + } else if (entry_type == HINIC5_CQM_BAT_ENTRY_T_TIMER) { + if (HINIC5_CQM_IS_PPF(hinic5_cqm_handle) && HINIC5_CQM_IS_LB_MODE_1_OR_2(hinic5_cqm_handle)) { + /* The fill of Timer Entry is delayed, + * because it needs to be based on a specific SMF. */ + entry_base_addr += sizeof(struct tag_hinic5_cqm_bat_entry_standerd); + continue; + } + + hinic5_cqm_bat_fill_cla_timer(hinic5_cqm_handle, cla_table, &entry_base_addr); + } else if (entry_type == HINIC5_CQM_BAT_ENTRY_T_HASH) { + if (HINIC5_CQM_IS_LB_MODE_1_OR_2(hinic5_cqm_handle)) { + /* The fill of Hash Entry is delayed, + * because it needs to be based on a specific SMF. */ + entry_base_addr += sizeof(struct tag_hinic5_cqm_bat_entry_standerd); + continue; + } + + hinic5_cqm_bat_fill_cla_other(hinic5_cqm_handle, cla_table, &entry_base_addr); + } else if (entry_type == HINIC5_CQM_BAT_ENTRY_T_XID2CID) { + if (COMM_SUPPORT_VIRTIO_FC_CACHE(hwdev) && HINIC5_CQM_IS_LB_MODE_1_OR_2(hinic5_cqm_handle)) { + /* The fill of XID2CID Entry is delayed, + * because it needs to be based on a specific SMF. 
*/ + entry_base_addr += sizeof(struct tag_hinic5_cqm_bat_entry_standerd); + continue; + } + + hinic5_cqm_bat_fill_cla_other(hinic5_cqm_handle, cla_table, &entry_base_addr); + } else { + hinic5_cqm_bat_fill_cla_other(hinic5_cqm_handle, cla_table, &entry_base_addr); + } + + /* Check whether entry_base_addr is out-of-bounds array. */ + if (entry_base_addr >= + (bat_table->bat + HINIC5_CQM_BAT_ENTRY_MAX * HINIC5_CQM_BAT_ENTRY_SIZE)) + break; + } +} + +u32 hinic5_cqm_lb0_get_smf_id(const struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + u32 smf_sel, funcid, smf_pg_partial, smf_id; + /* SMFID is selected based on SMF_PG[1:0] and SMF_Selection(0-1) */ + u32 smfsel_smfid01[4][2] = { {0, 0}, {0, 0}, {1, 1}, {0, 1} }; + /* SMFID is selected based on SMF_PG[3:2] and SMF_Selection(2-4) */ + u32 smfsel_smfid23[4][2] = { {2, 2}, {2, 2}, {3, 3}, {2, 3} }; + + /* SMF_Selection is selected based on + * the lower two bits of the function id + */ + funcid = hinic5_cqm_handle->func_attribute.func_global_idx & 0x3; + /* if smf2 and smf3 are disabled, only select smf0/smf1 */ + if ((hinic5_cqm_handle->func_capability.smf_pg >> 2) == 0) { + u32 lbf_smfsel[4] = {0, 1, 0, 1}; + smf_sel = lbf_smfsel[funcid]; + } else { + u32 lbf_smfsel[4] = {0, 2, 1, 3}; + smf_sel = lbf_smfsel[funcid]; + } + + if (smf_sel < 0x2) { + smf_pg_partial = hinic5_cqm_handle->func_capability.smf_pg & 0x3; + smf_id = smfsel_smfid01[smf_pg_partial][smf_sel]; + } else { + smf_pg_partial = + /* shift to right by 2 bits */ + (hinic5_cqm_handle->func_capability.smf_pg >> 2) & 0x3; + smf_id = smfsel_smfid23[smf_pg_partial][smf_sel - 0x2]; + } + + return smf_id; +} + +u32 hinic5_cqm_funcid2smfid(const struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + u32 smf_id; + + /* When the LB mode is disabled, SMF0 is always returned. 
*/ + if (HINIC5_CQM_IS_LB_MODE_NORMAL(hinic5_cqm_handle)) { + smf_id = 0; + } else { + smf_id = hinic5_cqm_lb0_get_smf_id(hinic5_cqm_handle); + } + + return smf_id; +} + +/* This function is used in LB mode 1/2. Some BAT entries + * of independent space needs to be configured for all enabled SMFs. + */ +static void hinic5_cqm_update_bat_gpa(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, u32 smf_id) +{ + struct hinic5_hwdev *hwdev = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_bat_table *bat_table = &hinic5_cqm_handle->bat_table; + struct tag_hinic5_cqm_cla_table *cla_table = NULL; + u32 entry_type = HINIC5_CQM_BAT_ENTRY_T_INVALID; + u8 *entry_base_addr = bat_table->bat; + u32 i = 0; + + if (!HINIC5_CQM_IS_LB_MODE_1_OR_2(hinic5_cqm_handle)) + return; + + for (i = 0; i < HINIC5_CQM_BAT_ENTRY_MAX; i++) { + entry_type = bat_table->bat_entry_type[i]; + if (HINIC5_CQM_IS_PPF(hinic5_cqm_handle) && + entry_type == HINIC5_CQM_BAT_ENTRY_T_TIMER) { + cla_table = &bat_table->timer_entry[smf_id]; + hinic5_cqm_bat_fill_cla_timer(hinic5_cqm_handle, cla_table, + &entry_base_addr); + } else if (entry_type == HINIC5_CQM_BAT_ENTRY_T_HASH) { + cla_table = &bat_table->hash_entry[smf_id]; + hinic5_cqm_bat_fill_cla_other(hinic5_cqm_handle, cla_table, &entry_base_addr); + } else if (COMM_SUPPORT_VIRTIO_FC_CACHE(hwdev) && + entry_type == HINIC5_CQM_BAT_ENTRY_T_XID2CID) { + cla_table = &bat_table->xid2cid_entry[smf_id]; + hinic5_cqm_bat_fill_cla_other(hinic5_cqm_handle, cla_table, &entry_base_addr); + } else { + if (entry_type == HINIC5_CQM_BAT_ENTRY_T_TASKMAP) + entry_base_addr += sizeof(struct tag_hinic5_cqm_bat_entry_taskmap); + else + entry_base_addr += HINIC5_CQM_BAT_ENTRY_SIZE; + } + + /* Check whether entry_base_addr is out-of-bounds array. 
*/ + if (entry_base_addr >= + (bat_table->bat + HINIC5_CQM_BAT_ENTRY_MAX * HINIC5_CQM_BAT_ENTRY_SIZE)) + break; + } +} + +static s32 hinic5_cqm_bat_update_smf_cmd(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cmd_buf *buf_in, + struct tag_hinic5_cqm_bat_update_param *param) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_bat_table *bat_table = &hinic5_cqm_handle->bat_table; + struct hinic5_cqm_cmdq_ops *ops = hinic5_cqm_handle->cmdq_ops; + u8 cmd; + bool illegal_args = true; + s32 ret = HINIC5_CQM_FAIL; + + illegal_args = (param->bat_offset % HINIC5_CQM_BAT_ENTRY_SIZE != 0) || + (param->update_size % HINIC5_CQM_BAT_ENTRY_SIZE != 0) || + (param->update_size == 0) || + (param->bat_offset + param->update_size > bat_table->bat_size); + if (unlikely(illegal_args)) { + hinic5_cqm_err(handle->dev_hdl, + "Bat update: invalid args, bat_offset %u, update_size %u.", + param->bat_offset, param->update_size); + return HINIC5_CQM_FAIL; + } + + hinic5_cqm_info(handle->dev_hdl, + "Bat update: smf_id %u, func_id %u, bat_offset %u, update_size %u.", + param->smf_id, param->func_id, + param->bat_offset, param->update_size); + + ret = ops->prepare_cmd_buf_bat_update(hinic5_cqm_handle, buf_in, param, &cmd); + if (ret != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(prepare_cmd_buf_bat_update)); + return HINIC5_CQM_FAIL; + } + + hinic5_cqm_dbg_byte_print(handle->dev_hdl, (u32 *)bat_table->bat, sizeof(bat_table->bat)); + + ret = hinic5_cqm_send_cmd_box((void *)(handle), HINIC5_CQM_MOD_HINIC5_CQM, + cmd, buf_in, NULL, NULL, + HINIC5_CQM_CMD_TIMEOUT, HINIC5_CHANNEL_DEFAULT); + if (ret != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_send_cmd_box)); + hinic5_cqm_err(handle->dev_hdl, "%s: send_cmd_box ret=%d\n", __func__, + ret); + return HINIC5_CQM_FAIL; + } + + return HINIC5_CQM_SUCCESS; +} + +static s32 hinic5_cqm_bat_update_smf(struct 
tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cmd_buf *buf_in, + u32 smf_id, u32 func_id) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_bat_table *bat_table = &hinic5_cqm_handle->bat_table; + struct tag_hinic5_cqm_bat_update_param param = { 0 }; + struct hinic5_bat_entry_config l3i_config = { 0 }; + int is_in_kexec; + s32 ret = HINIC5_CQM_FAIL; + + is_in_kexec = hinic5_vram_get_kexec_flag(); + if (is_in_kexec != 0) { + hinic5_cqm_info(handle->dev_hdl, "Skip updating the hinic5_cqm_bat to chip during kexec!"); + return HINIC5_CQM_SUCCESS; + } + + if (bat_table->bat_size > HINIC5_CQM_BAT_MAX_SIZE) { + hinic5_cqm_err(handle->dev_hdl, "bat_size = %u, which is more than %d.", + bat_table->bat_size, HINIC5_CQM_BAT_MAX_SIZE); + return HINIC5_CQM_FAIL; + } + + ret = hinic5_bat_get_l3i_entry_config(handle, &l3i_config); + if (unlikely(ret != HINIC5_CQM_SUCCESS)) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_bat_get_l3i_entry_config)); + return ret; + } + + param.smf_id = smf_id; + param.func_id = func_id; + + /* The L3I entry is not managed by HINIC5_CQM */ + if (l3i_config.mapping && + bat_table->bat_size > l3i_config.bat_entry_offset) { + /* update bat entries before L3I */ + param.bat_offset = 0; + param.update_size = l3i_config.bat_entry_offset; + ret = hinic5_cqm_bat_update_smf_cmd(hinic5_cqm_handle, buf_in, ¶m); + if (ret != HINIC5_CQM_SUCCESS) + goto cmd_err; + + /* update bat entries after L3I */ + param.bat_offset = l3i_config.bat_entry_offset + l3i_config.bat_entry_size; + if (bat_table->bat_size > param.bat_offset) { + param.update_size = bat_table->bat_size - param.bat_offset; + ret = hinic5_cqm_bat_update_smf_cmd(hinic5_cqm_handle, buf_in, ¶m); + if (ret != HINIC5_CQM_SUCCESS) + goto cmd_err; + } + } else { + /* update all bat entries */ + param.bat_offset = 0; + param.update_size = bat_table->bat_size; + ret = hinic5_cqm_bat_update_smf_cmd(hinic5_cqm_handle, buf_in, ¶m); + if 
(ret != HINIC5_CQM_SUCCESS) + goto cmd_err; + } + + return HINIC5_CQM_SUCCESS; + +cmd_err: + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_bat_update_smf_cmd)); + return ret; +} + +static s32 hinic5_cqm_bat_update_all_smf(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cmd_buf *buf_in, + u32 func_id) +{ + struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability; + u32 smf_id = 0; + s32 ret = HINIC5_CQM_SUCCESS; + + for (smf_id = 0; smf_id < func_cap->smf_max_num; smf_id++) { + if ((func_cap->smf_pg & (1U << smf_id)) == 0) + continue; + + hinic5_cqm_update_bat_gpa(hinic5_cqm_handle, smf_id); + ret = hinic5_cqm_bat_update_smf(hinic5_cqm_handle, buf_in, smf_id, func_id); + if (ret != HINIC5_CQM_SUCCESS) + return ret; + } + + return ret; +} + +/** + * The LB scenario is supported. + * - The normal mode is the traditional mode and is configured on SMF0. + * - In mode 0, load is balanced to all SMFs based on the func ID (except + * the PPF func ID). The PPF in mode 0 needs to be configured on all SMFs, + * so the timer resources can be shared by the all timer engine. + * - Mode 1/2 is load balanced to all SMFs by flow. Therefore, one function + * needs to be configured to all SMFs. 
+ */ +static s32 hinic5_cqm_bat_update_lb(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cmd_buf *buf_in, + u32 func_id) +{ + struct hinic5_hwdev *hwdev = hinic5_cqm_handle->ex_handle; + u32 smf_id; + + if (HINIC5_CQM_IS_LB_MODE_NORMAL(hinic5_cqm_handle)) { + smf_id = hinic5_cqm_funcid2smfid(hinic5_cqm_handle); + return hinic5_cqm_bat_update_smf(hinic5_cqm_handle, buf_in, smf_id, func_id); + } + + if (HINIC5_CQM_IS_LB_MODE_0(hinic5_cqm_handle)) { + if (HINIC5_CQM_IS_PPF(hinic5_cqm_handle)) + return hinic5_cqm_bat_update_all_smf(hinic5_cqm_handle, buf_in, func_id); + smf_id = hinic5_cqm_funcid2smfid(hinic5_cqm_handle); + return hinic5_cqm_bat_update_smf(hinic5_cqm_handle, buf_in, smf_id, func_id); + } + + if (HINIC5_CQM_IS_LB_MODE_1(hinic5_cqm_handle) || HINIC5_CQM_IS_LB_MODE_2(hinic5_cqm_handle)) + return hinic5_cqm_bat_update_all_smf(hinic5_cqm_handle, buf_in, func_id); + + hinic5_cqm_err(hwdev->dev_hdl, "Bat update: unsupported lb mode=%u\n", + hinic5_cqm_handle->func_capability.lb_mode); + return HINIC5_CQM_FAIL; +} + +/** + * Prototype : hinic5_cqm_bat_update + * Description : Send a command to tile to update the BAT table through cmdq. + * Input : struct tag_hinic5_cqm_handle *hinic5_cqm_handle + * Output : None + * Return Value : s32 + * 1.Date : 2015/5/15 + * Modification : Created function + */ +static s32 hinic5_cqm_bat_update(struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + struct hinic5_hwdev *hwdev = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_cmd_buf *buf_in = NULL; + s32 ret = HINIC5_CQM_FAIL; + u32 func_id = 0; + + /* The BAT is maintained by the parent function. 
*/ + if (HINIC5_CQM_IS_FAKE_CHILD(hinic5_cqm_handle)) { + hinic5_cqm_err(hwdev->dev_hdl, "Bat update: unsupported for fake child\n"); + return HINIC5_CQM_FAIL; + } + + buf_in = hinic5_cqm_cmd_alloc((void *)(hinic5_cqm_handle->ex_handle)); + if (unlikely((buf_in) == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_ALLOC_FAIL(buf_in)); + return HINIC5_CQM_FAIL; + } + + /* In non-fake mode, func_id is set to 0xffff, indicating the current + * func. In fake mode, the value of func_id is specified. This is a fake + * func_id. + */ + if (HINIC5_CQM_IS_FAKE_CHILD_AGENT(hinic5_cqm_handle)) + func_id = hinic5_cqm_handle->func_attribute.func_global_idx; + else + func_id = 0xffff; + + ret = hinic5_cqm_bat_update_lb(hinic5_cqm_handle, buf_in, func_id); + + hinic5_cqm_cmd_free((void *)(hinic5_cqm_handle->ex_handle), buf_in); + return ret; +} + +static s32 hinic5_cqm_bat_init_ft(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, struct tag_hinic5_cqm_bat_table *bat_table, + enum func_type function_type) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + u32 i = 0; + + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX0] = HINIC5_CQM_BAT_ENTRY_T_CFG; + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX1] = HINIC5_CQM_BAT_ENTRY_T_HASH; + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX2] = HINIC5_CQM_BAT_ENTRY_T_QPC; + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX3] = HINIC5_CQM_BAT_ENTRY_T_SCQC; + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX4] = HINIC5_CQM_BAT_ENTRY_T_LUN; + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX5] = HINIC5_CQM_BAT_ENTRY_T_TASKMAP; + + if (function_type == HINIC5_CQM_PF || function_type == HINIC5_CQM_PPF) { + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX6] = HINIC5_CQM_BAT_ENTRY_T_L3I; + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX7] = HINIC5_CQM_BAT_ENTRY_T_CHILDC; + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX8] = HINIC5_CQM_BAT_ENTRY_T_TIMER; + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX9] = HINIC5_CQM_BAT_ENTRY_T_XID2CID; + 
bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX10] = HINIC5_CQM_BAT_ENTRY_T_REORDER; + bat_table->bat_size = HINIC5_CQM_BAT_SIZE_FT_PF; + } else if (function_type == HINIC5_CQM_VF) { + bat_table->bat_size = HINIC5_CQM_BAT_SIZE_FT_VF; + } else { + for (i = 0; i < HINIC5_CQM_BAT_ENTRY_MAX; i++) + bat_table->bat_entry_type[i] = HINIC5_CQM_BAT_ENTRY_T_INVALID; + + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(function_type)); + return HINIC5_CQM_FAIL; + } + + return HINIC5_CQM_SUCCESS; +} + +static s32 hinic5_cqm_bat_init_rdma(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_bat_table *bat_table, + enum func_type function_type) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + u32 i = 0; + + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX0] = HINIC5_CQM_BAT_ENTRY_T_QPC; + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX1] = HINIC5_CQM_BAT_ENTRY_T_SCQC; + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX2] = HINIC5_CQM_BAT_ENTRY_T_SRQC; + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX3] = HINIC5_CQM_BAT_ENTRY_T_MPT; + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX4] = HINIC5_CQM_BAT_ENTRY_T_GID; + + if (function_type == HINIC5_CQM_PF || function_type == HINIC5_CQM_PPF) { + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX5] = HINIC5_CQM_BAT_ENTRY_T_L3I; + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX6] = + HINIC5_CQM_BAT_ENTRY_T_CHILDC; + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX7] = + HINIC5_CQM_BAT_ENTRY_T_TIMER; + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX8] = + HINIC5_CQM_BAT_ENTRY_T_XID2CID; + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX9] = + HINIC5_CQM_BAT_ENTRY_T_REORDER; + bat_table->bat_size = HINIC5_CQM_BAT_SIZE_RDMA_PF; + } else if (function_type == HINIC5_CQM_VF) { + bat_table->bat_size = HINIC5_CQM_BAT_SIZE_RDMA_VF; + } else { + for (i = 0; i < HINIC5_CQM_BAT_ENTRY_MAX; i++) + bat_table->bat_entry_type[i] = HINIC5_CQM_BAT_ENTRY_T_INVALID; + + hinic5_cqm_err(handle->dev_hdl, 
HINIC5_CQM_WRONG_VALUE(function_type)); + return HINIC5_CQM_FAIL; + } + + return HINIC5_CQM_SUCCESS; +} + +static s32 hinic5_cqm_bat_init_ft_rdma(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_bat_table *bat_table, + enum func_type function_type) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + u32 i = 0; + + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX0] = HINIC5_CQM_BAT_ENTRY_T_CFG; + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX1] = HINIC5_CQM_BAT_ENTRY_T_HASH; + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX2] = HINIC5_CQM_BAT_ENTRY_T_QPC; + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX3] = HINIC5_CQM_BAT_ENTRY_T_SCQC; + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX4] = HINIC5_CQM_BAT_ENTRY_T_SRQC; + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX5] = HINIC5_CQM_BAT_ENTRY_T_MPT; + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX6] = HINIC5_CQM_BAT_ENTRY_T_GID; + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX7] = HINIC5_CQM_BAT_ENTRY_T_LUN; + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX8] = HINIC5_CQM_BAT_ENTRY_T_TASKMAP; + + if (function_type == HINIC5_CQM_PF || function_type == HINIC5_CQM_PPF) { + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX9] = HINIC5_CQM_BAT_ENTRY_T_L3I; + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX10] = + HINIC5_CQM_BAT_ENTRY_T_CHILDC; + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX11] = + HINIC5_CQM_BAT_ENTRY_T_TIMER; + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX12] = + HINIC5_CQM_BAT_ENTRY_T_XID2CID; + bat_table->bat_entry_type[HINIC5_CQM_BAT_INDEX13] = + HINIC5_CQM_BAT_ENTRY_T_REORDER; + bat_table->bat_size = HINIC5_CQM_BAT_SIZE_FT_RDMA_PF; + } else if (function_type == HINIC5_CQM_VF) { + bat_table->bat_size = HINIC5_CQM_BAT_SIZE_FT_RDMA_VF; + } else { + for (i = 0; i < HINIC5_CQM_BAT_ENTRY_MAX; i++) + bat_table->bat_entry_type[i] = HINIC5_CQM_BAT_ENTRY_T_INVALID; + + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(function_type)); + return HINIC5_CQM_FAIL; + } + + return 
HINIC5_CQM_SUCCESS; +} + +/** + * Prototype : hinic5_cqm_bat_init + * Description : Initialize the BAT table. Only the items to be initialized and + * the entry sequence are selected. The content of the BAT entry + * is filled after the CLA is allocated. + * Input : struct tag_hinic5_cqm_handle *hinic5_cqm_handle + * Output : None + * Return Value : s32 + * 1.Date : 2015/5/15 + * Modification : Created function + */ +s32 hinic5_cqm_bat_init(struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + struct tag_hinic5_cqm_func_capability *capability = &hinic5_cqm_handle->func_capability; + enum func_type function_type = hinic5_cqm_handle->func_attribute.func_type; + struct tag_hinic5_cqm_bat_table *bat_table = &hinic5_cqm_handle->bat_table; + u32 i; + + memset(bat_table, 0, sizeof(struct tag_hinic5_cqm_bat_table)); + + /* Initialize the type of each bat entry. */ + for (i = 0; i < HINIC5_CQM_BAT_ENTRY_MAX; i++) + bat_table->bat_entry_type[i] = HINIC5_CQM_BAT_ENTRY_T_INVALID; + + /* Select BATs based on service types. Currently, + * feature-related resources of the VF are stored in the BATs of the VF. 
+ */ + if (capability->ft_enable && capability->rdma_enable) + return hinic5_cqm_bat_init_ft_rdma(hinic5_cqm_handle, bat_table, function_type); + else if (capability->ft_enable) + return hinic5_cqm_bat_init_ft(hinic5_cqm_handle, bat_table, function_type); + else if (capability->rdma_enable) + return hinic5_cqm_bat_init_rdma(hinic5_cqm_handle, bat_table, function_type); + + return HINIC5_CQM_SUCCESS; +} + +STATIC s32 hinic5_cqm_cla_reset(struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_cmd_buf *buf_in = NULL; + struct tag_hinic5_cqm_cla_reset_cmd *cmd_data = NULL; + int ret = HINIC5_CQM_SUCCESS; + + buf_in = hinic5_cqm_cmd_alloc(handle); + if (unlikely(!buf_in)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_ALLOC_FAIL(buf_in)); + return HINIC5_CQM_FAIL; + } + + cmd_data = buf_in->buf; + memset(cmd_data, 0, sizeof(*cmd_data)); + + cmd_data->func_id = hinic5_global_func_id(handle); + hinic5_cqm_swab32((u8 *)cmd_data, sizeof(struct tag_hinic5_cqm_cla_reset_cmd) >> HINIC5_CQM_DW_SHIFT); + ret = hinic5_cqm_send_cmd_box(handle, HINIC5_CQM_MOD_HINIC5_CQM, HINIC5_CQM_CMD_T_CLA_RESET, + buf_in, NULL, NULL, + HINIC5_CQM_CMD_TIMEOUT, HINIC5_CHANNEL_DEFAULT); + if (ret != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_send_cmd_box)); + } + + return ret; +} + +/** + * Prototype : hinic5_cqm_bat_uninit + * Description : Deinitialize the BAT table. 
+ * Input : struct tag_hinic5_cqm_handle *hinic5_cqm_handle + * Output : None + * Return Value : void + * 1.Date : 2015/5/15 + * Modification : Created function + */ +void hinic5_cqm_bat_uninit(struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + struct tag_hinic5_cqm_bat_table *bat_table = &hinic5_cqm_handle->bat_table; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + u32 i; + + for (i = 0; i < HINIC5_CQM_BAT_ENTRY_MAX; i++) + bat_table->bat_entry_type[i] = HINIC5_CQM_BAT_ENTRY_T_INVALID; + + /* The BAT is maintained by the parent function. + Reset CLA instead of clear BAT. */ + if (HINIC5_CQM_IS_FAKE_CHILD(hinic5_cqm_handle)) { + hinic5_cqm_cla_reset(hinic5_cqm_handle); + return; + } + + memset(bat_table->bat, 0, HINIC5_CQM_BAT_ENTRY_MAX * HINIC5_CQM_BAT_ENTRY_SIZE); + /* Instruct the chip to update the BAT table. */ + if (hinic5_cqm_bat_update(hinic5_cqm_handle) != HINIC5_CQM_SUCCESS) + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_bat_update)); +} + +static u64 hinic5_cqm_cla_chip_gpa_flags(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, u8 gpa_check_enable) +{ + struct hinic5_func_attr *func_attr = NULL; + u64 fake_en, spu_en, pf_id; + + spu_en = ((u64)hinic5_cqm_get_acs_spu_en(hinic5_cqm_handle)) << 0x3F; + + /* fake enable */ + fake_en = 0; + pf_id = 0; + if (HINIC5_CQM_IS_FAKE_CHILD_AGENT(hinic5_cqm_handle)) { + fake_en = 1ULL << 0x3E; + func_attr = &hinic5_cqm_handle->parent_hinic5_cqm_handle->func_attribute; + pf_id = (u64)(func_attr->func_global_idx & 0x1f) << 0x39; + } + + return spu_en | fake_en | pf_id | gpa_check_enable; +} + +/** + * Create mapping from cla_base_buf to cla_sub_buf. + * The pointer in cla_base_buf is mapped from base_offset, and the target buf + * in cla_sub_buf is used from sub_offset. 
+ */
+static s32 hinic5_cqm_cla_map_buf(struct tag_hinic5_cqm_handle *hinic5_cqm_handle,
+				  struct tag_hinic5_cqm_buf *cla_base_buf,
+				  const struct tag_hinic5_cqm_buf *cla_sub_buf,
+				  u32 base_offset, u32 sub_offset, u32 num,
+				  u8 gpa_check_enable)
+{
+	struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle;
+	u32 buf_addr_cap, base_addr_cap;
+	u64 gpa_flags = 0;
+	u32 i, base_buf_index, base_buf_offset, index_base_offset, index_sub_offset;
+	dma_addr_t *base = NULL;
+
+	buf_addr_cap = cla_base_buf->buf_size / sizeof(dma_addr_t);
+	base_addr_cap = cla_base_buf->buf_number * buf_addr_cap;
+
+	/* Reject empty or out-of-range mappings up front. */
+	if (unlikely(num == 0 || (base_offset + num > base_addr_cap) ||
+		     (sub_offset + num > cla_sub_buf->buf_number))) {
+		/* Third value is the base-buffer capacity, label it accordingly. */
+		hinic5_cqm_err(handle->dev_hdl,
+			       "Cla alloc: truncate! mapping num %u, base off %u, base cap %u, sub offset %u, sub cap %u",
+			       num, base_offset, base_addr_cap, sub_offset, cla_sub_buf->buf_number);
+		return HINIC5_CQM_FAIL;
+	}
+
+	gpa_flags = hinic5_cqm_cla_chip_gpa_flags(hinic5_cqm_handle, gpa_check_enable);
+
+	hinic5_cqm_dbg(handle->dev_hdl,
+		       "hinic5_cqm_cla_map_buf: mapping num %u, base off %u, base cap %u, sub offset %u, sub cap %u, gpa_flags 0x%llX\n",
+		       num, base_offset, base_addr_cap, sub_offset, cla_sub_buf->buf_number, gpa_flags);
+
+	index_base_offset = base_offset;
+	index_sub_offset = sub_offset;
+	for (i = 0; i < num; i++) {
+		/* Locate the pointer slot inside the (possibly multi-page) base buf. */
+		base_buf_index = index_base_offset / buf_addr_cap;
+		base_buf_offset = index_base_offset % buf_addr_cap;
+		base = (dma_addr_t *)(cla_base_buf->buf_list[base_buf_index].va);
+		base += base_buf_offset;
+
+#define HINIC5_CQM_TIMER_FUNC_BUF_NUM 64
+		hinic5_cqm_dbg_on(i % HINIC5_CQM_TIMER_FUNC_BUF_NUM == 0, handle->dev_hdl,
+				  "hinic5_cqm_cla_map_buf: mapping %4u, pointer(va 0x%lX, base_buf+%03u) --> sub_buf(idx %4u, pa 0x%lX), using base_buf(idx %3u, pa 0x%lX, va 0x%lX).\n",
+				  i, (uintptr_t)base, base_buf_offset,
+				  index_sub_offset, (uintptr_t)cla_sub_buf->buf_list[index_sub_offset].pa,
+				  base_buf_index,
+				  (uintptr_t)cla_base_buf->buf_list[base_buf_index].pa,
+				  (uintptr_t)cla_base_buf->buf_list[base_buf_index].va);
+
+		/* Store the sub buffer GPA plus chip flag bits, big-endian for HW. */
+		*base = (dma_addr_t)(((u64)(cla_sub_buf->buf_list[index_sub_offset].pa) & HINIC5_CQM_CHIP_GPA_MASK)
+				     | gpa_flags);
+		hinic5_cqm_swab64((u8 *)base, 1);
+
+		index_base_offset++;
+		index_sub_offset++;
+	}
+
+	return HINIC5_CQM_SUCCESS;
+}
+
+/* Allocate base/sub CLA buffers as needed and fill every base slot with the
+ * flagged GPA of the corresponding sub buffer. On sub-buf allocation failure
+ * the base buffer is freed as well.
+ */
+static s32 hinic5_cqm_cla_fill_buf(struct tag_hinic5_cqm_handle *hinic5_cqm_handle,
+				   struct tag_hinic5_cqm_buf *cla_base_buf,
+				   struct tag_hinic5_cqm_buf *cla_sub_buf, u8 gpa_check_enable)
+{
+	struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle;
+	dma_addr_t *base = NULL;
+	u64 gpa_flags = 0;
+	u32 i = 0;
+	u32 addr_num;
+	u32 buf_index = 0;
+	s32 ret;
+
+	/* Apply for space for base_buf */
+	if (!cla_base_buf->buf_list) {
+		ret = hinic5_cqm_buf_alloc(hinic5_cqm_handle, cla_base_buf, false);
+		if (unlikely(ret != HINIC5_CQM_SUCCESS)) {
+			hinic5_cqm_warn(handle->dev_hdl, HINIC5_CQM_ALLOC_FAIL(cla_base_buf));
+			return ret;
+		}
+	}
+
+	/* Apply for space for sub_buf */
+	if (!cla_sub_buf->buf_list) {
+		ret = hinic5_cqm_buf_alloc(hinic5_cqm_handle, cla_sub_buf, false);
+		if (unlikely(ret != HINIC5_CQM_SUCCESS)) {
+			hinic5_cqm_warn(handle->dev_hdl, HINIC5_CQM_ALLOC_FAIL(cla_sub_buf));
+			hinic5_cqm_buf_free(cla_base_buf, hinic5_cqm_handle->dev);
+			return ret;
+		}
+	}
+
+	gpa_flags = hinic5_cqm_cla_chip_gpa_flags(hinic5_cqm_handle, gpa_check_enable);
+	hinic5_cqm_dbg(handle->dev_hdl, "hinic5_cqm_cla_fill_buf: gpa_flags 0x%llX\n", gpa_flags);
+
+	/* Fill base_buff with the gpa of sub_buf */
+	addr_num = cla_base_buf->buf_size / sizeof(dma_addr_t);
+	base = (dma_addr_t *)(cla_base_buf->buf_list[0].va);
+	for (i = 0; i < cla_sub_buf->buf_number; i++) {
+		*base = (dma_addr_t)(((u64)(cla_sub_buf->buf_list[i].pa) & HINIC5_CQM_CHIP_GPA_MASK)
+				     | gpa_flags);
+
+		hinic5_cqm_swab64((u8 *)base, 1);
+		/* Advance to the next base page when the current one is full.
+		 * NOTE(review): if buf_index ever reaches buf_number while sub
+		 * buffers remain, base is not advanced — callers are assumed to
+		 * size base_buf to cover sub_buf; confirm at call sites.
+		 */
+		if ((i + 1) % addr_num == 0) {
+			buf_index++;
+			if (buf_index < cla_base_buf->buf_number)
+				base = cla_base_buf->buf_list[buf_index].va;
+		} else {
+			base++;
+		}
+	}
+
+	return HINIC5_CQM_SUCCESS;
+}
+
+/* Record the geometry of a CLA buffer about to be allocated. */
+static void hinic5_cqm_apply_new_buf(struct tag_hinic5_cqm_buf *cla_buf, u32 buf_size, u32 buf_num, u32 buf_order)
+{
+	cla_buf->buf_size = buf_size;
+	cla_buf->buf_number = buf_num;
+	cla_buf->page_number = cla_buf->buf_number << buf_order;
+}
+
+/* Point a CLA buffer at the pre-allocated secure-memory region instead of
+ * allocating DMA pages; only the buffer-list descriptors are allocated here.
+ */
+static s32 hinic5_cqm_cla_secure_mem_buf_alloc(struct tag_hinic5_cqm_cla_table *cla_table,
+					       struct tag_hinic5_cqm_buf *buf)
+{
+	/* Applying for the buffer list descriptor space */
+	buf->buf_list = vmalloc(buf->buf_number * sizeof(struct tag_hinic5_cqm_buf_list));
+	if (unlikely(buf->buf_list == NULL)) {
+		HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_ALLOC_FAIL(secure_mem_buf_alloc));
+		return HINIC5_CQM_FAIL;
+	}
+	memset(buf->buf_list,
+	       0, buf->buf_number * sizeof(struct tag_hinic5_cqm_buf_list));
+
+	buf->buf_list->va = cla_table->secure_mem.va;
+	buf->buf_list->pa = cla_table->secure_mem.pa;
+	buf->secure_mem_flag = HINIC5_CQM_SECURE_BUFFER_EN;
+
+	return HINIC5_CQM_SUCCESS;
+}
+
+/* Level-0 CLA: the whole table fits in one trunk, only CLA_Z_BUF is needed. */
+static s32 hinic5_cqm_cla_xyz_lvl0(struct tag_hinic5_cqm_handle *hinic5_cqm_handle,
+				   struct tag_hinic5_cqm_cla_table *cla_table, u32 trunk_size)
+{
+	struct tag_hinic5_cqm_buf *cla_z_buf = &cla_table->cla_z_buf;
+	s32 ret;
+
+	cla_table->cla_lvl = HINIC5_CQM_CLA_LVL_0;
+
+	cla_table->z = cla_table->max_index_bit;
+	cla_table->y = 0;
+	cla_table->x = 0;
+
+	cla_table->cacheline_z = cla_table->z;
+	cla_table->cacheline_y = cla_table->y;
+	cla_table->cacheline_x = cla_table->x;
+
+	/* Applying for CLA_Z_BUF Space */
+	cla_z_buf->buf_size = trunk_size;
+	cla_z_buf->buf_number = 1;
+	cla_z_buf->page_number = cla_z_buf->buf_number << cla_table->trunk_order;
+
+	if (secure_mem_en && HINIC5_CQM_IS_VF(hinic5_cqm_handle) && HINIC5_CQM_CLA_IS_SECURE_MEM(cla_table->type))
+		return hinic5_cqm_cla_secure_mem_buf_alloc(cla_table, cla_z_buf);
+
+	ret = hinic5_cqm_buf_alloc(hinic5_cqm_handle, cla_z_buf, false);
+	if (unlikely(ret != HINIC5_CQM_SUCCESS))
+
hinic5_cqm_warn(hinic5_cqm_handle->dev,
+				"lvl_0_z_buf alloc fail. buf size 0x%x, ret %d.\n",
+				trunk_size, ret);
+	return ret;
+}
+
+/* Level-1 CLA: one CLA_Y_BUF page of pointers plus the CLA_Z_BUF data pages. */
+static s32 hinic5_cqm_cla_xyz_lvl1(struct tag_hinic5_cqm_handle *hinic5_cqm_handle,
+				   struct tag_hinic5_cqm_cla_table *cla_table, u32 trunk_size)
+{
+	struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle;
+	struct tag_hinic5_cqm_buf *cla_y_buf = &cla_table->cla_y_buf;
+	struct tag_hinic5_cqm_buf *cla_z_buf = &cla_table->cla_z_buf;
+	u32 shift = 0, z_buf_num;
+	u8 gpa_check_enable = hinic5_cqm_handle->func_capability.gpa_check_enable;
+	u32 cache_line = get_cacheline_size(cla_table->type);
+	s32 ret;
+
+	/* Reorder entries are never GPA-checked. */
+	if (cla_table->type == HINIC5_CQM_BAT_ENTRY_T_REORDER)
+		gpa_check_enable = 0;
+
+	cla_table->cla_lvl = HINIC5_CQM_CLA_LVL_1;
+
+	/* Index split: z bits address objects inside a trunk, y the trunk. */
+	shift = hinic5_cqm_shift(trunk_size / cla_table->obj_size);
+	cla_table->z = ((shift != 0) ? (shift - 1) : (shift));
+	cla_table->y = cla_table->max_index_bit;
+	cla_table->x = 0;
+
+	if (cla_table->obj_size >= cache_line) {
+		cla_table->cacheline_z = cla_table->z;
+		cla_table->cacheline_y = cla_table->y;
+		cla_table->cacheline_x = cla_table->x;
+	} else {
+		/* Small objects: recompute the split at cacheline granularity. */
+		shift = hinic5_cqm_shift(trunk_size / cache_line);
+		cla_table->cacheline_z = ((shift != 0) ? (shift - 1) : (shift));
+		cla_table->cacheline_y = cla_table->max_index_bit;
+		cla_table->cacheline_x = 0;
+	}
+
+	/* Applying for CLA_Y_BUF Space */
+	hinic5_cqm_apply_new_buf(cla_y_buf, trunk_size, 1, cla_table->trunk_order);
+	ret = hinic5_cqm_buf_alloc(hinic5_cqm_handle, cla_y_buf, false);
+	if (unlikely(ret != HINIC5_CQM_SUCCESS)) {
+		hinic5_cqm_warn(hinic5_cqm_handle->dev,
+				"lvl_1_y_buf alloc fail. buf size 0x%x, ret %d.\n",
+				trunk_size, ret);
+		return ret;
+	}
+
+	/* Applying for CLA_Z_BUF Space */
+	z_buf_num = ALIGN(cla_table->max_buffer_size, trunk_size) / trunk_size;
+	hinic5_cqm_apply_new_buf(cla_z_buf, trunk_size, z_buf_num, cla_table->trunk_order);
+	/* All buffer space must be statically allocated.
+	 */
+	if (cla_table->alloc_static) {
+		ret = hinic5_cqm_cla_fill_buf(hinic5_cqm_handle, cla_y_buf, cla_z_buf, gpa_check_enable);
+		if (unlikely(ret != HINIC5_CQM_SUCCESS)) {
+			hinic5_cqm_warn(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_cla_fill_buf));
+			/* cla_y_buf freed by hinic5_cqm_cla_fill_buf() */
+			return ret;
+		}
+	} else { /* Only the buffer list space is initialized. The buffer space
+		  * is dynamically allocated in services.
+		  */
+		ret = hinic5_cqm_buf_list_alloc(cla_z_buf);
+		if (unlikely(ret != HINIC5_CQM_SUCCESS)) {
+			hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_ALLOC_FAIL(lvl_1_z_buf));
+			hinic5_cqm_buf_free(cla_y_buf, hinic5_cqm_handle->dev);
+			return ret;
+		}
+	}
+
+	return HINIC5_CQM_SUCCESS;
+}
+
+/* Derive x/y/z index splits (and their cacheline variants) for level-2 CLA. */
+static void hinic5_cqm_cla_xyz_lvl2_param_init(struct tag_hinic5_cqm_cla_table *cla_table, u32 trunk_size)
+{
+	u32 shift = 0;
+	u32 cache_line = get_cacheline_size(cla_table->type);
+
+	cla_table->cla_lvl = HINIC5_CQM_CLA_LVL_2;
+
+	shift = hinic5_cqm_shift(trunk_size / cla_table->obj_size);
+	cla_table->z = ((shift != 0) ? (shift - 1) : (shift));
+	shift = hinic5_cqm_shift(trunk_size / sizeof(dma_addr_t));
+	cla_table->y = cla_table->z + shift;
+	cla_table->x = cla_table->max_index_bit;
+
+	if (cla_table->obj_size >= cache_line) {
+		cla_table->cacheline_z = cla_table->z;
+		cla_table->cacheline_y = cla_table->y;
+		cla_table->cacheline_x = cla_table->x;
+	} else {
+		shift = hinic5_cqm_shift(trunk_size / cache_line);
+		cla_table->cacheline_z = ((shift != 0) ? (shift - 1) : (shift));
+		shift = hinic5_cqm_shift(trunk_size / sizeof(dma_addr_t));
+		cla_table->cacheline_y = cla_table->cacheline_z + shift;
+		cla_table->cacheline_x = cla_table->max_index_bit;
+	}
+}
+
+/* Allocate the level-2 CLA_X_BUF and size (but do not allocate) Y and Z. */
+static s32 hinic5_cqm_cla_xyz_lvl2_xyz_apply(struct tag_hinic5_cqm_handle *hinic5_cqm_handle,
+					     struct tag_hinic5_cqm_cla_table *cla_table, u32 trunk_size)
+{
+	struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle;
+	struct tag_hinic5_cqm_buf *cla_x_buf = &cla_table->cla_x_buf;
+	struct tag_hinic5_cqm_buf *cla_y_buf = &cla_table->cla_y_buf;
+	struct tag_hinic5_cqm_buf *cla_z_buf = &cla_table->cla_z_buf;
+	s32 ret;
+
+	/* Apply for CLA_X_BUF Space */
+	cla_x_buf->buf_size = trunk_size;
+	cla_x_buf->buf_number = 1;
+	cla_x_buf->page_number = cla_x_buf->buf_number << cla_table->trunk_order;
+	cla_x_buf->buf_info.use_hinic5_vram = get_use_hinic5_vram_flag();
+	ret = hinic5_cqm_buf_alloc(hinic5_cqm_handle, cla_x_buf, false);
+	if (unlikely(ret != HINIC5_CQM_SUCCESS)) {
+		hinic5_cqm_warn(handle->dev_hdl, HINIC5_CQM_ALLOC_FAIL(lvl_2_x_buf));
+		return ret;
+	}
+
+	/* Apply for CLA_Z_BUF and CLA_Y_BUF Space */
+	cla_z_buf->buf_size = trunk_size;
+	cla_z_buf->buf_number = (ALIGN(cla_table->max_buffer_size, trunk_size)) / trunk_size;
+	cla_z_buf->page_number = cla_z_buf->buf_number << cla_table->trunk_order;
+
+	cla_y_buf->buf_size = trunk_size;
+	cla_y_buf->buf_number =
+		(u32)(ALIGN(cla_z_buf->buf_number * sizeof(dma_addr_t), trunk_size)) / trunk_size;
+	cla_y_buf->page_number = cla_y_buf->buf_number << cla_table->trunk_order;
+
+	return 0;
+}
+
+/* Build the vram names used to persist the x/y/z CLA buffers. */
+static s32 hinic5_cqm_cla_xyz_hinic5_vram_name_init(struct tag_hinic5_cqm_cla_table *cla_table,
+						    struct hinic5_hwdev *handle)
+{
+	struct tag_hinic5_cqm_buf *cla_x_buf = &cla_table->cla_x_buf;
+	struct tag_hinic5_cqm_buf *cla_y_buf = &cla_table->cla_y_buf;
+	struct tag_hinic5_cqm_buf *cla_z_buf = &cla_table->cla_z_buf;
+	const int use_hinic5_vram = get_use_hinic5_vram_flag();
+	int ret;
+
+
cla_x_buf->buf_info.use_hinic5_vram = use_hinic5_vram;
+	ret = snprintf(cla_x_buf->buf_info.buf_hinic5_vram_name, HINIC5_VRAM_NAME_MAX_LEN,
+		       "%s%s", cla_table->name, HINIC5_VRAM_HINIC5_CQM_CLA_COORD_X);
+	/* The code uses snprintf (not snprintf_s), so the diagnostics say so. */
+	if (ret < 0) {
+		hinic5_cqm_err(handle->dev_hdl, "hinic5_cqm cla x hinic5_vram name snprintf failed, cla_table->name:%s", cla_table->name);
+		return HINIC5_CQM_FAIL;
+	}
+
+	cla_y_buf->buf_info.use_hinic5_vram = use_hinic5_vram;
+	ret = snprintf(cla_y_buf->buf_info.buf_hinic5_vram_name, HINIC5_VRAM_NAME_MAX_LEN,
+		       "%s%s", cla_table->name, HINIC5_VRAM_HINIC5_CQM_CLA_COORD_Y);
+	if (ret < 0) {
+		hinic5_cqm_err(handle->dev_hdl, "hinic5_cqm cla y hinic5_vram name snprintf failed, cla_table->name:%s", cla_table->name);
+		return HINIC5_CQM_FAIL;
+	}
+
+	cla_z_buf->buf_info.use_hinic5_vram = use_hinic5_vram;
+	ret = snprintf(cla_z_buf->buf_info.buf_hinic5_vram_name, HINIC5_VRAM_NAME_MAX_LEN,
+		       "%s%s", cla_table->name, HINIC5_VRAM_HINIC5_CQM_CLA_COORD_Z);
+	if (ret < 0) {
+		hinic5_cqm_err(handle->dev_hdl, "hinic5_cqm cla z hinic5_vram name snprintf failed, cla_table->name:%s", cla_table->name);
+		return HINIC5_CQM_FAIL;
+	}
+
+	return HINIC5_CQM_SUCCESS;
+}
+
+/* Level-2 CLA: allocate X/Y/Z buffers and wire X -> Y -> Z pointer chains. */
+static s32 hinic5_cqm_cla_xyz_lvl2(struct tag_hinic5_cqm_handle *hinic5_cqm_handle,
+				   struct tag_hinic5_cqm_cla_table *cla_table, u32 trunk_size)
+{
+	struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle;
+	struct tag_hinic5_cqm_buf *cla_x_buf = &cla_table->cla_x_buf;
+	struct tag_hinic5_cqm_buf *cla_y_buf = &cla_table->cla_y_buf;
+	struct tag_hinic5_cqm_buf *cla_z_buf = &cla_table->cla_z_buf;
+	s32 ret = HINIC5_CQM_FAIL;
+	u8 gpa_check_enable = hinic5_cqm_handle->func_capability.gpa_check_enable;
+
+	hinic5_cqm_cla_xyz_lvl2_param_init(cla_table, trunk_size);
+
+	ret = hinic5_cqm_cla_xyz_lvl2_xyz_apply(hinic5_cqm_handle, cla_table, trunk_size);
+	if (ret != HINIC5_CQM_SUCCESS)
+		return ret;
+
+	if (cla_table->type == HINIC5_CQM_BAT_ENTRY_T_REORDER)
+		gpa_check_enable = 0;
+
+	/* All buffer space must be statically allocated.
+	 */
+	if (cla_table->alloc_static) {
+		/* Apply for y buf and z buf, and fill the gpa of z buf list in y buf */
+		ret = hinic5_cqm_cla_fill_buf(hinic5_cqm_handle, cla_y_buf, cla_z_buf,
+					      gpa_check_enable);
+		if (unlikely(ret != HINIC5_CQM_SUCCESS)) {
+			hinic5_cqm_warn(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_cla_fill_buf));
+			hinic5_cqm_buf_free(cla_x_buf, hinic5_cqm_handle->dev);
+			return ret;
+		}
+
+		/* Fill the gpa with the y buf list into the x buf.
+		 * After the x and y bufs are applied for, this function will not fail.
+		 * Use void to forcibly convert the return of the function.
+		 */
+		(void)hinic5_cqm_cla_fill_buf(hinic5_cqm_handle, cla_x_buf, cla_y_buf, gpa_check_enable);
+	} else { /* Only the buffer list space is initialized. The buffer space
+		  * is dynamically allocated in services.
+		  */
+		ret = hinic5_cqm_buf_list_alloc(cla_z_buf);
+		if (unlikely(ret != HINIC5_CQM_SUCCESS)) {
+			hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_ALLOC_FAIL(lvl_2_z_buf));
+			hinic5_cqm_buf_free(cla_x_buf, hinic5_cqm_handle->dev);
+			return ret;
+		}
+
+		ret = hinic5_cqm_buf_list_alloc(cla_y_buf);
+		if (unlikely(ret != HINIC5_CQM_SUCCESS)) {
+			hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_ALLOC_FAIL(lvl_2_y_buf));
+			hinic5_cqm_buf_free(cla_z_buf, hinic5_cqm_handle->dev);
+			hinic5_cqm_buf_free(cla_x_buf, hinic5_cqm_handle->dev);
+			return ret;
+		}
+	}
+
+	return HINIC5_CQM_SUCCESS;
+}
+
+/* Timer variant of the level-2 allocation: Z is sized from the actual timer
+ * function count rather than max_buffer_size.
+ */
+static s32 hinic5_cqm_cla_xyz_lvl2_timer_xyz_apply(struct tag_hinic5_cqm_handle *hinic5_cqm_handle,
+						   struct tag_hinic5_cqm_cla_table *cla_table,
+						   u32 trunk_size)
+{
+	struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle;
+	struct tag_hinic5_cqm_func_capability *cap = &hinic5_cqm_handle->func_capability;
+	struct tag_hinic5_cqm_buf *cla_x_buf = &cla_table->cla_x_buf;
+	struct tag_hinic5_cqm_buf *cla_y_buf = &cla_table->cla_y_buf;
+	struct tag_hinic5_cqm_buf *cla_z_buf = &cla_table->cla_z_buf;
+	u32 timer_func_num, timer_number, actual_buffer_size;
+	s32 ret;
+
+	ret =
hinic5_cqm_cla_xyz_lvl2_xyz_apply(hinic5_cqm_handle, cla_table, trunk_size); + if (ret != HINIC5_CQM_SUCCESS) + return ret; + + /* Apply for space for CLA_Y_BUF */ + if (unlikely(hinic5_cqm_buf_alloc(hinic5_cqm_handle, cla_y_buf, false) != HINIC5_CQM_SUCCESS)) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_ALLOC_FAIL(lvl_2_y_buf)); + hinic5_cqm_buf_free(cla_x_buf, hinic5_cqm_handle->dev); + return HINIC5_CQM_FAIL; + } + + /* Ref: hinic5_cqm_capability_init_timer() */ + timer_func_num = cap->timer_pf_num + cap->timer_vf_num_actual; + timer_number = HINIC5_CQM_TIMER_ALIGN_SCALE_NUM * timer_func_num; + /* Ref: hinic5_cqm_bat_entry_init_timer() */ + actual_buffer_size = timer_number * cap->timer_basic_size; + + /* Ref: hinic5_cqm_cla_xyz_lvl2_xyz_apply() */ + cla_z_buf->buf_number = (ALIGN(actual_buffer_size, trunk_size)) / trunk_size; + cla_z_buf->page_number = cla_z_buf->buf_number << cla_table->trunk_order; + + /* Apply for space for CLA_Z_BUF */ + if (unlikely(hinic5_cqm_buf_alloc(hinic5_cqm_handle, cla_z_buf, false) != HINIC5_CQM_SUCCESS)) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_ALLOC_FAIL(lvl_2_z_buf)); + hinic5_cqm_buf_free(cla_y_buf, hinic5_cqm_handle->dev); + hinic5_cqm_buf_free(cla_x_buf, hinic5_cqm_handle->dev); + return HINIC5_CQM_FAIL; + } + + hinic5_cqm_dbg(handle->dev_hdl, + "timer xyz apply: x buf va 0x%lX, pa 0x%lX. y buf num %u, z buf num %u\n", + (uintptr_t)cla_x_buf->buf_list[0].va, (uintptr_t)cla_x_buf->buf_list[0].pa, + cla_y_buf->buf_number, cla_z_buf->buf_number); + + return HINIC5_CQM_SUCCESS; +} + +/** + * Level-2 CLA for timer + * Allocates CLA_X_BUF, CLA_Y_BUF, and CLA_Z_BUF during initialization. + * + * SMF Timer accesses VF's spokes by offset based on timer_vf_id_start. + * Some VF may not require initialization, the allocation is based on VF segments. 
+ *
+ * The mapping from 1st CLA (Y buf) to 2nd CLA (Z buf) is as follows:
+ *
+ * <pre>
+ * ▯ Empty buffer ▮ Buffer with pointer to Z buffer
+ *
+ * Ptr to first timer PF Ptr to VF seg N start
+ * (timer_pf_id_start) (timer_vf_id_start) (timer_vf_segs[N].start)
+ * | | |
+ * 1st CLA ▮▮▮▮▮▮▮▮▮▮▮▮▮▯▯▯▯▯▮▮▮..▮▮▮▯▯▯▯▯▯▯▮▮▮▮▮
+ * ┊ ┊ ╰───────╮ ╰────╮ ┊╭───────────╯ ┊
+ * ┊ Ptr to VF seg 0 start ╰─────╮┊ ┊┊ ┊
+ * ┊ (timer_vf_segs[0].start) ┊┊ ┊┊ ┊
+ * ↓ ↓ ↓↓ ↓↓ ↓
+ * 2nd CLA ▯▯▯▯...▯▯▯▯▯▯▯▯▯....▯▯▯▯▯▯▯▯▯▯▯▯▯.......▯▯▯▯
+ * \______________/\____________________/\______/\____________________/
+ * timer_pf_num timer_vf_segs[0].num .... timer_vf_segs[N].num
+ * </pre>
+ */
+static s32 hinic5_cqm_cla_xyz_lvl2_timer(struct tag_hinic5_cqm_handle *hinic5_cqm_handle,
+					 struct tag_hinic5_cqm_cla_table *cla_table,
+					 u32 trunk_size)
+{
+	struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle;
+	struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability;
+	struct tag_hinic5_cqm_buf *cla_x_buf = &cla_table->cla_x_buf;
+	struct tag_hinic5_cqm_buf *cla_y_buf = &cla_table->cla_y_buf;
+	struct tag_hinic5_cqm_buf *cla_z_buf = &cla_table->cla_z_buf;
+	u8 gpa_check_enable = hinic5_cqm_handle->func_capability.gpa_check_enable;
+	u32 func_timer_size, func_z_buf_num;
+	u32 base_idx, sub_idx, func_num;
+	int i;
+
+	/* Segmented VF deployment is only taken for static allocation when the
+	 * actual VF count differs from the configured one; otherwise fall back
+	 * to the generic level-2 path.
+	 */
+	if (!cla_table->alloc_static ||
+	    func_cap->timer_vf_num == func_cap->timer_vf_num_actual)
+		return hinic5_cqm_cla_xyz_lvl2(hinic5_cqm_handle, cla_table, trunk_size);
+
+	func_cap->timer_vf_deploy_with_segs = true;
+
+	hinic5_cqm_cla_xyz_lvl2_param_init(cla_table, trunk_size);
+
+	/* Allocate the X/Y/Z buffers before any Y->Z mapping is built. */
+	if (hinic5_cqm_cla_xyz_lvl2_timer_xyz_apply(hinic5_cqm_handle, cla_table, trunk_size) != HINIC5_CQM_SUCCESS)
+		return HINIC5_CQM_FAIL;
+
+	/* Number of Z bufs backing one function's timer area. */
+	func_timer_size = HINIC5_CQM_TIMER_ALIGN_SCALE_NUM * func_cap->timer_basic_size;
+	func_z_buf_num = func_timer_size / trunk_size;
+
+	/* Fill the gpa with the z buf list into the y buf for PF. */
+	base_idx = 0;
+	sub_idx = 0;
+	func_num = func_cap->timer_pf_num;
+	if (hinic5_cqm_cla_map_buf(hinic5_cqm_handle, cla_y_buf, cla_z_buf,
+				   base_idx * func_z_buf_num,
+				   sub_idx * func_z_buf_num,
+				   func_num * func_z_buf_num,
+				   gpa_check_enable) == HINIC5_CQM_FAIL)
+		goto mapping_buf_fail;
+
+	/* Fill the gpa with the z buf list into the y buf for VF. */
+	for (i = 0; i < ARRAY_SIZE(func_cap->timer_vf_segs); i++) {
+		u16 seg_start = func_cap->timer_vf_segs[i].start;
+
+		/* start == 0 marks the end of the used segment array. */
+		if (seg_start == 0)
+			break;
+
+		base_idx = func_cap->timer_pf_num +
+			   (seg_start - func_cap->timer_vf_id_start);
+		/* Z bufs are packed back to back: this segment begins right
+		 * after the functions mapped in previous iterations.
+		 */
+		sub_idx += func_num;
+		func_num = func_cap->timer_vf_segs[i].num;
+		if (hinic5_cqm_cla_map_buf(hinic5_cqm_handle, cla_y_buf, cla_z_buf,
+					   base_idx * func_z_buf_num,
+					   sub_idx * func_z_buf_num,
+					   func_num * func_z_buf_num,
+					   gpa_check_enable) == HINIC5_CQM_FAIL)
+			goto mapping_buf_fail;
+	}
+
+	/* Fill the gpa with the y buf list into the x buf.
+	 * After the x and y bufs are applied for, this function will not fail.
+	 * Use void to forcibly convert the return of the function.
+	 */
+	(void)hinic5_cqm_cla_fill_buf(hinic5_cqm_handle, cla_x_buf, cla_y_buf, gpa_check_enable);
+
+	return HINIC5_CQM_SUCCESS;
+
+mapping_buf_fail:
+	hinic5_cqm_err(handle->dev_hdl,
+		       "Failed to create mapping from Y buf to Z buf. base_idx %u, sub_idx %u, func_num %u",
+		       base_idx, sub_idx, func_num);
+	hinic5_cqm_buf_free(cla_z_buf, hinic5_cqm_handle->dev);
+	hinic5_cqm_buf_free(cla_y_buf, hinic5_cqm_handle->dev);
+	hinic5_cqm_buf_free(cla_x_buf, hinic5_cqm_handle->dev);
+	return HINIC5_CQM_FAIL;
+}
+
+/* Smallest page order whose buffer can hold one object of this CLA table. */
+static inline int min_order_for_cla_obj(struct tag_hinic5_cqm_cla_table *cla_table)
+{
+	return get_order(cla_table->obj_size);
+}
+
+/* Compute how many CLA levels are needed to address max_size bytes when each
+ * CLA buffer is (PAGE_SIZE << order) bytes and one buffer stores
+ * buf_size / sizeof(dma_addr_t) GPA entries.
+ */
+static u32 calc_cla_lvl(u64 max_size, u32 order)
+{
+	const u64 buf_size = (u64)PAGE_SIZE << order;
+	/* Number of GPA entries one CLA buffer can hold. */
+	const u64 buf_addr_cap = buf_size / sizeof(dma_addr_t);
+
+	if (max_size <= buf_size)
+		return HINIC5_CQM_CLA_LVL_0;
+	if (max_size <= buf_size * buf_addr_cap)
+		return HINIC5_CQM_CLA_LVL_1;
+	if (max_size <= buf_size * buf_addr_cap * buf_addr_cap)
+		return HINIC5_CQM_CLA_LVL_2;
+	return HINIC5_CQM_CLA_LVL_UNSUPPORT;
+}
+
+/* Allocate the CLA buffers for one table at the given page order, dispatching
+ * on the computed CLA level; timer tables get the segmented level-2 variant.
+ */
+static s32 hinic5_cqm_cla_xyz_alloc(struct tag_hinic5_cqm_handle *hinic5_cqm_handle,
+				    struct tag_hinic5_cqm_cla_table *cla_table,
+				    u32 order)
+{
+	const u32 cla_lvl = calc_cla_lvl(cla_table->max_buffer_size, order);
+	const u32 buf_size = (u32)(PAGE_SIZE << order);
+	s32 ret;
+
+	/* Level-0 CLA occupies a small space.
+	 * Only CLA_Z_BUF can be allocated during initialization.
+	 */
+	if (cla_lvl == HINIC5_CQM_CLA_LVL_0) {
+		ret = hinic5_cqm_cla_xyz_lvl0(hinic5_cqm_handle, cla_table, buf_size);
+		if (unlikely(ret != HINIC5_CQM_SUCCESS))
+			hinic5_cqm_warn(hinic5_cqm_handle->dev, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_cla_xyz_lvl0));
+		return ret;
+	}
+
+	/* Level-1 CLA
+	 * Allocates CLA_Y_BUF and CLA_Z_BUF during initialization.
+	 */
+	if (cla_lvl == HINIC5_CQM_CLA_LVL_1) {
+		ret = hinic5_cqm_cla_xyz_lvl1(hinic5_cqm_handle, cla_table, buf_size);
+		if (unlikely(ret != HINIC5_CQM_SUCCESS))
+			hinic5_cqm_warn(hinic5_cqm_handle->dev, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_cla_xyz_lvl1));
+		return ret;
+	}
+
+	/* Level-2 CLA
+	 * Allocates CLA_X_BUF, CLA_Y_BUF, and CLA_Z_BUF during initialization.
+	 */
+	if (cla_lvl == HINIC5_CQM_CLA_LVL_2) {
+		if (cla_table->type == HINIC5_CQM_BAT_ENTRY_T_TIMER) {
+			ret = hinic5_cqm_cla_xyz_lvl2_timer(hinic5_cqm_handle, cla_table, buf_size);
+			if (unlikely(ret != HINIC5_CQM_SUCCESS))
+				hinic5_cqm_warn(hinic5_cqm_handle->dev, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_cla_xyz_lvl2_timer));
+			return ret;
+		} else {
+			ret = hinic5_cqm_cla_xyz_lvl2(hinic5_cqm_handle, cla_table, buf_size);
+			if (unlikely(ret != HINIC5_CQM_SUCCESS))
+				hinic5_cqm_warn(hinic5_cqm_handle->dev, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_cla_xyz_lvl2));
+			return ret;
+		}
+	}
+
+	/* The current memory management mode does not support such a large
+	 * buffer addressing. The order value needs to be increased.
+	 */
+	hinic5_cqm_err(hinic5_cqm_handle->dev,
+		       "Cla alloc: cla max_buffer_size 0x%x exceeds support range\n",
+		       cla_table->max_buffer_size);
+	return HINIC5_CQM_FAIL;
+}
+
+/**
+ * Try hugepages for CLA tables, fallback to 4K pages.
+ * Fallback is limited to alloc_pages() failures during CLA buffers init.
+ */ +static s32 hinic5_cqm_cla_xyz_hugepage(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cla_table *cla_table) +{ + const u32 max_size = cla_table->max_buffer_size; + int min_order, max_order, order, ret; + + min_order = min_order_for_cla_obj(cla_table); + max_order = get_order(max_size); + if (max_order > MAX_ORDER) + max_order = MAX_ORDER; + + hinic5_cqm_dbg(hinic5_cqm_handle->dev, + "Cla alloc: try hugepage, size 0x%x, order %d - %d.\n", + max_size, min_order, max_order); + + for (order = max_order; order >= min_order; order--) { + ret = hinic5_cqm_cla_xyz_alloc(hinic5_cqm_handle, cla_table, (u32)order); + if (ret == HINIC5_CQM_BUF_ALLOC_BUDDY_PAGES_FAIL) { + hinic5_cqm_warn(hinic5_cqm_handle->dev, + "Cla alloc: insufficient pages (order %d).\n", + order); + continue; + } + + if (unlikely(ret != HINIC5_CQM_SUCCESS)) + hinic5_cqm_err(hinic5_cqm_handle->dev, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_cla_xyz_alloc)); + return ret; + } + + return HINIC5_CQM_FAIL; +} + +static s32 hinic5_cqm_cla_xyz_check(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cla_table *cla_table) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + + /* Check whether obj_size is 2^n-aligned. An error is reported when + * obj_size is 0 or 1. + */ + if (!hinic5_cqm_check_align(cla_table->obj_size)) { + hinic5_cqm_err(handle->dev_hdl, + "Cla alloc: cla_type %u, obj_size 0x%x is not align on 2^n\n", + cla_table->type, cla_table->obj_size); + return HINIC5_CQM_FAIL; + } + + if (min_order_for_cla_obj(cla_table) > MAX_ORDER) { + hinic5_cqm_err(hinic5_cqm_handle->dev, + "Cla alloc: cla_type %u, obj_size 0x%x is too big\n", + cla_table->type, cla_table->obj_size); + return HINIC5_CQM_FAIL; + } + + return HINIC5_CQM_SUCCESS; +} + +/** + * Prototype : hinic5_cqm_cla_xyz + * Description : Calculate the number of levels of CLA tables and allocate + * space for each level of CLA table. 
+ * Input : struct tag_hinic5_cqm_handle *hinic5_cqm_handle
+ * struct tag_hinic5_cqm_cla_table *cla_table
+ * Output : None
+ * Return Value : s32
+ * 1.Date : 2015/5/15
+ * Modification : Created function
+ */
+STATIC s32 hinic5_cqm_cla_xyz(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, struct tag_hinic5_cqm_cla_table *cla_table)
+{
+	struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle;
+	s32 ret = HINIC5_CQM_FAIL;
+
+	/* The BAT and CLA of the Fake VF are maintained by the parent function. */
+	if (HINIC5_CQM_IS_FAKE_CHILD(hinic5_cqm_handle)) {
+		hinic5_cqm_dbg(handle->dev_hdl,
+			       "Cla alloc: cla_type %u, obj_num 0x%x, fake child func skip alloc\n",
+			       cla_table->type, cla_table->obj_num);
+		return HINIC5_CQM_SUCCESS;
+	}
+
+	/* If the capability(obj_num) is set to 0, the CLA does not need to be
+	 * initialized and exits directly.
+	 */
+	if (cla_table->obj_num == 0) {
+		hinic5_cqm_info(handle->dev_hdl,
+				"Cla alloc: cla_type %u, obj_num 0, don't alloc buffer\n",
+				cla_table->type);
+		return HINIC5_CQM_SUCCESS;
+	}
+
+	hinic5_cqm_info(handle->dev_hdl,
+			"Cla alloc: cla_type %u, obj_num 0x%x, hugetable_hint %d\n",
+			cla_table->type, cla_table->obj_num, cla_table->hugepage_hint);
+
+	ret = hinic5_cqm_cla_xyz_check(hinic5_cqm_handle, cla_table);
+	if (ret != HINIC5_CQM_SUCCESS)
+		return ret;
+
+	ret = hinic5_cqm_cla_xyz_hinic5_vram_name_init(cla_table, handle);
+	if (ret != HINIC5_CQM_SUCCESS)
+		return ret;
+
+	/* Try hugepages for CLA tables. */
+	if (unlikely(cla_table->hugepage_hint))
+		return hinic5_cqm_cla_xyz_hugepage(hinic5_cqm_handle, cla_table);
+
+	/* Build CLA tables with specified page order. */
+	if ((int)cla_table->trunk_order < min_order_for_cla_obj(cla_table)) {
+		hinic5_cqm_err(handle->dev_hdl,
+			       "Cla alloc: cla type %u, obj_size 0x%x is out of a CLA buffer(order %u)\n",
+			       cla_table->type, cla_table->obj_size, cla_table->trunk_order);
+		return HINIC5_CQM_FAIL;
+	}
+	return hinic5_cqm_cla_xyz_alloc(hinic5_cqm_handle, cla_table, cla_table->trunk_order);
+}
+
+/* For VFs with Secure Memory enabled, force a single static level-0 style
+ * layout: one trunk large enough for the whole table, no hugepages, no
+ * dynamic allocation.
+ */
+static void update_entry_cap_for_secure_mem(struct tag_hinic5_cqm_handle *hinic5_cqm_handle,
+					    struct tag_hinic5_cqm_cla_table *cla_table,
+					    struct tag_hinic5_cqm_func_capability *capability)
+{
+	struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle;
+
+	if (!secure_mem_en || !HINIC5_CQM_IS_VF(hinic5_cqm_handle))
+		return;
+
+	/* No multi-level CLA and dynamic allocation
+	 * when Secure Memory is enabled.
+	 */
+	cla_table->trunk_order = (u32)get_order(cla_table->max_buffer_size);
+	cla_table->hugepage_hint = false;
+	cla_table->alloc_static = true;
+	hinic5_cqm_info(handle->dev_hdl, "Secure mem: cla_type=%u, max_buffer_size=0x%x, order=%u\n",
+			cla_table->type, cla_table->max_buffer_size, cla_table->trunk_order);
+}
+
+/* Capability init for the HASH table entry: always statically allocated. */
+static void init_hash_entry_cap(struct tag_hinic5_cqm_handle *hinic5_cqm_handle,
+				struct tag_hinic5_cqm_cla_table *cla_table,
+				struct tag_hinic5_cqm_func_capability *capability)
+{
+	cla_table->obj_size = capability->hash_basic_size;
+	cla_table->obj_num = capability->hash_number;
+	cla_table->max_buffer_size = cla_table->obj_size * cla_table->obj_num;
+	cla_table->trunk_order = capability->pagesize_reorder;
+	cla_table->alloc_static = true;
+}
+
+/* Capability init for the QPC entry; allocation mode comes from capability. */
+static void init_qpc_entry_cap(struct tag_hinic5_cqm_handle *hinic5_cqm_handle,
+			       struct tag_hinic5_cqm_cla_table *cla_table,
+			       struct tag_hinic5_cqm_func_capability *capability)
+{
+	cla_table->obj_size = capability->qpc_basic_size;
+	cla_table->obj_num = capability->qpc_number;
+	cla_table->max_buffer_size = cla_table->obj_size * cla_table->obj_num;
+	cla_table->trunk_order = capability->pagesize_reorder;
+	cla_table->alloc_static = capability->qpc_alloc_static;
+
+	if (hinic5_cqm_cla_hugepage_hint)
+		cla_table->hugepage_hint = true;
+
+	update_entry_cap_for_secure_mem(hinic5_cqm_handle, cla_table, capability);
+}
+
+/* Capability init for the SCQC entry. */
+static void init_scqc_entry_cap(struct tag_hinic5_cqm_handle *hinic5_cqm_handle,
+				struct tag_hinic5_cqm_cla_table *cla_table,
+				struct tag_hinic5_cqm_func_capability *capability)
+{
+	cla_table->obj_size = capability->scqc_basic_size;
+	cla_table->obj_num = capability->scqc_number;
+	cla_table->max_buffer_size = cla_table->obj_size * cla_table->obj_num;
+	cla_table->trunk_order = capability->pagesize_reorder;
+	cla_table->alloc_static = capability->scqc_alloc_static;
+
+	update_entry_cap_for_secure_mem(hinic5_cqm_handle, cla_table, capability);
+}
+
+/* Capability init for the SRQC entry. */
+static void init_srqc_entry_cap(struct tag_hinic5_cqm_handle *hinic5_cqm_handle,
+				struct tag_hinic5_cqm_cla_table *cla_table,
+				struct tag_hinic5_cqm_func_capability *capability)
+{
+	cla_table->obj_size = capability->srqc_basic_size;
+	cla_table->obj_num = capability->srqc_number;
+	cla_table->max_buffer_size = cla_table->obj_size * cla_table->obj_num;
+	cla_table->trunk_order = capability->pagesize_reorder;
+	cla_table->alloc_static = capability->srqc_alloc_static;
+
+	update_entry_cap_for_secure_mem(hinic5_cqm_handle, cla_table, capability);
+}
+
+/* Capability init for the MPT entry. */
+static void init_mpt_entry_cap(struct tag_hinic5_cqm_handle *hinic5_cqm_handle,
+			       struct tag_hinic5_cqm_cla_table *cla_table,
+			       struct tag_hinic5_cqm_func_capability *capability)
+{
+	cla_table->trunk_order = capability->pagesize_reorder;
+	cla_table->max_buffer_size = capability->mpt_number * capability->mpt_basic_size;
+	cla_table->obj_size = capability->mpt_basic_size;
+	cla_table->obj_num = capability->mpt_number;
+	/* CCB decided. MPT uses only static application scenarios. */
+	cla_table->alloc_static = true;
+
+	update_entry_cap_for_secure_mem(hinic5_cqm_handle, cla_table, capability);
+}
+
+/* Capability init for the GID entry. */
+static void init_gid_entry_cap(struct tag_hinic5_cqm_handle *hinic5_cqm_handle,
+			       struct tag_hinic5_cqm_cla_table *cla_table,
+			       struct tag_hinic5_cqm_func_capability *capability)
+{
+	/* Level-0 CLA table required */
+	cla_table->obj_size = capability->gid_basic_size;
+	cla_table->obj_num = capability->gid_number;
+	cla_table->max_buffer_size = cla_table->obj_size * cla_table->obj_num;
+	/* Trunk order chosen so the whole table fits in one level-0 buffer. */
+	cla_table->trunk_order = hinic5_cqm_shift(ALIGN(cla_table->max_buffer_size, PAGE_SIZE) / PAGE_SIZE);
+	cla_table->alloc_static = true;
+}
+
+/* Capability init for the LUN entry. */
+static void init_lun_entry_cap(struct tag_hinic5_cqm_handle *hinic5_cqm_handle,
+			       struct tag_hinic5_cqm_cla_table *cla_table,
+			       struct tag_hinic5_cqm_func_capability *capability)
+{
+	cla_table->obj_size = capability->lun_basic_size;
+	cla_table->obj_num = capability->lun_number;
+	cla_table->max_buffer_size = cla_table->obj_size * cla_table->obj_num;
+	cla_table->trunk_order = CLA_TABLE_PAGE_ORDER;
+	cla_table->alloc_static = true;
+}
+
+/* Capability init for the TASKMAP entry. */
+static void init_taskmap_entry_cap(struct tag_hinic5_cqm_handle *hinic5_cqm_handle,
+				   struct tag_hinic5_cqm_cla_table *cla_table,
+				   struct tag_hinic5_cqm_func_capability *capability)
+{
+	cla_table->obj_size = capability->taskmap_basic_size;
+	cla_table->obj_num = capability->taskmap_number;
+	cla_table->max_buffer_size = cla_table->obj_size * cla_table->obj_num;
+	cla_table->trunk_order = HINIC5_CQM_4K_PAGE_ORDER;
+	cla_table->alloc_static = true;
+}
+
+/* Capability init for the L3I entry. */
+static void init_l3i_entry_cap(struct tag_hinic5_cqm_handle *hinic5_cqm_handle,
+			       struct tag_hinic5_cqm_cla_table *cla_table,
+			       struct tag_hinic5_cqm_func_capability *capability)
+{
+	cla_table->obj_size = capability->l3i_basic_size;
+	cla_table->obj_num = capability->l3i_number;
+	cla_table->max_buffer_size = cla_table->obj_size * cla_table->obj_num;
+	cla_table->trunk_order = CLA_TABLE_PAGE_ORDER;
+	cla_table->alloc_static = true;
+}
+
+static void init_childc_entry_cap(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cla_table *cla_table, + struct tag_hinic5_cqm_func_capability *capability) +{ + cla_table->obj_size = capability->childc_basic_size; + cla_table->obj_num = capability->childc_number; + cla_table->max_buffer_size = cla_table->obj_size * cla_table->obj_num; + cla_table->trunk_order = capability->pagesize_reorder; + cla_table->alloc_static = true; +} + +static void init_timer_entry_cap(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cla_table *cla_table, + struct tag_hinic5_cqm_func_capability *capability) +{ + /* Ensure that the basic size of the timer buffer page does not + * exceed 128 x 4 KB. Otherwise, clearing the timer buffer of + * the function is complex. + */ + cla_table->obj_size = capability->timer_basic_size; + cla_table->obj_num = capability->timer_number; + cla_table->max_buffer_size = cla_table->obj_size * cla_table->obj_num; + cla_table->trunk_order = HINIC5_CQM_8K_PAGE_ORDER; + cla_table->alloc_static = true; + + if (hinic5_cqm_cla_hugepage_hint) + cla_table->hugepage_hint = true; +} + +static void init_xid2cid_entry_cap(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cla_table *cla_table, + struct tag_hinic5_cqm_func_capability *capability) +{ + cla_table->obj_size = capability->xid2cid_basic_size; + cla_table->obj_num = capability->xid2cid_number; + cla_table->max_buffer_size = cla_table->obj_size * cla_table->obj_num; + cla_table->trunk_order = HINIC5_CQM_8K_PAGE_ORDER; + cla_table->alloc_static = true; + + if (capability->bat_cid_index_bit_width > 0) + cla_table->max_index_bit = capability->bat_cid_index_bit_width - 1; +} + +static void init_reorder_entry_cap(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cla_table *cla_table, + struct tag_hinic5_cqm_func_capability *capability) +{ + /* This entry supports only IWARP and doesn't support GPA validity check. 
*/ + cla_table->obj_size = capability->reorder_basic_size; + cla_table->obj_num = capability->reorder_number; + cla_table->max_buffer_size = cla_table->obj_size * cla_table->obj_num; + cla_table->trunk_order = capability->pagesize_reorder; + cla_table->alloc_static = true; +} + +typedef void (*init_entry_cap)(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cla_table *cla_table, + struct tag_hinic5_cqm_func_capability *capability); + +static const init_entry_cap init_entry_cap_funcs[HINIC5_CQM_BAT_ENTRY_T_MAX] = { + NULL, /* HINIC5_CQM_BAT_ENTRY_T_CFG */ + init_hash_entry_cap, + init_qpc_entry_cap, + init_scqc_entry_cap, + init_srqc_entry_cap, + init_mpt_entry_cap, + init_gid_entry_cap, + init_lun_entry_cap, + init_taskmap_entry_cap, + init_l3i_entry_cap, + init_childc_entry_cap, + init_timer_entry_cap, + init_xid2cid_entry_cap, + init_reorder_entry_cap, + NULL, /* HINIC5_CQM_BAT_ENTRY_T_INVALID */ +}; + +static void hinic5_cqm_cla_init_entry_capability(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cla_table *cla_table, + struct tag_hinic5_cqm_func_capability *capability) +{ + cla_table->max_index_bit = HINIC5_CQM_MAX_INDEX_BIT_DEFAULT; + + if (cla_table->type < ARRAY_SIZE(init_entry_cap_funcs) && + init_entry_cap_funcs[cla_table->type]) + init_entry_cap_funcs[cla_table->type](hinic5_cqm_handle, cla_table, capability); +} + +static s32 hinic5_cqm_cla_init_entry_memory(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, u32 entry_idx) +{ + struct hinic5_hwdev *hwdev = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_bat_table *bat_table = &hinic5_cqm_handle->bat_table; + struct tag_hinic5_cqm_cla_table *cla_table = &bat_table->entry[entry_idx]; + struct tag_hinic5_cqm_cla_table *cla_table_tmp = NULL; + u32 entry_type = cla_table->type; + u32 i; + int ret; + + /* When the SMF API LB is mode 1 or 2, some entries need to be + * configured for all enabled SMFs and the address space is independent. 
+ */ + if (HINIC5_CQM_IS_LB_MODE_1_OR_2(hinic5_cqm_handle) && + (entry_type == HINIC5_CQM_BAT_ENTRY_T_TIMER || entry_type == HINIC5_CQM_BAT_ENTRY_T_HASH || + (entry_type == HINIC5_CQM_BAT_ENTRY_T_XID2CID && COMM_SUPPORT_VIRTIO_FC_CACHE(hwdev)))) { + for (i = 0; i < hinic5_cqm_handle->func_capability.smf_max_num; i++) { + if (cla_table->type == HINIC5_CQM_BAT_ENTRY_T_TIMER) + cla_table_tmp = &bat_table->timer_entry[i]; + else if (entry_type == HINIC5_CQM_BAT_ENTRY_T_HASH) + cla_table_tmp = &bat_table->hash_entry[i]; + else + cla_table_tmp = &bat_table->xid2cid_entry[i]; + + memcpy(cla_table_tmp, + cla_table, sizeof(struct tag_hinic5_cqm_cla_table)); + + ret = snprintf(cla_table_tmp->name, HINIC5_VRAM_NAME_MAX_LEN, + "%s%s%01u", cla_table->name, + HINIC5_VRAM_HINIC5_CQM_CLA_SMF_BASE, i); + if (ret < 0) { + hinic5_cqm_err(hinic5_cqm_handle->ex_handle->dev_hdl, + "hinic5_cqm cla timer hinic5_vram name snprintf_s failed"); + hinic5_cqm_cla_uninit(hinic5_cqm_handle, entry_idx); + return HINIC5_CQM_FAIL; + } + + if (hinic5_cqm_cla_xyz(hinic5_cqm_handle, cla_table_tmp) == + HINIC5_CQM_FAIL) { + hinic5_cqm_cla_uninit(hinic5_cqm_handle, entry_idx); + return HINIC5_CQM_FAIL; + } + } + return HINIC5_CQM_SUCCESS; + } + + if (hinic5_cqm_cla_xyz(hinic5_cqm_handle, cla_table) == HINIC5_CQM_FAIL) { + hinic5_cqm_cla_uninit(hinic5_cqm_handle, entry_idx); + return HINIC5_CQM_FAIL; + } + + return HINIC5_CQM_SUCCESS; +} + +static s32 hinic5_cqm_cla_init_entry(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_func_capability *capability) +{ + struct tag_hinic5_cqm_bat_table *bat_table = &hinic5_cqm_handle->bat_table; + struct tag_hinic5_cqm_cla_table *cla_table = NULL; + s32 ret; + u32 i = 0; + int err; + + for (i = 0; i < HINIC5_CQM_BAT_ENTRY_MAX; i++) { + cla_table = &bat_table->entry[i]; + cla_table->type = bat_table->bat_entry_type[i]; + + err = snprintf(cla_table->name, HINIC5_VRAM_NAME_MAX_LEN, + "%s%s%s%02u", hinic5_cqm_handle->name, 
HINIC5_VRAM_HINIC5_CQM_CLA_BASE, + HINIC5_VRAM_HINIC5_CQM_CLA_TYPE_BASE, cla_table->type); + if (err < 0) { + hinic5_cqm_err(hinic5_cqm_handle->ex_handle->dev_hdl, + "hinic5_cqm cla table hinic5_vram name snprintf_s failed"); + return HINIC5_CQM_FAIL; + } + + mutex_init(&cla_table->lock); + + hinic5_cqm_cla_init_entry_capability(hinic5_cqm_handle, cla_table, capability); + + /* Those entries don't need to alloc memory */ + if (cla_table->type < HINIC5_CQM_BAT_ENTRY_T_HASH || + cla_table->type > HINIC5_CQM_BAT_ENTRY_T_REORDER) { + continue; + } + + /* Timer entry is only deployed in PPF */ + if (cla_table->type == HINIC5_CQM_BAT_ENTRY_T_TIMER && + !HINIC5_CQM_IS_PPF(hinic5_cqm_handle)) + continue; + + ret = hinic5_cqm_cla_init_entry_memory(hinic5_cqm_handle, i); + if (ret != HINIC5_CQM_SUCCESS) + return HINIC5_CQM_FAIL; + } + + return HINIC5_CQM_SUCCESS; +} + +static u32 hinic5_cqm_cla_get_ctx_mem_size(struct tag_hinic5_cqm_func_capability *capability, u32 cla_type) +{ + u32 basic_size; + u32 num; + + switch (cla_type) { + case HINIC5_CQM_BAT_ENTRY_T_SCQC: + basic_size = capability->scqc_basic_size; + num = capability->scqc_number; + break; + + case HINIC5_CQM_BAT_ENTRY_T_SRQC: + basic_size = capability->srqc_basic_size; + num = capability->srqc_number; + break; + case HINIC5_CQM_BAT_ENTRY_T_QPC: + basic_size = capability->qpc_basic_size; + num = capability->qpc_number; + break; + case HINIC5_CQM_BAT_ENTRY_T_MPT: + basic_size = capability->mpt_basic_size; + num = capability->mpt_number; + break; + default: + return 0; + } + + return basic_size * num; +} + +#if defined(__UEFI__) || defined(SECURE_MEM_STUB) +static s32 hinic5_cqm_stub_get_func_secure_mem_size(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_func_capability *capability, + u32 *total_size) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + u32 cla_type[] = { HINIC5_CQM_BAT_ENTRY_T_SCQC, + HINIC5_CQM_BAT_ENTRY_T_QPC, + HINIC5_CQM_BAT_ENTRY_T_MPT, + 
HINIC5_CQM_BAT_ENTRY_T_SRQC }; + u32 cla_num = sizeof(cla_type) / sizeof(cla_type[0]); + u32 mem_size, i; + + /* mem size check */ + for (i = 0; i < cla_num; i++) { + mem_size = hinic5_cqm_cla_get_ctx_mem_size(capability, cla_type[i]); + if (!HINIC5_CQM_IS_SECURE_MEMSIZE_VALID(mem_size)) { + hinic5_cqm_err(handle->dev_hdl, + "%s: mem check failed, type=%d, mem_size=0x%x\n", + __func__, cla_type[i], mem_size); + return HINIC5_CQM_FAIL; + } + *total_size += mem_size; + } + + return HINIC5_CQM_SUCCESS; +} + +/* secure mem当前打桩由sdk申请,产品化时,由QEMU分配后,读SML表获取 */ +static s32 hinic5_cqm_stub_alloc_secure_mem(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_func_capability *capability) +{ + struct hinic5_cqm_secure_mem_info *secure_mem_info = &hinic5_cqm_handle->bat_table.func_secure_mem; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct device *dev = hinic5_cqm_handle->dev; + u32 secure_mem_size = 0; + u32 ret, order, map_size; + + ret = hinic5_cqm_stub_get_func_secure_mem_size(hinic5_cqm_handle, capability, &secure_mem_size); + if (ret != HINIC5_CQM_SUCCESS) + return ret; + + order = (u32)get_order(secure_mem_size); + /* map total mem size(2^n) */ + map_size = 1 << (order + PAGE_SHIFT); + hinic5_cqm_info(handle->dev_hdl, "total_mem_size=0x%x, map_size=0x%x, order=%d\n", + secure_mem_size, map_size, order); + secure_mem_info->addr.va = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); + if (!secure_mem_info->addr.va) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_ALLOC_FAIL(secure_mem)); + return HINIC5_CQM_FAIL; + } + + secure_mem_info->len = map_size; + secure_mem_info->addr.pa = dma_map_single(dev, secure_mem_info->addr.va, map_size, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, secure_mem_info->addr.pa)) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_MAP_FAIL(secure_mem)); + free_pages((ulong)secure_mem_info->addr.va, order); + return HINIC5_CQM_FAIL; + } + return HINIC5_CQM_SUCCESS; +} + +#else + +/* 
+ Obtain the VF's secure memory address from the SML table and map it. */
+static s32 hinic5_cqm_cla_get_secure_mem(struct tag_hinic5_cqm_handle *hinic5_cqm_handle)
+{
+	struct hinic5_cqm_secure_mem_info *secure_mem_info = &hinic5_cqm_handle->bat_table.func_secure_mem;
+	struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle;
+	s32 ret;
+
+	ret = hinic5_get_secure_mem_cfg(handle, &secure_mem_info->addr.pa, &secure_mem_info->len);
+	if (ret == HINIC5_CQM_CONTINUE) {
+		/* No secure memory configured: disable the feature globally
+		 * and continue without it.
+		 */
+		secure_mem_en = false;
+		return HINIC5_CQM_SUCCESS;
+	} else if (ret != HINIC5_CQM_SUCCESS) {
+		hinic5_cqm_err(handle->dev_hdl, "failed to get secure mem, ret: %d, func_id %hu\n",
+			       ret, hinic5_global_func_id((void *)handle));
+		return HINIC5_CQM_FAIL;
+	}
+
+	secure_mem_info->addr.va = ioremap(secure_mem_info->addr.pa, secure_mem_info->len);
+	if (!secure_mem_info->addr.va) {
+		hinic5_cqm_err(handle->dev_hdl,
+			       "failed to remap secure mem, func_id %u, gpa=0x%llx, len=0x%x\n",
+			       hinic5_global_func_id((void *)handle), (u64)secure_mem_info->addr.pa,
+			       secure_mem_info->len);
+		return HINIC5_CQM_FAIL;
+	}
+
+	hinic5_cqm_info(handle->dev_hdl,
+			"get secure mem: func_id=0x%x, gpa=0x%llx, va=0x%lx, len=0x%x\n",
+			hinic5_global_func_id((void *)handle), (u64)secure_mem_info->addr.pa,
+			(uintptr_t)secure_mem_info->addr.va, secure_mem_info->len);
+	return HINIC5_CQM_SUCCESS;
+}
+#endif
+
+#if defined(__UEFI__) || defined(SECURE_MEM_STUB)
+/* Secure mem is currently stubbed and allocated by the SDK; freed here when
+ * the ctx size is invalid. In production it is freed by QEMU.
+ */
+static void hinic5_cqm_free_secure_mem(struct tag_hinic5_cqm_handle *hinic5_cqm_handle)
+{
+	struct hinic5_cqm_secure_mem_info *secure_mem_info = &hinic5_cqm_handle->bat_table.func_secure_mem;
+	struct device *dev = hinic5_cqm_handle->dev;
+	u32 order;
+
+	if (secure_mem_info->addr.va) {
+		order = (u32)get_order(secure_mem_info->len);
+		dma_unmap_single(dev, secure_mem_info->addr.pa,
+				 secure_mem_info->len,
+				 DMA_BIDIRECTIONAL);
+		free_pages((ulong)(secure_mem_info->addr.va), order);
+		secure_mem_info->addr.va = NULL;
+	}
+}
+#else
+/* Non-stub variant: only the ioremap mapping is ours to undo. */
+static void
+hinic5_cqm_free_secure_mem(struct tag_hinic5_cqm_handle *hinic5_cqm_handle)
+{
+	struct hinic5_cqm_secure_mem_info *secure_mem_info = &hinic5_cqm_handle->bat_table.func_secure_mem;
+
+	if (secure_mem_en && secure_mem_info->addr.va != NULL) {
+		iounmap(secure_mem_info->addr.va);
+		secure_mem_info->addr.va = NULL;
+	}
+}
+#endif
+
+/* Find the BAT entry of the requested CLA type; NULL when absent. */
+static struct tag_hinic5_cqm_cla_table *hinic5_cqm_cla_get_entry(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, u32 cla_type)
+{
+	u32 *bat_entry_type = hinic5_cqm_handle->bat_table.bat_entry_type;
+	struct tag_hinic5_cqm_cla_table *cla_entry = hinic5_cqm_handle->bat_table.entry;
+	u32 i;
+
+	for (i = 0; i < HINIC5_CQM_BAT_ENTRY_MAX; i++) {
+		if (bat_entry_type[i] == cla_type)
+			return &cla_entry[i];
+	}
+
+	return NULL;
+}
+
+/* Carve one context type's region out of the secure memory at *mem_offset
+ * and advance the offset.
+ */
+static s32 hinic5_cqm_cla_secure_mem_assign(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, struct tag_hinic5_cqm_func_capability *capability,
+					    u32 cla_type, u32 *mem_offset)
+{
+	struct hinic5_cqm_secure_mem_info *secure_mem_info = &hinic5_cqm_handle->bat_table.func_secure_mem;
+	struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle;
+	struct tag_hinic5_cqm_cla_table *cla_entry = NULL;
+	u32 cla_mem_size;
+
+	cla_entry = hinic5_cqm_cla_get_entry(hinic5_cqm_handle, cla_type);
+	if (!cla_entry) {
+		hinic5_cqm_err(handle->dev_hdl, "Get cla entry failed: cla_type=%u\n", cla_type);
+		return HINIC5_CQM_FAIL;
+	}
+	cla_entry->secure_mem.va = secure_mem_info->addr.va + *mem_offset;
+	cla_entry->secure_mem.pa = secure_mem_info->addr.pa + *mem_offset;
+
+	cla_mem_size = hinic5_cqm_cla_get_ctx_mem_size(capability, cla_type);
+
+	*mem_offset += cla_mem_size;
+	/* NOTE(review): '>=' also rejects a layout that consumes the secure
+	 * memory exactly to the last byte (*mem_offset == len); confirm
+	 * whether '>' is the intended bound.
+	 */
+	if (*mem_offset >= secure_mem_info->len) {
+		hinic5_cqm_err(handle->dev_hdl, "Mem size exceeds: mem_offset=0x%x, max_size=0x%x\n",
+			       *mem_offset, secure_mem_info->len);
+		return HINIC5_CQM_FAIL;
+	}
+
+	return HINIC5_CQM_SUCCESS;
+}
+
+/* Acquire the function's secure memory and partition it among the context
+ * types; on any assignment failure the memory is released again.
+ */
+static s32 hinic5_cqm_cla_secure_mem_init(struct tag_hinic5_cqm_handle *hinic5_cqm_handle)
+{
+	struct tag_hinic5_cqm_func_capability *capability = &hinic5_cqm_handle->func_capability;
+	struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle;
+	/* SCQC must be first (UB-VTP Table Use SCQC) */
+	u32 cla_type[] = { HINIC5_CQM_BAT_ENTRY_T_SCQC,
+			   HINIC5_CQM_BAT_ENTRY_T_QPC,
+			   HINIC5_CQM_BAT_ENTRY_T_MPT,
+			   HINIC5_CQM_BAT_ENTRY_T_SRQC };
+	u32 cla_num = sizeof(cla_type) / sizeof(cla_type[0]);
+	u32 mem_offset = 0;
+	s32 ret;
+	u32 i;
+
+	/* Secure mem is currently stubbed and allocated by the SDK; in
+	 * production it is allocated by QEMU and read from the VF BAR space.
+	 */
+#if defined(__UEFI__) || defined(SECURE_MEM_STUB)
+	if (hinic5_cqm_stub_alloc_secure_mem(hinic5_cqm_handle, capability) != HINIC5_CQM_SUCCESS)
+#else
+	if ((hinic5_cqm_cla_get_secure_mem(hinic5_cqm_handle)) != HINIC5_CQM_SUCCESS)
+#endif
+		return HINIC5_CQM_FAIL;
+
+	/* The getter may have disabled the feature when none is configured. */
+	if (!secure_mem_en) {
+		return HINIC5_CQM_SUCCESS;
+	}
+
+	for (i = 0; i < cla_num; i++) {
+		ret = hinic5_cqm_cla_secure_mem_assign(hinic5_cqm_handle, capability, cla_type[i], &mem_offset);
+		if (ret != HINIC5_CQM_SUCCESS) {
+			hinic5_cqm_err(handle->dev_hdl, "Cla secure mem assign failed: cla_type=%u\n", cla_type[i]);
+			hinic5_cqm_free_secure_mem(hinic5_cqm_handle);
+			return HINIC5_CQM_FAIL;
+		}
+	}
+
+	return HINIC5_CQM_SUCCESS;
+}
+
+/**
+ * Prototype : hinic5_cqm_cla_init
+ * Description : Initialize the CLA table.
+ * Input : struct tag_hinic5_cqm_handle *hinic5_cqm_handle
+ * Output : None
+ * Return Value : s32
+ * 1.Date : 2015/5/15
+ * Modification : Created function
+ */
+s32 hinic5_cqm_cla_init(struct tag_hinic5_cqm_handle *hinic5_cqm_handle)
+{
+	struct tag_hinic5_cqm_func_capability *capability = &hinic5_cqm_handle->func_capability;
+	struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle;
+	s32 ret;
+
+	/* VF contexts may live in secure memory; set that up before the
+	 * CLA entries are built on top of it.
+	 */
+	if (secure_mem_en && HINIC5_CQM_IS_VF(hinic5_cqm_handle)) {
+		if (unlikely(hinic5_cqm_cla_secure_mem_init(hinic5_cqm_handle) != HINIC5_CQM_SUCCESS)) {
+			hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(secure_mem_init));
+			return HINIC5_CQM_FAIL;
+		}
+	}
+
+	/* Applying for CLA Entries */
+	if (hinic5_cqm_cla_init_entry(hinic5_cqm_handle, capability) != HINIC5_CQM_SUCCESS) {
+		hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_cla_init_entry));
+		return HINIC5_CQM_FAIL;
+	}
+
+	/* The BAT and CLA of the Fake VF are maintained by the parent function. */
+	if (HINIC5_CQM_IS_FAKE_CHILD(hinic5_cqm_handle)) {
+		return hinic5_cqm_cla_reset(hinic5_cqm_handle);
+	}
+
+	/* After the CLA entry is applied, the address is filled
+	 * in the BAT table.
+	 */
+	hinic5_cqm_bat_fill_cla(hinic5_cqm_handle);
+
+	/* Instruct the chip to update the BAT table. */
+	if (hinic5_cqm_bat_update(hinic5_cqm_handle) != HINIC5_CQM_SUCCESS) {
+		hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_bat_update));
+		goto err;
+	}
+
+	hinic5_cqm_info(handle->dev_hdl, "Timer start: func_type=%d, timer_enable=%u\n",
+			hinic5_cqm_handle->func_attribute.func_type,
+			hinic5_cqm_handle->func_capability.timer_enable);
+
+	if (HINIC5_CQM_IS_PPF(hinic5_cqm_handle) &&
+	    hinic5_cqm_handle->func_capability.timer_enable == HINIC5_CQM_TIMER_ENABLE) {
+		/* Enable the timer after the timer resources are applied for */
+		ret = hinic5_ppf_tmr_start(handle);
+		if (ret != HINIC5_CQM_SUCCESS) {
+			hinic5_cqm_err(handle->dev_hdl, "PPF timer start failed, err %d\n", ret);
+			goto err;
+		}
+	}
+
+	return HINIC5_CQM_SUCCESS;
+
+err:
+	/* Release every entry; uninit tolerates partially built tables. */
+	hinic5_cqm_cla_uninit(hinic5_cqm_handle, HINIC5_CQM_BAT_ENTRY_MAX);
+	return HINIC5_CQM_FAIL;
+}
+
+/* Inverse operation of hinic5_cqm_cla_xyz() */
+static void hinic5_cqm_cla_table_free_cache_inv(struct tag_hinic5_cqm_handle *hinic5_cqm_handle,
+						struct tag_hinic5_cqm_cla_table *cla_table,
+						s32 *inv_flag)
+{
+	/* The CLA memory are maintained by the parent function. */
+	if (HINIC5_CQM_IS_FAKE_CHILD(hinic5_cqm_handle))
+		return;
+
+	hinic5_cqm_buf_free_cache_inv(hinic5_cqm_handle, &cla_table->cla_x_buf, inv_flag);
+	hinic5_cqm_buf_free_cache_inv(hinic5_cqm_handle, &cla_table->cla_y_buf, inv_flag);
+	hinic5_cqm_buf_free_cache_inv(hinic5_cqm_handle, &cla_table->cla_z_buf, inv_flag);
+}
+
+/* Release one entry's CLA buffers (if it holds any) and its lock. */
+STATIC INLINE void hinic5_cqm_cla_uninit_entry(struct tag_hinic5_cqm_handle *hinic5_cqm_handle,
+					       struct tag_hinic5_cqm_cla_table *cla_table,
+					       s32 *inv_flag)
+{
+	if (cla_table->type != HINIC5_CQM_BAT_ENTRY_T_INVALID)
+		hinic5_cqm_cla_table_free_cache_inv(hinic5_cqm_handle, cla_table, inv_flag);
+	mutex_deinit(&cla_table->lock);
+}
+
+/**
+ * Prototype : hinic5_cqm_cla_uninit
+ * Description : Deinitialize the CLA table.
+ * Input : struct tag_hinic5_cqm_handle *hinic5_cqm_handle
+ * Output : None
+ * Return Value : void
+ * 1.Date : 2015/5/15
+ * Modification : Created function
+ */
+void hinic5_cqm_cla_uninit(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, u32 entry_numb)
+{
+	struct hinic5_hwdev *hwdev = hinic5_cqm_handle->ex_handle;
+	struct tag_hinic5_cqm_bat_table *bat_table = &hinic5_cqm_handle->bat_table;
+	struct tag_hinic5_cqm_cla_table *cla_table = NULL;
+	s32 inv_flag = 0;
+	u32 i;
+
+	/* Release the first entry_numb BAT entries (callers pass the index of
+	 * the first entry that was never initialized).
+	 */
+	for (i = 0; i < entry_numb; i++) {
+		cla_table = &bat_table->entry[i];
+		hinic5_cqm_cla_uninit_entry(hinic5_cqm_handle, cla_table, &inv_flag);
+	}
+
+	/* When the lb mode is 1/2, the following entries allocated to all SMFs
+	 * needs to be released.
+	 */
+	if (HINIC5_CQM_IS_LB_MODE_1_OR_2(hinic5_cqm_handle) && HINIC5_CQM_IS_PPF(hinic5_cqm_handle)) {
+		for (i = 0; i < hinic5_cqm_handle->func_capability.smf_max_num; i++) {
+			cla_table = &bat_table->timer_entry[i];
+			hinic5_cqm_cla_uninit_entry(hinic5_cqm_handle, cla_table, &inv_flag);
+		}
+	}
+
+	if (HINIC5_CQM_IS_LB_MODE_1_OR_2(hinic5_cqm_handle)) {
+		for (i = 0; i < hinic5_cqm_handle->func_capability.smf_max_num; i++) {
+			cla_table = &bat_table->hash_entry[i];
+			hinic5_cqm_cla_uninit_entry(hinic5_cqm_handle, cla_table, &inv_flag);
+		}
+	}
+
+	if (HINIC5_CQM_IS_LB_MODE_1_OR_2(hinic5_cqm_handle) && COMM_SUPPORT_VIRTIO_FC_CACHE(hwdev)) {
+		for (i = 0; i < hinic5_cqm_handle->func_capability.smf_max_num; i++) {
+			cla_table = &bat_table->xid2cid_entry[i];
+			hinic5_cqm_cla_uninit_entry(hinic5_cqm_handle, cla_table, &inv_flag);
+		}
+	}
+
+	/* Free the secure memory; in production it is freed by QEMU instead. */
+	hinic5_cqm_free_secure_mem(hinic5_cqm_handle);
+}
+
+/* Send one CLA-update command box to the chip and log the payload on error. */
+static s32 hinic5_cqm_cla_update_cmd(struct tag_hinic5_cqm_handle *hinic5_cqm_handle,
+				     struct tag_hinic5_cqm_cmd_buf *buf_in,
+				     hinic5_cqm_cla_update_cmd_s *cmd_info)
+{
+	struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle;
+	s32 ret = HINIC5_CQM_FAIL;
+	u8 cmd;
+
+	hinic5_cqm_handle->cmdq_ops->prepare_cmd_buf_cla_update(cmd_info, buf_in, &cmd);
+	ret = hinic5_cqm_send_cmd_box((void *)(hinic5_cqm_handle->ex_handle), HINIC5_CQM_MOD_HINIC5_CQM,
+				      cmd, buf_in, NULL, NULL,
+				      HINIC5_CQM_CMD_TIMEOUT, HINIC5_CHANNEL_DEFAULT);
+	if (ret != HINIC5_CQM_SUCCESS) {
+		hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_send_cmd_box));
+		hinic5_cqm_err(handle->dev_hdl, "Cla alloc: hinic5_cqm_cla_update, hinic5_cqm_send_cmd_box_ret=%d\n",
+			       ret);
+		hinic5_cqm_err(handle->dev_hdl, "Cla alloc: hinic5_cqm_cla_update, cla_update_cmd: 0x%x 0x%x 0x%x 0x%x\n",
+			       cmd_info->gpa_h, cmd_info->gpa_l, cmd_info->value_h, cmd_info->value_l);
+		return HINIC5_CQM_FAIL;
+	}
+
+	return HINIC5_CQM_SUCCESS;
+}
+
+/* Fill a CLA-update command with the parent and child GPAs and apply the
+ * GPA-valid bit according to the update mode.
+ */
+static void hinic5_cqm_cla_cmd_init(hinic5_cqm_cla_update_cmd_s *cmd, struct tag_hinic5_cqm_handle *hinic5_cqm_handle, dma_addr_t parant_pa,
+				    dma_addr_t child_pa, u8 cla_update_mode)
+{
+	struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle;
+	u64 spu_en = 0;
+	dma_addr_t pa = 0;
+	u8 gpa_check_enable = hinic5_cqm_handle->func_capability.gpa_check_enable;
+
+	/* Bit 63 of the address carries the ACS/SPU flag. */
+	spu_en = ((u64)hinic5_cqm_get_acs_spu_en(hinic5_cqm_handle)) << 0x3F;
+
+	pa = (parant_pa | spu_en);
+	cmd->gpa_h = HINIC5_CQM_ADDR_HI(pa);
+	cmd->gpa_l = HINIC5_CQM_ADDR_LW(pa);
+
+	pa = (child_pa | spu_en);
+	cmd->value_h = HINIC5_CQM_ADDR_HI(pa);
+	cmd->value_l = HINIC5_CQM_ADDR_LW(pa);
+
+	/* current CLA GPA CHECK */
+	if (gpa_check_enable != 0) {
+		switch (cla_update_mode) {
+		/* gpa[0]=1 means this GPA is valid */
+		case HINIC5_CQM_CLA_RECORD_NEW_GPA:
+			cmd->value_l |= 1;
+			break;
+		/* gpa[0]=0 means this GPA is valid */
+		case HINIC5_CQM_CLA_DEL_GPA_WITHOUT_CACHE_INVALID:
+		case HINIC5_CQM_CLA_DEL_GPA_WITH_CACHE_INVALID:
+			cmd->value_l &= (~1);
+			break;
+		default:
+			hinic5_cqm_err(handle->dev_hdl, "Cla alloc: %s, wrong cla_update_mode=%u\n", __func__, cla_update_mode);
+			break;
+		}
+	}
+}
+
+/* Issue the CLA-update command to every SMF present in the partition mask. */
+static s32 hinic5_cqm_cla_update_all_smf(struct tag_hinic5_cqm_handle *hinic5_cqm_handle,
+					 hinic5_cqm_cla_update_cmd_s *cmd,
+					 struct tag_hinic5_cqm_cmd_buf *buf_in)
+{
+	struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability;
+	u32 i = 0;
+	s32 ret = HINIC5_CQM_FAIL;
+
+	for (i = 0; i < func_cap->smf_max_num; i++) {
+		if ((func_cap->smf_pg & (1U << i)) != 0) {
+			cmd->smf_id = i;
+			ret = hinic5_cqm_cla_update_cmd(hinic5_cqm_handle, buf_in, cmd);
+			if (ret != HINIC5_CQM_SUCCESS)
+				return ret;
+		}
+	}
+	return ret;
+}
+
+/**
+ * Prototype : hinic5_cqm_cla_update
+ * Description : Send a command to update the CLA table.
+ * Input : struct tag_hinic5_cqm_handle *hinic5_cqm_handle,
+ * struct tag_hinic5_cqm_buf_list *buf_node_parent parent node of the content to
+ * be updated
+ * struct tag_hinic5_cqm_buf_list *buf_node_child Subnode for which the buffer
+ * is to be applied
+ * u32 child_index Index of a child node.
+ * Output : None
+ * Return Value : s32
+ * 1.Date : 2015/5/15
+ * Modification : Created function
+ */
+STATIC s32 hinic5_cqm_cla_update(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, const struct tag_hinic5_cqm_buf_list *buf_node_parent,
+				 const struct tag_hinic5_cqm_buf_list *buf_node_child, u32 child_index, u8 cla_update_mode)
+{
+	struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle;
+	struct tag_hinic5_cqm_cmd_buf *buf_in = NULL;
+	hinic5_cqm_cla_update_cmd_s cmd;
+	s32 ret = HINIC5_CQM_FAIL;
+
+	buf_in = hinic5_cqm_cmd_alloc(hinic5_cqm_handle->ex_handle);
+	if (unlikely(buf_in == NULL)) {
+		HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_ALLOC_FAIL(buf_in));
+		return HINIC5_CQM_FAIL;
+	}
+
+	/* Fill command format, convert to big endian.
*/ + hinic5_cqm_cla_cmd_init(&cmd, hinic5_cqm_handle, (buf_node_parent->pa + (child_index * sizeof(dma_addr_t))), buf_node_child->pa, + cla_update_mode); + + hinic5_cqm_dbg(handle->dev_hdl, + "Cla alloc: %s, gpa=0x%x 0x%x, value=0x%x 0x%x, cla_update_mode=0x%x\n", + __func__, cmd.gpa_h, cmd.gpa_l, cmd.value_h, cmd.value_l, cla_update_mode); + + /* In non-fake mode, set func_id to 0xffff. + * Indicates the current func fake mode, set func_id to the + * specified value, This is a fake func_id. + */ + if (HINIC5_CQM_IS_FAKE_CHILD_AGENT(hinic5_cqm_handle)) + cmd.func_id = hinic5_cqm_handle->func_attribute.func_global_idx; + else + cmd.func_id = 0xffff; + + /* Normal mode is 1822 traditional mode and is configured on SMF0. */ + /* Mode 0 is hashed to 4 SMF engines (excluding PPF) by func ID. */ + if (HINIC5_CQM_IS_LB_MODE_NORMAL(hinic5_cqm_handle) || + (HINIC5_CQM_IS_LB_MODE_0(hinic5_cqm_handle) && !HINIC5_CQM_IS_PPF(hinic5_cqm_handle))) { + cmd.smf_id = hinic5_cqm_funcid2smfid(hinic5_cqm_handle); + ret = hinic5_cqm_cla_update_cmd(hinic5_cqm_handle, buf_in, &cmd); + /* Modes 1/2 are allocated to four SMF engines by flow. + * Therefore, one function needs to be allocated to four SMF engines. + */ + /* Mode 0 PPF needs to be configured on 4 engines, + * and the timer resources need to be shared by the 4 engines. + */ + } else if (HINIC5_CQM_IS_LB_MODE_1_OR_2(hinic5_cqm_handle) || + (HINIC5_CQM_IS_LB_MODE_0(hinic5_cqm_handle) && HINIC5_CQM_IS_PPF(hinic5_cqm_handle))) { + ret = hinic5_cqm_cla_update_all_smf(hinic5_cqm_handle, &cmd, buf_in); + } else { + hinic5_cqm_err(handle->dev_hdl, "Cla update: unsupported lb mode=%u\n", hinic5_cqm_handle->func_capability.lb_mode); + ret = HINIC5_CQM_FAIL; + } + + hinic5_cqm_cmd_free((void *)(hinic5_cqm_handle->ex_handle), buf_in); + return ret; +} + +/** + * Prototype : hinic5_cqm_cla_alloc + * Description : Trunk page for applying for a CLA. 
+ * Input : struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + * struct tag_hinic5_cqm_cla_table *cla_table, + * struct tag_hinic5_cqm_buf_list *buf_node_parent parent node of the content to + * be updated + * struct tag_hinic5_cqm_buf_list *buf_node_child subnode for which the buffer + * is to be applied + * u32 child_index index of a child node + * Output : None + * Return Value : s32 + * 1.Date : 2015/5/15 + * Modification : Created function + */ +static s32 hinic5_cqm_cla_alloc(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cla_table *cla_table, + struct tag_hinic5_cqm_buf_list *buf_node_parent, + struct tag_hinic5_cqm_buf_list *buf_node_child, u32 child_index) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + s32 ret = HINIC5_CQM_FAIL; + + /* Apply for trunk page */ + buf_node_child->va = (u8 *)(uintptr_t)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + cla_table->trunk_order); + if (unlikely(buf_node_child->va == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_ALLOC_FAIL(va)); + return HINIC5_CQM_FAIL; + } + /* PCI mapping */ + buf_node_child->pa = dma_map_single(hinic5_cqm_handle->dev, buf_node_child->va, + PAGE_SIZE << cla_table->trunk_order, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(hinic5_cqm_handle->dev, buf_node_child->pa) != 0) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_MAP_FAIL(buf_node_child->pa)); + goto err1; + } + + /* Notify the chip of trunk_pa so that the chip fills in cla entry */ + ret = hinic5_cqm_cla_update(hinic5_cqm_handle, buf_node_parent, buf_node_child, + child_index, HINIC5_CQM_CLA_RECORD_NEW_GPA); + if (ret != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_cla_update)); + goto err2; + } + + return HINIC5_CQM_SUCCESS; + +err2: + dma_unmap_single(hinic5_cqm_handle->dev, buf_node_child->pa, + PAGE_SIZE << cla_table->trunk_order, + DMA_BIDIRECTIONAL); +err1: + free_pages((ulong)(uintptr_t)(buf_node_child->va), cla_table->trunk_order); + 
buf_node_child->va = NULL; + return HINIC5_CQM_FAIL; +} + +static void hinic5_cqm_unmap_and_free_pages(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, struct tag_hinic5_cqm_buf_list *buf_node, u32 order) +{ + /* Remove PCI mapping from the trunk page */ + dma_unmap_single(hinic5_cqm_handle->dev, buf_node->pa, PAGE_SIZE << order, DMA_BIDIRECTIONAL); + + /* Release trunk page */ + free_pages((ulong)(uintptr_t)(buf_node->va), order); + buf_node->va = NULL; +} + +/** + * Prototype : hinic5_cqm_cla_free_without_cache_invalid + * Description : Release trunk page of a CLA + * Input : struct tag_hinic5_cqm_handle *hinic5_cqm_handle + * struct tag_hinic5_cqm_cla_table *cla_table + * struct tag_hinic5_cqm_buf_list *buf_node + * Output : None + * Return Value : void + * 1.Date : 2015/5/15 + * Modification : Created function + */ +static void hinic5_cqm_cla_free_without_cache_invalid(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cla_table *cla_table, + struct tag_hinic5_cqm_buf_list *buf_node_parent, + struct tag_hinic5_cqm_buf_list *buf_node_child, + u32 child_index) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + + if (hinic5_cqm_cla_update(hinic5_cqm_handle, buf_node_parent, buf_node_child, + child_index, HINIC5_CQM_CLA_DEL_GPA_WITHOUT_CACHE_INVALID) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_cla_update)); + return; + } + /* Remove PCI mapping from the trunk page and Release trunk page */ + hinic5_cqm_unmap_and_free_pages(hinic5_cqm_handle, buf_node_child, cla_table->trunk_order); +} + +STATIC void hinic5_cqm_cla_free_with_cache_invalid(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cla_table *cla_table, + struct tag_hinic5_cqm_buf_list *buf_node_parent, + struct tag_hinic5_cqm_buf_list *buf_node_child, + u32 child_index) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + u32 trunk_size; + + if (hinic5_cqm_cla_update(hinic5_cqm_handle, 
buf_node_parent, buf_node_child, + child_index, HINIC5_CQM_CLA_DEL_GPA_WITH_CACHE_INVALID) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_cla_update)); + return; + } + + /* invalid cache */ + trunk_size = (u32)(PAGE_SIZE << cla_table->trunk_order); + if (hinic5_cqm_cla_cache_invalid(hinic5_cqm_handle, buf_node_child->pa, trunk_size) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_cla_cache_invalid)); + return; + } + + /* Remove PCI mapping from the trunk page and Release trunk page */ + hinic5_cqm_unmap_and_free_pages(hinic5_cqm_handle, buf_node_child, cla_table->trunk_order); +} + +static inline u8 *hinic5_cqm_cla_do_get_lvl0(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cla_table *cla_table, + u32 index, u32 count, dma_addr_t *pa) +{ + struct tag_hinic5_cqm_buf *cla_z_buf = &cla_table->cla_z_buf; + u32 offset = 0; + + /* Level 0 CLA pages are statically allocated. */ + offset = index * cla_table->obj_size; + *pa = cla_z_buf->buf_list->pa + offset; + return (u8 *)(cla_z_buf->buf_list->va) + offset; +} + +static inline u8 *hinic5_cqm_cla_do_get_lvl1(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cla_table *cla_table, + u32 index, u32 count, dma_addr_t *pa) +{ + struct tag_hinic5_cqm_buf *cla_y_buf = &cla_table->cla_y_buf; + struct tag_hinic5_cqm_buf *cla_z_buf = &cla_table->cla_z_buf; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_buf_list *buf_node_y = NULL; + struct tag_hinic5_cqm_buf_list *buf_node_z = NULL; + u32 y_index = 0; + u32 z_index = 0; + u8 *ret_addr = NULL; + u32 offset = 0; + + z_index = index & ((1U << (cla_table->z + 1)) - 1); + y_index = index >> (cla_table->z + 1); + + if (y_index >= cla_z_buf->buf_number) { + hinic5_cqm_err(handle->dev_hdl, + "Cla get: index exceeds buf_number, y_index %u, z_buf_number %u\n", + y_index, cla_z_buf->buf_number); + return NULL; + } 
+ buf_node_z = &cla_z_buf->buf_list[y_index]; + buf_node_y = cla_y_buf->buf_list; + + /* The z buf node does not exist, applying for a page first. */ + if (!buf_node_z->va) { + if (hinic5_cqm_cla_alloc(hinic5_cqm_handle, cla_table, buf_node_y, buf_node_z, + y_index) == HINIC5_CQM_FAIL) { + hinic5_cqm_err(handle->dev_hdl, + HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_cla_alloc)); + hinic5_cqm_err(handle->dev_hdl, + "Cla get: cla_table->type=%u\n", + cla_table->type); + return NULL; + } + } + + hinic5_cqm_dbg_on(hinic5_cqm_verbose, handle->dev_hdl, + "Cla get: 1L: z_refcount=0x%x, count=0x%x\n", + buf_node_z->refcount, count); + buf_node_z->refcount += count; + offset = z_index * cla_table->obj_size; + ret_addr = (u8 *)(buf_node_z->va) + offset; + *pa = buf_node_z->pa + offset; + + return ret_addr; +} + +static inline u8 *hinic5_cqm_cla_do_get_lvl2(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cla_table *cla_table, + u32 index, u32 count, dma_addr_t *pa) +{ + struct tag_hinic5_cqm_buf *cla_x_buf = &cla_table->cla_x_buf; + struct tag_hinic5_cqm_buf *cla_y_buf = &cla_table->cla_y_buf; + struct tag_hinic5_cqm_buf *cla_z_buf = &cla_table->cla_z_buf; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_buf_list *buf_node_x = NULL; + struct tag_hinic5_cqm_buf_list *buf_node_y = NULL; + struct tag_hinic5_cqm_buf_list *buf_node_z = NULL; + u32 z_index = index & ((1U << (cla_table->z + 1)) - 1); + u32 y_index = (index >> (cla_table->z + 1)) & ((1U << (cla_table->y - cla_table->z)) - 1); + u32 x_index = index >> (cla_table->y + 1); + u64 tmp = x_index * ((u32)(PAGE_SIZE << cla_table->trunk_order) / sizeof(dma_addr_t)) + y_index; + u8 *ret_addr = NULL; + u32 offset = 0; + + if (x_index >= cla_y_buf->buf_number || tmp >= cla_z_buf->buf_number) { + hinic5_cqm_err(handle->dev_hdl, + "Cla get: index exceeds buf_number, x_index %u, y_index %u, y_buf_number %u, z_buf_number %u\n", + x_index, y_index, cla_y_buf->buf_number, 
cla_z_buf->buf_number); + return NULL; + } + + buf_node_x = cla_x_buf->buf_list; + buf_node_y = &cla_y_buf->buf_list[x_index]; + buf_node_z = &cla_z_buf->buf_list[tmp]; + + /* The y buf node does not exist, applying for pages for y node. */ + if (!buf_node_y->va) { + if (hinic5_cqm_cla_alloc(hinic5_cqm_handle, cla_table, buf_node_x, buf_node_y, x_index) == HINIC5_CQM_FAIL) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_cla_alloc)); + return NULL; + } + } + + /* The z buf node does not exist, applying for pages for z node. */ + if (!buf_node_z->va) { + if (hinic5_cqm_cla_alloc(hinic5_cqm_handle, cla_table, buf_node_y, buf_node_z, y_index) == HINIC5_CQM_FAIL) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_cla_alloc)); + if (buf_node_y->refcount == 0) + /* To release node Y, cache_invalid is + * required. + */ + hinic5_cqm_cla_free_with_cache_invalid(hinic5_cqm_handle, cla_table, buf_node_x, buf_node_y, x_index); + return NULL; + } + + hinic5_cqm_dbg_on(hinic5_cqm_verbose, handle->dev_hdl, + "Cla get: 2L: y_refcount=0x%x\n", buf_node_y->refcount); + /* reference counting of the y buffer node needs to increase + * by 1. 
+ */ + buf_node_y->refcount++; + } + + hinic5_cqm_dbg_on(hinic5_cqm_verbose, handle->dev_hdl, + "Cla get: 2L: z_refcount=0x%x, count=0x%x\n", buf_node_z->refcount, count); + buf_node_z->refcount += count; + offset = z_index * cla_table->obj_size; + ret_addr = (u8 *)(buf_node_z->va) + offset; + *pa = buf_node_z->pa + offset; + + return ret_addr; +} + +static inline u8 *hinic5_cqm_cla_do_get(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cla_table *cla_table, + u32 index, u32 count, dma_addr_t *pa) +{ + if (cla_table->cla_lvl == HINIC5_CQM_CLA_LVL_0) + return hinic5_cqm_cla_do_get_lvl0(hinic5_cqm_handle, cla_table, index, count, pa); + if (cla_table->cla_lvl == HINIC5_CQM_CLA_LVL_1) + return hinic5_cqm_cla_do_get_lvl1(hinic5_cqm_handle, cla_table, index, count, pa); + if (cla_table->cla_lvl == HINIC5_CQM_CLA_LVL_2) + return hinic5_cqm_cla_do_get_lvl2(hinic5_cqm_handle, cla_table, index, count, pa); + WARN_ON(true); + return NULL; +} + +/** + * Prototype : hinic5_cqm_cla_get + * Description : Apply for block buffer in number of count from the index + * position in the cla table. If the buffer is dynamic, this + * function may block. + * Input : struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + * struct tag_hinic5_cqm_cla_table *cla_table, + * u32 index, + * u32 count, + * dma_addr_t *pa + * Output : None + * Return Value : u8 * + * 1.Date : 2025/3/15 + * Modification : Created function + */ +u8 *hinic5_cqm_cla_get(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cla_table *cla_table, + u32 index, u32 count, dma_addr_t *pa) +{ + const bool dynamic_alloc = !cla_table->alloc_static; + u8 *ret_addr = NULL; + + /* The CLA memory of the Fake VF are holded by the parent + * function, so the Fake VF can't get the memory. 
*/ + if (HINIC5_CQM_IS_FAKE_CHILD(hinic5_cqm_handle)) + return NULL; + + if (index >= cla_table->obj_num) + return NULL; + + if (dynamic_alloc) + mutex_lock(&cla_table->lock); + + ret_addr = hinic5_cqm_cla_do_get(hinic5_cqm_handle, cla_table, index, count, pa); + + if (dynamic_alloc) + mutex_unlock(&cla_table->lock); + + return ret_addr; +} + +/** + * Prototype : hinic5_cqm_cla_put + * Description : Decrease the value of reference counting on the trunk page. + * If the value is 0, the trunk page is released. + * Input : struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + * struct tag_hinic5_cqm_cla_table *cla_table, + * u32 index, + * u32 count + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void hinic5_cqm_cla_put(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, struct tag_hinic5_cqm_cla_table *cla_table, u32 index, u32 count) +{ + struct tag_hinic5_cqm_buf *cla_z_buf = &cla_table->cla_z_buf; + struct tag_hinic5_cqm_buf *cla_y_buf = &cla_table->cla_y_buf; + struct tag_hinic5_cqm_buf *cla_x_buf = &cla_table->cla_x_buf; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_buf_list *buf_node_z = NULL; + struct tag_hinic5_cqm_buf_list *buf_node_y = NULL; + struct tag_hinic5_cqm_buf_list *buf_node_x = NULL; + u32 x_index = 0; + u32 y_index = 0; + u64 tmp; + + /* No buffer is applied for the Fake VF. */ + if (HINIC5_CQM_IS_FAKE_CHILD(hinic5_cqm_handle)) + return; + + /* The buffer is applied statically, and the reference counting + * does not need to be controlled. 
+ */ + if (cla_table->alloc_static) + return; + + mutex_lock(&cla_table->lock); + + if (cla_table->cla_lvl == HINIC5_CQM_CLA_LVL_1) { + y_index = index >> (cla_table->z + 1); + + if (y_index >= cla_z_buf->buf_number) { + hinic5_cqm_err(handle->dev_hdl, "Cla put: idx exceeds buf_number, y_idx %u, z_buf_num %u type %u\n", + y_index, cla_z_buf->buf_number, cla_table->type); + goto out; + } + + buf_node_z = &cla_z_buf->buf_list[y_index]; + buf_node_y = cla_y_buf->buf_list; + + /* When the value of reference counting on the z node page is 0, + * the z node page is released. + */ + buf_node_z->refcount -= count; + if (buf_node_z->refcount == 0) + /* The cache invalid is not required for the Z node. */ + hinic5_cqm_cla_free_without_cache_invalid(hinic5_cqm_handle, cla_table, buf_node_y, buf_node_z, y_index); + } else if (cla_table->cla_lvl == HINIC5_CQM_CLA_LVL_2) { + y_index = (index >> (cla_table->z + 1)) & ((1U << (cla_table->y - cla_table->z)) - 1); + x_index = index >> (cla_table->y + 1); + tmp = x_index * ((u32)(PAGE_SIZE << cla_table->trunk_order) / sizeof(dma_addr_t)) + y_index; + + if (x_index >= cla_y_buf->buf_number || tmp >= cla_z_buf->buf_number) { + hinic5_cqm_err(handle->dev_hdl, + "Cla put: index exceeds buf_number, x_index %u, y_index %u, y_buf_number %u, z_buf_number %u\n", + x_index, y_index, cla_y_buf->buf_number, cla_z_buf->buf_number); + goto out; + } + + buf_node_x = cla_x_buf->buf_list; + buf_node_y = &cla_y_buf->buf_list[x_index]; + buf_node_z = &cla_z_buf->buf_list[tmp]; + + /* When the value of reference counting on the z node page is 0, + * the z node page is released. + */ + buf_node_z->refcount -= count; + if (buf_node_z->refcount == 0) { + hinic5_cqm_cla_free_without_cache_invalid(hinic5_cqm_handle, cla_table, buf_node_y, buf_node_z, y_index); + + /* When the value of reference counting on the y node + * page is 0, the y node page is released. 
+ */ + buf_node_y->refcount--; + if (buf_node_y->refcount == 0) + /* Node y requires cache to be invalid. */ + hinic5_cqm_cla_free_with_cache_invalid(hinic5_cqm_handle, cla_table, buf_node_x, buf_node_y, x_index); + } + } + +out: + mutex_unlock(&cla_table->lock); +} + +/** + * Prototype : hinic5_cqm_cla_table_get + * Description : Searches for the CLA table data structure corresponding to a + * BAT entry. + * Input : struct tag_hinic5_cqm_bat_table *bat_table, + * u32 entry_type + * Output : None + * Return Value : struct tag_hinic5_cqm_cla_table * + * 1.Date : 2015/4/15 + * Modification : Created function + */ +struct tag_hinic5_cqm_cla_table *hinic5_cqm_cla_table_get(struct tag_hinic5_cqm_bat_table *bat_table, + u32 entry_type) +{ + struct tag_hinic5_cqm_cla_table *cla_table = NULL; + u32 i = 0; + + for (i = 0; i < HINIC5_CQM_BAT_ENTRY_MAX; i++) { + cla_table = &bat_table->entry[i]; + if ((cla_table != NULL) && (entry_type == cla_table->type)) + return cla_table; + } + + return NULL; +} diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_bat_cla.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_bat_cla.h new file mode 100644 index 00000000..86385221 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_bat_cla.h @@ -0,0 +1,258 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_CQM_BAT_CLA_H +#define HINIC5_CQM_BAT_CLA_H + +#include <linux/types.h> +#include <linux/mutex.h> + +#include "hinic5_hw_cfg.h" +#include "hinic5_hinic5_cqm.h" +#include "hinic5_cqm_bitmap_table.h" +#include "hinic5_cqm_object.h" + +/* When the connection check is enabled, the maximum number of connections + * supported by the chip is 1M - 63, which cannot reach 1M + */ +#define HINIC5_CQM_BAT_MAX_CONN_NUM (0x100000 - 63) +#define HINIC5_CQM_BAT_MAX_CACHE_CONN_NUM (0x100000 - 63) + +#ifndef MAX_ORDER +#ifdef MAX_PAGE_ORDER +#define MAX_ORDER 
MAX_PAGE_ORDER +#endif +#endif + +#define CLA_TABLE_PAGE_ORDER 0 +#define HINIC5_CQM_4K_PAGE_ORDER 0 +#define HINIC5_CQM_4K_PAGE_SIZE 4096 + +#define HINIC5_CQM_8K_PAGE_ORDER 1 + +#define HINIC5_CQM_BAT_ENTRY_MAX 16 +#define HINIC5_CQM_BAT_ENTRY_SIZE 16 +#define HINIC5_CQM_BAT_STORE_API_SIZE 16 +#define HINIC5_CQM_BAT_MAX (HINIC5_CQM_BAT_ENTRY_MAX * HINIC5_CQM_BAT_ENTRY_SIZE) + +#define HINIC5_CQM_BAT_SIZE_FT_RDMA_PF 240 +#define HINIC5_CQM_BAT_SIZE_FT_RDMA_VF 160 +#define HINIC5_CQM_BAT_SIZE_FT_PF 192 +#define HINIC5_CQM_BAT_SIZE_FT_VF 112 +#define HINIC5_CQM_BAT_SIZE_RDMA_PF 160 +#define HINIC5_CQM_BAT_SIZE_RDMA_VF 80 + +#define HINIC5_CQM_BAT_INDEX0 0 +#define HINIC5_CQM_BAT_INDEX1 1 +#define HINIC5_CQM_BAT_INDEX2 2 +#define HINIC5_CQM_BAT_INDEX3 3 +#define HINIC5_CQM_BAT_INDEX4 4 +#define HINIC5_CQM_BAT_INDEX5 5 +#define HINIC5_CQM_BAT_INDEX6 6 +#define HINIC5_CQM_BAT_INDEX7 7 +#define HINIC5_CQM_BAT_INDEX8 8 +#define HINIC5_CQM_BAT_INDEX9 9 +#define HINIC5_CQM_BAT_INDEX10 10 +#define HINIC5_CQM_BAT_INDEX11 11 +#define HINIC5_CQM_BAT_INDEX12 12 +#define HINIC5_CQM_BAT_INDEX13 13 +#define HINIC5_CQM_BAT_INDEX14 14 +#define HINIC5_CQM_BAT_INDEX15 15 + +enum hinic5_cqm_bat_entry_type { + HINIC5_CQM_BAT_ENTRY_T_CFG = 0, + HINIC5_CQM_BAT_ENTRY_T_HASH = 1, + HINIC5_CQM_BAT_ENTRY_T_QPC = 2, + HINIC5_CQM_BAT_ENTRY_T_SCQC = 3, + HINIC5_CQM_BAT_ENTRY_T_SRQC = 4, + HINIC5_CQM_BAT_ENTRY_T_MPT = 5, + HINIC5_CQM_BAT_ENTRY_T_GID = 6, + HINIC5_CQM_BAT_ENTRY_T_LUN = 7, + HINIC5_CQM_BAT_ENTRY_T_TASKMAP = 8, + HINIC5_CQM_BAT_ENTRY_T_L3I = 9, + HINIC5_CQM_BAT_ENTRY_T_CHILDC = 10, + HINIC5_CQM_BAT_ENTRY_T_TIMER = 11, + HINIC5_CQM_BAT_ENTRY_T_XID2CID = 12, + HINIC5_CQM_BAT_ENTRY_T_REORDER = 13, + HINIC5_CQM_BAT_ENTRY_T_INVALID = 14, + HINIC5_CQM_BAT_ENTRY_T_MAX = 15, +}; + +/* CLA update mode */ +#define HINIC5_CQM_CLA_RECORD_NEW_GPA 0 +#define HINIC5_CQM_CLA_DEL_GPA_WITHOUT_CACHE_INVALID 1 +#define HINIC5_CQM_CLA_DEL_GPA_WITH_CACHE_INVALID 2 + +#define HINIC5_CQM_CLA_LVL_0 0 
+#define HINIC5_CQM_CLA_LVL_1 1 +#define HINIC5_CQM_CLA_LVL_2 2 +#define HINIC5_CQM_CLA_LVL_UNSUPPORT 3 + +#define HINIC5_CQM_MAX_INDEX_BIT_DEFAULT 19 + +#define HINIC5_CQM_CHIP_CACHELINE 256 +#define HINIC5_CQM_CHIP_TIMER_CACHELINE 512 +#define HINIC5_CQM_OBJECT_256 256 +#define HINIC5_CQM_OBJECT_512 512 +#define HINIC5_CQM_OBJECT_1024 1024 +#define HINIC5_CQM_CHIP_GPA_MASK 0x1ffffffffffffff +#define HINIC5_CQM_CHIP_GPA_HIMASK 0x1ffffff +#define HINIC5_CQM_CHIP_GPA_LOMASK 0xffffffff +#define HINIC5_CQM_CHIP_GPA_HSHIFT 32 + +/* Aligns with 64 buckets and shifts rightward by 6 bits */ +#define HINIC5_CQM_HASH_NUMBER_UNIT 6 + +/* mem_size should be none-zero and be 2^n */ +#define HINIC5_CQM_IS_SECURE_MEMSIZE_VALID(mem_size) \ + (((mem_size) != 0) && (((mem_size) & ((mem_size) - 1)) == 0)) + +struct hinic5_cqm_mem_addr { + void *va; + dma_addr_t pa; +}; + +struct hinic5_cqm_secure_mem_info { + struct hinic5_cqm_mem_addr addr; + u32 len; +}; + +struct tag_hinic5_cqm_cla_table { + u32 type; + u32 obj_size; + u32 obj_num; + u32 max_buffer_size; + + u32 cla_lvl; + u32 trunk_order; /* Preferred page order for CLA buffer. + * Set this before calling hinic5_cqm_cla_xyz(). This value + * will be overriden by hinic5_cqm_cla_xyz() when + * hugepage_hint is enabled. 
+ */ + bool hugepage_hint; /* Hint for hugepage alloc to improve TLB locality */ + + /* Dynamic allocation */ + bool alloc_static; /* Whether the buffer is statically allocated */ + struct mutex lock; /* Lock for cla buffer allocation and free */ + + u32 max_index_bit; + u32 cacheline_x; /* x value calculated based on cacheline, used by the chip */ + u32 cacheline_y; /* y value calculated based on cacheline, used by the chip */ + u32 cacheline_z; /* z value calculated based on cacheline, used by the chip */ + u32 x; /* x value calculated based on obj_size, used by software */ + u32 y; /* y value calculated based on obj_size, used by software */ + u32 z; /* z value calculated based on obj_size, used by software */ + struct tag_hinic5_cqm_buf cla_x_buf; + struct tag_hinic5_cqm_buf cla_y_buf; + struct tag_hinic5_cqm_buf cla_z_buf; + + struct tag_hinic5_cqm_bitmap bitmap; + + struct tag_hinic5_cqm_object_table obj_table; /* Mapping table between + * indexes and objects + */ + struct hinic5_cqm_mem_addr secure_mem; /* Secure memory with consecutive physical addresses */ + + char name[HINIC5_VRAM_NAME_APPLY_LEN]; +}; + +struct tag_hinic5_cqm_bat_entry_cfg { + u32 cur_conn_num_h_4 : 4; + u32 rsv1 : 4; + u32 max_conn_num : 20; + u32 rsv2 : 4; + + u32 max_conn_cache : 10; + u32 rsv3 : 6; + u32 cur_conn_num_l_16 : 16; + + u32 bloom_filter_addr : 16; + u32 cur_conn_cache : 10; + u32 rsv4 : 6; + + u32 bucket_num : 16; + u32 bloom_filter_len : 16; +}; + +#define HINIC5_CQM_BAT_NO_BYPASS_CACHE 0 +#define HINIC5_CQM_BAT_BYPASS_CACHE 1 + +#define HINIC5_CQM_BAT_ENTRY_SIZE_256 0 +#define HINIC5_CQM_BAT_ENTRY_SIZE_512 1 +#define HINIC5_CQM_BAT_ENTRY_SIZE_1024 2 + +struct tag_hinic5_cqm_bat_entry_standerd { + u32 entry_size : 2; /* 0: 256B, 1: 512B, 2: 1024B, others reserved */ + u32 rsv1 : 6; + u32 max_number : 22; /* Maximum indexable number. Some types of CLA can only use 20 bits. 
*/ + u32 rsv2 : 2; + + u32 cla_gpa_h : 32; + + u32 cla_gpa_l : 32; + + u32 rsv3 : 8; + u32 z : 5; /* SM uses memory index [Z: 0] to access physical memory. */ + u32 y : 5; /* SM uses memory index [Y: Z+1] to access 2nd CLA. */ + u32 x : 5; /* SM uses memory index [X: Y+1] to access 1st CLA. */ + u32 rsv24 : 1; + u32 bypass : 1; /* 0: not bypass, 1: bypass */ + u32 cla_level : 2; /* 0: 0 level CLA, 1: 1 level CLA, 2: 2 levels CLA, others reserved */ + u32 rsv5 : 5; +}; + +struct tag_hinic5_cqm_bat_entry_vf2pf { + u32 cla_gpa_h : 25; + u32 pf_id : 5; + u32 fake_vf_en : 1; + u32 acs_spu_en : 1; +}; + +#define HINIC5_CQM_BAT_ENTRY_TASKMAP_NUM 4 +struct tag_hinic5_cqm_bat_entry_taskmap_addr { + u32 gpa_h; + u32 gpa_l; +}; + +struct tag_hinic5_cqm_bat_entry_taskmap { + struct tag_hinic5_cqm_bat_entry_taskmap_addr addr[HINIC5_CQM_BAT_ENTRY_TASKMAP_NUM]; +}; + +struct tag_hinic5_cqm_bat_table { + u32 bat_entry_type[HINIC5_CQM_BAT_ENTRY_MAX]; + u8 bat[HINIC5_CQM_BAT_ENTRY_MAX * HINIC5_CQM_BAT_ENTRY_SIZE]; + struct tag_hinic5_cqm_cla_table entry[HINIC5_CQM_BAT_ENTRY_MAX]; + /* Secure memory with consecutive physical addresses */ + struct hinic5_cqm_secure_mem_info func_secure_mem; + /* In LB mode 1/2, the following entries need to be configured in all + * enabled SMFs, and the GPAs must be different and independent. 
+ */ + struct tag_hinic5_cqm_cla_table timer_entry[CHIP_SMF_NUM_MAX]; + struct tag_hinic5_cqm_cla_table hash_entry[CHIP_SMF_NUM_MAX]; + struct tag_hinic5_cqm_cla_table xid2cid_entry[CHIP_SMF_NUM_MAX]; + u32 bat_size; +}; + +struct tag_hinic5_cqm_bat_update_param { + u32 smf_id; + u32 func_id; + u32 bat_offset; + u32 update_size; +}; + +s32 hinic5_cqm_bat_init(struct tag_hinic5_cqm_handle *hinic5_cqm_handle); +void hinic5_cqm_bat_uninit(struct tag_hinic5_cqm_handle *hinic5_cqm_handle); + +s32 hinic5_cqm_cla_init(struct tag_hinic5_cqm_handle *hinic5_cqm_handle); +void hinic5_cqm_cla_uninit(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, u32 entry_numb); + +u8 *hinic5_cqm_cla_get(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, struct tag_hinic5_cqm_cla_table *cla_table, + u32 index, u32 count, dma_addr_t *pa); +void hinic5_cqm_cla_put(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, struct tag_hinic5_cqm_cla_table *cla_table, + u32 index, u32 count); + +struct tag_hinic5_cqm_cla_table *hinic5_cqm_cla_table_get(struct tag_hinic5_cqm_bat_table *bat_table, + u32 entry_type); +u32 hinic5_cqm_funcid2smfid(const struct tag_hinic5_cqm_handle *hinic5_cqm_handle); + +#endif /* HINIC5_CQM_BAT_CLA_H */ diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_bitmap_table.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_bitmap_table.c new file mode 100644 index 00000000..c8449163 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_bitmap_table.c @@ -0,0 +1,1744 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/device.h> +#include <linux/mm.h> +#include <linux/gfp.h> +#ifndef __UEFI__ +#include <linux/numa.h> +#endif + +#include "ossl_knl.h" +#include "hinic5_crm.h" +#include "hinic5_hw.h" +#include "hinic5_hwdev.h" +#include 
"hinic5_hinic5_vram_api.h" + +#include "hinic5_cqm_object.h" +#include "hinic5_cqm_bat_cla.h" +#include "hinic5_cqm_cmd.h" +#include "hinic5_cqm_object_intern.h" +#include "hinic5_cqm_main.h" +#include "hinic5_vram_common.h" +#include "hinic5_cqm_cmdq.h" + +#include "comm_defs.h" +#include "hinic5_cqm_npu_cmd_defs.h" +#ifdef __UEFI__ +#include "ossl_knl_uefi.h" +#endif + +#define common_section + +#ifndef __WIN__ +struct malloc_memory { + bool (*check_alloc_mode)(const struct hinic5_hwdev *handle, const struct tag_hinic5_cqm_buf *buf); + s32 (*malloc_func)(struct hinic5_hwdev *handle, struct tag_hinic5_cqm_buf *buf); +}; + +struct free_memory { + bool (*check_alloc_mode)(const struct hinic5_hwdev *handle, const struct tag_hinic5_cqm_buf *buf); + void (*free_func)(struct tag_hinic5_cqm_buf *buf); +}; +#endif +/** + * Prototype : hinic5_cqm_swab64(Encapsulation of __swab64) + * Description : Perform big-endian conversion for a memory block (8 bytes). + * Input : u8 *addr: Start address of the memory block + * u32 cnt: Number of 8 bytes in the memory block + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void hinic5_cqm_swab64(u8 *addr, u32 cnt) +{ + u64 *temp = (u64 *)addr; + u64 value = 0; + u32 i; + + for (i = 0; i < cnt; i++) { + value = __swab64(*temp); + *temp = value; + temp++; + } +} + +/** + * Prototype : hinic5_cqm_swab32(Encapsulation of __swab32) + * Description : Perform big-endian conversion for a memory block (4 bytes). 
+ * Input : u8 *addr: Start address of the memory block + * u32 cnt: Number of 4 bytes in the memory block + * Output : None + * Return Value : void + * 1.Date : 2015/7/23 + * Modification : Created function + */ +void hinic5_cqm_swab32(u8 *addr, u32 cnt) +{ + u32 *temp = (u32 *)addr; + u32 value = 0; + u32 i; + + for (i = 0; i < cnt; i++) { + value = __swab32(*temp); + *temp = value; + temp++; + } +} + +/** + * Prototype : hinic5_cqm_shift + * Description : Calculates n in a 2^n number.(Find the logarithm of 2^n) + * Input : u32 data + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/15 + * Modification : Created function + */ +u32 hinic5_cqm_shift(u32 data) +{ + u32 data_num = data; + s32 shift = -1; + + do { + data_num >>= 1; + shift++; + } while (data_num != 0); + + return (u32)shift; +} + +/** + * Prototype : hinic5_cqm_check_align + * Description : Check whether the value is 2^n-aligned. If 0 or 1, false is + * returned. + * Input : u32 data + * Output : None + * Return Value : s32 + * 1.Date : 2015/9/15 + * Modification : Created function + */ +bool hinic5_cqm_check_align(u32 data) +{ + u32 data_num = data; + + if (data == 0) + return false; + + do { + /* When the value can be exactly divided by 2, + * the value of data is shifted right by one bit, that is, + * divided by 2. + */ + if ((data_num & 0x1) == 0) + data_num >>= 1; + /* If the value cannot be divisible by 2, the value is + * not 2^n-aligned and false is returned. + */ + else + return false; + } while (data_num != 1); + + return true; +} + +/** + * Prototype : hinic5_cqm_kmalloc_align + * Description : Allocates 2^n-byte-aligned memory for the start address. 
+ * Input : size_t size + * gfp_t flags + * u16 align_order + * Output : None + * Return Value : void * + * 1.Date : 2017/9/22 + * Modification : Created function + */ +void *hinic5_cqm_kmalloc_align(size_t size, gfp_t flags, u16 align_order) +{ + void *orig_addr = NULL; + void *align_addr = NULL; + void *index_addr = NULL; + + orig_addr = kmalloc(size + ((u64)1 << align_order) + sizeof(void *), + flags); + if (!orig_addr) + return NULL; + + index_addr = (void *)((char *)orig_addr + sizeof(void *)); + align_addr = + (void *)(uintptr_t)((((u64)(uintptr_t)index_addr + ((u64)1 << align_order) - 1) >> + align_order) << align_order); + + /* Record the original memory address for memory release. */ + index_addr = (void *)((char *)align_addr - sizeof(void *)); + *(void **)index_addr = orig_addr; + + return align_addr; +} + +/** + * Prototype : hinic5_cqm_kfree_align + * Description : Release the memory allocated for starting address alignment. + * Input : void *addr + * Output : None + * Return Value : void + * 1.Date : 2017/9/22 + * Modification : Created function + */ +void hinic5_cqm_kfree_align(void *addr) +{ + void *index_addr = NULL; + + /* Release the original memory address. 
*/ + index_addr = (void *)((char *)addr - sizeof(void *)); + + hinic5_cqm_dbg_pr_on(hinic5_cqm_verbose, + "free aligned address: %p, original address: %p\n", + addr, *(void **)index_addr); + + kfree(*(void **)index_addr); +} + +static void hinic5_cqm_write_lock(rwlock_t *lock, bool bh) +{ + if (bh) + write_lock_bh(lock); + else + write_lock(lock); +} + +static void hinic5_cqm_write_unlock(rwlock_t *lock, bool bh) +{ + if (bh) + write_unlock_bh(lock); + else + write_unlock(lock); +} + +static void hinic5_cqm_read_lock(rwlock_t *lock, bool bh) +{ + if (bh) + read_lock_bh(lock); + else + read_lock(lock); +} + +static void hinic5_cqm_read_unlock(rwlock_t *lock, bool bh) +{ + if (bh) + read_unlock_bh(lock); + else + read_unlock(lock); +} + +s32 hinic5_cqm_buf_alloc_direct(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, struct tag_hinic5_cqm_buf *buf, bool direct) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct page **pages = NULL; + u32 i, j, order; + + order = (u32)get_order(buf->buf_size); + + if (!direct) { + buf->direct.va = NULL; + return HINIC5_CQM_SUCCESS; + } + + pages = vmalloc(sizeof(struct page *) * buf->page_number); + if (!pages) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_ALLOC_FAIL(pages)); + return HINIC5_CQM_FAIL; + } + + for (i = 0; i < buf->buf_number; i++) { + for (j = 0; j < ((u32)1 << order); j++) + pages[(ulong)(unsigned int)((i << order) + j)] = + (void *)virt_to_page((void *)((uintptr_t)buf->buf_list[i].va + + PAGE_SIZE * j)); + } + + buf->direct.va = vmap(pages, buf->page_number, VM_MAP, PAGE_KERNEL); + vfree(pages); + if (!buf->direct.va) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_MAP_FAIL(buf->direct.va)); + return HINIC5_CQM_FAIL; + } + + return HINIC5_CQM_SUCCESS; +} + +#ifndef __WIN__ + +static bool check_use_hinic5_vram(const struct hinic5_hwdev *handle, const struct tag_hinic5_cqm_buf *buf) +{ + return buf->buf_info.use_hinic5_vram != 0 ? 
true : false;
+}
+
+/* True when the buffer is NOT backed by the hinic5 VRAM pool. */
+static bool check_use_non_hinic5_vram(const struct hinic5_hwdev *handle, const struct tag_hinic5_cqm_buf *buf)
+{
+	return buf->buf_info.use_hinic5_vram != 0 ? false : true;
+}
+
+/* True when a non-VRAM buffer should be allocated NUMA-node aware
+ * (service_mode == 0).
+ */
+static bool check_for_use_node_alloc(const struct hinic5_hwdev *handle, const struct tag_hinic5_cqm_buf *buf)
+{
+	if (buf->buf_info.use_hinic5_vram == 0 && handle->board_info.service_mode == 0)
+		return true;
+
+	return false;
+}
+
+/* True when a non-VRAM buffer should be allocated without NUMA-node
+ * binding (service_mode != 0).
+ */
+static bool check_for_nouse_node_alloc(const struct hinic5_hwdev *handle, const struct tag_hinic5_cqm_buf *buf)
+{
+	if (buf->buf_info.use_hinic5_vram == 0 && handle->board_info.service_mode != 0)
+		return true;
+
+	return false;
+}
+
+#ifndef __UEFI__
+/* Spread functions across the available NUMA nodes by global func id. */
+static u8 hinic5_cqm_hinic5_vram_node(struct hinic5_hwdev *handle)
+{
+	if (nr_node_ids > 0) {
+		u16 func_id = hinic5_global_func_id(handle);
+		/* nr_node_ids is the maximum number of available NUMA nodes
+		 * and never exceeds the range of u8.
+		 */
+		return (u8)(func_id % nr_node_ids);
+	}
+	return HINIC5_VRAM_NUMA_NODE0;
+}
+#endif
+
+/* Allocate one contiguous VRAM region and slice it into buf_number
+ * pieces of buf_size bytes each, recorded in buf->buf_list[].va.
+ * NOTE(review): hinic5_cqm_hinic5_vram_node() is only defined when
+ * __UEFI__ is not set, but this caller is compiled unconditionally --
+ * confirm the UEFI build provides that symbol elsewhere.
+ */
+static s32 hinic5_cqm_buf_hinic5_vram_kalloc(struct hinic5_hwdev *handle, struct tag_hinic5_cqm_buf *buf)
+{
+	void *vaddr = NULL;
+	u32 i;
+
+	vaddr = hinic5_hinic5_vram_kalloc_node(buf->buf_info.buf_hinic5_vram_name,
+					       (u64)buf->buf_size * buf->buf_number,
+					       hinic5_cqm_hinic5_vram_node(handle));
+	if (!vaddr) {
+		hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_ALLOC_FAIL(buf_page));
+		return HINIC5_CQM_FAIL;
+	}
+
+	for (i = 0; i < buf->buf_number; i++)
+		buf->buf_list[i].va = (void *)((char *)vaddr + i * (u64)buf->buf_size);
+
+	return HINIC5_CQM_SUCCESS;
+}
+
+/* Release the single VRAM region backing the buffer list; only the
+ * first entry's va is the real allocation start, the rest are slices.
+ */
+static void hinic5_cqm_buf_hinic5_vram_free(struct tag_hinic5_cqm_buf *buf)
+{
+	s32 i;
+
+	if (buf->buf_list == NULL)
+		return;
+
+	if (buf->buf_list[0].va)
+		hinic5_hinic5_vram_kfree(buf->buf_list[0].va, buf->buf_info.buf_hinic5_vram_name,
+					 (u64)buf->buf_size * buf->buf_number);
+
+	for (i = 0; i < (s32)buf->buf_number; i++)
+		buf->buf_list[i].va = NULL;
+}
+
+/* Free every page-allocated buffer in the list (non-VRAM path). */
+static void hinic5_cqm_buf_free_page_common(struct tag_hinic5_cqm_buf *buf)
+{
+	u32 order;
+	u32 i;
+
if (buf->buf_list == NULL) + return; + + order = (u32)get_order(buf->buf_size); + + for (i = 0; i < buf->buf_number; i++) { + if (buf->buf_list[i].va) { + free_pages((ulong)(uintptr_t)(buf->buf_list[i].va), order); + buf->buf_list[i].va = NULL; + } + } +} + +static s32 hinic5_cqm_buf_use_node_alloc_page(struct hinic5_hwdev *handle, struct tag_hinic5_cqm_buf *buf) +{ + struct page *newpage = NULL; + gfp_t flags = GFP_KERNEL | __GFP_ZERO; + u32 order, i; + void *va = NULL; + s32 node = dev_to_node(handle->dev_hdl); + + order = (u32)get_order(buf->buf_size); + if (order > 0) + flags |= __GFP_COMP; + + for (i = 0; i < buf->buf_number; i++) { + newpage = alloc_pages_node(node, flags, order); + if (!newpage) { + hinic5_cqm_warn(handle->dev_hdl, + "alloc buf pages fail (%u/%u)\n", + i, buf->buf_number); + break; + } + va = (void *)page_address(newpage); + /* Initialize the page after the page is applied for. + * If hash entries are involved, the initialization + * value must be 0. + */ + memset(va, 0, buf->buf_size); + buf->buf_list[i].va = va; + } + + if (i != buf->buf_number) { + hinic5_cqm_buf_free_page_common(buf); + return HINIC5_CQM_BUF_ALLOC_BUDDY_PAGES_FAIL; + } + + return HINIC5_CQM_SUCCESS; +} + +static s32 hinic5_cqm_buf_unused_node_alloc_page(struct hinic5_hwdev *handle, struct tag_hinic5_cqm_buf *buf) +{ + gfp_t flags = GFP_KERNEL | __GFP_ZERO; + u32 order, i; + void *va = NULL; + + order = (u32)get_order(buf->buf_size); + if (order > 0) + flags |= __GFP_COMP; + + for (i = 0; i < buf->buf_number; i++) { + va = (void *)(uintptr_t)ossl_get_free_pages(flags, order); + if (!va) { + hinic5_cqm_warn(handle->dev_hdl, + "alloc buf pages fail (%u/%u)\n", + i, buf->buf_number); + break; + } + /* Initialize the page after the page is applied for. + * If hash entries are involved, the initialization + * value must be 0. 
+ */ + memset(va, 0, buf->buf_size); + buf->buf_list[i].va = va; + } + + if (i != buf->buf_number) { + hinic5_cqm_buf_free_page_common(buf); + return HINIC5_CQM_BUF_ALLOC_BUDDY_PAGES_FAIL; + } + + return HINIC5_CQM_SUCCESS; +} + +static const struct malloc_memory g_malloc_funcs[] = { + {check_use_hinic5_vram, hinic5_cqm_buf_hinic5_vram_kalloc}, + {check_for_use_node_alloc, hinic5_cqm_buf_use_node_alloc_page}, + {check_for_nouse_node_alloc, hinic5_cqm_buf_unused_node_alloc_page} +}; + +static const struct free_memory g_free_funcs[] = { + {check_use_hinic5_vram, hinic5_cqm_buf_hinic5_vram_free}, + {check_use_non_hinic5_vram, hinic5_cqm_buf_free_page_common} +}; + +static s32 hinic5_cqm_buf_alloc_page(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, struct tag_hinic5_cqm_buf *buf) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + u32 malloc_funcs_num = ARRAY_SIZE(g_malloc_funcs); + u32 i; + + for (i = 0; i < malloc_funcs_num; i++) { + if (g_malloc_funcs[i].check_alloc_mode && + g_malloc_funcs[i].malloc_func && + g_malloc_funcs[i].check_alloc_mode(handle, buf)) + return g_malloc_funcs[i].malloc_func(handle, buf); + } + + hinic5_cqm_err(handle->dev_hdl, "Unknown alloc mode\n"); + + return HINIC5_CQM_FAIL; +} + +static void hinic5_cqm_buf_free_page(struct tag_hinic5_cqm_buf *buf) +{ + u32 free_funcs_num = ARRAY_SIZE(g_free_funcs); + u32 i; + + for (i = 0; i < free_funcs_num; i++) { + if (g_free_funcs[i].check_alloc_mode && + g_free_funcs[i].free_func && + g_free_funcs[i].check_alloc_mode(NULL, buf)) + return g_free_funcs[i].free_func(buf); + } +} + +static s32 hinic5_cqm_buf_alloc_map(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, struct tag_hinic5_cqm_buf *buf) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct device *dev = hinic5_cqm_handle->dev; + void *va = NULL; + s32 i; + + for (i = 0; i < (s32)buf->buf_number; i++) { + va = buf->buf_list[i].va; + buf->buf_list[i].pa = dma_map_single(dev, va, buf->buf_size, + 
DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, buf->buf_list[i].pa) != 0) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_MAP_FAIL(buf_list)); + break; + } + } + + if (i != (s32)buf->buf_number) { + i--; + for (; i >= 0; i--) + dma_unmap_single(dev, buf->buf_list[i].pa, + buf->buf_size, DMA_BIDIRECTIONAL); + return HINIC5_CQM_FAIL; + } + + return HINIC5_CQM_SUCCESS; +} + +/* Applying for the buffer list descriptor space */ +s32 hinic5_cqm_buf_list_alloc(struct tag_hinic5_cqm_buf *buf) +{ + size_t size = buf->buf_number * sizeof(struct tag_hinic5_cqm_buf_list); + + if (WARN_ON_ONCE(buf->buf_list)) + return HINIC5_CQM_SUCCESS; + + buf->buf_list = vmalloc(size); + if (unlikely(!buf->buf_list)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_ALLOC_FAIL(linux_buf_list)); + return HINIC5_CQM_FAIL; + } + + memset(buf->buf_list, 0, size); + return HINIC5_CQM_SUCCESS; +} + +/** + * Prototype : hinic5_cqm_buf_alloc + * Description : Apply for buffer space and DMA mapping for the struct tag_hinic5_cqm_buf + * structure. 
+ * Input : struct tag_hinic5_cqm_buf *buf + * struct device *dev + * bool direct: Whether direct remapping is required + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/15 + * Modification : Created function + */ +s32 hinic5_cqm_buf_alloc(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, struct tag_hinic5_cqm_buf *buf, bool direct) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + u32 i; + s32 ret = HINIC5_CQM_FAIL; + + ret = hinic5_cqm_buf_list_alloc(buf); + if (unlikely(ret != HINIC5_CQM_SUCCESS)) + return ret; + + /* Page for applying for each buffer */ + ret = hinic5_cqm_buf_alloc_page(hinic5_cqm_handle, buf); + if (unlikely(ret != HINIC5_CQM_SUCCESS)) { + hinic5_cqm_warn(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(linux_hinic5_cqm_buf_alloc_page)); + goto err1; + } + + /* PCI mapping of the buffer */ + ret = hinic5_cqm_buf_alloc_map(hinic5_cqm_handle, buf); + if (unlikely(ret != HINIC5_CQM_SUCCESS)) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(linux_hinic5_cqm_buf_alloc_map)); + goto err2; + } + + /* direct remapping */ + ret = hinic5_cqm_buf_alloc_direct(hinic5_cqm_handle, buf, direct); + if (unlikely(ret != HINIC5_CQM_SUCCESS)) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_buf_alloc_direct)); + goto err3; + } + + return HINIC5_CQM_SUCCESS; + +err3: + for (i = 0; i < buf->buf_number; i++) + dma_unmap_single(hinic5_cqm_handle->dev, buf->buf_list[i].pa, buf->buf_size, + DMA_BIDIRECTIONAL); +err2: + hinic5_cqm_buf_free_page(buf); +err1: + vfree(buf->buf_list); + buf->buf_list = NULL; + return ret; +} + +/** + * Prototype : hinic5_cqm_buf_free + * Description : Release the buffer space and DMA mapping for the struct tag_hinic5_cqm_buf + * structure. 
+ * Input : struct tag_hinic5_cqm_buf *buf + * struct device *dev + * bool direct: Whether direct remapping is required + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void hinic5_cqm_buf_free(struct tag_hinic5_cqm_buf *buf, struct device *dev) +{ + u32 i; + + if (buf->direct.va) { + vunmap(buf->direct.va); + buf->direct.va = NULL; + } + + // A secure mem buf doesn't need to call dma ummap and free pages. + // see hinic5_cqm_cla_secure_mem_buf_alloc() + if (buf->secure_mem_flag == HINIC5_CQM_SECURE_BUFFER_EN) + goto free_buf_list; + + if (buf->buf_list) { + for (i = 0; i < buf->buf_number; i++) { + if (buf->buf_list[i].va) + dma_unmap_single(dev, buf->buf_list[i].pa, + buf->buf_size, + DMA_BIDIRECTIONAL); + } + hinic5_cqm_buf_free_page(buf); + } + +free_buf_list: + if (buf->buf_list) { + vfree(buf->buf_list); + buf->buf_list = NULL; + } +} + +#else /* __WIN__ */ + +static s32 hinic5_cqm_buf_alloc_page(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, struct tag_hinic5_cqm_buf *buf) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct huge_buf_addr *bufs_addr = NULL; + u32 total_size; + u32 i; + + total_size = buf->buf_size * buf->buf_number; + + buf->huge_buf_number = (total_size / HINIC5_CQM_HUGE_BUF_SIZE) + + ((total_size % HINIC5_CQM_HUGE_BUF_SIZE) ? 
1 : 0); + if (!buf->huge_buf_number) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(buf->huge_buf_number)); + return HINIC5_CQM_FAIL; + } + + buf->bufs_addr = vmalloc(buf->huge_buf_number * + sizeof(struct huge_buf_addr)); + if (!buf->bufs_addr) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_ALLOC_FAIL(bufs_addr)); + return HINIC5_CQM_FAIL; + } + memset(buf->bufs_addr, 0, + buf->huge_buf_number * sizeof(struct huge_buf_addr)); + + bufs_addr = buf->bufs_addr; + for (i = 0; i < buf->huge_buf_number; i++) { + if ((i + 1) == buf->huge_buf_number) + bufs_addr[i].huge_buf_size = + PAGE_SIZE << + get_order(total_size - HINIC5_CQM_HUGE_BUF_SIZE * i); + else + bufs_addr[i].huge_buf_size = HINIC5_CQM_HUGE_BUF_SIZE; + + bufs_addr[i].huge_buf_vaddr = + __get_free_pages(GFP_KERNEL | __GFP_ZERO, + get_order(bufs_addr[i].huge_buf_size)); + if (!bufs_addr[i].huge_buf_vaddr) { + hinic5_cqm_err(handle->dev_hdl, + HINIC5_CQM_ALLOC_FAIL(huge_buf_vaddr)); + break; + } + } + + /* exception processing */ + if (i != buf->huge_buf_number) { + i--; + for (; i >= 0; i--) { + free_pages((ulong)(buf->bufs_addr[i].huge_buf_vaddr), + get_order(buf->bufs_addr[i].huge_buf_size)); + buf->bufs_addr[i].huge_buf_vaddr = NULL; + } + + vfree(buf->bufs_addr); + buf->bufs_addr = NULL; + return HINIC5_CQM_FAIL; + } + + return HINIC5_CQM_SUCCESS; +} + +static s32 hinic5_cqm_buf_alloc_map(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, struct tag_hinic5_cqm_buf *buf) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct device *dev = hinic5_cqm_handle->dev; + struct huge_buf_addr *bufs_addr = NULL; + u32 i; + + bufs_addr = buf->bufs_addr; + for (i = 0; i < buf->huge_buf_number; i++) { + bufs_addr[i].huge_buf_paddr = dma_map_single(dev, bufs_addr[i].huge_buf_vaddr, + bufs_addr[i].huge_buf_size, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, bufs_addr[i].huge_buf_paddr)) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_MAP_FAIL(huge_buf_paddr)); + break; + } + } + + if (i != 
buf->huge_buf_number) { + i--; + for (; i >= 0; i--) + dma_unmap_single(dev, bufs_addr[i].huge_buf_paddr, + bufs_addr[i].huge_buf_size, + DMA_BIDIRECTIONAL); + return HINIC5_CQM_FAIL; + } + + return HINIC5_CQM_SUCCESS; +} + +s32 hinic5_cqm_buf_alloc(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, struct tag_hinic5_cqm_buf *buf, bool direct) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct huge_buf_addr *bufs_addr = NULL; + u32 cnt; + u32 i; + s32 j = 0; + + if (buf->buf_size > HINIC5_CQM_HUGE_BUF_SIZE) { + hinic5_cqm_err(handle->dev_hdl, "Buffer size(0x%x) is large than huge buffer size(0x%x)\n", + buf->buf_size, HINIC5_CQM_HUGE_BUF_SIZE); + return HINIC5_CQM_FAIL; + } + + /* Applying for the buffer list descriptor space */ + buf->buf_list = vmalloc(buf->buf_number * sizeof(struct tag_hinic5_cqm_buf_list)); + if (unlikely(buf->buf_list == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_ALLOC_FAIL(win_buf_list)); + return HINIC5_CQM_FAIL; + } + memset(buf->buf_list, + 0, buf->buf_number * sizeof(struct tag_hinic5_cqm_buf_list)); + + /* Page for applying for each buffer */ + if (hinic5_cqm_buf_alloc_page(hinic5_cqm_handle, buf) == HINIC5_CQM_FAIL) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(win_hinic5_cqm_buf_alloc_page)); + goto err1; + } + + /* PCI mapping of the buffer */ + if (hinic5_cqm_buf_alloc_map(hinic5_cqm_handle, buf) == HINIC5_CQM_FAIL) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(win_hinic5_cqm_buf_alloc_map)); + goto err2; + } + + /* Assign a value to the buffer list space. 
*/ + for (i = 0; i < buf->buf_number; i++) { + bufs_addr = &buf->bufs_addr[j]; + cnt = bufs_addr->huge_buf_size / buf->buf_size; + buf->buf_list[i].va = (void *)((u64)bufs_addr->huge_buf_vaddr + + buf->buf_size * (i % cnt)); + buf->buf_list[i].pa = bufs_addr->huge_buf_paddr + + buf->buf_size * (i % cnt); + + if (0 == ((i + 1) % cnt)) + j++; + } + + return HINIC5_CQM_SUCCESS; + +err2: + for (i = 0; i < buf->huge_buf_number; i++) { + free_pages((ulong)(buf->bufs_addr[i].huge_buf_vaddr), + get_order(buf->bufs_addr[i].huge_buf_size)); + buf->bufs_addr[i].huge_buf_vaddr = NULL; + } + + vfree(buf->bufs_addr); + buf->bufs_addr = NULL; + +err1: + vfree(buf->buf_list); + buf->buf_list = NULL; + return HINIC5_CQM_FAIL; +} + +void hinic5_cqm_buf_free(struct tag_hinic5_cqm_buf *buf, struct device *dev) +{ + u32 i; + + if (buf->bufs_addr) { + for (i = 0; i < buf->huge_buf_number; i++) { + dma_unmap_single(dev, buf->bufs_addr[i].huge_buf_paddr, + buf->bufs_addr[i].huge_buf_size, + DMA_BIDIRECTIONAL); + free_pages((ulong)(buf->bufs_addr[i].huge_buf_vaddr), + get_order(buf->bufs_addr[i].huge_buf_size)); + buf->bufs_addr[i].huge_buf_paddr = 0; + buf->bufs_addr[i].huge_buf_vaddr = NULL; + } + vfree(buf->bufs_addr); + buf->bufs_addr = NULL; + } + + if (buf->buf_list) { + vfree(buf->buf_list); + buf->buf_list = NULL; + } +} + +#endif /* __WIN__ */ + +static s32 hinic5_cqm_cla_cache_invalid_cmd(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cmd_buf *buf_in, + hinic5_cqm_cla_cache_invalid_cmd_s *cmd_info) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + s32 ret; + u8 cmd; + + hinic5_cqm_handle->cmdq_ops->prepare_cmd_cache_invalidate(cmd_info, buf_in, &cmd); + + /* Send the cmdq command. 
*/ + ret = hinic5_cqm_send_cmd_box((void *)(hinic5_cqm_handle->ex_handle), HINIC5_CQM_MOD_HINIC5_CQM, + cmd, buf_in, NULL, NULL, + HINIC5_CQM_CMD_TIMEOUT, HINIC5_CHANNEL_DEFAULT); + if (ret != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_send_cmd_box)); + hinic5_cqm_err(handle->dev_hdl, + "Cla cache invalid: hinic5_cqm_send_cmd_box_ret=%d\n", + ret); + hinic5_cqm_err(handle->dev_hdl, + "Cla cache invalid: cla_cache_invalid_cmd: 0x%x 0x%x 0x%x\n", + cmd_info->gpa_h, cmd_info->gpa_l, cmd_info->cache_size); + return HINIC5_CQM_FAIL; + } + + return HINIC5_CQM_SUCCESS; +} + +static s32 hinic5_cqm_cla_cache_invalid_all_smf(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cmd_buf *buf_in, + hinic5_cqm_cla_cache_invalid_cmd_s *cmd) +{ + struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability; + u32 i; + s32 ret = HINIC5_CQM_FAIL; + + for (i = 0; i < func_cap->smf_max_num; i++) { + if ((func_cap->smf_pg & (1U << i)) != 0) { + cmd->smf_id = i; + ret = hinic5_cqm_cla_cache_invalid_cmd(hinic5_cqm_handle, buf_in, cmd); + if (ret != HINIC5_CQM_SUCCESS) + return ret; + } + } + return ret; +} + +s32 hinic5_cqm_cla_cache_invalid(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, dma_addr_t pa, u32 cache_size) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_cmd_buf *buf_in = NULL; + struct hinic5_func_attr *func_attr = NULL; + struct tag_hinic5_cqm_bat_entry_vf2pf gpa = {0}; + hinic5_cqm_cla_cache_invalid_cmd_s cmd; + u32 cla_gpa_h = 0; + s32 ret = HINIC5_CQM_FAIL; + + buf_in = hinic5_cqm_cmd_alloc((void *)(hinic5_cqm_handle->ex_handle)); + if (unlikely(buf_in == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_ALLOC_FAIL(buf_in)); + return HINIC5_CQM_FAIL; + } + + gpa.cla_gpa_h = HINIC5_CQM_ADDR_HI(pa) & HINIC5_CQM_CHIP_GPA_HIMASK; + gpa.acs_spu_en = hinic5_cqm_get_acs_spu_en(hinic5_cqm_handle); + + /* In non-fake mode, set func_id to 0xffff. 
+ * Indicate the current func fake mode. + * The value of func_id is a fake func ID. + */ + if (HINIC5_CQM_IS_FAKE_CHILD_AGENT(hinic5_cqm_handle)) { + cmd.func_id = hinic5_cqm_handle->func_attribute.func_global_idx; + func_attr = &hinic5_cqm_handle->parent_hinic5_cqm_handle->func_attribute; + gpa.fake_vf_en = 1; + gpa.pf_id = func_attr->func_global_idx; + } else { + cmd.func_id = 0xffff; + } + memcpy(&cla_gpa_h, &gpa, sizeof(u32)); + + /* Fill command and convert it to big endian */ + cmd.cache_size = cache_size; + cmd.gpa_l = HINIC5_CQM_ADDR_LW(pa); + cmd.gpa_h = cla_gpa_h; + + /* The normal mode is the 1822 traditional mode and is all configured + * on SMF0. + */ + /* Mode 0 is hashed to 4 SMF engines (excluding PPF) by func ID. */ + if (HINIC5_CQM_IS_LB_MODE_NORMAL(hinic5_cqm_handle) || + (HINIC5_CQM_IS_LB_MODE_0(hinic5_cqm_handle) && !HINIC5_CQM_IS_PPF(hinic5_cqm_handle))) { + cmd.smf_id = hinic5_cqm_funcid2smfid(hinic5_cqm_handle); + ret = hinic5_cqm_cla_cache_invalid_cmd(hinic5_cqm_handle, buf_in, &cmd); + /* Mode 1/2 are allocated to 4 SMF engines by flow. Therefore, + * one function needs to be allocated to 4 SMF engines. + */ + /* The PPF in mode 0 needs to be configured on 4 engines, + * and the timer resources need to be shared by the 4 engines. 
+ */ + } else if (HINIC5_CQM_IS_LB_MODE_1_OR_2(hinic5_cqm_handle) || + (HINIC5_CQM_IS_LB_MODE_0(hinic5_cqm_handle) && HINIC5_CQM_IS_PPF(hinic5_cqm_handle))) { + ret = hinic5_cqm_cla_cache_invalid_all_smf(hinic5_cqm_handle, buf_in, &cmd); + } else { + hinic5_cqm_err(handle->dev_hdl, "Cla cache invalid: unsupport lb mode=%u\n", hinic5_cqm_handle->func_capability.lb_mode); + ret = HINIC5_CQM_FAIL; + } + + hinic5_cqm_cmd_free((void *)(hinic5_cqm_handle->ex_handle), buf_in); + return ret; +} + +static void free_cache_inv(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, struct tag_hinic5_cqm_buf *buf, + s32 *inv_flag) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + u32 order; + u32 i; + + order = (u32)get_order(buf->buf_size); + + if (!hinic5_is_chip_present(handle)) + return; + + if (!buf->buf_list) + return; + + for (i = 0; i < buf->buf_number; i++) { + if (!buf->buf_list[i].va) + continue; + + if (*inv_flag != HINIC5_CQM_SUCCESS) + continue; + + /* In the Pangea environment, if the cmdq times out, + * no subsequent message is sent. + */ + *inv_flag = hinic5_cqm_cla_cache_invalid(hinic5_cqm_handle, buf->buf_list[i].pa, + (u32)(PAGE_SIZE << order)); + if (*inv_flag != HINIC5_CQM_SUCCESS) + hinic5_cqm_err(handle->dev_hdl, + "Buffer free: fail to invalid buf_list pa cache, inv_flag=%d\n", + *inv_flag); + } +} + +void hinic5_cqm_buf_free_cache_inv(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, struct tag_hinic5_cqm_buf *buf, + s32 *inv_flag) +{ + if (!COMM_SUPPORT_SMF_CACHE_INVALID(hinic5_cqm_handle->ex_handle)) { + /* Send a command to the chip to kick out the cache. */ + free_cache_inv(hinic5_cqm_handle, buf, inv_flag); + } + + /* Clear host resources */ + hinic5_cqm_buf_free(buf, hinic5_cqm_handle->dev); +} + +#define bitmap_section + +/** + * Prototype : hinic5_cqm_single_bitmap_init + * Description : Initialize a bitmap. 
+ * Input : struct tag_hinic5_cqm_bitmap *bitmap + * Output : None + * Return Value : s32 + * 1.Date : 2015/9/9 + * Modification : Created function + */ +static s32 hinic5_cqm_single_bitmap_init(struct tag_hinic5_cqm_bitmap *bitmap) +{ + u32 nbytes; + + spin_lock_init(&bitmap->lock); + + nbytes = BITS_TO_LONGS(bitmap->max_num) * sizeof(long); + if (bitmap->bitmap_info.use_hinic5_vram != 0) + bitmap->table = hinic5_hinic5_vram_kalloc(bitmap->bitmap_info.buf_hinic5_vram_name, nbytes); + else + bitmap->table = vmalloc(nbytes); + + if (unlikely(bitmap->table == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_ALLOC_FAIL(bitmap->table)); + return HINIC5_CQM_FAIL; + } + + memset(bitmap->table, 0, nbytes); + + return HINIC5_CQM_SUCCESS; +} + +static s32 hinic5_cqm_bitmap_toe_init(struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_bitmap *bitmap = NULL; + + /* SRQC of TOE services is not managed through the CLA table, + * but the bitmap is required to manage SRQid. 
+ */ + if (hinic5_cqm_handle->service[HINIC5_CQM_SERVICE_T_TOE].valid) { + bitmap = &hinic5_cqm_handle->toe_own_capability.srqc_bitmap; + bitmap->max_num = + hinic5_cqm_handle->toe_own_capability.toe_srqc_number; + bitmap->reserved_top = 0; + bitmap->reserved_back = 0; + bitmap->last = 0; + if (bitmap->max_num == 0) { + hinic5_cqm_info(handle->dev_hdl, + "Bitmap init: toe_srqc_number=0, don't init bitmap\n"); + return HINIC5_CQM_SUCCESS; + } + + if (hinic5_cqm_single_bitmap_init(bitmap) != HINIC5_CQM_SUCCESS) + return HINIC5_CQM_FAIL; + } + + return HINIC5_CQM_SUCCESS; +} + +static void hinic5_cqm_bitmap_toe_uninit(struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + struct tag_hinic5_cqm_bitmap *bitmap = NULL; + + if (hinic5_cqm_handle->service[HINIC5_CQM_SERVICE_T_TOE].valid) { + bitmap = &hinic5_cqm_handle->toe_own_capability.srqc_bitmap; + if (bitmap->table) { + spin_lock_deinit(&bitmap->lock); + vfree(bitmap->table); + bitmap->table = NULL; + } + } +} + +static s32 hinic5_cqm_bitmap_init_by_type(u32 type, struct tag_hinic5_cqm_bitmap *bitmap, struct tag_hinic5_cqm_func_capability *capability) +{ + switch (type) { + case HINIC5_CQM_BAT_ENTRY_T_QPC: + bitmap->max_num = capability->qpc_number; + bitmap->reserved_top = capability->qpc_reserved; + bitmap->reserved_back = capability->qpc_reserved_back; + bitmap->last = capability->qpc_reserved; + bitmap->bitmap_info.use_hinic5_vram = get_use_hinic5_vram_flag(); + break; + case HINIC5_CQM_BAT_ENTRY_T_MPT: + bitmap->max_num = capability->mpt_number; + bitmap->reserved_top = capability->mpt_reserved; + bitmap->reserved_back = capability->mpt_reserved_back; + bitmap->last = capability->mpt_reserved; + break; + case HINIC5_CQM_BAT_ENTRY_T_SCQC: + bitmap->max_num = capability->scqc_number; + bitmap->reserved_top = capability->scq_reserved; + bitmap->reserved_back = capability->scq_reserved_back; + bitmap->last = capability->scq_reserved; + break; + case HINIC5_CQM_BAT_ENTRY_T_SRQC: + bitmap->max_num = 
capability->srqc_number; + bitmap->reserved_top = capability->srq_reserved; + bitmap->reserved_back = capability->srq_reserved_back; + bitmap->last = capability->srq_reserved; + break; + default: + return HINIC5_CQM_FAIL; + } + + return HINIC5_CQM_SUCCESS; +} + + +/** + * Prototype : hinic5_cqm_bitmap_init + * Description : Initialize the bitmap. + * Input : struct tag_hinic5_cqm_handle *hinic5_cqm_handle + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/15 + * Modification : Created function + */ +s32 hinic5_cqm_bitmap_init(struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + struct tag_hinic5_cqm_func_capability *capability = &hinic5_cqm_handle->func_capability; + struct tag_hinic5_cqm_bat_table *bat_table = &hinic5_cqm_handle->bat_table; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_cla_table *cla_table = NULL; + struct tag_hinic5_cqm_bitmap *bitmap = NULL; + s32 ret = HINIC5_CQM_SUCCESS; + u32 i; + int err; + + for (i = 0; i < HINIC5_CQM_BAT_ENTRY_MAX; i++) { + cla_table = &bat_table->entry[i]; + if (cla_table->obj_num == 0) { + hinic5_cqm_info(handle->dev_hdl, "Cla alloc: cla_type %u, obj_num=0, don't init bitmap\n", cla_table->type); + continue; + } + + bitmap = &cla_table->bitmap; + err = snprintf(bitmap->bitmap_info.buf_hinic5_vram_name, + HINIC5_VRAM_NAME_MAX_LEN, "%s%s%02u", cla_table->name, + HINIC5_VRAM_HINIC5_CQM_BITMAP_BASE, cla_table->type); + if (err < 0) { + hinic5_cqm_err(handle->dev_hdl, "hinic5_cqm bitmap hinic5_vram name snprintf_s failed"); + return HINIC5_CQM_FAIL; + } + + if (hinic5_cqm_bitmap_init_by_type(cla_table->type, bitmap, capability) == HINIC5_CQM_SUCCESS) { + hinic5_cqm_info(handle->dev_hdl, "Bitmap init: cla_table_type=%u, max_num=0x%x\n", + cla_table->type, bitmap->max_num); + ret = hinic5_cqm_single_bitmap_init(bitmap); + } + + if (ret != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, "Bitmap init: failed to init cla_table_type=%u, obj_num=0x%x\n", + cla_table->type, 
cla_table->obj_num); + goto err; + } + } + + if (hinic5_cqm_bitmap_toe_init(hinic5_cqm_handle) != HINIC5_CQM_SUCCESS) + goto err; + + return HINIC5_CQM_SUCCESS; + +err: + hinic5_cqm_bitmap_uninit(hinic5_cqm_handle); + return HINIC5_CQM_FAIL; +} + +static void hinic5_cqm_bitmap_table_free(struct tag_hinic5_cqm_bitmap *bitmap) +{ + if (bitmap->bitmap_info.use_hinic5_vram != 0) + hinic5_hinic5_vram_kfree(bitmap->table, bitmap->bitmap_info.buf_hinic5_vram_name, + BITS_TO_LONGS(bitmap->max_num) * sizeof(long)); + else + vfree(bitmap->table); + bitmap->table = NULL; +} + +/** + * Prototype : hinic5_cqm_bitmap_uninit + * Description : Deinitialize the bitmap. + * Input : struct tag_hinic5_cqm_handle *hinic5_cqm_handle + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void hinic5_cqm_bitmap_uninit(struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + struct tag_hinic5_cqm_bat_table *bat_table = &hinic5_cqm_handle->bat_table; + struct tag_hinic5_cqm_cla_table *cla_table = NULL; + struct tag_hinic5_cqm_bitmap *bitmap = NULL; + u32 i; + + for (i = 0; i < HINIC5_CQM_BAT_ENTRY_MAX; i++) { + cla_table = &bat_table->entry[i]; + bitmap = &cla_table->bitmap; + if (cla_table->type != HINIC5_CQM_BAT_ENTRY_T_INVALID) { + if (bitmap->table) { + spin_lock_deinit(&bitmap->lock); + hinic5_cqm_bitmap_table_free(bitmap); + } + } + } + + hinic5_cqm_bitmap_toe_uninit(hinic5_cqm_handle); +} + +/** + * Prototype : hinic5_cqm_bitmap_check_range + * Description : Starting from begin, check whether the bits in number of count + * are idle in the table. Requirement: + * 1. This group of bits cannot cross steps. + * 2. This group of bits must be 0. 
+ * Input : const ulong *table, + * u32 step, + * u32 max_num, + * u32 begin, + * u32 count + * Output : None + * Return Value : u32 + * 1.Date : 2015/4/15 + * Modification : Created function + */ +static u32 hinic5_cqm_bitmap_check_range(const ulong *table, u32 step, u32 max_num, u32 begin, + u32 count) +{ + u32 end = (begin + (count - 1)); + u32 i; + + /* Single-bit check is not performed. */ + if (count == 1) + return begin; + + /* The end value exceeds the threshold. */ + if (end >= max_num) + return max_num; + + /* Bit check, the next bit is returned when a non-zero bit is found. */ + for (i = (begin + 1); i <= end; i++) { + if (test_bit((int)i, table)) + return i + 1; + } + + /* Check whether it's in different steps. */ + if ((begin & (~(step - 1))) != (end & (~(step - 1)))) + return (end & (~(step - 1))); + + /* If the check succeeds, begin is returned. */ + return begin; +} + +static void hinic5_cqm_bitmap_find(struct tag_hinic5_cqm_bitmap *bitmap, u32 *index, u32 last, + u32 step, u32 count) +{ + u32 last_num = last; + u32 max_num = bitmap->max_num - bitmap->reserved_back; + ulong *table = bitmap->table; + + do { + *index = (u32)find_next_zero_bit(table, max_num, last_num); + if (*index < max_num) + last_num = hinic5_cqm_bitmap_check_range(table, step, max_num, + *index, count); + else + break; + } while (last_num != *index); +} + +static u32 hinic5_cqm_bitmap_find_with_lowbits_forward(struct tag_hinic5_cqm_bitmap *bitmap, + u32 start, u32 end, u32 lowbits, u32 lowbits_mask) +{ + ulong *table = bitmap->table; + u32 offset = start; + u32 index = HINIC5_CQM_INDEX_INVALID; + + while (offset < end) { + index = (u32)find_next_zero_bit(table, end, offset); + if (index >= end) + return HINIC5_CQM_INDEX_INVALID; + + if ((index & lowbits_mask) == lowbits) /* match lowbits */ + break; + + offset = index + 1; + if (offset == end) + return HINIC5_CQM_INDEX_INVALID; + } + + return index; +} + +static inline u32 find_next_zero_bit_reverse(const unsigned long *addr, u32 
end, u32 start) +{ + u32 i; + + for (i = start; i > end; i--) { + if (test_bit(i, addr) == 0) + return i; + } + + return i; +} + +static u32 hinic5_cqm_bitmap_find_with_lowbits_reverse(struct tag_hinic5_cqm_bitmap *bitmap, + u32 start, u32 end, u32 lowbits, u32 lowbits_mask) +{ + ulong *table = bitmap->table; + u32 offset = start; + u32 index = HINIC5_CQM_INDEX_INVALID; + + while (offset > end) { + index = (u32)find_next_zero_bit_reverse(table, end, offset); + if (index <= end) + return HINIC5_CQM_INDEX_INVALID; + + if ((index & lowbits_mask) == lowbits) /* match lowbits */ + break; + + offset = index - 1; + if (offset == end) + return HINIC5_CQM_INDEX_INVALID; + } + + return index; +} + +/* search range is [start, end) or (end, start] */ +static u32 hinic5_cqm_bitmap_find_with_lowbits_align(struct tag_hinic5_cqm_bitmap *bitmap, + u32 start, u32 end, u32 xid) +{ + u32 lowbits_mode = HINIC5_CQM_DYNAMIC_XID_LB_MODE(xid); + u32 lowbits_mask = HINIC5_CQM_DYNAMIC_XID_LOW_BIT_MASK(lowbits_mode); + u32 lowbits = (HINIC5_CQM_DYNAMIC_XID_LOW_BITS(xid) & lowbits_mask); + u32 index; + + if (start <= end) + index = hinic5_cqm_bitmap_find_with_lowbits_forward(bitmap, start, end, + lowbits, lowbits_mask); + else + index = hinic5_cqm_bitmap_find_with_lowbits_reverse(bitmap, start, end, + lowbits, lowbits_mask); + + return index; +} + +/** + * Prototype : hinic5_cqm_bitmap_alloc + * Description : Apply for a bitmap index. 0 and 1 must be left blank. + * Scan backwards from where you last applied. + * A string of consecutive indexes must be applied for and + * cannot be applied for across trunks. + * Input : struct tag_hinic5_cqm_bitmap *bitmap, + * u32 step, + * u32 count + * Output : None + * Return Value : u32 + * The obtained index is returned. + * If a failure occurs, the value of max is returned. 
+ * 1.Date : 2015/4/15 + * Modification : Created function + */ +u32 hinic5_cqm_bitmap_alloc(struct tag_hinic5_cqm_bitmap *bitmap, u32 step, u32 count, bool update_last) +{ + u32 index = 0; + u32 max_num = bitmap->max_num - bitmap->reserved_back; + u32 last = bitmap->last; + ulong *table = bitmap->table; + u32 i; + + spin_lock(&bitmap->lock); + + /* Search for an idle bit from the last position. */ + hinic5_cqm_bitmap_find(bitmap, &index, last, step, count); + + /* The preceding search fails. Search for an idle bit + * from the beginning. + */ + if (index >= max_num) { + last = bitmap->reserved_top; + hinic5_cqm_bitmap_find(bitmap, &index, last, step, count); + } + + /* Set the found bit to 1 and reset last. */ + if (index < max_num) { + for (i = index; i < (index + count); i++) + set_bit(i, table); + + if (update_last) { + bitmap->last = (index + count); + if (bitmap->last >= max_num) + bitmap->last = bitmap->reserved_top; + } + } + + spin_unlock(&bitmap->lock); + return index; +} + +/** + * Prototype : hinic5_cqm_bitmap_alloc_lowbits_align + * Description : Apply for a bitmap index with lowbits align. + * Scan backwards from where you last applied if search all range. + * A string of consecutive indexes must be applied for and + * cannot be applied for across trunks. + * Input : struct tag_hinic5_cqm_bitmap *bitmap, + * struct tag_hinic5_cqm_bitmap_range *bp_range, + * struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + * u32 xid, + * bool update_last + * Output : None + * Return Value : u32 + * The obtained index is returned. + * If a failure occurs, the value of invalid_index is returned. 
+ * 1.Date : 2015/4/15 + * Modification : Created function + */ +u32 hinic5_cqm_bitmap_alloc_lowbits_align(struct tag_hinic5_cqm_bitmap *bitmap, + struct tag_hinic5_cqm_bitmap_range *bp_range, + struct tag_hinic5_cqm_handle *hinic5_cqm_handle, u32 xid, bool update_last) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + ulong *table = bitmap->table; + u32 search_mode = HINIC5_CQM_DYNAMIC_XID_SEARCH_MODE(xid); + u32 max_num = bitmap->max_num - bitmap->reserved_back; + u32 index, last; + + spin_lock(&bitmap->lock); + + /* unsupport reverse search when search all range of bitmap */ + if (search_mode == HINIC5_CQM_XID_SEARCH_ALL) { + last = bitmap->last; + /* Search for an idle bit from the last position. */ + index = hinic5_cqm_bitmap_find_with_lowbits_align(bitmap, last, max_num, xid); + /* The preceding search fails. Search for an idle bit from the beginning. */ + if (index == HINIC5_CQM_INDEX_INVALID) { + last = bitmap->reserved_top; + index = hinic5_cqm_bitmap_find_with_lowbits_align(bitmap, last, max_num, xid); + } + } else { + if (HINIC5_CQM_BP_RANGE_VALID(bp_range->start, bp_range->end, + bitmap->reserved_top, max_num) == 0) { + hinic5_cqm_err(handle->dev_hdl, + "Bitmap alloc: range invalid, start=0x%x, end=0x%x, min=0x%x, max=0x%x\n", + bp_range->start, bp_range->end, bitmap->reserved_top, max_num); + spin_unlock(&bitmap->lock); + return HINIC5_CQM_INDEX_INVALID; + } + index = hinic5_cqm_bitmap_find_with_lowbits_align(bitmap, bp_range->start, bp_range->end, + xid); + } + + /* Set the found bit to 1 and reset last. 
*/ + if (index != HINIC5_CQM_INDEX_INVALID) { + set_bit(index, table); + + if (update_last && search_mode == HINIC5_CQM_XID_SEARCH_ALL) { + bitmap->last = index + 1; + if (bitmap->last >= max_num) + bitmap->last = bitmap->reserved_top; + } + } + + spin_unlock(&bitmap->lock); + return index; +} + +static inline void bitmap_set_table(struct tag_hinic5_cqm_bitmap *bitmap, ulong *table, u32 *ret_index, u32 index) +{ + spin_lock(&bitmap->lock); + if (test_bit((int)index, table)) { + *ret_index = HINIC5_CQM_INDEX_INVALID; + } else { + set_bit(index, table); + *ret_index = index; + } + spin_unlock(&bitmap->lock); +} + +/** + * Prototype : hinic5_cqm_bitmap_alloc_reserved + * Description : Reserve bit applied for based on index. + * Input : struct tag_hinic5_cqm_bitmap *bitmap, + * u32 count, + * u32 index + * Output : None + * Return Value : u32 + * The obtained index is returned. + * If a failure occurs, the value of max is returned. + * 1.Date : 2015/4/15 + * Modification : Created function + */ +u32 hinic5_cqm_bitmap_alloc_reserved(struct tag_hinic5_cqm_bitmap *bitmap, u32 count, u32 index) +{ + u32 ret_index; + + if (index >= bitmap->max_num || count != 1) + return HINIC5_CQM_INDEX_INVALID; + + if (index >= bitmap->reserved_top && (index < bitmap->max_num - bitmap->reserved_back)) + return HINIC5_CQM_INDEX_INVALID; + + bitmap_set_table(bitmap, bitmap->table, &ret_index, index); + return ret_index; +} + +u32 hinic5_cqm_bitmap_alloc_by_xid(struct tag_hinic5_cqm_bitmap *bitmap, u32 count, u32 index) +{ + u32 ret_index; + + if (index >= bitmap->max_num || count != 1) + return HINIC5_CQM_INDEX_INVALID; + bitmap_set_table(bitmap, bitmap->table, &ret_index, index); + return ret_index; +} + +/** + * Prototype : hinic5_cqm_bitmap_free + * Description : Releases a bitmap index. 
+ * Input : struct tag_hinic5_cqm_bitmap *bitmap, + * u32 index, + * u32 count + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void hinic5_cqm_bitmap_free(struct tag_hinic5_cqm_bitmap *bitmap, u32 index, u32 count) +{ + u32 i; + + spin_lock(&bitmap->lock); + + for (i = index; i < (index + count); i++) + clear_bit((s32)i, bitmap->table); + + spin_unlock(&bitmap->lock); +} + +#define obj_table_section + +/** + * Prototype : hinic5_cqm_single_object_table_init + * Description : Initialize a object table. + * Input : struct tag_hinic5_cqm_object_table *obj_table + * Output : None + * Return Value : s32 + * 1.Date : 2015/9/9 + * Modification : Created function + */ +static s32 hinic5_cqm_single_object_table_init(struct tag_hinic5_cqm_object_table *obj_table) +{ + rwlock_init(&obj_table->lock); + + obj_table->table = vmalloc(obj_table->max_num * sizeof(void *)); + if (unlikely(obj_table->table == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_ALLOC_FAIL(table)); + return HINIC5_CQM_FAIL; + } + memset(obj_table->table, 0, obj_table->max_num * sizeof(void *)); + return HINIC5_CQM_SUCCESS; +} + +/** + * Prototype : hinic5_cqm_object_table_init + * Description : Initialize the association table between objects and indexes. 
+ * Input : struct tag_hinic5_cqm_handle *hinic5_cqm_handle + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/15 + * Modification : Created function + */ +s32 hinic5_cqm_object_table_init(struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + struct tag_hinic5_cqm_func_capability *capability = &hinic5_cqm_handle->func_capability; + struct tag_hinic5_cqm_bat_table *bat_table = &hinic5_cqm_handle->bat_table; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_object_table *obj_table = NULL; + struct tag_hinic5_cqm_cla_table *cla_table = NULL; + s32 ret = HINIC5_CQM_SUCCESS; + u32 i; + + for (i = 0; i < HINIC5_CQM_BAT_ENTRY_MAX; i++) { + cla_table = &bat_table->entry[i]; + if (cla_table->obj_num == 0) { + hinic5_cqm_info(handle->dev_hdl, + "Obj table init: cla_table_type %u, obj_num=0, don't init obj table\n", + cla_table->type); + continue; + } + + obj_table = &cla_table->obj_table; + + switch (cla_table->type) { + case HINIC5_CQM_BAT_ENTRY_T_QPC: + obj_table->max_num = capability->qpc_number; + ret = hinic5_cqm_single_object_table_init(obj_table); + break; + case HINIC5_CQM_BAT_ENTRY_T_MPT: + obj_table->max_num = capability->mpt_number; + ret = hinic5_cqm_single_object_table_init(obj_table); + break; + case HINIC5_CQM_BAT_ENTRY_T_SCQC: + obj_table->max_num = capability->scqc_number; + ret = hinic5_cqm_single_object_table_init(obj_table); + break; + case HINIC5_CQM_BAT_ENTRY_T_SRQC: + obj_table->max_num = capability->srqc_number; + ret = hinic5_cqm_single_object_table_init(obj_table); + break; + default: + break; + } + + if (ret != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, + "Obj table init: failed to init cla_table_type=%u, obj_num=0x%x\n", + cla_table->type, cla_table->obj_num); + goto err; + } + } + + return HINIC5_CQM_SUCCESS; + +err: + hinic5_cqm_object_table_uninit(hinic5_cqm_handle); + return HINIC5_CQM_FAIL; +} + +/** + * Prototype : hinic5_cqm_object_table_uninit + * Description : Deinitialize the 
association table between objects and + * indexes. + * Input : struct tag_hinic5_cqm_handle *hinic5_cqm_handle + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void hinic5_cqm_object_table_uninit(struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + struct tag_hinic5_cqm_bat_table *bat_table = &hinic5_cqm_handle->bat_table; + struct tag_hinic5_cqm_object_table *obj_table = NULL; + struct tag_hinic5_cqm_cla_table *cla_table = NULL; + u32 i; + + for (i = 0; i < HINIC5_CQM_BAT_ENTRY_MAX; i++) { + cla_table = &bat_table->entry[i]; + obj_table = &cla_table->obj_table; + if (cla_table->type != HINIC5_CQM_BAT_ENTRY_T_INVALID) { + if (obj_table->table) { + rwlock_deinit(&obj_table->lock); + vfree(obj_table->table); + obj_table->table = NULL; + } + } + } +} + +/** + * Prototype : hinic5_cqm_object_table_insert + * Description : Insert an object + * Input : struct tag_hinic5_cqm_handle *hinic5_cqm_handle + * struct tag_hinic5_cqm_object_table *object_table + * u32 index + * struct tag_hinic5_cqm_object *obj + * bool bh + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/15 + * Modification : Created function + */ +s32 hinic5_cqm_object_table_insert(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_object_table *object_table, + u32 index, struct tag_hinic5_cqm_object *obj, bool bh) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + + if (index >= object_table->max_num) { + hinic5_cqm_err(handle->dev_hdl, + "Obj table insert: index 0x%x exceeds max_num 0x%x\n", + index, object_table->max_num); + return HINIC5_CQM_FAIL; + } + + hinic5_cqm_write_lock(&object_table->lock, bh); + + if (!object_table->table[index]) { + object_table->table[index] = obj; + hinic5_cqm_write_unlock(&object_table->lock, bh); + return HINIC5_CQM_SUCCESS; + } + + hinic5_cqm_write_unlock(&object_table->lock, bh); + hinic5_cqm_err(handle->dev_hdl, + "Obj table insert: object_table->table[0x%x] has been 
inserted\n" + index); + + return HINIC5_CQM_FAIL; +} + +/** + * Prototype : hinic5_cqm_object_table_remove + * Description : Remove an object + * Input : struct tag_hinic5_cqm_handle *hinic5_cqm_handle + * struct tag_hinic5_cqm_object_table *object_table + * u32 index + * const struct tag_hinic5_cqm_object *obj + * bool bh + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void hinic5_cqm_object_table_remove(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_object_table *object_table, + u32 index, const struct tag_hinic5_cqm_object *obj, bool bh) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + + if (index >= object_table->max_num) { + hinic5_cqm_err(handle->dev_hdl, + "Obj table remove: index 0x%x exceeds max_num 0x%x\n", + index, object_table->max_num); + return; + } + + hinic5_cqm_write_lock(&object_table->lock, bh); + + if (object_table->table[index] && object_table->table[index] == obj) + object_table->table[index] = NULL; + else + hinic5_cqm_err(handle->dev_hdl, + "Obj table remove: object_table->table[0x%x] has been removed\n", + index); + + hinic5_cqm_write_unlock(&object_table->lock, bh); +} + +/** + * Prototype : hinic5_cqm_object_table_get + * Description : Get an object + * Input : struct tag_hinic5_cqm_handle *hinic5_cqm_handle + * struct tag_hinic5_cqm_object_table *object_table + * u32 index + * bool bh + * Output : None + * Return Value : struct tag_hinic5_cqm_object *obj + * 1.Date : 2018/6/20 + * Modification : Created function + */ +struct tag_hinic5_cqm_object *hinic5_cqm_object_table_get(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_object_table *object_table, + u32 index, bool bh) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_object *obj = NULL; + + if (index >= object_table->max_num) { + hinic5_cqm_err(handle->dev_hdl, + "Obj table get: index 0x%x exceeds max_num 0x%x\n", + 
index, object_table->max_num); + return NULL; + } + + hinic5_cqm_read_lock(&object_table->lock, bh); + + obj = object_table->table[index]; + if (obj) + atomic_inc(&obj->refcount); + + hinic5_cqm_read_unlock(&object_table->lock, bh); + + return obj; +} diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_bitmap_table.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_bitmap_table.h new file mode 100644 index 00000000..2199b83b --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_bitmap_table.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_CQM_BITMAP_TABLE_H +#define HINIC5_CQM_BITMAP_TABLE_H + +#include <linux/types.h> +#include <linux/device.h> +#include <linux/spinlock.h> + +#include "hinic5_hinic5_vram_api.h" +#include "hinic5_cqm_object.h" +#include "hinic5_vram_common.h" + +/* hinic5_cqm_buf_alloc() failed due to buddy allocator page exhaustion. */ +#define HINIC5_CQM_BUF_ALLOC_BUDDY_PAGES_FAIL (HINIC5_CQM_CONTINUE + 1) + +struct tag_hinic5_cqm_bitmap_range { + u32 start; + u32 end; +}; + +struct tag_hinic5_cqm_bitmap { + ulong *table; + u32 max_num; + u32 last; + u32 reserved_top; /* reserved index */ + u32 reserved_back; + spinlock_t lock; /* lock for hinic5_cqm */ + struct hinic5_vram_buf_info bitmap_info; +}; + +struct tag_hinic5_cqm_object_table { + /* Now is big array. Later will be optimized as a red-black tree. 
*/ + struct tag_hinic5_cqm_object **table; + u32 max_num; + rwlock_t lock; +}; + +struct tag_hinic5_cqm_handle; + +s32 hinic5_cqm_bitmap_init(struct tag_hinic5_cqm_handle *hinic5_cqm_handle); +void hinic5_cqm_bitmap_uninit(struct tag_hinic5_cqm_handle *hinic5_cqm_handle); +u32 hinic5_cqm_bitmap_alloc(struct tag_hinic5_cqm_bitmap *bitmap, u32 step, u32 count, bool update_last); +u32 hinic5_cqm_bitmap_alloc_lowbits_align(struct tag_hinic5_cqm_bitmap *bitmap, + struct tag_hinic5_cqm_bitmap_range *bp_range, + struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + u32 xid, bool update_last); +u32 hinic5_cqm_bitmap_alloc_reserved(struct tag_hinic5_cqm_bitmap *bitmap, u32 count, u32 index); +void hinic5_cqm_bitmap_free(struct tag_hinic5_cqm_bitmap *bitmap, u32 index, u32 count); +s32 hinic5_cqm_object_table_init(struct tag_hinic5_cqm_handle *hinic5_cqm_handle); +void hinic5_cqm_object_table_uninit(struct tag_hinic5_cqm_handle *hinic5_cqm_handle); +s32 hinic5_cqm_object_table_insert(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_object_table *object_table, + u32 index, struct tag_hinic5_cqm_object *obj, bool bh); +void hinic5_cqm_object_table_remove(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_object_table *object_table, + u32 index, const struct tag_hinic5_cqm_object *obj, bool bh); +struct tag_hinic5_cqm_object *hinic5_cqm_object_table_get(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_object_table *object_table, + u32 index, bool bh); +u32 hinic5_cqm_bitmap_alloc_by_xid(struct tag_hinic5_cqm_bitmap *bitmap, u32 count, u32 index); +void hinic5_cqm_swab64(u8 *addr, u32 cnt); +void hinic5_cqm_swab32(u8 *addr, u32 cnt); +bool hinic5_cqm_check_align(u32 data); +u32 hinic5_cqm_shift(u32 data); +s32 hinic5_cqm_buf_list_alloc(struct tag_hinic5_cqm_buf *buf); +s32 hinic5_cqm_buf_alloc(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, struct tag_hinic5_cqm_buf *buf, bool direct); +s32 
hinic5_cqm_buf_alloc_direct(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, struct tag_hinic5_cqm_buf *buf, bool direct); +void hinic5_cqm_buf_free(struct tag_hinic5_cqm_buf *buf, struct device *dev); +void hinic5_cqm_buf_free_cache_inv(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, struct tag_hinic5_cqm_buf *buf, + s32 *inv_flag); +s32 hinic5_cqm_cla_cache_invalid(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, dma_addr_t pa, + u32 cache_size); +void *hinic5_cqm_kmalloc_align(size_t size, gfp_t flags, u16 align_order); +void hinic5_cqm_kfree_align(void *addr); + +#endif /* HINIC5_CQM_BITMAP_TABLE_H */ diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_bloomfilter.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_bloomfilter.c new file mode 100644 index 00000000..c2a3ac1e --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_bloomfilter.c @@ -0,0 +1,517 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/module.h> +#include <linux/vmalloc.h> + +#include "ossl_knl.h" +#include "hinic5_crm.h" +#include "hinic5_hw.h" +#include "hinic5_hwdev.h" +#include "hinic5_typedef_inner.h" + +#include "hinic5_cqm_object.h" +#include "hinic5_cqm_bitmap_table.h" +#include "hinic5_cqm_bat_cla.h" +#include "hinic5_cqm_cmd.h" +#include "hinic5_cqm_main.h" +#include "hinic5_cqm_bloomfilter.h" + +#include "hinic5_cqm_npu_cmd.h" +#include "hinic5_cqm_npu_cmd_defs.h" + +/** + * Prototype : bloomfilter_init_cmd + * Description : host send cmd to ucode to init bloomfilter mem + * Input : void *ex_handle + * Output : None + * Return Value : s32 + * 1.Date : 2016/8/13 + * Modification : Created function + */ +static s32 bloomfilter_init_cmd(struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + struct tag_hinic5_cqm_func_capability *capability = &hinic5_cqm_handle->func_capability; + 
hinic5_cqm_bloomfilter_init_cmd_s *cmd = NULL; + struct tag_hinic5_cqm_cmd_buf *buf_in = NULL; + s32 ret; + + buf_in = hinic5_cqm_cmd_alloc((void *)(hinic5_cqm_handle->ex_handle)); + if (unlikely(buf_in == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_ALLOC_FAIL(buf_in)); + return HINIC5_CQM_FAIL; + } + + /* Fill the command format and convert it to big-endian. */ + buf_in->size = sizeof(hinic5_cqm_bloomfilter_init_cmd_s); + cmd = (hinic5_cqm_bloomfilter_init_cmd_s *)(buf_in->buf); + cmd->bloom_filter_addr = capability->bloomfilter_addr; + cmd->bloom_filter_len = capability->bloomfilter_length; + + hinic5_cqm_swab32((u8 *)cmd, + (sizeof(hinic5_cqm_bloomfilter_init_cmd_s) >> HINIC5_CQM_DW_SHIFT)); + + ret = hinic5_cqm_send_cmd_box((void *)(hinic5_cqm_handle->ex_handle), + HINIC5_CQM_MOD_HINIC5_CQM, HINIC5_CQM_CMD_T_BLOOMFILTER_INIT, buf_in, + NULL, NULL, HINIC5_CQM_CMD_TIMEOUT, + HINIC5_CHANNEL_DEFAULT); + if (ret != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(hinic5_cqm_handle->ex_handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_send_cmd_box)); + hinic5_cqm_err(hinic5_cqm_handle->ex_handle->dev_hdl, "Bloomfilter: %s ret=%d\n", __func__, + ret); + hinic5_cqm_err(hinic5_cqm_handle->ex_handle->dev_hdl, "Bloomfilter: %s: 0x%x 0x%x\n", + __func__, cmd->bloom_filter_addr, + cmd->bloom_filter_len); + hinic5_cqm_cmd_free((void *)(hinic5_cqm_handle->ex_handle), buf_in); + return HINIC5_CQM_FAIL; + } + hinic5_cqm_cmd_free((void *)(hinic5_cqm_handle->ex_handle), buf_in); + return HINIC5_CQM_SUCCESS; +} + +static void hinic5_cqm_func_bloomfilter_uninit(struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + struct tag_hinic5_cqm_bloomfilter_table *bloomfilter_table = &hinic5_cqm_handle->bloomfilter_table; + + if (bloomfilter_table->table) { + mutex_deinit(&bloomfilter_table->lock); + vfree(bloomfilter_table->table); + bloomfilter_table->table = NULL; + } +} + +static s32 hinic5_cqm_func_bloomfilter_init(struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + struct 
tag_hinic5_cqm_bloomfilter_table *bloomfilter_table = NULL; + struct tag_hinic5_cqm_func_capability *capability = NULL; + u32 array_size; + s32 ret; + + bloomfilter_table = &hinic5_cqm_handle->bloomfilter_table; + capability = &hinic5_cqm_handle->func_capability; + + if (capability->bloomfilter_length == 0) { + hinic5_cqm_info(hinic5_cqm_handle->ex_handle->dev_hdl, + "Bloomfilter: bf_length=0, don't need to init bloomfilter\n"); + return HINIC5_CQM_SUCCESS; + } + + /* The unit of bloomfilter_length is 64B(512bits). Each bit is a table + * node. Therefore the value must be shifted 9 bits to the left. + */ + bloomfilter_table->table_size = capability->bloomfilter_length << + HINIC5_CQM_BF_LENGTH_UNIT; + /* The unit of bloomfilter_length is 64B. The unit of array entry is 32B. + */ + array_size = capability->bloomfilter_length << 1; + if (array_size == 0 || array_size > HINIC5_CQM_BF_BITARRAY_MAX) { + hinic5_cqm_err(hinic5_cqm_handle->ex_handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(array_size)); + return HINIC5_CQM_FAIL; + } + + bloomfilter_table->array_mask = array_size - 1; + /* This table is not a bitmap, it is the counter of corresponding bit. 
+ */ + bloomfilter_table->table = vmalloc(bloomfilter_table->table_size * + (sizeof(u32))); + if (unlikely(bloomfilter_table->table == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_ALLOC_FAIL(table)); + return HINIC5_CQM_FAIL; + } + + memset(bloomfilter_table->table, 0, + (bloomfilter_table->table_size * sizeof(u32))); + + /* The bloomfilter must be initialized to 0 by ucode, + * because the bloomfilter is mem mode + */ + if (hinic5_cqm_handle->func_capability.bloomfilter_enable != 0) { + ret = bloomfilter_init_cmd(hinic5_cqm_handle); + if (ret != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(hinic5_cqm_handle->ex_handle->dev_hdl, + "Bloomfilter: bloomfilter_init_cmd ret=%d\n", + ret); + vfree(bloomfilter_table->table); + bloomfilter_table->table = NULL; + return HINIC5_CQM_FAIL; + } + } + + mutex_init(&bloomfilter_table->lock); + + hinic5_cqm_dbg(hinic5_cqm_handle->dev, + "Bloomfilter: table_size=0x%x, array_size=0x%x\n", + bloomfilter_table->table_size, array_size); + return HINIC5_CQM_SUCCESS; +} + +static void hinic5_cqm_fake_bloomfilter_uninit(struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + u32 i, child_func_number = hinic5_cqm_get_child_func_number(hinic5_cqm_handle); + + if (!HINIC5_CQM_IS_FAKE_PARENT(hinic5_cqm_handle)) + return; + + for (i = 0; i < child_func_number; i++) { + hinic5_cqm_func_bloomfilter_uninit(hinic5_cqm_handle->fake_hinic5_cqm_handle[i]); + } +} + +static s32 hinic5_cqm_fake_bloomfilter_init(struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_handle *fake_hinic5_cqm_handle = NULL; + u32 i, child_func_number; + + if (!HINIC5_CQM_IS_FAKE_PARENT(hinic5_cqm_handle)) + return HINIC5_CQM_SUCCESS; + + child_func_number = hinic5_cqm_get_child_func_number(hinic5_cqm_handle); + + for (i = 0; i < child_func_number; i++) { + fake_hinic5_cqm_handle = hinic5_cqm_handle->fake_hinic5_cqm_handle[i]; + if (hinic5_cqm_func_bloomfilter_init(fake_hinic5_cqm_handle) != 
HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, + HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_func_bloomfilter_init)); + goto bloomfilter_init_err; + } + } + + return HINIC5_CQM_SUCCESS; + +bloomfilter_init_err: + hinic5_cqm_fake_bloomfilter_uninit(hinic5_cqm_handle); + return HINIC5_CQM_FAIL; +} + +/** + * Prototype : hinic5_cqm_bloomfilter_init + * Description : initialize the bloomfilter of hinic5_cqm + * Input : void *ex_handle + * Output : None + * Return Value : s32 + * 1.Date : 2016/7/6 + * Modification : Created function + */ +s32 hinic5_cqm_bloomfilter_init(void *ex_handle) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + + if (hinic5_cqm_fake_bloomfilter_init(hinic5_cqm_handle) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, + HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_fake_bloomfilter_init)); + return HINIC5_CQM_FAIL; + } + + if (hinic5_cqm_func_bloomfilter_init(hinic5_cqm_handle) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, + HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_func_bloomfilter_init)); + goto bloomfilter_init_err; + } + + return HINIC5_CQM_SUCCESS; + +bloomfilter_init_err: + hinic5_cqm_fake_bloomfilter_uninit(hinic5_cqm_handle); + return HINIC5_CQM_FAIL; +} + +/** + * Prototype : hinic5_cqm_bloomfilter_uninit + * Description : uninitialize the bloomfilter of hinic5_cqm + * Input : void *ex_handle + * Output : None + * Return Value : void + * 1.Date : 2016/7/6 + * Modification : Created function + */ +void hinic5_cqm_bloomfilter_uninit(void *ex_handle) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + + hinic5_cqm_fake_bloomfilter_uninit(hinic5_cqm_handle); + hinic5_cqm_func_bloomfilter_uninit(hinic5_cqm_handle); +} + 
+/** + * Prototype : hinic5_cqm_bloomfilter_cmd + * Description : host send bloomfilter api cmd to ucode + * Input : void *ex_handle + * u32 op, + * u32 k_flag + * u64 id, + * Output : None + * Return Value : s32 + * 1.Date : 2016/7/7 + * Modification : Created function + */ +s32 hinic5_cqm_bloomfilter_cmd(void *ex_handle, u16 func_id, u32 op, u32 k_flag, u64 id) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_hinic5_cqm_cmd_buf *buf_in = NULL; + hinic5_cqm_bloomfilter_cmd_s *cmd = NULL; + s32 ret; + + buf_in = hinic5_cqm_cmd_alloc(ex_handle); + if (unlikely(buf_in == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_ALLOC_FAIL(buf_in)); + return HINIC5_CQM_FAIL; + } + + /* Fill the command format and convert it to big-endian. */ + buf_in->size = sizeof(hinic5_cqm_bloomfilter_cmd_s); + cmd = (hinic5_cqm_bloomfilter_cmd_s *)(buf_in->buf); + memset((void *)cmd, 0, sizeof(hinic5_cqm_bloomfilter_cmd_s)); + cmd->func_id = func_id; + cmd->k_en = k_flag; + cmd->index_h = (u32)(id >> HINIC5_CQM_DW_OFFSET); + cmd->index_l = (u32)(id & HINIC5_CQM_DW_MASK); + + hinic5_cqm_swab32((u8 *)cmd, (sizeof(hinic5_cqm_bloomfilter_cmd_s) >> HINIC5_CQM_DW_SHIFT)); + + ret = hinic5_cqm_send_cmd_box(ex_handle, HINIC5_CQM_MOD_HINIC5_CQM, (u8)op, buf_in, NULL, + NULL, HINIC5_CQM_CMD_TIMEOUT, HINIC5_CHANNEL_DEFAULT); + if (ret != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_send_cmd_box)); + hinic5_cqm_err(handle->dev_hdl, "Bloomfilter: bloomfilter_cmd ret=%d\n", + ret); + hinic5_cqm_err(handle->dev_hdl, "Bloomfilter: op=0x%x, cmd: 0x%x 0x%x 0x%x 0x%x\n", + op, *((u32 *)(void *)cmd), *(((u32 *)(void *)cmd) + HINIC5_CQM_DW_INDEX1), + *(((u32 *)(void *)cmd) + HINIC5_CQM_DW_INDEX2), + *(((u32 *)(void *)cmd) + HINIC5_CQM_DW_INDEX3)); + hinic5_cqm_cmd_free(ex_handle, buf_in); + return HINIC5_CQM_FAIL; + } + + hinic5_cqm_cmd_free(ex_handle, buf_in); + + return HINIC5_CQM_SUCCESS; +} + +STATIC struct tag_hinic5_cqm_handle 
*hinic5_cqm_get_func_hinic5_cqm_handle(struct hinic5_hwdev *ex_handle, u16 func_id) +{ + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + u32 child_func_start, child_func_number; + + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ex_handle)); + return NULL; + } + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(ex_handle->hinic5_cqm_hdl); + if (unlikely(hinic5_cqm_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(hinic5_cqm_handle)); + return NULL; + } + + /* function id is PF/VF */ + if (func_id == hinic5_global_func_id(ex_handle)) + return hinic5_cqm_handle; + + if (!HINIC5_CQM_IS_FAKE_PARENT(hinic5_cqm_handle)) { + hinic5_cqm_err(ex_handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(HINIC5_CQM_FAKE_FUNC_TYPE(hinic5_cqm_handle))); + return NULL; + } + + child_func_start = hinic5_cqm_get_child_func_start(hinic5_cqm_handle); + child_func_number = hinic5_cqm_get_child_func_number(hinic5_cqm_handle); + /* function id is fake vf */ + if (func_id >= child_func_start && (func_id < (child_func_start + child_func_number))) + return hinic5_cqm_handle->fake_hinic5_cqm_handle[func_id - (u16)child_func_start]; + + return NULL; +} + +/** + * Prototype : hinic5_cqm_bloomfilter_inc + * Description : The reference counting field is added to the ID of the + * bloomfilter. 
+ * Input : void *ex_handle + * u64 id--hash value + * Output : None + * Return Value : s32 + * 1.Date : 2016/7/7 + * Modification : Created function + */ +s32 hinic5_cqm_bloomfilter_inc(void *ex_handle, u16 func_id, u64 id) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_hinic5_cqm_bloomfilter_table *bloomfilter_table = NULL; + u32 array_tmp[HINIC5_CQM_BF_SECTION_NUMBER] = {0}; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + u32 array_index, array_bit, i; + u32 k_flag = 0; + + if (!ex_handle) + return HINIC5_CQM_FAIL; + + hinic5_cqm_dbg_on(hinic5_cqm_verbose, handle->dev_hdl, + "Bloomfilter: func_id: %d, inc id=0x%llx\n", func_id, id); + + hinic5_cqm_handle = hinic5_cqm_get_func_hinic5_cqm_handle(ex_handle, func_id); + if (unlikely(hinic5_cqm_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(hinic5_cqm_handle)); + return HINIC5_CQM_FAIL; + } + + if (hinic5_cqm_handle->func_capability.bloomfilter_enable == 0) { + hinic5_cqm_info(handle->dev_hdl, "Bloomfilter inc: bloomfilter is disable\n"); + return HINIC5_CQM_SUCCESS; + } + + /* |(array_index=0)32B(array_bit:256bits)|(array_index=1)32B(256bits)| + * array_index = 0~bloomfilter_table->table_size/256bit + * array_bit = 0~255 + */ + bloomfilter_table = &hinic5_cqm_handle->bloomfilter_table; + + /* The array index identifies a 32-byte entry. 
*/ + array_index = (u32)HINIC5_CQM_BF_BITARRAY_INDEX(id, bloomfilter_table->array_mask); + /* convert the unit of array_index to bit */ + array_index = array_index << HINIC5_CQM_BF_ENTRY_SIZE_UNIT; + hinic5_cqm_dbg_on(hinic5_cqm_verbose, handle->dev_hdl, + "Bloomfilter: inc id=0x%llx, array_index=0x%x\n", id, array_index); + + mutex_lock(&bloomfilter_table->lock); + for (i = 0; i < HINIC5_CQM_BF_SECTION_NUMBER; i++) { + /* the position of the bit in 64-bit section */ + array_bit = (id >> (HINIC5_CQM_BF_SECTION_BASE + i * HINIC5_CQM_BF_SECTION_SIZE)) & HINIC5_CQM_BF_SECTION_MASK; + /* array_bit + number of 32-byte array entries + number of + * 64-bit sections before the section + */ + array_bit = array_bit + array_index + (i * HINIC5_CQM_BF_SECTION_BIT_NUMBER); + + /* array_temp[i] records the index of the bloomfilter. + * It is used to roll back the reference counting of the + * bitarray. + */ + array_tmp[i] = array_bit; + + /* Add one to the corresponding bit in bloomfilter table. + * If the value changes from 0 to 1, change the corresponding + * bit in k_flag. + */ + (bloomfilter_table->table[array_bit])++; + hinic5_cqm_dbg_on(hinic5_cqm_verbose, handle->dev_hdl, + "Bloomfilter: inc bloomfilter_table->table[%d]=0x%x\n", + array_bit, bloomfilter_table->table[array_bit]); + if (bloomfilter_table->table[array_bit] == 1) + k_flag |= (1U << i); + } + + /* send cmd to ucode and set corresponding bit. 
*/ + if (k_flag != 0 && hinic5_cqm_bloomfilter_cmd(ex_handle, func_id, HINIC5_CQM_CMD_T_BLOOMFILTER_SET, + k_flag, id) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, + HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_bloomfilter_cmd_inc)); + for (i = 0; i < HINIC5_CQM_BF_SECTION_NUMBER; i++) { + array_bit = array_tmp[i]; + (bloomfilter_table->table[array_bit])--; + } + mutex_unlock(&bloomfilter_table->lock); + return HINIC5_CQM_FAIL; + } + + mutex_unlock(&bloomfilter_table->lock); + + return HINIC5_CQM_SUCCESS; +} +EXPORT_SYMBOL(hinic5_cqm_bloomfilter_inc); + +/** + * Prototype : hinic5_cqm_bloomfilter_dec + * Description : The reference counting field is decreased to the ID of the + * bloomfilter. + * Input : void *ex_handle + * u64 id--hash value + * Output : None + * Return Value : s32 + * 1.Date : 2016/7/7 + * Modification : Created function + */ +s32 hinic5_cqm_bloomfilter_dec(void *ex_handle, u16 func_id, u64 id) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_hinic5_cqm_bloomfilter_table *bloomfilter_table = NULL; + u32 array_tmp[HINIC5_CQM_BF_SECTION_NUMBER] = {0}; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + u32 array_index, array_bit, i; + u32 k_flag = 0; + + if (!ex_handle) + return HINIC5_CQM_FAIL; + + hinic5_cqm_handle = hinic5_cqm_get_func_hinic5_cqm_handle(ex_handle, func_id); + if (unlikely(hinic5_cqm_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(hinic5_cqm_handle)); + return HINIC5_CQM_FAIL; + } + + if (hinic5_cqm_handle->func_capability.bloomfilter_enable == 0) { + hinic5_cqm_info(handle->dev_hdl, "Bloomfilter dec: bloomfilter is disable\n"); + return HINIC5_CQM_SUCCESS; + } + + bloomfilter_table = &hinic5_cqm_handle->bloomfilter_table; + + /* The array index identifies a 32-byte entry. 
*/ + array_index = (u32)HINIC5_CQM_BF_BITARRAY_INDEX(id, bloomfilter_table->array_mask); + hinic5_cqm_dbg_on(hinic5_cqm_verbose, handle->dev_hdl, + "Bloomfilter: dec id=0x%llx, array_index=0x%x\n", id, array_index); + + mutex_lock(&bloomfilter_table->lock); + for (i = 0; i < HINIC5_CQM_BF_SECTION_NUMBER; i++) { + /* the position of the bit in 64-bit section */ + array_bit = (id >> (HINIC5_CQM_BF_SECTION_BASE + i * HINIC5_CQM_BF_SECTION_SIZE)) & + HINIC5_CQM_BF_SECTION_MASK; + /* array_bit + number of 32-byte array entries + number of + * 64-bit sections before the section + */ + array_bit = array_bit + (array_index << 0x8) + (i * 0x40); + + /* array_temp[i] records the index of the bloomfilter. + * It is used to roll back the reference counting of the + * bitarray. + */ + array_tmp[i] = array_bit; + + /* Deduct one to the corresponding bit in bloomfilter table. + * If the value changes from 1 to 0, change the corresponding + * bit in k_flag. Do not continue -1 when the reference counting + * value of the bit is 0. + */ + if (bloomfilter_table->table[array_bit] != 0) { + bloomfilter_table->table[array_bit]--; + hinic5_cqm_dbg_on(hinic5_cqm_verbose, handle->dev_hdl, + "Bloomfilter: dec bloomfilter_table->table[%d]=0x%x\n", + array_bit, bloomfilter_table->table[array_bit]); + if (bloomfilter_table->table[array_bit] == 0) + k_flag |= (1U << i); + } + } + + /* send cmd to ucode and clear corresponding bit. 
*/ + if (k_flag != 0 && hinic5_cqm_bloomfilter_cmd(ex_handle, func_id, HINIC5_CQM_CMD_T_BLOOMFILTER_CLEAR, + k_flag, id) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_bloomfilter_cmd_dec)); + for (i = 0; i < HINIC5_CQM_BF_SECTION_NUMBER; i++) { + array_bit = array_tmp[i]; + (bloomfilter_table->table[array_bit])++; + } + mutex_unlock(&bloomfilter_table->lock); + return HINIC5_CQM_FAIL; + } + + mutex_unlock(&bloomfilter_table->lock); + + return HINIC5_CQM_SUCCESS; +} +EXPORT_SYMBOL(hinic5_cqm_bloomfilter_dec); diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_bloomfilter.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_bloomfilter.h new file mode 100644 index 00000000..bf92d2e6 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_bloomfilter.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_CQM_BLOOMFILTER_H +#define HINIC5_CQM_BLOOMFILTER_H + +#include <linux/types.h> +#include <linux/mutex.h> + +/* Bloomfilter entry size is 32B(256bit), whitch index is the 48-32-bit of the + * hash. |31~26|25~20|19~14|13~8| will be used to locate 4 bloom filter section + * in one entry. k_en[3:0] used to specify the section of bloom filter. + */ +#define HINIC5_CQM_BF_ENTRY_SIZE 32 +#define HINIC5_CQM_BF_ENTRY_SIZE_UNIT 8 +#define HINIC5_CQM_BF_BITARRAY_MAX BIT(17) + +#define HINIC5_CQM_BF_SECTION_NUMBER 4 +#define HINIC5_CQM_BF_SECTION_BASE 8 +#define HINIC5_CQM_BF_SECTION_SIZE 6 +#define HINIC5_CQM_BF_SECTION_MASK 0x3f +#define HINIC5_CQM_BF_SECTION_BIT_NUMBER 64 + +#define HINIC5_CQM_BF_ARRAY_INDEX_OFFSET 32 +#define HINIC5_CQM_BF_BITARRAY_INDEX(id, mask) \ + (((id) >> HINIC5_CQM_BF_ARRAY_INDEX_OFFSET) & (mask)) + +/* The unit of bloomfilter_length is 64B(512bits). 
*/ +#define HINIC5_CQM_BF_LENGTH_UNIT 9 + +#define HINIC5_CQM_DW_MASK 0xffffffff +#define HINIC5_CQM_DW_OFFSET 32 +#define HINIC5_CQM_DW_INDEX0 0 +#define HINIC5_CQM_DW_INDEX1 1 +#define HINIC5_CQM_DW_INDEX2 2 +#define HINIC5_CQM_DW_INDEX3 3 + +struct tag_hinic5_cqm_bloomfilter_table { + u32 *table; + u32 table_size; /* The unit is bit */ + u32 array_mask; /* The unit of array entry is 32B, used to address entry + */ + struct mutex lock; +}; + +/* only for test */ +s32 hinic5_cqm_bloomfilter_cmd(void *ex_handle, u16 func_id, u32 op, u32 k_flag, u64 id); +s32 hinic5_cqm_bloomfilter_init(void *ex_handle); +void hinic5_cqm_bloomfilter_uninit(void *ex_handle); +s32 hinic5_cqm_bloomfilter_inc(void *ex_handle, u16 func_id, u64 id); +s32 hinic5_cqm_bloomfilter_dec(void *ex_handle, u16 func_id, u64 id); + +#endif /* HINIC5_CQM_BLOOMFILTER_H */ diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_cmd.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_cmd.c new file mode 100644 index 00000000..d30c6b6d --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_cmd.c @@ -0,0 +1,207 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/module.h> +#include <linux/vmalloc.h> + +#include "ossl_knl.h" +#include "hinic5_hw.h" +#include "hinic5_hwdev.h" + +#include "hinic5_cqm_bitmap_table.h" +#include "hinic5_cqm_bat_cla.h" +#include "hinic5_cqm_main.h" + +/** + * Prototype : hinic5_cqm_cmd_alloc + * Description : Apply for a cmd buffer. The buffer size is fixed to 2 KB. + * The buffer content is not cleared and needs to be cleared by + * services. 
+ * Input : void *ex_handle + * Output : None + * Return Value : struct tag_hinic5_cqm_cmd_buf * + * 1.Date : 2015/4/15 + * Modification : Created function + */ +struct tag_hinic5_cqm_cmd_buf *hinic5_cqm_cmd_alloc(void *ex_handle) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ex_handle)); + return NULL; + } + + atomic_inc(&handle->hw_stats.hinic5_cqm_stats.hinic5_cqm_cmd_alloc_cnt); + + return (struct tag_hinic5_cqm_cmd_buf *)(void *)hinic5_alloc_cmd_buf(ex_handle); +} +EXPORT_SYMBOL(hinic5_cqm_cmd_alloc); + +/** + * Prototype : hinic5_cqm_cmd_free + * Description : Release for a cmd buffer. + * Input : void *ex_handle + * struct tag_hinic5_cqm_cmd_buf *cmd_buf + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void hinic5_cqm_cmd_free(void *ex_handle, struct tag_hinic5_cqm_cmd_buf *cmd_buf) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ex_handle)); + return; + } + if (unlikely(cmd_buf == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(cmd_buf)); + return; + } + if (unlikely(cmd_buf->buf == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(buf)); + return; + } + + atomic_inc(&handle->hw_stats.hinic5_cqm_stats.hinic5_cqm_cmd_free_cnt); + + hinic5_free_cmd_buf(ex_handle, (struct hinic5_cmd_buf *)(void *)cmd_buf); +} +EXPORT_SYMBOL(hinic5_cqm_cmd_free); + +/** + * Prototype : hinic5_cqm_send_cmd_box + * Description : Send a cmd message in box mode. + * This interface will mount a completion quantity, + * causing sleep. 
+ * Input : void *ex_handle + * u8 mod + * u8 cmd, + * struct tag_hinic5_cqm_cmd_buf *buf_in + * struct tag_hinic5_cqm_cmd_buf *buf_out + * u64 *out_param + * u32 timeout + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/15 + * Modification : Created function + */ +s32 hinic5_cqm_send_cmd_box(void *ex_handle, u8 mod, u8 cmd, struct tag_hinic5_cqm_cmd_buf *buf_in, + struct tag_hinic5_cqm_cmd_buf *buf_out, u64 *out_param, u32 timeout, + u16 channel) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ex_handle)); + return HINIC5_CQM_FAIL; + } + if (unlikely(buf_in == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(buf_in)); + return HINIC5_CQM_FAIL; + } + if (unlikely(buf_in->buf == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(buf)); + return HINIC5_CQM_FAIL; + } + + atomic_inc(&handle->hw_stats.hinic5_cqm_stats.hinic5_cqm_send_cmd_box_cnt); + + return hinic5_cmdq_detail_resp(ex_handle, mod, cmd, + (struct hinic5_cmd_buf *)(void *)buf_in, + (struct hinic5_cmd_buf *)(void *)buf_out, + out_param, timeout, channel); +} +EXPORT_SYMBOL(hinic5_cqm_send_cmd_box); + +/** + * Prototype : hinic5_cqm_lb_send_cmd_box + * Description : Send a cmd message in box mode and open cos_id. + * This interface will mount a completion quantity, + * causing sleep. 
+ * Input : void *ex_handle + * u8 mod + * u8 cmd + * u8 cos_id + * struct tag_hinic5_cqm_cmd_buf *buf_in + * struct tag_hinic5_cqm_cmd_buf *buf_out + * u64 *out_param + * u32 timeout + * Output : None + * Return Value : s32 + * 1.Date : 2020/4/9 + * Modification : Created function + */ +s32 hinic5_cqm_lb_send_cmd_box(void *ex_handle, u8 mod, u8 cmd, u8 cos_id, + struct tag_hinic5_cqm_cmd_buf *buf_in, struct tag_hinic5_cqm_cmd_buf *buf_out, + u64 *out_param, u32 timeout, u16 channel) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + + if (unlikely(buf_in == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(buf_in)); + return HINIC5_CQM_FAIL; + } + if (unlikely(buf_in->buf == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(buf_in->buf)); + return HINIC5_CQM_FAIL; + } + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ex_handle)); + return HINIC5_CQM_FAIL; + } + + atomic_inc(&handle->hw_stats.hinic5_cqm_stats.hinic5_cqm_send_cmd_box_cnt); + + return hinic5_cos_id_detail_resp(ex_handle, mod, cmd, cos_id, + (struct hinic5_cmd_buf *)(void *)buf_in, + (struct hinic5_cmd_buf *)(void *)buf_out, + out_param, timeout, channel); +} +EXPORT_SYMBOL(hinic5_cqm_lb_send_cmd_box); + +/** + * Prototype : hinic5_cqm_send_cmd_imm + * Description : Send a cmd message in imm mode. + * This interface will mount a completion quantity, + * causing sleep. 
+ * Input : void *ex_handle + * u8 mod + * u8 cmd + * struct tag_hinic5_cqm_cmd_buf *buf_in + * u64 *out_param + * u32 timeout + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/15 + * Modification : Created function + */ +s32 hinic5_cqm_send_cmd_imm(void *ex_handle, u8 mod, u8 cmd, struct tag_hinic5_cqm_cmd_buf *buf_in, + u64 *out_param, u32 timeout, u16 channel) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + + if (unlikely(buf_in == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(buf_in)); + return HINIC5_CQM_FAIL; + } + if (unlikely(buf_in->buf == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(buf)); + return HINIC5_CQM_FAIL; + } + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ex_handle)); + return HINIC5_CQM_FAIL; + } + + atomic_inc(&handle->hw_stats.hinic5_cqm_stats.hinic5_cqm_send_cmd_imm_cnt); + + return hinic5_cmdq_direct_resp((void *)ex_handle, mod, cmd, + (struct hinic5_cmd_buf *)(void *)buf_in, + out_param, timeout, channel); +} +EXPORT_SYMBOL(hinic5_cqm_send_cmd_imm); diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_cmd.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_cmd.h new file mode 100644 index 00000000..32186de5 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_cmd.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_CQM_CMD_H +#define HINIC5_CQM_CMD_H + +#include <linux/types.h> + +#include "hinic5_cqm_object.h" + +#ifdef __cplusplus +#if __cplusplus +extern "C" { +#endif +#endif /* __cplusplus */ + +#ifndef HI1825V100 +#define HINIC5_CQM_CMD_TIMEOUT 10000 /* ms */ +#else +#define HINIC5_CQM_CMD_TIMEOUT 1000000 /* ms */ +#endif + +struct tag_hinic5_cqm_cmd_buf *hinic5_cqm_cmd_alloc(void *ex_handle); +void hinic5_cqm_cmd_free(void *ex_handle, struct tag_hinic5_cqm_cmd_buf *cmd_buf); +s32 
hinic5_cqm_send_cmd_box(void *ex_handle, u8 mod, u8 cmd, struct tag_hinic5_cqm_cmd_buf *buf_in, + struct tag_hinic5_cqm_cmd_buf *buf_out, u64 *out_param, u32 timeout, + u16 channel); +s32 hinic5_cqm_lb_send_cmd_box(void *ex_handle, u8 mod, u8 cmd, u8 cos_id, + struct tag_hinic5_cqm_cmd_buf *buf_in, struct tag_hinic5_cqm_cmd_buf *buf_out, + u64 *out_param, u32 timeout, u16 channel); +s32 hinic5_cqm_send_cmd_imm(void *ex_handle, u8 mod, u8 cmd, struct tag_hinic5_cqm_cmd_buf *buf_in, + u64 *out_param, u32 timeout, u16 channel); + +#ifdef __cplusplus +#if __cplusplus +} +#endif +#endif /* __cplusplus */ + +#endif /* HINIC5_CQM_CMD_H */ diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_cmdq.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_cmdq.h new file mode 100644 index 00000000..ce440cdf --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_cmdq.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_CQM_CMDQ_H +#define HINIC5_CQM_CMDQ_H + +#include "ossl_knl.h" +#include "hinic5_cqm_npu_cmd_defs.h" +#include "hinic5_cqm_main.h" + +struct hinic5_cqm_cmdq_ops { + s32 (*prepare_cmd_buf_bat_update)(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_cmd_buf *buf_in, + struct tag_hinic5_cqm_bat_update_param *param, + u8 *cmd); + void (*prepare_cmd_buf_cla_update)(hinic5_cqm_cla_update_cmd_s *cmd_info, + struct tag_hinic5_cqm_cmd_buf *buf_in, u8 *cmd); + void (*prepare_cmd_cache_invalidate)(hinic5_cqm_cla_cache_invalid_cmd_s *cmd_info, + struct tag_hinic5_cqm_cmd_buf *buf_in, u8 *cmd); +}; + +struct hinic5_cqm_cmdq_ops *hinic5_cqm_cmdq_get_182x_ops(void); +struct hinic5_cqm_cmdq_ops *hinic5_cqm_cmdq_get_187x_ops(void); + +void hinic5_cqm_cmdq_adapt_init(struct tag_hinic5_cqm_handle *hinic5_cqm_handle); +#endif diff --git 
a/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_cmdq_adapt.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_cmdq_adapt.c new file mode 100644 index 00000000..4e21c159 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_cmdq_adapt.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include "hinic5_cqm_cmdq.h" +#include "hinic5_hwdev.h" + +void hinic5_cqm_cmdq_adapt_init(struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + if (!COMM_SUPPORT_HTN_CMD(hinic5_cqm_handle->ex_handle)) { + hinic5_cqm_handle->cmdq_ops = hinic5_cqm_cmdq_get_182x_ops(); + } else { + hinic5_cqm_handle->cmdq_ops = hinic5_cqm_cmdq_get_187x_ops(); + } +} diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_db.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_db.c new file mode 100644 index 00000000..b70d58eb --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_db.c @@ -0,0 +1,568 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/module.h> +#include <linux/vmalloc.h> + +#include "ossl_knl.h" +#include "hinic5_crm.h" +#include "hinic5_hw.h" +#include "hinic5_mt.h" +#include "hinic5_hwdev.h" + +#include "hinic5_cqm_object.h" +#include "hinic5_cqm_bitmap_table.h" +#include "hinic5_cqm_bat_cla.h" +#include "hinic5_cqm_object_intern.h" +#include "hinic5_cqm_main.h" +#include "hinic5_cqm_db.h" + +/** + * Prototype : hinic5_cqm_db_addr_alloc + * Description : Apply for a page of hardware doorbell and dwqe. + * The indexes are the same. The obtained addresses are physical + * addresses. Each function has a maximum of 1K addresses(DB). 
+ * Input : void *ex_handle + * void __iomem **db_addr, + * void __iomem **dwqe_addr + * Output : None + * Return Value : s32 + * 1.Date : 2015/5/5 + * Modification : Created function + */ +s32 hinic5_cqm_db_addr_alloc(void *ex_handle, void __iomem **db_addr, + void __iomem **dwqe_addr) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ex_handle)); + return HINIC5_CQM_FAIL; + } + if (unlikely(db_addr == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(db_addr)); + return HINIC5_CQM_FAIL; + } + if (unlikely(dwqe_addr == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(dwqe_addr)); + return HINIC5_CQM_FAIL; + } + + atomic_inc(&handle->hw_stats.hinic5_cqm_stats.hinic5_cqm_db_addr_alloc_cnt); + + return hinic5_alloc_db_addr(ex_handle, db_addr, dwqe_addr); +} + +s32 hinic5_cqm_db_phy_addr_alloc(void *ex_handle, u64 *db_paddr, u64 *dwqe_addr) +{ + return hinic5_alloc_db_phy_addr(ex_handle, db_paddr, dwqe_addr); +} + +/** + * Prototype : hinic5_cqm_db_addr_free + * Description : Release a page of hardware doorbell and dwqe. 
+ * Input : void *ex_handle + * const void __iomem **db_addr, + * void __iomem **dwqe_addr + * Output : None + * Return Value : void + * 1.Date : 2015/5/5 + * Modification : Created function + */ +void hinic5_cqm_db_addr_free(void *ex_handle, const void __iomem *db_addr, + void __iomem *dwqe_addr) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ex_handle)); + return; + } + + atomic_inc(&handle->hw_stats.hinic5_cqm_stats.hinic5_cqm_db_addr_free_cnt); + + hinic5_free_db_addr(ex_handle, db_addr, dwqe_addr); +} + +static void hinic5_cqm_db_phy_addr_free(void *ex_handle, const u64 *db_paddr, const u64 *dwqe_addr) +{ + hinic5_free_db_phy_addr(ex_handle, *db_paddr, *dwqe_addr); +} + +static bool hinic5_cqm_need_db_init(s32 service) +{ + switch (service) { + case HINIC5_CQM_SERVICE_T_NIC: + case HINIC5_CQM_SERVICE_T_OVS: + case HINIC5_CQM_SERVICE_T_IPSEC: + case HINIC5_CQM_SERVICE_T_VIRTIO: + case HINIC5_CQM_SERVICE_T_PPA: + return false; + default: + return true; + } +} + +/** + * Prototype : hinic5_cqm_db_init + * Description : Initialize the doorbell of the HINIC5_CQM. + * Input : void *ex_handle + * Output : None + * Return Value : s32 + * 1.Date : 2015/7/6 + * Modification : Created function + */ +s32 hinic5_cqm_db_init(void *ex_handle) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_service *service = NULL; + s32 i; + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + + /* Allocate hardware doorbells to services. 
*/ + for (i = 0; i < HINIC5_CQM_SERVICE_T_MAX; i++) { + service = &hinic5_cqm_handle->service[i]; + if (!hinic5_cqm_need_db_init(i) || !service->valid) + continue; + + if (hinic5_cqm_db_addr_alloc(ex_handle, &service->hardware_db_vaddr, + &service->dwqe_vaddr) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, + HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_db_addr_alloc)); + break; + } + + if (hinic5_cqm_db_phy_addr_alloc(handle, &service->hardware_db_paddr, + &service->dwqe_paddr) != + HINIC5_CQM_SUCCESS) { + hinic5_cqm_db_addr_free(ex_handle, service->hardware_db_vaddr, + service->dwqe_vaddr); + hinic5_cqm_err(handle->dev_hdl, + HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_db_phy_addr_alloc)); + break; + } + } + + if (i != HINIC5_CQM_SERVICE_T_MAX) { + i--; + for (; i >= 0; i--) { + service = &hinic5_cqm_handle->service[i]; + if (!hinic5_cqm_need_db_init(i) || !service->valid) + continue; + + hinic5_cqm_db_addr_free(ex_handle, service->hardware_db_vaddr, + service->dwqe_vaddr); + hinic5_cqm_db_phy_addr_free(ex_handle, + &service->hardware_db_paddr, + &service->dwqe_paddr); + } + return HINIC5_CQM_FAIL; + } + + return HINIC5_CQM_SUCCESS; +} + +/** + * Prototype : hinic5_cqm_db_uninit + * Description : Deinitialize the doorbell of the HINIC5_CQM. + * Input : void *ex_handle + * Output : None + * Return Value : void + * 1.Date : 2015/7/6 + * Modification : Created function + */ +void hinic5_cqm_db_uninit(void *ex_handle) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_service *service = NULL; + s32 i; + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + + /* Release hardware doorbell. 
*/ + for (i = 0; i < HINIC5_CQM_SERVICE_T_MAX; i++) { + service = &hinic5_cqm_handle->service[i]; + if (service->valid && hinic5_cqm_need_db_init(i)) { + hinic5_cqm_db_addr_free(ex_handle, service->hardware_db_vaddr, + service->dwqe_vaddr); + hinic5_cqm_db_phy_addr_free(ex_handle, &service->hardware_db_paddr, + &service->dwqe_paddr); + } + } +} + +/** + * Prototype : hinic5_cqm_get_db_addr + * Description : Return hardware DB vaddr. + * Input : void *ex_handle + * u32 service_type + * Output : None + * Return Value : void * + * 1.Date : 2015/7/6 + * Modification : Created function + */ +void *hinic5_cqm_get_db_addr(void *ex_handle, u32 service_type) +{ + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_service *service = NULL; + struct hinic5_hwdev *handle = NULL; + + if (service_type >= HINIC5_CQM_SERVICE_T_MAX) { + pr_err("service_type is out of bounds\n"); + return NULL; + } + + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ex_handle)); + return NULL; + } + handle = (struct hinic5_hwdev *)ex_handle; + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + + if (unlikely(hinic5_cqm_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(hinic5_cqm_handle)); + return NULL; + } + + service = &hinic5_cqm_handle->service[service_type]; + + return (void *)service->hardware_db_vaddr; +} +EXPORT_SYMBOL(hinic5_cqm_get_db_addr); + +/** + * Prototype : hinic5_cqm_get_db_addr + * Description : Return hardware DB Phyaddr. 
+ * Input : void *ex_handle + * u32 service_type + * Output : None + * Return Value : void * + * 1.Date : 2015/7/6 + * Modification : Created function + */ +s32 hinic5_cqm_get_hardware_db_addr(void *ex_handle, u64 *addr, + enum hinic5_service_type service_type) +{ + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_service *service = NULL; + struct hinic5_hwdev *handle = NULL; + + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ex_handle)); + return HINIC5_CQM_FAIL; + } + if (unlikely(addr == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(addr)); + return HINIC5_CQM_FAIL; + } + + if (service_type < SERVICE_T_NIC || service_type >= SERVICE_T_MAX) { + pr_err("%s service_type = %d state is error\n", __func__, + service_type); + return HINIC5_CQM_FAIL; + } + + handle = (struct hinic5_hwdev *)ex_handle; + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + + if (unlikely(hinic5_cqm_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(hinic5_cqm_handle)); + return HINIC5_CQM_FAIL; + } + + service = &hinic5_cqm_handle->service[service_type]; + + *addr = service->hardware_db_paddr; + return HINIC5_CQM_SUCCESS; +} +EXPORT_SYMBOL(hinic5_cqm_get_hardware_db_addr); + +/** + * Prototype : hinic5_cqm_ring_hardware_db + * Description : Ring hardware DB to chip. + * Input : void *ex_handle + * u32 service_type: Each kernel-mode service is allocated a + * hardware db page. + * u8 db_count: The bit[7:0] of PI can't be store in 64-bit db. 
+ * u64 db: It contains the content of db, whitch is organized by + * service, including big-endian conversion + * Output : None + * Return Value : s32 + * 1.Date : 2015/5/5 + * Modification : Created function + */ +s32 hinic5_cqm_ring_hardware_db(void *ex_handle, u32 service_type, u8 db_count, u64 db) +{ + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_service *service = NULL; + struct hinic5_hwdev *handle = NULL; +#if defined(__UEFI__) && defined(__HIFC__) + EFI_STATUS Status; + u64 *offset = NULL; +#endif + + if (service_type >= HINIC5_CQM_SERVICE_T_MAX) { + pr_err("service_type is out of bounds\n"); + return HINIC5_CQM_FAIL; + } + if (!ex_handle) + return HINIC5_CQM_FAIL; + handle = (struct hinic5_hwdev *)ex_handle; + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + if (!hinic5_cqm_handle) + return HINIC5_CQM_FAIL; + + service = &hinic5_cqm_handle->service[service_type]; + + /* Considering the performance of ringing hardware db, + * the parameter is not checked. + */ +#if defined(__UEFI__) && defined(__HIFC__) + offset = ((u64 *)service->hardware_db_vaddr + db_count); + MemoryFence(); + Status = ((BUS_IO_PROTOCOL *)handle->pcidev_hdl)->Mem.Write(handle->pcidev_hdl, + EfiBusIoWidthUint64, 0x2, + (u64)offset, 1, + (void *)&db); + MemoryFence(); + + if (EFI_ERROR(Status)) + DEBUGPRINT(CRITICAL, "Hifc: write doorbell fails: %r\n", + Status); +#else + /* Considering the performance of ringing hardware db, + * the parameter is not checked. + */ + wmb(); + *((u64 *)service->hardware_db_vaddr + db_count) = db; +#endif + return HINIC5_CQM_SUCCESS; +} +EXPORT_SYMBOL(hinic5_cqm_ring_hardware_db); + +/** + * Prototype : hinic5_cqm_ring_hardware_db_fc + * Description : Ring fake vf hardware DB to chip. + * Input : void *ex_handle + * u32 service_type: Each kernel-mode service is allocated a + * hardware db page. + * u8 db_count: The bit[7:0] of PI can't be store in 64-bit db. 
+ * u8 pagenum: Indicates the doorbell address offset of the fake + * VFID. + * u64 db: It contains the content of db, whitch is organized by + * service, including big-endian conversion. + * Output : None + * Return Value : s32 + * 1.Date : 2015/5/5 + * Modification : Created function + */ +s32 hinic5_cqm_ring_hardware_db_fc(void *ex_handle, u32 service_type, u8 db_count, + u8 pagenum, u64 db) +{ +#define HIFC_DB_FAKE_VF_OFFSET 32 + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_service *service = NULL; + struct hinic5_hwdev *handle = NULL; + void *dbaddr = NULL; + + handle = (struct hinic5_hwdev *)ex_handle; + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + service = &hinic5_cqm_handle->service[service_type]; + /* Considering the performance of ringing hardware db, + * the parameter is not checked. + */ + wmb(); + dbaddr = (u8 *)service->hardware_db_vaddr + + ((pagenum + HIFC_DB_FAKE_VF_OFFSET) * HINIC5_DB_PAGE_SIZE); + *((u64 *)dbaddr + db_count) = db; + return HINIC5_CQM_SUCCESS; +} + +/** + * Prototype : hinic5_cqm_ring_direct_wqe_db + * Description : Ring direct wqe hardware DB to chip. + * Input : void *ex_handle + * u32 service_type: Each kernel-mode service is allocated a + * hardware db page. + * u8 db_count: The bit[7:0] of PI can't be store in 64-bit db. + * void *direct_wqe: The content of direct_wqe. + * u16 length: The length of direct_wqe. 
+ * Output : None + * Return Value : s32 + * 1.Date : 2015/5/5 + * Modification : Created function + */ +s32 hinic5_cqm_ring_direct_wqe_db(void *ex_handle, u32 service_type, u8 db_count, + void *direct_wqe) +{ + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_service *service = NULL; + struct hinic5_hwdev *handle = NULL; + u64 *tmp = (u64 *)direct_wqe; + int i; + + if (!ex_handle) + return HINIC5_CQM_FAIL; + + if (service_type >= HINIC5_CQM_SERVICE_T_MAX) { + pr_err("service_type is out of bounds\n"); + return HINIC5_CQM_FAIL; + } + + handle = (struct hinic5_hwdev *)ex_handle; + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + if (!hinic5_cqm_handle) + return HINIC5_CQM_FAIL; + + service = &hinic5_cqm_handle->service[service_type]; + + /* Considering the performance of ringing hardware db, + * the parameter is not checked. + */ + wmb(); + for (i = 0; i < 0x80 / 0x8; i++) + *((u64 *)service->dwqe_vaddr + 0x40 + i) = *tmp++; + + return HINIC5_CQM_SUCCESS; +} +EXPORT_SYMBOL(hinic5_cqm_ring_direct_wqe_db); + +s32 hinic5_cqm_ring_direct_wqe_db_fc(void *ex_handle, u32 service_type, + void *direct_wqe) +{ + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_service *service = NULL; + struct hinic5_hwdev *handle = NULL; + u64 *tmp = (u64 *)direct_wqe; + int i; + + handle = (struct hinic5_hwdev *)ex_handle; + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + service = &hinic5_cqm_handle->service[service_type]; + + /* Considering the performance of ringing hardware db, + * the parameter is not checked. + */ + wmb(); + *((u64 *)service->dwqe_vaddr + 0x0) = tmp[0x2]; + *((u64 *)service->dwqe_vaddr + 0x1) = tmp[0x3]; + *((u64 *)service->dwqe_vaddr + 0x2) = tmp[0x0]; + *((u64 *)service->dwqe_vaddr + 0x3) = tmp[0x1]; + tmp += 0x4; + + /* The FC use 256B WQE. 
The directwqe is written at block0, + * and the length is 256B + */ + for (i = 0x4; i < 0x20; i++) + *((u64 *)service->dwqe_vaddr + i) = *tmp++; + + return HINIC5_CQM_SUCCESS; +} + +/** + * Prototype : hinic5_cqm_ring_hardware_db_update_pri + * Description : Provides the doorbell interface for the HINIC5_CQM to convert the PRI + * to the CoS. The doorbell transmitted by the service must be + * the host sequence. This interface converts the network + * sequence. + * Input : void *ex_handle + * u32 service_type: Each kernel-mode service is allocated a + * hardware db page. + * u8 db_count: The bit[7:0] of PI can't be store in 64-bit db. + * u64 db: It contains the content of db, whitch is organized by + * service, including big-endian conversion. + * Output : None + * Return Value : s32 + * 1.Date : 2016/11/24 + * Modification : Created function + */ +s32 hinic5_cqm_ring_hardware_db_update_pri(void *ex_handle, u32 service_type, + u8 db_count, u64 db) +{ + struct tag_hinic5_cqm_db_common *db_common = (struct tag_hinic5_cqm_db_common *)(void *)(&db); + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_service *service = NULL; + struct hinic5_hwdev *handle = NULL; + + handle = (struct hinic5_hwdev *)ex_handle; + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + service = &hinic5_cqm_handle->service[service_type]; + + /* the HINIC5_CQM converts the PRI to the CoS */ + db_common->cos = 0x7 - db_common->cos; + + hinic5_cqm_swab32((u8 *)db_common, sizeof(u64) >> HINIC5_CQM_DW_SHIFT); + + /* Considering the performance of ringing hardware db, + * the parameter is not checked. + */ + wmb(); + *((u64 *)service->hardware_db_vaddr + db_count) = db; + + return HINIC5_CQM_SUCCESS; +} + +/** + * Prototype : hinic5_cqm_ring_software_db + * Description : Ring software db. 
+ * Input : struct tag_hinic5_cqm_object *object + * u64 db_record: It contains the content of db, which is + * organized by service, including big-endian + * conversion. For RQ/SQ: This field is filled + * with the doorbell_record area of queue_header. + * For CQ: This field is filled with the value of + * ci_record in queue_header. + * Output : None + * Return Value : s32 + * 1.Date : 2015/5/5 + * Modification : Created function + */ +s32 hinic5_cqm_ring_software_db(struct tag_hinic5_cqm_object *object, u64 db_record) +{ + struct tag_hinic5_cqm_nonrdma_qinfo *nonrdma_qinfo = NULL; + struct tag_hinic5_cqm_rdma_qinfo *rdma_qinfo = NULL; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct hinic5_hwdev *handle = NULL; + + if (unlikely(object == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(object)); + return HINIC5_CQM_FAIL; + } + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)object->hinic5_cqm_handle; + if (unlikely(hinic5_cqm_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(hinic5_cqm_handle)); + return HINIC5_CQM_FAIL; + } + handle = hinic5_cqm_handle->ex_handle; + + if (object->object_type == HINIC5_CQM_OBJECT_NONRDMA_EMBEDDED_RQ || + object->object_type == HINIC5_CQM_OBJECT_NONRDMA_EMBEDDED_SQ || + object->object_type == HINIC5_CQM_OBJECT_NONRDMA_SRQ) { + nonrdma_qinfo = (struct tag_hinic5_cqm_nonrdma_qinfo *)(void *)object; + nonrdma_qinfo->common.q_header_vaddr->doorbell_record = + db_record; + } else if ((object->object_type == HINIC5_CQM_OBJECT_NONRDMA_EMBEDDED_CQ) || + (object->object_type == HINIC5_CQM_OBJECT_NONRDMA_SCQ)) { + nonrdma_qinfo = (struct tag_hinic5_cqm_nonrdma_qinfo *)(void *)object; + nonrdma_qinfo->common.q_header_vaddr->ci_record = db_record; + } else if ((object->object_type == HINIC5_CQM_OBJECT_RDMA_QP) || + (object->object_type == HINIC5_CQM_OBJECT_RDMA_SRQ)) { + rdma_qinfo = (struct tag_hinic5_cqm_rdma_qinfo *)(void *)object; + rdma_qinfo->common.q_header_vaddr->doorbell_record = 
db_record; + } else if (object->object_type == HINIC5_CQM_OBJECT_RDMA_SCQ) { + rdma_qinfo = (struct tag_hinic5_cqm_rdma_qinfo *)(void *)object; + rdma_qinfo->common.q_header_vaddr->ci_record = db_record; + } else { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(object->object_type)); + } + + return HINIC5_CQM_SUCCESS; +} +EXPORT_SYMBOL(hinic5_cqm_ring_software_db); diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_db.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_db.h new file mode 100644 index 00000000..f01b0d40 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_db.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_CQM_DB_H +#define HINIC5_CQM_DB_H + +#include <linux/types.h> + +struct tag_hinic5_cqm_db_common { +#if (BYTE_ORDER == LITTLE_ENDIAN) + u32 rsvd1 : 23; + u32 c : 1; + u32 cos : 3; + u32 service_type : 5; +#else + u32 service_type : 5; + u32 cos : 3; + u32 c : 1; + u32 rsvd1 : 23; +#endif + + u32 rsvd2; +}; + +/* Only for test */ +s32 hinic5_cqm_db_addr_alloc(void *ex_handle, void __iomem **db_addr, + void __iomem **dwqe_addr); +s32 hinic5_cqm_db_phy_addr_alloc(void *ex_handle, u64 *db_paddr, u64 *dwqe_addr); + +s32 hinic5_cqm_db_init(void *ex_handle); +void hinic5_cqm_db_uninit(void *ex_handle); + +s32 hinic5_cqm_ring_hardware_db(void *ex_handle, u32 service_type, u8 db_count, + u64 db); + +#endif /* HINIC5_CQM_DB_H */ diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_fast_msg.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_fast_msg.c new file mode 100644 index 00000000..f4cdc9f0 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_fast_msg.c @@ -0,0 +1,159 @@ +#include <linux/types.h> +#include <linux/module.h> +#include <linux/semaphore.h> +#include <linux/workqueue.h> + 
+#include "comm_defs.h" +#include "ossl_knl.h" +#include "hinic5_hw.h" +#include "hinic5_hwdev.h" +#include "hinic5_dev_mgmt.h" +#include "hinic5_hwif_inner.h" +#include "hinic5_cqm_fast_msg.h" + +s32 hinic5_cqm_fast_msg_create_q(void *ex_handle, u32 queue_num, u32 sq_depth, u32 rq_depth) +{ + return 0; +} +EXPORT_SYMBOL(hinic5_cqm_fast_msg_create_q); + +s32 hinic5_cqm_fast_msg_connect(void *ex_handle, struct dest_info *des_info, resp_func *rsp, recv_func *recv) +{ + return 0; +} +EXPORT_SYMBOL(hinic5_cqm_fast_msg_connect); + +s32 hinic5_cqm_fast_msg_close(void *ex_handle, u64 msg_id) +{ + return 0; +} +EXPORT_SYMBOL(hinic5_cqm_fast_msg_close); + +s32 hinic5_cqm_fast_msg_listen(void *ex_handle, u32 credit, resp_func *rsp, recv_func *recv) +{ + return 0; +} +EXPORT_SYMBOL(hinic5_cqm_fast_msg_listen); + +s32 hinic5_cqm_fast_msg_send(void *ex_handle, u64 msg_id, struct dest_info *des_info, u8 *buf_res) +{ + return 0; +} +EXPORT_SYMBOL(hinic5_cqm_fast_msg_send); + +static int hinic5_cqm_fast_msg_pf_handler(void *pri_handle, u16 vf_id, u16 cmd, void *buf_in, u16 in_size, void *buf_out, + u16 *out_size) +{ +#define SPU_HOST_ID 4 + + struct hinic5_hwdev *hwdev = (struct hinic5_hwdev *)pri_handle; + void *ppf_hw_dev = NULL; + int ret; + + if (!hwdev) + return -EINVAL; + + ppf_hw_dev = hinic5_get_ppf_hw_dev_unsafe(pri_handle); + if (!ppf_hw_dev) { + pr_err("hinic5_cqm_fast_msg_pf_handler ppf is null.\n"); + return -EINVAL; + } + + sdk_info(hwdev->dev_hdl, "hinic5_cqm_fast_msg_pf_handler recv vf 0x%x mbox, cmd: 0x%x\n", + vf_id, cmd); + + ret = hinic5_mbox_ppf_to_host(ppf_hw_dev, HINIC5_MOD_FAKE_FMSG, 0, + SPU_HOST_ID, buf_in, in_size, buf_out, + out_size, 0, HINIC5_CHANNEL_UB); + if (ret != 0) { + pr_err("hinic5_cqm_fast_msg_pf_handler failed send msg to host %u ret:%d\n", + SPU_HOST_ID, ret); + return ret; + } + return 0; +} + +static struct hinic5_hwdev *get_pf_dev_by_ppf(struct hinic5_hwdev *ppf_hwdev, bool hold, u16 pf_id) +{ + struct hinic5_adev *adev = NULL; + struct 
card_node *chip_node = NULL; + struct hinic5_adev *dev = NULL; + + if (!ppf_hwdev) + return NULL; + + adev = ppf_hwdev->adapter_hdl; + if (!adev) + return NULL; + + lld_hold(); + chip_node = adev->chip_node; + list_for_each_entry(dev, &chip_node->func_list, node) { + if (dev->hwdev && hinic5_global_func_id(dev->hwdev) == pf_id) { + if (hold) + hinic5_lld_dev_hold(&dev->lld_dev); + lld_put(); + return dev->hwdev; + } + } + lld_put(); + + return NULL; +} + +static int hinic5_cqm_fast_msg_ppf_handler(void *pri_handle, u16 pf_idx, u16 vf_id, u16 cmd, void *buf_in, u16 in_size, void *buf_out, + u16 *out_size) +{ +#define SDI_PPF_ID 16 +#define HOST_PF_ID 2 +#define SPU_HOST_VFID 64U + + struct hinic5_hwdev *hwdev = (struct hinic5_hwdev *)pri_handle; + struct hinic5_hwdev *pf_hwdev = NULL; + fast_msg_t *msg = (fast_msg_t *)buf_in; + u16 ppf_id; + int ret = 0; + + if (!hwdev) + return -EINVAL; + + ppf_id = hinic5_global_func_id(hwdev); + if (ppf_id == SDI_PPF_ID) { + pr_err("hinic5_cqm_fast_msg_ppf_handler channel error, src: 0x%x cmd: 0x%x.\n", + pf_idx, cmd); + } else { + pf_hwdev = get_pf_dev_by_ppf(hwdev, false, HOST_PF_ID); + if (!pf_hwdev) { + pr_err("hinic5_cqm_fast_msg_ppf_handler pf hwdev is null\n"); + return -EFAULT; + } + ret = hinic5_mbox_to_vf(pf_hwdev, msg->dst_fe_idx - SPU_HOST_VFID + 1u, + HINIC5_MOD_FAKE_FMSG, 0, buf_in, + in_size, buf_out, out_size, 0, HINIC5_CHANNEL_UB); + } + return ret; +} + +s32 hinic5_cqm_init_fast_msg(void *hwdev) +{ + struct hinic5_hwdev *hw_dev = (struct hinic5_hwdev *)hwdev; + + if (HINIC5_IS_PPF(hw_dev)) + hinic5_register_ppf_mbox_cb(hwdev, HINIC5_MOD_FAKE_FMSG, hwdev, + hinic5_cqm_fast_msg_ppf_handler); + if (HINIC5_IS_PF(hw_dev)) + hinic5_register_pf_mbox_cb(hwdev, HINIC5_MOD_FAKE_FMSG, hwdev, + hinic5_cqm_fast_msg_pf_handler); + return 0; +} + +void hinic5_cqm_deinit_fast_msg(void *hwdev) +{ + struct hinic5_hwdev *hw_dev = (struct hinic5_hwdev *)hwdev; + + if (HINIC5_IS_PPF(hw_dev)) + hinic5_unregister_ppf_mbox_cb(hwdev, 
HINIC5_MOD_FAKE_FMSG); + + if (HINIC5_IS_PF(hw_dev)) + hinic5_unregister_pf_mbox_cb(hwdev, HINIC5_MOD_FAKE_FMSG); +} diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_fast_msg.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_fast_msg.h new file mode 100644 index 00000000..946b49ea --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_fast_msg.h @@ -0,0 +1,35 @@ +#ifndef HINIC5_CQM_FAST_MSG_H +#define HINIC5_CQM_FAST_MSG_H + +#include <linux/types.h> +#include <linux/module.h> + +#include "ossl_knl.h" + +struct dest_info { + u32 func_id; + u32 queue_id; +}; + +struct src_info { + u32 func_id; + u32 queue_id; + u64 msg_id; +}; + +typedef struct fast_msg { + u16 dst_fe_idx; +} fast_msg_t; + +typedef s32 (resp_func)(u64 msg_id, u8 *buf_res, s32 result); +typedef s32 (recv_func)(struct src_info *src, u8 *buf_res); +s32 hinic5_cqm_fast_msg_create_q(void *ex_handle, u32 queue_num, u32 sq_depth, u32 rq_depth); +s32 hinic5_cqm_fast_msg_connect(void *ex_handle, struct dest_info *des_info, resp_func *rsp, recv_func *recv); +s32 hinic5_cqm_fast_msg_close(void *ex_handle, u64 msg_id); +s32 hinic5_cqm_fast_msg_listen(void *ex_handle, u32 credit, resp_func *rsp, recv_func *recv); +s32 hinic5_cqm_fast_msg_send(void *ex_handle, u64 msg_id, struct dest_info *des_info, u8 *buf_res); +s32 hinic5_cqm_init_fast_msg(void *hwdev); +void hinic5_cqm_deinit_fast_msg(void *hwdev); + + +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_main.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_main.c new file mode 100644 index 00000000..a8b7ace7 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_main.c @@ -0,0 +1,2071 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/module.h> +#include 
<linux/delay.h> +#include <linux/vmalloc.h> + +#include "ossl_knl.h" +#include "hinic5_hw.h" +#include "hinic5_mt.h" +#include "hinic5_hwdev.h" +#include "hinic5_hwif_inner.h" +#include "hinic5_hw_cfg.h" +#include "hinic5_hinic5_vram_api.h" +#include "hinic5_typedef_inner.h" + +#include "hinic5_vram_common.h" + +#include "hinic5_cqm_object.h" +#include "hinic5_cqm_bitmap_table.h" +#include "hinic5_cqm_bat_cla.h" +#include "hinic5_cqm_bloomfilter.h" +#include "hinic5_cqm_db.h" +#include "hinic5_cqm_cmdq.h" +#include "hinic5_cqm_main.h" + +static s32 hinic5_cqm_set_fake_vf_child_timer(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct tag_hinic5_cqm_handle *fake_hinic5_cqm_handle, bool en) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)hinic5_cqm_handle->ex_handle; + u16 func_global_idx; + s32 ret; + + if (fake_hinic5_cqm_handle->func_capability.timer_enable == 0) { + return HINIC5_CQM_SUCCESS; + } + + func_global_idx = fake_hinic5_cqm_handle->func_attribute.func_global_idx; + ret = hinic5_func_tmr_bitmap_set(hinic5_cqm_handle->ex_handle, func_global_idx, en); + if (ret != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, "func_id %u Timer %s timer bitmap failed\n", + func_global_idx, en ? 
"enable" : "disable"); + return HINIC5_CQM_FAIL; + } + + return HINIC5_CQM_SUCCESS; +} + +static void hinic5_cqm_unset_fake_vf_timer(struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + u32 i, child_func_number = hinic5_cqm_get_child_func_number(hinic5_cqm_handle); + + for (i = 0; i < child_func_number; i++) + (void)hinic5_cqm_set_fake_vf_child_timer(hinic5_cqm_handle, hinic5_cqm_handle->fake_hinic5_cqm_handle[i], false); +} + +static s32 hinic5_cqm_set_fake_vf_timer(struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + u32 i, child_func_number = hinic5_cqm_get_child_func_number(hinic5_cqm_handle); + + for (i = 0; i < child_func_number; i++) { + s32 ret = hinic5_cqm_set_fake_vf_child_timer(hinic5_cqm_handle, hinic5_cqm_handle->fake_hinic5_cqm_handle[i], true); + if (ret != HINIC5_CQM_SUCCESS) + goto err; + } + + return HINIC5_CQM_SUCCESS; + +err: + hinic5_cqm_unset_fake_vf_timer(hinic5_cqm_handle); + return HINIC5_CQM_FAIL; +} + +static s32 hinic5_cqm_set_timer_enable(void *ex_handle) +{ + struct hinic5_hwdev *handle = ex_handle; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = handle->hinic5_cqm_hdl; + u16 func_id = hinic5_global_func_id(ex_handle); + int is_in_kexec; + + is_in_kexec = hinic5_vram_get_kexec_flag(); + if (is_in_kexec != 0) { + hinic5_cqm_info(handle->dev_hdl, "Skip starting hinic5_cqm timer during kexec\n"); + return HINIC5_CQM_SUCCESS; + } + + /* Enable children */ + if (HINIC5_CQM_IS_FAKE_PARENT(hinic5_cqm_handle) && + hinic5_cqm_set_fake_vf_timer(hinic5_cqm_handle) != HINIC5_CQM_SUCCESS) + return HINIC5_CQM_FAIL; + + /* Enable self */ + if (hinic5_func_tmr_bitmap_set(ex_handle, func_id, true) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, "Timer start: enable timer bitmap failed\n"); + goto err; + } + + return HINIC5_CQM_SUCCESS; + +err: + if (HINIC5_CQM_IS_FAKE_PARENT(hinic5_cqm_handle)) + hinic5_cqm_unset_fake_vf_timer(hinic5_cqm_handle); + return HINIC5_CQM_FAIL; +} + +static void hinic5_cqm_set_timer_disable(void *ex_handle) 
+{ + struct hinic5_hwdev *handle = ex_handle; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = handle->hinic5_cqm_hdl; + + /* Disable self */ + if (hinic5_func_tmr_bitmap_set(ex_handle, hinic5_global_func_id(ex_handle), + false) != HINIC5_CQM_SUCCESS) + hinic5_cqm_err(handle->dev_hdl, "func_id %u Timer stop: disable timer bitmap failed\n", + hinic5_global_func_id(ex_handle)); + + /* Disable children */ + if (HINIC5_CQM_IS_FAKE_PARENT(hinic5_cqm_handle)) + hinic5_cqm_unset_fake_vf_timer(hinic5_cqm_handle); +} + +static u32 hinic5_cqm_set_vio_enable(void *ex_handle, bool enable) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + int err; + + if (!ex_handle) + return HINIC5_CQM_FAIL; + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + if (!hinic5_cqm_handle->service[HINIC5_CQM_SERVICE_T_VIRTIO].valid) + return HINIC5_CQM_SUCCESS; + + err = hinic5_func_vio_en(ex_handle, enable); + if (err != 0) { + hinic5_cqm_err(handle->dev_hdl, "VIO %s failed, err %d\n", + (enable ? "enable" : "disable"), err); + return HINIC5_CQM_FAIL; + } + + hinic5_cqm_info(handle->dev_hdl, "VIO %s success\n", + (enable ? "enable" : "disable")); + return HINIC5_CQM_SUCCESS; +} + +static s32 hinic5_cqm_initialize_recource(void *ex_handle) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + + /* Initialize memory entries such as BAT, CLA, and bitmap. 
*/ + if (hinic5_cqm_mem_init(ex_handle) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_mem_init)); + return HINIC5_CQM_FAIL; + } + + /* Event callback initialization */ + if (hinic5_cqm_event_init(ex_handle) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_event_init)); + goto err1; + } + + /* Doorbell initiation */ + if (hinic5_cqm_db_init(ex_handle) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_db_init)); + goto err2; + } + + /* Initialize the bloom filter. */ + if (hinic5_cqm_bloomfilter_init(ex_handle) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_bloomfilter_init)); + goto err3; + } + + if (hinic5_cqm_set_timer_enable(ex_handle) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_set_timer_enable)); + goto err4; + } + + if (hinic5_cqm_set_vio_enable(ex_handle, true) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_set_vio_enable)); + goto err5; + } + + return HINIC5_CQM_SUCCESS; + +err5: + hinic5_cqm_set_timer_disable(ex_handle); +err4: + hinic5_cqm_bloomfilter_uninit(ex_handle); +err3: + hinic5_cqm_db_uninit(ex_handle); +err2: + hinic5_cqm_event_uninit(ex_handle); +err1: + hinic5_cqm_mem_uninit(ex_handle); + return HINIC5_CQM_FAIL; +} + +static struct tag_hinic5_cqm_handle *hinic5_cqm_handle_create(void) +{ + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + + hinic5_cqm_handle = kzalloc(sizeof(*hinic5_cqm_handle), GFP_KERNEL); + if (unlikely(!hinic5_cqm_handle)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_ALLOC_FAIL(hinic5_cqm_handle)); + return NULL; + } + + /* Clear the memory to prevent other systems from + * not clearing the memory. 
+ */ + memset(hinic5_cqm_handle, 0, sizeof(struct tag_hinic5_cqm_handle)); + + atomic_set(&hinic5_cqm_handle->handle_state, HINIC5_CQM_HANDLE_STATE_INIT); + + return hinic5_cqm_handle; +} + +static struct tag_hinic5_cqm_handle *hinic5_cqm_handle_fork(struct tag_hinic5_cqm_handle *parent_handle) +{ + struct tag_hinic5_cqm_handle *child_handle = NULL; + + child_handle = kzalloc(sizeof(*child_handle), GFP_KERNEL); + if (unlikely(!child_handle)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_ALLOC_FAIL(child_handle)); + return NULL; + } + + /* Copy the attributes of the parent HINIC5_CQM handle to the child HINIC5_CQM + * handle and modify the values of function. + */ + memcpy(child_handle, + parent_handle, sizeof(struct tag_hinic5_cqm_handle)); + + /* Clear state & unlink some references */ + atomic_set(&child_handle->handle_state, HINIC5_CQM_HANDLE_STATE_INIT); + memset(child_handle->fake_hinic5_cqm_handle, + 0, sizeof(child_handle->fake_hinic5_cqm_handle)); + + return child_handle; +} + +/** + * Prototype : hinic5_cqm_init + * Description : Complete HINIC5_CQM initialization. + * If the function is a parent fake function, copy the fake. + * If it is a child fake function (in the fake copy function, + * not in this function), set fake_en in the BAT/CLA table. + * hinic5_cqm_init->hinic5_cqm_mem_init->hinic5_cqm_fake_init(copy) + * If the child fake conflict occurs, resources are not + * initialized, but the timer must be enabled. + * If the function is of the normal type, + * follow the normal process. 
+ * Input : void *ex_handle + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/15 + * Modification : Created function + */ +s32 hinic5_cqm_init(void *ex_handle) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + s32 ret; + + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ex_handle)); + return HINIC5_CQM_FAIL; + } + + hinic5_cqm_handle = hinic5_cqm_handle_create(); + if (unlikely(hinic5_cqm_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_handle_create)); + return HINIC5_CQM_FAIL; + } + + hinic5_cqm_handle->ex_handle = handle; + hinic5_cqm_handle->dev = handle->dev_hdl; + handle->hinic5_cqm_hdl = (void *)hinic5_cqm_handle; + + /* 187x ops or 182x ops */ + hinic5_cqm_cmdq_adapt_init(hinic5_cqm_handle); + /* Clearing Statistics */ + memset(&handle->hw_stats.hinic5_cqm_stats, 0, sizeof(struct hinic5_cqm_stats)); + + /* Reads VF/PF information. 
*/ + hinic5_cqm_handle->func_attribute = handle->hwif->attr; + hinic5_cqm_info(handle->dev_hdl, "Func init: function[%u] type %d(0:PF,1:VF,2:PPF)\n", + hinic5_cqm_handle->func_attribute.func_global_idx, hinic5_cqm_handle->func_attribute.func_type); + + /* Read capability from configuration management module */ + ret = hinic5_cqm_capability_init(ex_handle); + if (ret != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_capability_init)); + goto err1; + } + + /* memory doorbell event bloomfilter timer init */ + if (hinic5_cqm_initialize_recource(ex_handle) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_initialize_recource)); + goto err1; + } + + atomic_set(&hinic5_cqm_handle->handle_state, HINIC5_CQM_HANDLE_STATE_READY); + return HINIC5_CQM_SUCCESS; + +err1: + kfree(handle->hinic5_cqm_hdl); + handle->hinic5_cqm_hdl = NULL; + return HINIC5_CQM_FAIL; +} + +/** + * Prototype : hinic5_cqm_uninit + * Description : Deinitializes the HINIC5_CQM module. This function is called once + * each time a function is removed. 
+ * Input : void *ex_handle + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void hinic5_cqm_uninit(void *ex_handle) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + s32 ret; + + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ex_handle)); + return; + } + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + if (unlikely(hinic5_cqm_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(hinic5_cqm_handle)); + return; + } + + atomic_set(&hinic5_cqm_handle->handle_state, HINIC5_CQM_HANDLE_STATE_REMOVE); + + hinic5_cqm_set_vio_enable(ex_handle, false); + + hinic5_cqm_set_timer_disable(ex_handle); + + /* After the TMR timer stops, the system releases resources + * after a delay of one or two milliseconds. + */ + if (HINIC5_CQM_IS_PPF(hinic5_cqm_handle)) { + if (hinic5_cqm_handle->func_capability.timer_enable == + HINIC5_CQM_TIMER_ENABLE) { + hinic5_cqm_info(handle->dev_hdl, "PPF timer stop\n"); + ret = hinic5_ppf_tmr_stop(handle); + if (ret != HINIC5_CQM_SUCCESS) + /* The timer fails to be stopped, + * and the resource release is not affected. + */ + hinic5_cqm_info(handle->dev_hdl, "PPF timer stop, ret=%d\n", ret); + } + + usleep_range(0x384, 0x3E8); /* Somebody requires a delay of 1 ms, + * which is inaccurate. + */ + } + + /* Release Bloom Filter Table */ + hinic5_cqm_bloomfilter_uninit(ex_handle); + + /* Release hardware doorbell */ + hinic5_cqm_db_uninit(ex_handle); + + /* Cancel the callback of the event */ + hinic5_cqm_event_uninit(ex_handle); + + /* Release various memory tables and require the service + * to release all objects. 
+ */ + hinic5_cqm_mem_uninit(ex_handle); + + /* Release hinic5_cqm_handle */ + handle->hinic5_cqm_hdl = NULL; + kfree(hinic5_cqm_handle); +} + +static void hinic5_cqm_test_mode_init(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct service_cap *service_capability) +{ + struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + + if (service_capability->test_mode == 0) + return; + + hinic5_cqm_info(handle->dev_hdl, "Enter HINIC5_CQM test mode\n"); + + func_cap->qpc_number = service_capability->test_qpc_num; + func_cap->qpc_reserved = + GET_MAX(func_cap->qpc_reserved, + service_capability->test_qpc_resvd_num); + func_cap->xid_alloc_mode = service_capability->test_xid_alloc_mode; + func_cap->gpa_check_enable = service_capability->test_gpa_check_enable; + func_cap->pagesize_reorder = service_capability->test_page_size_reorder; + func_cap->qpc_alloc_static = + (bool)(service_capability->test_qpc_alloc_mode); + func_cap->scqc_alloc_static = + (bool)(service_capability->test_scqc_alloc_mode); + func_cap->flow_table_based_conn_number = + service_capability->test_max_conn_num; + func_cap->flow_table_based_conn_cache_number = + service_capability->test_max_cache_conn_num; + func_cap->scqc_number = service_capability->test_scqc_num; + func_cap->mpt_number = service_capability->test_mpt_num; + func_cap->mpt_reserved = service_capability->test_mpt_recvd_num; + func_cap->reorder_number = service_capability->test_reorder_num; + /* 256K buckets, 256K*64B = 16MB */ + func_cap->hash_number = service_capability->test_hash_num; +} + +static void hinic5_cqm_service_capability_update(struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability; + + func_cap->qpc_number = GET_MIN(HINIC5_CQM_MAX_QPC_NUM, func_cap->qpc_number); + func_cap->scqc_number = GET_MIN(HINIC5_CQM_MAX_SCQC_NUM, + func_cap->scqc_number); + 
func_cap->srqc_number = GET_MIN(HINIC5_CQM_MAX_SRQC_NUM, + func_cap->srqc_number); + func_cap->childc_number = GET_MIN(HINIC5_CQM_MAX_CHILDC_NUM, + func_cap->childc_number); +} + +static void hinic5_cqm_service_valid_init(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + const struct service_cap *service_capability) +{ + u32 type = service_capability->chip_svc_type; + struct tag_hinic5_cqm_service *svc = hinic5_cqm_handle->service; + + svc[HINIC5_CQM_SERVICE_T_NIC].valid = (type & CFG_SERVICE_MASK_NIC) != 0; + svc[HINIC5_CQM_SERVICE_T_OVS].valid = (type & CFG_SERVICE_MASK_OVS) != 0; + svc[HINIC5_CQM_SERVICE_T_ROCE].valid = (type & CFG_SERVICE_MASK_ROCE) != 0; + svc[HINIC5_CQM_SERVICE_T_TOE].valid = (type & CFG_SERVICE_MASK_TOE) != 0; + svc[HINIC5_CQM_SERVICE_T_FC].valid = (type & CFG_SERVICE_MASK_FC) != 0; + svc[HINIC5_CQM_SERVICE_T_IPSEC].valid = (type & CFG_SERVICE_MASK_IPSEC) != 0; + svc[HINIC5_CQM_SERVICE_T_VBS].valid = (type & CFG_SERVICE_MASK_VBS) != 0; + svc[HINIC5_CQM_SERVICE_T_VIRTIO].valid = (type & CFG_SERVICE_MASK_VIRTIO) != 0; + svc[HINIC5_CQM_SERVICE_T_IOE].valid = false; + svc[HINIC5_CQM_SERVICE_T_PPA].valid = (type & CFG_SERVICE_MASK_PPA) != 0; + svc[HINIC5_CQM_SERVICE_T_UB].valid = (type & CFG_SERVICE_MASK_UB) != 0; + svc[HINIC5_CQM_SERVICE_T_JBOF].valid = (type & CFG_SERVICE_MASK_JBOF) != 0; + svc[HINIC5_CQM_SERVICE_T_VROCE].valid = (type & CFG_SERVICE_MASK_VROCE) != 0; + svc[HINIC5_CQM_SERVICE_T_DMMU].valid = (type & CFG_SERVICE_MASK_DMMU) != 0; + svc[HINIC5_CQM_SERVICE_T_CFM].valid = (type & CFG_SERVICE_MASK_CFM) != 0; +} + +static void hinic5_cqm_service_capability_init_nic(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, void *pra) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + + hinic5_cqm_info(handle->dev_hdl, "Cap init: nic is valid, but nic need not be init by hinic5_cqm\n"); +} + +static void hinic5_cqm_service_capability_init_ovs(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, void *pra) +{ + struct 
tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability; + struct service_cap *service_capability = (struct service_cap *)pra; + struct ovs_service_cap *ovs_cap = &service_capability->ovs_cap; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + + hinic5_cqm_info(handle->dev_hdl, "Cap init: ovs is valid\n"); + hinic5_cqm_info(handle->dev_hdl, "Cap init: ovs qpc 0x%x\n", + ovs_cap->dev_ovs_cap.max_pctxs); + func_cap->hash_number += ovs_cap->dev_ovs_cap.max_pctxs; + func_cap->hash_basic_size = HINIC5_CQM_HASH_BUCKET_SIZE_64; + func_cap->qpc_number += ovs_cap->dev_ovs_cap.max_pctxs; + func_cap->qpc_basic_size = GET_MAX(ovs_cap->pctx_sz, + func_cap->qpc_basic_size); + func_cap->qpc_reserved += ovs_cap->dev_ovs_cap.max_pctxs; + func_cap->qpc_alloc_static = true; + func_cap->pagesize_reorder = HINIC5_CQM_OVS_PAGESIZE_ORDER; +} + +static void hinic5_cqm_service_capability_roce_cap_print(struct hinic5_hwdev *handle, + const struct hinic5_board_info *board_info, const struct dev_roce_svc_own_cap *roce_own_cap) +{ + hinic5_cqm_info(handle->dev_hdl, "Cap init: roce is valid\n"); + hinic5_cqm_info(handle->dev_hdl, "Cap init: roce qpc 0x%x, scqc 0x%x, srqc 0x%x, drc_qp 0x%x\n", + roce_own_cap->max_qps, roce_own_cap->max_cqs, + roce_own_cap->max_srqs, roce_own_cap->max_drc_qps); + hinic5_cqm_info(handle->dev_hdl, "Cap init: board_type 0x%x, scenes_id:0x%x, srv_bmp:0x%x\n", + board_info->board_type, board_info->scenes_id, board_info->service_en_bitmap); + hinic5_cqm_info(handle->dev_hdl, "Cap init: reserved_qps:0x%x, reserved_qps_back:0x%x, " + "reserved_cqs:0x%x, reserved_cqs_back:0x%x\n", + roce_own_cap->reserved_qps, roce_own_cap->reserved_qps_back, + roce_own_cap->reserved_cqs, roce_own_cap->reserved_cqs_back); + hinic5_cqm_info(handle->dev_hdl, "Cap init: reserved_srqs:0x%x, reserved_srqs_back:0x%x, " + "max_pd:0x%x, max_xrcd:0x%x, max_gid:0x%x\n", + roce_own_cap->reserved_srqs, roce_own_cap->reserved_srqs_back, + roce_own_cap->max_pd, 
roce_own_cap->max_xrcd, roce_own_cap->max_gid); +} + +static void hinic5_cqm_service_capability_init_roce(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, void *pra) +{ + struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct service_cap *service_capability = (struct service_cap *)pra; + struct rdma_service_cap *rdma_cap = &service_capability->rdma_cap; + struct dev_roce_svc_own_cap *roce_own_cap = &rdma_cap->dev_rdma_cap.roce_own_cap; + + hinic5_cqm_service_capability_roce_cap_print(handle, &handle->board_info, roce_own_cap); + + func_cap->use_fake_parent_cla = true; + + if (COMM_SUPPORT_EXTEND_CAPBILITY(handle)) { + func_cap->qpc_reserved += roce_own_cap->reserved_qps; + func_cap->qpc_reserved_back += roce_own_cap->reserved_qps_back; + func_cap->scq_reserved += roce_own_cap->reserved_cqs; + func_cap->srq_reserved += roce_own_cap->reserved_srqs; + } else { + func_cap->qpc_reserved += HINIC5_CQM_QPC_ROCE_RSVD; + func_cap->scq_reserved += HINIC5_CQM_CQ_ROCE_RSVD; + func_cap->srq_reserved += HINIC5_CQM_SRQ_ROCE_RSVD; + } + + func_cap->xid_alloc_mode = false; /* xid 快速复用 */ + func_cap->qpc_number += roce_own_cap->max_qps; + func_cap->qpc_basic_size = GET_MAX(roce_own_cap->qpc_entry_sz, func_cap->qpc_basic_size); + func_cap->qpc_alloc_static = true; + func_cap->scqc_alloc_static = true; + func_cap->srqc_alloc_static = true; + func_cap->scqc_number += roce_own_cap->max_cqs; + func_cap->scqc_basic_size = GET_MAX(rdma_cap->cqc_entry_sz, func_cap->scqc_basic_size); + func_cap->srqc_number += roce_own_cap->max_srqs; + func_cap->srqc_basic_size = GET_MAX(roce_own_cap->srqc_entry_sz, func_cap->srqc_basic_size); + func_cap->mpt_number += roce_own_cap->max_mpts; + func_cap->mpt_reserved += rdma_cap->reserved_mrws; + func_cap->mpt_basic_size = GET_MAX(rdma_cap->mpt_entry_sz, func_cap->mpt_basic_size); + if (COMM_SUPPORT_EXTEND_CAPBILITY(handle)) + func_cap->gid_number = 
roce_own_cap->max_gid; + else + func_cap->gid_number = HINIC5_CQM_GID_RDMA_NUM; + + func_cap->gid_basic_size = HINIC5_CQM_GID_SIZE_32; + func_cap->childc_number += roce_own_cap->max_child_ctx_num; + func_cap->childc_basic_size = GET_MAX(HINIC5_CQM_CHILDC_SIZE_256, func_cap->childc_basic_size); +} + +static void hinic5_cqm_service_capability_init_vroce(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, void *pra) +{ + struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct service_cap *service_capability = (struct service_cap *)pra; + struct rdma_service_cap *rdma_cap = &service_capability->rdma_cap; + struct dev_roce_svc_own_cap *roce_own_cap = &rdma_cap->dev_rdma_cap.roce_own_cap; + + if (IS_MASTER_HOST(handle)) { + func_cap->hash_number = roce_own_cap->max_qps; + func_cap->hash_basic_size = HINIC5_CQM_HASH_BUCKET_SIZE_64; + hinic5_cqm_info(handle->dev_hdl, "Cap init: vroce is valid\n"); + hinic5_cqm_info(handle->dev_hdl, "Cap init: hash_number 0x%x hash_basic_size 0x%x\n", func_cap->hash_number, + func_cap->hash_basic_size); + } +} + +static void hinic5_cqm_service_capability_init_toe(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, void *pra) +{ + struct tag_hinic5_cqm_toe_private_capability *toe_own_cap = &hinic5_cqm_handle->toe_own_capability; + struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability; + struct service_cap *service_capability = (struct service_cap *)pra; + struct rdma_service_cap *rdma_cap = &service_capability->rdma_cap; + struct toe_service_cap *toe_cap = &service_capability->toe_cap; + struct dev_toe_svc_cap *dev_toe_cap = &toe_cap->dev_toe_cap; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + + hinic5_cqm_info(handle->dev_hdl, "Cap init: toe is valid\n"); + hinic5_cqm_info(handle->dev_hdl, "Cap init: toe qpc 0x%x, scqc 0x%x, srqc 0x%x\n", + dev_toe_cap->max_pctxs, dev_toe_cap->max_cqs, + 
dev_toe_cap->max_srqs); + func_cap->hash_number += dev_toe_cap->max_pctxs; + func_cap->hash_basic_size = HINIC5_CQM_HASH_BUCKET_SIZE_64; + func_cap->qpc_number += dev_toe_cap->max_pctxs; + func_cap->qpc_basic_size = GET_MAX(toe_cap->pctx_sz, + func_cap->qpc_basic_size); + func_cap->qpc_alloc_static = true; + func_cap->scqc_number += dev_toe_cap->max_cqs; + func_cap->scqc_basic_size = GET_MAX(toe_cap->scqc_sz, + func_cap->scqc_basic_size); + func_cap->scqc_alloc_static = true; + + toe_own_cap->toe_srqc_number = dev_toe_cap->max_srqs; + toe_own_cap->toe_srqc_start_id = dev_toe_cap->srq_id_start; + toe_own_cap->toe_srqc_basic_size = HINIC5_CQM_SRQC_SIZE_64; + func_cap->childc_number += dev_toe_cap->max_cctxt; + func_cap->childc_basic_size = GET_MAX(HINIC5_CQM_CHILDC_SIZE_256, + func_cap->childc_basic_size); + func_cap->mpt_number += dev_toe_cap->max_mpts; + func_cap->mpt_reserved = 0; + func_cap->mpt_basic_size = GET_MAX(rdma_cap->mpt_entry_sz, + func_cap->mpt_basic_size); +} + +static void hinic5_cqm_service_capability_init_ioe(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, void *pra) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + + hinic5_cqm_info(handle->dev_hdl, "Cap init: ioe is valid\n"); +} + +static void hinic5_cqm_service_capability_init_fc(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, void *pra) +{ + struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability; + struct service_cap *service_capability = (struct service_cap *)pra; + struct fc_service_cap *fc_cap = &service_capability->fc_cap; + struct dev_fc_svc_cap *dev_fc_cap = &fc_cap->dev_fc_cap; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + + hinic5_cqm_info(handle->dev_hdl, "Cap init: fc is valid\n"); + hinic5_cqm_info(handle->dev_hdl, "Cap init: fc qpc 0x%x, scqc 0x%x, srqc 0x%x\n", + dev_fc_cap->max_parent_qpc_num, dev_fc_cap->scq_num, + dev_fc_cap->srq_num); + func_cap->hash_number += dev_fc_cap->max_parent_qpc_num; + 
func_cap->hash_basic_size = HINIC5_CQM_HASH_BUCKET_SIZE_64; + func_cap->qpc_number += dev_fc_cap->max_parent_qpc_num; + func_cap->qpc_basic_size = GET_MAX(fc_cap->parent_qpc_size, + func_cap->qpc_basic_size); + func_cap->qpc_alloc_static = true; + func_cap->scqc_number += dev_fc_cap->scq_num; + func_cap->scqc_basic_size = GET_MAX(fc_cap->scqc_size, + func_cap->scqc_basic_size); + func_cap->srqc_number += dev_fc_cap->srq_num; + func_cap->srqc_basic_size = GET_MAX(fc_cap->srqc_size, + func_cap->srqc_basic_size); + func_cap->lun_number = HINIC5_CQM_LUN_FC_NUM; + func_cap->lun_basic_size = HINIC5_CQM_LUN_SIZE_8; + func_cap->taskmap_number = HINIC5_CQM_TASKMAP_FC_NUM; + func_cap->taskmap_basic_size = PAGE_SIZE; + func_cap->childc_number += dev_fc_cap->max_child_qpc_num; + func_cap->childc_basic_size = GET_MAX(fc_cap->child_qpc_size, + func_cap->childc_basic_size); + func_cap->pagesize_reorder = HINIC5_CQM_FC_PAGESIZE_ORDER; +} + +static void hinic5_cqm_service_capability_init_vbs(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, void *pra) +{ + struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability; + struct service_cap *service_capability = (struct service_cap *)pra; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + + hinic5_cqm_info(handle->dev_hdl, "Cap init: vbs is valid\n"); + + /* If the entry size is greater than the cache line (256 bytes), + * align the entries by cache line. 
+ */ + func_cap->qpc_basic_size = GET_MAX(HINIC5_CQM_VBS_QPC_SIZE, + func_cap->qpc_basic_size); + func_cap->qpc_alloc_static = true; + func_cap->scqc_basic_size = HINIC5_CQM_VBS_SCQC_SIZE; + func_cap->scqc_alloc_static = false; + func_cap->scq_reserved += service_capability->vbs_cap.vbs_max_volq; + func_cap->childc_number += service_capability->vbs_cap.vbs_child_ctx_num; + func_cap->childc_basic_size = GET_MAX(HINIC5_CQM_CHILDC_SIZE_256, func_cap->childc_basic_size); + func_cap->xid_alloc_mode = false; + func_cap->hash_number += service_capability->vbs_cap.vbs_hash_bucket_num; + func_cap->hash_basic_size = HINIC5_CQM_HASH_BUCKET_SIZE_64; + + func_cap->qpc_number += service_capability->vbs_cap.vbs_max_volq; + func_cap->scqc_number += service_capability->vbs_cap.vbs_max_volq; +} + +static void hinic5_cqm_service_capability_init_jbof(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, void *pra) +{ + struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct service_cap *service_capability = (struct service_cap *)pra; + struct jbof_service_cap *jbof_cap = &service_capability->jbof_cap; + + hinic5_cqm_info(handle->dev_hdl, "Cap init: jbof is valid\n"); + func_cap->qpc_alloc_static = true; + func_cap->qpc_number += jbof_cap->max_parent_qpc_num; + func_cap->qpc_basic_size = GET_MAX(jbof_cap->parent_qpc_size, + func_cap->qpc_basic_size); + func_cap->childc_number += jbof_cap->max_child_qpc_num; + func_cap->childc_basic_size = GET_MAX(HINIC5_CQM_CHILDC_SIZE_256, + func_cap->childc_basic_size); + func_cap->hash_number += jbof_cap->hash_bucket_num; + func_cap->hash_basic_size = HINIC5_CQM_HASH_BUCKET_SIZE_64; +} + +static void hinic5_cqm_service_capability_init_ipsec(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, void *pra) +{ + struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability; + struct service_cap *service_capability = (struct service_cap 
*)pra; + struct ipsec_service_cap *ipsec_cap = &service_capability->ipsec_cap; + struct dev_ipsec_svc_cap *ipsec_srvcap = &ipsec_cap->dev_ipsec_cap; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + + func_cap->childc_number += (ipsec_srvcap->max_sactxs + ipsec_srvcap->max_spctxs); + func_cap->childc_basic_size = GET_MAX(HINIC5_CQM_CHILDC_SIZE_256, + func_cap->childc_basic_size); + func_cap->scqc_number += ipsec_srvcap->max_cqs; + func_cap->scqc_basic_size = GET_MAX(HINIC5_CQM_SCQC_SIZE_64, + func_cap->scqc_basic_size); + func_cap->scqc_alloc_static = true; + func_cap->hash_number += HINIC5_CQM_CRYPT_HASH_BUCKET_NUM(ipsec_srvcap->sa_hash_bucket_num + ipsec_srvcap->sp_hash_bucket_num); + func_cap->hash_basic_size = HINIC5_CQM_HASH_BUCKET_SIZE_64; + hinic5_cqm_info(handle->dev_hdl, "Cap init: ipsec is valid\n"); + hinic5_cqm_info(handle->dev_hdl, "Cap init: max_sactxs: 0x%x, max_spctxs: 0x%x, childc_bsize %u\n", + ipsec_srvcap->max_sactxs, ipsec_srvcap->max_spctxs, func_cap->childc_basic_size); + hinic5_cqm_info(handle->dev_hdl, "scqc_num 0x%x, scqc_bsize %u\n", ipsec_srvcap->max_cqs, func_cap->scqc_basic_size); + hinic5_cqm_info(handle->dev_hdl, + "Cap init: ipsec sa_hash_bucket_num: 0x%x, sp_hash_bucket_num: 0x%x, hash_basic_size %u\n", + ipsec_srvcap->sa_hash_bucket_num, ipsec_srvcap->sp_hash_bucket_num, func_cap->hash_basic_size); +} + +static void hinic5_cqm_service_capability_init_virtio(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, void *pra) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability; + struct service_cap *svc_cap = (struct service_cap *)pra; + u32 vq_num, vq_size, xid2cid_size; + + hinic5_cqm_info(handle->dev_hdl, "Cap init: virtio is valid\n"); + + vq_num = svc_cap->virtio_vq_num != 0 ? 
svc_cap->virtio_vq_num : HINIC5_CQM_VIRTIO_VQ_NUM_DEFAULT; + vq_num += svc_cap->nvme_qp_num; + vq_size = vq_num * svc_cap->virtio_vq_size; + hinic5_cqm_info(handle->dev_hdl, "Cap init: vq_num 0x%x, vq_size 0x%x\n", vq_num, vq_size); + + if (COMM_SUPPORT_VIRTIO_FC_CACHE(handle)) { + /* In VirtIO function context cache mode, + * the VQs are divided and stored in all enabled SMFs. */ + xid2cid_size = vq_size / func_cap->smf_enabled_num; + xid2cid_size += svc_cap->vio_func_num * HINIC5_CQM_VIRTIO_FC_SIZE; + hinic5_cqm_info(handle->dev_hdl, "Cap init: vio_func_num 0x%x\n", svc_cap->vio_func_num); + } else { + xid2cid_size = vq_size; + } + + func_cap->xid2cid_number += xid2cid_size / HINIC5_CQM_CHIP_CACHELINE; + func_cap->xid2cid_basic_size = HINIC5_CQM_CHIP_CACHELINE; + + hinic5_cqm_info(handle->dev_hdl, "Cap init: xid2cid_size 0x%x, xid2cid_number 0x%x\n", + xid2cid_size, func_cap->xid2cid_number); +} + +static void hinic5_cqm_service_capability_init_ppa(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, void *pra) +{ + struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct service_cap *service_capability = (struct service_cap *)pra; + struct ppa_service_cap *ppa_cap = &service_capability->ppa_cap; + + hinic5_cqm_info(handle->dev_hdl, "Cap init: ppa is valid\n"); + func_cap->hash_basic_size = HINIC5_CQM_HASH_BUCKET_SIZE_64; + func_cap->qpc_alloc_static = true; + func_cap->pagesize_reorder = HINIC5_CQM_PPA_PAGESIZE_ORDER; + func_cap->qpc_basic_size = GET_MAX(ppa_cap->pctx_sz, + func_cap->qpc_basic_size); +} + +static void hinic5_cqm_service_capability_init_ub(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, void *pra) +{ + struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct service_cap *service_capability = (struct service_cap *)pra; + struct ub_dev_cap_sdk_res 
*ub_sdk_res = &service_capability->ub_cap.sdk_res; + + hinic5_cqm_info(handle->dev_hdl, "Cap init: ub is valid\n"); + + func_cap->use_fake_parent_cla = true; + + func_cap->scqc_alloc_static = true; + func_cap->scqc_basic_size = GET_MAX(func_cap->scqc_basic_size, ub_sdk_res->cqc_entry_sz); + func_cap->scqc_number += ub_sdk_res->max_tp; + func_cap->scqc_number += ub_sdk_res->max_jfc; + func_cap->scqc_number += ub_sdk_res->max_jetty_grp; + func_cap->scqc_number += ub_sdk_res->max_vtp; + func_cap->scqc_number += ub_sdk_res->max_utp; + func_cap->scqc_number += ub_sdk_res->max_tpg; + + func_cap->scq_reserved += ub_sdk_res->max_tp; + func_cap->scq_reserved += ub_sdk_res->max_jfrc; + + func_cap->srqc_number += ub_sdk_res->max_jfr; + func_cap->srqc_basic_size = ub_sdk_res->srqc_entry_sz; + func_cap->srqc_alloc_static = true; + + func_cap->mpt_basic_size = GET_MAX(ub_sdk_res->mpt_entry_sz, func_cap->mpt_basic_size); + func_cap->mpt_number += ub_sdk_res->max_mpts; + + func_cap->qpc_alloc_static = true; + func_cap->qpc_number += ub_sdk_res->max_jetty; + func_cap->qpc_number += ub_sdk_res->max_tp; + func_cap->qpc_basic_size = GET_MAX(func_cap->qpc_basic_size, ub_sdk_res->qpc_entry_sz); + func_cap->gid_number += ub_sdk_res->max_gid; + func_cap->gid_basic_size = HINIC5_CQM_GID_SIZE_32; + func_cap->childc_number += ub_sdk_res->max_tpg + (ub_sdk_res->max_tp >> 1); + func_cap->childc_basic_size = GET_MAX(HINIC5_CQM_CHILDC_SIZE_256, func_cap->childc_basic_size); +} + +struct hinic5_cqm_srv_cap_init serv_cap_init_list[] = { + {HINIC5_CQM_SERVICE_T_NIC, hinic5_cqm_service_capability_init_nic}, + {HINIC5_CQM_SERVICE_T_OVS, hinic5_cqm_service_capability_init_ovs}, + {HINIC5_CQM_SERVICE_T_ROCE, hinic5_cqm_service_capability_init_roce}, + {HINIC5_CQM_SERVICE_T_TOE, hinic5_cqm_service_capability_init_toe}, + {HINIC5_CQM_SERVICE_T_IOE, hinic5_cqm_service_capability_init_ioe}, + {HINIC5_CQM_SERVICE_T_FC, hinic5_cqm_service_capability_init_fc}, + {HINIC5_CQM_SERVICE_T_VBS, 
hinic5_cqm_service_capability_init_vbs}, + {HINIC5_CQM_SERVICE_T_IPSEC, hinic5_cqm_service_capability_init_ipsec}, + {HINIC5_CQM_SERVICE_T_VIRTIO, hinic5_cqm_service_capability_init_virtio}, + {HINIC5_CQM_SERVICE_T_PPA, hinic5_cqm_service_capability_init_ppa}, + {HINIC5_CQM_SERVICE_T_UB, hinic5_cqm_service_capability_init_ub}, + {HINIC5_CQM_SERVICE_T_JBOF, hinic5_cqm_service_capability_init_jbof}, + {HINIC5_CQM_SERVICE_T_VROCE, hinic5_cqm_service_capability_init_vroce}, +}; + +static void hinic5_cqm_service_capability_init(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct service_cap *service_capability) +{ + u32 list_size = ARRAY_SIZE(serv_cap_init_list); + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + u32 i; + + for (i = 0; i < HINIC5_CQM_SERVICE_T_MAX; i++) { + hinic5_cqm_handle->service[i].valid = false; + hinic5_cqm_handle->service[i].has_register = false; + hinic5_cqm_handle->service[i].buf_order = 0; + } + + hinic5_cqm_service_valid_init(hinic5_cqm_handle, service_capability); + + hinic5_cqm_info(handle->dev_hdl, "Cap init: service type %d\n", + service_capability->chip_svc_type); + + for (i = 0; i < list_size; i++) { + if (hinic5_cqm_handle->service[serv_cap_init_list[i].service_type].valid && + serv_cap_init_list[i].serv_cap_proc) { + serv_cap_init_list[i].serv_cap_proc(hinic5_cqm_handle, (void *)service_capability); + } + } +} + +static u32 get_fake_func_type(struct tag_hinic5_cqm_fake_cfg *fake_cfg, u16 func_id) +{ + if (func_id == fake_cfg->parent_func) + return HINIC5_CQM_FAKE_FUNC_PARENT; + + if (func_id >= fake_cfg->child_func_start && + func_id < (fake_cfg->child_func_start + fake_cfg->child_func_number)) + return HINIC5_CQM_FAKE_FUNC_CHILD; + + return HINIC5_CQM_FAKE_FUNC_UNUSED; +} + +/* Set func_type in fake_hinic5_cqm_handle to ppf, pf, or vf. 
*/ +static void hinic5_cqm_set_func_type(struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + u32 idx = hinic5_cqm_handle->func_attribute.func_global_idx; + + if (idx == 0) + hinic5_cqm_handle->func_attribute.func_type = HINIC5_CQM_PPF; + else if (idx < HINIC5_CQM_MAX_PF_NUM) + hinic5_cqm_handle->func_attribute.func_type = HINIC5_CQM_PF; + else + hinic5_cqm_handle->func_attribute.func_type = HINIC5_CQM_VF; +} + +static void hinic5_cqm_capability_init_smf(struct hinic5_hwdev *handle, struct service_cap *svc_cap) +{ + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = handle->hinic5_cqm_hdl; + struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability; + + func_cap->lb_mode = svc_cap->lb_mode; + + /* Initializing the LB Mode */ + if (func_cap->lb_mode == HINIC5_CQM_LB_MODE_NORMAL) + func_cap->smf_pg = 0; + else + func_cap->smf_pg = svc_cap->smf_pg; + func_cap->smf_max_num = svc_cap->smf_max_num; + func_cap->smf_enabled_num = svc_cap->smf_enabled_num; + func_cap->bat_cid_index_bit_width = svc_cap->bat_cid_index_bit_width; + + hinic5_cqm_info(handle->dev_hdl, + "Cap init: lb_mode %u, smf_pg %u, smf_max_num %u\n", + func_cap->lb_mode, func_cap->smf_pg, func_cap->smf_max_num); +} + +static void hinic5_cqm_capability_init_fake_vf(struct hinic5_hwdev *handle, struct service_cap *svc_cap) +{ + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = handle->hinic5_cqm_hdl; + struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability; + struct tag_hinic5_cqm_fake_cfg *cfg = &func_cap->fake_cfg; + + func_cap->fake_func_type = HINIC5_CQM_FAKE_FUNC_UNUSED; + memset(cfg, 0, sizeof(*cfg)); + + if (svc_cap->fake_vf_num != 0) { + u32 parent_func_id = svc_cap->fake_vf_parent_func_id; + if (parent_func_id == 0) + parent_func_id = hinic5_cqm_handle->func_attribute.port_to_port_idx; + + cfg->parent_func = parent_func_id; + cfg->child_func_start = svc_cap->fake_vf_start_id; + cfg->child_func_number = svc_cap->fake_vf_num_cfg; + + 
cfg->fake_vf_lazy_init = svc_cap->fake_vf_lazy_init; + + cfg->fake_vf_max_pctx = svc_cap->fake_vf_max_pctx; + cfg->fake_vf_max_scqc_ctx = svc_cap->fake_vf_max_scqc_ctx; + cfg->fake_vf_max_srqc_ctx = svc_cap->fake_vf_max_srqc_ctx; + cfg->fake_vf_max_gid_ctx = svc_cap->fake_vf_max_gid_ctx; + cfg->fake_vf_max_mpt_ctx = svc_cap->fake_vf_max_mpt_ctx; + cfg->fake_vf_max_childc_ctx = svc_cap->fake_vf_max_childc_ctx; + + if (svc_cap->fake_vf_qpc_ctx_size_en) + cfg->fake_vf_qpc_basic_size = 0x1 << svc_cap->fake_vf_qpc_ctx_size_order; + + cfg->fake_vf_bfilter_start_addr = svc_cap->fake_vf_bfilter_start_addr; + cfg->fake_vf_bfilter_len = svc_cap->fake_vf_bfilter_len; + + func_cap->fake_func_type = get_fake_func_type(cfg, hinic5_global_func_id(handle)); + } + + hinic5_cqm_info(handle->dev_hdl, + "Cap init: fake_func_type %u, parent %u, child start %u num %u, lazy init %d\n", + func_cap->fake_func_type, cfg->parent_func, + cfg->child_func_start, cfg->child_func_number, + cfg->fake_vf_lazy_init); +} + +static int hinic5_cqm_capability_init_bloomfilter(struct hinic5_hwdev *handle) +{ + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability; + struct service_cap *service_capability = &handle->cfg_mgmt->svc_cap; + + func_cap->bloomfilter_enable = service_capability->bloomfilter_en; + hinic5_cqm_info(handle->dev_hdl, "Cap init: bloomfilter_enable %u (1: enable; 0: disable)\n", + func_cap->bloomfilter_enable); + + if (func_cap->bloomfilter_enable != 0) { + func_cap->bloomfilter_length = service_capability->bfilter_len; + func_cap->bloomfilter_addr = service_capability->bfilter_start_addr; + if (func_cap->bloomfilter_length != 0 && + !hinic5_cqm_check_align(func_cap->bloomfilter_length)) { + hinic5_cqm_err(handle->dev_hdl, "Cap init: bloomfilter_length %u is not the power of 2\n", + func_cap->bloomfilter_length); + + return HINIC5_CQM_FAIL; + } + 
} + + hinic5_cqm_info(handle->dev_hdl, "Cap init: bloomfilter_length 0x%x, bloomfilter_addr 0x%x\n", + func_cap->bloomfilter_length, func_cap->bloomfilter_addr); + + return 0; +} + +static void hinic5_cqm_capability_init_part_cap(struct hinic5_hwdev *handle) +{ + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability; + struct service_cap *service_capability = &handle->cfg_mgmt->svc_cap; + + func_cap->flow_table_based_conn_number = service_capability->max_connect_num; + func_cap->flow_table_based_conn_cache_number = service_capability->max_stick2cache_num; + hinic5_cqm_info(handle->dev_hdl, "Cap init: cfg max_conn_num 0x%x, max_cache_conn_num 0x%x\n", + func_cap->flow_table_based_conn_number, + func_cap->flow_table_based_conn_cache_number); + + func_cap->hash_basic_size = HINIC5_CQM_HASH_BUCKET_SIZE_64; + + func_cap->qpc_reserved = 0; + func_cap->qpc_reserved_back = 0; + func_cap->mpt_reserved = 0; + func_cap->mpt_reserved_back = 0; + func_cap->scq_reserved = 0; + func_cap->scq_reserved_back = 0; + func_cap->srq_reserved = 0; + func_cap->srq_reserved_back = 0; + func_cap->qpc_alloc_static = false; + func_cap->scqc_alloc_static = false; + func_cap->srqc_alloc_static = false; + + func_cap->l3i_number = 0; + func_cap->l3i_basic_size = HINIC5_CQM_L3I_SIZE_8; + + func_cap->xid_alloc_mode = true; /* xid alloc do not reuse */ + func_cap->gpa_check_enable = true; +} + +STATIC int hinic5_cqm_get_ppf_timer_cfg(struct hinic5_hwdev *handle) +{ + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = handle->hinic5_cqm_hdl; + struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability; + struct timer_vf_info_seg *vf_segs = func_cap->timer_vf_segs; + struct service_cap *svc_cap = &handle->cfg_mgmt->svc_cap; + u16 vf_actual = 0; + int i, err; + + err = hinic5_get_ppf_timer_cfg(handle); + if (err != 0) + return err; + + 
func_cap->timer_pf_id_start = svc_cap->timer_pf_id_start; + func_cap->timer_pf_num = svc_cap->timer_pf_num; + func_cap->timer_vf_id_start = svc_cap->timer_vf_id_start; + func_cap->timer_vf_num = svc_cap->timer_vf_num; + + memcpy(func_cap->timer_vf_segs, + svc_cap->timer_vf_segs, sizeof(svc_cap->timer_vf_segs)); + + for (i = 0; i < TIMER_VF_SEGS_NUM; i++) { + if (vf_segs[i].start == 0) + break; + vf_actual += vf_segs[i].num; + } + + func_cap->timer_vf_num_actual = vf_actual; + if (vf_actual == 0) + func_cap->timer_vf_num_actual = func_cap->timer_vf_num; + + hinic5_cqm_info(handle->dev_hdl, + "host timer cfg: pf start %u, num %u. vf start %u, num %u, actual %u, seg deploy %d\n", + func_cap->timer_pf_id_start, func_cap->timer_pf_num, + func_cap->timer_vf_id_start, func_cap->timer_vf_num, + func_cap->timer_vf_num_actual, + func_cap->timer_vf_deploy_with_segs); + + hinic5_cqm_info(handle->dev_hdl, + "vf timer segs: %u-%u %u-%u %u-%u %u-%u %u-%u %u-%u %u-%u\n", + vf_segs[0x0].start, vf_segs[0x0].start + vf_segs[0x0].num, + vf_segs[0x1].start, vf_segs[0x1].start + vf_segs[0x1].num, + vf_segs[0x2].start, vf_segs[0x2].start + vf_segs[0x2].num, + vf_segs[0x3].start, vf_segs[0x3].start + vf_segs[0x3].num, + vf_segs[0x4].start, vf_segs[0x4].start + vf_segs[0x4].num, + vf_segs[0x5].start, vf_segs[0x5].start + vf_segs[0x5].num, + vf_segs[0x6].start, vf_segs[0x6].start + vf_segs[0x6].num); + return 0; +} + +static int hinic5_cqm_capability_init_timer(struct hinic5_hwdev *handle) +{ + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + struct service_cap *service_capability = &handle->cfg_mgmt->svc_cap; + struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability; + u32 total_timer_num = 0; + int err; + + /* Initializes the PPF capabilities: include timer, pf, vf. 
*/ + if (HINIC5_CQM_IS_PPF(hinic5_cqm_handle) && (service_capability->timer_en != 0)) { + func_cap->pf_num = service_capability->pf_num; + func_cap->pf_id_start = service_capability->pf_id_start; + func_cap->vf_num = service_capability->vf_num; + func_cap->vf_id_start = service_capability->vf_id_start; + hinic5_cqm_info(handle->dev_hdl, "Cap init: total function num 0x%x\n", + service_capability->host_total_function); + hinic5_cqm_info(handle->dev_hdl, "Cap init: pf_num 0x%x, pf_id_start 0x%x, vf_num 0x%x, vf_id_start 0x%x\n", + func_cap->pf_num, func_cap->pf_id_start, + func_cap->vf_num, func_cap->vf_id_start); + + err = hinic5_cqm_get_ppf_timer_cfg(handle); + if (err != 0) + return err; + + total_timer_num = func_cap->timer_pf_num + func_cap->timer_vf_num; + } + + func_cap->timer_enable = service_capability->timer_en; + hinic5_cqm_info(handle->dev_hdl, "Cap init: timer_enable %u (1: enable; 0: disable)\n", + func_cap->timer_enable); + + func_cap->timer_number = HINIC5_CQM_TIMER_ALIGN_SCALE_NUM * total_timer_num; + func_cap->timer_basic_size = HINIC5_CQM_TIMER_SIZE_32; + + return 0; +} + +static void print_bat_cap(struct hinic5_hwdev *hwdev, const char *prefix_in, + struct tag_hinic5_cqm_func_capability *cap) +{ + const char *prefix = prefix_in ? 
prefix_in : ""; + + hinic5_cqm_info(hwdev->dev_hdl, "%sCap init: hash number 0x%x\n", + prefix, cap->hash_number); + hinic5_cqm_info(hwdev->dev_hdl, "%sCap init: qpc number 0x%x, reserved 0x%x, basic size 0x%x, alloc static %d\n", + prefix, cap->qpc_number, cap->qpc_reserved, cap->qpc_basic_size, + cap->qpc_alloc_static); + hinic5_cqm_info(hwdev->dev_hdl, "%sCap init: scqc number 0x%x, reserved 0x%x, basic size 0x%x, alloc static %d\n", + prefix, cap->scqc_number, cap->scq_reserved, cap->scqc_basic_size, + cap->scqc_alloc_static); + hinic5_cqm_info(hwdev->dev_hdl, "%sCap init: srqc number 0x%x, reserved 0x%x, basic size 0x%x, alloc static %d\n", + prefix, cap->srqc_number, cap->srq_reserved, cap->srqc_basic_size, + cap->srqc_alloc_static); + hinic5_cqm_info(hwdev->dev_hdl, "%sCap init: mpt number 0x%x, reserved 0x%x\n", + prefix, cap->mpt_number, cap->mpt_reserved); + hinic5_cqm_info(hwdev->dev_hdl, "%sCap init: gid number 0x%x, lun number 0x%x\n", + prefix, cap->gid_number, cap->lun_number); + hinic5_cqm_info(hwdev->dev_hdl, "%sCap init: taskmap number 0x%x, l3i number 0x%x\n", + prefix, cap->taskmap_number, cap->l3i_number); + hinic5_cqm_info(hwdev->dev_hdl, "%sCap init: childc number 0x%x, basic size 0x%x\n", + prefix, cap->childc_number, cap->childc_basic_size); + hinic5_cqm_info(hwdev->dev_hdl, "%sCap init: timer number 0x%x\n", + prefix, cap->timer_number); + hinic5_cqm_info(hwdev->dev_hdl, "%sCap init: xid2cid number 0x%x, alloc static %d\n", + prefix, cap->xid2cid_number, cap->xid_alloc_mode); + hinic5_cqm_info(hwdev->dev_hdl, "%sCap init: reorder number 0x%x\n", + prefix, cap->reorder_number); +} + +static void hinic5_cqm_capability_init_cap_print(struct hinic5_hwdev *handle) +{ + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability; + struct service_cap *service_capability = &handle->cfg_mgmt->svc_cap; + + 
func_cap->ft_enable = service_capability->sf_svc_attr.ft_en; + func_cap->rdma_enable = service_capability->sf_svc_attr.rdma_en; + func_cap->gpa_spu_en = service_capability->func_gpa_spu_en; + + hinic5_cqm_info(handle->dev_hdl, "Cap init: pagesize_reorder %u\n", func_cap->pagesize_reorder); + hinic5_cqm_info(handle->dev_hdl, "Cap init: acs_spu_en %u, gpa_check_enable %d\n", + func_cap->gpa_spu_en, func_cap->gpa_check_enable); + hinic5_cqm_info(handle->dev_hdl, "Cap init: ft_enable %d, rdma_enable %d\n", + func_cap->ft_enable, func_cap->rdma_enable); + + print_bat_cap(handle, NULL, func_cap); +} + +/** + * Prototype : hinic5_cqm_capability_init + * Description : Initializes the function and service capabilities of the HINIC5_CQM. + * Information needs to be read from the configuration management + * module. + * Input : void *ex_handle + * Output : None + * Return Value : s32 + * 1.Date : 2015/12/9 + * Modification : Created function + */ +s32 hinic5_cqm_capability_init(void *ex_handle) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + struct service_cap *service_capability = &handle->cfg_mgmt->svc_cap; + struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability; + int err = 0; + + err = hinic5_cqm_capability_init_timer(handle); + if (err != 0) + goto out; + + err = hinic5_cqm_capability_init_bloomfilter(handle); + if (err != 0) + goto out; + + hinic5_cqm_capability_init_part_cap(handle); + + hinic5_cqm_capability_init_smf(handle, service_capability); + + hinic5_cqm_capability_init_fake_vf(handle, service_capability); + + hinic5_cqm_service_capability_init(hinic5_cqm_handle, service_capability); + + hinic5_cqm_test_mode_init(hinic5_cqm_handle, service_capability); + + hinic5_cqm_service_capability_update(hinic5_cqm_handle); + + hinic5_cqm_capability_init_cap_print(handle); + + return HINIC5_CQM_SUCCESS; + +out: 
+ if (HINIC5_CQM_IS_PPF(hinic5_cqm_handle)) + func_cap->timer_enable = 0; + + return err; +} + +static void hinic5_cqm_fake_uninit(struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + u32 i; + + if (!HINIC5_CQM_IS_FAKE_PARENT(hinic5_cqm_handle)) + return; + + for (i = 0; i < HINIC5_CQM_FAKE_FUNC_MAX; i++) { + kfree(hinic5_cqm_handle->fake_hinic5_cqm_handle[i]); + hinic5_cqm_handle->fake_hinic5_cqm_handle[i] = NULL; + } +} + +static void set_fake_hinic5_cqm_attr(struct hinic5_hwdev *handle, struct tag_hinic5_cqm_handle *fake_hinic5_cqm_handle, + u32 child_func_start, u32 i) +{ + struct hinic5_func_attr *func_attr = &fake_hinic5_cqm_handle->func_attribute; + struct tag_hinic5_cqm_func_capability *func_cap = &fake_hinic5_cqm_handle->func_capability; + struct tag_hinic5_cqm_fake_cfg *cfg = &func_cap->fake_cfg; + + func_attr->func_global_idx = (u16)(child_func_start + i); + hinic5_cqm_set_func_type(fake_hinic5_cqm_handle); + + func_cap->fake_func_type = HINIC5_CQM_FAKE_FUNC_CHILD_AGENT; + + func_cap->qpc_number = cfg->fake_vf_max_pctx; + func_cap->scqc_number = cfg->fake_vf_max_scqc_ctx; + func_cap->srqc_number = cfg->fake_vf_max_srqc_ctx; + func_cap->gid_number = cfg->fake_vf_max_gid_ctx; + func_cap->mpt_number = cfg->fake_vf_max_mpt_ctx; + func_cap->childc_number = cfg->fake_vf_max_childc_ctx; + func_cap->hash_number = cfg->fake_vf_max_pctx; + func_cap->qpc_reserved = cfg->fake_vf_max_pctx; + + if (cfg->fake_vf_qpc_basic_size != 0) + func_cap->qpc_basic_size = cfg->fake_vf_qpc_basic_size; + + if (cfg->fake_vf_bfilter_len != 0) { + func_cap->bloomfilter_enable = true; + func_cap->bloomfilter_addr = cfg->fake_vf_bfilter_start_addr + + cfg->fake_vf_bfilter_len * i; + func_cap->bloomfilter_length = cfg->fake_vf_bfilter_len; + } + + hinic5_cqm_service_capability_update(fake_hinic5_cqm_handle); +} + +static void print_fake_hinic5_cqm_attr(struct hinic5_hwdev *hwdev, struct tag_hinic5_cqm_handle *fake_hinic5_cqm_handle) +{ + hinic5_cqm_func_capability_s *fake_func_cap = 
&fake_hinic5_cqm_handle->func_capability; + struct hinic5_func_attr *fake_func_attr = &fake_hinic5_cqm_handle->func_attribute; + const u16 fake_func_id = fake_func_attr->func_global_idx; + char prefix[0x20] = { 0 }; + hinic5_cqm_info(hwdev->dev_hdl, "[Fake %u] global_func_idx %u, func_type %d, parent_func_idx %u\n", + fake_func_id, fake_func_id, fake_func_attr->func_type, hinic5_global_func_id(hwdev)); + + sprintf(prefix, "[Fake %u] ", fake_func_id); + print_bat_cap(hwdev, prefix, fake_func_cap); +} + +/** + * Prototype : hinic5_cqm_fake_init + * Description : When the fake VF mode is supported, the HINIC5_CQM handles of + * the fake VFs need to be copied. + * Input : struct tag_hinic5_cqm_handle *hinic5_cqm_handle: Parent HINIC5_CQM handle of the current PF + * Output : None + * Return Value : s32 + * 1.Date : 2020/4/15 + * Modification : Created function + */ +static s32 hinic5_cqm_fake_init(struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_handle *fake_hinic5_cqm_handle = NULL; + u32 child_func_start, child_func_number, i; + + if (!HINIC5_CQM_IS_FAKE_PARENT(hinic5_cqm_handle)) + return HINIC5_CQM_SUCCESS; + + child_func_start = hinic5_cqm_get_child_func_start(hinic5_cqm_handle); + child_func_number = hinic5_cqm_get_child_func_number(hinic5_cqm_handle); + if (child_func_number == 0) { + hinic5_cqm_warn(handle->dev_hdl, "no child func, skip fake init\n"); + return HINIC5_CQM_SUCCESS; + } + + for (i = 0; i < child_func_number; i++) { + fake_hinic5_cqm_handle = hinic5_cqm_handle_fork(hinic5_cqm_handle); + if (!fake_hinic5_cqm_handle) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_handle_fork)); + goto err; + } + + set_fake_hinic5_cqm_attr(handle, fake_hinic5_cqm_handle, (u32)child_func_start, i); + print_fake_hinic5_cqm_attr(handle, fake_hinic5_cqm_handle); + + fake_hinic5_cqm_handle->parent_hinic5_cqm_handle = hinic5_cqm_handle; + 
hinic5_cqm_handle->fake_hinic5_cqm_handle[i] = fake_hinic5_cqm_handle; + } + + return HINIC5_CQM_SUCCESS; + +err: + hinic5_cqm_fake_uninit(hinic5_cqm_handle); + return HINIC5_CQM_FAIL; +} + +static void hinic5_cqm_fake_mem_uninit(struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + struct tag_hinic5_cqm_handle *fake_hinic5_cqm_handle = NULL; + u32 child_func_number, i; + + if (!HINIC5_CQM_IS_FAKE_PARENT(hinic5_cqm_handle)) + return; + + child_func_number = hinic5_cqm_get_child_func_number(hinic5_cqm_handle); + + for (i = 0; i < child_func_number; i++) { + fake_hinic5_cqm_handle = hinic5_cqm_handle->fake_hinic5_cqm_handle[i]; + atomic_set(&fake_hinic5_cqm_handle->handle_state, HINIC5_CQM_HANDLE_STATE_REMOVE); + + hinic5_cqm_object_table_uninit(fake_hinic5_cqm_handle); + hinic5_cqm_bitmap_uninit(fake_hinic5_cqm_handle); + hinic5_cqm_cla_uninit(fake_hinic5_cqm_handle, HINIC5_CQM_BAT_ENTRY_MAX); + hinic5_cqm_bat_uninit(fake_hinic5_cqm_handle); + } +} + +static s32 fake_hinic5_cqm_handle_mem_init(struct tag_hinic5_cqm_handle *fake_hinic5_cqm_handle) +{ + struct hinic5_hwdev *handle = fake_hinic5_cqm_handle->ex_handle; + + if (!HINIC5_CQM_IS_FAKE_CHILD_AGENT(fake_hinic5_cqm_handle)) + return HINIC5_CQM_FAIL; + + if (atomic_cmpxchg(&fake_hinic5_cqm_handle->handle_state, + HINIC5_CQM_HANDLE_STATE_INIT, HINIC5_CQM_HANDLE_STATE_READY + ) != HINIC5_CQM_HANDLE_STATE_INIT) { + hinic5_cqm_warn(handle->dev_hdl, "[Fake %u] mem already inited\n", + fake_hinic5_cqm_handle->func_attribute.func_global_idx); + return HINIC5_CQM_FAIL; + } + + if (hinic5_cqm_bat_init(fake_hinic5_cqm_handle) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_bat_init)); + goto err1; + } + + if (hinic5_cqm_cla_init(fake_hinic5_cqm_handle) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_cla_init)); + goto err2; + } + + if (hinic5_cqm_bitmap_init(fake_hinic5_cqm_handle) != HINIC5_CQM_SUCCESS) { + 
hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_bitmap_init)); + goto err3; + } + + if (hinic5_cqm_object_table_init(fake_hinic5_cqm_handle) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, + HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_object_table_init)); + goto err4; + } + + hinic5_cqm_info(handle->dev_hdl, "[Fake %u] mem inited\n", + fake_hinic5_cqm_handle->func_attribute.func_global_idx); + + return HINIC5_CQM_SUCCESS; + +err4: + hinic5_cqm_bitmap_uninit(fake_hinic5_cqm_handle); +err3: + hinic5_cqm_cla_uninit(fake_hinic5_cqm_handle, HINIC5_CQM_BAT_ENTRY_MAX); +err2: + hinic5_cqm_bat_uninit(fake_hinic5_cqm_handle); +err1: + hinic5_cqm_fake_mem_uninit(fake_hinic5_cqm_handle); + return HINIC5_CQM_FAIL; +} + +/** + * Prototype : hinic5_cqm_fake_mem_init + * Description : Initialize resources of the extended fake function. + * Input : struct tag_hinic5_cqm_handle *hinic5_cqm_handle: Parent HINIC5_CQM handle of the current PF + * Output : None + * Return Value : s32 + * 1.Date : 2020/4/15 + * Modification : Created function + */ +static s32 hinic5_cqm_fake_mem_init(struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_handle *fake_hinic5_cqm_handle = NULL; + u32 child_func_number, i; + int ret; + + if (!HINIC5_CQM_IS_FAKE_PARENT(hinic5_cqm_handle)) + return HINIC5_CQM_SUCCESS; + + child_func_number = hinic5_cqm_get_child_func_number(hinic5_cqm_handle); + + for (i = 0; i < child_func_number; i++) { + fake_hinic5_cqm_handle = hinic5_cqm_handle->fake_hinic5_cqm_handle[i]; + ret = snprintf(fake_hinic5_cqm_handle->name, HINIC5_VRAM_NAME_MAX_LEN, + "%s%s%02u", hinic5_cqm_handle->name, HINIC5_VRAM_HINIC5_CQM_FAKE_MEM_BASE, i); + if (ret < 0) { + hinic5_cqm_err(handle->dev_hdl, "fake hinic5_cqm handle hinic5_vram name snprintf_s failed"); + return HINIC5_CQM_FAIL; + } + + /* Fake VF lazy init support */ + if (hinic5_cqm_is_fake_vf_lazy_init(hinic5_cqm_handle)) { + 
hinic5_cqm_info(handle->dev_hdl, "[Fake %u] init delayed\n", + fake_hinic5_cqm_handle->func_attribute.func_global_idx); + continue; + } + + ret = fake_hinic5_cqm_handle_mem_init(fake_hinic5_cqm_handle); + if (ret != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, + HINIC5_CQM_FUNCTION_FAIL(fake_hinic5_cqm_handle_mem_init)); + goto err; + } + } + + return HINIC5_CQM_SUCCESS; + +err: + hinic5_cqm_fake_mem_uninit(hinic5_cqm_handle); + return HINIC5_CQM_FAIL; +} + +/** + * Prototype : hinic5_cqm_mem_init + * Description : Initialize HINIC5_CQM memory, including tables at different levels. + * Input : void *ex_handle + * Output : None + * Return Value : s32 + * 1.Date : 2015/7/6 + * Modification : Created function + */ +s32 hinic5_cqm_mem_init(void *ex_handle) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + int ret; + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + ret = snprintf(hinic5_cqm_handle->name, HINIC5_VRAM_NAME_MAX_LEN, + "%s%02u", HINIC5_VRAM_HINIC5_CQM_GLB_FUNC_BASE, hinic5_global_func_id(handle)); + if (ret < 0) { + hinic5_cqm_err(handle->dev_hdl, "hinic5_cqm handle hinic5_vram name snprintf_s failed"); + return HINIC5_CQM_FAIL; + } + if (hinic5_cqm_fake_init(hinic5_cqm_handle) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_fake_init)); + return HINIC5_CQM_FAIL; + } + + if (hinic5_cqm_fake_mem_init(hinic5_cqm_handle) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_fake_mem_init)); + goto err1; + } + + if (hinic5_cqm_bat_init(hinic5_cqm_handle) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_bat_init)); + goto err2; + } + + if (hinic5_cqm_cla_init(hinic5_cqm_handle) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_cla_init)); + goto err3; + } + + if 
(hinic5_cqm_bitmap_init(hinic5_cqm_handle) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_bitmap_init)); + goto err4; + } + + if (hinic5_cqm_object_table_init(hinic5_cqm_handle) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, + HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_object_table_init)); + goto err5; + } + + return HINIC5_CQM_SUCCESS; + +err5: + hinic5_cqm_bitmap_uninit(hinic5_cqm_handle); +err4: + hinic5_cqm_cla_uninit(hinic5_cqm_handle, HINIC5_CQM_BAT_ENTRY_MAX); +err3: + hinic5_cqm_bat_uninit(hinic5_cqm_handle); +err2: + hinic5_cqm_fake_mem_uninit(hinic5_cqm_handle); +err1: + hinic5_cqm_fake_uninit(hinic5_cqm_handle); + return HINIC5_CQM_FAIL; +} + +int hinic5_cqm_init_fake_vf(void *ex_handle, u32 vf_id) +{ + struct hinic5_hwdev *handle = ex_handle; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_handle *fake_hinic5_cqm_handle = NULL; + u32 child_func_start, child_func_number; + int err; + + if (unlikely(!ex_handle)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ex_handle)); + return -EINVAL; + } + + hinic5_cqm_handle = handle->hinic5_cqm_hdl; + if (unlikely(!hinic5_cqm_handle)) { + hinic5_cqm_err(handle->dev_hdl, "Stateful not init\n"); + return -EINVAL; + } + + if (unlikely(atomic_read(&hinic5_cqm_handle->handle_state) != HINIC5_CQM_HANDLE_STATE_READY)) { + hinic5_cqm_err(handle->dev_hdl, "Stateful not ready\n"); + return -EAGAIN; + } + + if (!HINIC5_CQM_IS_FAKE_PARENT(hinic5_cqm_handle)) { + hinic5_cqm_err(handle->dev_hdl, "Not a Fake VF group parent\n"); + return -EPERM; + } + + child_func_start = hinic5_cqm_get_child_func_start(hinic5_cqm_handle); + child_func_number = hinic5_cqm_get_child_func_number(hinic5_cqm_handle); + if (vf_id < child_func_start || vf_id >= child_func_start + child_func_number) { + hinic5_cqm_err(handle->dev_hdl, + "VF %u is not in the Fake VF group\n", vf_id); + return -EINVAL; + } + + fake_hinic5_cqm_handle = 
hinic5_cqm_handle->fake_hinic5_cqm_handle[vf_id - child_func_start]; + err = fake_hinic5_cqm_handle_mem_init(fake_hinic5_cqm_handle); + if (err != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, + HINIC5_CQM_FUNCTION_FAIL(fake_hinic5_cqm_handle_mem_init)); + return -EFAULT; + } + + return 0; +} +EXPORT_SYMBOL(hinic5_cqm_init_fake_vf); + +void hinic5_cqm_cla_fake_vf_cache_invalid(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, u32 reset_flag) +{ + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_handle *fake_hinic5_cqm_handle = NULL; + u32 child_func_number, i; + u16 func_global_idx; + int err; + + if (!HINIC5_CQM_IS_FAKE_PARENT(hinic5_cqm_handle)) + return; + + child_func_number = hinic5_cqm_get_child_func_number(hinic5_cqm_handle); + + for (i = 0; i < child_func_number; i++) { + fake_hinic5_cqm_handle = hinic5_cqm_handle->fake_hinic5_cqm_handle[i]; + func_global_idx = fake_hinic5_cqm_handle->func_attribute.func_global_idx; + + err = hinic5_func_reset(handle, func_global_idx, + BIT(reset_flag), HINIC5_CHANNEL_COMM); + if (err != 0) + hinic5_cqm_err(handle->dev_hdl, "hinic5_cqm fake vf cla cache invalid err, func_id 0x%x\n", func_global_idx); + } +} + +void hinic5_cqm_cla_func_cache_invalid(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, u32 reset_flag) +{ + int err; + u16 func_id; + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)hinic5_cqm_handle->ex_handle; + + func_id = hinic5_global_func_id(handle); + err = hinic5_func_reset(handle, func_id, BIT(reset_flag), HINIC5_CHANNEL_COMM); + if (err != 0) + hinic5_cqm_err(handle->dev_hdl, "hinic5_cqm cla cache invalid err, func_index = 0x%x\n", func_id); +} + +/** + * Prototype : hinic5_cqm_mem_uninit + * Description : Deinitialize HINIC5_CQM memory, including tables at different levels. 
+ * Input : void *ex_handle + * Output : None + * Return Value : void + * 1.Date : 2015/7/6 + * Modification : Created function + */ +void hinic5_cqm_mem_uninit(void *ex_handle) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + + hinic5_cqm_object_table_uninit(hinic5_cqm_handle); + hinic5_cqm_bitmap_uninit(hinic5_cqm_handle); + + if (COMM_SUPPORT_SMF_CACHE_INVALID(handle)) { + hinic5_cqm_cla_fake_vf_cache_invalid(hinic5_cqm_handle, RES_TYPE_SMF); + hinic5_cqm_cla_func_cache_invalid(hinic5_cqm_handle, RES_TYPE_SMF); + } + + hinic5_cqm_cla_uninit(hinic5_cqm_handle, HINIC5_CQM_BAT_ENTRY_MAX); + hinic5_cqm_bat_uninit(hinic5_cqm_handle); + hinic5_cqm_fake_mem_uninit(hinic5_cqm_handle); + + if (COMM_SUPPORT_SMF_CACHE_INVALID(handle)) { + hinic5_cqm_cla_fake_vf_cache_invalid(hinic5_cqm_handle, RES_TYPE_SMF_CACHE_INVALID); + hinic5_cqm_cla_func_cache_invalid(hinic5_cqm_handle, RES_TYPE_SMF_CACHE_INVALID); + } + + hinic5_cqm_fake_uninit(hinic5_cqm_handle); +} + +/** + * Prototype : hinic5_cqm_event_init + * Description : Initialize HINIC5_CQM event callback. + * Input : void *ex_handle + * Output : None + * Return Value : s32 + * 1.Date : 2015/7/6 + * Modification : Created function + */ +s32 hinic5_cqm_event_init(void *ex_handle) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + + /* Registers the CEQ and AEQ callback functions. 
*/ + if (hinic5_ceq_register_cb(ex_handle, ex_handle, HINIC5_NON_L2NIC_SCQ, + hinic5_cqm_scq_callback) != CHIPIF_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, "Event: fail to register scq callback\n"); + return HINIC5_CQM_FAIL; + } + + if (hinic5_ceq_register_cb(ex_handle, ex_handle, HINIC5_NON_L2NIC_ECQ, + hinic5_cqm_ecq_callback) != CHIPIF_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, "Event: fail to register ecq callback\n"); + goto err1; + } + + if (hinic5_ceq_register_cb(ex_handle, ex_handle, HINIC5_NON_L2NIC_NO_CQ_EQ, + hinic5_cqm_nocq_callback) != CHIPIF_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, "Event: fail to register nocq callback\n"); + goto err2; + } + + if (hinic5_aeq_register_swe_cb(ex_handle, ex_handle, HINIC5_STATEFUL_EVENT, + hinic5_cqm_aeq_callback) != CHIPIF_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, "Event: fail to register aeq callback\n"); + goto err3; + } + + return HINIC5_CQM_SUCCESS; + +err3: + hinic5_ceq_unregister_cb(ex_handle, HINIC5_NON_L2NIC_NO_CQ_EQ); +err2: + hinic5_ceq_unregister_cb(ex_handle, HINIC5_NON_L2NIC_ECQ); +err1: + hinic5_ceq_unregister_cb(ex_handle, HINIC5_NON_L2NIC_SCQ); + return HINIC5_CQM_FAIL; +} + +/** + * Prototype : hinic5_cqm_event_uninit + * Description : Deinitialize HINIC5_CQM event callback. + * Input : void *ex_handle + * Output : None + * Return Value : void + * 1.Date : 2015/7/6 + * Modification : Created function + */ +void hinic5_cqm_event_uninit(void *ex_handle) +{ + hinic5_aeq_unregister_swe_cb(ex_handle, HINIC5_STATEFUL_EVENT); + hinic5_ceq_unregister_cb(ex_handle, HINIC5_NON_L2NIC_NO_CQ_EQ); + hinic5_ceq_unregister_cb(ex_handle, HINIC5_NON_L2NIC_ECQ); + hinic5_ceq_unregister_cb(ex_handle, HINIC5_NON_L2NIC_SCQ); +} + +/** + * Prototype : hinic5_cqm_scq_callback + * Description : HINIC5_CQM module callback processing for the ceq, + * which processes NON_L2NIC_SCQ. 
+ * Input : void *ex_handle + * u32 ceqe_data + * Output : None + * Return Value : void + * 1.Date : 2015/5/5 + * Modification : Created function + */ +void hinic5_cqm_scq_callback(void *ex_handle, u32 ceqe_data) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_service_register_template *service_template = NULL; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_service *service = NULL; + struct tag_hinic5_cqm_queue *hinic5_cqm_queue = NULL; + struct tag_hinic5_cqm_object *obj = NULL; + + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(scq_callback_ex_handle)); + return; + } + + atomic_inc(&handle->hw_stats.hinic5_cqm_stats.hinic5_cqm_scq_callback_cnt); + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + if (unlikely(hinic5_cqm_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(scq_callback_hinic5_cqm_handle)); + return; + } + + hinic5_cqm_dbg_on(hinic5_cqm_verbose, handle->dev_hdl, + "Event: %s, ceqe_data=0x%x\n", __func__, ceqe_data); + obj = hinic5_cqm_object_get(ex_handle, HINIC5_CQM_OBJECT_NONRDMA_SCQ, + HINIC5_CQM_CQN_FROM_CEQE(ceqe_data), true); + if (unlikely(obj == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(scq_callback_obj)); + return; + } + + if (unlikely(obj->service_type >= HINIC5_CQM_SERVICE_T_MAX)) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(obj->service_type)); + hinic5_cqm_object_put(obj); + return; + } + + service = &hinic5_cqm_handle->service[obj->service_type]; + service_template = &service->service_template; + if (service_template->shared_cq_ceq_callback) { + hinic5_cqm_queue = (struct tag_hinic5_cqm_queue *)(void *)obj; + service_template->shared_cq_ceq_callback(service_template->service_handle, + HINIC5_CQM_CQN_FROM_CEQE(ceqe_data), + hinic5_cqm_queue->priv); + } else { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_PTR_NULL(shared_cq_ceq_callback)); + } + + 
hinic5_cqm_object_put(obj); +} + +/** + * Prototype : hinic5_cqm_ecq_callback + * Description : HINIC5_CQM module callback processing for the ceq, + * which processes NON_L2NIC_ECQ. + * Input : void *ex_handle + * u32 ceqe_data + * Output : None + * Return Value : void + * 1.Date : 2015/5/5 + * Modification : Created function + */ +void hinic5_cqm_ecq_callback(void *ex_handle, u32 ceqe_data) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_service_register_template *service_template = NULL; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_service *service = NULL; + struct tag_hinic5_cqm_qpc_mpt *qpc = NULL; + struct tag_hinic5_cqm_object *obj = NULL; + + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ecq_callback_ex_handle)); + return; + } + + atomic_inc(&handle->hw_stats.hinic5_cqm_stats.hinic5_cqm_ecq_callback_cnt); + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + if (unlikely(hinic5_cqm_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ecq_callback_hinic5_cqm_handle)); + return; + } + + obj = hinic5_cqm_object_get(ex_handle, HINIC5_CQM_OBJECT_SERVICE_CTX, + HINIC5_CQM_XID_FROM_CEQE(ceqe_data), true); + if (unlikely(obj == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ecq_callback_obj)); + return; + } + + if (unlikely(obj->service_type >= HINIC5_CQM_SERVICE_T_MAX)) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(obj->service_type)); + hinic5_cqm_object_put(obj); + return; + } + + service = &hinic5_cqm_handle->service[obj->service_type]; + service_template = &service->service_template; + if (service_template->embedded_cq_ceq_callback) { + qpc = (struct tag_hinic5_cqm_qpc_mpt *)(void *)obj; + service_template->embedded_cq_ceq_callback(service_template->service_handle, + HINIC5_CQM_XID_FROM_CEQE(ceqe_data), qpc->priv); + } else { + hinic5_cqm_err(handle->dev_hdl, + 
HINIC5_CQM_PTR_NULL(embedded_cq_ceq_callback)); + } + + hinic5_cqm_object_put(obj); +} + +/** + * Prototype : hinic5_cqm_nocq_callback + * Description : HINIC5_CQM module callback processing for the ceq, + * which processes NON_L2NIC_NO_CQ_EQ. + * Input : void *ex_handle + * u32 ceqe_data + * Output : None + * Return Value : void + * 1.Date : 2015/5/5 + * Modification : Created function + */ +void hinic5_cqm_nocq_callback(void *ex_handle, u32 ceqe_data) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_service_register_template *service_template = NULL; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_service *service = NULL; + struct tag_hinic5_cqm_qpc_mpt *qpc = NULL; + struct tag_hinic5_cqm_object *obj = NULL; + + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(nocq_callback_ex_handle)); + return; + } + + atomic_inc(&handle->hw_stats.hinic5_cqm_stats.hinic5_cqm_nocq_callback_cnt); + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + if (unlikely(hinic5_cqm_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(nocq_callback_hinic5_cqm_handle)); + return; + } + + obj = hinic5_cqm_object_get(ex_handle, HINIC5_CQM_OBJECT_SERVICE_CTX, + HINIC5_CQM_XID_FROM_CEQE(ceqe_data), true); + if (unlikely(obj == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(nocq_callback_obj)); + return; + } + + if (unlikely(obj->service_type >= HINIC5_CQM_SERVICE_T_MAX)) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(obj->service_type)); + hinic5_cqm_object_put(obj); + return; + } + + service = &hinic5_cqm_handle->service[obj->service_type]; + service_template = &service->service_template; + if (service_template->no_cq_ceq_callback) { + qpc = (struct tag_hinic5_cqm_qpc_mpt *)(void *)obj; + service_template->no_cq_ceq_callback(service_template->service_handle, + HINIC5_CQM_XID_FROM_CEQE(ceqe_data), + 
HINIC5_CQM_QID_FROM_CEQE(ceqe_data), + qpc->priv); + } else { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_PTR_NULL(no_cq_ceq_callback)); + } + + hinic5_cqm_object_put(obj); +} + +/* Distributes events to different service modules + * based on the event type. + */ +static u32 hinic5_cqm_aeq_event2type(u8 event) +{ + if (event < HINIC5_CQM_AEQ_BASE_T_DMMU) + return HINIC5_CQM_SERVICE_T_NIC; + if (event < HINIC5_CQM_AEQ_BASE_T_ROCE) + return HINIC5_CQM_SERVICE_T_DMMU; + if (event < HINIC5_CQM_AEQ_BASE_T_FC) + return HINIC5_CQM_SERVICE_T_ROCE; + if (event < HINIC5_CQM_AEQ_BASE_T_IOE) + return HINIC5_CQM_SERVICE_T_FC; + if (event < HINIC5_CQM_AEQ_BASE_T_TOE) + return HINIC5_CQM_SERVICE_T_IOE; + if (event < HINIC5_CQM_AEQ_BASE_T_UB) + return HINIC5_CQM_SERVICE_T_TOE; + if (event < HINIC5_CQM_AEQ_BASE_T_VBS) + return HINIC5_CQM_SERVICE_T_UB; + if (event < HINIC5_CQM_AEQ_BASE_T_IPSEC) + return HINIC5_CQM_SERVICE_T_VBS; + if (event < HINIC5_CQM_AEQ_BASE_T_MAX) + return HINIC5_CQM_SERVICE_T_IPSEC; + return HINIC5_CQM_SERVICE_T_MAX; +} + +/** + * Prototype : hinic5_cqm_aeq_callback + * Description : HINIC5_CQM module callback processing for the aeq. 
+ * Input : void *ex_handle + * u8 event + * u64 data + * Output : None + * Return Value : void + * 1.Date : 2015/5/5 + * Modification : Created function + */ +u8 hinic5_cqm_aeq_callback(void *ex_handle, u8 event, u8 *data) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_service_register_template *service_template = NULL; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_service *service = NULL; + u8 event_level = FAULT_LEVEL_MAX; + u32 service_type; + + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(aeq_callback_ex_handle)); + return event_level; + } + + if (event >= HINIC5_CQM_AEQ_CALLBACK_CNT_MAX) { + hinic5_cqm_err(handle->dev_hdl, "hinic5_cqm aeq event invalid %u\n", event); + return event_level; + } + + atomic_inc(&handle->hw_stats.hinic5_cqm_stats.hinic5_cqm_aeq_callback_cnt[event]); + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + if (unlikely(hinic5_cqm_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(aeq_callback_hinic5_cqm_handle)); + return event_level; + } + + /* Distributes events to different service modules + * based on the event type. 
+ */ + service_type = hinic5_cqm_aeq_event2type(event); + if (service_type == HINIC5_CQM_SERVICE_T_MAX) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(event)); + return event_level; + } + + service = &hinic5_cqm_handle->service[service_type]; + service_template = &service->service_template; + + if (!service_template->aeq_level_callback) + hinic5_cqm_err(handle->dev_hdl, "Event: service_type %u aeq_level_callback unregistered, event %u\n", + service_type, event); + else + event_level = service_template->aeq_level_callback(service_template->service_handle, + event, data); + + if (!service_template->aeq_callback) + hinic5_cqm_err(handle->dev_hdl, "Event: service_type %u aeq_callback unregistered\n", + service_type); + else + service_template->aeq_callback(service_template->service_handle, + event, data); + + return event_level; +} + +/** + * Prototype : hinic5_cqm_service_register + * Description : Callback template for the service driver + * to register with the HINIC5_CQM. + * Input : void *ex_handle + * struct tag_service_register_template *service_template + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/5 + * Modification : Created function + */ +s32 hinic5_cqm_service_register(void *ex_handle, struct tag_service_register_template *service_template) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_service *service = NULL; + + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ex_handle)); + return HINIC5_CQM_FAIL; + } + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + if (unlikely(hinic5_cqm_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(hinic5_cqm_handle)); + return HINIC5_CQM_FAIL; + } + if (unlikely(service_template == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(service_template)); + return HINIC5_CQM_FAIL; + } + + if 
(service_template->service_type >= HINIC5_CQM_SERVICE_T_MAX) { + hinic5_cqm_err(handle->dev_hdl, + HINIC5_CQM_WRONG_VALUE(service_template->service_type)); + return HINIC5_CQM_FAIL; + } + service = &hinic5_cqm_handle->service[service_template->service_type]; + if (!service->valid) { + hinic5_cqm_err(handle->dev_hdl, "Service register: service_type %u is invalid\n", + service_template->service_type); + return HINIC5_CQM_FAIL; + } + + if (service->has_register) { + hinic5_cqm_err(handle->dev_hdl, "Service register: service_type %u has registered\n", + service_template->service_type); + return HINIC5_CQM_FAIL; + } + + service->has_register = true; + memcpy((void *)(&service->service_template), + (void *)service_template, + sizeof(struct tag_service_register_template)); + + return HINIC5_CQM_SUCCESS; +} +EXPORT_SYMBOL(hinic5_cqm_service_register); + +/** + * Prototype : hinic5_cqm_service_unregister + * Description : The service driver deregisters the callback function + * from the HINIC5_CQM. 
+ * Input : void *ex_handle + * u32 service_type + * Output : None + * Return Value : void + * 1.Date : 2015/4/5 + * Modification : Created function + */ +void hinic5_cqm_service_unregister(void *ex_handle, u32 service_type) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_service *service = NULL; + + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ex_handle)); + return; + } + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + if (unlikely(hinic5_cqm_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(hinic5_cqm_handle)); + return; + } + + if (service_type >= HINIC5_CQM_SERVICE_T_MAX) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(service_type)); + return; + } + + service = &hinic5_cqm_handle->service[service_type]; + if (!service->valid) + hinic5_cqm_err(handle->dev_hdl, "Service unregister: service_type %u is disable\n", + service_type); + + service->has_register = false; + memset(&service->service_template, 0, + sizeof(struct tag_service_register_template)); +} +EXPORT_SYMBOL(hinic5_cqm_service_unregister); + +s32 hinic5_cqm_fake_vf_num_set(void *ex_handle, u16 fake_vf_num_cfg) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct service_cap *svc_cap = NULL; + + if (!ex_handle || !handle->cfg_mgmt) + return HINIC5_CQM_FAIL; + + svc_cap = &handle->cfg_mgmt->svc_cap; + + if (fake_vf_num_cfg > svc_cap->fake_vf_num) { + hinic5_cqm_err(handle->dev_hdl, "fake_vf_num_cfg is invlaid, fw fake_vf_num is %u\n", + svc_cap->fake_vf_num); + return HINIC5_CQM_FAIL; + } + + /* fake_vf_num_cfg is valid when func type is HINIC5_CQM_FAKE_FUNC_PARENT */ + svc_cap->fake_vf_num_cfg = fake_vf_num_cfg; + hinic5_cqm_info(handle->dev_hdl, "fake_vf_num_cfg set to %u\n", fake_vf_num_cfg); + + return HINIC5_CQM_SUCCESS; +} +EXPORT_SYMBOL(hinic5_cqm_fake_vf_num_set); diff --git 
a/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_main.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_main.h new file mode 100644 index 00000000..702b9470 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_main.h @@ -0,0 +1,505 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_CQM_MAIN_H +#define HINIC5_CQM_MAIN_H + +#include "hinic5_crm.h" +#include "hinic5_cqm_bloomfilter.h" +#include "hinic5_hwif_inner.h" +#include "hinic5_cqm_bat_cla.h" + +#define GET_MAX(a, b) ((a) > (b) ? (a) : (b)) +#define GET_MIN(a, b) ((a) < (b) ? (a) : (b)) +#define HINIC5_CQM_DW_SHIFT 2 +#define HINIC5_CQM_QW_SHIFT 3 + +#define CHIPIF_SUCCESS 0 +#define CHIPIF_FAIL (-1) + +#define HINIC5_CQM_TIMER_ENABLE 1 +#define HINIC5_CQM_TIMER_DISABLE 0 + +#define HINIC5_CQM_HANDLE_STATE_INIT 0 +#define HINIC5_CQM_HANDLE_STATE_READY 1 +#define HINIC5_CQM_HANDLE_STATE_REMOVE 2 + +/* The value must be the same as that of hinic5_service_type in hinic5_crm.h. */ +#define HINIC5_CQM_SERVICE_T_NIC SERVICE_T_NIC +#define HINIC5_CQM_SERVICE_T_OVS SERVICE_T_OVS +#define HINIC5_CQM_SERVICE_T_ROCE SERVICE_T_ROCE +#define HINIC5_CQM_SERVICE_T_TOE SERVICE_T_TOE +#define HINIC5_CQM_SERVICE_T_IOE SERVICE_T_IOE +#define HINIC5_CQM_SERVICE_T_FC SERVICE_T_FC +#define HINIC5_CQM_SERVICE_T_VBS SERVICE_T_VBS +#define HINIC5_CQM_SERVICE_T_IPSEC SERVICE_T_IPSEC +#define HINIC5_CQM_SERVICE_T_VIRTIO SERVICE_T_VIRTIO +#define HINIC5_CQM_SERVICE_T_PPA SERVICE_T_PPA +#define HINIC5_CQM_SERVICE_T_UB SERVICE_T_UB +#define HINIC5_CQM_SERVICE_T_JBOF SERVICE_T_JBOF +#define HINIC5_CQM_SERVICE_T_VROCE SERVICE_T_VROCE +#define HINIC5_CQM_SERVICE_T_DMMU SERVICE_T_DMMU +#define HINIC5_CQM_SERVICE_T_CFM SERVICE_T_CFM +#define HINIC5_CQM_SERVICE_T_MAX SERVICE_T_MAX + +struct tag_hinic5_cqm_service { + bool valid; /* Whether to enable this service on the function. 
*/ + bool has_register; /* Registered or Not */ + u64 hardware_db_paddr; + void __iomem *hardware_db_vaddr; + u64 dwqe_paddr; + void __iomem *dwqe_vaddr; + u32 buf_order; /* The size of each buf node is 2^buf_order pages. */ + struct tag_service_register_template service_template; +}; + +struct tag_hinic5_cqm_fake_cfg { + u32 parent_func; /* The parent func_id of the fake vfs. */ + u32 child_func_start; /* The start func_id of the child fake vfs. */ + u32 child_func_number; /* The number of the child fake vfs. */ + + bool fake_vf_lazy_init; + + u32 fake_vf_max_pctx; + u32 fake_vf_max_scqc_ctx; + u32 fake_vf_max_srqc_ctx; + u32 fake_vf_max_gid_ctx; + u32 fake_vf_max_mpt_ctx; + u32 fake_vf_max_childc_ctx; + + u8 fake_vf_qpc_basic_size; + + u16 fake_vf_bfilter_start_addr; + u16 fake_vf_bfilter_len; +}; + +typedef struct tag_hinic5_cqm_func_capability { + /* BAT_PTR table(SMLC) */ + bool ft_enable; /* BAT for flow table enable: support toe/ioe/fc service + */ + bool rdma_enable; /* BAT for rdma enable: support RoCE */ + /* VAT table(SMIR) */ + bool ft_pf_enable; /* Same as ft_enable. BAT entry for toe/ioe/fc on pf + */ + bool rdma_pf_enable; /* Same as rdma_enable. BAT entry for rdma on pf */ + + u8 gpa_spu_en; + + /* Dynamic or static memory allocation during the application of + * specified QPC/SCQC for each service. + */ + bool qpc_alloc_static; + bool scqc_alloc_static; + bool srqc_alloc_static; + + u8 timer_enable; /* Whether the timer function is enabled */ + u8 bloomfilter_enable; /* Whether the bloomgfilter function is enabled + */ + u32 flow_table_based_conn_number; /* Maximum number of connections for + * toe/ioe/fc, whitch cannot excedd + * qpc_number + */ + u32 flow_table_based_conn_cache_number; /* Maximum number of sticky + * caches + */ + u32 bloomfilter_length; /* Size of the bloomfilter table, 64-byte + * aligned + */ + u32 bloomfilter_addr; /* Start position of the bloomfilter table in the + * SMF main cache. 
+ */ + u32 qpc_reserved; /* Reserved bit in bitmap */ + u32 qpc_reserved_back; /* Reserved back bit in bitmap */ + u32 mpt_reserved; /* The ROCE/IWARP MPT also has a reserved bit. */ + u32 mpt_reserved_back; /* Reserved back bit in bitmap */ + + /* All basic_size must be 2^n-aligned. */ + u32 hash_number; /* The number of hash bucket. The size of BAT table is + * aliaed with 64 bucket. At least 64 buckets is + * required. + */ + u32 hash_basic_size; /* THe basic size of hash bucket is 64B, including + * 5 valid entry and one next entry. + */ + u32 qpc_number; + u32 qpc_basic_size; + + /* Number of PFs/VFs on the current host only for timer resource used */ + u32 pf_num; + u32 pf_id_start; + u32 vf_num; + u32 vf_id_start; + + u8 timer_pf_id_start; + u8 timer_pf_num; + u16 timer_vf_id_start; + u16 timer_vf_num; + u16 timer_vf_num_actual; + bool timer_vf_deploy_with_segs; + struct timer_vf_info_seg timer_vf_segs[TIMER_VF_SEGS_NUM]; + + bool use_fake_parent_cla; + + /* SMF capabilities */ + u32 lb_mode; + /* A bitmap indicating which SMFs are enabled. + * For example, 0101B indicates that SMF0 and SMF2 are enabled. + * The valid length of this bitmap is smf_max_num. 
+ */ + u32 smf_pg; + u32 smf_max_num; + u32 smf_enabled_num; + + /* SMF BAT capabilities */ + u8 bat_cid_index_bit_width; + + /* Fake VF capabilities */ + u32 fake_func_type; /* Whether the current function belongs to the fake + * group (parent or child) + */ + struct tag_hinic5_cqm_fake_cfg fake_cfg; + + /* Note: for hinic5_cqm specail test */ + u32 pagesize_reorder; + bool xid_alloc_mode; + bool gpa_check_enable; + u32 scq_reserved; + u32 scq_reserved_back; + u32 srq_reserved; + u32 srq_reserved_back; + + u32 mpt_number; + u32 mpt_basic_size; + u32 scqc_number; + u32 scqc_basic_size; + u32 srqc_number; + u32 srqc_basic_size; + + u32 gid_number; + u32 gid_basic_size; + u32 lun_number; + u32 lun_basic_size; + u32 taskmap_number; + u32 taskmap_basic_size; + u32 l3i_number; + u32 l3i_basic_size; + u32 childc_number; + u32 childc_basic_size; + u32 child_qpc_id_start; /* FC service Child CTX is global addressing. */ + u32 childc_number_all_function; /* The chip supports a maximum of 8096 + * child CTXs. + */ + u32 timer_number; + u32 timer_basic_size; + u32 xid2cid_number; + u32 xid2cid_basic_size; + u32 reorder_number; + u32 reorder_basic_size; +} hinic5_cqm_func_capability_s; + +#define HINIC5_CQM_PF TYPE_PF +#define HINIC5_CQM_VF TYPE_VF +#define HINIC5_CQM_PPF TYPE_PPF +#define HINIC5_CQM_UNKNOWN TYPE_UNKNOWN +#define HINIC5_CQM_MAX_PF_NUM 32 + +#define HINIC5_CQM_LB_MODE_NORMAL 0xff +#define HINIC5_CQM_LB_MODE_0 0 +#define HINIC5_CQM_LB_MODE_1 1 +#define HINIC5_CQM_LB_MODE_2 2 + +#define HINIC5_CQM_FPGA_MODE 0 +#define HINIC5_CQM_EMU_MODE 1 + +#define HINIC5_CQM_FAKE_FUNC_UNUSED 0U /* The HINIC5_CQM handle does not use Fake VF. */ +#define HINIC5_CQM_FAKE_FUNC_PARENT 1U /* The HINIC5_CQM handle is responsible for + initializing some VF's resouces. */ +#define HINIC5_CQM_FAKE_FUNC_CHILD_AGENT 2U /* An agent handle created by a Fake VF + Parent that acts as a Fake VF Child + in Fake VF Parent's process. 
*/ +#define HINIC5_CQM_FAKE_FUNC_CHILD 3U /* Some resources of this HINIC5_CQM handle + are managed by a Fake VF Parent. */ + +#define HINIC5_CQM_FAKE_FUNC_MAX 32 + +#define HINIC5_CQM_QPC_ROCE_PER_DRCT 12 +#define HINIC5_CQM_QPC_ROCE_NORMAL 0 +#define HINIC5_CQM_QPC_ROCE_VBS_MODE 2 + +struct tag_hinic5_cqm_toe_private_capability { + /* TOE srq is different from other services + * and does not need to be managed by the CLA table. + */ + u32 toe_srqc_number; + u32 toe_srqc_basic_size; + u32 toe_srqc_start_id; + + struct tag_hinic5_cqm_bitmap srqc_bitmap; +}; + +struct hinic5_cqm_cmdq_ops; +struct tag_hinic5_cqm_handle { + struct hinic5_hwdev *ex_handle; + struct device *dev; + struct hinic5_func_attr func_attribute; /* vf/pf attributes */ + struct tag_hinic5_cqm_func_capability func_capability; /* function capability set */ + struct tag_hinic5_cqm_service service[HINIC5_CQM_SERVICE_T_MAX]; /* Service-related structure */ + struct tag_hinic5_cqm_bat_table bat_table; + struct tag_hinic5_cqm_bloomfilter_table bloomfilter_table; + + atomic_t handle_state; /* see HINIC5_CQM_HANDLE_STATE_XXX */ + + /* fake-vf-related structure */ + struct tag_hinic5_cqm_handle *fake_hinic5_cqm_handle[HINIC5_CQM_FAKE_FUNC_MAX]; + struct tag_hinic5_cqm_handle *parent_hinic5_cqm_handle; + + struct tag_hinic5_cqm_toe_private_capability toe_own_capability; /* TOE service-related + * capability set + */ + + char name[HINIC5_VRAM_NAME_MAX_LEN]; + struct hinic5_cqm_cmdq_ops *cmdq_ops; +}; + +#define HINIC5_CQM_FUNC_TYPE(hinic5_cqm_handle) ((hinic5_cqm_handle)->func_attribute.func_type) +#define HINIC5_CQM_FAKE_FUNC_TYPE(hinic5_cqm_handle) ((hinic5_cqm_handle)->func_capability.fake_func_type) + +#define HINIC5_CQM_IS_FAKE_PARENT(hinic5_cqm_handle) \ + (HINIC5_CQM_FAKE_FUNC_TYPE(hinic5_cqm_handle) == HINIC5_CQM_FAKE_FUNC_PARENT) +#define HINIC5_CQM_IS_FAKE_CHILD(hinic5_cqm_handle) \ + (HINIC5_CQM_FAKE_FUNC_TYPE(hinic5_cqm_handle) == HINIC5_CQM_FAKE_FUNC_CHILD) +#define 
HINIC5_CQM_IS_FAKE_CHILD_AGENT(hinic5_cqm_handle) \ + (HINIC5_CQM_FAKE_FUNC_TYPE(hinic5_cqm_handle) == HINIC5_CQM_FAKE_FUNC_CHILD_AGENT) + +#define HINIC5_CQM_IS_PPF(hinic5_cqm_handle) (HINIC5_CQM_FUNC_TYPE(hinic5_cqm_handle) == HINIC5_CQM_PPF) +#define HINIC5_CQM_IS_VF(hinic5_cqm_handle) (HINIC5_CQM_FUNC_TYPE(hinic5_cqm_handle) == HINIC5_CQM_VF && \ + HINIC5_CQM_FAKE_FUNC_TYPE(hinic5_cqm_handle) == HINIC5_CQM_FAKE_FUNC_UNUSED) + +#define HINIC5_CQM_CLA_IS_SECURE_MEM(type) ((type) == HINIC5_CQM_BAT_ENTRY_T_QPC || (type) == HINIC5_CQM_BAT_ENTRY_T_MPT || \ + (type) == HINIC5_CQM_BAT_ENTRY_T_SCQC || (type) == HINIC5_CQM_BAT_ENTRY_T_SRQC) + +#define HINIC5_CQM_IS_LB_MODE_NORMAL(hinic5_cqm_handle) ((hinic5_cqm_handle)->func_capability.lb_mode == HINIC5_CQM_LB_MODE_NORMAL) +#define HINIC5_CQM_IS_LB_MODE_0(hinic5_cqm_handle) ((hinic5_cqm_handle)->func_capability.lb_mode == HINIC5_CQM_LB_MODE_0) +#define HINIC5_CQM_IS_LB_MODE_1(hinic5_cqm_handle) ((hinic5_cqm_handle)->func_capability.lb_mode == HINIC5_CQM_LB_MODE_1) +#define HINIC5_CQM_IS_LB_MODE_2(hinic5_cqm_handle) ((hinic5_cqm_handle)->func_capability.lb_mode == HINIC5_CQM_LB_MODE_2) +#define HINIC5_CQM_IS_LB_MODE_1_OR_2(hinic5_cqm_handle) (HINIC5_CQM_IS_LB_MODE_1(hinic5_cqm_handle) || HINIC5_CQM_IS_LB_MODE_2(hinic5_cqm_handle)) + +#define HINIC5_CQM_CQN_FROM_CEQE(data) ((data) & 0xfffff) +#define HINIC5_CQM_XID_FROM_CEQE(data) ((data) & 0xfffff) +#define HINIC5_CQM_QID_FROM_CEQE(data) (((data) >> 20) & 0x7) +#define HINIC5_CQM_TYPE_FROM_CEQE(data) (((data) >> 23) & 0x7) + +#define HINIC5_CQM_HASH_BUCKET_SIZE_64 64 + +#define HINIC5_CQM_MAX_QPC_NUM 0x100000 +#define HINIC5_CQM_MAX_SCQC_NUM 0x100000 +#define HINIC5_CQM_MAX_SRQC_NUM 0x100000 +#define HINIC5_CQM_MAX_CHILDC_NUM 0x100000 + +#define HINIC5_CQM_QPC_SIZE_256 256 +#define HINIC5_CQM_QPC_SIZE_512 512 +#define HINIC5_CQM_QPC_SIZE_1024 1024 + +#define HINIC5_CQM_SCQC_SIZE_32 32 +#define HINIC5_CQM_SCQC_SIZE_64 64 +#define HINIC5_CQM_SCQC_SIZE_128 128 + +#define 
HINIC5_CQM_SRQC_SIZE_32 32 +#define HINIC5_CQM_SRQC_SIZE_64 64 +#define HINIC5_CQM_SRQC_SIZE_128 128 + +#define HINIC5_CQM_MPT_SIZE_64 64 + +#define HINIC5_CQM_GID_SIZE_32 32 + +#define HINIC5_CQM_LUN_SIZE_8 8 + +#define HINIC5_CQM_L3I_SIZE_8 8 + +#define HINIC5_CQM_TIMER_SIZE_32 32 + +#define HINIC5_CQM_XID2CID_SIZE_8 8 + +#define HINIC5_CQM_REORDER_SIZE_256 256 + +#define HINIC5_CQM_CHILDC_SIZE_256 256 + +#define HINIC5_CQM_XID2CID_VBS_NUM (2 * 1024) /* 2K nvme Q */ + +#define HINIC5_CQM_VBS_QPC_SIZE 512 + +#define HINIC5_CQM_VBS_SCQC_SIZE 128 + +/* Default number of VirtIO VQs. + * Future models should get this value from the MGMT. + */ +#define HINIC5_CQM_VIRTIO_VQ_NUM_DEFAULT (16 * 1024) +#define HINIC5_CQM_VIRTIO_FC_SIZE 256 /* VirtIO Function Context size */ + +#define HINIC5_CQM_GID_RDMA_NUM 128 + +#define HINIC5_CQM_LUN_FC_NUM 64 + +#define HINIC5_CQM_TASKMAP_FC_NUM 4 + +#define HINIC5_CQM_L3I_COMM_NUM 64 + +#define HINIC5_CQM_CHILDC_OVS_VBS_NUM (8 * 1024) +#define HINIC5_CQM_CHILDC_VBS_NUM (2 * 1024) + +#define HINIC5_CQM_TIMER_SCALE_NUM (2 * 1024) +#define HINIC5_CQM_TIMER_ALIGN_WHEEL_NUM 8 +#define HINIC5_CQM_TIMER_ALIGN_SCALE_NUM \ + (HINIC5_CQM_TIMER_SCALE_NUM * HINIC5_CQM_TIMER_ALIGN_WHEEL_NUM) + +#define HINIC5_CQM_QPC_OVS_RSVD (1024 * 1024) +#define HINIC5_CQM_QPC_ROCE_RSVD 2 +#define HINIC5_CQM_QPC_ROCEAA_SWITCH_QP_NUM 4 +#define HINIC5_CQM_QPC_ROCEAA_RSVD \ + (4 * 1024 + HINIC5_CQM_QPC_ROCEAA_SWITCH_QP_NUM) /* 4096 Normal QP + + * 4 Switch QP + */ +#define HINIC5_CQM_CQ_ROCE_RSVD 16 +#define HINIC5_CQM_CQ_UB_RSVD 131072 // 128K +#define HINIC5_CQM_SRQ_ROCE_RSVD 16 + +#define HINIC5_CQM_CQ_ROCEAA_RSVD 64 +#define HINIC5_CQM_SRQ_ROCEAA_RSVD 64 +#define HINIC5_CQM_QPC_ROCE_VBS_RSVD_BACK 204800 /* 200K */ +#define HINIC5_CQM_CQ_VBS_VOLQ_RSVD (2 + 2048) +#define HINIC5_CQM_CQ_ROCE_VBS_RSVD GET_MAX(HINIC5_CQM_QPC_ROCE_VBS_RSVD_BACK, HINIC5_CQM_CQ_VBS_VOLQ_RSVD) + +#define HINIC5_CQM_OVS_MAX_TIMER_FUNC 48 + +#define 
HINIC5_CQM_HASH_BUCKET_NUM_UNIT_4_TO_64 4 +#define HINIC5_CQM_CRYPT_HASH_BUCKET_NUM(tbl_num) ((tbl_num) >> HINIC5_CQM_HASH_BUCKET_NUM_UNIT_4_TO_64) + +#define HINIC5_CQM_PPA_PAGESIZE_ORDER 8 + +#if defined(__WIN__) && defined(__HIFC__) +#define HINIC5_CQM_FC_PAGESIZE_ORDER 8 +#else +#define HINIC5_CQM_FC_PAGESIZE_ORDER 0 +#endif + +#define HINIC5_CQM_QHEAD_ALIGN_ORDER 6 + +typedef void (*serv_cap_init_cb)(struct tag_hinic5_cqm_handle *, void *); + +struct hinic5_cqm_srv_cap_init { + u32 service_type; + serv_cap_init_cb serv_cap_proc; +}; + +/* Only for llt test */ +s32 hinic5_cqm_capability_init(void *ex_handle); +/* Can be defined as static */ +s32 hinic5_cqm_mem_init(void *ex_handle); +void hinic5_cqm_mem_uninit(void *ex_handle); +s32 hinic5_cqm_event_init(void *ex_handle); +void hinic5_cqm_event_uninit(void *ex_handle); +void hinic5_cqm_scq_callback(void *ex_handle, u32 ceqe_data); +void hinic5_cqm_ecq_callback(void *ex_handle, u32 ceqe_data); +void hinic5_cqm_nocq_callback(void *ex_handle, u32 ceqe_data); +u8 hinic5_cqm_aeq_callback(void *ex_handle, u8 event, u8 *data); + +s32 hinic5_cqm_init(void *ex_handle); +void hinic5_cqm_uninit(void *ex_handle); +s32 hinic5_cqm_service_register(void *ex_handle, struct tag_service_register_template *service_template); +void hinic5_cqm_service_unregister(void *ex_handle, u32 service_type); + +s32 hinic5_cqm_fake_vf_num_set(void *ex_handle, u16 fake_vf_num_cfg); + +#define HINIC5_CQM_LOG_ID 0 + +#define HINIC5_CQM_PTR_NULL(x) "%s: " #x " is null\n", __func__ +#define HINIC5_CQM_ALLOC_FAIL(x) "%s: " #x " alloc fail\n", __func__ +#define HINIC5_CQM_MAP_FAIL(x) "%s: " #x " map fail\n", __func__ +#define HINIC5_CQM_FUNCTION_FAIL(x) "%s: " #x " return failure\n", __func__ +#define HINIC5_CQM_WRONG_VALUE(x) "%s: " #x " %u is wrong\n", __func__, (u32)(x) + +#define hinic5_cqm_err(dev, format, ...) dev_err(dev, "[HINIC5_CQM]" format, ##__VA_ARGS__) +#define hinic5_cqm_warn(dev, format, ...) 
dev_warn(dev, "[HINIC5_CQM]" format, ##__VA_ARGS__) +#define hinic5_cqm_notice(dev, format, ...) \ + dev_notice(dev, "[HINIC5_CQM]" format, ##__VA_ARGS__) +#define hinic5_cqm_info(dev, format, ...) dev_info(dev, "[HINIC5_CQM]" format, ##__VA_ARGS__) + +#ifdef __HINIC5_CQM_DEBUG__ +extern bool hinic5_cqm_verbose; + +#define hinic5_cqm_dbg(dev, format, ...) dev_info(dev, "[HINIC5_CQM]" format, ##__VA_ARGS__) +#define hinic5_cqm_dbg_on(condition, dev, format, ...) \ + ({ \ + if (condition) \ + hinic5_cqm_dbg(dev, format, ##__VA_ARGS__); \ + }) + +#define hinic5_cqm_dbg_pr(format, ...) pr_info("[HINIC5_CQM]" format, ##__VA_ARGS__) +#define hinic5_cqm_dbg_pr_on(condition, format, ...) \ + ({ \ + if (condition) \ + hinic5_cqm_dbg_pr(format, ##__VA_ARGS__); \ + }) + +static inline void hinic5_cqm_dbg_byte_print(struct device *dev, u32 *ptr, u32 len) +{ + u32 i; + for (i = 0; i < (len >> 0x2); i += 0x4) + hinic5_cqm_dbg(dev, "%.8x %.8x %.8x %.8x\n", + ptr[i], ptr[i + 0x1], ptr[i + 0x2], ptr[i + 0x3]); +} +#else +#define hinic5_cqm_dbg(format, ...) +#define hinic5_cqm_dbg_on(condition, format, ...) +#define hinic5_cqm_dbg_pr(format, ...) +#define hinic5_cqm_dbg_pr_on(condition, format, ...) +#define hinic5_cqm_dbg_byte_print(dev, ptr, len) +#endif + +#define HINIC5_CQM_PTR_CHECK_ERR(desc) pr_err("[HINIC5_CQM]" desc) + +static inline u32 hinic5_cqm_get_child_func_start(struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability; + return func_cap->fake_cfg.child_func_start; +} + +/* + * Get the number of child functions. + * The number of child functions can be zero. 
+ */ +static inline u32 hinic5_cqm_get_child_func_number(struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability; + return func_cap->fake_cfg.child_func_number; +} + +static inline bool hinic5_cqm_is_fake_vf_lazy_init(struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability; + return func_cap->fake_cfg.fake_vf_lazy_init; +} + +/** + * SMF support to use acs_spu_en to determine whether to send data over the HVA + * interface or API ring. + * @ref 'SPU ACCESS' in SM FS + * @return 1: over HVA, 0 over API ring + */ +static inline u8 hinic5_cqm_get_acs_spu_en(struct tag_hinic5_cqm_handle *hinic5_cqm_handle) +{ + struct hinic5_hwdev *hwdev = hinic5_cqm_handle->ex_handle; + + if (hinic5_cqm_handle->func_capability.gpa_spu_en == FUNC_GPA_SPU_DIS) + return 0; + if (hinic5_cqm_handle->func_capability.gpa_spu_en == FUNC_GPA_SPU_EN) + return 0x1; + + if (!hinic5_in_spu(hwdev)) + return 0; + + /* Load balancing from the SMF to the CPI, depending on the func ID. 
*/ + return hinic5_global_func_id(hwdev) & 0x1; +} + +#endif /* HINIC5_CQM_MAIN_H */ diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_object.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_object.c new file mode 100644 index 00000000..8d8db198 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_object.c @@ -0,0 +1,1700 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/device.h> +#include <linux/gfp.h> +#include <linux/mm.h> + +#include "ossl_knl.h" +#include "hinic5_crm.h" +#include "hinic5_hw.h" +#include "hinic5_hwdev.h" +#include "hinic5_typedef_inner.h" + +#include "hinic5_cqm_bitmap_table.h" +#include "hinic5_cqm_bat_cla.h" +#include "hinic5_cqm_object_intern.h" +#include "hinic5_cqm_main.h" +#include "hinic5_cqm_object.h" + +static inline void hinic5_cqm_object_init(struct tag_hinic5_cqm_object *object, u32 service_type, + enum hinic5_cqm_object_type object_type, u32 object_size, + void *hinic5_cqm_handle) +{ + object->service_type = service_type; + object->object_type = object_type; + object->object_size = object_size; + atomic_set(&object->refcount, 1); + init_completion(&object->free); + object->hinic5_cqm_handle = hinic5_cqm_handle; +} + +static s32 hinic5_cqm_object_create_check(struct tag_hinic5_cqm_handle *hinic5_cqm_handle, u32 service_type) +{ + if (unlikely(hinic5_cqm_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(hinic5_cqm_handle)); + return HINIC5_CQM_FAIL; + } + + if (unlikely(service_type >= HINIC5_CQM_SERVICE_T_MAX)) { + hinic5_cqm_err(hinic5_cqm_handle->dev, "invalid service %u\n", service_type); + return HINIC5_CQM_FAIL; + } + if (unlikely(!hinic5_cqm_handle->service[service_type].has_register)) { + hinic5_cqm_err(hinic5_cqm_handle->dev, "service %u has not 
registered\n", service_type); + return HINIC5_CQM_FAIL; + } + + return HINIC5_CQM_SUCCESS; +} + +/** + * Prototype : hinic5_cqm_object_qpc_mpt_create + * Description : create QPC/MPT + * Input : void *ex_handle + * u32 service_type + * enum hinic5_cqm_object_type object_type: must be mpt or ctx. + * u32 object_size: unit is Byte + * void *object_priv: private structure of the service layer, + * it can be NULL. + * u32 index: apply for the reserved qpn 0~(1M-1) based on this value; + * if automatic allocation is required, + * index[20:0] : fixed to 0x1fffff + * index[23:21] : specified xid_lowbits[2:0] + * index[26:24] : xid[2:0] match mode, see HINIC5_CQM_DYNAMIC_XID_MOD + * index[27] : search mode, + * 0---specify the XID range, + * 1---search for the entire dynamic area + * index[31:28] : rsvd + * notes: when index is HINIC5_CQM_INDEX_INVALID, means match all available xid + * u32 bitmap_start: start index of dynamic xid search range, + * valid when index[25]=0 && index[20:0]=0x1fffff + * u32 bitmap_end: end index of dynamic xid search range, + * valid when index[25]=0 && index[20:0]=0x1fffff + * when search forward(bitmap_start<bitmap_end), + * search range is [bitmap_start, bitmap_end) + * when search reverse(bitmap_start>bitmap_end), + * search range is (bitmap_end, bitmap_start]. 
+ * bitmap_start=bitmap_end is illegal in range search mode + * Output : None + * Return Value : struct tag_hinic5_cqm_qpc_mpt * + * 1.Date : 2016/2/16 + * Modification : Created function + */ +struct tag_hinic5_cqm_qpc_mpt *hinic5_cqm_object_qpc_mpt_create(void *ex_handle, u32 service_type, + enum hinic5_cqm_object_type object_type, + u32 object_size, void *object_priv, u32 index, + u32 bitmap_start, u32 bitmap_end) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_qpc_mpt_info *qpc_mpt_info = NULL; + struct tag_hinic5_cqm_bitmap_range bp_range; + s32 ret = HINIC5_CQM_FAIL; + u32 relative_index; + u32 fake_func_id; + u32 index_num = index; + + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ex_handle)); + return NULL; + } + + atomic_inc(&handle->hw_stats.hinic5_cqm_stats.hinic5_cqm_qpc_mpt_create_cnt); + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + if (hinic5_cqm_object_create_check(hinic5_cqm_handle, service_type) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_object_create_check)); + return NULL; + } + + if (object_type != HINIC5_CQM_OBJECT_SERVICE_CTX && object_type != HINIC5_CQM_OBJECT_MPT) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(object_type)); + return NULL; + } + + /* fake vf adaption, switch to corresponding VF. 
*/ + if (HINIC5_CQM_IS_FAKE_PARENT(hinic5_cqm_handle) && + !hinic5_cqm_handle->func_capability.use_fake_parent_cla) { + struct tag_hinic5_cqm_fake_cfg *fake_cfg = &hinic5_cqm_handle->func_capability.fake_cfg; + if (fake_cfg->fake_vf_max_pctx == 0) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(fake_cfg->fake_vf_max_pctx)); + return NULL; + } + + fake_func_id = index_num / fake_cfg->fake_vf_max_pctx; + relative_index = index_num % fake_cfg->fake_vf_max_pctx; + + if (fake_func_id >= hinic5_cqm_get_child_func_number(hinic5_cqm_handle)) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(fake_func_id)); + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(index)); + return NULL; + } + + index_num = relative_index; + hinic5_cqm_handle = hinic5_cqm_handle->fake_hinic5_cqm_handle[fake_func_id]; + } + + qpc_mpt_info = kzalloc(sizeof(*qpc_mpt_info), GFP_ATOMIC); + if (unlikely(qpc_mpt_info == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_ALLOC_FAIL(qpc_mpt_info)); + return NULL; + } + + hinic5_cqm_object_init(&qpc_mpt_info->common.object, service_type, object_type, + object_size, hinic5_cqm_handle); + qpc_mpt_info->common.xid = index_num; + bp_range.start = bitmap_start; + bp_range.end = bitmap_end; + + qpc_mpt_info->common.priv = object_priv; + + ret = hinic5_cqm_qpc_mpt_create(&qpc_mpt_info->common.object, &bp_range); + if (ret == HINIC5_CQM_SUCCESS) + return &qpc_mpt_info->common; + + hinic5_cqm_warn(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_qpc_mpt_create)); + kfree(qpc_mpt_info); + return NULL; +} +EXPORT_SYMBOL(hinic5_cqm_object_qpc_mpt_create); + +static struct tag_hinic5_cqm_queue *hinic5_cqm_create_rqs(struct tag_hinic5_cqm_nonrdma_qinfo *rq_qinfo, struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct hinic5_hwdev *handle, u32 init_rq_num) +{ + u32 i; + /* 3. 
create queue header */ + rq_qinfo->common.q_header_vaddr = hinic5_cqm_kmalloc_align(sizeof(struct tag_hinic5_cqm_queue_header), + GFP_KERNEL | __GFP_ZERO, HINIC5_CQM_QHEAD_ALIGN_ORDER); + if (!rq_qinfo->common.q_header_vaddr) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_ALLOC_FAIL(q_header_vaddr)); + return NULL; + } + + rq_qinfo->common.q_header_paddr = dma_map_single(hinic5_cqm_handle->dev, rq_qinfo->common.q_header_vaddr, + sizeof(struct tag_hinic5_cqm_queue_header), DMA_BIDIRECTIONAL); + if (dma_mapping_error(hinic5_cqm_handle->dev, rq_qinfo->common.q_header_paddr) != 0) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_MAP_FAIL(q_header_vaddr)); + goto err1; + } + + /* 4. create rq */ + for (i = 0; i < init_rq_num; i++) { + if (hinic5_cqm_container_create(&rq_qinfo->common.object, NULL, true) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_container_create)); + goto err2; + } + if (!rq_qinfo->common.head_container) + rq_qinfo->common.head_container = rq_qinfo->common.tail_container; + } + + return &rq_qinfo->common; + +err2: + hinic5_cqm_container_free(rq_qinfo->common.head_container, NULL, + &rq_qinfo->common); +err1: + hinic5_cqm_kfree_align(rq_qinfo->common.q_header_vaddr); + rq_qinfo->common.q_header_vaddr = NULL; + return NULL; +} + +/** + * Prototype : hinic5_cqm_object_recv_queue_create + * Description : when srq is used, create rq. 
+ * Input : void *ex_handle + * u32 service_type + * enum hinic5_cqm_object_type object_type + * u32 init_rq_num + * u32 container_size + * u32 wqe_size + * void *object_priv + * Output : None + * Return Value : struct tag_hinic5_cqm_queue * + * 1.Date : 2016/2/16 + * Modification : Created function + */ +struct tag_hinic5_cqm_queue *hinic5_cqm_object_recv_queue_create(void *ex_handle, u32 service_type, + enum hinic5_cqm_object_type object_type, + u32 init_rq_num, u32 container_size, + u32 wqe_size, void *object_priv) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_nonrdma_qinfo *rq_qinfo = NULL; + struct tag_hinic5_cqm_queue *ret = NULL; + + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ex_handle)); + return NULL; + } + + atomic_inc(&handle->hw_stats.hinic5_cqm_stats.hinic5_cqm_rq_create_cnt); + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + if (hinic5_cqm_object_create_check(hinic5_cqm_handle, service_type) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_object_create_check)); + return NULL; + } + + if (object_type != HINIC5_CQM_OBJECT_NONRDMA_EMBEDDED_RQ) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(object_type)); + return NULL; + } + + if (service_type != HINIC5_CQM_SERVICE_T_TOE) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(service_type)); + return NULL; + } + + /* 1. create rq qinfo */ + rq_qinfo = kzalloc(sizeof(*rq_qinfo), GFP_KERNEL); + if (unlikely(rq_qinfo == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_ALLOC_FAIL(rq_qinfo)); + return NULL; + } + + /* 2. 
init rq qinfo */ + rq_qinfo->container_size = container_size; + rq_qinfo->wqe_size = wqe_size; + rq_qinfo->wqe_per_buf = container_size / wqe_size - 1; + + rq_qinfo->common.queue_link_mode = HINIC5_CQM_QUEUE_TOE_SRQ_LINK_MODE; + rq_qinfo->common.priv = object_priv; + hinic5_cqm_object_init(&rq_qinfo->common.object, service_type, object_type, + init_rq_num, hinic5_cqm_handle); + + /* 3. create rq */ + ret = hinic5_cqm_create_rqs(rq_qinfo, hinic5_cqm_handle, handle, init_rq_num); + if (ret == NULL) + kfree(rq_qinfo); + + return ret; +} +EXPORT_SYMBOL(hinic5_cqm_object_recv_queue_create); + +/** + * Prototype : hinic5_cqm_object_share_recv_queue_add_container + * Description : allocate new container for srq + * Input : struct tag_hinic5_cqm_queue *common + * Output : None + * Return Value : tail_container address + * 1.Date : 2016/2/14 + * Modification : Created function + */ +s32 hinic5_cqm_object_share_recv_queue_add_container(struct tag_hinic5_cqm_queue *common) +{ + if (unlikely(common == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(common)); + return HINIC5_CQM_FAIL; + } + + return hinic5_cqm_container_create(&common->object, NULL, true); +} +EXPORT_SYMBOL(hinic5_cqm_object_share_recv_queue_add_container); + +s32 hinic5_cqm_object_srq_add_container_free(struct tag_hinic5_cqm_queue *common, u8 **container_addr) +{ + if (unlikely(common == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(common)); + return HINIC5_CQM_FAIL; + } + + return hinic5_cqm_container_create(&common->object, container_addr, false); +} +EXPORT_SYMBOL(hinic5_cqm_object_srq_add_container_free); + +static bool hinic5_cqm_object_share_recv_queue_param_check(struct hinic5_hwdev *handle, u32 service_type, + enum hinic5_cqm_object_type object_type, + u32 container_size, u32 wqe_size) +{ + /* service_type must be HINIC5_CQM_SERVICE_T_TOE */ + if (service_type != HINIC5_CQM_SERVICE_T_TOE) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(service_type)); + return false; + } + 
+ /* container size2^N aligning */ + if (!hinic5_cqm_check_align(container_size)) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(container_size)); + return false; + } + + /* external parameter check: object_type must be + * HINIC5_CQM_OBJECT_NONRDMA_SRQ + */ + if (object_type != HINIC5_CQM_OBJECT_NONRDMA_SRQ) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(object_type)); + return false; + } + + /* wqe_size, the divisor, cannot be 0 */ + if (wqe_size == 0) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(wqe_size)); + return false; + } + + return true; +} + +/** + * Prototype : hinic5_cqm_object_share_recv_queue_create + * Description : create srq + * Input : void *ex_handle + * u32 service_type + * enum hinic5_cqm_object_type object_type + * u32 container_number + * u32 container_size + * u32 wqe_size + * Output : None + * Return Value : struct tag_hinic5_cqm_queue * + * 1.Date : 2016/2/1 + * Modification : Created function + */ +struct tag_hinic5_cqm_queue *hinic5_cqm_object_share_recv_queue_create(void *ex_handle, u32 service_type, + enum hinic5_cqm_object_type object_type, + u32 container_number, u32 container_size, + u32 wqe_size) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_nonrdma_qinfo *srq_qinfo = NULL; + struct tag_hinic5_cqm_service *service = NULL; + s32 ret; + + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ex_handle)); + return NULL; + } + + atomic_inc(&handle->hw_stats.hinic5_cqm_stats.hinic5_cqm_srq_create_cnt); + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + if (hinic5_cqm_object_create_check(hinic5_cqm_handle, service_type) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_object_create_check)); + return NULL; + } + + if (!hinic5_cqm_object_share_recv_queue_param_check(handle, service_type, 
object_type, + container_size, wqe_size)) + return NULL; + + /* 2. create and initialize srq info */ + srq_qinfo = kzalloc(sizeof(*srq_qinfo), GFP_KERNEL); + if (unlikely(srq_qinfo == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_ALLOC_FAIL(srq_qinfo)); + return NULL; + } + + hinic5_cqm_object_init(&srq_qinfo->common.object, service_type, object_type, + container_number, hinic5_cqm_handle); + + srq_qinfo->common.queue_link_mode = HINIC5_CQM_QUEUE_TOE_SRQ_LINK_MODE; + srq_qinfo->common.priv = NULL; + srq_qinfo->wqe_per_buf = container_size / wqe_size - 1; + srq_qinfo->wqe_size = wqe_size; + srq_qinfo->container_size = container_size; + service = &hinic5_cqm_handle->service[service_type]; + srq_qinfo->q_ctx_size = service->service_template.srq_ctx_size; + + /* 3. create srq and srq ctx */ + ret = hinic5_cqm_share_recv_queue_create(&srq_qinfo->common.object); + if (ret == HINIC5_CQM_SUCCESS) + return &srq_qinfo->common; + + hinic5_cqm_err(handle->dev_hdl, + HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_share_recv_queue_create)); + kfree(srq_qinfo); + return NULL; +} +EXPORT_SYMBOL(hinic5_cqm_object_share_recv_queue_create); + +/* FC RQ is SRQ. (Different from the SRQ concept of TOE, FC indicates + * that packets received by all flows are placed on the same RQ. + * The SRQ of TOE is similar to the RQ resource pool.) 
+ */ +static bool hinic5_cqm_object_fc_srq_param_check(struct hinic5_hwdev *handle, u32 service_type, + enum hinic5_cqm_object_type object_type, u32 wqe_size) +{ + /* service_type must be HINIC5_CQM_SERVICE_T_FC */ + if (service_type != HINIC5_CQM_SERVICE_T_FC) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(service_type)); + return false; + } + + /* object_type must be HINIC5_CQM_OBJECT_NONRDMA_SRQ */ + if (object_type != HINIC5_CQM_OBJECT_NONRDMA_SRQ) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(object_type)); + return false; + } + + if (wqe_size >= PAGE_SIZE || !hinic5_cqm_check_align(wqe_size)) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(wqe_size)); + return false; + } + + return true; +} + +/** + * Prototype : hinic5_cqm_object_fc_rq_create + * Description : RQ creation temporarily provided for the FC service. + * Special requirement: The number of valid WQEs in the queue + * must meet the number of transferred WQEs. Linkwqe can only be + * filled at the end of the page. The actual valid number exceeds + * the requirement. In this case, the service needs to be + * informed of the additional number to be created. 
+ * Input : void *ex_handle + * u32 service_type + * enum hinic5_cqm_object_type object_type + * u32 wqe_number: Number of valid WQEs + * u32 wqe_size + * void *object_priv + * Output : None + * 1.Date : 2016/3/1 + * Modification : Created function + */ +struct tag_hinic5_cqm_queue *hinic5_cqm_object_fc_srq_create(void *ex_handle, u32 service_type, + enum hinic5_cqm_object_type object_type, + u32 wqe_number, u32 wqe_size, + void *object_priv) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_nonrdma_qinfo *nonrdma_qinfo = NULL; + struct tag_hinic5_cqm_service *service = NULL; + u32 valid_wqe_per_buffer, buf_size, buf_num; + u32 wqe_sum; /* include linkwqe, normal wqe */ + s32 ret; + + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ex_handle)); + return NULL; + } + + atomic_inc(&handle->hw_stats.hinic5_cqm_stats.hinic5_cqm_fc_srq_create_cnt); + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + if (hinic5_cqm_object_create_check(hinic5_cqm_handle, service_type) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_object_create_check)); + return NULL; + } + + if (!hinic5_cqm_object_fc_srq_param_check(handle, service_type, object_type, wqe_size)) + return NULL; + + service = &hinic5_cqm_handle->service[service_type]; + buf_size = (u32)(PAGE_SIZE << (service->buf_order)); + /* subtract 1 link wqe */ + valid_wqe_per_buffer = buf_size / wqe_size - 1; + buf_num = wqe_number / valid_wqe_per_buffer; + if (wqe_number % valid_wqe_per_buffer != 0) + buf_num++; + + /* calculate the total number of WQEs */ + wqe_sum = buf_num * (valid_wqe_per_buffer + 1); + nonrdma_qinfo = kzalloc(sizeof(*nonrdma_qinfo), GFP_KERNEL); + if (unlikely(nonrdma_qinfo == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_ALLOC_FAIL(nonrdma_qinfo)); + return NULL; + } + + 
hinic5_cqm_object_init(&nonrdma_qinfo->common.object, service_type, object_type, + wqe_sum, hinic5_cqm_handle); + + /* Initialize the doorbell used by the current queue. + * The default doorbell is the hardware doorbell. + */ + nonrdma_qinfo->common.current_q_doorbell = HINIC5_CQM_HARDWARE_DOORBELL; + /* Currently, the connection mode is fixed. In the future, + * the service needs to transfer the connection mode. + */ + nonrdma_qinfo->common.queue_link_mode = HINIC5_CQM_QUEUE_RING_MODE; + + /* initialize public members */ + nonrdma_qinfo->common.priv = object_priv; + nonrdma_qinfo->common.valid_wqe_num = wqe_sum - buf_num; + + /* initialize internal private members */ + nonrdma_qinfo->wqe_size = wqe_size; + /* RQ (also called SRQ of FC) created by FC services, + * CTX needs to be created. + */ + nonrdma_qinfo->q_ctx_size = service->service_template.srq_ctx_size; + + ret = hinic5_cqm_nonrdma_queue_create(&nonrdma_qinfo->common.object); + if (ret == HINIC5_CQM_SUCCESS) + return &nonrdma_qinfo->common; + + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_fc_queue_create)); + kfree(nonrdma_qinfo); + return NULL; +} +EXPORT_SYMBOL(hinic5_cqm_object_fc_srq_create); + +static bool hinic5_cqm_object_nonrdma_queue_param_check(struct hinic5_hwdev *handle, + enum hinic5_cqm_object_type object_type, u32 wqe_size) +{ + /* wqe_size can't be more than PAGE_SIZE, can't be zero, must be power + * of 2 the function of hinic5_cqm_check_align is to check above + */ + if (wqe_size >= PAGE_SIZE || (!hinic5_cqm_check_align(wqe_size))) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(wqe_size)); + return false; + } + + /* nonrdma supports: RQ, SQ, SRQ, CQ, SCQ */ + if (object_type < HINIC5_CQM_OBJECT_NONRDMA_EMBEDDED_RQ || + object_type > HINIC5_CQM_OBJECT_NONRDMA_SCQ) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(object_type)); + return false; + } + + return true; +} + +/** + * Prototype : hinic5_cqm_object_nonrdma_queue_create + * Description : 
create nonrdma queue + * Input : void *ex_handle + * u32 service_type + * enum hinic5_cqm_object_type object_type: can be embedded RQ/SQ/CQ and + * SRQ/SCQ. + * u32 wqe_number: include link wqe + * u32 wqe_size: fixed length, must be power of 2 + * void *object_priv: private structure of the service layer, + * it can be NULL. + * Output : None + * Return Value : struct tag_hinic5_cqm_queue * + * 1.Date : 2015/4/15 + * Modification : Created function + */ +struct tag_hinic5_cqm_queue *hinic5_cqm_object_nonrdma_queue_create(void *ex_handle, u32 service_type, + enum hinic5_cqm_object_type object_type, + u32 wqe_number, u32 wqe_size, + void *object_priv) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_nonrdma_qinfo *nonrdma_qinfo = NULL; + struct tag_hinic5_cqm_service *service = NULL; + s32 ret; + + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ex_handle)); + return NULL; + } + + atomic_inc(&handle->hw_stats.hinic5_cqm_stats.hinic5_cqm_nonrdma_queue_create_cnt); + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + if (hinic5_cqm_object_create_check(hinic5_cqm_handle, service_type) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_object_create_check)); + return NULL; + } + + if (!hinic5_cqm_object_nonrdma_queue_param_check(handle, object_type, wqe_size)) + return NULL; + + nonrdma_qinfo = kzalloc(sizeof(*nonrdma_qinfo), GFP_KERNEL); + if (unlikely(nonrdma_qinfo == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_ALLOC_FAIL(nonrdma_qinfo)); + return NULL; + } + + hinic5_cqm_object_init(&nonrdma_qinfo->common.object, service_type, object_type, + wqe_number, hinic5_cqm_handle); + + /* Initialize the doorbell used by the current queue. 
+ * The default value is hardware doorbell + */ + nonrdma_qinfo->common.current_q_doorbell = HINIC5_CQM_HARDWARE_DOORBELL; + /* Currently, the link mode is hardcoded and needs to be transferred by + * the service side. + */ + nonrdma_qinfo->common.queue_link_mode = HINIC5_CQM_QUEUE_RING_MODE; + + nonrdma_qinfo->common.priv = object_priv; + + /* Initialize internal private members */ + nonrdma_qinfo->wqe_size = wqe_size; + service = &hinic5_cqm_handle->service[service_type]; + if (object_type == HINIC5_CQM_OBJECT_NONRDMA_SCQ) { + nonrdma_qinfo->q_ctx_size = service->service_template.scq_ctx_size; + } else if (object_type == HINIC5_CQM_OBJECT_NONRDMA_SRQ) { + /* Currently, the SRQ of the service is created through a + * dedicated interface. + */ + nonrdma_qinfo->q_ctx_size = service->service_template.srq_ctx_size; + } + + ret = hinic5_cqm_nonrdma_queue_create(&nonrdma_qinfo->common.object); + if (ret == HINIC5_CQM_SUCCESS) + return &nonrdma_qinfo->common; + + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_nonrdma_queue_create)); + kfree(nonrdma_qinfo); + return NULL; +} +EXPORT_SYMBOL(hinic5_cqm_object_nonrdma_queue_create); + +static bool hinic5_cqm_object_rdma_queue_param_check(struct hinic5_hwdev *handle, u32 service_type, + enum hinic5_cqm_object_type object_type) +{ + /* service_type must be HINIC5_CQM_SERVICE_T_ROCE or HINIC5_CQM_SERVICE_T_UB */ + if (service_type != HINIC5_CQM_SERVICE_T_ROCE && service_type != HINIC5_CQM_SERVICE_T_UB && service_type != HINIC5_CQM_SERVICE_T_VBS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(service_type)); + return false; + } + + /* rdma supports: QP, SRQ, SCQ */ + if (object_type > HINIC5_CQM_OBJECT_RDMA_SCQ || object_type < HINIC5_CQM_OBJECT_RDMA_QP) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(object_type)); + return false; + } + + return true; +} + +/** + * Prototype : hinic5_cqm_object_rdma_queue_create + * Description : create rdma queue + * Input : void *ex_handle + * u32 
service_type + * enum hinic5_cqm_object_type object_type: can be QP and SRQ/SCQ. + * u32 object_size + * void *object_priv: private structure of the service layer, + * it can be NULL. + * bool room_header_alloc: Whether to apply for queue room and + * header space + * u32 xid: apply for the reserved qpn 0~(1M-1) based on this value; + * if automatic allocation is required, + * xid[20:0] : fixed to 0x1fffff + * xid[23:21] : specified xid_lowbits[2:0] + * xid[26:24] : xid[2:0] match mode, see HINIC5_CQM_DYNAMIC_XID_MOD + * xid[27] : search mode, + * 0---specify the XID range, + * 1---search for the entire dynamic area + * xid[31:28] : rsvd + * notes: when index is HINIC5_CQM_INDEX_INVALID, means match all available xid + * u32 bitmap_start: start index of dynamic xid search range, + * valid when index[25]=0 && index[20:0]=0x1fffff + * u32 bitmap_end: end index of dynamic xid search range, + * valid when index[25]=0 && index[20:0]=0x1fffff + * when search forward(bitmap_start<bitmap_end), + * search range is [bitmap_start, bitmap_end) + * when search reverse(bitmap_start>bitmap_end), + * search range is (bitmap_end, bitmap_start]. 
+ * bitmap_start=bitmap_end is illegal in range search mode + * Output : None + * Return Value : struct tag_hinic5_cqm_queue * + * 1.Date : 2015/4/15 + * Modification : Created function + */ +struct tag_hinic5_cqm_queue *hinic5_cqm_object_rdma_queue_create(void *ex_handle, u32 service_type, + enum hinic5_cqm_object_type object_type, + u32 object_size, void *object_priv, + bool room_header_alloc, u32 xid, + u32 bitmap_start, u32 bitmap_end) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_rdma_qinfo *rdma_qinfo = NULL; + struct tag_hinic5_cqm_service *service = NULL; + struct tag_hinic5_cqm_bitmap_range bp_range; + s32 ret; + + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ex_handle)); + return NULL; + } + + atomic_inc(&handle->hw_stats.hinic5_cqm_stats.hinic5_cqm_rdma_queue_create_cnt); + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + if (hinic5_cqm_object_create_check(hinic5_cqm_handle, service_type) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_object_create_check)); + return NULL; + } + + if (!hinic5_cqm_object_rdma_queue_param_check(handle, service_type, object_type)) + return NULL; + + rdma_qinfo = kzalloc(sizeof(*rdma_qinfo), GFP_KERNEL); + if (unlikely(rdma_qinfo == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_ALLOC_FAIL(rdma_qinfo)); + return NULL; + } + + hinic5_cqm_object_init(&rdma_qinfo->common.object, service_type, object_type, + object_size, hinic5_cqm_handle); + rdma_qinfo->common.queue_link_mode = HINIC5_CQM_QUEUE_RDMA_QUEUE_MODE; + rdma_qinfo->common.priv = object_priv; + rdma_qinfo->common.current_q_room = HINIC5_CQM_RDMA_Q_ROOM_1; + rdma_qinfo->room_header_alloc = room_header_alloc; + rdma_qinfo->common.index = xid; + bp_range.start = bitmap_start; + bp_range.end = bitmap_end; + + /* Initializes the doorbell used by the 
current queue. + * The default value is hardware doorbell + */ + rdma_qinfo->common.current_q_doorbell = HINIC5_CQM_HARDWARE_DOORBELL; + + service = &hinic5_cqm_handle->service[service_type]; + if (object_type == HINIC5_CQM_OBJECT_RDMA_SCQ) + rdma_qinfo->q_ctx_size = service->service_template.scq_ctx_size; + else if (object_type == HINIC5_CQM_OBJECT_RDMA_SRQ) + rdma_qinfo->q_ctx_size = service->service_template.srq_ctx_size; + + ret = hinic5_cqm_rdma_queue_create(&rdma_qinfo->common.object, &bp_range); + if (ret == HINIC5_CQM_SUCCESS) + return &rdma_qinfo->common; + + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_rdma_queue_create)); + kfree(rdma_qinfo); + return NULL; +} +EXPORT_SYMBOL(hinic5_cqm_object_rdma_queue_create); + +/** + * Prototype : hinic5_cqm_object_rdma_table_get + * Description : create mtt and rdmarc of the rdma service + * Input : void *ex_handle + * u32 service_type + * enum hinic5_cqm_object_type object_type + * u32 index_base: start of index + * u32 index_number + * Output : None + * Return Value : struct tag_hinic5_cqm_mtt_rdmarc * + * 1.Date : 2015/4/15 + * Modification : Created function + */ +struct tag_hinic5_cqm_mtt_rdmarc *hinic5_cqm_object_rdma_table_get(void *ex_handle, u32 service_type, + enum hinic5_cqm_object_type object_type, + u32 index_base, u32 index_number) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_hinic5_cqm_rdma_table *rdma_table = NULL; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + s32 ret; + + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ex_handle)); + return NULL; + } + + atomic_inc(&handle->hw_stats.hinic5_cqm_stats.hinic5_cqm_rdma_table_create_cnt); + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + if (hinic5_cqm_object_create_check(hinic5_cqm_handle, service_type) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, 
HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_object_create_check)); + return NULL; + } + + /* service_type must be HINIC5_CQM_SERVICE_T_ROCE or HINIC5_CQM_SERVICE_T_UB */ + if (service_type != HINIC5_CQM_SERVICE_T_ROCE && service_type != HINIC5_CQM_SERVICE_T_UB) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(service_type)); + return NULL; + } + + if (object_type != HINIC5_CQM_OBJECT_MTT && + object_type != HINIC5_CQM_OBJECT_RDMARC) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(object_type)); + return NULL; + } + + rdma_table = kzalloc(sizeof(*rdma_table), GFP_KERNEL); + if (unlikely(rdma_table == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_ALLOC_FAIL(rdma_table)); + return NULL; + } + + hinic5_cqm_object_init(&rdma_table->common.object, service_type, object_type, + (u32)(index_number * sizeof(dma_addr_t)), hinic5_cqm_handle); + rdma_table->common.index_base = index_base; + rdma_table->common.index_number = index_number; + + ret = hinic5_cqm_rdma_table_create(&rdma_table->common.object); + if (ret == HINIC5_CQM_SUCCESS) + return &rdma_table->common; + + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_rdma_table_create)); + kfree(rdma_table); + return NULL; +} +EXPORT_SYMBOL(hinic5_cqm_object_rdma_table_get); + +static inline void hinic5_cqm_object_do_delete(struct tag_hinic5_cqm_object *object) +{ + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = object->hinic5_cqm_handle; + const u32 object_type = object->object_type; + + switch (object_type) { + case HINIC5_CQM_OBJECT_SERVICE_CTX: + case HINIC5_CQM_OBJECT_MPT: + hinic5_cqm_qpc_mpt_delete(object); + return; + case HINIC5_CQM_OBJECT_NONRDMA_EMBEDDED_RQ: + case HINIC5_CQM_OBJECT_NONRDMA_EMBEDDED_SQ: + case HINIC5_CQM_OBJECT_NONRDMA_EMBEDDED_CQ: + case HINIC5_CQM_OBJECT_NONRDMA_SCQ: + hinic5_cqm_nonrdma_queue_delete(object); + return; + case HINIC5_CQM_OBJECT_NONRDMA_SRQ: + if (object->service_type == HINIC5_CQM_SERVICE_T_TOE) + hinic5_cqm_share_recv_queue_delete(object); + 
else + hinic5_cqm_nonrdma_queue_delete(object); + return; + case HINIC5_CQM_OBJECT_RDMA_QP: + case HINIC5_CQM_OBJECT_RDMA_SRQ: + case HINIC5_CQM_OBJECT_RDMA_SCQ: + hinic5_cqm_rdma_queue_delete(object); + return; + case HINIC5_CQM_OBJECT_MTT: + case HINIC5_CQM_OBJECT_RDMARC: + hinic5_cqm_rdma_table_delete(object); + return; + default: + hinic5_cqm_err(hinic5_cqm_handle->dev, HINIC5_CQM_WRONG_VALUE(object_type)); + return; + } +} + +/** + * Prototype : hinic5_cqm_object_delete + * Description : Deletes a created object. This function may be sleep and wait + * for all operations on this object to be performed. + * Input : struct tag_hinic5_cqm_object *object + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void hinic5_cqm_object_delete(struct tag_hinic5_cqm_object *object) +{ + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct hinic5_hwdev *handle = NULL; + + if (unlikely(object == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(object)); + return; + } + if (!object->hinic5_cqm_handle) { + pr_err("[HINIC5_CQM]object del: hinic5_cqm_handle is null, service type %u, refcount %d\n", + object->service_type, (int)object->refcount.counter); + kfree(object); + return; + } + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)object->hinic5_cqm_handle; + + if (!hinic5_cqm_handle->ex_handle) { + pr_err("[HINIC5_CQM]object del: ex_handle is null, service type %u, refcount %d\n", + object->service_type, (int)object->refcount.counter); + kfree(object); + return; + } + + handle = hinic5_cqm_handle->ex_handle; + + if (object->service_type >= HINIC5_CQM_SERVICE_T_MAX) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(object->service_type)); + kfree(object); + return; + } + + hinic5_cqm_object_do_delete(object); + kfree(object); +} +EXPORT_SYMBOL(hinic5_cqm_object_delete); + +/** + * Prototype : hinic5_cqm_object_offset_addr + * Description : Only the rdma table can be searched to obtain the 
PA and VA + * at the specified offset of the object buffer. + * Input : struct tag_hinic5_cqm_object *object + * u32 offset: For a rdma table, the offset is the absolute index + * number. + * dma_addr_t *paddr: PA(physical address) + * Output : None + * Return Value : u8 * + * 1.Date : 2015/4/15 + * Modification : Created function + */ +u8 *hinic5_cqm_object_offset_addr(struct tag_hinic5_cqm_object *object, u32 offset, dma_addr_t *paddr) +{ + u32 object_type; + + if (!object) + return NULL; + + object_type = object->object_type; + + /* The data flow path takes performance into consideration and + * does not check input parameters. + */ + switch (object_type) { + case HINIC5_CQM_OBJECT_MTT: + case HINIC5_CQM_OBJECT_RDMARC: + return hinic5_cqm_rdma_table_offset_addr(object, offset, paddr); + default: + break; + } + + return NULL; +} +EXPORT_SYMBOL(hinic5_cqm_object_offset_addr); + +/** + * Prototype : hinic5_cqm_object_get + * Description : Obtain an object based on the index. + * Input : void *ex_handle + * enum hinic5_cqm_object_type object_type + * u32 index: support qpn,mptn,scqn,srqn (n->number) + * bool bh + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +struct tag_hinic5_cqm_object *hinic5_cqm_object_get(void *ex_handle, enum hinic5_cqm_object_type object_type, + u32 index, bool bh) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_bat_table *bat_table = NULL; + struct tag_hinic5_cqm_object_table *object_table = NULL; + struct tag_hinic5_cqm_cla_table *cla_table = NULL; + struct tag_hinic5_cqm_object *object = NULL; + + if (!ex_handle) + return NULL; + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + if (!hinic5_cqm_handle) + return NULL; + + bat_table = &hinic5_cqm_handle->bat_table; + + /* The data flow path takes performance into consideration and + * does not check 
input parameters. + */ + switch (object_type) { + case HINIC5_CQM_OBJECT_SERVICE_CTX: + cla_table = hinic5_cqm_cla_table_get(bat_table, HINIC5_CQM_BAT_ENTRY_T_QPC); + break; + case HINIC5_CQM_OBJECT_MPT: + cla_table = hinic5_cqm_cla_table_get(bat_table, HINIC5_CQM_BAT_ENTRY_T_MPT); + break; + case HINIC5_CQM_OBJECT_RDMA_SRQ: + cla_table = hinic5_cqm_cla_table_get(bat_table, HINIC5_CQM_BAT_ENTRY_T_SRQC); + break; + case HINIC5_CQM_OBJECT_RDMA_SCQ: + case HINIC5_CQM_OBJECT_NONRDMA_SCQ: + cla_table = hinic5_cqm_cla_table_get(bat_table, HINIC5_CQM_BAT_ENTRY_T_SCQC); + break; + default: + return NULL; + } + + if (!cla_table) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_cla_table_get)); + return NULL; + } + + object_table = &cla_table->obj_table; + object = hinic5_cqm_object_table_get(hinic5_cqm_handle, object_table, index, bh); + return object; +} +EXPORT_SYMBOL(hinic5_cqm_object_get); + +/** + * Prototype : hinic5_cqm_object_put + * Description : This function must be called after the hinic5_cqm_object_get + * function. Otherwise, the object cannot be released. + * Input : struct tag_hinic5_cqm_object *object + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void hinic5_cqm_object_put(struct tag_hinic5_cqm_object *object) +{ + /* The data flow path takes performance into consideration and + * does not check input parameters. + */ + if (!object) + return; + + if (atomic_dec_and_test(&object->refcount) != 0) + complete(&object->free); +} +EXPORT_SYMBOL(hinic5_cqm_object_put); + +/** + * Prototype : hinic5_cqm_object_funcid + * Description : Obtain the ID of the function to which the object belongs. + * Input : struct tag_hinic5_cqm_object *object + * Output : None + * Return Value : If successful, the ID of the function will be returned. + * If fail HINIC5_CQM_FAIL(-1) will be returned. 
+ * 1.Date : 2015/4/15 + * Modification : Created function + */ +s32 hinic5_cqm_object_funcid(struct tag_hinic5_cqm_object *object) +{ + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + + if (unlikely(object == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(object)); + return HINIC5_CQM_FAIL; + } + if (unlikely(object->hinic5_cqm_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(hinic5_cqm_handle)); + return HINIC5_CQM_FAIL; + } + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)object->hinic5_cqm_handle; + + return hinic5_cqm_handle->func_attribute.func_global_idx; +} +EXPORT_SYMBOL(hinic5_cqm_object_funcid); + +/** + * Prototype : hinic5_cqm_object_resize_alloc_new + * Description : Currently this function is only used for RoCE. + * The CQ buffer is adjusted, but the cqn and cqc remain + * unchanged. This function allocates new buffer, but does not + * release old buffer. The valid buffer is still old buffer. + * Input : struct tag_hinic5_cqm_object *object + * u32 object_size + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/15 + * Modification : Created function + */ +s32 hinic5_cqm_object_resize_alloc_new(struct tag_hinic5_cqm_object *object, u32 object_size) +{ + struct tag_hinic5_cqm_rdma_qinfo *qinfo = (struct tag_hinic5_cqm_rdma_qinfo *)(void *)object; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_service *service = NULL; + struct tag_hinic5_cqm_buf *q_room_buf = NULL; + struct hinic5_hwdev *handle = NULL; + u32 order, buf_size; + + if (unlikely(object == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(object)); + return HINIC5_CQM_FAIL; + } + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)object->hinic5_cqm_handle; + if (unlikely(hinic5_cqm_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(object->hinic5_cqm_handle)); + return HINIC5_CQM_FAIL; + } + handle = hinic5_cqm_handle->ex_handle; + + /* This interface is used only for the CQ of RoCE 
service. */ + if (object->service_type == HINIC5_CQM_SERVICE_T_ROCE && + object->object_type == HINIC5_CQM_OBJECT_RDMA_SCQ) { + service = hinic5_cqm_handle->service + object->service_type; + order = service->buf_order; + buf_size = (u32)(PAGE_SIZE << order); + + if (qinfo->common.current_q_room == HINIC5_CQM_RDMA_Q_ROOM_1) + q_room_buf = &qinfo->common.q_room_buf_2; + else + q_room_buf = &qinfo->common.q_room_buf_1; + + if (qinfo->room_header_alloc) { + q_room_buf->buf_number = ALIGN(object_size, buf_size) / + buf_size; + q_room_buf->page_number = q_room_buf->buf_number << + order; + q_room_buf->buf_size = buf_size; + if (hinic5_cqm_buf_alloc(hinic5_cqm_handle, q_room_buf, true) == + HINIC5_CQM_FAIL) { + hinic5_cqm_err(handle->dev_hdl, + HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_buf_alloc)); + return HINIC5_CQM_FAIL; + } + + qinfo->new_object_size = object_size; + return HINIC5_CQM_SUCCESS; + } + + hinic5_cqm_err(handle->dev_hdl, + HINIC5_CQM_WRONG_VALUE(qinfo->room_header_alloc)); + return HINIC5_CQM_FAIL; + } + + hinic5_cqm_err(handle->dev_hdl, "Cq resize alloc: service_type %u object_type %u do not support resize\n", + object->service_type, object->object_type); + return HINIC5_CQM_FAIL; +} +EXPORT_SYMBOL(hinic5_cqm_object_resize_alloc_new); + +/** + * Prototype : hinic5_cqm_object_resize_free_new + * Description : Currently this function is only used for RoCE. + * The CQ buffer is adjusted, but the cqn and cqc remain + * unchanged. This function frees new buffer, and is used to deal + * with exceptions. 
+ * Input : struct tag_hinic5_cqm_object *object + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void hinic5_cqm_object_resize_free_new(struct tag_hinic5_cqm_object *object) +{ + struct tag_hinic5_cqm_rdma_qinfo *qinfo = (struct tag_hinic5_cqm_rdma_qinfo *)(void *)object; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_buf *q_room_buf = NULL; + struct hinic5_hwdev *handle = NULL; + + if (unlikely(object == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(object)); + return; + } + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)object->hinic5_cqm_handle; + if (unlikely(hinic5_cqm_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(hinic5_cqm_handle)); + return; + } + handle = hinic5_cqm_handle->ex_handle; + + /* This interface is used only for the CQ of RoCE service. */ + if (object->service_type == HINIC5_CQM_SERVICE_T_ROCE && + object->object_type == HINIC5_CQM_OBJECT_RDMA_SCQ) { + if (qinfo->common.current_q_room == HINIC5_CQM_RDMA_Q_ROOM_1) + q_room_buf = &qinfo->common.q_room_buf_2; + else + q_room_buf = &qinfo->common.q_room_buf_1; + + qinfo->new_object_size = 0; + + hinic5_cqm_buf_free(q_room_buf, hinic5_cqm_handle->dev); + } else { + hinic5_cqm_err(handle->dev_hdl, "Cq resize free: service_type %u object_type %u do not support resize\n", + object->service_type, object->object_type); + } +} +EXPORT_SYMBOL(hinic5_cqm_object_resize_free_new); + +/** + * Prototype : hinic5_cqm_object_resize_free_old + * Description : Currently this function is only used for RoCE. + * The CQ buffer is adjusted, but the cqn and cqc remain + * unchanged. This function frees old buffer and switches the + * valid buffer to new buffer. 
+ * Input : struct tag_hinic5_cqm_object *object + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void hinic5_cqm_object_resize_free_old(struct tag_hinic5_cqm_object *object) +{ + struct tag_hinic5_cqm_rdma_qinfo *qinfo = (struct tag_hinic5_cqm_rdma_qinfo *)(void *)object; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_buf *q_room_buf = NULL; + + if (unlikely(object == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(object)); + return; + } + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)object->hinic5_cqm_handle; + if (unlikely(hinic5_cqm_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(hinic5_cqm_handle)); + return; + } + + /* This interface is used only for the CQ of RoCE service. */ + if (object->service_type == HINIC5_CQM_SERVICE_T_ROCE && + object->object_type == HINIC5_CQM_OBJECT_RDMA_SCQ) { + if (qinfo->common.current_q_room == HINIC5_CQM_RDMA_Q_ROOM_1) { + q_room_buf = &qinfo->common.q_room_buf_1; + qinfo->common.current_q_room = HINIC5_CQM_RDMA_Q_ROOM_2; + } else { + q_room_buf = &qinfo->common.q_room_buf_2; + qinfo->common.current_q_room = HINIC5_CQM_RDMA_Q_ROOM_1; + } + + object->object_size = qinfo->new_object_size; + + hinic5_cqm_buf_free(q_room_buf, hinic5_cqm_handle->dev); + } +} +EXPORT_SYMBOL(hinic5_cqm_object_resize_free_old); + +/** + * Prototype : hinic5_cqm_gid_base + * Description : Obtain the base virtual address of the gid table for FT + * debug. 
+ * Input : void *ex_handle + * Output : None + * 1.Date : 2015/9/8 + * Modification : Created function + */ +void *hinic5_cqm_gid_base(void *ex_handle) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_hinic5_cqm_cla_table *cla_table = NULL; + struct tag_hinic5_cqm_bat_table *bat_table = NULL; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_buf *cla_z_buf = NULL; + u32 entry_type, i; + + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ex_handle)); + return NULL; + } + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + if (unlikely(hinic5_cqm_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(hinic5_cqm_handle)); + return NULL; + } + + bat_table = &hinic5_cqm_handle->bat_table; + for (i = 0; i < HINIC5_CQM_BAT_ENTRY_MAX; i++) { + entry_type = bat_table->bat_entry_type[i]; + if (entry_type == HINIC5_CQM_BAT_ENTRY_T_GID) { + cla_table = &bat_table->entry[i]; + cla_z_buf = &cla_table->cla_z_buf; + if (cla_z_buf->buf_list) + return cla_z_buf->buf_list->va; + } + } + + return NULL; +} + +/** + * Prototype : hinic5_cqm_timer_base + * Description : Obtain the base virtual address of the timer for live + * migration. 
+ * Input : void *ex_handle + * Output : None + * Return Value : void + * 1.Date : 2020/5/21 + * Modification : Created function + */ +void *hinic5_cqm_timer_base(void *ex_handle) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_hinic5_cqm_cla_table *cla_table = NULL; + struct tag_hinic5_cqm_bat_table *bat_table = NULL; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_buf *cla_z_buf = NULL; + u32 entry_type, i; + + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ex_handle)); + return NULL; + } + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + if (unlikely(hinic5_cqm_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(hinic5_cqm_handle)); + return NULL; + } + + /* Timer resource is configured on PPF. */ + if (!HINIC5_CQM_IS_PPF(hinic5_cqm_handle)) { + hinic5_cqm_err(handle->dev_hdl, "%s: wrong function type:%d\n", + __func__, handle->hwif->attr.func_type); + return NULL; + } + + bat_table = &hinic5_cqm_handle->bat_table; + + for (i = 0; i < HINIC5_CQM_BAT_ENTRY_MAX; i++) { + entry_type = bat_table->bat_entry_type[i]; + if (entry_type != HINIC5_CQM_BAT_ENTRY_T_TIMER) + continue; + + cla_table = &bat_table->entry[i]; + cla_z_buf = &cla_table->cla_z_buf; + + if (!cla_z_buf->direct.va) { + if (hinic5_cqm_buf_alloc_direct(hinic5_cqm_handle, cla_z_buf, true) == + HINIC5_CQM_FAIL) { + hinic5_cqm_err(handle->dev_hdl, + HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_buf_alloc_direct)); + return NULL; + } + } + + return cla_z_buf->direct.va; + } + + return NULL; +} +EXPORT_SYMBOL(hinic5_cqm_timer_base); + +static inline bool val_in_range(u32 val, u32 start, u32 num) +{ + return val >= start && val - start < num; +} + +/* Convert func id to func offset used in timer buffers. 
*/ +STATIC s32 hinic5_cqm_timer_get_func_offset(struct hinic5_hwdev *ex_handle, + u32 func_id, u32 *func_offset) +{ + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = ex_handle->hinic5_cqm_hdl; + struct tag_hinic5_cqm_func_capability *cap = &hinic5_cqm_handle->func_capability; + u32 vf_offset; + int i; + + /* PF */ + if (val_in_range(func_id, cap->timer_pf_id_start, cap->timer_pf_num)) { + *func_offset = func_id - cap->timer_pf_id_start; + return HINIC5_CQM_SUCCESS; + } + + if (!val_in_range(func_id, cap->timer_vf_id_start, cap->timer_vf_num)) + goto fail; + + if (!cap->timer_vf_deploy_with_segs) { + vf_offset = func_id - cap->timer_vf_id_start; + *func_offset = cap->timer_pf_num + vf_offset; + return HINIC5_CQM_SUCCESS; + } + + /* Timer buffer segmentation deployment */ + vf_offset = 0; + for (i = 0; i < ARRAY_SIZE(cap->timer_vf_segs); i++) { + struct timer_vf_info_seg *seg = &cap->timer_vf_segs[i]; + if (seg->start == 0) + break; + if (val_in_range(func_id, seg->start, seg->num)) { + vf_offset += func_id - seg->start; + *func_offset = cap->timer_pf_num + vf_offset; + return HINIC5_CQM_SUCCESS; + } + vf_offset += seg->num; + } + +fail: + hinic5_cqm_err(ex_handle->dev_hdl, + "Timer clear: wrong func id %u\n", func_id); + return HINIC5_CQM_FAIL; +} + +STATIC void hinic5_cqm_clear_timer(struct hinic5_hwdev *handle, u32 func_id, + struct tag_hinic5_cqm_cla_table *cla_table) +{ + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = handle->hinic5_cqm_hdl; + struct tag_hinic5_cqm_func_capability *cap = &hinic5_cqm_handle->func_capability; + struct tag_hinic5_cqm_buf *cla_buf = &cla_table->cla_z_buf; + u32 func_timer_size = HINIC5_CQM_TIMER_ALIGN_SCALE_NUM * cap->timer_basic_size; + u32 func_buf_num = 0, func_offset = 0; + u32 i, func_buf_start, func_buf_end; + s32 ret; + + ret = hinic5_cqm_timer_get_func_offset(handle, func_id, &func_offset); + if (ret == HINIC5_CQM_FAIL) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_timer_get_func_offset)); + 
return; + } + + if (cla_buf->buf_size == 0) + goto fail; + func_buf_num = func_timer_size / cla_buf->buf_size; + if (func_buf_num == 0) { + /* Func timer size smaller than CLA buffer? Not yet implemented */ + goto fail; + } + + func_buf_start = func_offset * func_buf_num; + func_buf_end = func_buf_start + func_buf_num; + if (func_buf_end > cla_buf->buf_number) { + hinic5_cqm_err(handle->dev_hdl, + "Timer clear: func buffer end %u overflow, limit %u.\n", + func_buf_end, cla_buf->buf_number); + goto fail; + } + + hinic5_cqm_dbg(handle->dev_hdl, + "Timer clear: func id %u, offset %u. cla lvl %u.\n", + func_id, func_offset, cla_table->cla_lvl); + + for (i = func_buf_start; i < func_buf_end; i++) { + hinic5_cqm_dbg_on(hinic5_cqm_verbose, handle->dev_hdl, + "Timer clear: buf %4u, pa 0x%lx, va 0x%lx\n", + i, (uintptr_t)cla_buf->buf_list[i].pa, + (uintptr_t)cla_buf->buf_list[i].va); + memset(cla_buf->buf_list[i].va, 0, cla_buf->buf_size); + } + return; + +fail: + hinic5_cqm_err(handle->dev_hdl, + "Timer clear: failed. timer cla lvl %u, buf size %u, buf num 0x%x\n", + cla_table->cla_lvl, cla_buf->buf_size, cla_buf->buf_number); + hinic5_cqm_err(handle->dev_hdl, + "Timer clear: func id %u, offset %u. func timer size 0x%x, func buf num %u\n", + func_id, func_offset, func_timer_size, func_buf_num); +} + +/** + * Prototype : hinic5_cqm_function_timer_clear + * Description : Clear the timer buffer based on the function ID. + * The function ID starts from 0 and the timer buffer is arranged + * in sequence by function ID. 
+ * Input : void *ex_handle + * u32 functionid + * Output : None + * Return Value : void + * 1.Date : 2016/12/19 + * Modification : Created function + */ +void hinic5_cqm_function_timer_clear(void *ex_handle, u32 function_id) +{ + /* The timer buffer of one function is 32B*8wheel*2048spoke=128*4k */ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_hinic5_cqm_cla_table *cla_table = NULL; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + u32 loop, i; + + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ex_handle)); + return; + } + + atomic_inc(&handle->hw_stats.hinic5_cqm_stats.hinic5_cqm_func_timer_clear_cnt); + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + if (unlikely(hinic5_cqm_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(hinic5_cqm_handle)); + return; + } + + if (HINIC5_CQM_IS_LB_MODE_1_OR_2(hinic5_cqm_handle)) { + cla_table = &hinic5_cqm_handle->bat_table.timer_entry[0]; + loop = hinic5_cqm_handle->func_capability.smf_max_num; + } else { + cla_table = hinic5_cqm_cla_table_get(&hinic5_cqm_handle->bat_table, HINIC5_CQM_BAT_ENTRY_T_TIMER); + loop = 1; + } + + if (unlikely(cla_table == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(cla_table)); + return; + } + for (i = 0; i < loop; i++) { + hinic5_cqm_clear_timer(handle, function_id, cla_table); + cla_table++; + } +} +EXPORT_SYMBOL(hinic5_cqm_function_timer_clear); + +/** + * Prototype : hinic5_cqm_function_hash_buf_clear + * Description : clear hash buffer based on global function_id + * Input : void *ex_handle + * s32 global_funcid + * Output : None + * Return Value : None + * 1.Date : 2017/11/27 + * Modification : Created function + * 2.Date : 2021/02/23 + * Modification : Add para func_id; clear hash buf by func_id + */ +void hinic5_cqm_function_hash_buf_clear(void *ex_handle, s32 global_funcid) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct 
tag_hinic5_cqm_func_capability *func_cap = NULL; + struct tag_hinic5_cqm_cla_table *cla_table = NULL; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_buf *cla_z_buf = NULL; + s32 fake_funcid; + u32 loop; + u32 i; + + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ex_handle)); + return; + } + + atomic_inc(&handle->hw_stats.hinic5_cqm_stats.hinic5_cqm_func_hash_buf_clear_cnt); + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + if (unlikely(hinic5_cqm_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(hinic5_cqm_handle)); + return; + } + func_cap = &hinic5_cqm_handle->func_capability; + + /* fake vf adaption, switch to corresponding VF. */ + if (HINIC5_CQM_IS_FAKE_PARENT(hinic5_cqm_handle)) { + fake_funcid = global_funcid - + (s32)(func_cap->fake_cfg.child_func_start); + hinic5_cqm_info(handle->dev_hdl, "fake_funcid =%d\n", fake_funcid); + if (fake_funcid < 0 || fake_funcid >= HINIC5_CQM_FAKE_FUNC_MAX) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(fake_funcid)); + return; + } + + hinic5_cqm_handle = hinic5_cqm_handle->fake_hinic5_cqm_handle[fake_funcid]; + } + + if (HINIC5_CQM_IS_LB_MODE_1_OR_2(hinic5_cqm_handle)) { + cla_table = &hinic5_cqm_handle->bat_table.hash_entry[0]; + loop = hinic5_cqm_handle->func_capability.smf_max_num; + } else { + cla_table = hinic5_cqm_cla_table_get(&hinic5_cqm_handle->bat_table, HINIC5_CQM_BAT_ENTRY_T_HASH); + loop = 1; + } + + if (unlikely(cla_table == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(cla_table)); + return; + } + + while (loop > 0) { + cla_z_buf = &cla_table->cla_z_buf; + + for (i = 0; i < cla_z_buf->buf_number; i++) + memset(cla_z_buf->buf_list[i].va, 0, cla_z_buf->buf_size); + + cla_table++; + loop--; + } +} +EXPORT_SYMBOL(hinic5_cqm_function_hash_buf_clear); + +void hinic5_cqm_srq_used_rq_container_delete(struct tag_hinic5_cqm_object *object, u8 *container) +{ + struct 
tag_hinic5_cqm_queue *common = NULL; + struct tag_hinic5_cqm_nonrdma_qinfo *qinfo = NULL; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_srq_linkwqe *srq_link_wqe = NULL; + struct hinic5_hwdev *handle = NULL; + dma_addr_t addr; + u32 link_wqe_offset; + + if (!object || !container) { + pr_err("object or container is null\n"); + return; + } + + common = container_of(object, struct tag_hinic5_cqm_queue, object); + qinfo = container_of(common, struct tag_hinic5_cqm_nonrdma_qinfo, common); + link_wqe_offset = qinfo->wqe_per_buf * qinfo->wqe_size; + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(common->object.hinic5_cqm_handle); + handle = hinic5_cqm_handle->ex_handle; + + /* 1. Obtain the current container pa through link wqe table, + * unmap pa + */ + srq_link_wqe = (struct tag_hinic5_cqm_srq_linkwqe *)((uintptr_t)container + link_wqe_offset); + /* shift right by 2 bits to get the length of dw(4B) */ + hinic5_cqm_swab32((u8 *)(srq_link_wqe), sizeof(struct tag_hinic5_cqm_linkwqe) >> 2); + + addr = HINIC5_CQM_ADDR_COMBINE(srq_link_wqe->current_buffer_gpa_h, + srq_link_wqe->current_buffer_gpa_l); + if (addr == 0) { + hinic5_cqm_err(handle->dev_hdl, "Rq container del: buffer physical addr is null\n"); + return; + } + dma_unmap_single(hinic5_cqm_handle->dev, addr, qinfo->container_size, + DMA_BIDIRECTIONAL); + + /* 2. 
Obtain the current container va through link wqe table, free va */ + addr = HINIC5_CQM_ADDR_COMBINE(srq_link_wqe->current_buffer_addr_h, + srq_link_wqe->current_buffer_addr_l); + if (addr == 0) { + hinic5_cqm_err(handle->dev_hdl, "Rq container del: buffer virtual addr is null\n"); + return; + } + kfree((void *)(uintptr_t)addr); +} +EXPORT_SYMBOL(hinic5_cqm_srq_used_rq_container_delete); + +s32 hinic5_cqm_dtoe_share_recv_queue_create(void *ex_handle, u32 contex_size, + u32 *index_count, u32 *index) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_hinic5_cqm_toe_private_capability *tow_own_cap = NULL; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_bitmap *bitmap = NULL; + u32 step; + + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ex_handle)); + return HINIC5_CQM_FAIL; + } + if (unlikely(index_count == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(index_count)); + return HINIC5_CQM_FAIL; + } + if (unlikely(index == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(index)); + return HINIC5_CQM_FAIL; + } + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + if (unlikely(hinic5_cqm_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(hinic5_cqm_handle)); + return HINIC5_CQM_FAIL; + } + + tow_own_cap = &hinic5_cqm_handle->toe_own_capability; + + bitmap = &tow_own_cap->srqc_bitmap; + *index_count = (ALIGN(contex_size, tow_own_cap->toe_srqc_basic_size)) / + tow_own_cap->toe_srqc_basic_size; + /* toe srqc number must align of 2 */ + step = ALIGN(tow_own_cap->toe_srqc_number, 2); + *index = hinic5_cqm_bitmap_alloc(bitmap, step, *index_count, + hinic5_cqm_handle->func_capability.xid_alloc_mode); + if (*index >= bitmap->max_num) { + hinic5_cqm_err(handle->dev_hdl, "Srq create: queue index %u exceeds max_num %u\n", + *index, bitmap->max_num); + return HINIC5_CQM_FAIL; + } + *index += tow_own_cap->toe_srqc_start_id; + 
+ + atomic_inc(&handle->hw_stats.hinic5_cqm_stats.hinic5_cqm_srq_create_cnt); + + return HINIC5_CQM_SUCCESS; +} +EXPORT_SYMBOL(hinic5_cqm_dtoe_share_recv_queue_create); + +void hinic5_cqm_dtoe_free_srq_bitmap_index(void *ex_handle, u32 index_count, u32 index) +{ + struct hinic5_hwdev *handle = (struct hinic5_hwdev *)ex_handle; + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = NULL; + struct tag_hinic5_cqm_bitmap *bitmap = NULL; + + if (unlikely(ex_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(ex_handle)); + return; + } + + hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(handle->hinic5_cqm_hdl); + if (unlikely(hinic5_cqm_handle == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(hinic5_cqm_handle)); + return; + } + + bitmap = &hinic5_cqm_handle->toe_own_capability.srqc_bitmap; + if ((index + index_count) > bitmap->max_num || (index + index_count) <= index) { /* avoid index wrap-around */ + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_WRONG_VALUE(index + index_count)); + return; + } + + hinic5_cqm_bitmap_free(bitmap, index, index_count); +} +EXPORT_SYMBOL(hinic5_cqm_dtoe_free_srq_bitmap_index); diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_object.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_object.h new file mode 100644 index 00000000..218469f0 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_object.h @@ -0,0 +1,383 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_CQM_OBJECT_H +#define HINIC5_CQM_OBJECT_H + +#include "comm_defs.h" +#include "hinic5_hinic5_cqm.h" + +#define HINIC5_CQM_LINKWQE_128B 128 +#define HINIC5_CQM_MOD_TOE HINIC5_MOD_TOE +#define HINIC5_CQM_MOD_HINIC5_CQM HINIC5_MOD_HINIC5_CQM + +#ifdef __cplusplus +#if __cplusplus +extern "C" { +#endif +#endif /* __cplusplus */ + +/** + * @brief: create FC SRQ. 
+ * @details: The number of valid WQEs in the queue must meet the number of + * transferred WQEs. Linkwqe can only be filled at the end of the + * page. The actual number of valid links exceeds the requirement. + * The service needs to be informed of the number of extra links to + * be created. + * @param ex_handle: device pointer that represents the PF + * @param service_type: service type + * @param object_type: object type + * @param wqe_number: number of WQEs + * @param wqe_size: wqe size + * @param object_priv: pointer to object private information + * @retval struct tag_hinic5_cqm_queue*: queue structure pointer + * @date: 2019-5-4 + */ +struct tag_hinic5_cqm_queue *hinic5_cqm_object_fc_srq_create(void *ex_handle, u32 service_type, + enum hinic5_cqm_object_type object_type, + u32 wqe_number, u32 wqe_size, + void *object_priv); + +/** + * @brief: create RQ. + * @details: When SRQ is used, the RQ queue is created. + * @param ex_handle: device pointer that represents the PF + * @param service_type: service type + * @param object_type: object type + * @param init_rq_num: number of containers + * @param container_size: container size + * @param wqe_size: wqe size + * @param object_priv: pointer to object private information + * @retval struct tag_hinic5_cqm_queue*: queue structure pointer + * @date: 2019-5-4 + */ +struct tag_hinic5_cqm_queue *hinic5_cqm_object_recv_queue_create(void *ex_handle, u32 service_type, + enum hinic5_cqm_object_type object_type, + u32 init_rq_num, u32 container_size, + u32 wqe_size, void *object_priv); + +/** + * @brief: SRQ applies for a new container and is linked after the container + * is created. + * @details: SRQ applies for a new container and is linked after the container + * is created. 
+ * @param common: queue structure pointer + * @retval 0: success + * @retval -1: fail + * @date: 2019-5-4 + */ +s32 hinic5_cqm_object_share_recv_queue_add_container(struct tag_hinic5_cqm_queue *common); + +/** + * @brief: SRQ applies for a new container. After the container is created, + * no link is attached to the container. The service is attached to + * the container. + * @details: SRQ applies for a new container. After the container is created, + * no link is attached to the container. The service is attached to + * the container. + * @param common: queue structure pointer + * @param container_addr: returned container address + * @retval 0: success + * @retval -1: fail + * @date: 2019-5-4 + */ +s32 hinic5_cqm_object_srq_add_container_free(struct tag_hinic5_cqm_queue *common, u8 **container_addr); + +/** + * @brief: create SRQ for TOE services. + * @details: create SRQ for TOE services. + * @param ex_handle: device pointer that represents the PF + * @param service_type: service type + * @param object_type: object type + * @param container_number: number of containers + * @param container_size: container size + * @param wqe_size: wqe size + * @retval struct tag_hinic5_cqm_queue*: queue structure pointer + * @date: 2019-5-4 + */ +struct tag_hinic5_cqm_queue *hinic5_cqm_object_share_recv_queue_create(void *ex_handle, + u32 service_type, + enum hinic5_cqm_object_type object_type, + u32 container_number, + u32 container_size, + u32 wqe_size); + +/** + * @brief: create QPC and MPT. + * @details: When QPC and MPT are created, the interface sleeps. + * @param ex_handle: device pointer that represents the PF + * @param service_type: service type + * @param object_type: object type + * @param object_size: object size, in bytes. + * @param object_priv: private structure of the service layer. + * The value can be NULL. + * @param index: apply for reserved qpn based on the value. If automatic + * allocation is required, fill HINIC5_CQM_INDEX_INVALID. 
+ * @param bitmap_start: start index of bitmap when range search. + * @param bitmap_end: end index of bitmap when range search. + * @retval struct tag_hinic5_cqm_qpc_mpt *: pointer to the QPC/MPT structure + * @date: 2019-5-4 + */ +struct tag_hinic5_cqm_qpc_mpt *hinic5_cqm_object_qpc_mpt_create(void *ex_handle, u32 service_type, + enum hinic5_cqm_object_type object_type, + u32 object_size, void *object_priv, + u32 index, u32 bitmap_start, u32 bitmap_end); + +/** + * @brief: create a queue for non-RDMA services. + * @details: create a queue for non-RDMA services. The interface sleeps. + * @param ex_handle: device pointer that represents the PF + * @param service_type: service type + * @param object_type: object type + * @param wqe_number: number of Link WQEs + * @param wqe_size: fixed length, size 2^n + * @param object_priv: private structure of the service layer. + * The value can be NULL. + * @retval struct tag_hinic5_cqm_queue *: queue structure pointer + * @date: 2019-5-4 + */ +struct tag_hinic5_cqm_queue *hinic5_cqm_object_nonrdma_queue_create(void *ex_handle, u32 service_type, + enum hinic5_cqm_object_type object_type, + u32 wqe_number, u32 wqe_size, + void *object_priv); + +/** + * @brief: create a RDMA service queue. + * @details: create a queue for the RDMA service. The interface sleeps. + * @param ex_handle: device pointer that represents the PF + * @param service_type: service type + * @param object_type: object type + * @param object_size: object size + * @param object_priv: private structure of the service layer. + * The value can be NULL. + * @param room_header_alloc: whether to apply for the queue room and header + * space + * @param xid: apply for reserved qpn based on the value. If automatic + * allocation is required, fill HINIC5_CQM_INDEX_INVALID. + * @param bitmap_start: start index of bitmap when range search. + * @param bitmap_end: end index of bitmap when range search. 
+ * @retval struct tag_hinic5_cqm_queue *: queue structure pointer + * @date: 2019-5-4 + */ +struct tag_hinic5_cqm_queue *hinic5_cqm_object_rdma_queue_create(void *ex_handle, u32 service_type, + enum hinic5_cqm_object_type object_type, + u32 object_size, void *object_priv, + bool room_header_alloc, u32 xid, + u32 bitmap_start, u32 bitmap_end); + +/** + * @brief: create the MTT and RDMARC of the RDMA service. + * @details: create the MTT and RDMARC of the RDMA service. + * @param ex_handle: device pointer that represents the PF + * @param service_type: service type + * @param object_type: object type + * @param index_base: start index number + * @param index_number: index number + * @retval struct tag_hinic5_cqm_mtt_rdmarc *: pointer to the MTT/RDMARC structure + * @date: 2019-5-4 + */ +struct tag_hinic5_cqm_mtt_rdmarc *hinic5_cqm_object_rdma_table_get(void *ex_handle, u32 service_type, + enum hinic5_cqm_object_type object_type, + u32 index_base, u32 index_number); + +/** + * @brief: delete created objects. + * @details: delete the created object. This function does not return until all + * operations on the object are complete. + * @param object: object pointer + * @retval: void + * @date: 2019-5-4 + */ +void hinic5_cqm_object_delete(struct tag_hinic5_cqm_object *object); + +/** + * @brief: obtains the physical address and virtual address at the specified + * offset of the object buffer. + * @details: Only RDMA table query is supported to obtain the physical address + * and virtual address at the specified offset of the object buffer. + * @param object: object pointer + * @param offset: for a rdma table, offset is the absolute index number. + * @param paddr: The physical address is returned only for the rdma table. + * @retval u8 *: buffer specify the virtual address at the offset + * @date: 2019-5-4 + */ +u8 *hinic5_cqm_object_offset_addr(struct tag_hinic5_cqm_object *object, u32 offset, dma_addr_t *paddr); + +/** + * @brief: obtain object according index. 
+ * @details: obtain object according index. + * @param ex_handle: device pointer that represents the PF + * @param object_type: object type + * @param index: support qpn,mptn,scqn,srqn + * @param bh: whether to disable the bottom half of the interrupt + * @retval struct tag_hinic5_cqm_object *: object pointer + * @date: 2019-5-4 + */ +struct tag_hinic5_cqm_object *hinic5_cqm_object_get(void *ex_handle, enum hinic5_cqm_object_type object_type, + u32 index, bool bh); + +/** + * @brief: object reference counting release + * @details: After the function hinic5_cqm_object_get is invoked, this API must be put. + * Otherwise, the object cannot be released. + * @param object: object pointer + * @retval: void + * @date: 2019-5-4 + */ +void hinic5_cqm_object_put(struct tag_hinic5_cqm_object *object); + +/** + * @brief: obtain the ID of the function where the object resides. + * @details: obtain the ID of the function where the object resides. + * @param object: object pointer + * @retval >=0: ID of function + * @retval -1: fail + * @date: 2020-4-15 + */ +s32 hinic5_cqm_object_funcid(struct tag_hinic5_cqm_object *object); + +/** + * @brief: apply for a new space for an object. + * @details: Currently, this parameter is valid only for the ROCE service. + * The CQ buffer size is adjusted, but the CQN and CQC remain + * unchanged. New buffer space is applied for, and the old buffer + * space is not released. The current valid buffer is still the old + * buffer. + * @param object: object pointer + * @param object_size: new buffer size + * @retval 0: success + * @retval -1: fail + * @date: 2019-5-4 + */ +s32 hinic5_cqm_object_resize_alloc_new(struct tag_hinic5_cqm_object *object, u32 object_size); + +/** + * @brief: release the newly applied buffer space for the object. + * @details: This function is used to release the newly applied buffer space for + * service exception handling. 
+ * @param object: object pointer + * @retval: void + * @date: 2019-5-4 + */ +void hinic5_cqm_object_resize_free_new(struct tag_hinic5_cqm_object *object); + +/** + * @brief: release old buffer space for objects. + * @details: This function releases the old buffer and sets the current valid + * buffer to the new buffer. + * @param object: object pointer + * @retval: void + * @date: 2019-5-4 + */ +void hinic5_cqm_object_resize_free_old(struct tag_hinic5_cqm_object *object); + +/** + * @brief: release container. + * @details: release container. + * @param object: object pointer + * @param container: container pointer to be released + * @retval: void + * @date: 2019-5-4 + */ +void hinic5_cqm_srq_used_rq_container_delete(struct tag_hinic5_cqm_object *object, u8 *container); + +void *hinic5_cqm_get_db_addr(void *ex_handle, u32 service_type); + +s32 hinic5_cqm_ring_hardware_db_fc(void *ex_handle, u32 service_type, u8 db_count, + u8 pagenum, u64 db); + +/** + * @brief: provide the interface of knocking on doorbell. + * The HINIC5_CQM converts the pri to cos. + * @details: provide interface of knocking on doorbell for the HINIC5_CQM to convert + * the pri to cos. The doorbell transferred by the service must be the + * host sequence. This interface converts the network sequence. + * @param ex_handle: device pointer that represents the PF + * @param service_type: Each kernel-mode service is allocated a hardware + * doorbell page. + * @param db_count: PI[7:0] beyond 64b in the doorbell + * @param db: The doorbell content is organized by the service. If there is + * endian conversion, the service needs to complete the conversion. + * @retval 0: success + * @retval -1: fail + * @date: 2019-5-4 + */ +s32 hinic5_cqm_ring_hardware_db_update_pri(void *ex_handle, u32 service_type, + u8 db_count, u64 db); + +/** + * @brief: knock on software doorbell. + * @details: knock on software doorbell. + * @param object: object pointer + * @param db_record: software doorbell content. 
If there is big-endian + * conversion, the service needs to complete the conversion. + * @retval 0: success + * @retval -1: fail + * @date: 2019-5-4 + */ +s32 hinic5_cqm_ring_software_db(struct tag_hinic5_cqm_object *object, u64 db_record); + +/** + * @brief: obtain the base virtual address of the gid table. + * @details: obtain the base virtual address of the gid table. + * @param ex_handle: device pointer that represents the PF + * @retval void *: base virtual address of the gid table + * @date: 2019-5-4 + */ +void *hinic5_cqm_gid_base(void *ex_handle); + +/** + * @brief: obtain the base virtual address of the timer. + * @details: obtain the base virtual address of the timer. + * @param ex_handle: device pointer that represents the PF + * @retval void *: base virtual address of the timer + * @date: 2020-5-21 + */ +void *hinic5_cqm_timer_base(void *ex_handle); + +/** + * @brief: clear timer buffer. + * @details: clear the timer buffer based on the function ID. Function IDs start + * from 0, and timer buffers are arranged by function ID. + * @param ex_handle: device pointer that represents the PF + * @param function_id: function id + * @retval: void + * @date: 2019-5-4 + */ +void hinic5_cqm_function_timer_clear(void *ex_handle, u32 function_id); + +/** + * @brief: clear hash buffer. + * @details: clear the hash buffer based on the function ID.
+ * @param ex_handle: device pointer that represents the PF + * @param global_funcid + * @retval: void + * @date: 2019-5-4 + */ +void hinic5_cqm_function_hash_buf_clear(void *ex_handle, s32 global_funcid); + +s32 hinic5_cqm_ring_direct_wqe_db(void *ex_handle, u32 service_type, u8 db_count, + void *direct_wqe); + +/** + * @brief: ring the direct wqe doorbell for FC. + * @details: ring the direct wqe doorbell for FC. + * @param ex_handle: device pointer that represents the PF + * @param service_type: service type + * @param direct_wqe: direct wqe to be written + * @retval: s32, 0 on success, other values on failure + */ +s32 hinic5_cqm_ring_direct_wqe_db_fc(void *ex_handle, u32 service_type, + void *direct_wqe); + +#ifdef __cplusplus +#if __cplusplus +} +#endif +#endif /* __cplusplus */ + +#endif /* HINIC5_CQM_OBJECT_H */ diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_object_intern.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_object_intern.c new file mode 100644 index 00000000..ad96cce0 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_object_intern.c @@ -0,0 +1,1470 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/device.h> +#include <linux/gfp.h> +#include <linux/mm.h> + +#include "ossl_knl.h" +#include "hinic5_crm.h" +#include "hinic5_hw.h" +#include "hinic5_hwdev.h" + +#include "hinic5_cqm_object.h" +#include "hinic5_cqm_bitmap_table.h" +#include "hinic5_cqm_bat_cla.h" +#include "hinic5_cqm_main.h" +#include "hinic5_cqm_object_intern.h" + +#define srq_obj_intern_if_section + +/** + * Prototype : hinic5_cqm_container_free + * Description : Only the container buffer is released. The buffer in the WQE + * and fast link tables are not involved. + * Containers can be released from head to tail, including head + * and tail.
This function does not modify the start and + * end pointers of qinfo records. + * Input : u8 *srq_head_container + * u8 *srq_tail_container: If it is NULL, it means to release + * container from head to tail. + * struct tag_hinic5_cqm_queue *common + * Output : None + * Return Value : void + * 1.Date : 2016/2/1 + * Modification : Created function + */ +void hinic5_cqm_container_free(u8 *srq_head_container, u8 *srq_tail_container, + struct tag_hinic5_cqm_queue *common) +{ + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(common->object.hinic5_cqm_handle); + struct tag_hinic5_cqm_nonrdma_qinfo *qinfo = container_of(common, struct tag_hinic5_cqm_nonrdma_qinfo, + common); + u32 link_wqe_offset = qinfo->wqe_per_buf * qinfo->wqe_size; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_srq_linkwqe *srq_link_wqe = NULL; + u32 container_size = qinfo->container_size; + struct device *dev = hinic5_cqm_handle->dev; + u64 addr; + u8 *srqhead_container = srq_head_container; + u8 *srqtail_container = srq_tail_container; + + if (unlikely(srqhead_container == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_PTR_NULL(srqhead_container)); + return; + } + + /* 1. The range is released cyclically from the head to the tail, i.e. + * [head:tail]. If the tail is null, the range is [head:null]. Otherwise, + * [head:tail->next). + */ + if (srqtail_container) { + /* [head:tail->next): Update srqtail_container to the next + * container va. + */ + srq_link_wqe = (struct tag_hinic5_cqm_srq_linkwqe *)(srqtail_container + + link_wqe_offset); + /* Only the link wqe part needs to be converted. */ + hinic5_cqm_swab32((u8 *)(srq_link_wqe), sizeof(struct tag_hinic5_cqm_linkwqe) >> HINIC5_CQM_DW_SHIFT); + srqtail_container = (u8 *)(uintptr_t)HINIC5_CQM_ADDR_COMBINE(srq_link_wqe->fixed_next_buffer_addr_h, + srq_link_wqe->fixed_next_buffer_addr_l); + } + + do { + /* 2.
Obtain the link wqe of the current container */ + srq_link_wqe = (struct tag_hinic5_cqm_srq_linkwqe *)(srqhead_container + + link_wqe_offset); + /* Only the link wqe part needs to be converted. */ + hinic5_cqm_swab32((u8 *)(srq_link_wqe), sizeof(struct tag_hinic5_cqm_linkwqe) >> HINIC5_CQM_DW_SHIFT); + /* Obtain the va of the next container using the link wqe. */ + srqhead_container = (u8 *)(uintptr_t)HINIC5_CQM_ADDR_COMBINE(srq_link_wqe->fixed_next_buffer_addr_h, + srq_link_wqe->fixed_next_buffer_addr_l); + + /* 3. Obtain the current container pa from the link wqe, + * and cancel the mapping + */ + addr = HINIC5_CQM_ADDR_COMBINE(srq_link_wqe->current_buffer_gpa_h, + srq_link_wqe->current_buffer_gpa_l); + if (addr == 0) { + hinic5_cqm_err(handle->dev_hdl, "Container free: buffer physical addr is null\n"); + return; + } + dma_unmap_single(dev, (dma_addr_t)addr, container_size, + DMA_BIDIRECTIONAL); + + /* 4. Obtain the container va through linkwqe and release the + * container va. + */ + addr = HINIC5_CQM_ADDR_COMBINE(srq_link_wqe->current_buffer_addr_h, + srq_link_wqe->current_buffer_addr_l); + if (addr == 0) { + hinic5_cqm_err(handle->dev_hdl, "Container free: buffer virtual addr is null\n"); + return; + } + kfree((void *)(uintptr_t)addr); + } while (srqhead_container != srqtail_container); +} + +static void hinic5_cqm_update_srq_link_wqe(u32 link_wqe_offset, u8 *new_container, + dma_addr_t new_container_pa) +{ + struct tag_hinic5_cqm_srq_linkwqe *srq_link_wqe = + (struct tag_hinic5_cqm_srq_linkwqe *)((uintptr_t)new_container + link_wqe_offset); + struct tag_hinic5_cqm_linkwqe *link_wqe = &srq_link_wqe->linkwqe; + + link_wqe->o = HINIC5_CQM_LINK_WQE_OWNER_INVALID; + link_wqe->ctrlsl = HINIC5_CQM_LINK_WQE_CTRLSL_VALUE; + link_wqe->lp = HINIC5_CQM_LINK_WQE_LP_INVALID; + link_wqe->wf = HINIC5_CQM_WQE_WF_LINK; + srq_link_wqe->current_buffer_gpa_h = HINIC5_CQM_ADDR_HI(new_container_pa); + srq_link_wqe->current_buffer_gpa_l = HINIC5_CQM_ADDR_LW(new_container_pa); + 
srq_link_wqe->current_buffer_addr_h = HINIC5_CQM_ADDR_HI((uintptr_t)new_container); + srq_link_wqe->current_buffer_addr_l = HINIC5_CQM_ADDR_LW((uintptr_t)new_container); + + /* Convert only the area accessed by the chip to the network sequence */ + hinic5_cqm_swab32((u8 *)link_wqe, sizeof(struct tag_hinic5_cqm_linkwqe) >> HINIC5_CQM_DW_SHIFT); +} + +/** + * Prototype : hinic5_cqm_container_create + * Description : Create a container for the RQ or SRQ, link it to the tail of + * the queue, and update the tail container pointer of the queue. + * Input : struct tag_hinic5_cqm_object *object + * u8 **container_addr + * bool link + * Output : None + * Return Value : s32 + * 1.Date : 2016/2/16 + * Modification : Created function + */ +s32 hinic5_cqm_container_create(struct tag_hinic5_cqm_object *object, u8 **container_addr, bool link) +{ + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(object->hinic5_cqm_handle); + struct tag_hinic5_cqm_queue *common = container_of(object, struct tag_hinic5_cqm_queue, object); + struct tag_hinic5_cqm_nonrdma_qinfo *qinfo = container_of(common, struct tag_hinic5_cqm_nonrdma_qinfo, common); + u32 link_wqe_offset = qinfo->wqe_per_buf * qinfo->wqe_size; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_srq_linkwqe *srq_link_wqe = NULL; + struct tag_hinic5_cqm_linkwqe *link_wqe = NULL; + dma_addr_t new_container_pa; + u8 *new_container = NULL; + + /* 1. Applying for Container Space and Initializing Invalid/Normal WQE + * of the Container. 
+ */ + new_container = kzalloc(qinfo->container_size, GFP_ATOMIC); + if (!new_container) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_ALLOC_FAIL(new_container)); + return HINIC5_CQM_FAIL; + } + + /* Container PCI mapping */ + new_container_pa = dma_map_single(hinic5_cqm_handle->dev, new_container, qinfo->container_size, DMA_BIDIRECTIONAL); + if (dma_mapping_error(hinic5_cqm_handle->dev, new_container_pa) != 0) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_MAP_FAIL(new_container_pa)); + kfree(new_container); + return HINIC5_CQM_FAIL; + } + + /* 2. The container is linked to the SRQ, and the link wqe of + * tail_container and new_container is updated. + */ + /* If the SRQ is not empty, update the linkwqe of the tail container. */ + if (link) { + if (common->tail_container) { + srq_link_wqe = (struct tag_hinic5_cqm_srq_linkwqe *)(common->tail_container + link_wqe_offset); + link_wqe = &srq_link_wqe->linkwqe; + link_wqe->next_page_gpa_h = __swab32((u32)HINIC5_CQM_ADDR_HI(new_container_pa)); + link_wqe->next_page_gpa_l = __swab32((u32)HINIC5_CQM_ADDR_LW(new_container_pa)); + link_wqe->next_buffer_addr_h = __swab32((u32)HINIC5_CQM_ADDR_HI((uintptr_t)new_container)); + link_wqe->next_buffer_addr_l = __swab32((u32)HINIC5_CQM_ADDR_LW((uintptr_t)new_container)); + /* make sure next page gpa and next buffer addr of + * link wqe update first + */ + wmb(); + /* The SRQ tail container may be accessed by the chip. + * Therefore, obit must be set to 1 at last. + */ + (*(u32 *)(void *)link_wqe) |= 0x80; + /* make sure obit set ahead of fixed next buffer addr + * updating of srq link wqe + */ + wmb(); + srq_link_wqe->fixed_next_buffer_addr_h = (u32)HINIC5_CQM_ADDR_HI((uintptr_t)new_container); + srq_link_wqe->fixed_next_buffer_addr_l = (u32)HINIC5_CQM_ADDR_LW((uintptr_t)new_container); + } + } + + /* Update the Invalid WQE of a New Container */ + clear_bit(0x1F, (ulong *)new_container); + /* Update the link wqe of the new container. 
*/ + hinic5_cqm_update_srq_link_wqe(link_wqe_offset, new_container, new_container_pa); + if (link) + /* Update the tail pointer of a queue. */ + common->tail_container = new_container; + else + *container_addr = new_container; + + return HINIC5_CQM_SUCCESS; +} + +/** + * Prototype : hinic5_cqm_srq_container_init + * Description : Initialize the SRQ to create all containers and link them. + * Input : struct tag_hinic5_cqm_object *object + * Output : None + * Return Value : s32 + * 1.Date : 2016/2/3 + * Modification : Created function + */ +static s32 hinic5_cqm_srq_container_init(struct tag_hinic5_cqm_object *object) +{ + struct tag_hinic5_cqm_queue *common = container_of(object, struct tag_hinic5_cqm_queue, object); + struct tag_hinic5_cqm_nonrdma_qinfo *qinfo = container_of(common, struct tag_hinic5_cqm_nonrdma_qinfo, + common); + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)object->hinic5_cqm_handle; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + u32 container_num = object->object_size; + s32 ret; + u32 i; + + if (common->head_container || common->tail_container) { + hinic5_cqm_err(handle->dev_hdl, "Srq container init: srq tail/head container not null\n"); + return HINIC5_CQM_FAIL; + } + + /* Applying for a Container + * During initialization, the head/tail pointer is null. + * After the first application is successful, head=tail. + */ + ret = hinic5_cqm_container_create(&qinfo->common.object, NULL, true); + if (ret == HINIC5_CQM_FAIL) { + hinic5_cqm_err(handle->dev_hdl, "Srq container init: hinic5_cqm_srq_container_add fail\n"); + return HINIC5_CQM_FAIL; + } + common->head_container = common->tail_container; + + /* The container is dynamically created and the tail pointer is updated. + * If the container fails to be created, release the containers from + * head to null. 
+ */ + for (i = 1; i < container_num; i++) { + ret = hinic5_cqm_container_create(&qinfo->common.object, NULL, true); + if (ret == HINIC5_CQM_FAIL) { + hinic5_cqm_container_free(common->head_container, NULL, + &qinfo->common); + return HINIC5_CQM_FAIL; + } + } + + return HINIC5_CQM_SUCCESS; +} + +/** + * Prototype : hinic5_cqm_share_recv_queue_create + * Description : Create SRQ(share receive queue) + * Input : struct tag_hinic5_cqm_object *object + * Output : None + * Return Value : s32 + * 1.Date : 2016/1/27 + * Modification : Created function + */ +s32 hinic5_cqm_share_recv_queue_create(struct tag_hinic5_cqm_object *object) +{ + struct tag_hinic5_cqm_queue *common = container_of(object, struct tag_hinic5_cqm_queue, object); + struct tag_hinic5_cqm_nonrdma_qinfo *qinfo = container_of(common, struct tag_hinic5_cqm_nonrdma_qinfo, + common); + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)object->hinic5_cqm_handle; + struct tag_hinic5_cqm_toe_private_capability *toe_own_cap = &hinic5_cqm_handle->toe_own_capability; + struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_bitmap *bitmap = NULL; + u32 step; + s32 ret; + + /* 1. Create srq container, including initializing the link wqe. */ + ret = hinic5_cqm_srq_container_init(object); + if (ret == HINIC5_CQM_FAIL) { + hinic5_cqm_err(handle->dev_hdl, + HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_srq_container_init)); + return HINIC5_CQM_FAIL; + } + + /* 2. Create srq ctx: SRQ CTX is directly delivered by the driver to the + * chip memory area through the cmdq channel, and no CLA table + * management is required. Therefore, the HINIC5_CQM applies for only one empty + * buffer for the driver. 
+ */ + /* bitmap applies for index */ + bitmap = &toe_own_cap->srqc_bitmap; + qinfo->index_count = (ALIGN(qinfo->q_ctx_size, + toe_own_cap->toe_srqc_basic_size)) / + toe_own_cap->toe_srqc_basic_size; + /* align with 2 as the upper bound */ + step = ALIGN(toe_own_cap->toe_srqc_number, 2); + qinfo->common.index = hinic5_cqm_bitmap_alloc(bitmap, step, qinfo->index_count, + func_cap->xid_alloc_mode); + if (qinfo->common.index >= bitmap->max_num) { + hinic5_cqm_err(handle->dev_hdl, "Srq create: queue index %u exceeds max_num %u\n", + qinfo->common.index, bitmap->max_num); + goto err1; + } + qinfo->common.index += toe_own_cap->toe_srqc_start_id; + + /* apply for buffer for SRQC */ + common->q_ctx_vaddr = kzalloc(qinfo->q_ctx_size, GFP_KERNEL); + if (!common->q_ctx_vaddr) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_ALLOC_FAIL(q_ctx_vaddr)); + goto err2; + } + return HINIC5_CQM_SUCCESS; + +err2: + hinic5_cqm_bitmap_free(bitmap, + qinfo->common.index - toe_own_cap->toe_srqc_start_id, + qinfo->index_count); +err1: + hinic5_cqm_container_free(common->head_container, common->tail_container, + &qinfo->common); + return HINIC5_CQM_FAIL; +} + +/** + * Prototype : hinic5_cqm_srq_used_rq_delete + * Description : Delete RQ in TOE SRQ mode. 
+ * Input : struct tag_hinic5_cqm_object *object + * Output : None + * Return Value : void + * 1.Date : 2016/5/19 + * Modification : Created function + */ +static void hinic5_cqm_srq_used_rq_delete(const struct tag_hinic5_cqm_object *object) +{ + struct tag_hinic5_cqm_queue *common = container_of(object, struct tag_hinic5_cqm_queue, object); + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)(common->object.hinic5_cqm_handle); + struct tag_hinic5_cqm_nonrdma_qinfo *qinfo = container_of(common, struct tag_hinic5_cqm_nonrdma_qinfo, + common); + u32 link_wqe_offset = qinfo->wqe_per_buf * qinfo->wqe_size; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_srq_linkwqe *srq_link_wqe = NULL; + dma_addr_t addr; + + /* Currently, the SRQ solution does not support RQ initialization + * without mounting container. + * As a result, RQ resources are released incorrectly. + * Temporary workaround: Only one container is mounted during RQ + * initialization and only one container is released + * during resource release. + */ + if (unlikely(common->head_container == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR("Rq del: rq has no contianer to release\n"); + return; + } + + /* 1. Obtain current container pa from the link wqe table and + * cancel the mapping. + */ + srq_link_wqe = (struct tag_hinic5_cqm_srq_linkwqe *)(common->head_container + link_wqe_offset); + /* Only the link wqe part needs to be converted. */ + hinic5_cqm_swab32((u8 *)(srq_link_wqe), sizeof(struct tag_hinic5_cqm_linkwqe) >> HINIC5_CQM_DW_SHIFT); + + addr = HINIC5_CQM_ADDR_COMBINE(srq_link_wqe->current_buffer_gpa_h, + srq_link_wqe->current_buffer_gpa_l); + if (addr == 0) { + hinic5_cqm_err(handle->dev_hdl, "Rq del: buffer physical addr is null\n"); + return; + } + dma_unmap_single(hinic5_cqm_handle->dev, addr, qinfo->container_size, + DMA_BIDIRECTIONAL); + + /* 2. Obtain the container va through the linkwqe and release. 
*/ + addr = HINIC5_CQM_ADDR_COMBINE(srq_link_wqe->current_buffer_addr_h, + srq_link_wqe->current_buffer_addr_l); + if (addr == 0) { + hinic5_cqm_err(handle->dev_hdl, "Rq del: buffer virtual addr is null\n"); + return; + } + kfree((void *)(uintptr_t)addr); +} + +/** + * Prototype : hinic5_cqm_share_recv_queue_delete + * Description : The SRQ object is deleted. Delete only containers that are not + * used by SRQ, that is, containers from the head to the tail. + * The RQ releases containers that have been used by the RQ. + * Input : struct tag_hinic5_cqm_object *object + * Output : None + * Return Value : void + * 1.Date : 2016/2/2 + * Modification : Created function + */ +void hinic5_cqm_share_recv_queue_delete(struct tag_hinic5_cqm_object *object) +{ + struct tag_hinic5_cqm_queue *common = container_of(object, struct tag_hinic5_cqm_queue, object); + struct tag_hinic5_cqm_nonrdma_qinfo *qinfo = container_of(common, struct tag_hinic5_cqm_nonrdma_qinfo, + common); + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)object->hinic5_cqm_handle; + struct tag_hinic5_cqm_bitmap *bitmap = &hinic5_cqm_handle->toe_own_capability.srqc_bitmap; + u32 index = common->index - hinic5_cqm_handle->toe_own_capability.toe_srqc_start_id; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + + /* 1. Wait for completion and ensure that all references to the QPC + * are complete. + */ + if (atomic_dec_and_test(&object->refcount) != 0) + complete(&object->free); + else + hinic5_cqm_err(handle->dev_hdl, "Srq del: object is referred by others, has to wait for completion\n"); + + wait_for_completion(&object->free); + destroy_completion(&object->free); + /* 2. The corresponding index in the bitmap is cleared. */ + hinic5_cqm_bitmap_free(bitmap, index, qinfo->index_count); + + /* 3. 
SRQC resource release */ + if (unlikely(common->q_ctx_vaddr == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR("Srq del: srqc kfree, context virtual addr is null\n"); + return; + } + kfree(common->q_ctx_vaddr); + + /* 4. The SRQ queue is released. */ + hinic5_cqm_container_free(common->head_container, NULL, &qinfo->common); +} + +#define obj_intern_if_section + +/* include dynamic and static applications. */ +static u32 hinic5_cqm_general_bitmap_alloc(struct tag_hinic5_cqm_object *object, + struct tag_hinic5_cqm_cla_table *cla_table, + struct tag_hinic5_cqm_bitmap_range *bp_range, + u32 xid, u32 count) +{ + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)object->hinic5_cqm_handle; + struct tag_hinic5_cqm_func_capability *func_cap = &hinic5_cqm_handle->func_capability; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_bitmap *bitmap = &cla_table->bitmap; + u32 index; + + if (HINIC5_CQM_DYNAMIC_XID_ALLOC_MODE(xid)) { + if (HINIC5_CQM_DYNAMIC_XID_LB_MODE(xid) != HINIC5_CQM_XID_LOW_BIT_NONE || + HINIC5_CQM_DYNAMIC_XID_SEARCH_MODE(xid) != HINIC5_CQM_XID_SEARCH_ALL) { + if (count > 1) { + hinic5_cqm_warn(handle->dev_hdl, "Not support alloc multiple bits.\n"); + return HINIC5_CQM_INDEX_INVALID; + } + index = hinic5_cqm_bitmap_alloc_lowbits_align(bitmap, bp_range, hinic5_cqm_handle, + xid, func_cap->xid_alloc_mode); + } else { + /* apply for an index normally */ + index = hinic5_cqm_bitmap_alloc(bitmap, 1U << (cla_table->z + 1), + count, func_cap->xid_alloc_mode); + } + + if (index >= bitmap->max_num - bitmap->reserved_back) { + hinic5_cqm_warn(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_bitmap_alloc)); + return HINIC5_CQM_INDEX_INVALID; + } + } else { + if ((IS_MASTER_HOST(handle)) && (hinic5_func_type((void *)handle) != TYPE_PPF) && + (hinic5_support_vroce((void *)handle, NULL))) { + /* If PF is vroce control function, apply for index by xid */ + index = hinic5_cqm_bitmap_alloc_by_xid(bitmap, count, xid); 
+ } else { + /* apply for index to be reserved */ + index = hinic5_cqm_bitmap_alloc_reserved(bitmap, count, xid); + } + if (index != xid) { + hinic5_cqm_warn(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_bitmap_alloc_reserved)); + return HINIC5_CQM_INDEX_INVALID; + } + } + + return index; +} + +/** + * Prototype : hinic5_cqm_qpc_mpt_bitmap_alloc + * Description : Apply for index from the bitmap when creating QPC or MPT. + * Input : struct tag_hinic5_cqm_object *object + * struct tag_hinic5_cqm_cla_table *cla_table + * struct tag_hinic5_cqm_bitmap_range *bp_range + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/15 + * Modification : Created function + */ +static s32 hinic5_cqm_qpc_mpt_bitmap_alloc(struct tag_hinic5_cqm_object *object, + struct tag_hinic5_cqm_cla_table *cla_table, + struct tag_hinic5_cqm_bitmap_range *bp_range) +{ + struct tag_hinic5_cqm_qpc_mpt *common = container_of(object, struct tag_hinic5_cqm_qpc_mpt, object); + struct tag_hinic5_cqm_qpc_mpt_info *qpc_mpt_info = container_of(common, + struct tag_hinic5_cqm_qpc_mpt_info, + common); + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)object->hinic5_cqm_handle; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + u32 xid = qpc_mpt_info->common.xid; + + qpc_mpt_info->index_count = + (ALIGN(object->object_size, cla_table->obj_size)) / cla_table->obj_size; + qpc_mpt_info->common.xid = hinic5_cqm_general_bitmap_alloc(object, cla_table, bp_range, + xid, qpc_mpt_info->index_count); + if (qpc_mpt_info->common.xid == HINIC5_CQM_INDEX_INVALID) { + hinic5_cqm_warn(handle->dev_hdl, + HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_general_bitmap_alloc)); + return HINIC5_CQM_FAIL; + } + + return HINIC5_CQM_SUCCESS; +} + +/** + * Prototype : hinic5_cqm_qpc_mpt_create + * Description : Create QPC or MPT + * Input : struct tag_hinic5_cqm_object *object + * struct tag_hinic5_cqm_bitmap_range *bp_range + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/15 
+ * Modification : Created function + */ +s32 hinic5_cqm_qpc_mpt_create(struct tag_hinic5_cqm_object *object, struct tag_hinic5_cqm_bitmap_range *bp_range) +{ + struct tag_hinic5_cqm_qpc_mpt *common = container_of(object, struct tag_hinic5_cqm_qpc_mpt, object); + struct tag_hinic5_cqm_qpc_mpt_info *qpc_mpt_info = container_of(common, struct tag_hinic5_cqm_qpc_mpt_info, common); + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)object->hinic5_cqm_handle; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_object_table *object_table = NULL; + struct tag_hinic5_cqm_cla_table *cla_table = NULL; + struct tag_hinic5_cqm_bitmap *bitmap = NULL; + bool is_lock_bh; + u32 index, count; + + /* find the corresponding cla table */ + if (object->object_type == HINIC5_CQM_OBJECT_SERVICE_CTX) { + cla_table = hinic5_cqm_cla_table_get(&hinic5_cqm_handle->bat_table, HINIC5_CQM_BAT_ENTRY_T_QPC); + } else if (object->object_type == HINIC5_CQM_OBJECT_MPT) { + cla_table = hinic5_cqm_cla_table_get(&hinic5_cqm_handle->bat_table, HINIC5_CQM_BAT_ENTRY_T_MPT); + } else { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(object->object_type)); + return HINIC5_CQM_FAIL; + } + + if (unlikely(cla_table == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_cla_table_get)); + return HINIC5_CQM_FAIL; + } + + /* Bitmap applies for index. */ + if (hinic5_cqm_qpc_mpt_bitmap_alloc(object, cla_table, bp_range) == HINIC5_CQM_FAIL) { + hinic5_cqm_warn(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_qpc_mpt_bitmap_alloc)); + return HINIC5_CQM_FAIL; + } + + bitmap = &cla_table->bitmap; + index = qpc_mpt_info->common.xid; + count = qpc_mpt_info->index_count; + + /* Find the trunk page from the BAT/CLA and allocate the buffer. + * Ensure that the released buffer has been cleared. 
+ */ + if (!HINIC5_CQM_IS_FAKE_CHILD(hinic5_cqm_handle)) { + /* The CLA memory of the Fake VF are holded by the parent + * function, so the Fake VF can't get the memory. */ + qpc_mpt_info->common.vaddr = hinic5_cqm_cla_get(hinic5_cqm_handle, cla_table, + index, count, &common->paddr); + + if (!qpc_mpt_info->common.vaddr) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_cla_get)); + hinic5_cqm_err(handle->dev_hdl, + "Qpc mpt init: qpc mpt vaddr is null, alloc_static=%d\n", + cla_table->alloc_static); + goto err1; + } + } + + /* Indexes are associated with objects, and FC is executed + * in the interrupt context. + */ + object_table = &cla_table->obj_table; + is_lock_bh = (object->service_type != HINIC5_CQM_SERVICE_T_FC); + if (hinic5_cqm_object_table_insert(hinic5_cqm_handle, object_table, index, object, is_lock_bh) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_object_table_insert)); + goto err2; + } + + return HINIC5_CQM_SUCCESS; + +err2: + hinic5_cqm_cla_put(hinic5_cqm_handle, cla_table, index, count); +err1: + hinic5_cqm_bitmap_free(bitmap, index, count); + return HINIC5_CQM_FAIL; +} + +/** + * Prototype : hinic5_cqm_qpc_mpt_delete + * Description : Delete QPC or MPT. 
+ * Input : struct tag_hinic5_cqm_object *object + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void hinic5_cqm_qpc_mpt_delete(struct tag_hinic5_cqm_object *object) +{ + struct tag_hinic5_cqm_qpc_mpt *common = container_of(object, struct tag_hinic5_cqm_qpc_mpt, object); + struct tag_hinic5_cqm_qpc_mpt_info *qpc_mpt_info = container_of(common, + struct tag_hinic5_cqm_qpc_mpt_info, + common); + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)object->hinic5_cqm_handle; + struct tag_hinic5_cqm_bat_table *bat_table = &hinic5_cqm_handle->bat_table; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_object_table *object_table = NULL; + struct tag_hinic5_cqm_cla_table *cla_table = NULL; + u32 count = qpc_mpt_info->index_count; + u32 index = qpc_mpt_info->common.xid; + struct tag_hinic5_cqm_bitmap *bitmap = NULL; + + atomic_inc(&handle->hw_stats.hinic5_cqm_stats.hinic5_cqm_qpc_mpt_delete_cnt); + + /* find the corresponding cla table */ + if (object->object_type == HINIC5_CQM_OBJECT_SERVICE_CTX) { + cla_table = hinic5_cqm_cla_table_get(bat_table, HINIC5_CQM_BAT_ENTRY_T_QPC); + } else if (object->object_type == HINIC5_CQM_OBJECT_MPT) { + cla_table = hinic5_cqm_cla_table_get(bat_table, HINIC5_CQM_BAT_ENTRY_T_MPT); + } else { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(object->object_type)); + return; + } + + if (unlikely(cla_table == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_cla_table_get_qpc)); + return; + } + + /* disassociate index and object */ + object_table = &cla_table->obj_table; + if (object->service_type == HINIC5_CQM_SERVICE_T_FC) + hinic5_cqm_object_table_remove(hinic5_cqm_handle, object_table, index, object, + false); + else + hinic5_cqm_object_table_remove(hinic5_cqm_handle, object_table, index, object, + true); + + /* wait for completion to ensure that all references to + * the QPC are 
complete + */ + if (atomic_dec_and_test(&object->refcount) != 0) + complete(&object->free); + else + hinic5_cqm_err(handle->dev_hdl, "Qpc mpt del: object is referred by others, has to wait for completion\n"); + + /* Static QPC allocation must be non-blocking. + * Services ensure that the QPC is referenced + * when the QPC is deleted. Roce and UB service + * should depend on completion to avoid race condition. + */ + if (!cla_table->alloc_static || object->service_type == HINIC5_CQM_SERVICE_T_ROCE || + object->service_type == HINIC5_CQM_SERVICE_T_UB) + wait_for_completion(&object->free); + + /* VMware FC need explicitly deinit spin_lock in completion */ + destroy_completion(&object->free); + + /* release qpc buffer */ + hinic5_cqm_cla_put(hinic5_cqm_handle, cla_table, index, count); + + /* release the index to the bitmap */ + bitmap = &cla_table->bitmap; + hinic5_cqm_bitmap_free(bitmap, index, count); +} + +/** + * Prototype : hinic5_cqm_linkwqe_fill + * Description : Used to organize the queue buffer of non-RDMA services and + * fill the link wqe. + * Input : wqe_per_buf: Linkwqe is not included. + * wqe_number: Linkwqe is not included. + * tail: true - The linkwqe must be at the end of the page; + * false - The linkwqe can be not at the end of the page. + * Output : None + * Return Value : void + * 1.Date : 2015/6/15 + * Modification : Created function + */ +static void hinic5_cqm_linkwqe_fill(struct tag_hinic5_cqm_buf *buf, u32 wqe_per_buf, u32 wqe_size, + u32 wqe_number, bool tail, u8 link_mode) +{ + struct tag_hinic5_cqm_linkwqe_128B *linkwqe = NULL; + struct tag_hinic5_cqm_linkwqe *wqe = NULL; + dma_addr_t addr; + u8 *tmp = NULL; + u8 *va = NULL; + u32 i; + + /* The linkwqe of other buffer except the last buffer + * is directly filled to the tail. 
+ */ + for (i = 0; i < buf->buf_number; i++) { + va = (u8 *)(buf->buf_list[i].va); + + if (i != (buf->buf_number - 1)) { + wqe = (struct tag_hinic5_cqm_linkwqe *)(va + (u32)(wqe_size * wqe_per_buf)); + wqe->wf = HINIC5_CQM_WQE_WF_LINK; + wqe->ctrlsl = HINIC5_CQM_LINK_WQE_CTRLSL_VALUE; + wqe->lp = HINIC5_CQM_LINK_WQE_LP_INVALID; + /* The valid value of link wqe needs to be set to 1. + * Each service ensures that o-bit=1 indicates that + * link wqe is valid and o-bit=0 indicates that + * link wqe is invalid. + */ + wqe->o = HINIC5_CQM_LINK_WQE_OWNER_VALID; + addr = buf->buf_list[(u32)(i + 1)].pa; + wqe->next_page_gpa_h = HINIC5_CQM_ADDR_HI(addr); + wqe->next_page_gpa_l = HINIC5_CQM_ADDR_LW(addr); + } else { /* linkwqe special padding of the last buffer */ + if (tail) { + /* must be filled at the end of the page */ + tmp = va + (u32)(wqe_size * wqe_per_buf); + wqe = (struct tag_hinic5_cqm_linkwqe *)tmp; + } else { + /* The last linkwqe is filled + * following the last wqe. + */ + tmp = va + (u32)(wqe_size * (wqe_number - wqe_per_buf * + (buf->buf_number - 1))); + wqe = (struct tag_hinic5_cqm_linkwqe *)tmp; + } + wqe->wf = HINIC5_CQM_WQE_WF_LINK; + wqe->ctrlsl = HINIC5_CQM_LINK_WQE_CTRLSL_VALUE; + + /* In link mode, the last link WQE is invalid; + * In ring mode, the last link wqe is valid, pointing to + * the home page, and the lp is set. + */ + if (link_mode == HINIC5_CQM_QUEUE_LINK_MODE) { + wqe->o = HINIC5_CQM_LINK_WQE_OWNER_INVALID; + } else { + /* The lp field of the last link_wqe is set to + * 1, indicating that the meaning of the o-bit + * is reversed. + */ + wqe->lp = HINIC5_CQM_LINK_WQE_LP_VALID; + wqe->o = HINIC5_CQM_LINK_WQE_OWNER_VALID; + addr = buf->buf_list[0].pa; + wqe->next_page_gpa_h = HINIC5_CQM_ADDR_HI(addr); + wqe->next_page_gpa_l = HINIC5_CQM_ADDR_LW(addr); + } + } + + if (wqe_size == HINIC5_CQM_LINKWQE_128B) { + /* After the B800 version, the WQE obit scheme is + * changed. 
The 64B bits before and after the 128B WQE + * need to be assigned a value: + * ifoe the 63rd bit from the end of the last 64B is + * obit; + * toe the 157th bit from the end of the last 64B is + * obit. + */ + linkwqe = (struct tag_hinic5_cqm_linkwqe_128B *)(void *)wqe; + linkwqe->second64B.third_16B.bs.toe_o = HINIC5_CQM_LINK_WQE_OWNER_VALID; + linkwqe->second64B.forth_16B.bs.ifoe_o = HINIC5_CQM_LINK_WQE_OWNER_VALID; + + /* shift 2 bits by right to get length of dw(4B) */ + hinic5_cqm_swab32((u8 *)wqe, sizeof(struct tag_hinic5_cqm_linkwqe_128B) >> 2); + } else { + /* shift 2 bits by right to get length of dw(4B) */ + hinic5_cqm_swab32((u8 *)wqe, sizeof(struct tag_hinic5_cqm_linkwqe) >> 2); + } + } +} + +static int hinic5_cqm_nonrdma_queue_ctx_create_scq(struct tag_hinic5_cqm_object *object) +{ + struct tag_hinic5_cqm_queue *common = container_of(object, struct tag_hinic5_cqm_queue, object); + struct tag_hinic5_cqm_nonrdma_qinfo *qinfo = container_of(common, struct tag_hinic5_cqm_nonrdma_qinfo, + common); + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)object->hinic5_cqm_handle; + struct tag_hinic5_cqm_bat_table *bat_table = &hinic5_cqm_handle->bat_table; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_object_table *object_table = NULL; + struct tag_hinic5_cqm_cla_table *cla_table = NULL; + struct tag_hinic5_cqm_bitmap *bitmap = NULL; + bool bh = false; + + /* find the corresponding cla table */ + cla_table = hinic5_cqm_cla_table_get(bat_table, HINIC5_CQM_BAT_ENTRY_T_SCQC); + if (!cla_table) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(nonrdma_hinic5_cqm_cla_table_get)); + return HINIC5_CQM_FAIL; + } + + /* bitmap applies for index */ + bitmap = &cla_table->bitmap; + qinfo->index_count = (ALIGN(qinfo->q_ctx_size, cla_table->obj_size)) / cla_table->obj_size; + qinfo->common.index = hinic5_cqm_bitmap_alloc(bitmap, 1U << (cla_table->z + 1), + qinfo->index_count, + 
hinic5_cqm_handle->func_capability.xid_alloc_mode); + if (qinfo->common.index >= bitmap->max_num) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(nonrdma_hinic5_cqm_bitmap_alloc)); + return HINIC5_CQM_FAIL; + } + + /* find the trunk page from BAT/CLA and allocate the buffer */ + common->q_ctx_vaddr = hinic5_cqm_cla_get(hinic5_cqm_handle, cla_table, qinfo->common.index, + qinfo->index_count, &common->q_ctx_paddr); + if (!common->q_ctx_vaddr) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(nonrdma_hinic5_cqm_cla_get_lock)); + hinic5_cqm_bitmap_free(bitmap, qinfo->common.index, qinfo->index_count); + return HINIC5_CQM_FAIL; + } + + /* index and object association */ + object_table = &cla_table->obj_table; + bh = (object->service_type == HINIC5_CQM_SERVICE_T_FC) ? false : true; + if (hinic5_cqm_object_table_insert(hinic5_cqm_handle, object_table, qinfo->common.index, object, + bh) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(nonrdma_hinic5_cqm_object_table_insert)); + hinic5_cqm_cla_put(hinic5_cqm_handle, cla_table, qinfo->common.index, qinfo->index_count); + hinic5_cqm_bitmap_free(bitmap, qinfo->common.index, qinfo->index_count); + + return HINIC5_CQM_FAIL; + } + + return 0; +} + +static s32 hinic5_cqm_nonrdma_queue_ctx_create(struct tag_hinic5_cqm_object *object) +{ + struct tag_hinic5_cqm_queue *common = container_of(object, struct tag_hinic5_cqm_queue, object); + struct tag_hinic5_cqm_nonrdma_qinfo *qinfo = container_of(common, struct tag_hinic5_cqm_nonrdma_qinfo, + common); + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)object->hinic5_cqm_handle; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + u32 shift; + int ret; + + if (object->object_type == HINIC5_CQM_OBJECT_NONRDMA_SRQ) { + shift = hinic5_cqm_shift(qinfo->q_ctx_size); + common->q_ctx_vaddr = hinic5_cqm_kmalloc_align(qinfo->q_ctx_size, + GFP_KERNEL | __GFP_ZERO, + (u16)shift); + if 
(!common->q_ctx_vaddr) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_ALLOC_FAIL(q_ctx_vaddr)); + return HINIC5_CQM_FAIL; + } + + common->q_ctx_paddr = dma_map_single(hinic5_cqm_handle->dev, common->q_ctx_vaddr, + qinfo->q_ctx_size, DMA_BIDIRECTIONAL); + if (dma_mapping_error(hinic5_cqm_handle->dev, common->q_ctx_paddr) != 0) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_MAP_FAIL(q_ctx_vaddr)); + hinic5_cqm_kfree_align(common->q_ctx_vaddr); + common->q_ctx_vaddr = NULL; + return HINIC5_CQM_FAIL; + } + } else if (object->object_type == HINIC5_CQM_OBJECT_NONRDMA_SCQ) { + ret = hinic5_cqm_nonrdma_queue_ctx_create_scq(object); + if (ret != 0) + return ret; + } + + return HINIC5_CQM_SUCCESS; +} + +static void hinic5_cqm_free_queue_header(struct tag_hinic5_cqm_queue *common, struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct hinic5_hwdev *handle) +{ + dma_unmap_single(hinic5_cqm_handle->dev, common->q_header_paddr, sizeof(struct tag_hinic5_cqm_queue_header), + DMA_BIDIRECTIONAL); + + hinic5_cqm_kfree_align(common->q_header_vaddr); + common->q_header_vaddr = NULL; +} + +static s32 hinic5_cqm_alloc_queue_header(struct tag_hinic5_cqm_queue *common, struct tag_hinic5_cqm_handle *hinic5_cqm_handle, + struct hinic5_hwdev *handle) +{ + common->q_header_vaddr = hinic5_cqm_kmalloc_align(sizeof(struct tag_hinic5_cqm_queue_header), + GFP_KERNEL | __GFP_ZERO, HINIC5_CQM_QHEAD_ALIGN_ORDER); + if (!common->q_header_vaddr) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_ALLOC_FAIL(q_header_vaddr)); + return HINIC5_CQM_FAIL; + } + + common->q_header_paddr = dma_map_single(hinic5_cqm_handle->dev, common->q_header_vaddr, + sizeof(struct tag_hinic5_cqm_queue_header), DMA_BIDIRECTIONAL); + if (dma_mapping_error(hinic5_cqm_handle->dev, common->q_header_paddr) != 0) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_MAP_FAIL(q_header_vaddr)); + hinic5_cqm_kfree_align(common->q_header_vaddr); + common->q_header_vaddr = NULL; + return HINIC5_CQM_FAIL; + } + + return HINIC5_CQM_SUCCESS; +} 
+/** + * Prototype : hinic5_cqm_nonrdma_queue_create + * Description : Create a queue for non-RDMA services. + * Input : struct tag_hinic5_cqm_object *object + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/15 + * Modification : Created function + */ +s32 hinic5_cqm_nonrdma_queue_create(struct tag_hinic5_cqm_object *object) +{ + struct tag_hinic5_cqm_queue *common = container_of(object, struct tag_hinic5_cqm_queue, object); + struct tag_hinic5_cqm_nonrdma_qinfo *qinfo = container_of(common, struct tag_hinic5_cqm_nonrdma_qinfo, common); + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)object->hinic5_cqm_handle; + struct tag_hinic5_cqm_buf *q_room_buf = &common->q_room_buf_1; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + u32 wqe_number = qinfo->common.object.object_size; + u32 wqe_size = qinfo->wqe_size; + u32 order = hinic5_cqm_handle->service[object->service_type].buf_order; + u32 buf_number, buf_size; + bool tail = false; /* determine whether the linkwqe is at the end of the page */ + + /* When creating a CQ/SCQ queue, the page size is 4 KB, + * the linkwqe must be at the end of the page. + */ + if (object->object_type == HINIC5_CQM_OBJECT_NONRDMA_EMBEDDED_CQ || object->object_type == HINIC5_CQM_OBJECT_NONRDMA_SCQ) { + /* depth: 2^n-aligned; depth range: 256-32 K */ + if (wqe_number < HINIC5_CQM_CQ_DEPTH_MIN || wqe_number > HINIC5_CQM_CQ_DEPTH_MAX) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(wqe_number)); + return HINIC5_CQM_FAIL; + } + if (!hinic5_cqm_check_align(wqe_number)) { + hinic5_cqm_err(handle->dev_hdl, "Nonrdma queue alloc: wqe_number is not align on 2^n\n"); + return HINIC5_CQM_FAIL; + } + + order = HINIC5_CQM_4K_PAGE_ORDER; /* wqe page 4k */ + tail = true; /* The linkwqe must be at the end of the page. 
*/ + buf_size = HINIC5_CQM_4K_PAGE_SIZE; + } else { + buf_size = (u32)(PAGE_SIZE << order); + } + + /* Calculate the total number of buffers required, + * -1 indicates that the link wqe in a buffer is deducted. + */ + qinfo->wqe_per_buf = (buf_size / wqe_size) - 1; + /* number of linkwqes that are included in the depth transferred + * by the service + */ + buf_number = ALIGN((wqe_size * wqe_number), buf_size) / buf_size; + + /* apply for buffer */ + q_room_buf->buf_number = buf_number; + q_room_buf->buf_size = buf_size; + q_room_buf->page_number = buf_number << order; + if (hinic5_cqm_buf_alloc(hinic5_cqm_handle, q_room_buf, false) == HINIC5_CQM_FAIL) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_buf_alloc)); + return HINIC5_CQM_FAIL; + } + /* fill link wqe, wqe_number - buf_number is the number of wqe without + * link wqe + */ + hinic5_cqm_linkwqe_fill(q_room_buf, qinfo->wqe_per_buf, wqe_size, wqe_number - buf_number, tail, common->queue_link_mode); + + /* create queue header */ + if (hinic5_cqm_alloc_queue_header(common, hinic5_cqm_handle, handle) != HINIC5_CQM_SUCCESS) { + goto err1; + } + + /* create queue ctx */ + if (hinic5_cqm_nonrdma_queue_ctx_create(object) == HINIC5_CQM_FAIL) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_nonrdma_queue_ctx_create)); + goto err2; + } + + return HINIC5_CQM_SUCCESS; + +err2: + hinic5_cqm_free_queue_header(common, hinic5_cqm_handle, handle); +err1: + hinic5_cqm_buf_free(q_room_buf, hinic5_cqm_handle->dev); + return HINIC5_CQM_FAIL; +} + +/** + * Prototype : hinic5_cqm_nonrdma_queue_delete + * Description : Delete the queues of non-RDMA services. 
+ * Input : struct tag_hinic5_cqm_object *object + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void hinic5_cqm_nonrdma_queue_delete(struct tag_hinic5_cqm_object *object) +{ + struct tag_hinic5_cqm_queue *common = container_of(object, struct tag_hinic5_cqm_queue, object); + struct tag_hinic5_cqm_nonrdma_qinfo *qinfo = container_of(common, struct tag_hinic5_cqm_nonrdma_qinfo, common); + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)object->hinic5_cqm_handle; + struct tag_hinic5_cqm_bat_table *bat_table = &hinic5_cqm_handle->bat_table; + struct tag_hinic5_cqm_buf *q_room_buf = &common->q_room_buf_1; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_object_table *object_table = NULL; + struct tag_hinic5_cqm_cla_table *cla_table = NULL; + struct tag_hinic5_cqm_bitmap *bitmap = NULL; + u32 index = qinfo->common.index; + u32 count = qinfo->index_count; + + atomic_inc(&handle->hw_stats.hinic5_cqm_stats.hinic5_cqm_nonrdma_queue_delete_cnt); + + /* The SCQ has an independent SCQN association. 
*/ + if (object->object_type == HINIC5_CQM_OBJECT_NONRDMA_SCQ) { + cla_table = hinic5_cqm_cla_table_get(bat_table, HINIC5_CQM_BAT_ENTRY_T_SCQC); + if (unlikely(cla_table == NULL)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_cla_table_get_queue)); + return; + } + + /* disassociate index and object */ + object_table = &cla_table->obj_table; + hinic5_cqm_object_table_remove(hinic5_cqm_handle, object_table, index, object, object->service_type != HINIC5_CQM_SERVICE_T_FC); + } + + /* wait for completion to ensure that all references to + * the QPC are complete + */ + if (atomic_dec_and_test(&object->refcount) != 0) + complete(&object->free); + else + hinic5_cqm_err(handle->dev_hdl, "Nonrdma queue del: object is referred by others, has to wait for completion\n"); + + wait_for_completion(&object->free); + destroy_completion(&object->free); + + /* If the q header exists, release. */ + if (qinfo->common.q_header_vaddr) { + dma_unmap_single(hinic5_cqm_handle->dev, common->q_header_paddr, sizeof(struct tag_hinic5_cqm_queue_header), + DMA_BIDIRECTIONAL); + + hinic5_cqm_kfree_align(qinfo->common.q_header_vaddr); + qinfo->common.q_header_vaddr = NULL; + } + + /* RQ deletion in TOE SRQ mode */ + if (common->queue_link_mode == HINIC5_CQM_QUEUE_TOE_SRQ_LINK_MODE) + hinic5_cqm_srq_used_rq_delete(&common->object); + else + /* If q room exists, release. */ + hinic5_cqm_buf_free(q_room_buf, hinic5_cqm_handle->dev); + /* SRQ and SCQ have independent CTXs and release. */ + if (object->object_type == HINIC5_CQM_OBJECT_NONRDMA_SRQ) { + /* The CTX of the SRQ of the nordma is + * applied for independently. + */ + if (common->q_ctx_vaddr) { + dma_unmap_single(hinic5_cqm_handle->dev, common->q_ctx_paddr, qinfo->q_ctx_size, DMA_BIDIRECTIONAL); + + hinic5_cqm_kfree_align(common->q_ctx_vaddr); + common->q_ctx_vaddr = NULL; + } + } else if (object->object_type == HINIC5_CQM_OBJECT_NONRDMA_SCQ) { + /* The CTX of the SCQ of the nordma is managed by BAT/CLA. 
*/ + hinic5_cqm_cla_put(hinic5_cqm_handle, cla_table, index, count); + + /* release the index to the bitmap */ + bitmap = &cla_table->bitmap; + hinic5_cqm_bitmap_free(bitmap, index, count); + } +} + +static s32 hinic5_cqm_rdma_queue_ctx_create(struct tag_hinic5_cqm_object *object, struct tag_hinic5_cqm_bitmap_range *bp_range) +{ + struct tag_hinic5_cqm_queue *common = container_of(object, struct tag_hinic5_cqm_queue, object); + struct tag_hinic5_cqm_rdma_qinfo *qinfo = container_of(common, struct tag_hinic5_cqm_rdma_qinfo, common); + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)object->hinic5_cqm_handle; + struct tag_hinic5_cqm_bat_table *bat_table = &hinic5_cqm_handle->bat_table; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_object_table *object_table = NULL; + struct tag_hinic5_cqm_cla_table *cla_table = NULL; + struct tag_hinic5_cqm_bitmap *bitmap = NULL; + + if (object->object_type == HINIC5_CQM_OBJECT_RDMA_SRQ || object->object_type == HINIC5_CQM_OBJECT_RDMA_SCQ) { + if (object->object_type == HINIC5_CQM_OBJECT_RDMA_SRQ) + cla_table = hinic5_cqm_cla_table_get(bat_table, HINIC5_CQM_BAT_ENTRY_T_SRQC); + else + cla_table = hinic5_cqm_cla_table_get(bat_table, HINIC5_CQM_BAT_ENTRY_T_SCQC); + + if (!cla_table) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(rdma_hinic5_cqm_cla_table_get)); + return HINIC5_CQM_FAIL; + } + + qinfo->index_count = (ALIGN(qinfo->q_ctx_size, cla_table->obj_size)) / cla_table->obj_size; + qinfo->common.index = hinic5_cqm_general_bitmap_alloc(object, cla_table, bp_range, qinfo->common.index, + qinfo->index_count); + if (qinfo->common.index == HINIC5_CQM_INDEX_INVALID) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_general_bitmap_alloc)); + return HINIC5_CQM_FAIL; + } + + /* bitmap applies for index */ + bitmap = &cla_table->bitmap; + + /* find the trunk page from BAT/CLA and allocate the buffer */ + if 
(!HINIC5_CQM_IS_FAKE_CHILD(hinic5_cqm_handle)) { + /* The CLA memory of the Fake VF are holded by the parent + * function, so the Fake VF can't get the memory. */ + qinfo->common.q_ctx_vaddr = + hinic5_cqm_cla_get(hinic5_cqm_handle, cla_table, qinfo->common.index, + qinfo->index_count, &qinfo->common.q_ctx_paddr); + if (!qinfo->common.q_ctx_vaddr) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(rdma_hinic5_cqm_cla_get_lock)); + hinic5_cqm_bitmap_free(bitmap, qinfo->common.index, qinfo->index_count); + return HINIC5_CQM_FAIL; + } + } + + /* associate index and object */ + object_table = &cla_table->obj_table; + if (hinic5_cqm_object_table_insert(hinic5_cqm_handle, object_table, qinfo->common.index, object, true) != HINIC5_CQM_SUCCESS) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_FUNCTION_FAIL(rdma_hinic5_cqm_object_table_insert)); + hinic5_cqm_cla_put(hinic5_cqm_handle, cla_table, qinfo->common.index, qinfo->index_count); + hinic5_cqm_bitmap_free(bitmap, qinfo->common.index, qinfo->index_count); + return HINIC5_CQM_FAIL; + } + } + + return HINIC5_CQM_SUCCESS; +} + +static s32 hinic5_cqm_qinfo_judgment(struct tag_hinic5_cqm_rdma_qinfo *qinfo, struct tag_hinic5_cqm_buf *q_room_buf, + struct tag_hinic5_cqm_handle *hinic5_cqm_handle, struct hinic5_hwdev *handle) +{ + if (hinic5_cqm_buf_alloc(hinic5_cqm_handle, q_room_buf, true) == HINIC5_CQM_FAIL) { + hinic5_cqm_err(handle->dev_hdl, + HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_buf_alloc)); + return HINIC5_CQM_FAIL; + } + + /* queue header */ + qinfo->common.q_header_vaddr = + hinic5_cqm_kmalloc_align(sizeof(struct tag_hinic5_cqm_queue_header), + GFP_KERNEL | __GFP_ZERO, + HINIC5_CQM_QHEAD_ALIGN_ORDER); + if (!qinfo->common.q_header_vaddr) { + hinic5_cqm_err(handle->dev_hdl, + HINIC5_CQM_ALLOC_FAIL(q_header_vaddr)); + + if (qinfo->room_header_alloc) + hinic5_cqm_buf_free(q_room_buf, hinic5_cqm_handle->dev); + + return HINIC5_CQM_FAIL; + } + + qinfo->common.q_header_paddr = + dma_map_single(hinic5_cqm_handle->dev, 
+ qinfo->common.q_header_vaddr, + sizeof(struct tag_hinic5_cqm_queue_header), + DMA_BIDIRECTIONAL); + if (dma_mapping_error(hinic5_cqm_handle->dev, + qinfo->common.q_header_paddr) != 0) { + hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_MAP_FAIL(q_header_vaddr)); + + if (qinfo->room_header_alloc) { + hinic5_cqm_kfree_align(qinfo->common.q_header_vaddr); + qinfo->common.q_header_vaddr = NULL; + } + + if (qinfo->room_header_alloc) + hinic5_cqm_buf_free(q_room_buf, hinic5_cqm_handle->dev); + + return HINIC5_CQM_FAIL; + } + + return 0; +} + +/** + * Prototype : hinic5_cqm_rdma_queue_create + * Description : Create rdma queue. + * Input : struct tag_hinic5_cqm_object *object + * : struct tag_hinic5_cqm_bitmap_range *bp_range + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/15 + * Modification : Created function + */ +s32 hinic5_cqm_rdma_queue_create(struct tag_hinic5_cqm_object *object, struct tag_hinic5_cqm_bitmap_range *bp_range) +{ + struct tag_hinic5_cqm_queue *common = container_of(object, struct tag_hinic5_cqm_queue, object); + struct tag_hinic5_cqm_rdma_qinfo *qinfo = container_of(common, struct tag_hinic5_cqm_rdma_qinfo, + common); + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)object->hinic5_cqm_handle; + struct tag_hinic5_cqm_service *service = hinic5_cqm_handle->service + object->service_type; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_buf *q_room_buf = NULL; + u32 order = service->buf_order; + u32 buf_size = (u32)(PAGE_SIZE << order); + + if (qinfo->room_header_alloc) { + /* apply for queue room buffer */ + if (qinfo->common.current_q_room == HINIC5_CQM_RDMA_Q_ROOM_1) + q_room_buf = &qinfo->common.q_room_buf_1; + else + q_room_buf = &qinfo->common.q_room_buf_2; + + q_room_buf->buf_number = ALIGN(object->object_size, buf_size) / + buf_size; + q_room_buf->page_number = (q_room_buf->buf_number << order); + q_room_buf->buf_size = buf_size; + + if 
(hinic5_cqm_qinfo_judgment(qinfo, q_room_buf, hinic5_cqm_handle, handle) == HINIC5_CQM_FAIL) { + return HINIC5_CQM_FAIL; + } + } + + /* queue ctx */ + if (hinic5_cqm_rdma_queue_ctx_create(object, bp_range) == HINIC5_CQM_FAIL) { + hinic5_cqm_err(handle->dev_hdl, + HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_rdma_queue_ctx_create)); + + if (qinfo->room_header_alloc) { + dma_unmap_single(hinic5_cqm_handle->dev, qinfo->common.q_header_paddr, + sizeof(struct tag_hinic5_cqm_queue_header), + DMA_BIDIRECTIONAL); + } + + if (qinfo->room_header_alloc) { + hinic5_cqm_kfree_align(qinfo->common.q_header_vaddr); + qinfo->common.q_header_vaddr = NULL; + } + + if (qinfo->room_header_alloc) { + hinic5_cqm_buf_free(q_room_buf, hinic5_cqm_handle->dev); + } + + return HINIC5_CQM_FAIL; + } + + return HINIC5_CQM_SUCCESS; +} + +/** + * Prototype : hinic5_cqm_rdma_queue_delete + * Description : Create rdma queue. + * Input : struct tag_hinic5_cqm_object *object + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void hinic5_cqm_rdma_queue_delete(struct tag_hinic5_cqm_object *object) +{ + struct tag_hinic5_cqm_queue *common = container_of(object, struct tag_hinic5_cqm_queue, object); + struct tag_hinic5_cqm_rdma_qinfo *qinfo = container_of(common, struct tag_hinic5_cqm_rdma_qinfo, common); + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = (struct tag_hinic5_cqm_handle *)object->hinic5_cqm_handle; + struct tag_hinic5_cqm_bat_table *bat_table = &hinic5_cqm_handle->bat_table; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_object_table *object_table = NULL; + struct tag_hinic5_cqm_cla_table *cla_table = NULL; + struct tag_hinic5_cqm_buf *q_room_buf = NULL; + struct tag_hinic5_cqm_bitmap *bitmap = NULL; + u32 index = qinfo->common.index; + u32 count = qinfo->index_count; + + atomic_inc(&handle->hw_stats.hinic5_cqm_stats.hinic5_cqm_rdma_queue_delete_cnt); + + q_room_buf = (qinfo->common.current_q_room == 
HINIC5_CQM_RDMA_Q_ROOM_1) ? + &qinfo->common.q_room_buf_1 : &qinfo->common.q_room_buf_2; + + /* SCQ and SRQ are associated with independent SCQN and SRQN. */ + if (object->object_type == HINIC5_CQM_OBJECT_RDMA_SCQ || object->object_type == HINIC5_CQM_OBJECT_RDMA_SRQ) { + if (object->object_type == HINIC5_CQM_OBJECT_RDMA_SCQ) { + cla_table = hinic5_cqm_cla_table_get(bat_table, HINIC5_CQM_BAT_ENTRY_T_SCQC); + } else if (object->object_type == HINIC5_CQM_OBJECT_RDMA_SRQ) { + cla_table = hinic5_cqm_cla_table_get(bat_table, HINIC5_CQM_BAT_ENTRY_T_SRQC); + } + if (unlikely(!cla_table)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_FUNCTION_FAIL + (hinic5_cqm_cla_table_get)); + return; + } + /* disassociate index and object */ + object_table = &cla_table->obj_table; + hinic5_cqm_object_table_remove(hinic5_cqm_handle, object_table, + index, object, true); + } + + /* wait for completion to make sure all references are complete */ + if (atomic_dec_and_test(&object->refcount) != 0) + complete(&object->free); + else + hinic5_cqm_err(handle->dev_hdl, "Rdma queue del: object is referred by others, has to wait for completion\n"); + + wait_for_completion(&object->free); + destroy_completion(&object->free); + + /* If the q header exists, release. */ + if (qinfo->room_header_alloc && qinfo->common.q_header_vaddr) { + dma_unmap_single(hinic5_cqm_handle->dev, qinfo->common.q_header_paddr, + sizeof(struct tag_hinic5_cqm_queue_header), DMA_BIDIRECTIONAL); + + hinic5_cqm_kfree_align(qinfo->common.q_header_vaddr); + qinfo->common.q_header_vaddr = NULL; + } + + /* If q room exists, release. */ + hinic5_cqm_buf_free(q_room_buf, hinic5_cqm_handle->dev); + + /* SRQ and SCQ have independent CTX, released. 
*/ + if (object->object_type == HINIC5_CQM_OBJECT_RDMA_SRQ || + object->object_type == HINIC5_CQM_OBJECT_RDMA_SCQ) { + hinic5_cqm_cla_put(hinic5_cqm_handle, cla_table, index, count); + + /* release the index to the bitmap */ + bitmap = &cla_table->bitmap; + hinic5_cqm_bitmap_free(bitmap, index, count); + } +} + +/** + * Prototype : hinic5_cqm_rdma_table_create + * Description : Create RDMA-related entries. + * Input : struct tag_hinic5_cqm_object *object + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/15 + * Modification : Created function + */ +s32 hinic5_cqm_rdma_table_create(struct tag_hinic5_cqm_object *object) +{ + struct tag_hinic5_cqm_mtt_rdmarc *common = + container_of(object, struct tag_hinic5_cqm_mtt_rdmarc, object); + struct tag_hinic5_cqm_rdma_table *rdma_table = + container_of(common, struct tag_hinic5_cqm_rdma_table, common); + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = + (struct tag_hinic5_cqm_handle *)object->hinic5_cqm_handle; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_buf *buf = &rdma_table->buf; + + /* Less than one page is allocated by actual size. + * RDMARC also requires physical continuity. 
+ */ + if (object->object_size <= PAGE_SIZE || + object->object_type == HINIC5_CQM_OBJECT_RDMARC) { + buf->buf_number = 1; + buf->page_number = buf->buf_number; + buf->buf_size = object->object_size; + buf->direct.va = dma_alloc_coherent(hinic5_cqm_handle->dev, buf->buf_size, + &buf->direct.pa, GFP_ATOMIC); + if (unlikely(!buf->direct.va)) { + HINIC5_CQM_PTR_CHECK_ERR(HINIC5_CQM_ALLOC_FAIL(direct)); + return HINIC5_CQM_FAIL; + } + } else { /* page-by-page alignment greater than one page */ + buf->buf_number = ALIGN(object->object_size, PAGE_SIZE) / + PAGE_SIZE; + buf->page_number = buf->buf_number; + buf->buf_size = PAGE_SIZE; + if (hinic5_cqm_buf_alloc(hinic5_cqm_handle, buf, true) == HINIC5_CQM_FAIL) { + hinic5_cqm_err(handle->dev_hdl, + HINIC5_CQM_FUNCTION_FAIL(hinic5_cqm_buf_alloc)); + return HINIC5_CQM_FAIL; + } + } + + rdma_table->common.vaddr = (u8 *)(buf->direct.va); + + return HINIC5_CQM_SUCCESS; +} + +/** + * Prototype : hinic5_cqm_rdma_table_delete + * Description : Delete RDMA-related Entries. 
+ * Input : struct tag_hinic5_cqm_object *object + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void hinic5_cqm_rdma_table_delete(struct tag_hinic5_cqm_object *object) +{ + struct tag_hinic5_cqm_mtt_rdmarc *common = + container_of(object, struct tag_hinic5_cqm_mtt_rdmarc, object); + struct tag_hinic5_cqm_rdma_table *rdma_table = + container_of(common, struct tag_hinic5_cqm_rdma_table, common); + struct tag_hinic5_cqm_handle *hinic5_cqm_handle = + (struct tag_hinic5_cqm_handle *)object->hinic5_cqm_handle; + struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle; + struct tag_hinic5_cqm_buf *buf = &rdma_table->buf; + + atomic_inc(&handle->hw_stats.hinic5_cqm_stats.hinic5_cqm_rdma_table_delete_cnt); + + if (buf->buf_number == 1) { + if (buf->direct.va) { + dma_free_coherent(hinic5_cqm_handle->dev, buf->buf_size, + buf->direct.va, buf->direct.pa); + buf->direct.va = NULL; + } + } else { + hinic5_cqm_buf_free(buf, hinic5_cqm_handle->dev); + } +} + +/** + * Prototype : hinic5_cqm_rdma_table_offset_addr + * Description : Obtain the address of the RDMA entry based on the offset. + * The offset is the index. 
+ * Input        : struct tag_hinic5_cqm_object *object
+ *                u32 offset
+ *                dma_addr_t *paddr
+ * Output       : None
+ * Return Value : u8 *
+ * 1.Date         : 2015/4/15
+ *   Modification : Created function
+ */
+u8 *hinic5_cqm_rdma_table_offset_addr(struct tag_hinic5_cqm_object *object, u32 offset, dma_addr_t *paddr)
+{
+	struct tag_hinic5_cqm_mtt_rdmarc *common =
+		container_of(object, struct tag_hinic5_cqm_mtt_rdmarc, object);
+	struct tag_hinic5_cqm_rdma_table *rdma_table =
+		container_of(common, struct tag_hinic5_cqm_rdma_table, common);
+	struct tag_hinic5_cqm_handle *hinic5_cqm_handle =
+		(struct tag_hinic5_cqm_handle *)object->hinic5_cqm_handle;
+	struct hinic5_hwdev *handle = hinic5_cqm_handle->ex_handle;
+	struct tag_hinic5_cqm_buf *buf = &rdma_table->buf;
+	struct tag_hinic5_cqm_buf_list *buf_node = NULL;
+	u32 buf_id, buf_offset;
+
+	/* Reject offsets outside [index_base, index_base + index_number). */
+	if (offset < rdma_table->common.index_base ||
+	    ((offset - rdma_table->common.index_base) >=
+	     rdma_table->common.index_number)) {
+		hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(offset));
+		return NULL;
+	}
+
+	/* Single physically-contiguous buffer: index straight into it. */
+	if (buf->buf_number == 1) {
+		buf_offset = (u32)((offset - rdma_table->common.index_base) *
+				   (sizeof(dma_addr_t)));
+
+		*paddr = buf->direct.pa + buf_offset;
+		return ((u8 *)(buf->direct.va)) + buf_offset;
+	}
+
+	/* Page-by-page table: locate the page and the offset inside it. */
+	buf_id = (offset - rdma_table->common.index_base) /
+		 (PAGE_SIZE / sizeof(dma_addr_t));
+	buf_offset = (u32)((offset - rdma_table->common.index_base) -
+			   (buf_id * (PAGE_SIZE / sizeof(dma_addr_t))));
+	buf_offset = (u32)(buf_offset * sizeof(dma_addr_t));
+
+	if (buf_id >= buf->buf_number) {
+		hinic5_cqm_err(handle->dev_hdl, HINIC5_CQM_WRONG_VALUE(buf_id));
+		return NULL;
+	}
+	buf_node = buf->buf_list + buf_id;
+	*paddr = buf_node->pa + buf_offset;
+
+	/* Fix: return the vaddr of the page that actually holds the entry.
+	 * buf->direct.va is only populated in the single-buffer branch above,
+	 * so the paged path must use buf_node->va (matching *paddr, which is
+	 * already taken from buf_node->pa).
+	 */
+	return ((u8 *)(buf_node->va)) + buf_offset;
+}
diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_object_intern.h
b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_object_intern.h new file mode 100644 index 00000000..c9be0342 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_cqm/hinic5_cqm_object_intern.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_CQM_OBJECT_INTERN_H +#define HINIC5_CQM_OBJECT_INTERN_H + +#include "ossl_knl.h" +#include "hinic5_cqm_object.h" +#include "hinic5_cqm_bitmap_table.h" + +#define HINIC5_CQM_CQ_DEPTH_MAX 32768 +#define HINIC5_CQM_CQ_DEPTH_MIN 256 + +/* linkwqe */ +#define HINIC5_CQM_LINK_WQE_CTRLSL_VALUE 2 +#define HINIC5_CQM_LINK_WQE_LP_VALID 1 +#define HINIC5_CQM_LINK_WQE_LP_INVALID 0 +#define HINIC5_CQM_LINK_WQE_OWNER_VALID 1 +#define HINIC5_CQM_LINK_WQE_OWNER_INVALID 0 + +#define HINIC5_CQM_ADDR_COMBINE(high_addr, low_addr) \ + ((((dma_addr_t)(high_addr)) << 32) + ((dma_addr_t)(low_addr))) +#define HINIC5_CQM_ADDR_HI(addr) ((u32)((u64)(addr) >> 32)) +#define HINIC5_CQM_ADDR_LW(addr) ((u32)((u64)(addr) & 0xffffffff)) + +#define HINIC5_CQM_QPC_LAYOUT_TABLE_SIZE 16 + +/* cla bitmap */ +#define HINIC5_CQM_DYNAMIC_XID_LOW_BIT_MASK(lb_mode) \ + ((~(lb_mode)) & HINIC5_CQM_XID_LOW_BITS_MASK) + +#define HINIC5_CQM_DYNAMIC_XID_ALLOC_MODE(xid) \ + (((xid) & HINIC5_CQM_DYNAMIC_XID_MASK) == HINIC5_CQM_DYNAMIC_XID_MASK) +#define HINIC5_CQM_DYNAMIC_XID_LB_MODE(xid) \ + (((xid) >> HINIC5_CQM_XID_LB_MODE_SHIFT) & HINIC5_CQM_XID_LB_MODE_MASK) +#define HINIC5_CQM_DYNAMIC_XID_LOW_BITS(xid) \ + (((xid) >> HINIC5_CQM_XID_LOW_BITS_SHIFT) & HINIC5_CQM_XID_LOW_BITS_MASK) +#define HINIC5_CQM_DYNAMIC_XID_SEARCH_MODE(xid) \ + (((xid) >> HINIC5_CQM_XID_SEARCH_MODE_SHIFT) & HINIC5_CQM_XID_SEARCH_MODE_MASK) + +#define HINIC5_CQM_BP_RANGE_VALID(start, end, min_index, max_index) \ + (((start) >= (min_index)) && ((start) <= (max_index)) && \ + ((end) >= (min_index)) && ((end) <= (max_index)) && ((start) != (end))) + +struct 
tag_hinic5_cqm_qpc_layout_table_node { + u32 type; + u32 size; + u32 offset; + struct tag_hinic5_cqm_object *object; +}; + +struct tag_hinic5_cqm_qpc_mpt_info { + struct tag_hinic5_cqm_qpc_mpt common; + /* Different service has different QPC. + * The large QPC/mpt will occupy some continuous indexes in bitmap. + */ + u32 index_count; + struct tag_hinic5_cqm_qpc_layout_table_node + qpc_layout_table[HINIC5_CQM_QPC_LAYOUT_TABLE_SIZE]; +}; + +struct tag_hinic5_cqm_nonrdma_qinfo { + struct tag_hinic5_cqm_queue common; + u32 wqe_size; + /* Number of WQEs in each buffer (excluding link WQEs) + * For SRQ, the value is the number of WQEs contained in a container. + */ + u32 wqe_per_buf; + u32 q_ctx_size; + /* When different services use CTXs of different sizes, + * a large CTX occupies multiple consecutive indexes in the bitmap. + */ + u32 index_count; + + /* add for srq */ + u32 container_size; +}; + +struct tag_hinic5_cqm_rdma_qinfo { + struct tag_hinic5_cqm_queue common; + bool room_header_alloc; + /* This field is used to temporarily record the new object_size during + * CQ resize. + */ + u32 new_object_size; + u32 q_ctx_size; + /* When different services use CTXs of different sizes, + * a large CTX occupies multiple consecutive indexes in the bitmap. 
+ */ + u32 index_count; +}; + +struct tag_hinic5_cqm_rdma_table { + struct tag_hinic5_cqm_mtt_rdmarc common; + struct tag_hinic5_cqm_buf buf; +}; + +void hinic5_cqm_container_free(u8 *srq_head_container, u8 *srq_tail_container, + struct tag_hinic5_cqm_queue *common); +s32 hinic5_cqm_container_create(struct tag_hinic5_cqm_object *object, + u8 **container_addr, bool link); +s32 hinic5_cqm_share_recv_queue_create(struct tag_hinic5_cqm_object *object); +void hinic5_cqm_share_recv_queue_delete(struct tag_hinic5_cqm_object *object); +s32 hinic5_cqm_qpc_mpt_create(struct tag_hinic5_cqm_object *object, + struct tag_hinic5_cqm_bitmap_range *bp_range); +void hinic5_cqm_qpc_mpt_delete(struct tag_hinic5_cqm_object *object); +s32 hinic5_cqm_nonrdma_queue_create(struct tag_hinic5_cqm_object *object); +void hinic5_cqm_nonrdma_queue_delete(struct tag_hinic5_cqm_object *object); +s32 hinic5_cqm_rdma_queue_create(struct tag_hinic5_cqm_object *object, + struct tag_hinic5_cqm_bitmap_range *bp_range); +void hinic5_cqm_rdma_queue_delete(struct tag_hinic5_cqm_object *object); +s32 hinic5_cqm_rdma_table_create(struct tag_hinic5_cqm_object *object); +void hinic5_cqm_rdma_table_delete(struct tag_hinic5_cqm_object *object); +u8 *hinic5_cqm_rdma_table_offset_addr(struct tag_hinic5_cqm_object *object, + u32 offset, dma_addr_t *paddr); + +#endif /* HINIC5_CQM_OBJECT_INTERN_H */ diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_vram/hinic5_hinic5_vram.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_vram/hinic5_hinic5_vram.c new file mode 100644 index 00000000..825c1a5a --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_vram/hinic5_hinic5_vram.c @@ -0,0 +1,294 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * Copyright (c) Huawei Technologies Co., Ltd. 2022-2023. All rights reserved. 
+ * Description: hinic5_hinic5_vram.c + * Author: - + * Create: + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/async.h> + +#include "ossl_knl.h" +#include "hinic5_typedef_inner.h" +#include "hinic5_mt.h" +#include "hinic5_hwdev.h" +#include "hinic5_common.h" +#include "hinic5_crm.h" +#include "hinic5_sriov.h" +#include "hinic5_dev_mgmt.h" +#include "hinic5_nictool.h" +#include "hinic5_hw.h" + +#include "hinic5_vram_common.h" +#include "hinic5_hinic5_vram.h" + +static ASYNC_DOMAIN_EXCLUSIVE(g_hiudk_async_domain); +static hiudk_async_ctrl g_hiudk_async_ctrl; + +int hiudk5_register_flush_fn(void *lld_dev, hiudk_flush_fn fn) +{ + int i; + int cur_idx = -1; + + if (!lld_dev) { + pr_err("Sdk: register flush function para is null.\n"); + return -ENODEV; + } + + spin_lock(&g_hiudk_async_ctrl.lock); + + for (i = CMD_MAX_MAX_PF_NUM - 1; i >= 0; i--) { + if (!g_hiudk_async_ctrl.flush_infos[i].lld_dev) { + cur_idx = i; + break; + } + } + + if (cur_idx == -1) { + spin_unlock(&g_hiudk_async_ctrl.lock); + pr_err("Sdk: register flush function failed, no available async crtl info.\n"); + return -ENOMEM; + } + + g_hiudk_async_ctrl.flush_infos[cur_idx].lld_dev = lld_dev; + g_hiudk_async_ctrl.flush_infos[cur_idx].flush_ops = fn; + g_hiudk_async_ctrl.flush_infos[cur_idx].ret = 0; + + spin_unlock(&g_hiudk_async_ctrl.lock); + + return 0; +} +EXPORT_SYMBOL(hiudk5_register_flush_fn); + +int hiudk5_unregister_flush_fn(void *lld_dev) +{ + int i; + + if (!lld_dev) { + pr_err("Sdk: unregister flush function para is null.\n"); + return -ENODEV; + } + + spin_lock(&g_hiudk_async_ctrl.lock); + + for (i = CMD_MAX_MAX_PF_NUM - 1; i >= 0; i--) { + if (lld_dev == g_hiudk_async_ctrl.flush_infos[i].lld_dev) { + g_hiudk_async_ctrl.flush_infos[i].lld_dev = NULL; + g_hiudk_async_ctrl.flush_infos[i].flush_ops = NULL; + g_hiudk_async_ctrl.flush_infos[i].ret = 0; + + spin_unlock(&g_hiudk_async_ctrl.lock); + return 0; + } 
+ } + + spin_unlock(&g_hiudk_async_ctrl.lock); + + return -ENODEV; +} +EXPORT_SYMBOL(hiudk5_unregister_flush_fn); + +void hinic5_flush_dev(void *priv_data, async_cookie_t cookie) +{ + hiudk_dev_flush_infos *cur_dev = priv_data; + + if (!cur_dev->lld_dev || !cur_dev->flush_ops) + return; + + cur_dev->ret = cur_dev->flush_ops(cur_dev->lld_dev); +} + +STATIC int hisdk5_notify_flush_dev(struct notifier_block *nb, + unsigned long action, + void *data) +{ + int i; + + rtnl_lock(); + + for (i = 0; i < CMD_MAX_MAX_PF_NUM; i++) + if (g_hiudk_async_ctrl.flush_infos[i].lld_dev) + async_schedule_domain(hinic5_flush_dev, + &g_hiudk_async_ctrl.flush_infos[i], + &g_hiudk_async_domain); + + rtnl_unlock(); + + return 0; +} + +STATIC int hiudk_os_hotreplace_msg_to_mpu(u8 replace_flag) +{ + int ret; + void *dev = NULL; + + dev = hinic5_get_ppf_dev(); + if (!dev) { + pr_err("Get ppf dev failed before os hotreplace.\n"); + return -ENXIO; + } + + ret = hinic5_set_ppf_tbl_hotreplace_flag(dev, replace_flag); + if (ret != 0) { + pr_err("Send mbox to mpu failed in hiudk, ret:%d, flag:%u.\n", ret, replace_flag); + return ret; + } + + return 0; +} + +STATIC int hiudk_notify_pre_update(struct notifier_block *nb, + unsigned long action, + void *data) +{ + int ret; + + pr_info("Set driver flag and mpu flag before os hotreplace.\n"); + // set kexec status set to 1, indicate doing kexec + ret = hinic5_set_kexec_status(1); + if (ret != 0) { + pr_err("Set kexec flag failed before os hotreplace.\n"); + return ret; + } + + ret = hiudk_os_hotreplace_msg_to_mpu(MPU_OS_HOTREPLACE_FLAG); + if (ret != 0) { + pr_err("Send mbox to mpu failed before os hotreplace.\n"); + return ret; + } + + return 0; +} + +STATIC int hiudk_notify_post_update(struct notifier_block *nb, + unsigned long action, + void *data) +{ + int ret; + + pr_info("Clear driver flag and mpu flag after os hotreplace.\n"); + // set kexec status set to 0, indicate kexec done + ret = hinic5_set_kexec_status(0); + if (ret != 0) { + pr_err("Set 
kexec flag failed after os hotreplace.\n"); + return ret; + } + + ret = hiudk_os_hotreplace_msg_to_mpu(0); + if (ret != 0) { + pr_err("Send mbox to mpu failed after os hotreplace.\n"); + return ret; + } + + return 0; +} + +int hinic5_wait_for_devices_flush(struct notifier_block *nb, + unsigned long action, + void *data) +{ + int i; + int ret = 0; + + async_synchronize_full_domain(&g_hiudk_async_domain); + + for (i = 0; i < CMD_MAX_MAX_PF_NUM; i++) { + if (g_hiudk_async_ctrl.flush_infos[i].ret != 0) { + ret = g_hiudk_async_ctrl.flush_infos[i].ret; + pr_err("Sdk: wait netdev[%d] flush done error, ret:%d.\n", i, ret); + return ret; + } + } + + return 0; +} +EXPORT_SYMBOL(hinic5_wait_for_devices_flush); + +static struct notifier_block hiudk_notifier_pre_update = { + .notifier_call = hiudk_notify_pre_update, + .next = NULL, + .priority = 0 +}; + +static struct notifier_block hisdk5_notifier_flush_dev = { + .notifier_call = hisdk5_notify_flush_dev, + .next = NULL, + .priority = 0 +}; + +static struct notifier_block hisdk5_notifier_wait_flush_done = { + .notifier_call = hinic5_wait_for_devices_flush, + .next = NULL, + .priority = 0 +}; + +static struct notifier_block hiudk_notifier_post_update = { + .notifier_call = hiudk_notify_post_update, + .next = NULL, + .priority = 0 +}; + +int hisdk5_hinic5_vram_init(void) +{ + int err; + + spin_lock_init(&g_hiudk_async_ctrl.lock); + lookup_hinic5_vram_related_symbols(); + + err = hinic5_get_kexec_status(); + if (err != 0) { + pr_err("Get in kexec status failed, err: %d\n", err); + goto get_kexec_status_err; + } + + err = hi_register_nvwa_notifier(PRE_UPDATE_KERNEL, &hiudk_notifier_pre_update); + if (err != 0) { + pr_err("Register nvwa pre update failed, err: %d\n", err); + goto register_pre_update_nvwa_err; + } + + err = hi_register_nvwa_notifier(POST_UPDATE_KERNEL, &hiudk_notifier_post_update); + if (err != 0) { + pr_err("Register nvwa post update failed, err: %d\n", err); + goto register_post_update_nvwa_err; + } + + err = 
hi_register_nvwa_notifier(FLUSH_DURING_KUP, &hisdk5_notifier_flush_dev); + if (err != 0) { + pr_err("Register nvwa flush device failed, err: %d\n", err); + goto register_flush_dev_err; + } + + err = hi_register_euleros_reboot_notifier(&hisdk5_notifier_wait_flush_done); + if (err != 0) { + pr_err("Register wait flush device notify failed, err: %d\n", err); + goto register_reboot_err; + } + + return 0; + +register_reboot_err: + (void)hi_unregister_nvwa_notifier(FLUSH_DURING_KUP, &hisdk5_notifier_flush_dev); +register_flush_dev_err: + (void)hi_unregister_nvwa_notifier(POST_UPDATE_KERNEL, &hiudk_notifier_post_update); +register_post_update_nvwa_err: + (void)hi_unregister_nvwa_notifier(PRE_UPDATE_KERNEL, &hiudk_notifier_pre_update); +register_pre_update_nvwa_err: +get_kexec_status_err: + spin_lock_deinit(&g_hiudk_async_ctrl.lock); + return err; +} + +void hisdk5_hinic5_vram_deinit(void) +{ + (void)hi_unregister_euleros_reboot_notifier(&hisdk5_notifier_wait_flush_done); + (void)hi_unregister_nvwa_notifier(FLUSH_DURING_KUP, &hisdk5_notifier_flush_dev); + (void)hi_unregister_nvwa_notifier(POST_UPDATE_KERNEL, &hiudk_notifier_pre_update); + (void)hi_unregister_nvwa_notifier(PRE_UPDATE_KERNEL, &hiudk_notifier_post_update); + + spin_lock_deinit(&g_hiudk_async_ctrl.lock); +} diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_vram/hinic5_vram_common.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_vram/hinic5_vram_common.c new file mode 100644 index 00000000..f7432a21 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hinic5_vram/hinic5_vram_common.c @@ -0,0 +1,210 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2022. All rights reserved. 
+ * Description: Header File, hinic5_vram common + * Create: 2023/7/19 + */ +#include <linux/kallsyms.h> +#include <linux/errno.h> +#include <linux/version.h> + +#include "ossl_knl.h" +#include "hinic5_typedef_inner.h" +#include "hinic5_hinic5_vram_api.h" +#include "hinic5_vram_common.h" + +#ifndef __UEFI__ + +static int g_use_hinic5_vram; +static int g_hinic5_in_kexec; + +STATIC register_nvwa_notifier_t _register_nvwa_notifier; +STATIC unregister_nvwa_notifier_t _unregister_nvwa_notifier; +STATIC register_euleros_reboot_notifier_t _register_euleros_reboot_notifier; +STATIC unregister_euleros_reboot_notifier_t _unregister_euleros_reboot_notifier; +STATIC hinic5_vram_kalloc_t _hinic5_vram_kalloc; +STATIC vpmem_kalloc_node_t _hinic5_vram_kalloc_node; +STATIC hinic5_vram_kfree_t _hinic5_vram_kfree; +STATIC hinic5_vram_get_gfp_hinic5_vram_t _hinic5_vram_get_gfp_hinic5_vram; + +int hi_register_nvwa_notifier(int hook, struct notifier_block *nb) +{ + if (_register_nvwa_notifier) + return _register_nvwa_notifier(hook, nb); + + return -EINVAL; +} + +int hi_unregister_nvwa_notifier(int hook, struct notifier_block *nb) +{ + if (_unregister_nvwa_notifier) + return _unregister_nvwa_notifier(hook, nb); + + return -EINVAL; +} + +int hi_register_euleros_reboot_notifier(struct notifier_block *nb) +{ + if (_register_euleros_reboot_notifier) + return _register_euleros_reboot_notifier(nb); + + return -EINVAL; +} + +int hi_unregister_euleros_reboot_notifier(struct notifier_block *nb) +{ + if (_unregister_euleros_reboot_notifier) + return _unregister_euleros_reboot_notifier(nb); + + return -EINVAL; +} + +void __iomem *hinic5_hinic5_vram_kalloc(char *name, u64 size) +{ + if (_hinic5_vram_kalloc && strnlen(name, HINIC5_VRAM_NAME_SIZE) < HINIC5_VRAM_NAME_SIZE) + return _hinic5_vram_kalloc(name, size); + + return NULL; +} +EXPORT_SYMBOL(hinic5_hinic5_vram_kalloc); + +void __iomem *hinic5_hinic5_vram_kalloc_node(char *name, u64 size, u8 numa) +{ + if (_hinic5_vram_kalloc_node && + 
strnlen(name, HINIC5_VRAM_NAME_SIZE) < HINIC5_VRAM_NAME_SIZE) { + if (numa == HINIC5_VRAM_AFFINITY_NUMA || numa == HINIC5_VRAM_NO_NUMA) + return _hinic5_vram_kalloc_node(name, size, numa); + return _hinic5_vram_kalloc_node(name, size, + numa >= nr_node_ids ? HINIC5_VRAM_NO_NUMA : numa); + } else { + return hinic5_hinic5_vram_kalloc(name, size); + } +} +EXPORT_SYMBOL(hinic5_hinic5_vram_kalloc_node); + +void hinic5_hinic5_vram_kfree(void __iomem *vaddr, char *name, u64 size) +{ + if (_hinic5_vram_kfree && vaddr && + strnlen(name, HINIC5_VRAM_NAME_SIZE) < HINIC5_VRAM_NAME_SIZE) + _hinic5_vram_kfree(vaddr, name, size); +} +EXPORT_SYMBOL(hinic5_hinic5_vram_kfree); + +gfp_t hinic5_hinic5_vram_get_gfp_hinic5_vram(void) +{ + if (_hinic5_vram_get_gfp_hinic5_vram) + return _hinic5_vram_get_gfp_hinic5_vram(); + return 0; +} +EXPORT_SYMBOL(hinic5_hinic5_vram_get_gfp_hinic5_vram); + +void lookup_hinic5_vram_related_symbols(void) +{ +#if KERNEL_VERSION(5, 10, 0) > LINUX_VERSION_CODE + _register_nvwa_notifier = (register_nvwa_notifier_t) + kallsyms_lookup_name("register_nvwa_notifier"); + + _unregister_nvwa_notifier = (unregister_nvwa_notifier_t) + kallsyms_lookup_name("unregister_nvwa_notifier"); + + _register_euleros_reboot_notifier = (register_euleros_reboot_notifier_t) + kallsyms_lookup_name("register_euleros_reboot_notifier"); + + _unregister_euleros_reboot_notifier = (unregister_euleros_reboot_notifier_t) + kallsyms_lookup_name("unregister_euleros_reboot_notifier"); + + _hinic5_vram_kalloc = (hinic5_vram_kalloc_t) + kallsyms_lookup_name("hinic5_vram_kalloc"); + + _hinic5_vram_kalloc_node = (vpmem_kalloc_node_t) + kallsyms_lookup_name("vpmem_kalloc_node"); + + _hinic5_vram_kfree = (hinic5_vram_kfree_t) + kallsyms_lookup_name("hinic5_vram_kfree"); + + _hinic5_vram_get_gfp_hinic5_vram = (hinic5_vram_get_gfp_hinic5_vram_t) + kallsyms_lookup_name("hinic5_vram_get_hinic5_vram_gfp_t"); +#else +/* only EulerOS and HCE have kallsyms_lookup_name_wrap */ +#if (defined(OS_EULER) || 
defined(OS_HCE)) + _register_nvwa_notifier = (register_nvwa_notifier_t) + kallsyms_lookup_name_wrap("register_nvwa_notifier"); + + _unregister_nvwa_notifier = (unregister_nvwa_notifier_t) + kallsyms_lookup_name_wrap("unregister_nvwa_notifier"); + + _register_euleros_reboot_notifier = (register_euleros_reboot_notifier_t) + kallsyms_lookup_name_wrap("register_euleros_reboot_notifier"); + + _unregister_euleros_reboot_notifier = (unregister_euleros_reboot_notifier_t) + kallsyms_lookup_name_wrap("unregister_euleros_reboot_notifier"); + + _hinic5_vram_kalloc = (hinic5_vram_kalloc_t) + kallsyms_lookup_name_wrap("hinic5_vram_kalloc"); + + _hinic5_vram_kalloc_node = (vpmem_kalloc_node_t) + kallsyms_lookup_name_wrap("vpmem_kalloc_node"); + + _hinic5_vram_kfree = (hinic5_vram_kfree_t) + kallsyms_lookup_name_wrap("hinic5_vram_kfree"); + + _hinic5_vram_get_gfp_hinic5_vram = (hinic5_vram_get_gfp_hinic5_vram_t) + kallsyms_lookup_name_wrap("hinic5_vram_get_hinic5_vram_gfp_t"); +#endif +#endif +} +EXPORT_SYMBOL(lookup_hinic5_vram_related_symbols); + +int hinic5_set_kexec_status(int status) +{ + int *kexec_status_addr = NULL; + + kexec_status_addr = hinic5_hinic5_vram_kalloc(KEXEC_SIGN, HINIC5_VRAM_BLOCK_SIZE_2M); + if (!kexec_status_addr) { + pr_err("set kexec status hinic5_vram kalloc failed.\n"); + return -ENOMEM; + } + + *kexec_status_addr = status; + g_hinic5_in_kexec = *kexec_status_addr; + + return 0; +} +EXPORT_SYMBOL(hinic5_set_kexec_status); + +int hinic5_get_kexec_status(void) +{ + int *kexec_status_addr = NULL; + + kexec_status_addr = hinic5_hinic5_vram_kalloc(KEXEC_SIGN, HINIC5_VRAM_BLOCK_SIZE_2M); + if (!kexec_status_addr) { + pr_err("get kexec status hinic5_vram kalloc failed.\n"); + return -ENOMEM; + } + + g_hinic5_in_kexec = *kexec_status_addr; + hinic5_hinic5_vram_kfree((void *)kexec_status_addr, KEXEC_SIGN, HINIC5_VRAM_BLOCK_SIZE_2M); + + return 0; +} +EXPORT_SYMBOL(hinic5_get_kexec_status); + +int get_use_hinic5_vram_flag(void) +{ + return g_use_hinic5_vram; +} 
+EXPORT_SYMBOL(get_use_hinic5_vram_flag); + +void set_use_hinic5_vram_flag(bool flag) +{ + g_use_hinic5_vram = flag; +} +EXPORT_SYMBOL(set_use_hinic5_vram_flag); + +int hinic5_vram_get_kexec_flag(void) +{ + return g_hinic5_in_kexec; +} +EXPORT_SYMBOL(hinic5_vram_get_kexec_flag); + +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_api_cmd.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_api_cmd.c new file mode 100644 index 00000000..3b91eb0f --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_api_cmd.c @@ -0,0 +1,1216 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/completion.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/semaphore.h> +#include <linux/jiffies.h> +#include <linux/delay.h> + +#include "ossl_knl.h" +#include "hinic5_crm.h" +#include "hinic5_hw.h" +#include "hinic5_common.h" +#include "hinic5_hwdev.h" +#include "hinic5_csr_inner.h" +#include "hinic5_hwif_inner.h" +#include "hinic5_api_cmd.h" + +#define API_CMD_CHAIN_CELL_SIZE_SHIFT 6U + +#define API_CMD_CELL_DESC_SIZE 8 +#define API_CMD_CELL_DATA_ADDR_SIZE 8 + +#define API_CHAIN_NUM_CELLS 32 +#define API_CHAIN_CELL_SIZE 128 +#define API_CHAIN_RSP_DATA_SIZE 128 + +#define API_CMD_CELL_WB_ADDR_SIZE 8 + +#define API_CHAIN_CELL_ALIGNMENT 8 + +#define API_CMD_TIMEOUT 10000 +#define API_CMD_STATUS_TIMEOUT 10000 + +#define API_CMD_BUF_SIZE 2048ULL + +#define API_CMD_NODE_ALIGN_SIZE 512ULL +#define API_PAYLOAD_ALIGN_SIZE 64ULL + +#define API_CHAIN_RESP_ALIGNMENT 128ULL + +#define POLLING_COMPLETION_TIMEOUT_DEFAULT 1000U + +#define API_CMD_RESPONSE_DATA_PADDR(val) be64_to_cpu(*((u64 *)(val))) + +#define READ_API_CMD_PRIV_DATA(id, token) ((((u32)(id)) << 16) + (token)) +#define WRITE_API_CMD_PRIV_DATA(id) (((u8)(id)) 
<< 16) + +#define MASKED_IDX(chain, idx) ((idx) & ((chain)->num_cells - 1)) + +#define SIZE_4BYTES(size) (ALIGN((u32)(size), 4U) >> 2) +#define SIZE_8BYTES(size) (ALIGN((u32)(size), 8U) >> 3) + +enum api_cmd_data_format { + SGL_DATA = 1, +}; + +enum api_cmd_type { + API_CMD_WRITE_TYPE = 0, + API_CMD_READ_TYPE = 1, +}; + +enum api_cmd_bypass { + NOT_BYPASS = 0, + BYPASS = 1, +}; + +enum api_cmd_resp_aeq { + NOT_TRIGGER = 0, + TRIGGER = 1, +}; + +enum api_cmd_chn_code { + APICHN_0 = 0, +}; + +enum api_cmd_chn_rsvd { + APICHN_VALID = 0, + APICHN_INVALID = 1, +}; + +#define API_DESC_LEN (7) + +static u8 xor_chksum_set(void *data, u32 data_len) +{ + int idx; + u8 checksum = 0; + u8 *val = data; + u8 max_idx = (u8)(data_len / sizeof(u8)); + + for (idx = 0; idx < API_DESC_LEN && idx < max_idx; idx++) + checksum ^= val[idx]; + + return checksum; +} + +static void set_prod_idx(struct hinic5_api_cmd_chain *chain) +{ + enum hinic5_api_cmd_chain_type chain_type = chain->chain_type; + struct hinic5_hwif *hwif = chain->hwdev->hwif; + u32 hw_prod_idx_addr = HINIC5_CSR_API_CMD_CHAIN_PI_ADDR((u32)chain_type); + u32 prod_idx = chain->prod_idx; + + hinic5_hwif_write_reg(hwif, hw_prod_idx_addr, prod_idx); +} + +static u32 get_hw_cons_idx(struct hinic5_api_cmd_chain *chain) +{ + u32 addr, val; + + addr = HINIC5_CSR_API_CMD_STATUS_0_ADDR((u32)chain->chain_type); + val = hinic5_hwif_read_reg(chain->hwdev->hwif, addr); + + return HINIC5_API_CMD_STATUS_GET(val, CONS_IDX); +} + +static void dump_api_chain_reg(struct hinic5_api_cmd_chain *chain) +{ + void *dev = chain->hwdev->dev_hdl; + u32 addr, val; +#if defined(__LINUX__) && !defined(__UBUS_DRIVER__) + struct pci_dev *pdev = NULL; + u16 pci_cmd = 0; +#endif + + addr = HINIC5_CSR_API_CMD_STATUS_0_ADDR((u32)chain->chain_type); + val = hinic5_hwif_read_reg(chain->hwdev->hwif, addr); + + sdk_err(dev, "Chain type: 0x%x, cpld error: 0x%x, check error: 0x%x, current fsm: 0x%x\n", + chain->chain_type, HINIC5_API_CMD_STATUS_GET(val, CPLD_ERR), + 
HINIC5_API_CMD_STATUS_GET(val, CHKSUM_ERR), + HINIC5_API_CMD_STATUS_GET(val, FSM)); + + sdk_err(dev, "Chain hw current ci: 0x%x\n", + HINIC5_API_CMD_STATUS_GET(val, CONS_IDX)); + + addr = HINIC5_CSR_API_CMD_CHAIN_PI_ADDR((u32)chain->chain_type); + val = hinic5_hwif_read_reg(chain->hwdev->hwif, addr); + sdk_err(dev, "Chain hw current pi: 0x%x\n", val); +#if defined(__LINUX__) && !defined(__UBUS_DRIVER__) + pdev = to_pci_dev(chain->hwdev->dev_hdl); + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); + sdk_err(dev, "PCI command reg: 0x%x\n", pci_cmd); +#endif +} + +/** + * chain_busy - check if the chain is still processing last requests + * @chain: chain to check + **/ +static int chain_busy(struct hinic5_api_cmd_chain *chain) +{ + void *dev = chain->hwdev->dev_hdl; + struct hinic5_api_cmd_cell_ctxt *ctxt = NULL; + u64 resp_header; + + ctxt = &chain->cell_ctxt[chain->prod_idx]; + + switch (chain->chain_type) { + case HINIC5_API_CMD_MULTI_READ: + case HINIC5_API_CMD_POLL_READ: + resp_header = be64_to_cpu(ctxt->resp->header); + if (ctxt->status != 0 || (HINIC5_API_CMD_RESP_HEADER_VALID(resp_header) == 0)) { + sdk_err(dev, "Context(0x%x) busy!, pi: %u, resp_header: 0x%08x%08x\n", + ctxt->status, chain->prod_idx, + upper_32_bits(resp_header), + lower_32_bits(resp_header)); + dump_api_chain_reg(chain); + return -EBUSY; + } + break; + case HINIC5_API_CMD_POLL_WRITE: + case HINIC5_API_CMD_WRITE_TO_MGMT_CPU: + case HINIC5_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + chain->cons_idx = get_hw_cons_idx(chain); + + if (chain->cons_idx == MASKED_IDX(chain, chain->prod_idx + 1)) { + sdk_err(dev, "API CMD chain %d is busy, cons_idx = %u, prod_idx = %u\n", + chain->chain_type, chain->cons_idx, + chain->prod_idx); + dump_api_chain_reg(chain); + return -EBUSY; + } + break; + default: + sdk_err(dev, "Unknown Chain type %d\n", chain->chain_type); + return -EINVAL; + } + + return 0; +} + +/** + * get_cell_data_size - get the data size of specific cell type + * @type: chain type + **/ +static u16 
get_cell_data_size(enum hinic5_api_cmd_chain_type type) +{ + u16 cell_data_size = 0; + + switch (type) { + case HINIC5_API_CMD_POLL_READ: + cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE + + API_CMD_CELL_WB_ADDR_SIZE + + API_CMD_CELL_DATA_ADDR_SIZE, + API_CHAIN_CELL_ALIGNMENT); + break; + + case HINIC5_API_CMD_WRITE_TO_MGMT_CPU: + case HINIC5_API_CMD_POLL_WRITE: + case HINIC5_API_CMD_WRITE_ASYNC_TO_MGMT_CPU: + cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE + + API_CMD_CELL_DATA_ADDR_SIZE, + API_CHAIN_CELL_ALIGNMENT); + break; + default: + break; + } + + return cell_data_size; +} + +/** + * prepare_cell_ctrl - prepare the ctrl of the cell for the command + * @cell_ctrl: the control of the cell to set the control into it + * @cell_len: the size of the cell + **/ +static void prepare_cell_ctrl(u64 *cell_ctrl, u16 cell_len) +{ + u64 ctrl; + u8 chksum; + + ctrl = HINIC5_API_CMD_CELL_CTRL_SET(SIZE_8BYTES(cell_len), CELL_LEN) | + HINIC5_API_CMD_CELL_CTRL_SET(0ULL, RD_DMA_ATTR_OFF) | + HINIC5_API_CMD_CELL_CTRL_SET(0ULL, WR_DMA_ATTR_OFF); + + chksum = xor_chksum_set(&ctrl, sizeof(ctrl)); + + ctrl |= HINIC5_API_CMD_CELL_CTRL_SET(chksum, XOR_CHKSUM); + + /* The data in the HW should be in Big Endian Format */ + *cell_ctrl = cpu_to_be64(ctrl); +} + +/** + * prepare_api_cmd - prepare API CMD command + * @chain: chain for the command + * @cell: the cell of the command + * @node_id: destination node on the card that will receive the command + * @cmd: command data + * @cmd_size: the command size + **/ +static void prepare_api_cmd(struct hinic5_api_cmd_chain *chain, + struct hinic5_api_cmd_cell *cell, u8 node_id, + const void *cmd, u16 cmd_size) +{ + struct hinic5_api_cmd_cell_ctxt *cell_ctxt = NULL; + u32 priv; + + cell_ctxt = &chain->cell_ctxt[chain->prod_idx]; + + switch (chain->chain_type) { + case HINIC5_API_CMD_POLL_READ: + priv = READ_API_CMD_PRIV_DATA(chain->chain_type, cell_ctxt->saved_prod_idx); + cell->desc = HINIC5_API_CMD_DESC_SET(SGL_DATA, API_TYPE) | + 
			     HINIC5_API_CMD_DESC_SET(API_CMD_READ_TYPE, RD_WR) |
			     HINIC5_API_CMD_DESC_SET(BYPASS, MGMT_BYPASS) |
			     HINIC5_API_CMD_DESC_SET(NOT_TRIGGER, RESP_AEQE_EN) |
			     HINIC5_API_CMD_DESC_SET(priv, PRIV_DATA);
		break;
	case HINIC5_API_CMD_POLL_WRITE:
		priv = WRITE_API_CMD_PRIV_DATA(chain->chain_type);
		/* Polled writes bypass the management CPU and raise no AEQE */
		cell->desc = HINIC5_API_CMD_DESC_SET(SGL_DATA, API_TYPE) |
			     HINIC5_API_CMD_DESC_SET(API_CMD_WRITE_TYPE, RD_WR) |
			     HINIC5_API_CMD_DESC_SET(BYPASS, MGMT_BYPASS) |
			     HINIC5_API_CMD_DESC_SET(NOT_TRIGGER, RESP_AEQE_EN) |
			     HINIC5_API_CMD_DESC_SET(priv, PRIV_DATA);
		break;
	case HINIC5_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
	case HINIC5_API_CMD_WRITE_TO_MGMT_CPU:
		priv = WRITE_API_CMD_PRIV_DATA(chain->chain_type);
		/* Writes to the management CPU trigger a response AEQE */
		cell->desc = HINIC5_API_CMD_DESC_SET(SGL_DATA, API_TYPE) |
			     HINIC5_API_CMD_DESC_SET(API_CMD_WRITE_TYPE, RD_WR) |
			     HINIC5_API_CMD_DESC_SET(NOT_BYPASS, MGMT_BYPASS) |
			     HINIC5_API_CMD_DESC_SET(TRIGGER, RESP_AEQE_EN) |
			     HINIC5_API_CMD_DESC_SET(priv, PRIV_DATA);
		break;
	default:
		sdk_err(chain->hwdev->dev_hdl, "Unknown Chain type: %d\n", chain->chain_type);
		return;
	}

	cell->desc |= HINIC5_API_CMD_DESC_SET(APICHN_0, APICHN_CODE) |
		      HINIC5_API_CMD_DESC_SET(APICHN_VALID, APICHN_RSVD);

	/* Command size is programmed in units of 4 bytes */
	cell->desc |= HINIC5_API_CMD_DESC_SET(node_id, DEST) |
		      HINIC5_API_CMD_DESC_SET(SIZE_4BYTES(cmd_size), SIZE);

	cell->desc |= HINIC5_API_CMD_DESC_SET(xor_chksum_set(&cell->desc,
							     sizeof(cell->desc)), XOR_CHKSUM);

	/* The data in the HW should be in Big Endian Format */
	cell->desc = cpu_to_be64(cell->desc);

	memcpy(cell_ctxt->api_cmd_vaddr, cmd, cmd_size);
}

/**
 * prepare_cell - prepare cell ctrl and cmd in the current producer cell
 * @chain: chain for the command
 * @node_id: destination node on the card that will receive the command
 * @cmd: command data
 * @cmd_size: the command size
 **/
static void prepare_cell(struct hinic5_api_cmd_chain *chain, u8 node_id,
			 const void *cmd, u16 cmd_size)
{
	struct
	hinic5_api_cmd_cell *curr_node = NULL;
	u16 cell_size;

	curr_node = chain->curr_node;

	cell_size = get_cell_data_size(chain->chain_type);

	prepare_cell_ctrl(&curr_node->ctrl, cell_size);
	prepare_api_cmd(chain, curr_node, node_id, cmd, cmd_size);
}

/* Advance the producer index with ring-size wrap-around */
static inline void cmd_chain_prod_idx_inc(struct hinic5_api_cmd_chain *chain)
{
	chain->prod_idx = MASKED_IDX(chain, chain->prod_idx + 1);
}

/* Kick the hardware by publishing the new producer index */
static void issue_api_cmd(struct hinic5_api_cmd_chain *chain)
{
	set_prod_idx(chain);
}

/**
 * api_cmd_status_update - update the consumer index from the write-back status
 * @chain: chain to update
 **/
static void api_cmd_status_update(struct hinic5_api_cmd_chain *chain)
{
	struct hinic5_api_cmd_status *wb_status = NULL;
	enum hinic5_api_cmd_chain_type chain_type;
	u64 status_header;
	u32 buf_desc;

	wb_status = chain->wb_status;

	/* Ignore the write-back area when its checksum is broken */
	buf_desc = be32_to_cpu(wb_status->buf_desc);
	if (HINIC5_API_CMD_STATUS_GET(buf_desc, CHKSUM_ERR) != 0)
		return;

	/* Only accept a status entry written for this very chain */
	status_header = be64_to_cpu(wb_status->header);
	chain_type = HINIC5_API_CMD_STATUS_HEADER_GET(status_header, CHAIN_ID);
	if (chain_type >= HINIC5_API_CMD_MAX)
		return;

	if (chain_type != chain->chain_type)
		return;

	chain->cons_idx = HINIC5_API_CMD_STATUS_GET(buf_desc, CONS_IDX);
}

/* Completion check for wait_for_status_poll() */
static enum hinic5_wait_return wait_for_status_poll_handler(void *priv_data)
{
	struct hinic5_api_cmd_chain *chain = priv_data;

	if (!hinic5_is_chip_present(chain->hwdev))
		return WAIT_PROCESS_ERR;

	api_cmd_status_update(chain);
	/* SYNC API CMD cmd should start after prev cmd finished */
	if (chain->cons_idx == chain->prod_idx)
		return WAIT_PROCESS_CPL;

	return WAIT_PROCESS_WAITING;
}

/**
 * wait_for_status_poll - wait for write to mgmt command to complete
 * @chain: the chain of the command
 * Return: 0 - success, negative - failure
 **/
static int wait_for_status_poll(struct hinic5_api_cmd_chain *chain)
{
	return hinic5_wait_for_timeout(chain,
				       wait_for_status_poll_handler,
				       API_CMD_STATUS_TIMEOUT, 100); /* wait 100 us once */
}

/* Copy the response into the caller buffer, truncating to the cell buffer
 * size, and mark the cell context as free again.
 */
static int copy_resp_data(struct hinic5_api_cmd_cell_ctxt *ctxt, void *ack,
			  u16 ack_size)
{
	struct hinic5_api_cmd_resp_fmt *resp = ctxt->resp;
	u16 rsp_size = (ack_size > API_CMD_BUF_SIZE) ? API_CMD_BUF_SIZE : ack_size;

	memcpy(ack, &resp->resp_data, rsp_size);

	ctxt->status = 0;

	return 0;
}

/* Completion check for wait_for_resp_polling(): the response is ready when
 * hardware has written a valid header with a zero status field.
 */
static enum hinic5_wait_return check_cmd_resp_handler(void *priv_data)
{
	struct hinic5_api_cmd_cell_ctxt *ctxt = priv_data;
	u64 resp_header;
	u8 resp_status;

	if (!hinic5_is_chip_present(ctxt->hwdev))
		return WAIT_PROCESS_ERR;

	resp_header = be64_to_cpu(ctxt->resp->header);
	rmb(); /* read the latest header */

	if (HINIC5_API_CMD_RESP_HEADER_VALID(resp_header)) {
		resp_status = HINIC5_API_CMD_RESP_HEAD_GET(resp_header, STATUS);
		if (resp_status != 0) {
			pr_err("Api chain response data err, status: %u\n",
			       resp_status);
			return WAIT_PROCESS_ERR;
		}

		return WAIT_PROCESS_CPL;
	}

	return WAIT_PROCESS_WAITING;
}

/**
 * wait_for_resp_polling - poll for response data of the read api-command
 * @ctxt: cell context of the pending read
 *
 * Return: 0 - success, negative - failure
 **/
static int wait_for_resp_polling(struct hinic5_api_cmd_cell_ctxt *ctxt)
{
	return hinic5_wait_for_timeout(ctxt, check_cmd_resp_handler,
				       POLLING_COMPLETION_TIMEOUT_DEFAULT,
				       USEC_PER_MSEC);
}

/**
 * wait_for_api_cmd_completion - wait for command to complete
 * @chain: chain for the command
 * @ctxt: cell context of the submitted command
 * @ack: caller buffer for the response (read chains only)
 * @ack_size: size of @ack
 * Return: 0 - success, negative - failure
 **/
static int wait_for_api_cmd_completion(struct hinic5_api_cmd_chain *chain,
				       struct hinic5_api_cmd_cell_ctxt *ctxt,
				       void *ack, u16 ack_size)
{
	void *dev = chain->hwdev->dev_hdl;
	int err = 0;

	switch (chain->chain_type) {
	case HINIC5_API_CMD_POLL_READ:
		err = wait_for_resp_polling(ctxt);
		if (err == 0) {
			err = copy_resp_data(ctxt, ack, ack_size);
			if (err != 0)
				sdk_err(dev, "Copy resp data failed, ack_size: %u.\n", ack_size);
		} else {
			/* Timed out: release the cell so it can be reused */
			ctxt->status = 0;
			sdk_err(dev, "API CMD poll response timeout\n");
		}
		break;
	case HINIC5_API_CMD_POLL_WRITE:
	case HINIC5_API_CMD_WRITE_TO_MGMT_CPU:
		err = wait_for_status_poll(chain);
		if (err != 0) {
			sdk_err(dev, "API CMD Poll status timeout, chain type: %d\n",
				chain->chain_type);
			break;
		}
		break;
	case HINIC5_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
		/* No need to wait */
		break;
	default:
		sdk_err(dev, "Unknown API CMD Chain type: %d\n",
			chain->chain_type);
		err = -EINVAL;
		break;
	}

	if (err != 0)
		dump_api_chain_reg(chain);

	return err;
}

/* Mark the producer cell as in-flight and invalidate its response header */
static inline void update_api_cmd_ctxt(struct hinic5_api_cmd_chain *chain,
				       struct hinic5_api_cmd_cell_ctxt *ctxt)
{
	ctxt->status = 1;
	ctxt->saved_prod_idx = chain->prod_idx;
	if (ctxt->resp) {
		ctxt->resp->header = 0;

		/* make sure "header" was cleared */
		wmb();
	}
}

/**
 * api_cmd - API CMD command
 * @chain: chain for the command
 * @node_id: destination node on the card that will receive the command
 * @cmd: command data
 * @cmd_size: the command size
 * @ack: buffer for the response (read chains), may be NULL for writes
 * @ack_size: size of @ack
 * Return: 0 - success, negative - failure
 **/
static int api_cmd(struct hinic5_api_cmd_chain *chain, u8 node_id,
		   const void *cmd, u16 cmd_size, void *ack, u16 ack_size)
{
	struct hinic5_api_cmd_cell_ctxt *ctxt = NULL;

	/* Async chains are used from atomic context (spinlock); sync chains
	 * may sleep (semaphore)
	 */
	if (chain->chain_type == HINIC5_API_CMD_WRITE_ASYNC_TO_MGMT_CPU)
		spin_lock(&chain->async_lock);
	else
		down(&chain->sem);
	ctxt = &chain->cell_ctxt[chain->prod_idx];
	if (chain_busy(chain) != 0) {
		if (chain->chain_type == HINIC5_API_CMD_WRITE_ASYNC_TO_MGMT_CPU)
			spin_unlock(&chain->async_lock);
		else
			up(&chain->sem);
		return -EBUSY;
	}
	update_api_cmd_ctxt(chain, ctxt);

	prepare_cell(chain, node_id, cmd, cmd_size);

	cmd_chain_prod_idx_inc(chain);

	wmb(); /* issue the command */

	issue_api_cmd(chain);

	/* incremented prod idx, update ctxt */

	chain->curr_node = chain->cell_ctxt[chain->prod_idx].cell_vaddr;
	if (chain->chain_type ==
	    HINIC5_API_CMD_WRITE_ASYNC_TO_MGMT_CPU)
		spin_unlock(&chain->async_lock);
	else
		up(&chain->sem);

	/* Wait outside the lock; the ctxt keeps the per-cell state */
	return wait_for_api_cmd_completion(chain, ctxt, ack, ack_size);
}

/**
 * hinic5_api_cmd_write - Write API CMD command
 * @chain: chain for write command
 * @node_id: destination node on the card that will receive the command
 * @cmd: command data
 * @size: the command size
 * Return: 0 - success, negative - failure
 **/
int hinic5_api_cmd_write(struct hinic5_api_cmd_chain *chain, u8 node_id,
			 const void *cmd, u16 size)
{
	/* Write commands carry no response buffer */
	return api_cmd(chain, node_id, cmd, size, NULL, 0);
}

/**
 * hinic5_api_cmd_read - Read API CMD command
 * @chain: chain for read command
 * @node_id: destination node on the card that will receive the command
 * @cmd: command data
 * @size: the command size
 * @ack: buffer for the read response
 * @ack_size: size of @ack
 * Return: 0 - success, negative - failure
 **/
int hinic5_api_cmd_read(struct hinic5_api_cmd_chain *chain, u8 node_id,
			const void *cmd, u16 size, void *ack, u16 ack_size)
{
	return api_cmd(chain, node_id, cmd, size, ack, ack_size);
}

/* Completion check for api_cmd_hw_restart(): HW clears the RESTART bit */
static enum hinic5_wait_return check_chain_restart_handler(void *priv_data)
{
	struct hinic5_api_cmd_chain *cmd_chain = priv_data;
	u32 reg_addr, val;

	if (!hinic5_is_chip_present(cmd_chain->hwdev))
		return WAIT_PROCESS_ERR;

	reg_addr = HINIC5_CSR_API_CMD_CHAIN_REQ_ADDR((u32)cmd_chain->chain_type);
	val = hinic5_hwif_read_reg(cmd_chain->hwdev->hwif, reg_addr);
	if (HINIC5_API_CMD_CHAIN_REQ_GET(val, RESTART) == 0)
		return WAIT_PROCESS_CPL;

	return WAIT_PROCESS_WAITING;
}

/**
 * api_cmd_hw_restart - restart the chain in the HW
 * @cmd_chain: the API CMD specific chain to restart
 * Return: 0 - success, negative - failure
 **/
static int api_cmd_hw_restart(struct hinic5_api_cmd_chain *cmd_chain)
{
	struct hinic5_hwif *hwif = cmd_chain->hwdev->hwif;
	u32 reg_addr, val;

	/* Read Modify Write */
	reg_addr = HINIC5_CSR_API_CMD_CHAIN_REQ_ADDR((u32)cmd_chain->chain_type);
	val = hinic5_hwif_read_reg(hwif, reg_addr);

	val = HINIC5_API_CMD_CHAIN_REQ_CLEAR(val, RESTART);
	val |= HINIC5_API_CMD_CHAIN_REQ_SET(1, RESTART);

	hinic5_hwif_write_reg(hwif, reg_addr, val);

	/* Hardware clears the RESTART bit once the chain has restarted */
	return hinic5_wait_for_timeout(cmd_chain, check_chain_restart_handler,
				       API_CMD_TIMEOUT, USEC_PER_MSEC);
}

/**
 * api_cmd_ctrl_init - set the control register of a chain
 * @chain: the API CMD specific chain to set control register for
 **/
static void api_cmd_ctrl_init(struct hinic5_api_cmd_chain *chain)
{
	struct hinic5_hwif *hwif = chain->hwdev->hwif;
	u32 reg_addr, ctrl;
	u32 size;

	/* Read Modify Write */
	reg_addr = HINIC5_CSR_API_CMD_CHAIN_CTRL_ADDR((u32)chain->chain_type);

	/* Cell size is programmed as a log2 encoding */
	size = (u32)ilog2(chain->cell_size >> API_CMD_CHAIN_CELL_SIZE_SHIFT);

	ctrl = hinic5_hwif_read_reg(hwif, reg_addr);

	ctrl = HINIC5_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) &
	       HINIC5_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE);

	ctrl |= HINIC5_API_CMD_CHAIN_CTRL_SET(0, AEQE_EN) |
		HINIC5_API_CMD_CHAIN_CTRL_SET(size, CELL_SIZE);

	hinic5_hwif_write_reg(hwif, reg_addr, ctrl);
}

/**
 * api_cmd_set_status_addr - set the status address of a chain in the HW
 * @chain: the API CMD specific chain to set status address for
 **/
static void api_cmd_set_status_addr(struct hinic5_api_cmd_chain *chain)
{
	struct hinic5_hwif *hwif = chain->hwdev->hwif;
	u32 addr, val;

	/* Program the DMA address of the write-back status area */
	addr = HINIC5_CSR_API_CMD_STATUS_HI_ADDR((u32)chain->chain_type);
	val = upper_32_bits(chain->wb_status_paddr);
	hinic5_hwif_write_reg(hwif, addr, val);

	addr = HINIC5_CSR_API_CMD_STATUS_LO_ADDR((u32)chain->chain_type);
	val = lower_32_bits(chain->wb_status_paddr);
	hinic5_hwif_write_reg(hwif, addr, val);
}

/**
 * api_cmd_set_num_cells - set the number cells of a chain in the HW
 * @chain: the API CMD specific chain to set the number of cells for
 **/
static void api_cmd_set_num_cells(struct hinic5_api_cmd_chain *chain)
{
	struct hinic5_hwif *hwif = chain->hwdev->hwif;
	u32 addr, val;

	addr =
	    HINIC5_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR((u32)chain->chain_type);
	val = chain->num_cells;
	hinic5_hwif_write_reg(hwif, addr, val);
}

/**
 * api_cmd_head_init - set the head cell of a chain in the HW
 * @chain: the API CMD specific chain to set the head for
 **/
static void api_cmd_head_init(struct hinic5_api_cmd_chain *chain)
{
	struct hinic5_hwif *hwif = chain->hwdev->hwif;
	u32 addr, val;

	/* Program the DMA address of the first cell of the chain */
	addr = HINIC5_CSR_API_CMD_CHAIN_HEAD_HI_ADDR((u32)chain->chain_type);
	val = upper_32_bits(chain->head_cell_paddr);
	hinic5_hwif_write_reg(hwif, addr, val);

	addr = HINIC5_CSR_API_CMD_CHAIN_HEAD_LO_ADDR((u32)chain->chain_type);
	val = lower_32_bits(chain->head_cell_paddr);
	hinic5_hwif_write_reg(hwif, addr, val);
}

/* Completion check for wait_for_ready_chain() */
static enum hinic5_wait_return check_chain_ready_handler(void *priv_data)
{
	struct hinic5_api_cmd_chain *chain = priv_data;
	u32 addr, val;
	u32 hw_cons_idx;

	if (!hinic5_is_chip_present(chain->hwdev))
		return WAIT_PROCESS_ERR;

	addr = HINIC5_CSR_API_CMD_STATUS_0_ADDR((u32)chain->chain_type);
	val = hinic5_hwif_read_reg(chain->hwdev->hwif, addr);
	hw_cons_idx = HINIC5_API_CMD_STATUS_GET(val, CONS_IDX);
	/* wait for HW cons idx to be updated */
	if (hw_cons_idx == chain->cons_idx)
		return WAIT_PROCESS_CPL;
	return WAIT_PROCESS_WAITING;
}

/**
 * wait_for_ready_chain - wait for the chain to be ready
 * @chain: the API CMD specific chain to wait for
 * Return: 0 - success, negative - failure
 **/
static int wait_for_ready_chain(struct hinic5_api_cmd_chain *chain)
{
	return hinic5_wait_for_timeout(chain, check_chain_ready_handler,
				       API_CMD_TIMEOUT, USEC_PER_MSEC);
}

/**
 * api_cmd_chain_hw_clean - clean the HW
 * @chain: the API CMD specific chain
 **/
static void api_cmd_chain_hw_clean(struct hinic5_api_cmd_chain *chain)
{
	struct hinic5_hwif *hwif = chain->hwdev->hwif;
	u32 addr, ctrl;

	addr = HINIC5_CSR_API_CMD_CHAIN_CTRL_ADDR((u32)chain->chain_type);

	ctrl = hinic5_hwif_read_reg(hwif, addr);
	/* Clear every chain control field before re-initialization */
	ctrl = HINIC5_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_EN) &
	       HINIC5_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) &
	       HINIC5_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) &
	       HINIC5_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) &
	       HINIC5_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE);

	hinic5_hwif_write_reg(hwif, addr, ctrl);
}

/**
 * api_cmd_chain_hw_init - initialize the chain in the HW
 * @chain: the API CMD specific chain to initialize in HW
 * Return: 0 - success, negative - failure
 **/
static int api_cmd_chain_hw_init(struct hinic5_api_cmd_chain *chain)
{
	api_cmd_chain_hw_clean(chain);

	/* Status address must be valid before the restart is requested */
	api_cmd_set_status_addr(chain);

	if (api_cmd_hw_restart(chain) != 0) {
		sdk_err(chain->hwdev->dev_hdl, "Failed to restart api_cmd_hw\n");
		return -EBUSY;
	}

	api_cmd_ctrl_init(chain);
	api_cmd_set_num_cells(chain);
	api_cmd_head_init(chain);

	return wait_for_ready_chain(chain);
}

/**
 * alloc_cmd_buf - allocate a dma buffer for API CMD command
 * @chain: the API CMD specific chain for the cmd
 * @cell: the cell in the HW for the cmd
 * @cell_idx: the index of the cell
 * Return: 0 - success, negative - failure
 **/
static int alloc_cmd_buf(struct hinic5_api_cmd_chain *chain,
			 struct hinic5_api_cmd_cell *cell, u32 cell_idx)
{
	struct hinic5_api_cmd_cell_ctxt *cell_ctxt = NULL;
	void *dev = chain->hwdev->dev_hdl;
	void *buf_vaddr = NULL;
	u64 buf_paddr;
	int err = 0;

	/* Each cell owns one aligned slice of the shared command buffer */
	buf_vaddr = (u8 *)(uintptr_t)((u64)(uintptr_t)chain->buf_vaddr_base +
				      chain->buf_size_align * cell_idx);
	buf_paddr = chain->buf_paddr_base +
		    chain->buf_size_align * cell_idx;

	cell_ctxt = &chain->cell_ctxt[cell_idx];

	cell_ctxt->api_cmd_vaddr = buf_vaddr;

	/* set the cmd DMA address in the cell */
	switch (chain->chain_type) {
	case HINIC5_API_CMD_POLL_READ:
		cell->read.hw_cmd_paddr = cpu_to_be64(buf_paddr);
		break;
	case HINIC5_API_CMD_WRITE_TO_MGMT_CPU:
	case HINIC5_API_CMD_POLL_WRITE:
	case HINIC5_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
		/* The data in the HW
		   should be in Big Endian Format */
		cell->write.hw_cmd_paddr = cpu_to_be64(buf_paddr);
		break;
	default:
		sdk_err(dev, "Unknown API CMD Chain type: %d\n",
			chain->chain_type);
		err = -EINVAL;
		break;
	}

	return err;
}

/**
 * alloc_resp_buf - allocate a resp buffer for API CMD command
 * @chain: the API CMD specific chain for the cmd
 * @cell: the cell in the HW for the cmd
 * @cell_idx: the index of the cell
 **/
static void alloc_resp_buf(struct hinic5_api_cmd_chain *chain,
			   struct hinic5_api_cmd_cell *cell, u32 cell_idx)
{
	struct hinic5_api_cmd_cell_ctxt *cell_ctxt = NULL;
	void *resp_vaddr = NULL;
	u64 resp_paddr;

	/* Each cell owns one aligned slice of the shared response buffer */
	resp_vaddr = (u8 *)(uintptr_t)((u64)(uintptr_t)chain->rsp_vaddr_base +
				       chain->rsp_size_align * cell_idx);
	resp_paddr = chain->rsp_paddr_base +
		     chain->rsp_size_align * cell_idx;

	cell_ctxt = &chain->cell_ctxt[cell_idx];

	cell_ctxt->resp = resp_vaddr;
	/* Pre-mark the header valid so chain_busy() sees a free cell */
	cell_ctxt->resp->header = cpu_to_be64(HINIC5_API_CMD_RESP_HEAD_VALID_CODE);
	cell->read.hw_wb_resp_paddr = cpu_to_be64(resp_paddr);
}

/* Attach command (and, for read chains, response) buffers to one cell */
static int hinic5_alloc_api_cmd_cell_buf(struct hinic5_api_cmd_chain *chain,
					 u32 cell_idx,
					 struct hinic5_api_cmd_cell *node)
{
	void *dev = chain->hwdev->dev_hdl;
	int err;

	/* For read chain, we should allocate buffer for the response data */
	if (chain->chain_type == HINIC5_API_CMD_MULTI_READ ||
	    chain->chain_type == HINIC5_API_CMD_POLL_READ)
		alloc_resp_buf(chain, node, cell_idx);

	switch (chain->chain_type) {
	case HINIC5_API_CMD_WRITE_TO_MGMT_CPU:
	case HINIC5_API_CMD_POLL_WRITE:
	case HINIC5_API_CMD_POLL_READ:
	case HINIC5_API_CMD_WRITE_ASYNC_TO_MGMT_CPU:
		err = alloc_cmd_buf(chain, node, cell_idx);
		if (err != 0) {
			sdk_err(dev, "Failed to allocate cmd buffer\n");
			goto alloc_cmd_buf_err;
		}
		break;
	/* For api command write and api command read, the data section
	 * is directly inserted in the cell, so no need to allocate.
	 */
	case HINIC5_API_CMD_MULTI_READ:
		chain->cell_ctxt[cell_idx].api_cmd_vaddr =
			&node->read.hw_cmd_paddr;
		break;
	default:
		sdk_err(dev, "Unsupported API CMD chain type\n");
		err = -EINVAL;
		goto alloc_cmd_buf_err;
	}

	return 0;

alloc_cmd_buf_err:

	return err;
}

/**
 * api_cmd_create_cell - create API CMD cell of specific chain
 * @chain: the API CMD specific chain to create its cell
 * @cell_idx: the cell index to create
 * @pre_node: previous cell, NULL for the head cell
 * @node_vaddr: output, the virt addr of the created cell
 * Return: 0 - success, negative - failure
 **/
static int api_cmd_create_cell(struct hinic5_api_cmd_chain *chain, u32 cell_idx,
			       struct hinic5_api_cmd_cell *pre_node,
			       struct hinic5_api_cmd_cell **node_vaddr)
{
	struct hinic5_api_cmd_cell_ctxt *cell_ctxt = NULL;
	struct hinic5_api_cmd_cell *node = NULL;
	void *cell_vaddr = NULL;
	u64 cell_paddr;
	int err;

	/* Each cell occupies one aligned slice of the shared cell area */
	cell_vaddr = (void *)(uintptr_t)((u64)(uintptr_t)chain->cell_vaddr_base +
					 chain->cell_size_align * cell_idx);
	cell_paddr = chain->cell_paddr_base +
		     chain->cell_size_align * cell_idx;

	cell_ctxt = &chain->cell_ctxt[cell_idx];
	cell_ctxt->cell_vaddr = cell_vaddr;
	cell_ctxt->hwdev = chain->hwdev;
	node = cell_ctxt->cell_vaddr;

	if (!pre_node) {
		chain->head_node = cell_vaddr;
		chain->head_cell_paddr = (dma_addr_t)cell_paddr;
	} else {
		/* The data in the HW should be in Big Endian Format */
		pre_node->next_cell_paddr = cpu_to_be64(cell_paddr);
	}

	/* Driver software should make sure that there is an empty API
	 * command cell at the end the chain
	 */
	node->next_cell_paddr = 0;

	err = hinic5_alloc_api_cmd_cell_buf(chain, cell_idx, node);
	if (err != 0)
		return err;

	*node_vaddr = node;

	return 0;
}

/**
 * api_cmd_create_cells - create API CMD cells for specific chain
 * @chain: the API CMD specific chain
 * Return: 0 - success, negative - failure
 **/
static int api_cmd_create_cells(struct hinic5_api_cmd_chain *chain)
{
	struct
	hinic5_api_cmd_cell *node = NULL, *pre_node = NULL;
	void *dev = chain->hwdev->dev_hdl;
	u32 cell_idx;
	int err;

	for (cell_idx = 0; cell_idx < chain->num_cells; cell_idx++) {
		err = api_cmd_create_cell(chain, cell_idx, pre_node, &node);
		if (err != 0) {
			sdk_err(dev, "Failed to create API CMD cell\n");
			return err;
		}

		pre_node = node;
	}

	if (!node)
		return -EFAULT;

	/* set the Final node to point on the start */
	node->next_cell_paddr = cpu_to_be64(chain->head_cell_paddr);

	/* set the current node to be the head */
	chain->curr_node = chain->head_node;
	return 0;
}

/**
 * api_chain_init - initialize API CMD specific chain
 * @chain: the API CMD specific chain to initialize
 * @attr: attributes to set in the chain
 * Return: 0 - success, negative - failure
 **/
static int api_chain_init(struct hinic5_api_cmd_chain *chain,
			  struct hinic5_api_cmd_chain_attr *attr)
{
	void *dev = chain->hwdev->dev_hdl;
	size_t cell_ctxt_size;
	size_t cells_buf_size;
	int err;

	chain->chain_type = attr->chain_type;
	chain->num_cells = attr->num_cells;
	chain->cell_size = attr->cell_size;
	chain->rsp_size = attr->rsp_size;

	chain->prod_idx = 0;
	chain->cons_idx = 0;

	/* Async chains run in atomic context; sync chains may sleep */
	if (chain->chain_type == HINIC5_API_CMD_WRITE_ASYNC_TO_MGMT_CPU)
		spin_lock_init(&chain->async_lock);
	else
		sema_init(&chain->sem, 1);

	cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt);
	if (cell_ctxt_size == 0) {
		sdk_err(dev, "Api chain cell size cannot be zero\n");
		err = -EINVAL;
		goto alloc_cell_ctxt_err;
	}

	chain->cell_ctxt = kzalloc(cell_ctxt_size, GFP_KERNEL);
	if (!chain->cell_ctxt) {
		err = -ENOMEM;
		goto alloc_cell_ctxt_err;
	}

	chain->wb_status = dma_zalloc_coherent(dev,
					       sizeof(*chain->wb_status),
					       &chain->wb_status_paddr,
					       GFP_KERNEL);
	if (!chain->wb_status) {
		sdk_err(dev, "Failed to allocate DMA wb status\n");
		err = -ENOMEM;
		goto alloc_wb_status_err;
	}

	chain->cell_size_align =
	    ALIGN((u64)chain->cell_size,
		  API_CMD_NODE_ALIGN_SIZE);
	chain->rsp_size_align = ALIGN((u64)chain->rsp_size,
				      API_CHAIN_RESP_ALIGNMENT);
	chain->buf_size_align = ALIGN(API_CMD_BUF_SIZE, API_PAYLOAD_ALIGN_SIZE);

	/* One DMA area holds cells, responses and command buffers in a row */
	cells_buf_size = (chain->cell_size_align + chain->rsp_size_align +
			  chain->buf_size_align) * chain->num_cells;

	err = hinic5_dma_zalloc_coherent_align(dev, cells_buf_size,
					       API_CMD_NODE_ALIGN_SIZE,
					       GFP_KERNEL,
					       &chain->cells_addr);
	if (err != 0) {
		sdk_err(dev, "Failed to allocate API CMD cells buffer\n");
		goto alloc_cells_buf_err;
	}

	chain->cell_vaddr_base = chain->cells_addr.align_vaddr;
	chain->cell_paddr_base = chain->cells_addr.align_paddr;

	/* Response area follows the cell area ... */
	chain->rsp_vaddr_base = (u8 *)(uintptr_t)((u64)(uintptr_t)chain->cell_vaddr_base +
						  chain->cell_size_align * chain->num_cells);
	chain->rsp_paddr_base = chain->cell_paddr_base +
				chain->cell_size_align * chain->num_cells;

	/* ... and command buffers follow the response area */
	chain->buf_vaddr_base = (u8 *)(uintptr_t)((u64)(uintptr_t)chain->rsp_vaddr_base +
						  chain->rsp_size_align * chain->num_cells);
	chain->buf_paddr_base = chain->rsp_paddr_base +
				chain->rsp_size_align * chain->num_cells;

	return 0;

alloc_cells_buf_err:
	dma_free_coherent(dev, sizeof(*chain->wb_status),
			  chain->wb_status, chain->wb_status_paddr);

alloc_wb_status_err:
	kfree(chain->cell_ctxt);

alloc_cell_ctxt_err:
	if (chain->chain_type == HINIC5_API_CMD_WRITE_ASYNC_TO_MGMT_CPU)
		spin_lock_deinit(&chain->async_lock);
	else
		sema_deinit(&chain->sem);

	return err;
}

/**
 * api_chain_free - free API CMD specific chain
 * @chain: the API CMD specific chain to free
 **/
static void api_chain_free(struct hinic5_api_cmd_chain *chain)
{
	void *dev = chain->hwdev->dev_hdl;

	hinic5_dma_free_coherent_align(dev, &chain->cells_addr);

	dma_free_coherent(dev, sizeof(*chain->wb_status),
			  chain->wb_status, chain->wb_status_paddr);
	kfree(chain->cell_ctxt);

	if (chain->chain_type == HINIC5_API_CMD_WRITE_ASYNC_TO_MGMT_CPU)
		spin_lock_deinit(&chain->async_lock);
	else
		sema_deinit(&chain->sem);
}

/**
 * api_cmd_create_chain - create API CMD specific chain
 * @cmd_chain: output, the created API CMD chain
 * @attr: attributes to set in the chain
 * Return: 0 - success, negative - failure
 **/
static int api_cmd_create_chain(struct hinic5_api_cmd_chain **cmd_chain,
				struct hinic5_api_cmd_chain_attr *attr)
{
	struct hinic5_hwdev *hwdev = attr->hwdev;
	struct hinic5_api_cmd_chain *chain = NULL;
	int err;

	/* Ring indices are masked, so the depth must be a power of 2 */
	if ((attr->num_cells & (attr->num_cells - 1)) != 0) {
		sdk_err(hwdev->dev_hdl, "Invalid number of cells, must be power of 2\n");
		return -EINVAL;
	}

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return -ENOMEM;

	chain->hwdev = hwdev;

	err = api_chain_init(chain, attr);
	if (err != 0) {
		sdk_err(hwdev->dev_hdl, "Failed to initialize chain\n");
		goto chain_init_err;
	}

	err = api_cmd_create_cells(chain);
	if (err != 0) {
		sdk_err(hwdev->dev_hdl, "Failed to create cells for API CMD chain\n");
		goto create_cells_err;
	}

	err = api_cmd_chain_hw_init(chain);
	if (err != 0) {
		sdk_err(hwdev->dev_hdl, "Failed to initialize chain HW\n");
		goto chain_hw_init_err;
	}

	*cmd_chain = chain;
	return 0;

chain_hw_init_err:
create_cells_err:
	api_chain_free(chain);

chain_init_err:
	kfree(chain);
	return err;
}

/**
 * api_cmd_destroy_chain - destroy API CMD specific chain
 * @chain: the API CMD specific chain to destroy
 **/
static void api_cmd_destroy_chain(struct hinic5_api_cmd_chain *chain)
{
	api_chain_free(chain);
	kfree(chain);
}

/**
 * hinic5_api_cmd_init - Initialize all the API CMD chains
 * @hwdev: the hardware device
 * @chain: array that receives the created API CMD chains
 * Return: 0 - success, negative - failure
 **/
int hinic5_api_cmd_init(struct hinic5_hwdev *hwdev,
			struct hinic5_api_cmd_chain **chain)
{
	void *dev = hwdev->dev_hdl;
	struct
	hinic5_api_cmd_chain_attr attr;
	u8 chain_type, i;
	int err;

	/* Nothing to do when the device has no API chain capability */
	if (!COMM_SUPPORT_API_CHAIN(hwdev))
		return 0;

	attr.hwdev = hwdev;
	attr.num_cells = API_CHAIN_NUM_CELLS;
	attr.cell_size = API_CHAIN_CELL_SIZE;
	attr.rsp_size = API_CHAIN_RSP_DATA_SIZE;

	chain_type = HINIC5_API_CMD_WRITE_TO_MGMT_CPU;
	for (; chain_type < (u8)HINIC5_API_CMD_MAX; chain_type++) {
		attr.chain_type = chain_type;

		err = api_cmd_create_chain(&chain[chain_type], &attr);
		if (err != 0) {
			sdk_err(dev, "Failed to create chain %u\n", chain_type);
			goto create_chain_err;
		}
	}

	return 0;

create_chain_err:
	/* Destroy only the chains created before the failure */
	i = HINIC5_API_CMD_WRITE_TO_MGMT_CPU;
	for (; i < chain_type; i++)
		api_cmd_destroy_chain(chain[i]);

	return err;
}

/**
 * hinic5_api_cmd_free - free the API CMD chains
 * @hwdev: the hardware device
 * @chain: the API CMD chains that will be freed
 **/
void hinic5_api_cmd_free(const struct hinic5_hwdev *hwdev, struct hinic5_api_cmd_chain **chain)
{
	u8 chain_type;

	if (!COMM_SUPPORT_API_CHAIN(hwdev))
		return;

	chain_type = HINIC5_API_CMD_WRITE_TO_MGMT_CPU;

	for (; chain_type < (u8)HINIC5_API_CMD_MAX; chain_type++)
		api_cmd_destroy_chain(chain[chain_type]);
}

diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_cmdq.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_cmdq.c
new file mode 100644
index 00000000..188e7054
--- /dev/null
+++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_cmdq.c
@@ -0,0 +1,1871 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 Huawei Technologies Co., Ltd */

#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/module.h>

#include "comm_defs.h"
#include "ossl_knl.h"
#include "hinic5_crm.h"
#include "hinic5_hw.h"
#include
	"hinic5_hwdev.h"
#include "hinic5_eqs.h"
#include "hinic5_comm_cmd.h"
#include "hinic5_common.h"
#include "hinic5_wq.h"
#include "hinic5_hw_comm.h"
#include "hinic5_hwif_inner.h"
#include "npu_cmdq_base_defs.h"
#include "hinic5_cmdq.h"

#define CMDQ_CMD_RETRY_TIMEOUT 1000U

#define UPPER_8_BITS(data) (((data) >> 8) & 0xFF)
#define LOWER_8_BITS(data) ((data) & 0xFF)

/* Cmdq doorbell info word: high 8 bits of the producer index */
#define CMDQ_DB_INFO_HI_PROD_IDX_SHIFT 0
#define CMDQ_DB_INFO_HI_PROD_IDX_MASK 0xFFU
#define CMDQ_DB_INFO_SET(val, member) \
	((((u32)(val)) & CMDQ_DB_INFO_##member##_MASK) << \
	 CMDQ_DB_INFO_##member##_SHIFT)

/* Cmdq doorbell head word: queue/cmdq/source type encoding */
#define CMDQ_DB_HEAD_QUEUE_TYPE_SHIFT 23
#define CMDQ_DB_HEAD_CMDQ_TYPE_SHIFT 24
#define CMDQ_DB_HEAD_SRC_TYPE_SHIFT 27
#define CMDQ_DB_HEAD_QUEUE_TYPE_MASK 0x1U
#define CMDQ_DB_HEAD_CMDQ_TYPE_MASK 0x7U
#define CMDQ_DB_HEAD_SRC_TYPE_MASK 0x1FU
#define CMDQ_DB_HEAD_SET(val, member) \
	((((u32)(val)) & CMDQ_DB_HEAD_##member##_MASK) << \
	 CMDQ_DB_HEAD_##member##_SHIFT)

/* Cmdq wqe ctrl word layout */
#define CMDQ_CTRL_PI_SHIFT 0
#define CMDQ_CTRL_CMD_SHIFT 16
#define CMDQ_CTRL_MOD_SHIFT 24
#define CMDQ_CTRL_ACK_TYPE_SHIFT 29
#define CMDQ_CTRL_HW_BUSY_BIT_SHIFT 31

#define CMDQ_CTRL_PI_MASK 0xFFFFU
#define CMDQ_CTRL_CMD_MASK 0xFFU
#define CMDQ_CTRL_MOD_MASK 0x1FU
#define CMDQ_CTRL_ACK_TYPE_MASK 0x3U
#define CMDQ_CTRL_HW_BUSY_BIT_MASK 0x1U

#define CMDQ_CTRL_SET(val, member) \
	((((u32)(val)) & CMDQ_CTRL_##member##_MASK) << \
	 CMDQ_CTRL_##member##_SHIFT)

#define CMDQ_CTRL_GET(val, member) \
	(((val) >> CMDQ_CTRL_##member##_SHIFT) & \
	 CMDQ_CTRL_##member##_MASK)

/* Cmdq wqe header word layout */
#define CMDQ_WQE_HEADER_BUFDESC_LEN_SHIFT 0
#define CMDQ_WQE_HEADER_COMPLETE_FMT_SHIFT 15
#define CMDQ_WQE_HEADER_DATA_FMT_SHIFT 22
#define CMDQ_WQE_HEADER_COMPLETE_REQ_SHIFT 23
#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_SHIFT 27
#define CMDQ_WQE_HEADER_CTRL_LEN_SHIFT 29
#define CMDQ_WQE_HEADER_HW_BUSY_BIT_SHIFT 31

#define CMDQ_WQE_HEADER_BUFDESC_LEN_MASK 0xFFU
#define CMDQ_WQE_HEADER_COMPLETE_FMT_MASK 0x1U
#define CMDQ_WQE_HEADER_DATA_FMT_MASK 0x1U
#define CMDQ_WQE_HEADER_COMPLETE_REQ_MASK 0x1U
#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_MASK 0x3U
#define CMDQ_WQE_HEADER_CTRL_LEN_MASK 0x3U
#define CMDQ_WQE_HEADER_HW_BUSY_BIT_MASK 0x1U

#define CMDQ_WQE_HEADER_SET(val, member) \
	((((u32)(val)) & CMDQ_WQE_HEADER_##member##_MASK) << \
	 CMDQ_WQE_HEADER_##member##_SHIFT)

#define CMDQ_WQE_HEADER_GET(val, member) \
	(((val) >> CMDQ_WQE_HEADER_##member##_SHIFT) & \
	 CMDQ_WQE_HEADER_##member##_MASK)

/* Cmdq context: current wqe page info (64-bit field) */
#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT 0
#define CMDQ_CTXT_EQ_ID_SHIFT 53
#define CMDQ_CTXT_CEQ_ARM_SHIFT 61
#define CMDQ_CTXT_CEQ_EN_SHIFT 62
#define CMDQ_CTXT_HW_BUSY_BIT_SHIFT 63

#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK 0xFFFFFFFFFFFFF
#define CMDQ_CTXT_EQ_ID_MASK 0xFF
#define CMDQ_CTXT_CEQ_ARM_MASK 0x1
#define CMDQ_CTXT_CEQ_EN_MASK 0x1
#define CMDQ_CTXT_HW_BUSY_BIT_MASK 0x1

#define CMDQ_CTXT_PAGE_INFO_SET(val, member) \
	(((u64)(val) & CMDQ_CTXT_##member##_MASK) << \
	 CMDQ_CTXT_##member##_SHIFT)

#define CMDQ_CTXT_PAGE_INFO_GET(val, member) \
	(((u64)(val) >> CMDQ_CTXT_##member##_SHIFT) & \
	 CMDQ_CTXT_##member##_MASK)

/* Cmdq context: wq block info (64-bit field) */
#define CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT 0
#define CMDQ_CTXT_CI_SHIFT 52

#define CMDQ_CTXT_WQ_BLOCK_PFN_MASK 0xFFFFFFFFFFFFF
#define CMDQ_CTXT_CI_MASK 0xFFF

#define CMDQ_CTXT_BLOCK_INFO_SET(val, member) \
	(((u64)(val) & CMDQ_CTXT_##member##_MASK) << \
	 CMDQ_CTXT_##member##_SHIFT)

#define CMDQ_CTXT_BLOCK_INFO_GET(val, member) \
	(((u64)(val) >> CMDQ_CTXT_##member##_SHIFT) & \
	 CMDQ_CTXT_##member##_MASK)

/* ARM flag kept in the wqe header saved_data word */
#define SAVED_DATA_ARM_SHIFT 31

#define SAVED_DATA_ARM_MASK 0x1U

#define SAVED_DATA_SET(val, member) \
	(((val) & SAVED_DATA_##member##_MASK) << \
	 SAVED_DATA_##member##_SHIFT)

#define SAVED_DATA_CLEAR(val, member) \
	((val) & (~(SAVED_DATA_##member##_MASK << \
		    SAVED_DATA_##member##_SHIFT)))

/* Error code reported in the wqe status word */
#define WQE_ERRCODE_VAL_SHIFT 0

#define WQE_ERRCODE_VAL_MASK 0x7FFFFFFF

#define
	WQE_ERRCODE_GET(val, member) \
	(((val) >> WQE_ERRCODE_##member##_SHIFT) & \
	 WQE_ERRCODE_##member##_MASK)

/* Cmdq type carried in a CEQ event */
#define CEQE_CMDQ_TYPE_SHIFT 0

#define CEQE_CMDQ_TYPE_MASK 0x7

#define CEQE_CMDQ_GET(val, member) \
	(((val) >> CEQE_CMDQ_##member##_SHIFT) & \
	 CEQE_CMDQ_##member##_MASK)

/* Hardware flips the busy bit once it has consumed the wqe */
#define WQE_COMPLETED(ctrl_info) CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT)

#define WQE_HEADER(wqe) ((struct hinic5_cmdq_header *)(wqe))

/* Doorbell offset encodes the low 8 bits of the producer index */
#define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3)

#define CMDQ_DB_ADDR(db_base, pi) \
	(((u8 *)(db_base)) + CMDQ_DB_PI_OFF(pi))

/* The first 8 bytes of a wqe carry the HW busy bit and are written
 * last; see cmdq_wqe_fill()
 */
#define FIRST_DATA_TO_WRITE_LAST sizeof(u64)

#define WQE_LCMD_SIZE 64
#define WQE_SCMD_SIZE 64
#define WQE_ENHANCED_CMDQ_SIZE 32

#define COMPLETE_LEN 3

#define CMDQ_WQEBB_SIZE 64
#define CMDQ_WQE_SIZE 64

#define ENHANCE_CMDQ_WQEBB_SIZE 16

/* Recover the cmdqs container from one member of its cmdq array */
#define cmdq_to_cmdqs(x) container_of((x) - ((u32)(x)->cmdq_type), \
				      struct hinic5_cmdqs, cmdq[0])

#define CMDQ_SEND_CMPT_CODE 10
#define CMDQ_COMPLETE_CMPT_CODE 11
#define CMDQ_FORCE_STOP_CMPT_CODE 12

enum cmdq_scmd_type {
	CMDQ_SET_ARM_CMD = 2,
};

enum cmdq_wqe_type {
	WQE_LCMD_TYPE,
	WQE_SCMD_TYPE,
};

enum ctrl_sect_len {
	CTRL_SECT_LEN = 1,
	CTRL_DIRECT_SECT_LEN = 2,
};

enum completion_format {
	COMPLETE_DIRECT,
	COMPLETE_SGE,
};

enum completion_request {
	CEQ_SET = 1,
};

#define NUM_WQEBBS_FOR_CMDQ_WQE 1
#define NUM_WQEBBS_FOR_ENHANCE_CMDQ_WQE 4

/* A cmdq is idle when its work queue has no outstanding wqebbs */
bool hinic5_cmdq_idle(struct hinic5_cmdq *cmdq)
{
	return hinic5_wq_is_empty(&cmdq->wq);
}

/* Return the next wqe to consume, or NULL when the queue is empty */
static void *cmdq_read_wqe(struct hinic5_wq *wq, u16 *ci)
{
	if (hinic5_wq_is_empty(wq))
		return NULL;

	return hinic5_wq_read_one_wqebb(wq, ci);
}

/* Reserve wqebb_num wqebbs and return the address of the first one */
static void *hinic5_wq_get_align_wqebbs(struct hinic5_wq *wq, u16 *pi, u16 wqebb_num)
{
	*pi = WQ_MASK_IDX(wq, wq->prod_idx);
	wq->prod_idx += wqebb_num;

	return WQ_GET_WQEBB_ADDR(wq, WQ_PAGE_IDX(wq, *pi),
				 WQ_OFFSET_IN_PAGE(wq, *pi));
}

/* should guarantee cmdq wq depth % 2 = 0 */
static void
*cmdq_get_wqe(struct hinic5_wq *wq, u16 *pi, u16 wqebb_use_num) +{ + if (hinic5_wq_free_wqebbs(wq) < wqebb_use_num) + return NULL; + + return hinic5_wq_get_align_wqebbs(wq, pi, wqebb_use_num); +} + +struct hinic5_cmd_buf *hinic5_alloc_cmd_buf(void *hwdev) +{ + struct hinic5_cmdqs *cmdqs = NULL; + struct hinic5_cmd_buf *cmd_buf = NULL; + void *dev = NULL; + + if (!hwdev) { + pr_err("Failed to alloc cmd buf, Invalid hwdev\n"); + return NULL; + } + + cmdqs = ((struct hinic5_hwdev *)hwdev)->cmdqs; + dev = ((struct hinic5_hwdev *)hwdev)->dev_hdl; + if (!cmdqs || !dev) { + pr_err("Failed to alloc cmd buf, Invalid hwdev cmdqs or dev\n"); + return NULL; + } + + cmd_buf = kzalloc(sizeof(*cmd_buf), GFP_ATOMIC); + if (!cmd_buf) + return NULL; + + // 申请到的内存不支持默认清零, 调用者根据需求进行清零 + cmd_buf->buf = dma_pool_alloc(cmdqs->cmd_buf_pool, GFP_ATOMIC, + &cmd_buf->dma_addr); + if (!cmd_buf->buf) { + sdk_err(dev, "Failed to allocate cmdq cmd buf from the pool\n"); + goto alloc_dma_buf_err; + } + + cmd_buf->size = (u16)cmdqs->cmd_buf_size; + atomic_set(&cmd_buf->ref_cnt, 1); + + return cmd_buf; + +alloc_dma_buf_err: + kfree(cmd_buf); + return NULL; +} +EXPORT_SYMBOL(hinic5_alloc_cmd_buf); + +void hinic5_free_cmd_buf(void *hwdev, struct hinic5_cmd_buf *cmd_buf) +{ + struct hinic5_cmdqs *cmdqs = NULL; + + if (!hwdev || !cmd_buf) { + pr_err("Failed to free cmd buf, hwdev or cmd_buf is NULL\n"); + return; + } + + if (!atomic_dec_and_test(&cmd_buf->ref_cnt)) + return; + + cmdqs = ((struct hinic5_hwdev *)hwdev)->cmdqs; + + dma_pool_free(cmdqs->cmd_buf_pool, cmd_buf->buf, cmd_buf->dma_addr); + kfree(cmd_buf); +} +EXPORT_SYMBOL(hinic5_free_cmd_buf); + +static void cmdq_set_completion(struct hinic5_cmdq_completion *complete, + struct hinic5_cmd_buf *buf_out) +{ + struct hinic5_sge_resp *sge_resp = &complete->sge_resp; + + hinic5_set_sge(&sge_resp->sge, buf_out->dma_addr, buf_out->size); +} + +static void cmdq_set_lcmd_bufdesc(struct hinic5_cmdq_wqe_lcmd *wqe, + struct hinic5_cmd_buf *buf_in) +{ + 
hinic5_set_sge(&wqe->buf_desc.sge, buf_in->dma_addr, buf_in->size); +} + +static void cmdq_fill_db(struct hinic5_cmdq_db *db, + u8 cmdq_type, u16 prod_idx) +{ + db->db_info = CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX); + + db->db_head = CMDQ_DB_HEAD_SET(HINIC5_DB_CMDQ_TYPE, QUEUE_TYPE) | + CMDQ_DB_HEAD_SET(cmdq_type, CMDQ_TYPE) | + CMDQ_DB_HEAD_SET(HINIC5_DB_SRC_CMDQ_TYPE, SRC_TYPE); +} + +static void cmdq_set_db(struct hinic5_cmdq *cmdq, + enum hinic5_cmdq_type cmdq_type, u16 prod_idx) +{ + struct hinic5_cmdq_db db = {0}; + u8 *db_base = cmdq->hwdev->cmdqs->cmdqs_db_base; + + cmdq_fill_db(&db, ((u8)cmdq_type | cmdq->hwdev->cmdq_cos_offset), prod_idx); + + /* The data that is written to HW should be in Big Endian Format */ + db.db_info = hinic5_hw_be32(db.db_info); + db.db_head = hinic5_hw_be32(db.db_head); + + wmb(); /* write all before the doorbell */ +#ifndef __UEFI__ + writeq(*((u64 *)(void *)&db), CMDQ_DB_ADDR(db_base, prod_idx)); +#else + struct hinic5_hwdev *hwdev = cmdq->hwdev; + + writeq_uefi(hwdev->busdev_hdl, + (u64)CMDQ_DB_ADDR(db_base, prod_idx), + HINIC5_DB_BAR, *((u64 *)&db)); +#endif +} + +static void cmdq_wqe_fill(void *dst, const void *src, int wqe_size) +{ + memcpy((u8 *)((uintptr_t)dst + FIRST_DATA_TO_WRITE_LAST), + (u8 *)((uintptr_t)src + FIRST_DATA_TO_WRITE_LAST), + (u32)wqe_size - FIRST_DATA_TO_WRITE_LAST); + + wmb(); /* The first 8 bytes should be written last */ + + *(u64 *)dst = *(u64 *)src; +} + +static void cmdq_prepare_wqe_ctrl(struct hinic5_cmdq_wqe *wqe, int wrapped, + u8 mod, u8 cmd, u16 prod_idx, + enum completion_format complete_format) +{ + struct hinic5_ctrl *ctrl = NULL; + enum ctrl_sect_len ctrl_len; + struct hinic5_cmdq_wqe_lcmd *wqe_lcmd = NULL; + u32 saved_data = WQE_HEADER((void *)wqe)->saved_data; + + wqe_lcmd = &wqe->wqe_lcmd; + wqe_lcmd->status.status_info = 0; + ctrl = &wqe_lcmd->ctrl; + ctrl_len = CTRL_SECT_LEN; + + ctrl->ctrl_info = CMDQ_CTRL_SET(prod_idx, PI) | + CMDQ_CTRL_SET(cmd, CMD) | + 
CMDQ_CTRL_SET(mod, MOD) | + CMDQ_CTRL_SET(HINIC5_ACK_TYPE_CMDQ, ACK_TYPE); + + WQE_HEADER((void *)wqe)->header_info = + CMDQ_WQE_HEADER_SET(BUFDESC_LCMD_LEN, BUFDESC_LEN) | + CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) | + CMDQ_WQE_HEADER_SET(DATA_SGE, DATA_FMT) | + CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) | + CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) | + CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) | + CMDQ_WQE_HEADER_SET((u32)wrapped, HW_BUSY_BIT); + + if (cmd == CMDQ_SET_ARM_CMD && mod == HINIC5_MOD_COMM) { + saved_data &= SAVED_DATA_CLEAR(saved_data, ARM); + WQE_HEADER((void *)wqe)->saved_data = saved_data | + SAVED_DATA_SET(1, ARM); + } else { + saved_data &= SAVED_DATA_CLEAR(saved_data, ARM); + WQE_HEADER((void *)wqe)->saved_data = saved_data; + } +} + +static void cmdq_set_lcmd_wqe(struct hinic5_cmdq_wqe *wqe, + enum hinic5_cmdq_cmd_type cmd_type, + struct hinic5_cmd_buf *buf_in, + struct hinic5_cmd_buf *buf_out, int wrapped, + u8 mod, u8 cmd, u16 prod_idx) +{ + struct hinic5_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd; + enum completion_format complete_format = COMPLETE_DIRECT; + + switch (cmd_type) { + case HINIC5_CMD_TYPE_DIRECT_RESP: + wqe_lcmd->completion.direct_resp = 0; + break; + case HINIC5_CMD_TYPE_SGE_RESP: + if (buf_out) { + complete_format = COMPLETE_SGE; + cmdq_set_completion(&wqe_lcmd->completion, + buf_out); + } + break; + case HINIC5_CMD_TYPE_ASYNC: + wqe_lcmd->completion.direct_resp = 0; + wqe_lcmd->buf_desc.saved_async_buf = (u64)(uintptr_t)(buf_in); + break; + default: + break; + } + + cmdq_prepare_wqe_ctrl(wqe, wrapped, mod, cmd, prod_idx, complete_format); + + cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in); +} + +static void cmdq_update_cmd_status(struct hinic5_cmdq *cmdq, u16 prod_idx, + struct hinic5_cmdq_wqe *wqe) +{ + struct hinic5_cmdq_cmd_info *cmd_info = NULL; + struct hinic5_cmdq_wqe_lcmd *wqe_lcmd = NULL; + u32 status_info; + + cmd_info = &cmdq->cmd_infos[prod_idx]; + + if (!cmd_info->errcode) { + 
sdk_err(cmdq->hwdev->dev_hdl, "cmd_info->errcode = NULL\n"); + return; + } + + if (cmdq->hwdev->cmdq_mode == HINIC5_NORMAL_CMDQ) { + wqe_lcmd = &wqe->wqe_lcmd; + status_info = hinic5_hw_cpu32(wqe_lcmd->status.status_info); + *cmd_info->errcode = WQE_ERRCODE_GET(status_info, VAL); + + if (cmd_info->direct_resp) { + *cmd_info->direct_resp = hinic5_hw_cpu32(wqe_lcmd->completion.direct_resp); + if ((*cmd_info->errcode != 0) && (*cmd_info->direct_resp != 0)) { + sdk_err(cmdq->hwdev->dev_hdl, "Cmdq resp err=0x%llx\n", + *cmd_info->direct_resp); + } + } + } else { + enhanced_cmdq_update_cmd_status(cmdq, cmd_info, &wqe->enhanced_cmdq_wqe); + } +} + +static int hinic5_cmdq_sync_timeout_check(struct hinic5_cmdq *cmdq, + struct hinic5_cmdq_wqe *wqe, u16 pi) +{ + struct hinic5_cmdq_wqe_lcmd *wqe_lcmd = NULL; + struct hinic5_ctrl *ctrl = NULL; + u32 ctrl_info; + + if (cmdq->hwdev->cmdq_mode == HINIC5_NORMAL_CMDQ) { + /* only arm bit is using scmd wqe, the wqe is lcmd */ + wqe_lcmd = &wqe->wqe_lcmd; + ctrl = &wqe_lcmd->ctrl; + ctrl_info = hinic5_hw_cpu32((ctrl)->ctrl_info); + + if (WQE_COMPLETED(ctrl_info) == 0) { + sdk_info(cmdq->hwdev->dev_hdl, "Cmdq sync command check busy bit not set\n"); + return -EFAULT; + } + } else { + ctrl_info = hinic5_hw_cpu32(wqe->enhanced_cmdq_wqe.completion.cs_format); + if (ENHANCE_CMDQ_WQE_CS_GET(ctrl_info, HW_BUSY) == 0) { + sdk_info(cmdq->hwdev->dev_hdl, "enhance Cmdq sync command check busy bit not set\n"); + return -EFAULT; + } + } + + cmdq_update_cmd_status(cmdq, pi, wqe); + + sdk_info(cmdq->hwdev->dev_hdl, "Cmdq sync command check succeed\n"); + return 0; +} + +static void clear_cmd_info(struct hinic5_cmdq_cmd_info *cmd_info, + const struct hinic5_cmdq_cmd_info *saved_cmd_info) +{ + if (cmd_info->errcode == saved_cmd_info->errcode) + cmd_info->errcode = NULL; + + if (cmd_info->done == saved_cmd_info->done) + cmd_info->done = NULL; + + if (cmd_info->direct_resp == saved_cmd_info->direct_resp) + cmd_info->direct_resp = NULL; +} + +static int 
wait_for_cmdq_timeout(struct hinic5_cmdq *cmdq, + struct hinic5_cmdq_cmd_info *cmd_info, + ulong timeout) +{ + ulong timeo, end; + + if (cmdq->cmdqs->poll) { + end = jiffies + msecs_to_jiffies((unsigned int)timeout); + while (time_before(jiffies, end)) { + /* must lock cmdq when poll cqe handle */ + spin_lock_bh(&cmdq->cmdq_lock); + hinic5_cmdq_ceq_handler(cmdq->hwdev, 0); + spin_unlock_bh(&cmdq->cmdq_lock); + + if (try_wait_for_completion(cmd_info->done) != 0) + return 0; + + if (check_outbound_enable_handler(cmdq->hwdev) != + WAIT_PROCESS_CPL) { + return -EPERM; + } + usleep_range(9, 10); /* sleep 9 us ~ 10 us */ + } + } else { + timeo = msecs_to_jiffies((unsigned int)timeout); + if (wait_for_completion_timeout(cmd_info->done, timeo) != 0) + return 0; + } + + return -ETIMEDOUT; +} + +static int cmdq_retry_get_ack(struct hinic5_cmdq *cmdq, + struct hinic5_cmdq_cmd_info *cmd_info, u8 ceq_id) +{ + ulong retry_timeout = msecs_to_jiffies(CMDQ_CMD_RETRY_TIMEOUT); + int err; + + spin_lock_bh(&cmdq->cmdq_lock); + if (try_wait_for_completion(cmd_info->done)) { + spin_unlock_bh(&cmdq->cmdq_lock); + return 0; + } + reinit_completion(cmd_info->done); + spin_unlock_bh(&cmdq->cmdq_lock); + + err = hinic5_reschedule_eq(cmdq->hwdev, HINIC5_CEQ, ceq_id); + if (err != 0) + return err; + + if (wait_for_cmdq_timeout(cmdq, cmd_info, retry_timeout) == 0) + return 0; + + return -ETIMEDOUT; +} + +static int cmdq_ceq_handler_status(struct hinic5_cmdq *cmdq, + struct hinic5_cmdq_cmd_info *cmd_info, + struct hinic5_cmdq_cmd_info *saved_cmd_info, + u64 curr_msg_id, u16 curr_prod_idx, + struct hinic5_cmdq_wqe *curr_wqe, + u32 timeout) +{ + int err; + + err = wait_for_cmdq_timeout(cmdq, saved_cmd_info, timeout); + if (err == 0) + return 0; + + if (!cmdq->cmdqs->poll) { + sdk_warn(cmdq->hwdev->dev_hdl, + "Cmdq retry cmd(type %u, channel %u), msg_id %llu, pi %u\n", + saved_cmd_info->cmd_type, saved_cmd_info->channel, + curr_msg_id, curr_prod_idx); + + err = cmdq_retry_get_ack(cmdq, 
saved_cmd_info, HINIC5_CEQ_ID_CMDQ); + if (err == 0) + return 0; + } + + spin_lock_bh(&cmdq->cmdq_lock); + + if (cmd_info->cmpt_code == saved_cmd_info->cmpt_code) + cmd_info->cmpt_code = NULL; + + if (*saved_cmd_info->cmpt_code == CMDQ_COMPLETE_CMPT_CODE) { + sdk_info(cmdq->hwdev->dev_hdl, "Cmdq direct sync command has been completed\n"); + spin_unlock_bh(&cmdq->cmdq_lock); + return 0; + } + + if (curr_msg_id == cmd_info->cmdq_msg_id) { + err = hinic5_cmdq_sync_timeout_check(cmdq, curr_wqe, + curr_prod_idx); + if (err != 0) + cmd_info->cmd_type = HINIC5_CMD_TYPE_TIMEOUT; + else + cmd_info->cmd_type = HINIC5_CMD_TYPE_FAKE_TIMEOUT; + } else { + err = -ETIMEDOUT; + sdk_err(cmdq->hwdev->dev_hdl, "Cmdq sync command current msg id dismatch with cmd_info msg id\n"); + } + + clear_cmd_info(cmd_info, saved_cmd_info); + + spin_unlock_bh(&cmdq->cmdq_lock); + + if (err == 0) + return 0; + + hinic5_dump_ceq_info(cmdq->hwdev); + + return -ETIMEDOUT; +} + +static int wait_cmdq_sync_cmd_completion(struct hinic5_cmdq *cmdq, + struct hinic5_cmdq_cmd_info *cmd_info, + struct hinic5_cmdq_cmd_info *saved_cmd_info, + u64 curr_msg_id, u16 curr_prod_idx, + struct hinic5_cmdq_wqe *curr_wqe, u32 timeout) +{ + return cmdq_ceq_handler_status(cmdq, cmd_info, saved_cmd_info, + curr_msg_id, curr_prod_idx, + curr_wqe, timeout); +} + +static int cmdq_msg_lock(struct hinic5_cmdq *cmdq, u16 channel) +{ + struct hinic5_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq); + + if (!cmdqs) + return -EINVAL; + + /* Keep wrapped and doorbell index correct. 
bh - for tasklet(ceq) */ + spin_lock_bh(&cmdq->cmdq_lock); + + if (cmdqs->lock_channel_en && test_bit(channel, &cmdqs->channel_stop)) { + spin_unlock_bh(&cmdq->cmdq_lock); + return -EAGAIN; + } + + return 0; +} + +static void cmdq_msg_unlock(struct hinic5_cmdq *cmdq) +{ + spin_unlock_bh(&cmdq->cmdq_lock); +} + +static void cmdq_clear_cmd_buf(struct hinic5_cmdq_cmd_info *cmd_info, + struct hinic5_hwdev *hwdev) +{ + if (cmd_info->buf_in) + hinic5_free_cmd_buf(hwdev, cmd_info->buf_in); + + if (cmd_info->buf_out) + hinic5_free_cmd_buf(hwdev, cmd_info->buf_out); + + cmd_info->buf_in = NULL; + cmd_info->buf_out = NULL; +} + +static void cmdq_update_next_prod_idx(struct hinic5_cmdq *cmdq, u16 curr_pi, u16 *next_pi, + u16 wqebb_use_num) +{ + u16 q_depth = (u16)cmdq->wq.q_depth; + + *next_pi = curr_pi + wqebb_use_num; + if (*next_pi >= q_depth) { + cmdq->wrapped = (cmdq->wrapped == 0) ? 1 : 0; + *next_pi -= (u16)q_depth; + } +} + +static void cmdq_set_cmd_buf(struct hinic5_cmdq_cmd_info *cmd_info, + struct hinic5_hwdev *hwdev, + struct hinic5_cmd_buf *buf_in, + struct hinic5_cmd_buf *buf_out) +{ + cmd_info->buf_in = buf_in; + cmd_info->buf_out = buf_out; + + if (buf_in) + atomic_inc(&buf_in->ref_cnt); + + if (buf_out) + atomic_inc(&buf_out->ref_cnt); +} + +static void cmdq_fill_inline_data(struct hinic5_cmdq *cmdq, u16 pi, const void *src, u32 size) +{ + struct hinic5_wq *wq = &cmdq->wq; + u8 *dst = WQ_GET_WQEBB_ADDR(wq, WQ_PAGE_IDX(wq, pi), WQ_OFFSET_IN_PAGE(wq, pi)); + u32 copy_len = 0; + u16 pi_new = pi; + + /* copy two path when dst_ptr + size > page_size */ + if (WQ_OFFSET_IN_PAGE(wq, pi) + (size >> wq->wqebb_size_shift) > wq->wqebbs_per_page) { + copy_len = (wq->wqebbs_per_page - WQ_OFFSET_IN_PAGE(wq, pi)) + << wq->wqebb_size_shift; + if (copy_len != 0) + memcpy(dst, src, copy_len); + + pi_new = WQ_MASK_IDX(wq, pi + (copy_len >> wq->wqebb_size_shift)); + dst = WQ_GET_WQEBB_ADDR(wq, WQ_PAGE_IDX(wq, pi_new), WQ_OFFSET_IN_PAGE(wq, pi_new)); + } + + memcpy(dst, (u8 *)src 
+ copy_len, size - copy_len); +} + +static void cmdq_sync_wqe_prepare(struct hinic5_cmdq *cmdq, u8 mod, u8 cmd, + struct hinic5_cmd_buf *buf_in, struct hinic5_cmd_buf *buf_out, + struct hinic5_cmdq_wqe *curr_wqe, u16 curr_pi, + enum hinic5_cmdq_cmd_type nic_cmd_type) +{ + struct hinic5_cmdq_wqe wqe; + struct hinic5_cmdq_cmd_param cmd_buf; + int wrapped, wqe_size; + + if (cmdq->cmdqs->cmdq_mode == HINIC5_ENHANCE_CMDQ) { + wqe_size = WQE_ENHANCED_CMDQ_SIZE; + if (nic_cmd_type == HINIC5_CMD_TYPE_INLINE_DATA) + wqe_size += buf_in->size; + + /* enhance cmdq wqe_size aligned with 64 */ + wqe_size = ALIGN((u32)wqe_size, 64); + } else { + wqe_size = WQE_LCMD_SIZE; + } + + memset(&wqe, 0, (u32)wqe_size); + + wrapped = cmdq->wrapped; + + if (cmdq->cmdqs->cmdq_mode == HINIC5_NORMAL_CMDQ) { + cmdq_set_lcmd_wqe(&wqe, nic_cmd_type, buf_in, buf_out, wrapped, mod, cmd, curr_pi); + } else { + cmd_buf.buf_in = buf_in; + cmd_buf.buf_out = buf_out; + cmd_buf.cmd = cmd; + cmd_buf.mod = mod; + enhanced_cmdq_set_wqe(&wqe, nic_cmd_type, &cmd_buf, wrapped); + } + + /* The data that is written to HW should be in Big Endian Format */ + hinic5_hw_be32_len(&wqe, wqe_size); + + /* CMDQ WQE is not shadow, therefore wqe will be written to wq */ + if (nic_cmd_type == HINIC5_CMD_TYPE_INLINE_DATA) { + cmdq_fill_inline_data(cmdq, WQ_MASK_IDX(&cmdq->wq, curr_pi + + HINIC5_CMDQ_WQE_INLINE_DATA_PI_OFFSET), + wqe.enhanced_cmdq_wqe.inline_data, + (u32)(wqe_size - WQE_ENHANCED_CMDQ_SIZE)); + cmdq_wqe_fill(curr_wqe, &wqe, WQE_ENHANCED_CMDQ_SIZE); + } else { + cmdq_wqe_fill(curr_wqe, &wqe, wqe_size); + } +} + +static inline void hinic5_cmdq_fill_cmd_info(struct hinic5_cmdq_cmd_info *cmd_info, + enum hinic5_cmdq_cmd_type nic_cmd_type, u16 channel, + u16 wqebb_use_num) +{ + cmd_info->cmd_type = nic_cmd_type; + cmd_info->channel = channel; + cmd_info->wqebb_use_num = wqebb_use_num; +} + +static inline void hinic5_cmdq_fill_completion_info(struct hinic5_cmdq_cmd_info *cmd_info, + int *cmpt_code, struct 
completion *done, + int *errcode, u64 *out_param) +{ + cmd_info->done = done; + cmd_info->errcode = errcode; + cmd_info->direct_resp = out_param; + cmd_info->cmpt_code = cmpt_code; +} + +static int cmdq_sync_cmd(struct hinic5_cmdq *cmdq, u8 mod, u8 cmd, + struct hinic5_cmd_buf *buf_in, struct hinic5_cmd_buf *buf_out, + u64 *out_param, u32 timeout, u16 channel, + enum hinic5_cmdq_cmd_type nic_cmd_type) +{ + struct hinic5_wq *wq = &cmdq->wq; + struct hinic5_cmdq_wqe *curr_wqe = NULL; + struct hinic5_cmdq_cmd_info *cmd_info = NULL, saved_cmd_info; + struct completion done; + u16 curr_pi, next_pi, wqebb_use_num; + int errcode = 0; + int cmpt_code = CMDQ_SEND_CMPT_CODE; + u64 curr_msg_id; + int err; + u32 real_timeout; + + err = cmdq_msg_lock(cmdq, channel); + if (err != 0) + return err; + + wqebb_use_num = cmdq->cmdqs->wqebb_use_num; + /* 4: 16B data per wqebb, - 2: 0 equal 32B */ + if (nic_cmd_type == HINIC5_CMD_TYPE_INLINE_DATA) + wqebb_use_num += ((buf_in->size >> 4) - 2); + + curr_wqe = cmdq_get_wqe(wq, &curr_pi, wqebb_use_num); + if (!curr_wqe) { + cmdq_msg_unlock(cmdq); + return -EBUSY; + } + + init_completion(&done); + cmd_info = &cmdq->cmd_infos[curr_pi]; + hinic5_cmdq_fill_cmd_info(cmd_info, nic_cmd_type, channel, wqebb_use_num); + hinic5_cmdq_fill_completion_info(cmd_info, &cmpt_code, &done, &errcode, out_param); + + cmdq_set_cmd_buf(cmd_info, cmdq->hwdev, buf_in, buf_out); + memcpy(&saved_cmd_info, cmd_info, sizeof(*cmd_info)); + + cmdq_sync_wqe_prepare(cmdq, mod, cmd, buf_in, buf_out, curr_wqe, curr_pi, nic_cmd_type); + + (cmd_info->cmdq_msg_id)++; + curr_msg_id = cmd_info->cmdq_msg_id; + + cmdq_update_next_prod_idx(cmdq, curr_pi, &next_pi, wqebb_use_num); + cmdq_set_db(cmdq, cmdq->cmdq_type, next_pi); + + cmdq_msg_unlock(cmdq); + + real_timeout = (timeout != 0) ? 
timeout : cmdq->hwdev->timeout_info->cmdq_timeout; + err = wait_cmdq_sync_cmd_completion(cmdq, cmd_info, &saved_cmd_info, curr_msg_id, + curr_pi, curr_wqe, real_timeout); + if (err != 0) { + sdk_err(cmdq->hwdev->dev_hdl, + "Cmdq sync cmd(mod: %u, cmd: %u) timeout, pi: 0x%x, \ + real_timeout: %u, expect_timeout: %u\n", + mod, cmd, curr_pi, real_timeout, cmdq->hwdev->timeout_info->cmdq_timeout); + err = -ETIMEDOUT; + } + + if (cmpt_code == CMDQ_FORCE_STOP_CMPT_CODE) { + sdk_info(cmdq->hwdev->dev_hdl, "Force stop cmdq cmd, mod: %u, cmd: %u\n", mod, cmd); + err = -EAGAIN; + } + + destroy_completion(&done); + smp_rmb(); /* read error code after completion */ + + return (err != 0) ? err : errcode; +} + +static int cmdq_sync_cmd_direct_resp(struct hinic5_cmdq *cmdq, u8 mod, u8 cmd, + struct hinic5_cmd_buf *buf_in, u64 *out_param, + u32 timeout, u16 channel) +{ + return cmdq_sync_cmd(cmdq, mod, cmd, buf_in, NULL, + out_param, timeout, channel, + HINIC5_CMD_TYPE_DIRECT_RESP); +} + +static int cmdq_sync_cmd_detail_resp(struct hinic5_cmdq *cmdq, u8 mod, u8 cmd, + struct hinic5_cmd_buf *buf_in, + struct hinic5_cmd_buf *buf_out, + u64 *out_param, u32 timeout, u16 channel) +{ + return cmdq_sync_cmd(cmdq, mod, cmd, buf_in, buf_out, + out_param, timeout, channel, + HINIC5_CMD_TYPE_SGE_RESP); +} + +static int cmdq_async_cmd(struct hinic5_cmdq *cmdq, u8 mod, u8 cmd, + struct hinic5_cmd_buf *buf_in, u16 channel) +{ + struct hinic5_cmdq_cmd_info *cmd_info = NULL; + struct hinic5_wq *wq = &cmdq->wq; + int wqe_size; + u16 curr_prod_idx, next_prod_idx, wqebb_use_num; + struct hinic5_cmdq_wqe *curr_wqe = NULL, wqe; + struct hinic5_cmdq_cmd_param cmd_buf; + int wrapped, err; + + wqe_size = cmdq->cmdqs->cmdq_mode == HINIC5_NORMAL_CMDQ ? 
+ WQE_LCMD_SIZE : WQE_ENHANCED_CMDQ_SIZE; + + /* enhance cmdq wqe_size aligned with 64 */ + wqe_size = ALIGN((u32)wqe_size, 64); + + err = cmdq_msg_lock(cmdq, channel); + if (err != 0) + return err; + + wqebb_use_num = cmdq->cmdqs->wqebb_use_num; + curr_wqe = cmdq_get_wqe(wq, &curr_prod_idx, wqebb_use_num); + if (!curr_wqe) { + cmdq_msg_unlock(cmdq); + return -EBUSY; + } + + memset(&wqe, 0, sizeof(wqe)); + + wrapped = cmdq->wrapped; + + cmdq_update_next_prod_idx(cmdq, curr_prod_idx, &next_prod_idx, wqebb_use_num); + + if (cmdq->cmdqs->cmdq_mode == HINIC5_NORMAL_CMDQ) { + cmdq_set_lcmd_wqe(&wqe, HINIC5_CMD_TYPE_ASYNC, buf_in, NULL, wrapped, + mod, cmd, curr_prod_idx); + } else { + cmd_buf.buf_in = buf_in; + cmd_buf.buf_out = NULL; + cmd_buf.cmd = cmd; + cmd_buf.mod = mod; + enhanced_cmdq_set_wqe(&wqe, HINIC5_CMD_TYPE_ASYNC, &cmd_buf, wrapped); + } + + /* The data that is written to HW should be in Big Endian Format */ + hinic5_hw_be32_len(&wqe, wqe_size); + cmdq_wqe_fill(curr_wqe, &wqe, wqe_size); + + cmd_info = &cmdq->cmd_infos[curr_prod_idx]; + cmd_info->cmd_type = HINIC5_CMD_TYPE_ASYNC; + cmd_info->channel = channel; + cmd_info->wqebb_use_num = wqebb_use_num; + /* The caller will not free the cmd_buf of the asynchronous command, + * so there is no need to increase the reference count here + */ + cmd_info->buf_in = buf_in; + + /* LB mode 1 compatible, cmdq 0 also for async, which is sync_no_wait */ + cmdq_set_db(cmdq, HINIC5_CMDQ_SYNC, next_prod_idx); + + cmdq_msg_unlock(cmdq); + + return 0; +} + +static int cmdq_inline_data_params_valid(const void *hwdev, + const struct hinic5_cmdq_cmd_param *cmd_param) +{ + if (!cmd_param || !cmd_param->buf_in || !cmd_param->buf_out || !hwdev) { + pr_err("Invalid CMDQ buffer addr or hwdev\n"); + return -EINVAL; + } + + if (cmd_param->buf_in->size > HINIC5_CMDQ_MAX_INLINE_DATA_SIZE || + (cmd_param->buf_in->size % 64) != 32) { /* % 64: inline data must be 32B/96B/160B */ + pr_err("Invalid CMDQ buffer in size: 0x%x\n", 
cmd_param->buf_in->size); + return -EINVAL; + } + + if (cmd_param->buf_out->size == 0 || + cmd_param->buf_out->size > HINIC5_CMDQ_MAX_INLINE_DATA_SIZE) { + pr_err("Invalid CMDQ buffer out size: 0x%x\n", cmd_param->buf_out->size); + return -EINVAL; + } + return 0; +} + +static int cmdq_params_valid(const void *hwdev, const struct hinic5_cmd_buf *buf_in) +{ + struct hinic5_cmdqs *cmdqs = NULL; + + if (!buf_in || !hwdev) { + pr_err("Invalid CMDQ buffer addr or hwdev\n"); + return -EINVAL; + } + + cmdqs = ((struct hinic5_hwdev *)hwdev)->cmdqs; + if (!cmdqs || buf_in->size < HINIC5_CMDQ_MIN_BUF_SIZE || + buf_in->size > cmdqs->cmd_buf_size) { + pr_err("Invalid cmdqs addr or CMDQ buffer size: 0x%x\n", buf_in->size); + return -EINVAL; + } + + return 0; +} + +#define WAIT_CMDQ_ENABLE_TIMEOUT 300 +static int wait_cmdqs_enable(struct hinic5_cmdqs *cmdqs) +{ + ulong end; + + if (!cmdqs) + return -EINVAL; + + end = jiffies + msecs_to_jiffies(WAIT_CMDQ_ENABLE_TIMEOUT); + do { + if ((cmdqs->status & HINIC5_CMDQ_ENABLE) != 0) + return 0; + } while (time_before(jiffies, end) && hinic5_is_chip_present(cmdqs->hwdev) && + (cmdqs->disable_flag == 0)); + + cmdqs->disable_flag = 1; + + return -EBUSY; +} + +static int check_cmdq_ready(struct hinic5_hwdev *hwdev, struct hinic5_cmdqs *cmdqs) +{ + int err; + + if (!get_card_present_state(hwdev)) + return -EPERM; + + if (check_outbound_enable_handler(hwdev) != + WAIT_PROCESS_CPL) { + return -EPERM; + } + + err = wait_cmdqs_enable(cmdqs); + if (err != 0) { + sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disable\n"); + return err; + } + return 0; +} + +static void cmdq_cmd_cost_time(struct hinic5_cmdqs *cmdqs, u8 mod, u8 cmd, struct timeval start) +{ + struct timeval end = {0}; + u64 cost_usec; + + if (hinic5_get_perf_en(HINIC5_CMDQ_PERF)) { + do_gettimeofday(&end); + cost_usec = (u64)((end.tv_sec - start.tv_sec) * MSEC_PER_SEC * + USEC_PER_MSEC + end.tv_usec - start.tv_usec); + sdk_info(cmdqs->hwdev->dev_hdl, + "Cmdq mod: %u cmd: %u, cost time: %llu 
us\n", mod, cmd, cost_usec); + } +} + +int hinic5_cos_id_direct_resp(void *hwdev, u8 mod, u8 cmd, u16 cos_id, + struct hinic5_cmd_buf *buf_in, u64 *out_param, + u32 timeout, u16 channel) +{ + struct hinic5_cmdqs *cmdqs = NULL; + struct timeval start = {0}; + int err; + + if (hinic5_get_perf_en(HINIC5_CMDQ_PERF)) + do_gettimeofday(&start); + + err = cmdq_params_valid(hwdev, buf_in); + if (err != 0) { + pr_err("Invalid CMDQ parameters\n"); + return err; + } + + cmdqs = ((struct hinic5_hwdev *)hwdev)->cmdqs; + err = check_cmdq_ready((struct hinic5_hwdev *)hwdev, cmdqs); + if (err != 0) + return err; + + if (cos_id >= cmdqs->cmdq_num) { + sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq id is invalid\n"); + return -EINVAL; + } + + err = cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[cos_id], mod, cmd, buf_in, + out_param, timeout, channel); + if (err != 0) { + sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq direct_resp fail\n"); + return err; + } + + if (!hinic5_is_chip_present(hwdev)) + return -ETIMEDOUT; + + cmdq_cmd_cost_time(cmdqs, mod, cmd, start); + return err; +} + +int hinic5_cmdq_direct_resp(void *hwdev, u8 mod, u8 cmd, + struct hinic5_cmd_buf *buf_in, u64 *out_param, + u32 timeout, u16 channel) +{ + return hinic5_cos_id_direct_resp(hwdev, mod, cmd, HINIC5_CMDQ_SYNC, buf_in, out_param, + timeout, channel); +} +EXPORT_SYMBOL(hinic5_cmdq_direct_resp); + +int hinic5_cmdq_inline_data(void *hwdev, struct hinic5_cmdq_cmd_param *cmd_param, + u32 timeout, u16 channel) +{ + struct hinic5_cmdqs *cmdqs = NULL; + struct timeval start = {0}; + int err; + + if (hinic5_get_perf_en(HINIC5_CMDQ_PERF)) + do_gettimeofday(&start); + + err = cmdq_inline_data_params_valid(hwdev, cmd_param); + if (err != 0) + return err; + + cmdqs = ((struct hinic5_hwdev *)hwdev)->cmdqs; + err = check_cmdq_ready((struct hinic5_hwdev *)hwdev, cmdqs); + if (err != 0) + return err; + + err = cmdq_sync_cmd(&cmdqs->cmdq[HINIC5_CMDQ_SYNC], + cmd_param->mod, cmd_param->cmd, cmd_param->buf_in, cmd_param->buf_out, + cmd_param->out_param, 
timeout, channel, HINIC5_CMD_TYPE_INLINE_DATA); + if (!hinic5_is_chip_present(hwdev)) + return -ETIMEDOUT; + + cmdq_cmd_cost_time(cmdqs, cmd_param->mod, cmd_param->cmd, start); + return err; +} +EXPORT_SYMBOL(hinic5_cmdq_inline_data); + +int hinic5_cmdq_detail_resp(void *hwdev, u8 mod, u8 cmd, + struct hinic5_cmd_buf *buf_in, + struct hinic5_cmd_buf *buf_out, + u64 *out_param, u32 timeout, u16 channel) +{ + return hinic5_cos_id_detail_resp(hwdev, mod, cmd, HINIC5_CMDQ_SYNC, + buf_in, buf_out, out_param, timeout, channel); +} +EXPORT_SYMBOL(hinic5_cmdq_detail_resp); + +int hinic5_send_fast_msg_need_resp(void *hwdev, u8 mod, u8 cmd, + struct hinic5_cmd_buf *buf_in, u64 *out_param) +{ + struct hinic5_hwdev *dev = hwdev; + struct hinic5_cmdqs *cmdqs = NULL; + struct timeval start = {0}; + int err; + u32 fast_msg_qid = HINIC5_CMDQ_FAST_MSG; + + if (hinic5_get_perf_en(HINIC5_CMDQ_PERF)) + do_gettimeofday(&start); + + err = cmdq_params_valid(hwdev, buf_in); + if (err != 0) + return err; + + if ((!COMM_SUPPORT_FAST_MSG(dev)) || dev->glb_attr.cmdq_num < fast_msg_qid) + return -EPERM; + + /* cmdq数量等于2时, fast_msg混用async queue */ + if (dev->glb_attr.cmdq_num == fast_msg_qid) + fast_msg_qid = HINIC5_CMDQ_ASYNC; + + cmdqs = dev->cmdqs; + err = check_cmdq_ready(dev, cmdqs); + if (err != 0) + return err; + + err = cmdq_sync_cmd_detail_resp(&cmdqs->cmdq[fast_msg_qid], + mod, cmd, buf_in, buf_in, out_param, + 0, HINIC5_CHANNEL_COMM); + if (!hinic5_is_chip_present(dev)) + return -ETIMEDOUT; + + cmdq_cmd_cost_time(cmdqs, mod, cmd, start); + return err; +} + +int hinic5_cos_id_detail_resp(void *hwdev, u8 mod, u8 cmd, u8 cos_id, + struct hinic5_cmd_buf *buf_in, + struct hinic5_cmd_buf *buf_out, u64 *out_param, + u32 timeout, u16 channel) +{ + struct hinic5_cmdqs *cmdqs = NULL; + struct timeval start = {0}; + int err; + + if (hinic5_get_perf_en(HINIC5_CMDQ_PERF)) + do_gettimeofday(&start); + + err = cmdq_params_valid(hwdev, buf_in); + if (err != 0) + return err; + + cmdqs = ((struct 
hinic5_hwdev *)hwdev)->cmdqs; + err = check_cmdq_ready((struct hinic5_hwdev *)hwdev, cmdqs); + if (err != 0) + return err; + + if (cos_id >= cmdqs->cmdq_num) { + sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq id is invalid\n"); + return -EINVAL; + } + + err = cmdq_sync_cmd_detail_resp(&cmdqs->cmdq[cos_id], mod, cmd, + buf_in, buf_out, out_param, + timeout, channel); + if (!hinic5_is_chip_present(hwdev)) + return -ETIMEDOUT; + + cmdq_cmd_cost_time(cmdqs, mod, cmd, start); + return err; +} +EXPORT_SYMBOL(hinic5_cos_id_detail_resp); + +int hinic5_cmdq_async(void *hwdev, u8 mod, u8 cmd, struct hinic5_cmd_buf *buf_in, u16 channel) +{ + struct hinic5_cmdqs *cmdqs = NULL; + int err; + + err = cmdq_params_valid(hwdev, buf_in); + if (err != 0) + return err; + + cmdqs = ((struct hinic5_hwdev *)hwdev)->cmdqs; + + err = check_cmdq_ready((struct hinic5_hwdev *)hwdev, cmdqs); + if (err != 0) + return err; + /* LB mode 1 compatible, cmdq 0 also for async, which is sync_no_wait */ + return cmdq_async_cmd(&cmdqs->cmdq[HINIC5_CMDQ_SYNC], mod, + cmd, buf_in, channel); +} +EXPORT_SYMBOL(hinic5_cmdq_async); + +static void clear_wqe_complete_bit(struct hinic5_cmdq *cmdq, + struct hinic5_cmdq_wqe *wqe, u16 ci) +{ + struct hinic5_ctrl *ctrl = NULL; + u32 header_info; + enum data_format df; + + if (cmdq->hwdev->cmdq_mode == HINIC5_NORMAL_CMDQ) { + header_info = hinic5_hw_cpu32(WQE_HEADER((void *)wqe)->header_info); + df = CMDQ_WQE_HEADER_GET(header_info, DATA_FMT); + if (df == DATA_SGE) + ctrl = &wqe->wqe_lcmd.ctrl; + else + ctrl = &wqe->inline_wqe.wqe_scmd.ctrl; + + ctrl->ctrl_info = 0; /* clear HW busy bit */ + } else { + wqe->enhanced_cmdq_wqe.completion.cs_format = 0; /* clear HW busy bit */ + } + + cmdq->cmd_infos[ci].cmd_type = HINIC5_CMD_TYPE_NONE; + + wmb(); /* verify wqe is clear */ + + hinic5_wq_put_wqebbs(&cmdq->wq, cmdq->cmd_infos[ci].wqebb_use_num); +} + +static void cmdq_sync_cmd_handler(struct hinic5_cmdq *cmdq, + struct hinic5_cmdq_wqe *wqe, u16 ci) +{ + /* cmdq already locked in 
poll mode */ + if (!cmdq->cmdqs->poll) + spin_lock(&cmdq->cmdq_lock); + + cmdq_update_cmd_status(cmdq, ci, wqe); + + if (cmdq->cmd_infos[ci].cmpt_code) { + *cmdq->cmd_infos[ci].cmpt_code = CMDQ_COMPLETE_CMPT_CODE; + cmdq->cmd_infos[ci].cmpt_code = NULL; + } + + /* make sure cmpt_code operation before done operation */ + smp_rmb(); + + if (cmdq->cmd_infos[ci].done) { + complete(cmdq->cmd_infos[ci].done); + cmdq->cmd_infos[ci].done = NULL; + } + + if (!cmdq->cmdqs->poll) + spin_unlock(&cmdq->cmdq_lock); + + cmdq_clear_cmd_buf(&cmdq->cmd_infos[ci], cmdq->hwdev); + clear_wqe_complete_bit(cmdq, wqe, ci); +} + +static void cmdq_async_cmd_handler(struct hinic5_hwdev *hwdev, + struct hinic5_cmdq *cmdq, + struct hinic5_cmdq_wqe *wqe, u16 ci) +{ + cmdq_clear_cmd_buf(&cmdq->cmd_infos[ci], hwdev); + clear_wqe_complete_bit(cmdq, wqe, ci); +} + +#define HINIC5_CMDQ_WQE_HEAD_LEN 32 +static void hinic5_dump_cmdq_wqe_head(struct hinic5_hwdev *hwdev, + struct hinic5_cmdq_wqe *wqe) +{ + u32 i; + u32 *data = (u32 *)(void *)wqe; + + for (i = 0; i < (HINIC5_CMDQ_WQE_HEAD_LEN / sizeof(u32)); i += 0x4) { + sdk_info(hwdev->dev_hdl, "wqe data: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", + *(data + i), *(data + i + 0x1), *(data + i + 0x2), + *(data + i + 0x3)); + } +} + +static int cmdq_type_default_ceq_handler(struct hinic5_hwdev *hwdev, + struct hinic5_cmdq_cmd_info *cmd_info, + struct hinic5_cmdq *cmdq, + struct hinic5_cmdq_wqe *wqe, u16 ci) +{ + struct hinic5_cmdq_wqe_lcmd *wqe_lcmd = NULL; + struct hinic5_ctrl *ctrl = NULL; + u32 ctrl_info; + + if (hwdev->cmdq_mode == HINIC5_NORMAL_CMDQ) { + /* only arm bit is using scmd wqe, the wqe is lcmd */ + wqe_lcmd = &wqe->wqe_lcmd; + ctrl = &wqe_lcmd->ctrl; + ctrl_info = hinic5_hw_cpu32((ctrl)->ctrl_info); + + if (WQE_COMPLETED(ctrl_info) == 0) + return -EBUSY; + } else { + ctrl_info = wqe->enhanced_cmdq_wqe.completion.cs_format; + ctrl_info = hinic5_hw_cpu32(ctrl_info); + if (ENHANCE_CMDQ_WQE_CS_GET(ctrl_info, HW_BUSY) == 0) + return -EBUSY; + } + 
dma_rmb(); + /* For FORCE_STOP cmd_type, we also need to wait for + * the firmware processing to complete to prevent the + * firmware from accessing the released cmd_buf + */ + if (cmd_info->cmd_type == HINIC5_CMD_TYPE_FORCE_STOP) { + cmdq_clear_cmd_buf(cmd_info, hwdev); + clear_wqe_complete_bit(cmdq, wqe, ci); + } else if (cmd_info->cmd_type == HINIC5_CMD_TYPE_ASYNC) { + cmdq_async_cmd_handler(hwdev, cmdq, wqe, ci); + } else { + cmdq_sync_cmd_handler(cmdq, wqe, ci); + } + + return 0; +} + +void hinic5_cmdq_ceq_handler(void *handle, u32 ceqe_data) +{ + struct hinic5_cmdqs *cmdqs = ((struct hinic5_hwdev *)handle)->cmdqs; + enum hinic5_cmdq_type cmdq_type = CEQE_CMDQ_GET(ceqe_data, TYPE); + struct hinic5_cmdq *cmdq = NULL; + struct hinic5_hwdev *hwdev = cmdqs->hwdev; + struct hinic5_cmdq_wqe *wqe = NULL; + struct hinic5_cmdq_cmd_info *cmd_info = NULL; + u16 ci; + int err; + + if (cmdq_type >= HINIC5_MAX_CMDQ_TYPES) { + sdk_err(hwdev->dev_hdl, "Cmdq type invalid, type: %u\n", cmdq_type); + return; + } + cmdq = &cmdqs->cmdq[cmdq_type]; + + while ((wqe = cmdq_read_wqe(&cmdq->wq, &ci)) != NULL) { + cmd_info = &cmdq->cmd_infos[ci]; + switch (cmd_info->cmd_type) { + case HINIC5_CMD_TYPE_NONE: + return; + case HINIC5_CMD_TYPE_TIMEOUT: + sdk_warn(hwdev->dev_hdl, "Cmdq timeout, q_id: %u, ci: %u\n", cmdq_type, ci); + hinic5_dump_cmdq_wqe_head(hwdev, wqe); + cmdq_clear_cmd_buf(cmd_info, hwdev); + clear_wqe_complete_bit(cmdq, wqe, ci); + break; + case HINIC5_CMD_TYPE_FAKE_TIMEOUT: + cmdq_clear_cmd_buf(cmd_info, hwdev); + clear_wqe_complete_bit(cmdq, wqe, ci); + break; + default: + err = cmdq_type_default_ceq_handler(hwdev, cmd_info, cmdq, wqe, ci); + if (err != 0) + return; + break; + } + } +} + +static void cmdq_init_queue_ctxt(struct hinic5_cmdqs *cmdqs, + struct hinic5_cmdq *cmdq, + struct cmdq_ctxt_info *ctxt_info) +{ + struct hinic5_wq *wq = &cmdq->wq; + u64 cmdq_first_block_paddr, pfn; + u16 start_ci = (u16)wq->cons_idx; + + pfn = 
CMDQ_PFN(hinic5_wq_get_first_wqe_page_addr(wq));
+
+	ctxt_info->curr_wqe_page_pfn =
+		CMDQ_CTXT_PAGE_INFO_SET(1, HW_BUSY_BIT) |
+#ifndef __UEFI__
+		CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) |
+		CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM) |
+#else
+		/* UEFI build runs in poll mode: keep CEQ event generation off */
+		CMDQ_CTXT_PAGE_INFO_SET(0, CEQ_EN) |
+		CMDQ_CTXT_PAGE_INFO_SET(0, CEQ_ARM) |
+#endif
+		CMDQ_CTXT_PAGE_INFO_SET(HINIC5_CEQ_ID_CMDQ, EQ_ID) |
+		CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN);
+
+	/* 1-level CLA: the block PFN points at the shared WQ block instead */
+	if (!WQ_IS_0_LEVEL_CLA(wq)) {
+		cmdq_first_block_paddr = cmdqs->wq_block_paddr;
+		pfn = CMDQ_PFN(cmdq_first_block_paddr);
+	}
+
+	ctxt_info->wq_block_pfn = CMDQ_CTXT_BLOCK_INFO_SET(start_ci, CI) |
+				  CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN);
+}
+
+/* Initialize one cmdq instance; allocates the per-WQE cmd_info array. */
+static int init_cmdq(struct hinic5_cmdq *cmdq, struct hinic5_hwdev *hwdev,
+		     enum hinic5_cmdq_type q_type)
+{
+	int err;
+
+	cmdq->cmdq_type = q_type;
+	cmdq->wrapped = 1;
+	cmdq->hwdev = hwdev;
+	cmdq->cmdqs = hwdev->cmdqs;
+
+	spin_lock_init(&cmdq->cmdq_lock);
+
+	cmdq->cmd_infos = kcalloc(cmdq->wq.q_depth, sizeof(*cmdq->cmd_infos),
+				  GFP_KERNEL);
+	if (!cmdq->cmd_infos) {
+		err = -ENOMEM;
+		goto cmd_infos_err;
+	}
+
+	return 0;
+
+cmd_infos_err:
+	spin_lock_deinit(&cmdq->cmdq_lock);
+
+	return err;
+}
+
+/* Counterpart of init_cmdq(): release the cmd_info array and the lock. */
+static void free_cmdq(struct hinic5_cmdq *cmdq)
+{
+	kfree(cmdq->cmd_infos);
+	spin_lock_deinit(&cmdq->cmdq_lock);
+}
+
+/* Program every cmdq's context into hardware and mark cmdqs enabled. */
+static int hinic5_set_cmdq_ctxts(struct hinic5_hwdev *hwdev)
+{
+	struct hinic5_cmdqs *cmdqs = hwdev->cmdqs;
+	struct enhance_cmdq_ctxt_info *ctxt = NULL;
+	u8 cmdq_type;
+	int err;
+
+	cmdq_type = HINIC5_CMDQ_SYNC;
+	for (; cmdq_type < cmdqs->cmdq_num; cmdq_type++) {
+		if (cmdqs->cmdq_mode == HINIC5_NORMAL_CMDQ) {
+			err = hinic5_set_cmdq_ctxt(hwdev, (u8)cmdq_type,
+						   &cmdqs->cmdq[cmdq_type].cmdq_ctxt);
+		} else {
+			ctxt = &cmdqs->cmdq[cmdq_type].cmdq_enhance_ctxt;
+			err = hinic5_set_enhance_cmdq_ctxt(hwdev, (u8)cmdq_type, ctxt);
+		}
+		if (err != 0)
+			return err;
+	}
+
+	cmdqs->status |= HINIC5_CMDQ_ENABLE;
+	cmdqs->disable_flag = 0;
+
+	return 0;
+}
+
+static void
cmdq_flush_sync_cmd(struct hinic5_cmdq_cmd_info *cmd_info)
+{
+	if (cmd_info->cmd_type != HINIC5_CMD_TYPE_DIRECT_RESP &&
+	    cmd_info->cmd_type != HINIC5_CMD_TYPE_SGE_RESP)
+		return;
+
+	cmd_info->cmd_type = HINIC5_CMD_TYPE_FORCE_STOP;
+
+	if (cmd_info->cmpt_code &&
+	    *cmd_info->cmpt_code == CMDQ_SEND_CMPT_CODE)
+		*cmd_info->cmpt_code = CMDQ_FORCE_STOP_CMPT_CODE;
+
+	/* Wake the waiter and detach its result pointers so the completed
+	 * command cannot write through them later.
+	 */
+	if (cmd_info->done) {
+		complete(cmd_info->done);
+		cmd_info->done = NULL;
+		cmd_info->cmpt_code = NULL;
+		cmd_info->direct_resp = NULL;
+		cmd_info->errcode = NULL;
+	}
+}
+
+/* Return all pending WQEs of @cmdq and force-complete their sync waiters. */
+void hinic5_cmdq_flush_cmd(struct hinic5_hwdev *hwdev,
+			   struct hinic5_cmdq *cmdq)
+{
+	struct hinic5_cmdq_cmd_info *cmd_info = NULL;
+	u16 ci = 0;
+
+	spin_lock_bh(&cmdq->cmdq_lock);
+
+	while (cmdq_read_wqe(&cmdq->wq, &ci)) {
+		cmd_info = &cmdq->cmd_infos[ci];
+		hinic5_wq_put_wqebbs(&cmdq->wq, cmd_info->wqebb_use_num);
+
+		if (cmd_info->cmd_type == HINIC5_CMD_TYPE_DIRECT_RESP ||
+		    cmd_info->cmd_type == HINIC5_CMD_TYPE_SGE_RESP)
+			cmdq_flush_sync_cmd(cmd_info);
+	}
+
+	spin_unlock_bh(&cmdq->cmdq_lock);
+}
+
+/* Force-complete only the sync-cmdq commands that were sent on @channel. */
+static void hinic5_cmdq_flush_channel_sync_cmd(struct hinic5_hwdev *hwdev, u16 channel)
+{
+	struct hinic5_cmdq_cmd_info *cmd_info = NULL;
+	struct hinic5_cmdq *cmdq = NULL;
+	struct hinic5_wq *wq = NULL;
+	u16 wqe_cnt, ci, i;
+
+	if (channel >= HINIC5_CHANNEL_MAX)
+		return;
+
+	cmdq = &hwdev->cmdqs->cmdq[HINIC5_CMDQ_SYNC];
+
+	spin_lock_bh(&cmdq->cmdq_lock);
+
+	wq = &cmdq->wq;
+	ci = wq->cons_idx;
+	/* number of in-flight WQEs between ci and pi (indices are masked) */
+	wqe_cnt = (u16)WQ_MASK_IDX(wq, wq->prod_idx +
+				   wq->q_depth - wq->cons_idx);
+	for (i = 0; i < wqe_cnt; i++) {
+		cmd_info = &cmdq->cmd_infos[WQ_MASK_IDX(wq, ci + i)];
+		if (cmd_info->channel == channel)
+			cmdq_flush_sync_cmd(cmd_info);
+	}
+
+	spin_unlock_bh(&cmdq->cmdq_lock);
+}
+
+/* Force-complete every in-flight command on the sync cmdq. */
+void hinic5_cmdq_flush_sync_cmd(struct hinic5_hwdev *hwdev)
+{
+	struct hinic5_cmdq_cmd_info *cmd_info = NULL;
+	struct hinic5_cmdq *cmdq = NULL;
+	struct hinic5_wq *wq = NULL;
+	u16 wqe_cnt, ci, i;
+
+	cmdq = &hwdev->cmdqs->cmdq[HINIC5_CMDQ_SYNC];
+
+	spin_lock_bh(&cmdq->cmdq_lock);
+
+	wq = &cmdq->wq;
+	ci = wq->cons_idx;
+	wqe_cnt = (u16)WQ_MASK_IDX(wq, wq->prod_idx +
+				   wq->q_depth - wq->cons_idx);
+	for (i = 0; i < wqe_cnt; i++) {
+		cmd_info = &cmdq->cmd_infos[WQ_MASK_IDX(wq, ci + i)];
+		cmdq_flush_sync_cmd(cmd_info);
+	}
+
+	spin_unlock_bh(&cmdq->cmdq_lock);
+}
+
+/* Release every command buffer of @cmdq (used after a flush/reset). */
+static void cmdq_reset_all_cmd_buff(struct hinic5_cmdq *cmdq)
+{
+	u16 i;
+
+	if (!cmdq) {
+		pr_err("cmdq is null\n");
+		return;
+	}
+
+	for (i = 0; i < cmdq->wq.q_depth; i++)
+		cmdq_clear_cmd_buf(&cmdq->cmd_infos[i], cmdq->hwdev);
+}
+
+/* Enable/disable sending on @channel; disabling flushes its sync commands. */
+int hinic5_cmdq_set_channel_status(struct hinic5_hwdev *hwdev, u16 channel,
+				   bool enable)
+{
+	if (channel >= HINIC5_CHANNEL_MAX)
+		return -EINVAL;
+
+	if (enable) {
+		clear_bit(channel, &hwdev->cmdqs->channel_stop);
+	} else {
+		set_bit(channel, &hwdev->cmdqs->channel_stop);
+		hinic5_cmdq_flush_channel_sync_cmd(hwdev, channel);
+	}
+
+	sdk_info(hwdev->dev_hdl, "%s cmdq channel 0x%x\n",
+		 enable ? "Enable" : "Disable", channel);
+
+	return 0;
+}
+
+void hinic5_cmdq_enable_channel_lock(struct hinic5_hwdev *hwdev, bool enable)
+{
+	hwdev->cmdqs->lock_channel_en = enable;
+
+	sdk_info(hwdev->dev_hdl, "%s cmdq channel lock\n",
+		 enable ?
"Enable" : "Disable"); +} + +int hinic5_reinit_cmdq_ctxts(struct hinic5_hwdev *hwdev) +{ + struct hinic5_cmdqs *cmdqs = hwdev->cmdqs; + u8 cmdq_type; + + cmdq_type = HINIC5_CMDQ_SYNC; + for (; cmdq_type < cmdqs->cmdq_num; cmdq_type++) { + hinic5_cmdq_flush_cmd(hwdev, &cmdqs->cmdq[cmdq_type]); + cmdq_reset_all_cmd_buff(&cmdqs->cmdq[cmdq_type]); + cmdqs->cmdq[cmdq_type].wrapped = 1; + hinic5_wq_reset(&cmdqs->cmdq[cmdq_type].wq); + } + + return hinic5_set_cmdq_ctxts(hwdev); +} + +static int create_cmdq_wq(struct hinic5_cmdqs *cmdqs) +{ + u8 type, cmdq_type; + int err = 0; + + cmdq_type = HINIC5_CMDQ_SYNC; + for (; cmdq_type < cmdqs->cmdq_num; cmdq_type++) { + err = hinic5_wq_create(cmdqs->hwdev, &cmdqs->cmdq[cmdq_type].wq, + HINIC5_CMDQ_DEPTH, cmdqs->wqebb_size); + if (err != 0) { + sdk_err(cmdqs->hwdev->dev_hdl, "Failed to create cmdq wq\n"); + goto destroy_wq; + } + } + + /* 1-level CLA must put all cmdq's wq page addr in one wq block */ + if (!WQ_IS_0_LEVEL_CLA(&cmdqs->cmdq[HINIC5_CMDQ_SYNC].wq)) { + /* cmdq wq's CLA table is up to 512B */ +#define CMDQ_WQ_CLA_SIZE 512 + if (cmdqs->cmdq[HINIC5_CMDQ_SYNC].wq.num_wq_pages > + CMDQ_WQ_CLA_SIZE / sizeof(u64)) { + err = -EINVAL; + sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq wq page exceed limit: %lu\n", + CMDQ_WQ_CLA_SIZE / sizeof(u64)); + goto destroy_wq; + } + + cmdqs->wq_block_vaddr = + dma_zalloc_coherent(cmdqs->hwdev->dev_hdl, PAGE_SIZE, + &cmdqs->wq_block_paddr, GFP_KERNEL); + if (!cmdqs->wq_block_vaddr) { + err = -ENOMEM; + sdk_err(cmdqs->hwdev->dev_hdl, "Failed to alloc cmdq wq block\n"); + goto destroy_wq; + } + + type = HINIC5_CMDQ_SYNC; + for (; type < cmdqs->cmdq_num; type++) { + memcpy((u8 *)cmdqs->wq_block_vaddr + ((u64)type *CMDQ_WQ_CLA_SIZE), + cmdqs->cmdq[type].wq.wq_block_vaddr, + cmdqs->cmdq[type].wq.num_wq_pages * sizeof(u64)); + } + } + + return 0; + +destroy_wq: + type = HINIC5_CMDQ_SYNC; + for (; type < cmdq_type; type++) + hinic5_wq_destroy(&cmdqs->cmdq[type].wq); + + return err; +} + +static void 
destroy_cmdq_wq(struct hinic5_cmdqs *cmdqs) +{ + u8 cmdq_type; + + if (cmdqs->wq_block_vaddr) + dma_free_coherent(cmdqs->hwdev->dev_hdl, PAGE_SIZE, + cmdqs->wq_block_vaddr, cmdqs->wq_block_paddr); + + cmdq_type = HINIC5_CMDQ_SYNC; + for (; cmdq_type < cmdqs->cmdq_num; cmdq_type++) + hinic5_wq_destroy(&cmdqs->cmdq[cmdq_type].wq); +} + +static int init_cmdqs(struct hinic5_hwdev *hwdev) +{ + struct hinic5_cmdqs *cmdqs = NULL; + + cmdqs = kzalloc(sizeof(*cmdqs), GFP_KERNEL); + if (!cmdqs) + return -ENOMEM; + + hwdev->cmdqs = cmdqs; + cmdqs->hwdev = hwdev; + if (HINIC5_HWIF_NUM_CEQS(hwdev->hwif) == 0 || hwdev->poll != 0) + cmdqs->poll = true; + + if (COMM_SUPPORT_ONLY_ENHANCE_CMDQ(hwdev)) + cmdqs->cmdq_mode = HINIC5_ENHANCE_CMDQ; + else + cmdqs->cmdq_mode = HINIC5_NORMAL_CMDQ; + + hwdev->cmdq_mode = cmdqs->cmdq_mode; + + if (cmdqs->cmdq_mode == HINIC5_NORMAL_CMDQ) { + cmdqs->wqebb_size = CMDQ_WQEBB_SIZE; + cmdqs->wqebb_use_num = NUM_WQEBBS_FOR_CMDQ_WQE; + } else { + cmdqs->wqebb_size = ENHANCE_CMDQ_WQEBB_SIZE; + cmdqs->wqebb_use_num = NUM_WQEBBS_FOR_ENHANCE_CMDQ_WQE; + } + + cmdqs->cmdq_num = HINIC5_MAX_CMDQ_TYPES; + if (COMM_SUPPORT_CMDQ_NUM(hwdev)) { + if (hwdev->glb_attr.cmdq_num <= HINIC5_MAX_CMDQ_TYPES) + cmdqs->cmdq_num = hwdev->glb_attr.cmdq_num; + else + sdk_warn(hwdev->dev_hdl, "Adjust cmdq num to %d\n", HINIC5_MAX_CMDQ_TYPES); + } + + cmdqs->cmd_buf_size = HINIC5_CMDQ_MAX_BUF_SIZE; + if (COMM_SUPPORT_CMD_BUF_SIZE(hwdev)) { + if (hwdev->glb_attr.cmd_buf_size <= HINIC5_CMDQ_MAX_BUF_SIZE) + cmdqs->cmd_buf_size = hwdev->glb_attr.cmd_buf_size; + else + sdk_warn(hwdev->dev_hdl, + "Adjust cmd buf size to %d\n", HINIC5_CMDQ_MAX_BUF_SIZE); + } + + cmdqs->cmd_buf_pool = dma_pool_create("hinic5_cmdq", hwdev->dev_hdl, cmdqs->cmd_buf_size, + HINIC5_CMDQ_BUF_ALIGN, 0ULL); + if (!cmdqs->cmd_buf_pool) { + sdk_err(hwdev->dev_hdl, "Failed to create cmdq buffer pool\n"); + kfree(cmdqs); + return -ENOMEM; + } + + return 0; +} + +int hinic5_cmdqs_init(struct hinic5_hwdev *hwdev) 
+{ + struct hinic5_cmdqs *cmdqs = NULL; + void __iomem *db_base = NULL; + u8 type, cmdq_type; + int err = -ENOMEM; + + err = init_cmdqs(hwdev); + if (err != 0) + return err; + + cmdqs = hwdev->cmdqs; + + err = create_cmdq_wq(cmdqs); + if (err != 0) + goto create_wq_err; + + err = hinic5_alloc_db_addr(hwdev, &db_base, NULL); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to allocate doorbell address\n"); + goto alloc_db_err; + } + + cmdqs->cmdqs_db_base = (u8 *)db_base; + for (cmdq_type = HINIC5_CMDQ_SYNC; cmdq_type < cmdqs->cmdq_num; cmdq_type++) { + err = init_cmdq(&cmdqs->cmdq[cmdq_type], hwdev, cmdq_type); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to initialize cmdq type :%u\n", cmdq_type); + goto init_cmdq_err; + } + + if (cmdqs->cmdq_mode == HINIC5_NORMAL_CMDQ) + cmdq_init_queue_ctxt(cmdqs, &cmdqs->cmdq[cmdq_type], + &cmdqs->cmdq[cmdq_type].cmdq_ctxt); + else /* HINIC5_ENHANCE_CMDQ */ + enhanced_cmdq_init_queue_ctxt(cmdqs, &cmdqs->cmdq[cmdq_type]); + } + + err = hinic5_set_cmdq_ctxts(hwdev); + if (err != 0) + goto init_cmdq_err; + + return 0; + +init_cmdq_err: + for (type = HINIC5_CMDQ_SYNC; type < cmdq_type; type++) + free_cmdq(&cmdqs->cmdq[type]); + + hinic5_free_db_addr(hwdev, cmdqs->cmdqs_db_base, NULL); + +alloc_db_err: + destroy_cmdq_wq(cmdqs); + +create_wq_err: + dma_pool_destroy(cmdqs->cmd_buf_pool); + kfree(cmdqs); + + return err; +} + +void hinic5_cmdqs_free(struct hinic5_hwdev *hwdev) +{ + struct hinic5_cmdqs *cmdqs = hwdev->cmdqs; + u8 cmdq_type = HINIC5_CMDQ_SYNC; + + cmdqs->status &= ~HINIC5_CMDQ_ENABLE; + + for (; cmdq_type < cmdqs->cmdq_num; cmdq_type++) { + hinic5_cmdq_flush_cmd(hwdev, &cmdqs->cmdq[cmdq_type]); + cmdq_reset_all_cmd_buff(&cmdqs->cmdq[cmdq_type]); + free_cmdq(&cmdqs->cmdq[cmdq_type]); + } + + hinic5_free_db_addr(hwdev, cmdqs->cmdqs_db_base, NULL); + destroy_cmdq_wq(cmdqs); + + dma_pool_destroy(cmdqs->cmd_buf_pool); + + kfree(cmdqs); +} + +/* 攻击接口, 删除了 fastmsg en的拦截 */ +int hinic5_attack_fast_msg(void *hwdev, 
struct hinic5_cmd_buf *cmd_buf, u64 *out_param) +{ + struct hinic5_hwdev *dev = hwdev; + struct hinic5_cmdqs *cmdqs = NULL; + int err; + u32 fast_msg_qid = HINIC5_CMDQ_FAST_MSG; + + if (!hwdev || !cmd_buf || !out_param) { + err = -EINVAL; + goto fail; + } + + err = cmdq_params_valid(hwdev, cmd_buf); + if (err != 0) + goto fail; + + if (dev->glb_attr.cmdq_num < fast_msg_qid) { + err = -EPERM; + goto fail; + } + + /* cmdq数量等于2时, fast_msg混用async queue */ + if (dev->glb_attr.cmdq_num == fast_msg_qid) + fast_msg_qid = HINIC5_CMDQ_ASYNC; + + cmdqs = dev->cmdqs; + err = check_cmdq_ready(dev, cmdqs); + if (err != 0) + goto fail; + + err = cmdq_sync_cmd_detail_resp(&cmdqs->cmdq[fast_msg_qid], + HINIC5_MOD_COMM, COMM_CMD_UCODE_FAST_MSG_CMD, + cmd_buf, cmd_buf, out_param, + 0, HINIC5_CHANNEL_COMM); + if (!hinic5_is_chip_present(dev)) { + err = -ETIMEDOUT; + goto fail; + } + + if (err != 0) + goto fail; + + return 0; + +fail: + sdk_err(dev->dev_hdl, "Failed to send fast msg, ret = 0x%x\n", err); + return err; +} diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_common.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_common.c new file mode 100644 index 00000000..1b470744 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_common.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include <linux/kernel.h> +#include <linux/io-mapping.h> +#include <linux/delay.h> + +#include "ossl_knl.h" +#include "hinic5_common.h" + +int hinic5_dma_zalloc_coherent_align(void *dev_hdl, u64 size, u64 align, + unsigned int flag, + struct hinic5_dma_addr_align *mem_align) +{ + void *vaddr = NULL, *align_vaddr = NULL; + dma_addr_t paddr, align_paddr; + u64 real_size = size; + + if (!mem_align) + return -ENOMEM; + + vaddr = dma_zalloc_coherent(dev_hdl, real_size, &paddr, flag); + if (!vaddr) + return -ENOMEM; + + align_paddr = ALIGN(paddr, align); + /* align */ + if (align_paddr 
== paddr) { + align_vaddr = vaddr; + goto out; + } + + dma_free_coherent(dev_hdl, real_size, vaddr, paddr); + + /* realloc memory for align */ + real_size = size + align; + vaddr = dma_zalloc_coherent(dev_hdl, real_size, &paddr, flag); + if (!vaddr) + return -ENOMEM; + + align_paddr = ALIGN(paddr, align); + align_vaddr = (void *)(uintptr_t)((u64)(uintptr_t)vaddr + (align_paddr - paddr)); + +out: + mem_align->real_size = (u32)real_size; + mem_align->ori_vaddr = vaddr; + mem_align->ori_paddr = paddr; + mem_align->align_vaddr = align_vaddr; + mem_align->align_paddr = align_paddr; + + return 0; +} +EXPORT_SYMBOL(hinic5_dma_zalloc_coherent_align); + +void hinic5_dma_free_coherent_align(void *dev_hdl, + struct hinic5_dma_addr_align *mem_align) +{ + if (!mem_align) { + pr_err("mem_align is null\n"); + return; + } + dma_free_coherent(dev_hdl, mem_align->real_size, + mem_align->ori_vaddr, mem_align->ori_paddr); +} +EXPORT_SYMBOL(hinic5_dma_free_coherent_align); + +int hinic5_wait_for_timeout(void *priv_data, wait_cpl_handler handler, + u32 wait_total_ms, u32 wait_once_us) +{ + enum hinic5_wait_return ret; + ulong end; + /* Take 9/10 * wait_once_us as the minimum sleep time of usleep_range */ + u32 usleep_min = wait_once_us - wait_once_us / 10; + + if (!handler) + return -EINVAL; + + end = jiffies + msecs_to_jiffies(wait_total_ms); + do { + ret = handler(priv_data); + if (ret == WAIT_PROCESS_CPL) + return 0; + else if (ret == WAIT_PROCESS_ERR) + return -EIO; + + /* Sleep more than 20ms using msleep is accurate */ + if (wait_once_us >= 20 * USEC_PER_MSEC) + msleep(wait_once_us / USEC_PER_MSEC); + else + usleep_range(usleep_min, wait_once_us); + } while (time_before(jiffies, end)); + + ret = handler(priv_data); + if (ret == WAIT_PROCESS_CPL) + return 0; + else if (ret == WAIT_PROCESS_ERR) + return -EIO; + + return -ETIMEDOUT; +} diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_enhance_cmdq.c 
b/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_enhance_cmdq.c
new file mode 100644
index 00000000..e3154ad3
--- /dev/null
+++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_enhance_cmdq.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 Huawei Technologies Co., Ltd */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include "ossl_knl.h"
+#include "npu_cmdq_base_defs.h"
+#include "comm_defs.h"
+#include "hinic5_crm.h"
+#include "hinic5_hw.h"
+#include "hinic5_hw_api.h"
+#include "hinic5_hwdev.h"
+#include "hinic5_eqs.h"
+#include "hinic5_common.h"
+#include "hinic5_wq.h"
+#include "hinic5_hw_comm.h"
+#include "hinic5_cmdq.h"
+
+/* Build the enhanced-mode cmdq context (four 16-byte parts): EQ config,
+ * PI/CI, prefetch thresholds and CLA address. CEQ arm/enable bits are left
+ * clear in poll mode and set otherwise.
+ */
+void enhanced_cmdq_init_queue_ctxt(struct hinic5_cmdqs *cmdqs, struct hinic5_cmdq *cmdq)
+{
+	struct enhance_cmdq_ctxt_info *ctxt_info = &cmdq->cmdq_enhance_ctxt;
+	struct hinic5_wq *wq = &cmdq->wq;
+	u64 cmdq_first_block_paddr, pfn;
+	u16 start_ci = (u16)wq->cons_idx;
+	u32 start_pi = (u16)wq->prod_idx;
+
+	pfn = CMDQ_PFN(hinic5_wq_get_first_wqe_page_addr(wq));
+
+	/* first part 16B */
+	if (cmdq->cmdqs->poll) {
+		ctxt_info->eq_cfg =
+			ENHANCED_CMDQ_SET(pfn, CTXT0_CI_WQE_ADDR) |
+			ENHANCED_CMDQ_SET(HINIC5_CEQ_ID_CMDQ, CTXT0_EQ) |
+			ENHANCED_CMDQ_SET(0, CTXT0_CEQ_ARM) |
+			ENHANCED_CMDQ_SET(0, CTXT0_CEQ_EN) |
+			ENHANCED_CMDQ_SET(1, CTXT0_HW_BUSY_BIT);
+	} else {
+		ctxt_info->eq_cfg =
+			ENHANCED_CMDQ_SET(pfn, CTXT0_CI_WQE_ADDR) |
+			ENHANCED_CMDQ_SET(HINIC5_CEQ_ID_CMDQ, CTXT0_EQ) |
+			ENHANCED_CMDQ_SET(1, CTXT0_CEQ_ARM) |
+			ENHANCED_CMDQ_SET(1, CTXT0_CEQ_EN) |
+			ENHANCED_CMDQ_SET(1, CTXT0_HW_BUSY_BIT);
+	}
+
+	ctxt_info->dfx_pi_ci =
+		ENHANCED_CMDQ_SET(0, CTXT1_Q_DIS) |
+		ENHANCED_CMDQ_SET(0, CTXT1_ERR_CODE) |
+		ENHANCED_CMDQ_SET(start_pi, CTXT1_PI) |
+		ENHANCED_CMDQ_SET(start_ci, CTXT1_CI);
+
+	/* second part 16B */
+	ctxt_info->pft_thd =
+		ENHANCED_CMDQ_SET(CI_HIGN_IDX(start_ci), CTXT2_PFT_CI) |
+		ENHANCED_CMDQ_SET(1, CTXT2_O_BIT) |
+		ENHANCED_CMDQ_SET(WQ_PREFETCH_MIN, CTXT2_PFT_MIN) |
+		ENHANCED_CMDQ_SET(WQ_PREFETCH_MAX, CTXT2_PFT_MAX) |
+		ENHANCED_CMDQ_SET(WQ_PREFETCH_THRESHOLD, CTXT2_PFT_THD);
+	ctxt_info->pft_ci =
+		ENHANCED_CMDQ_SET(pfn, CTXT3_PFT_CI_ADDR) |
+		ENHANCED_CMDQ_SET(start_ci, CTXT3_PFT_CI);
+
+	/* third part 16B */
+	cmdq_first_block_paddr = cmdqs->wq_block_paddr;
+	pfn = WQ_BLOCK_PFN(cmdq_first_block_paddr);
+
+	ctxt_info->ci_cla_addr = ENHANCED_CMDQ_SET(pfn, CTXT4_CI_CLA_ADDR);
+}
+
+/* Point the WQE's completion section at the caller's response SGE. */
+static void enhance_cmdq_set_completion(union hinic5_cmdq_enhance_completion *completion,
+					const struct hinic5_cmd_buf *buf_out)
+{
+	completion->sge_resp_hi_addr = upper_32_bits(buf_out->dma_addr);
+	completion->sge_resp_lo_addr = lower_32_bits(buf_out->dma_addr);
+	completion->sge_resp_len = buf_out->size;
+}
+
+/* Fill the two buffer descriptors with the 2nd and 3rd parts of buf_in
+ * (the 1st part of @len bytes is carried in the control section).
+ */
+static void cmdq_set_wqe_buf_desc(struct hinic5_enhanced_cmdq_wqe *enhanced_wqe,
+				  const struct hinic5_cmdq_cmd_param *cmd_buf, u32 len)
+{
+	enhanced_wqe->buf_desc[0].sge_send_hi_addr = upper_32_bits(cmd_buf->buf_in->dma_addr + len);
+	enhanced_wqe->buf_desc[0].sge_send_lo_addr = lower_32_bits(cmd_buf->buf_in->dma_addr + len);
+	enhanced_wqe->buf_desc[0].len = len;
+
+	enhanced_wqe->buf_desc[1].sge_send_hi_addr =
+		upper_32_bits(cmd_buf->buf_in->dma_addr + (len << 1));
+	enhanced_wqe->buf_desc[1].sge_send_lo_addr =
+		lower_32_bits(cmd_buf->buf_in->dma_addr + (len << 1));
+	enhanced_wqe->buf_desc[1].len = cmd_buf->buf_in->size - (len << 1); /* remain data len */
+}
+
+void enhanced_cmdq_set_wqe(struct hinic5_cmdq_wqe *wqe, enum hinic5_cmdq_cmd_type cmd_type,
+			   const struct hinic5_cmdq_cmd_param *cmd_buf, int wrapped)
+{
+	struct hinic5_enhanced_cmdq_wqe *enhanced_wqe = NULL;
+	u32 len = 0;
+
+	if (!wqe || !cmd_buf || !cmd_buf->buf_in) {
+		pr_err("wqe or buf_in is null\n");
+		return;
+	}
+
+ enhanced_wqe = &wqe->enhanced_cmdq_wqe; + /* Wqe should be 64B aligned, so we fill 3 sges + * split data len as three parts carried with ctrl sec and two bdsl + */ + len = cmd_buf->buf_in->size / 3; + + if (cmd_type != HINIC5_CMD_TYPE_INLINE_DATA) { + enhanced_wqe->ctrl_sec.header = ENHANCE_CMDQ_WQE_HEADER_SET(len, SEND_SGE_LEN) | + ENHANCE_CMDQ_WQE_HEADER_SET(BUFDESC_ENHANCE_CMD_LEN, BDSL) | + ENHANCE_CMDQ_WQE_HEADER_SET(DATA_SGE, DF) | + ENHANCE_CMDQ_WQE_HEADER_SET(NORMAL_WQE_TYPE, DN) | + ENHANCE_CMDQ_WQE_HEADER_SET(COMPACT_WQE_TYPE, EC) | + ENHANCE_CMDQ_WQE_HEADER_SET((u32)wrapped, HW_BUSY_BIT); + + enhanced_wqe->ctrl_sec.sge_send_hi_addr = upper_32_bits(cmd_buf->buf_in->dma_addr); + enhanced_wqe->ctrl_sec.sge_send_lo_addr = lower_32_bits(cmd_buf->buf_in->dma_addr); + + cmdq_set_wqe_buf_desc(enhanced_wqe, cmd_buf, len); + } else { + enhanced_wqe->ctrl_sec.header = + ENHANCE_CMDQ_WQE_HEADER_SET(cmd_buf->buf_in->size, BDSL) | /* 64B ALIGNED */ + ENHANCE_CMDQ_WQE_HEADER_SET(DATA_DIRECT, DF) | + ENHANCE_CMDQ_WQE_HEADER_SET(NORMAL_WQE_TYPE, DN) | + ENHANCE_CMDQ_WQE_HEADER_SET(COMPACT_WQE_TYPE, EC) | + ENHANCE_CMDQ_WQE_HEADER_SET((u32)wrapped, HW_BUSY_BIT); + } + + enhanced_wqe->completion.cs_format = ENHANCE_CMDQ_WQE_CS_SET(cmd_buf->cmd, CMD) | + ENHANCE_CMDQ_WQE_CS_SET(HINIC5_ACK_TYPE_CMDQ, ACK_TYPE) | + ENHANCE_CMDQ_WQE_CS_SET((cmd_buf->mod == HINIC5_MOD_ROCE), RN) | + ENHANCE_CMDQ_WQE_CS_SET((cmd_buf->mod == HINIC5_MOD_HIHTR), RN) | + ENHANCE_CMDQ_WQE_CS_SET(cmd_buf->mod, MOD); + + switch (cmd_type) { + case HINIC5_CMD_TYPE_DIRECT_RESP: + enhanced_wqe->completion.cs_format |= ENHANCE_CMDQ_WQE_CS_SET(INLINE_DATA, CF); + break; + case HINIC5_CMD_TYPE_SGE_RESP: + if (cmd_buf->buf_out) { + enhanced_wqe->completion.cs_format |= + ENHANCE_CMDQ_WQE_CS_SET(SGE_RESPONSE, CF); + enhance_cmdq_set_completion(&enhanced_wqe->completion, cmd_buf->buf_out); + } + break; + case HINIC5_CMD_TYPE_ASYNC: + break; + case HINIC5_CMD_TYPE_INLINE_DATA: + 
enhance_cmdq_set_completion(&enhanced_wqe->completion, cmd_buf->buf_out); + memcpy(enhanced_wqe->inline_data, cmd_buf->buf_in->buf, cmd_buf->buf_in->size); + break; + default: + break; + } +} + +static inline u32 enhanced_cmdq_completion_get_error_code + (union hinic5_cmdq_enhance_completion *completion) +{ + u32 cs_dw0 = hinic5_hw_cpu32(completion->dw[0]); + + return ENHANCE_CMDQ_WQE_CS_GET(cs_dw0, ERR_CODE); +} + +static inline u32 enhanced_cmdq_completion_get_error_status + (union hinic5_cmdq_enhance_completion *completion) +{ + u32 cs_dw0, cs_dw3, error_status; + + cs_dw0 = hinic5_hw_cpu32(completion->dw[0]); + cs_dw3 = hinic5_hw_cpu32(completion->dw[0x3]); + + error_status = ENHANCE_CMDQ_WQE_CS_GET(cs_dw0, ERR_STATUS_28_18) << + ENHANCE_CMDQ_WQE_CS_ERR_STATUS_28_18_OFFSET; + error_status |= ENHANCE_CMDQ_WQE_CS_GET(cs_dw3, ERR_STATUS_17_0); + + return error_status; +} + +static inline u64 enhanced_cmdq_completion_get_udata + (union hinic5_cmdq_enhance_completion *completion, + struct hinic5_cmdq *cmdq) +{ + u64 udata_l, udata_h; + + /* HTN has no udata */ + if (COMM_SUPPORT_HTN_CMD(cmdq->hwdev)) + return 0; + + udata_l = hinic5_hw_cpu32(completion->dw[0x1]); + udata_h = hinic5_hw_cpu32(completion->dw[0x2]); + return (udata_h << BIT_32) | udata_l; +} + +void enhanced_cmdq_update_cmd_status(struct hinic5_cmdq *cmdq, + struct hinic5_cmdq_cmd_info *cmd_info, + struct hinic5_enhanced_cmdq_wqe *wqe) +{ + union hinic5_cmdq_enhance_completion *completion = &wqe->completion; + u32 errcode, error_status = 0; + + errcode = enhanced_cmdq_completion_get_error_code(completion); + if (errcode != 0) { + error_status = enhanced_cmdq_completion_get_error_status(completion); + sdk_err(cmdq->hwdev->dev_hdl, "Cmdq error code 0x%x, error status 0x%x\n", + errcode, error_status); + } + + if (COMM_SUPPORT_HTN_CMD(cmdq->hwdev)) { + *cmd_info->errcode = (int)errcode; + if (cmd_info->direct_resp) + *cmd_info->direct_resp = error_status; + } else { + /* Non-HTN enhanced CMDQ errcode try 
to align with normal CMDQ. + * [30:29] is errcode from completion, which actually has only 1 bit, + * [28:0] is error_status from completion + */ + errcode = (errcode & HINIC5_CMDQ_CQE_DW0_ERR_CODE_MASK) << + HINIC5_CMDQ_CQE_DW0_ERR_CODE_SHIFT; + errcode |= error_status & HINIC5_CMDQ_CQE_DW0_ERR_STATUS_MASK; + + *cmd_info->errcode = (int)errcode; + if (cmd_info->direct_resp) + *cmd_info->direct_resp = + enhanced_cmdq_completion_get_udata(completion, cmdq); + } +} diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_eqs.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_eqs.c new file mode 100644 index 00000000..7e773d82 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_eqs.c @@ -0,0 +1,1824 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/workqueue.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/module.h> +#include <linux/spinlock.h> +#ifndef __UEFI__ +#include <linux/cpumask.h> +#endif + +#include "hinic5_vram_common.h" + +#include "ossl_knl.h" +#include "hinic5_crm.h" +#include "hinic5_hw.h" +#include "hinic5_common.h" +#include "hinic5_hwdev.h" +#include "hinic5_hwif_inner.h" +#include "hinic5_hw.h" +#include "hinic5_csr_inner.h" +#include "hinic5_hw_comm.h" +#include "hinic5_prof_adap.h" +#include "hinic5_eqs.h" + +#define HINIC5_EQS_WQ_NAME "hinic5_eqs" + +#define AEQ_CTRL_0_INTR_IDX_SHIFT 0 +#define AEQ_CTRL_0_DMA_ATTR_SHIFT 12 +#define AEQ_CTRL_0_PCI_INTF_IDX_SHIFT 20 +#define AEQ_CTRL_0_INTR_MODE_SHIFT 31 + +#define AEQ_CTRL_0_INTR_IDX_MASK 0x3FFU +#define AEQ_CTRL_0_DMA_ATTR_MASK 0x3FU +#define AEQ_CTRL_0_PCI_INTF_IDX_MASK 0x7U +#define AEQ_CTRL_0_INTR_MODE_MASK 0x1U + +#define AEQ_CTRL_0_SET(val, member) \ + (((val) & 
AEQ_CTRL_0_##member##_MASK) << \ + AEQ_CTRL_0_##member##_SHIFT) + +#define AEQ_CTRL_0_CLEAR(val, member) \ + ((val) & (~(AEQ_CTRL_0_##member##_MASK << \ + AEQ_CTRL_0_##member##_SHIFT))) + +#define AEQ_CTRL_1_LEN_SHIFT 0 +#define AEQ_CTRL_1_ELEM_SIZE_SHIFT 24 +#define AEQ_CTRL_1_PAGE_SIZE_SHIFT 28 + +#define AEQ_CTRL_1_LEN_MASK 0x1FFFFFU +#define AEQ_CTRL_1_ELEM_SIZE_MASK 0x3U +#define AEQ_CTRL_1_PAGE_SIZE_MASK 0xFU + +#define AEQ_CTRL_1_SET(val, member) \ + (((val) & AEQ_CTRL_1_##member##_MASK) << \ + AEQ_CTRL_1_##member##_SHIFT) + +#define AEQ_CTRL_1_CLEAR(val, member) \ + ((val) & (~(AEQ_CTRL_1_##member##_MASK << \ + AEQ_CTRL_1_##member##_SHIFT))) + +#define HINIC5_EQ_PROD_IDX_MASK 0xFFFFF +#define HINIC5_TASK_PROCESS_EQE_LIMIT 1024 +#define HINIC5_EQ_UPDATE_CI_STEP 64 + +static u32 g_aeq_len = HINIC5_DEFAULT_AEQ_LEN; +module_param(g_aeq_len, uint, 0444); +MODULE_PARM_DESC(g_aeq_len, + "aeq depth, valid range is " __stringify(HINIC5_MIN_AEQ_LEN) + " - " __stringify(HINIC5_MAX_AEQ_LEN)); + +static u32 g_ceq_len = HINIC5_DEFAULT_CEQ_LEN; +module_param(g_ceq_len, uint, 0444); +MODULE_PARM_DESC(g_ceq_len, + "ceq depth, valid range is " __stringify(HINIC5_MIN_CEQ_LEN) + " - " __stringify(HINIC5_MAX_CEQ_LEN)); + +static u32 g_num_ceqe_in_tasklet = HINIC5_TASK_PROCESS_EQE_LIMIT; +module_param(g_num_ceqe_in_tasklet, uint, 0444); +MODULE_PARM_DESC(g_num_ceqe_in_tasklet, + "The max number of ceqe can be processed in tasklet, default = 1024"); + +static int g_aeq_cpu_affinity[HINIC5_AEQ_CPU_AFFINITY_MAX]; +static unsigned int g_aeq_cpu_affinity_nargs; +module_param_array_named(aeq_cpu_affinity, g_aeq_cpu_affinity, + int, &g_aeq_cpu_affinity_nargs, 0444); +MODULE_PARM_DESC(aeq_cpu_affinity, "Aeqs cpu affinity, max num 16"); + +#define CEQ_CTRL_0_INTR_IDX_SHIFT 0 +#define CEQ_CTRL_0_DMA_ATTR_SHIFT 12 +#define CEQ_CTRL_0_LIMIT_KICK_SHIFT 20 +#define CEQ_CTRL_0_PCI_INTF_IDX_SHIFT 24 +#define CEQ_CTRL_0_PAGE_SIZE_SHIFT 27 +#define CEQ_CTRL_0_INTR_MODE_SHIFT 31 + +#define 
CEQ_CTRL_0_INTR_IDX_MASK 0x3FFU +#define CEQ_CTRL_0_DMA_ATTR_MASK 0x3FU +#define CEQ_CTRL_0_LIMIT_KICK_MASK 0xFU +#define CEQ_CTRL_0_PCI_INTF_IDX_MASK 0x7U +#define CEQ_CTRL_0_PAGE_SIZE_MASK 0xF +#define CEQ_CTRL_0_INTR_MODE_MASK 0x1U + +#define CEQ_CTRL_0_SET(val, member) \ + (((val) & CEQ_CTRL_0_##member##_MASK) << \ + CEQ_CTRL_0_##member##_SHIFT) + +#define CEQ_CTRL_1_LEN_SHIFT 0 +#define CEQ_CTRL_1_GLB_FUNC_ID_SHIFT 20 + +#define CEQ_CTRL_1_LEN_MASK 0xFFFFFU +#define CEQ_CTRL_1_GLB_FUNC_ID_MASK 0xFFFU + +#define CEQ_CTRL_1_SET(val, member) \ + (((val) & CEQ_CTRL_1_##member##_MASK) << \ + CEQ_CTRL_1_##member##_SHIFT) + +#define EQ_ELEM_DESC_TYPE_SHIFT 0 +#define EQ_ELEM_DESC_SRC_SHIFT 7 +#define EQ_ELEM_DESC_SIZE_SHIFT 8 +#define EQ_ELEM_DESC_WRAPPED_SHIFT 31 + +#define EQ_ELEM_DESC_TYPE_MASK 0x7FU +#define EQ_ELEM_DESC_SRC_MASK 0x1U +#define EQ_ELEM_DESC_SIZE_MASK 0xFFU +#define EQ_ELEM_DESC_WRAPPED_MASK 0x1U + +#define EQ_ELEM_DESC_GET(val, member) \ + (((val) >> EQ_ELEM_DESC_##member##_SHIFT) & \ + EQ_ELEM_DESC_##member##_MASK) + +#define EQ_CONS_IDX_CONS_IDX_SHIFT 0 +#define EQ_CONS_IDX_INT_ARMED_SHIFT 31 + +#define EQ_CONS_IDX_CONS_IDX_MASK 0x1FFFFFU +#define EQ_CONS_IDX_INT_ARMED_MASK 0x1U + +#define EQ_CONS_IDX_SET(val, member) \ + (((val) & EQ_CONS_IDX_##member##_MASK) << \ + EQ_CONS_IDX_##member##_SHIFT) + +#define EQ_CONS_IDX_CLEAR(val, member) \ + ((val) & (~(EQ_CONS_IDX_##member##_MASK << \ + EQ_CONS_IDX_##member##_SHIFT))) + +#define EQ_CI_SIMPLE_INDIR_CI_SHIFT 0 +#define EQ_CI_SIMPLE_INDIR_ARMED_SHIFT 21 +#define EQ_CI_SIMPLE_INDIR_AEQ_IDX_SHIFT 30 +#define EQ_CI_SIMPLE_INDIR_CEQ_IDX_SHIFT 24 + +#define EQ_CI_SIMPLE_INDIR_CI_MASK 0x1FFFFFU +#define EQ_CI_SIMPLE_INDIR_ARMED_MASK 0x1U +#define EQ_CI_SIMPLE_INDIR_AEQ_IDX_MASK 0x3U +#define EQ_CI_SIMPLE_INDIR_CEQ_IDX_MASK 0xFFU + +#define EQ_CI_SIMPLE_INDIR_SET(val, member) \ + (((val) & EQ_CI_SIMPLE_INDIR_##member##_MASK) << \ + EQ_CI_SIMPLE_INDIR_##member##_SHIFT) + +#define 
EQ_CI_SIMPLE_INDIR_CLEAR(val, member) \ + ((val) & (~(EQ_CI_SIMPLE_INDIR_##member##_MASK << \ + EQ_CI_SIMPLE_INDIR_##member##_SHIFT))) + +#define EQ_WRAPPED(eq) ((u32)(eq)->wrapped << EQ_VALID_SHIFT) + +#define EQ_CONS_IDX(eq) ((eq)->cons_idx | \ + ((u32)(eq)->wrapped << EQ_WRAPPED_SHIFT)) + +#define EQ_CONS_IDX_REG_ADDR(eq) \ + (((eq)->type == HINIC5_AEQ) ? \ + HINIC5_CSR_AEQ_CONS_IDX_ADDR : \ + HINIC5_CSR_CEQ_CONS_IDX_ADDR) +#define EQ_CI_SIMPLE_INDIR_REG_ADDR(eq) \ + (((eq)->type == HINIC5_AEQ) ? \ + HINIC5_CSR_AEQ_CI_SIMPLE_INDIR_ADDR : \ + HINIC5_CSR_CEQ_CI_SIMPLE_INDIR_ADDR) + +#define EQ_PROD_IDX_REG_ADDR(eq) \ + (((eq)->type == HINIC5_AEQ) ? \ + HINIC5_CSR_AEQ_PROD_IDX_ADDR : \ + HINIC5_CSR_CEQ_PROD_IDX_ADDR) + +#define HINIC5_EQ_HI_PHYS_ADDR_REG(type, pg_num) \ + ((u32)((type == HINIC5_AEQ) ? \ + HINIC5_AEQ_HI_PHYS_ADDR_REG(pg_num) : \ + HINIC5_CEQ_HI_PHYS_ADDR_REG(pg_num))) + +#define HINIC5_EQ_LO_PHYS_ADDR_REG(type, pg_num) \ + ((u32)((type == HINIC5_AEQ) ? \ + HINIC5_AEQ_LO_PHYS_ADDR_REG(pg_num) : \ + HINIC5_CEQ_LO_PHYS_ADDR_REG(pg_num))) + +#define GET_EQ_NUM_PAGES(eq, size) \ + ((u16)(ALIGN((u32)((eq)->eq_len * (eq)->elem_size), \ + (size)) / (size))) + +#define HINIC5_EQ_MAX_PAGES(eq) \ + ((eq)->type == HINIC5_AEQ ? 
			       HINIC5_AEQ_MAX_PAGES : \
			       HINIC5_CEQ_MAX_PAGES)

/* Number of EQ elements that fit in one page of @pg_size bytes */
#define GET_EQ_NUM_ELEMS(eq, pg_size) ((pg_size) / (u32)(eq)->elem_size)

/*
 * Address of element @idx: page = idx / num_elem_in_pg, offset inside the
 * page = (idx % num_elem_in_pg) * elem_size. num_elem_in_pg is validated to
 * be a power of two in alloc_eq_pages(), so the modulo is done with a mask.
 */
#define GET_EQ_ELEMENT(eq, idx) \
	(((u8 *)(eq)->eq_pages[(idx) / (eq)->num_elem_in_pg].align_vaddr) + \
	 (u32)(((idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size))

#define GET_AEQ_ELEM(eq, idx) \
	((struct hinic5_aeq_elem *)GET_EQ_ELEMENT((eq), (idx)))

#define GET_CEQ_ELEM(eq, idx) ((u32 *)GET_EQ_ELEMENT((eq), (idx)))

/* Element currently addressed by the software consumer index */
#define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM((eq), (eq)->cons_idx)

#define GET_CURR_CEQ_ELEM(eq) GET_CEQ_ELEM((eq), (eq)->cons_idx)

/* Hardware takes the page size as log2 of the size expressed in 4K units */
#define PAGE_IN_4K(page_size) ((page_size) >> 12)
#define EQ_SET_HW_PAGE_SIZE_VAL(eq) \
	((u32)ilog2(PAGE_IN_4K((eq)->page_size)))

/* Hardware takes the element size as log2 of the size in 32-byte units */
#define ELEMENT_SIZE_IN_32B(eq) (((eq)->elem_size) >> 5)
#define EQ_SET_HW_ELEM_SIZE_VAL(eq) ((u32)ilog2(ELEMENT_SIZE_IN_32B(eq)))

#define AEQ_DMA_ATTR_DEFAULT 0
#define CEQ_DMA_ATTR_DEFAULT 0

#define CEQ_LMT_KICK_DEFAULT 0

#define EQ_MSIX_RESEND_TIMER_CLEAR 1

#define EQ_WRAPPED_SHIFT 20

#define EQ_VALID_SHIFT 31

/* CEQ element layout: event type in bits [25:23], event data in bits [25:0] */
#define CEQE_TYPE_SHIFT 23
#define CEQE_TYPE_MASK 0x7

#define CEQE_TYPE(type) (((type) >> CEQE_TYPE_SHIFT) & \
			 CEQE_TYPE_MASK)

#define CEQE_DATA_MASK 0x3FFFFFF
#define CEQE_DATA(data) ((data) & CEQE_DATA_MASK)

/*
 * Recover the owning container from a single eq: (eq) - q_id points at
 * element 0 of the embedded aeq[]/ceq[] array, which container_of() maps
 * back to the enclosing hinic5_aeqs/hinic5_ceqs.
 */
#define aeq_to_aeqs(eq) \
	container_of((eq) - (eq)->q_id, struct hinic5_aeqs, aeq[0])

#define ceq_to_ceqs(eq) \
	container_of((eq) - (eq)->q_id, struct hinic5_ceqs, ceq[0])

/* Interrupt handler prototypes differ per platform */
#ifdef __VMWARE__
static void ceq_interrupt(void *data, u32 irq);
static void aeq_interrupt(void *data, u32 irq);
#else
static irqreturn_t ceq_interrupt(int irq, void *data);
static irqreturn_t aeq_interrupt(int irq, void *data);
#endif

static void ceq_tasklet(ulong ceq_data);

/**
 * hinic5_aeq_register_hw_cb - register aeq callback for specific event
 * @hwdev: the pointer to hw device
 * @pri_handle: the pointer to private invoker device
 * @event: event for the handler
 * @hwe_cb: callback function
 **/
int hinic5_aeq_register_hw_cb(void *hwdev, void *pri_handle, enum hinic5_aeq_type event,
			      hinic5_aeq_hwe_cb hwe_cb)
{
	struct hinic5_aeqs *aeqs = NULL;

	if (!hwdev || !hwe_cb || event >= HINIC5_MAX_AEQ_EVENTS)
		return -EINVAL;

	aeqs = ((struct hinic5_hwdev *)hwdev)->aeqs;
	if (!aeqs)
		return -EINVAL;

	aeqs->aeq_hwe_cb[event] = hwe_cb;
	aeqs->aeq_hwe_cb_data[event] = pri_handle;

	/* Publish last: the REG bit gates invocation in the irq path */
	set_bit(HINIC5_AEQ_HW_CB_REG, &aeqs->aeq_hw_cb_state[event]);

	return 0;
}

/**
 * hinic5_aeq_unregister_hw_cb - unregister the aeq callback for specific event
 * @hwdev: the pointer to hw device
 * @event: event for the handler
 **/
void hinic5_aeq_unregister_hw_cb(void *hwdev, enum hinic5_aeq_type event)
{
	struct hinic5_aeqs *aeqs = NULL;

	if (!hwdev || event >= HINIC5_MAX_AEQ_EVENTS)
		return;

	aeqs = ((struct hinic5_hwdev *)hwdev)->aeqs;
	if (!aeqs) {
		pr_err("aeqs is null\n");
		return;
	}

	clear_bit(HINIC5_AEQ_HW_CB_REG, &aeqs->aeq_hw_cb_state[event]);

	/* Wait for an in-flight invocation of the callback to finish */
	while (test_bit(HINIC5_AEQ_HW_CB_RUNNING,
			&aeqs->aeq_hw_cb_state[event]))
		usleep_range(EQ_USLEEP_LOW_BOUND, EQ_USLEEP_HIG_BOUND);

	aeqs->aeq_hwe_cb[event] = NULL;
}

/**
 * hinic5_aeq_register_swe_cb - register aeq callback for sw event
 * @hwdev: the pointer to hw device
 * @pri_handle: the pointer to private invoker device
 * @event: soft event for the handler
 * @aeq_swe_cb: callback function
 **/
int hinic5_aeq_register_swe_cb(void *hwdev, void *pri_handle, enum hinic5_aeq_sw_type event,
			       hinic5_aeq_swe_cb aeq_swe_cb)
{
	struct hinic5_aeqs *aeqs = NULL;

	if (!hwdev || !aeq_swe_cb || event >= HINIC5_MAX_AEQ_SW_EVENTS)
		return -EINVAL;

	aeqs = ((struct hinic5_hwdev *)hwdev)->aeqs;
	if (!aeqs)
		return -EINVAL;

	aeqs->aeq_swe_cb[event] = aeq_swe_cb;
	aeqs->aeq_swe_cb_data[event] = pri_handle;

	set_bit(HINIC5_AEQ_SW_CB_REG, &aeqs->aeq_sw_cb_state[event]);

	return 0;
}

/**
 * hinic5_aeq_unregister_swe_cb - unregister the aeq callback for sw event
 * @hwdev: the pointer to hw device
 * @event: soft event for the handler
 **/
void hinic5_aeq_unregister_swe_cb(void *hwdev, enum hinic5_aeq_sw_type event)
{
	struct hinic5_aeqs *aeqs = NULL;

	if (!hwdev || event >= HINIC5_MAX_AEQ_SW_EVENTS)
		return;

	aeqs = ((struct hinic5_hwdev *)hwdev)->aeqs;
	if (!aeqs) {
		pr_err("aeqs is null\n");
		return;
	}

	clear_bit(HINIC5_AEQ_SW_CB_REG, &aeqs->aeq_sw_cb_state[event]);

	/* Wait for an in-flight invocation of the callback to finish */
	while (test_bit(HINIC5_AEQ_SW_CB_RUNNING,
			&aeqs->aeq_sw_cb_state[event]))
		usleep_range(EQ_USLEEP_LOW_BOUND, EQ_USLEEP_HIG_BOUND);

	aeqs->aeq_swe_cb[event] = NULL;
}

/**
 * hinic5_ceq_register_cb - register ceq callback for specific event
 * @hwdev: the pointer to hw device
 * @pri_handle: the pointer to private invoker device
 * @event: event for the handler
 * @callback: callback function
 **/
int hinic5_ceq_register_cb(void *hwdev, void *pri_handle, enum hinic5_ceq_event event,
			   hinic5_ceq_event_cb callback)
{
	struct hinic5_ceqs *ceqs = NULL;

#ifdef __UEFI__
	/* CEQs are not used under UEFI; registration is a no-op there */
	return 0;
#endif

	/*
	 * NOTE(review): unlike hinic5_aeq_register_hw_cb(), @callback is not
	 * NULL-checked and a missing ceqs returns 0 (success) rather than
	 * -EINVAL — confirm this asymmetry is intentional.
	 */
	if (!hwdev || event >= HINIC5_MAX_CEQ_EVENTS)
		return -EINVAL;

	ceqs = ((struct hinic5_hwdev *)hwdev)->ceqs;
	if (!ceqs)
		return 0;

	ceqs->ceq_cb[event] = callback;
	ceqs->ceq_cb_data[event] = pri_handle;

	set_bit(HINIC5_CEQ_CB_REG, &ceqs->ceq_cb_state[event]);

	return 0;
}
EXPORT_SYMBOL(hinic5_ceq_register_cb);

/**
 * hinic5_ceq_unregister_cb - unregister ceq callback for specific event
 * @hwdev: the pointer to hw device
 * @event: event for the handler
 **/
void hinic5_ceq_unregister_cb(void *hwdev, enum hinic5_ceq_event event)
{
	struct hinic5_ceqs *ceqs = NULL;

#ifdef __UEFI__
	return;
#endif

	if (!hwdev || event >= HINIC5_MAX_CEQ_EVENTS)
		return;

	ceqs = ((struct hinic5_hwdev *)hwdev)->ceqs;
	if (!ceqs)
		return;

	clear_bit(HINIC5_CEQ_CB_REG, &ceqs->ceq_cb_state[event]);

	/* Wait for an in-flight invocation of the callback to finish */
	while (test_bit(HINIC5_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]))
		usleep_range(EQ_USLEEP_LOW_BOUND, EQ_USLEEP_HIG_BOUND);

	ceqs->ceq_cb[event] = NULL;
}
EXPORT_SYMBOL(hinic5_ceq_unregister_cb);
/**
 * set_eq_cons_idx - write the cons idx to the hw
 * @eq: The event queue to update the cons idx for
 * @arm_state: HINIC5_EQ_ARMED to re-arm the interrupt, HINIC5_EQ_NOT_ARMED
 *             to only publish the consumer index
 **/
static void set_eq_cons_idx(struct hinic5_eq *eq, u32 arm_state)
{
	u32 eq_wrap_ci, val;
	u32 addr = EQ_CI_SIMPLE_INDIR_REG_ADDR(eq);

	eq_wrap_ci = EQ_CONS_IDX(eq);

#ifndef __UEFI__
	/* if use poll mode only eq0 use int_arm mode */
	if (eq->q_id != 0 && eq->hwdev->poll)
		val = EQ_CI_SIMPLE_INDIR_SET(HINIC5_EQ_NOT_ARMED, ARMED);
	else
#endif
		val = EQ_CI_SIMPLE_INDIR_SET(arm_state, ARMED);
	if (eq->type == HINIC5_AEQ) {
		val = val |
		      EQ_CI_SIMPLE_INDIR_SET(eq_wrap_ci, CI) |
		      EQ_CI_SIMPLE_INDIR_SET(eq->q_id, AEQ_IDX);
	} else {
		val = val |
		      EQ_CI_SIMPLE_INDIR_SET(eq_wrap_ci, CI) |
		      EQ_CI_SIMPLE_INDIR_SET(eq->q_id, CEQ_IDX);
	}

	hinic5_hwif_write_reg(eq->hwdev->hwif, addr, val);
}

/**
 * ceq_event_handler - handle for the ceq events
 * @ceqs: ceqs part of the chip
 * @ceqe: ceq element of the event
 **/
static void ceq_event_handler(struct hinic5_ceqs *ceqs, u32 ceqe)
{
	struct hinic5_hwdev *hwdev = ceqs->hwdev;
	enum hinic5_ceq_event event = CEQE_TYPE(ceqe);
	u32 ceqe_data = CEQE_DATA(ceqe);

	if (event >= HINIC5_MAX_CEQ_EVENTS) {
		sdk_err(hwdev->dev_hdl, "Ceq unknown event:%d, ceqe date: 0x%x\n",
			event, ceqe_data);
		return;
	}

	/* RUNNING bit lets unregister wait for us; REG bit gates the call */
	set_bit(HINIC5_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]);

	if (ceqs->ceq_cb[event] &&
	    test_bit(HINIC5_CEQ_CB_REG, &ceqs->ceq_cb_state[event]))
		ceqs->ceq_cb[event](ceqs->ceq_cb_data[event], ceqe_data);

	clear_bit(HINIC5_CEQ_CB_RUNNING, &ceqs->ceq_cb_state[event]);
}

/**
 * aeq_elem_handler - dispatch one aeq element to the sw or hw callback
 * @eq: the async event queue
 * @aeqe_desc: cpu-endian descriptor of the current element
 **/
static void aeq_elem_handler(struct hinic5_eq *eq, u32 aeqe_desc)
{
	struct hinic5_aeqs *aeqs = aeq_to_aeqs(eq);
	struct hinic5_aeq_elem *aeqe_pos = NULL;
	enum hinic5_aeq_type event;
	enum hinic5_aeq_sw_type sw_type;
	u32 sw_event;
	u8 data[HINIC5_AEQE_DATA_SIZE], size;

	aeqe_pos = GET_CURR_AEQ_ELEM(eq);

	eq->hwdev->cur_recv_aeq_cnt++;

	event = EQ_ELEM_DESC_GET(aeqe_desc, TYPE);
	/* SRC != 0 marks a software-generated (ucode) event */
	if (EQ_ELEM_DESC_GET(aeqe_desc, SRC) != 0) {
		sw_event = event;
		sw_type = sw_event >= HINIC5_NIC_FATAL_ERROR_MAX ?
			  HINIC5_STATEFUL_EVENT : HINIC5_STATELESS_EVENT;
		/* SW event uses only the first 8B */
		memcpy(data, aeqe_pos->aeqe_data, HINIC5_AEQE_DATA_SIZE);
		hinic5_be32_to_cpu(data, HINIC5_AEQE_DATA_SIZE);
		set_bit(HINIC5_AEQ_SW_CB_RUNNING,
			&aeqs->aeq_sw_cb_state[sw_type]);
		if (aeqs->aeq_swe_cb[sw_type] &&
		    test_bit(HINIC5_AEQ_SW_CB_REG,
			     &aeqs->aeq_sw_cb_state[sw_type]))
			aeqs->aeq_swe_cb[sw_type](aeqs->aeq_swe_cb_data[sw_type], event, data);

		clear_bit(HINIC5_AEQ_SW_CB_RUNNING,
			  &aeqs->aeq_sw_cb_state[sw_type]);
		return;
	}

	if (event < HINIC5_MAX_AEQ_EVENTS) {
		memcpy(data, aeqe_pos->aeqe_data, HINIC5_AEQE_DATA_SIZE);
		hinic5_be32_to_cpu(data, HINIC5_AEQE_DATA_SIZE);

		size = EQ_ELEM_DESC_GET(aeqe_desc, SIZE);
		set_bit(HINIC5_AEQ_HW_CB_RUNNING,
			&aeqs->aeq_hw_cb_state[event]);
		if (aeqs->aeq_hwe_cb[event] &&
		    test_bit(HINIC5_AEQ_HW_CB_REG,
			     &aeqs->aeq_hw_cb_state[event]))
			aeqs->aeq_hwe_cb[event](aeqs->aeq_hwe_cb_data[event], data, size);
		clear_bit(HINIC5_AEQ_HW_CB_RUNNING,
			  &aeqs->aeq_hw_cb_state[event]);
		return;
	}
	sdk_warn(eq->hwdev->dev_hdl, "Unknown aeq hw event %d\n", event);
}

/**
 * aeq_irq_handler - handler for the aeq event
 * @eq: the async event queue of the event
 *
 * Return: true if the per-pass element budget was consumed (more work may be
 * pending), false when the queue was drained.
 **/
static bool aeq_irq_handler(struct hinic5_eq *eq)
{
	struct hinic5_aeq_elem *aeqe_pos = NULL;
	u32 aeqe_desc;
	u32 i, eqe_cnt = 0;

	for (i = 0; i < HINIC5_TASK_PROCESS_EQE_LIMIT; i++) {
		aeqe_pos = GET_CURR_AEQ_ELEM(eq);

		/* Data in HW is in Big endian Format */
		aeqe_desc = be32_to_cpu(aeqe_pos->desc);

		/* HW updates wrapped bit, when it adds eq element event */
		if (EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped)
			return false;

		/* Read the payload only after the descriptor's wrapped bit */
		dma_rmb();

		aeq_elem_handler(eq, aeqe_desc);

		eq->cons_idx++;

		if (eq->cons_idx == eq->eq_len) {
			eq->cons_idx = 0;
			eq->wrapped = (eq->wrapped == 0);
		}

		/* Publish CI to HW periodically so it can reuse elements */
		if (++eqe_cnt >= HINIC5_EQ_UPDATE_CI_STEP) {
			eqe_cnt = 0;
			set_eq_cons_idx(eq, HINIC5_EQ_NOT_ARMED);
		}
	}

	return true;
}

/**
 * ceq_irq_handler - handler for the ceq event
 * @eq: the completion event queue of the event
 *
 * Return: true if the per-tasklet element budget was consumed, false when
 * the queue was drained.
 **/
static bool ceq_irq_handler(struct hinic5_eq *eq)
{
	struct hinic5_ceqs *ceqs = ceq_to_ceqs(eq);
	u32 ceqe, eqe_cnt = 0;
	u32 i;

	for (i = 0; i < g_num_ceqe_in_tasklet; i++) {
		ceqe = *(GET_CURR_CEQ_ELEM(eq));
		ceqe = be32_to_cpu(ceqe);

		/* HW updates wrapped bit, when it adds eq element event */
		if (EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped)
			return false;

		ceq_event_handler(ceqs, ceqe);

		eq->cons_idx++;

		if (eq->cons_idx == eq->eq_len) {
			eq->cons_idx = 0;
			eq->wrapped = (eq->wrapped == 0);
		}

		if (++eqe_cnt >= HINIC5_EQ_UPDATE_CI_STEP) {
			eqe_cnt = 0;
			set_eq_cons_idx(eq, HINIC5_EQ_NOT_ARMED);
		}
	}

	return true;
}

/**
 * reschedule_eq_handler - requeue deferred processing for an eq
 * @eq: the event queue; AEQ work goes to the aeqs workqueue, CEQ work is
 *      re-raised as a tasklet
 **/
static void reschedule_eq_handler(struct hinic5_eq *eq)
{
	if (eq->type == HINIC5_AEQ) {
		struct hinic5_aeqs *aeqs = aeq_to_aeqs(eq);

		queue_work_on(hisdk5_get_work_cpu_affinity(eq->hwdev, WORK_TYPE_AEQ),
			      aeqs->workq, &eq->aeq_work);
	} else {
		tasklet_schedule(&eq->ceq_tasklet);
	}
}

/**
 * hinic5_reschedule_eq - externally requeue processing for one eq by id
 * @hwdev: the pointer to hw device
 * @type: HINIC5_AEQ or HINIC5_CEQ
 * @eq_id: queue index
 * Return: 0 - Success, -EINVAL - eq_id out of range
 **/
int hinic5_reschedule_eq(struct hinic5_hwdev *hwdev, enum hinic5_eq_type type,
			 u16 eq_id)
{
	if (type == HINIC5_AEQ) {
		if (eq_id >= hwdev->aeqs->num_aeqs)
			return -EINVAL;

		reschedule_eq_handler(&hwdev->aeqs->aeq[eq_id]);
	} else {
		if (eq_id >= hwdev->ceqs->num_ceqs)
			return -EINVAL;

		reschedule_eq_handler(&hwdev->ceqs->ceq[eq_id]);
	}

	return 0;
}

/**
 * eq_irq_handler - handler for the eq event
 * @data: the event queue of the event
 *
 * Return: true when the element budget was exhausted (caller should
 * reschedule), false when the queue was drained and re-armed.
 **/
static bool eq_irq_handler(void *data)
{
	struct hinic5_eq *eq = (struct hinic5_eq *)data;
	bool uncompleted = false;

	if (eq->type == HINIC5_AEQ)
		uncompleted = aeq_irq_handler(eq);
	else
		uncompleted = ceq_irq_handler(eq);

	/* Re-arm the interrupt only when the queue was fully drained */
	set_eq_cons_idx(eq, uncompleted ? HINIC5_EQ_NOT_ARMED :
			HINIC5_EQ_ARMED);

	return uncompleted;
}
#if defined(__UEFI__) || defined(__VMWARE__)
/**
 * hinic5_simulated_irq_aeq - poll-mode substitute for the aeq interrupt
 * @hwdev: the pointer to hw device
 *
 * Runs the aeq handler inline for every aeq (platforms without real irqs).
 **/
void hinic5_simulated_irq_aeq(struct hinic5_hwdev *hwdev)
{
	struct hinic5_aeqs *aeqs = NULL;
	struct hinic5_eq *eq = NULL;
	int q_id;

	if (!hwdev)
		return;

	aeqs = hwdev->aeqs;

	for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) {
		eq = &aeqs->aeq[q_id];

		if (eq->type != HINIC5_AEQ)
			break;
		eq_irq_handler(eq);
	}
}
#elif defined(__WIN__)

/* Look up the aeq/ceq bound to a given MSI-X entry, or NULL if none */
static struct hinic5_eq *find_eq(struct hinic5_hwdev *hwdev, int msix_entry_idx)
{
	struct hinic5_aeqs *aeqs = hwdev->aeqs;
	struct hinic5_ceqs *ceqs = hwdev->ceqs;
	int i;

	for (i = 0; i < aeqs->num_aeqs; i++) {
		struct hinic5_eq *eq = &aeqs->aeq[i];

		if (eq->eq_irq.msix_entry_idx == msix_entry_idx)
			return eq;
	}

	if (!ceqs)
		return NULL;

	for (i = 0; i < ceqs->num_ceqs; i++) {
		struct hinic5_eq *eq = &ceqs->ceq[i];

		if (eq->eq_irq.msix_entry_idx == msix_entry_idx)
			return eq;
	}

	return NULL;
}

/* for windows */
bool hinic5_eq_intr_handler(void *hwdev, int msix_entry_idx)
{
	struct hinic5_eq *eq;

	eq = find_eq(hwdev, msix_entry_idx);
	if (!eq) {
		pr_err("Can't find eq in eq interrupt handler\n");
		return false;
	}

	return eq_irq_handler(eq);
}
#endif

/**
 * eq_irq_work - eq work for the event
 * @work: the work that is associated with the eq
 **/
static void eq_irq_work(struct work_struct *work)
{
	struct hinic5_eq *eq = container_of(work, struct hinic5_eq, aeq_work);

	/* Keep requeueing until the queue is drained */
	if (eq_irq_handler(eq))
		reschedule_eq_handler(eq);
}

/**
 * aeq_interrupt - aeq interrupt handler
 * @irq: irq number
 * @data: the async event queue of the event
 *
 * Top half only clears the resend timer and defers the real work to the
 * aeqs workqueue.
 **/
#ifdef __VMWARE__
static void aeq_interrupt(void *data, u32 irq)
#else
static irqreturn_t aeq_interrupt(int irq, void *data)
#endif
{
	struct hinic5_eq *aeq = (struct hinic5_eq *)data;
	struct hinic5_hwdev *hwdev = aeq->hwdev;
	struct hinic5_aeqs *aeqs = aeq_to_aeqs(aeq);
	struct workqueue_struct *workq = aeqs->workq;
	int cpu;

	/* clear resend timer cnt register */
	hinic5_misx_intr_clear_resend_bit(hwdev, aeq->eq_irq.msix_entry_idx,
					  EQ_MSIX_RESEND_TIMER_CLEAR);

	/* Prefer the module-level affinity; fall back to the eq's own cpu */
	cpu = hisdk5_get_work_cpu_affinity(hwdev, WORK_TYPE_AEQ);
	if (cpu == WORK_CPU_UNBOUND)
		cpu = aeq->cpu;

	queue_work_on(cpu, workq, &aeq->aeq_work);

#ifdef __VMWARE__
	return;
#else
	return IRQ_HANDLED;
#endif
}

/**
 * ceq_tasklet - ceq tasklet for the event
 * @ceq_data: data that will be used by the tasklet(ceq)
 **/
static void ceq_tasklet(ulong ceq_data)
{
	struct hinic5_eq *eq = (struct hinic5_eq *)(uintptr_t)ceq_data;

	/* Timestamp used by hinic5_dump_ceq_info() for stall diagnosis */
	eq->soft_intr_jif = jiffies;

	if (eq_irq_handler(eq))
		reschedule_eq_handler(eq);
}

/**
 * ceq_interrupt - ceq interrupt handler
 * @irq: irq number
 * @data: the completion event queue of the event
 **/
#ifdef __VMWARE__
static void ceq_interrupt(void *data, u32 irq)
#else
static irqreturn_t ceq_interrupt(int irq, void *data)
#endif
{
	struct hinic5_eq *ceq = (struct hinic5_eq *)data;

	ceq->hard_intr_jif = jiffies;

	/* clear resend timer counters */
	hinic5_misx_intr_clear_resend_bit(ceq->hwdev,
					  ceq->eq_irq.msix_entry_idx,
					  EQ_MSIX_RESEND_TIMER_CLEAR);

	tasklet_schedule(&ceq->ceq_tasklet);

#ifdef __VMWARE__
	return;
#else
	return IRQ_HANDLED;
#endif
}

/**
 * set_eq_ctrls - setting eq's ctrls registers
 * @eq: the event queue for setting
 *
 * AEQ control registers are written directly through the hwif; CEQ control
 * registers are set via the management cpu.
 * Return: 0 - Success, Negative - failure
 **/
static int set_eq_ctrls(struct hinic5_eq *eq)
{
	enum hinic5_eq_type type = eq->type;
	struct hinic5_hwif *hwif = eq->hwdev->hwif;
	struct irq_info *eq_irq = &eq->eq_irq;
	u32 addr, val, ctrl0, ctrl1, page_size_val, elem_size;
	u32 pci_intf_idx = HINIC5_PCI_INTF_IDX(hwif);
	int err;

	if (type == HINIC5_AEQ) {
		/* set ctrl0 */
		addr = HINIC5_CSR_AEQ_CTRL_0_ADDR;

		val = hinic5_hwif_read_reg(hwif, addr);

		/* Read-modify-write: clear only the fields we own */
		val = AEQ_CTRL_0_CLEAR(val, INTR_IDX) &
		      AEQ_CTRL_0_CLEAR(val, DMA_ATTR) &
		      AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) &
		      AEQ_CTRL_0_CLEAR(val, INTR_MODE);

		ctrl0 = AEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) |
			AEQ_CTRL_0_SET(AEQ_DMA_ATTR_DEFAULT, DMA_ATTR) |
			AEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) |
			AEQ_CTRL_0_SET(HINIC5_INTR_MODE_ARMED, INTR_MODE);

		val |= ctrl0;

		hinic5_hwif_write_reg(hwif, addr, val);

		/* set ctrl1 */
		addr = HINIC5_CSR_AEQ_CTRL_1_ADDR;

		page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
		elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq);

		ctrl1 = AEQ_CTRL_1_SET(eq->eq_len, LEN) |
			AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) |
			AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);

		hinic5_hwif_write_reg(hwif, addr, ctrl1);
	} else {
		page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
		ctrl0 = CEQ_CTRL_0_SET(eq_irq->msix_entry_idx, INTR_IDX) |
			CEQ_CTRL_0_SET(CEQ_DMA_ATTR_DEFAULT, DMA_ATTR) |
			CEQ_CTRL_0_SET(CEQ_LMT_KICK_DEFAULT, LIMIT_KICK) |
			CEQ_CTRL_0_SET(pci_intf_idx, PCI_INTF_IDX) |
			CEQ_CTRL_0_SET(page_size_val, PAGE_SIZE) |
			CEQ_CTRL_0_SET(HINIC5_INTR_MODE_ARMED, INTR_MODE);

		ctrl1 = CEQ_CTRL_1_SET(eq->eq_len, LEN);

		/* set ceq ctrl reg through mgmt cpu */
		err = hinic5_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, ctrl0,
					      ctrl1);
		if (err != 0)
			return err;
	}

	return 0;
}
/**
 * ceq_elements_init - Initialize all the elements in the ceq
 * @eq: the event queue
 * @init_val: value to init with it the elements
 **/
static void ceq_elements_init(struct hinic5_eq *eq, u32 init_val)
{
	u32 *ceqe = NULL;
	u32 i;

	for (i = 0; i < eq->eq_len; i++) {
		ceqe = GET_CEQ_ELEM(eq, i);
		*(ceqe) = cpu_to_be32(init_val);
	}

	wmb(); /* Write the init values */
}

/**
 * aeq_elements_init - initialize all the elements in the aeq
 * @eq: the event queue
 * @init_val: value to init with it the elements
 **/
static void aeq_elements_init(struct hinic5_eq *eq, u32 init_val)
{
	struct hinic5_aeq_elem *aeqe = NULL;
	u32 i;

	for (i = 0; i < eq->eq_len; i++) {
		aeqe = GET_AEQ_ELEM(eq, i);
		aeqe->desc = cpu_to_be32(init_val);
	}

	wmb(); /* Write the init values */
}

/* Dispatch element initialization by queue type */
static void eq_elements_init(struct hinic5_eq *eq, u32 init_val)
{
	if (eq->type == HINIC5_AEQ)
		aeq_elements_init(eq, init_val);
	else
		ceq_elements_init(eq, init_val);
}

/**
 * alloc_eq_pages - allocate the pages for the queue
 * @eq: the event queue
 *
 * Allocates DMA-coherent pages, programs their physical addresses into the
 * per-page EQ address registers, and seeds every element's wrapped bit.
 * Return: 0 - Success, Negative - failure
 **/
static int alloc_eq_pages(struct hinic5_eq *eq)
{
	struct hinic5_hwif *hwif = eq->hwdev->hwif;
	struct hinic5_dma_addr_align *eq_page = NULL;
	u32 reg, init_val;
	u16 pg_idx, i;
	int err;
	gfp_t gfp_hinic5_vram;

	eq->eq_pages = kcalloc(eq->num_pages, sizeof(*eq->eq_pages),
			       GFP_KERNEL);
	if (!eq->eq_pages)
		return -ENOMEM;

	gfp_hinic5_vram = hinic5_hinic5_vram_get_gfp_hinic5_vram();

	for (pg_idx = 0; pg_idx < eq->num_pages; pg_idx++) {
		eq_page = &eq->eq_pages[pg_idx];
		err = hinic5_dma_zalloc_coherent_align(eq->hwdev->dev_hdl,
						       eq->page_size,
						       HINIC5_MIN_EQ_PAGE_SIZE,
						       GFP_KERNEL | gfp_hinic5_vram,
						       eq_page);
		if (err != 0) {
			sdk_err(eq->hwdev->dev_hdl, "Failed to alloc eq page, page index: %u\n",
				pg_idx);
			goto dma_alloc_err;
		}

		/* Tell hardware where this page lives (hi/lo 32-bit halves) */
		reg = HINIC5_EQ_HI_PHYS_ADDR_REG(eq->type, pg_idx);
		hinic5_hwif_write_reg(hwif, reg,
				      upper_32_bits(eq_page->align_paddr));

		reg = HINIC5_EQ_LO_PHYS_ADDR_REG(eq->type, pg_idx);
		hinic5_hwif_write_reg(hwif, reg,
				      lower_32_bits(eq_page->align_paddr));
	}

	eq->num_elem_in_pg = GET_EQ_NUM_ELEMS(eq, eq->page_size);
	/* GET_EQ_ELEMENT's mask arithmetic requires a power of two */
	if ((eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) != 0) {
		sdk_err(eq->hwdev->dev_hdl, "Number element in eq page != power of 2\n");
		err = -EINVAL;
		goto dma_alloc_err;
	}
	init_val = EQ_WRAPPED(eq);

	eq_elements_init(eq, init_val);

	return 0;

dma_alloc_err:
	for (i = 0; i < pg_idx; i++)
		hinic5_dma_free_coherent_align(eq->hwdev->dev_hdl,
					       &eq->eq_pages[i]);

	kfree(eq->eq_pages);

	return err;
}

/**
 * free_eq_pages - free the pages of the queue
 * @eq: the event queue
 **/
static void free_eq_pages(struct hinic5_eq *eq)
{
	u16 pg_idx;

	for (pg_idx = 0; pg_idx < eq->num_pages; pg_idx++)
		hinic5_dma_free_coherent_align(eq->hwdev->dev_hdl,
					       &eq->eq_pages[pg_idx]);

	kfree(eq->eq_pages);
}

/*
 * Pick the smallest page size (multiple of HINIC5_MIN_EQ_PAGE_SIZE, rounded
 * up to a power of two) that fits the whole queue in at most
 * HINIC5_EQ_MAX_PAGES pages.
 */
static inline u32 get_page_size(const struct hinic5_eq *eq)
{
	u32 total_size;
	u32 count;

	total_size = ALIGN((eq->eq_len * eq->elem_size),
			   HINIC5_MIN_EQ_PAGE_SIZE);
	if (total_size <= (HINIC5_EQ_MAX_PAGES(eq) * HINIC5_MIN_EQ_PAGE_SIZE))
		return HINIC5_MIN_EQ_PAGE_SIZE;

	count = (u32)(ALIGN((total_size / HINIC5_EQ_MAX_PAGES(eq)),
			    HINIC5_MIN_EQ_PAGE_SIZE) / HINIC5_MIN_EQ_PAGE_SIZE);

	/* round up to nearest power of two */
	count = 1U << (u8)fls(count - 1);

	return ((u32)HINIC5_MIN_EQ_PAGE_SIZE) * count;
}

#ifdef __VMWARE__
static VMK_ReturnStatus eq_intr_ack_handler(void *data, vmk_intr_cookie intr_cookie)
{
	return VMK_OK;
}
#endif

/**
 * request_eq_irq - set up deferred processing and claim the eq's irq line
 * @eq: the event queue
 * @entry: irq entry to request
 * Return: 0 - Success, Negative/VMK_FAILURE - failure
 **/
static int request_eq_irq(struct hinic5_eq *eq, struct irq_info *entry)
{
	int err = 0;
#ifdef __VMWARE__
	VMK_ReturnStatus st = VMK_OK;
#endif

	/* AEQs defer to a workqueue, CEQs to a tasklet */
	if (eq->type == HINIC5_AEQ)
		INIT_WORK(&eq->aeq_work, eq_irq_work);
	else
		tasklet_init(&eq->ceq_tasklet, ceq_tasklet, (ulong)(uintptr_t)eq);

	if (eq->type == HINIC5_AEQ) {
		snprintf(eq->irq_name, sizeof(eq->irq_name),
			 "hinic5_aeq%u@dev:%s", eq->q_id, dev_name(eq->hwdev->dev_hdl));
#ifdef __VMWARE__
		st = request_irq(eq->hwdev->adapter_hdl, entry->irq_id,
				 eq_intr_ack_handler, aeq_interrupt,
				 eq->irq_name, eq);
		if (st)
			err = VMK_FAILURE;
		else
			err = 0;
#else

		err = request_irq(entry->irq_id, aeq_interrupt, 0UL,
				  eq->irq_name, eq);
#endif
	} else {
		snprintf(eq->irq_name, sizeof(eq->irq_name),
			 "hinic5_ceq%u@dev:%s", eq->q_id, dev_name(eq->hwdev->dev_hdl));
#ifdef __VMWARE__
		st = request_irq(eq->hwdev->adapter_hdl, entry->irq_id,
				 eq_intr_ack_handler, ceq_interrupt,
				 eq->irq_name, eq);
		if (st)
			err = VMK_FAILURE;
		else
			err = 0;

#else
		err = request_irq(entry->irq_id, ceq_interrupt, 0UL,
				  eq->irq_name, eq);
#endif
	}

	return err;
}
/**
 * reset_eq - quiesce an eq in hardware before (re)configuration
 * @eq: the event queue
 **/
static void reset_eq(struct hinic5_eq *eq)
{
	/* clear eq_len to force eqe drop in hardware */
	if (eq->type == HINIC5_AEQ)
		hinic5_hwif_write_reg(eq->hwdev->hwif,
				      HINIC5_CSR_AEQ_CTRL_1_ADDR, 0);
	else
		hinic5_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);

	wmb(); /* clear eq_len before clear prod idx */

	hinic5_hwif_write_reg(eq->hwdev->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);
}

/**
 * set_eq_cpu - pick the cpu the eq's deferred work is queued on
 * @eq: the event queue (only AEQs honor the affinity list)
 * @hwdev: the pointer to hw device
 **/
static void set_eq_cpu(struct hinic5_eq *eq, struct hinic5_hwdev *hwdev)
{
	struct hinic5_aeqs *aeqs = hwdev->aeqs;
	u32 i;

	eq->cpu = WORK_CPU_UNBOUND;

	if (eq->type == HINIC5_AEQ && aeqs->aeq_cpu_affinity_nargs > 0) {
		/* Spread queues of different functions across the list */
		i = hinic5_global_func_id(hwdev) + eq->q_id;
		eq->cpu = aeqs->aeq_cpu_affinity[i % aeqs->aeq_cpu_affinity_nargs];
	}
}

/**
 * init_eq - initialize eq
 * @eq: the event queue
 * @hwdev: the pointer to hw device
 * @q_id: Queue id number
 * @q_len: the number of EQ elements
 * @type: the type of the event queue, ceq or aeq
 * @entry: msix entry associated with the event queue
 * Return: 0 - Success, Negative - failure
 **/
static int init_eq(struct hinic5_eq *eq, struct hinic5_hwdev *hwdev, u16 q_id,
		   u32 q_len, enum hinic5_eq_type type, struct irq_info *entry)
{
	int err = 0;

	eq->hwdev = hwdev;
	eq->q_id = q_id;
	eq->type = type;
	eq->eq_len = q_len;

	/* Indirect access should set q_id first */
	hinic5_hwif_write_reg(hwdev->hwif, HINIC5_EQ_INDIR_IDX_ADDR(eq->type),
			      eq->q_id);
	wmb(); /* write index before config */

	reset_eq(eq);

	eq->cons_idx = 0;
	eq->wrapped = 0;

	eq->elem_size = (type == HINIC5_AEQ) ?
			HINIC5_AEQE_SIZE : HINIC5_CEQE_SIZE;

	eq->page_size = get_page_size(eq);
	eq->orig_page_size = eq->page_size;
	eq->num_pages = GET_EQ_NUM_PAGES(eq, eq->page_size);

	if (eq->num_pages > HINIC5_EQ_MAX_PAGES(eq)) {
		sdk_err(hwdev->dev_hdl, "Number pages: %u too many pages for eq\n",
			eq->num_pages);
		return -EINVAL;
	}

	err = alloc_eq_pages(eq);
	if (err != 0) {
		sdk_err(hwdev->dev_hdl, "Failed to allocate pages for eq\n");
		return err;
	}

	eq->eq_irq.msix_entry_idx = entry->msix_entry_idx;
	eq->eq_irq.irq_id = entry->irq_id;

	/* Keep MSI-X masked until the caller enables it after full init */
	hinic5_set_msix_state(hwdev, entry->msix_entry_idx,
			      HINIC5_MSIX_DISABLE);
	err = set_eq_ctrls(eq);
	if (err != 0) {
		sdk_err(hwdev->dev_hdl, "Failed to set ctrls for eq\n");
		goto init_eq_ctrls_err;
	}

	set_eq_cons_idx(eq, HINIC5_EQ_ARMED);

	err = request_eq_irq(eq, entry);
	if (err != 0) {
		sdk_err(hwdev->dev_hdl,
			"Failed to request irq for the eq, err: %d\n", err);
		goto req_irq_err;
	}

	set_eq_cpu(eq, hwdev);

	return 0;

init_eq_ctrls_err:
req_irq_err:
	free_eq_pages(eq);
	return err;
}

/**
 * remove_eq - remove eq
 * @eq: the event queue
 **/
static void remove_eq(struct hinic5_eq *eq)
{
	struct irq_info *entry = &eq->eq_irq;

	hinic5_set_msix_state(eq->hwdev, entry->msix_entry_idx,
			      HINIC5_MSIX_DISABLE);
	synchronize_irq(entry->irq_id);

	free_irq(entry->irq_id, eq);

	/* Indirect access should set q_id first */
	hinic5_hwif_write_reg(eq->hwdev->hwif,
			      HINIC5_EQ_INDIR_IDX_ADDR(eq->type),
			      eq->q_id);

	wmb(); /* write index before config */

	if (eq->type == HINIC5_AEQ) {
		cancel_work_sync(&eq->aeq_work);

		/* clear eq_len to avoid hw access host memory */
		hinic5_hwif_write_reg(eq->hwdev->hwif,
				      HINIC5_CSR_AEQ_CTRL_1_ADDR, 0);
	} else {
		tasklet_kill(&eq->ceq_tasklet);

		hinic5_set_ceq_ctrl_reg(eq->hwdev, eq->q_id, 0, 0);
	}

	/* update cons_idx to avoid invalid interrupt */
	eq->cons_idx = hinic5_hwif_read_reg(eq->hwdev->hwif,
					    EQ_PROD_IDX_REG_ADDR(eq));
	set_eq_cons_idx(eq, HINIC5_EQ_NOT_ARMED);

	free_eq_pages(eq);
}

/* Clamp the g_aeq_len module parameter into its supported range */
void hinic5_aeqs_valid_params(struct hinic5_hwdev *hwdev)
{
	if (g_aeq_len < HINIC5_MIN_AEQ_LEN || g_aeq_len > HINIC5_MAX_AEQ_LEN) {
		sdk_warn(hwdev->dev_hdl,
			 "Module Parameter g_aeq_len value %u out of range, resetting to %d\n",
			 g_aeq_len, HINIC5_DEFAULT_AEQ_LEN);
		g_aeq_len = HINIC5_DEFAULT_AEQ_LEN;
	}
}

#ifndef __UEFI__
/* Log the cpu each aeq's work is bound to (diagnostics only) */
static void hinic5_aeqs_dump_cpu_affinity(struct hinic5_hwdev *hwdev)
{
	struct hinic5_aeqs *aeqs = NULL;
	u16 q_id;

	if (!hwdev || !hwdev->aeqs)
		return;
	aeqs = hwdev->aeqs;

	pr_info("func %u aeq cpu affinity:", hinic5_global_func_id(hwdev));
	for (q_id = 0; q_id < aeqs->num_aeqs; q_id++)
		pr_info(" %d", aeqs->aeq[q_id].cpu);
}

/**
 * hinic5_filter_online_cpus - compact @cpus down to valid, online entries
 * @cpus: candidate cpu list, filtered in place
 * @cpus_nargs: in: number of candidates; out: number kept
 **/
void hinic5_filter_online_cpus(int *cpus, unsigned int *cpus_nargs)
{
	unsigned int i, j, n;
	int cpu;

	if (!cpus || !cpus_nargs || *cpus_nargs == 0)
		return;

	n = *cpus_nargs;
	j = 0;
	for (i = 0; i < n; i++) {
		cpu = cpus[i];
		if (cpu >= 0 && cpu < num_possible_cpus() && (cpu_online(cpu) != 0))	{
			cpus[j] = cpus[i];
			j++;
		}
	}
	*cpus_nargs = j;
}

/* Drop offline/invalid cpus from the aeq affinity list and log the result */
static void hinic5_filter_aeq_cpu_affinity(struct hinic5_hwdev *hwdev)
{
	struct hinic5_aeqs *aeqs = hwdev->aeqs;
	u32 i;

	if (aeqs->aeq_cpu_affinity_nargs > 0) {
		hinic5_filter_online_cpus(aeqs->aeq_cpu_affinity,
					  &aeqs->aeq_cpu_affinity_nargs);
		pr_info("aeq cpu candidates (%u):", aeqs->aeq_cpu_affinity_nargs);
		for (i = 0; i < aeqs->aeq_cpu_affinity_nargs; ++i)
			pr_info(" %d", aeqs->aeq_cpu_affinity[i]);
	}
}
#endif

/* Copy the module-parameter affinity list into the aeqs and sanitize it */
void hinic5_set_aeq_cpu_affinity(struct hinic5_hwdev *hwdev)
{
	struct hinic5_aeqs *aeqs = hwdev->aeqs;

	aeqs->aeq_cpu_affinity_nargs = 0;

	if (g_aeq_cpu_affinity_nargs > 0) {
		memcpy(aeqs->aeq_cpu_affinity, g_aeq_cpu_affinity,
		       sizeof(int) * HINIC5_AEQ_CPU_AFFINITY_MAX);
		aeqs->aeq_cpu_affinity_nargs = g_aeq_cpu_affinity_nargs;
#ifndef __UEFI__
		hinic5_filter_aeq_cpu_affinity(hwdev);
#endif
	}
}
/**
 * hinic5_aeqs_init - init all the aeqs
 * @hwdev: the pointer to hw device
 * @num_aeqs: number of AEQs
 * @msix_entries: msix entries associated with the event queues
 * Return: 0 - Success, Negative - failure
 **/
int hinic5_aeqs_init(struct hinic5_hwdev *hwdev, u16 num_aeqs,
		     struct irq_info *msix_entries)
{
	struct hinic5_aeqs *aeqs = NULL;
	int err;
	u16 i, q_id;

	if (!hwdev)
		return -EINVAL;

	aeqs = kzalloc(sizeof(*aeqs), GFP_KERNEL);
	if (!aeqs)
		return -ENOMEM;

	hwdev->aeqs = aeqs;
	aeqs->hwdev = hwdev;
	aeqs->num_aeqs = num_aeqs;
	aeqs->workq = alloc_workqueue(HINIC5_EQS_WQ_NAME, WQ_MEM_RECLAIM,
				      HINIC5_MAX_AEQS);
	if (!aeqs->workq) {
		sdk_err(hwdev->dev_hdl, "Failed to initialize aeq workqueue\n");
		err = -ENOMEM;
		goto create_work_err;
	}

	hinic5_aeqs_valid_params(hwdev);
	hinic5_set_aeq_cpu_affinity(hwdev);

	for (q_id = 0; q_id < num_aeqs; q_id++) {
		err = init_eq(&aeqs->aeq[q_id], hwdev, q_id, g_aeq_len,
			      HINIC5_AEQ, &msix_entries[q_id]);
		if (err != 0) {
			sdk_err(hwdev->dev_hdl, "Failed to init aeq %u\n",
				q_id);
			goto init_aeq_err;
		}
	}
	/* Unmask interrupts only after every queue is fully initialized */
	for (q_id = 0; q_id < num_aeqs; q_id++)
		hinic5_set_msix_state(hwdev, msix_entries[q_id].msix_entry_idx,
				      HINIC5_MSIX_ENABLE);

#ifndef __UEFI__
	hinic5_aeqs_dump_cpu_affinity(hwdev);
#endif
	return 0;

init_aeq_err:
	for (i = 0; i < q_id; i++)
		remove_eq(&aeqs->aeq[i]);

	destroy_workqueue(aeqs->workq);

create_work_err:
	kfree(aeqs);

	return err;
}

/**
 * hinic5_aeqs_free - free all the aeqs
 * @hwdev: the pointer to hw device
 **/
void hinic5_aeqs_free(struct hinic5_hwdev *hwdev)
{
	struct hinic5_aeqs *aeqs = hwdev->aeqs;
	u32 aeq_event = (u32)HINIC5_HW_INTER_INT;
	u32 sw_aeq_event = (u32)HINIC5_STATELESS_EVENT;
	u16 q_id;

	for (q_id = 0; q_id < aeqs->num_aeqs; q_id++)
		remove_eq(&aeqs->aeq[q_id]);

	/* Drop any callbacks still registered from the first event onward */
	for (; sw_aeq_event < (u32)HINIC5_MAX_AEQ_SW_EVENTS; sw_aeq_event++)
		hinic5_aeq_unregister_swe_cb(hwdev, (enum hinic5_aeq_sw_type)sw_aeq_event);

	for (; aeq_event < (u32)HINIC5_MAX_AEQ_EVENTS; aeq_event++)
		hinic5_aeq_unregister_hw_cb(hwdev, (enum hinic5_aeq_type)aeq_event);

	destroy_workqueue(aeqs->workq);

	kfree(aeqs);
}

/**
 * hinic5_nic_sw_aeqe_stats - count ucode aeq for sw event
 * @hwdev: the pointer to hw device
 * @event: soft event for the handler
 * @data: cqe data
 **/
u8 hinic5_nic_sw_aeqe_stats(void *hwdev, u8 event, u8 *data)
{
	struct hinic5_hwdev *dev = hwdev;

	if (!dev)
		return 0;

	sdk_err(dev->dev_hdl, "Received nic ucode aeq event type: 0x%x, data: 0x%llx\n",
		event, *((u64 *)data));

	if (event < HINIC5_NIC_FATAL_ERROR_MAX)
		atomic_inc(&dev->hw_stats.nic_ucode_event_stats[event]);

	return 0;
}
EXPORT_SYMBOL(hinic5_nic_sw_aeqe_stats);

/**
 * hinic5_init_stateless_aeqs - init stateless_aeqs
 * @hwdev: the pointer to hw device
 * Return: 0 - Success, Negative - failure
 **/
int hinic5_init_stateless_aeqs(void *hwdev)
{
	struct hinic5_stateless_aeqs *stateless_aeqs = NULL;

	if (!hwdev)
		return -EINVAL;

	stateless_aeqs = kzalloc(sizeof(*stateless_aeqs), GFP_KERNEL);
	if (!stateless_aeqs)
		return -ENOMEM;

	((struct hinic5_hwdev *)hwdev)->stateless_aeqs = stateless_aeqs;

	return 0;
}

/**
 * hinic5_stateless_aeqs_free - free stateless_aeqs
 * @hwdev: the pointer to hw device
 *
 * Unregisters the callback (waiting out any in-flight invocation) and
 * frees the structure.
 **/
void hinic5_stateless_aeqs_free(void *hwdev)
{
	struct hinic5_hwdev *dev = hwdev;
	struct hinic5_stateless_aeqs *stateless_aeqs = NULL;

	if (!hwdev)
		return;

	stateless_aeqs = dev->stateless_aeqs;
	if (!stateless_aeqs)
		return;

	clear_bit(HINIC5_AEQ_SW_CB_REG, &stateless_aeqs->stateless_aeq_sw_cb_state);

	while (test_bit(HINIC5_AEQ_SW_CB_RUNNING,
			&stateless_aeqs->stateless_aeq_sw_cb_state))
		usleep_range(EQ_USLEEP_LOW_BOUND, EQ_USLEEP_HIG_BOUND);

	stateless_aeqs->stateless_aeq_swe_cb = NULL;
	stateless_aeqs->stateless_aeq_swe_cb_data = NULL;
	kfree(stateless_aeqs);
}

/**
 * hinic5_register_stateless_aeqs - register the stateless aeq sw callback
 * @hwdev: the pointer to hw device
 * @pri_handle: the pointer to private invoker device
 * @stateless_aeq_swe_cb: callback function
 * Return: 0 - Success, Negative - failure
 **/
int hinic5_register_stateless_aeqs(void *hwdev, void *pri_handle,
				   hinic5_aeq_swe_cb stateless_aeq_swe_cb)
{
	struct hinic5_stateless_aeqs *stateless_aeqs = NULL;

	if (!hwdev)
		return -EINVAL;

	stateless_aeqs = ((struct hinic5_hwdev *)hwdev)->stateless_aeqs;
	if (!stateless_aeqs)
		return -EINVAL;

	stateless_aeqs->stateless_aeq_swe_cb = stateless_aeq_swe_cb;
	stateless_aeqs->stateless_aeq_swe_cb_data = pri_handle;
	set_bit(HINIC5_AEQ_SW_CB_REG, &stateless_aeqs->stateless_aeq_sw_cb_state);

	return 0;
}
EXPORT_SYMBOL(hinic5_register_stateless_aeqs);

/**
 * hinic5_unregister_stateless_aeqs - unregister the stateless aeq callback
 * @hwdev: the pointer to hw device
 **/
void hinic5_unregister_stateless_aeqs(void *hwdev)
{
	struct hinic5_hwdev *dev = hwdev;
	struct hinic5_stateless_aeqs *stateless_aeqs = NULL;

	if (!hwdev)
		return;

	stateless_aeqs = dev->stateless_aeqs;
	if (!stateless_aeqs)
		return;

	clear_bit(HINIC5_AEQ_SW_CB_REG, &stateless_aeqs->stateless_aeq_sw_cb_state);

	/* Wait for an in-flight invocation of the callback to finish */
	while (test_bit(HINIC5_AEQ_SW_CB_RUNNING,
			&stateless_aeqs->stateless_aeq_sw_cb_state))
		usleep_range(EQ_USLEEP_LOW_BOUND, EQ_USLEEP_HIG_BOUND);

	stateless_aeqs->stateless_aeq_swe_cb = NULL;
	stateless_aeqs->stateless_aeq_swe_cb_data = NULL;
}
EXPORT_SYMBOL(hinic5_unregister_stateless_aeqs);

/**
 * hinic5_nic_sw_aeqe_handler - forward a sw aeq event to the stateless cb
 * @hwdev: the pointer to hw device
 * @event: soft event for the handler
 * @data: event payload
 *
 * NOTE(review): stateless_aeqs is dereferenced without a NULL check here,
 * unlike the register/free paths — confirm callers guarantee it is set.
 **/
u8 hinic5_nic_sw_aeqe_handler(void *hwdev, u8 event, u8 *data)
{
	struct hinic5_hwdev *dev = hwdev;
	struct hinic5_stateless_aeqs *stateless_aeqs = NULL;

	if (!dev)
		return 0;

	stateless_aeqs = dev->stateless_aeqs;

	set_bit(HINIC5_AEQ_SW_CB_RUNNING,
		&stateless_aeqs->stateless_aeq_sw_cb_state);
	if (stateless_aeqs->stateless_aeq_swe_cb &&
	    test_bit(HINIC5_AEQ_SW_CB_REG,
		     &stateless_aeqs->stateless_aeq_sw_cb_state))
		stateless_aeqs->stateless_aeq_swe_cb(stateless_aeqs->stateless_aeq_swe_cb_data,
						     event, data);

	clear_bit(HINIC5_AEQ_SW_CB_RUNNING,
		  &stateless_aeqs->stateless_aeq_sw_cb_state);

	return 0;
}
/**
 * hinic5_ceqs_init - init all the ceqs
 * @hwdev: the pointer to hw device
 * @num_ceqs: number of CEQs
 * @msix_entries: msix entries associated with the event queues
 * Return: 0 - Success, Negative - failure
 **/
int hinic5_ceqs_init(struct hinic5_hwdev *hwdev, u16 num_ceqs,
		     struct irq_info *msix_entries)
{
	struct hinic5_ceqs *ceqs = NULL;
	int err;
	u16 i, q_id;

	ceqs = kzalloc(sizeof(*ceqs), GFP_KERNEL);
	if (!ceqs)
		return -ENOMEM;

	hwdev->ceqs = ceqs;

	ceqs->hwdev = hwdev;
	ceqs->num_ceqs = num_ceqs;

	/* Clamp module parameters into supported ranges */
	if (g_ceq_len < HINIC5_MIN_CEQ_LEN || g_ceq_len > HINIC5_MAX_CEQ_LEN) {
		sdk_warn(hwdev->dev_hdl, "Module Parameter g_ceq_len value %u out of range, resetting to %d\n",
			 g_ceq_len, HINIC5_DEFAULT_CEQ_LEN);
		g_ceq_len = HINIC5_DEFAULT_CEQ_LEN;
	}

	if (g_num_ceqe_in_tasklet == 0) {
		sdk_warn(hwdev->dev_hdl, "Module Parameter g_num_ceqe_in_tasklet can not be zero, resetting to %d\n",
			 HINIC5_TASK_PROCESS_EQE_LIMIT);
		g_num_ceqe_in_tasklet = HINIC5_TASK_PROCESS_EQE_LIMIT;
	}
	for (q_id = 0; q_id < num_ceqs; q_id++) {
		err = init_eq(&ceqs->ceq[q_id], hwdev, q_id, g_ceq_len,
			      HINIC5_CEQ, &msix_entries[q_id]);
		if (err != 0) {
			sdk_err(hwdev->dev_hdl, "Failed to init ceq %u\n",
				q_id);
			goto init_ceq_err;
		}
	}
	/* Unmask interrupts only after every queue is fully initialized */
	for (q_id = 0; q_id < num_ceqs; q_id++)
		hinic5_set_msix_state(hwdev, msix_entries[q_id].msix_entry_idx,
				      HINIC5_MSIX_ENABLE);

	for (i = 0; i < (u16)HINIC5_MAX_CEQ_EVENTS; i++)
		ceqs->ceq_cb_state[i] = 0;

	return 0;

init_ceq_err:
	for (i = 0; i < q_id; i++)
		remove_eq(&ceqs->ceq[i]);

	kfree(ceqs);

	return err;
}

/**
 * hinic5_ceqs_free - free all the ceqs
 * @hwdev: the pointer to hw device
 **/
void hinic5_ceqs_free(struct hinic5_hwdev *hwdev)
{
	struct hinic5_ceqs *ceqs = hwdev->ceqs;
	u32 ceq_event = (u32)HINIC5_CMDQ;
	u16 q_id;

	for (q_id = 0; q_id < ceqs->num_ceqs; q_id++)
		remove_eq(&ceqs->ceq[q_id]);

	/* Drop any callbacks still registered from the first event onward */
	for (; ceq_event < (u32)HINIC5_MAX_CEQ_EVENTS; ceq_event++)
		hinic5_ceq_unregister_cb(hwdev, (enum hinic5_ceq_event)ceq_event);

	kfree(ceqs);
}

/* Report the irq/msix bindings of every ceq to the caller */
void hinic5_get_ceq_irqs(struct hinic5_hwdev *hwdev, struct irq_info *irqs,
			 u16 *num_irqs)
{
	struct hinic5_ceqs *ceqs = hwdev->ceqs;
	u16 q_id;

	for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) {
		irqs[q_id].irq_id = ceqs->ceq[q_id].eq_irq.irq_id;
		irqs[q_id].msix_entry_idx =
			ceqs->ceq[q_id].eq_irq.msix_entry_idx;
	}

	*num_irqs = ceqs->num_ceqs;
}

/* Report the irq/msix bindings of every aeq to the caller */
void hinic5_get_aeq_irqs(struct hinic5_hwdev *hwdev, struct irq_info *irqs,
			 u16 *num_irqs)
{
	struct hinic5_aeqs *aeqs = hwdev->aeqs;
	u16 q_id;

	for (q_id = 0; q_id < aeqs->num_aeqs; q_id++) {
		irqs[q_id].irq_id = aeqs->aeq[q_id].eq_irq.irq_id;
		irqs[q_id].msix_entry_idx =
			aeqs->aeq[q_id].eq_irq.msix_entry_idx;
	}

	*num_irqs = aeqs->num_aeqs;
}

/* Dump per-aeq hardware/software state to the log for debugging */
void hinic5_dump_aeq_info(struct hinic5_hwdev *hwdev)
{
	struct hinic5_aeq_elem *aeqe_pos = NULL;
	struct hinic5_eq *eq = NULL;
	u32 addr, ci, pi, ctrl0, idx;
	int q_id;

	for (q_id = 0; q_id < hwdev->aeqs->num_aeqs; q_id++) {
		eq = &hwdev->aeqs->aeq[q_id];
		/* Indirect access should set q_id first */
		hinic5_hwif_write_reg(eq->hwdev->hwif, HINIC5_EQ_INDIR_IDX_ADDR(eq->type),
				      eq->q_id);
		wmb(); /* write index before config */

		addr = HINIC5_CSR_AEQ_CTRL_0_ADDR;

		ctrl0 = hinic5_hwif_read_reg(hwdev->hwif, addr);

		idx = hinic5_hwif_read_reg(hwdev->hwif, HINIC5_EQ_INDIR_IDX_ADDR(eq->type));

		addr = EQ_CONS_IDX_REG_ADDR(eq);
		ci = hinic5_hwif_read_reg(hwdev->hwif, addr);
		addr = EQ_PROD_IDX_REG_ADDR(eq);
		pi = hinic5_hwif_read_reg(hwdev->hwif, addr);
		aeqe_pos = GET_CURR_AEQ_ELEM(eq);
		sdk_err(hwdev->dev_hdl,
			"Aeq id: %d, idx: %u, ctrl0: 0x%08x, ci: 0x%08x, pi: 0x%x, work_state: 0x%x, wrap: %u, desc: 0x%x swci:0x%x\n",
			q_id, idx, ctrl0, ci, pi, work_busy(&eq->aeq_work),
			eq->wrapped, be32_to_cpu(aeqe_pos->desc), eq->cons_idx);
	}

	hinic5_show_chip_err_info(hwdev);
}

/* Dump per-ceq hardware/software state to the log for debugging */
void hinic5_dump_ceq_info(struct hinic5_hwdev *hwdev)
{
	struct hinic5_eq *eq = NULL;
	u32 addr, ci, pi;
	int q_id;

	if (!hwdev->ceqs)
		return;

	for (q_id = 0; q_id < hwdev->ceqs->num_ceqs; q_id++) {
		eq = &hwdev->ceqs->ceq[q_id];
		/* Indirect access should set q_id first */
		hinic5_hwif_write_reg(eq->hwdev->hwif,
				      HINIC5_EQ_INDIR_IDX_ADDR(eq->type),
				      eq->q_id);
		wmb(); /* write index before config */

		addr = EQ_CONS_IDX_REG_ADDR(eq);
		ci = hinic5_hwif_read_reg(hwdev->hwif, addr);
		addr = EQ_PROD_IDX_REG_ADDR(eq);
		pi = hinic5_hwif_read_reg(hwdev->hwif, addr);
		sdk_err(hwdev->dev_hdl,
			"Ceq id: %d, ci: 0x%08x, sw_ci: 0x%08x, pi: 0x%x, tasklet_state: 0x%lx, wrap: %u, ceqe: 0x%x\n",
			q_id, ci, eq->cons_idx, pi,
			tasklet_state(&eq->ceq_tasklet),
			eq->wrapped, be32_to_cpu(*(GET_CURR_CEQ_ELEM(eq))));

		sdk_err(hwdev->dev_hdl, "Ceq last response hard interrupt time: %u\n",
			jiffies_to_msecs(jiffies - eq->hard_intr_jif));
		sdk_err(hwdev->dev_hdl, "Ceq last response soft interrupt time: %u\n",
			jiffies_to_msecs(jiffies - eq->soft_intr_jif));
	}

	hinic5_show_chip_err_info(hwdev);
}

/**
 * hinic5_get_ceq_info - export a ceq's geometry to the caller
 * @hwdev: the pointer to hw device
 * @q_id: ceq index
 * @ceq_info: output; filled with length/page/element layout
 * Return: 0 - Success, -EINVAL - bad argument or q_id out of range
 **/
int hinic5_get_ceq_info(void *hwdev, u16 q_id, struct hinic5_ceq_info *ceq_info)
{
	struct hinic5_hwdev *dev = hwdev;
	struct hinic5_eq *eq = NULL;

	if (!hwdev || !ceq_info)
		return -EINVAL;

	if (q_id >= dev->ceqs->num_ceqs)
		return -EINVAL;

	eq = &dev->ceqs->ceq[q_id];
	ceq_info->q_len = eq->eq_len;
	ceq_info->num_pages = eq->num_pages;
	ceq_info->page_size = eq->page_size;
	ceq_info->num_elem_in_pg = eq->num_elem_in_pg;
	ceq_info->elem_size = eq->elem_size;
	sdk_info(dev->dev_hdl, "get_ceq_info: qid=0x%x page_size=%u\n",
		 q_id, eq->page_size);

	return 0;
}
EXPORT_SYMBOL(hinic5_get_ceq_info);

int hinic5_init_single_ceq_status(void *hwdev, u16 q_id)
{
	int err = 0;
struct hinic5_hwdev *dev = hwdev; + struct hinic5_eq *eq = NULL; + + if (!hwdev) { + pr_err("%s(%d), hwdev is null\n", __func__, __LINE__); + return -EINVAL; + } + + if (q_id >= dev->ceqs->num_ceqs) { + sdk_err(dev->dev_hdl, "q_id=%u is larger than num_ceqs %u.\n", + q_id, dev->ceqs->num_ceqs); + return -EINVAL; + } + + eq = &dev->ceqs->ceq[q_id]; + /* Indirect access should set q_id first */ + hinic5_hwif_write_reg(dev->hwif, HINIC5_EQ_INDIR_IDX_ADDR(eq->type), eq->q_id); + wmb(); /* write index before config */ + + reset_eq(eq); + + err = set_eq_ctrls(eq); + if (err != 0) { + sdk_err(dev->dev_hdl, "Failed to set ctrls for eq\n"); + return err; + } + set_eq_cons_idx(eq, HINIC5_EQ_ARMED); + + return 0; +} +EXPORT_SYMBOL(hinic5_init_single_ceq_status); + +int hinic5_get_ceq_page_phy_addr(void *hwdev, u16 q_id, + u16 page_idx, u64 *page_phy_addr) +{ + struct hinic5_hwdev *dev = hwdev; + struct hinic5_eq *eq = NULL; + + if (!hwdev || !page_phy_addr) + return -EINVAL; + + if (q_id >= dev->ceqs->num_ceqs) + return -EINVAL; + + eq = &dev->ceqs->ceq[q_id]; + if (page_idx >= eq->num_pages) + return -EINVAL; + + *page_phy_addr = eq->eq_pages[page_idx].align_paddr; + sdk_info(dev->dev_hdl, "ceq_page_phy_addr: 0x%llx page_idx=%u\n", + eq->eq_pages[page_idx].align_paddr, page_idx); + + return 0; +} +EXPORT_SYMBOL(hinic5_get_ceq_page_phy_addr); + +int hinic5_set_ceq_irq_disable(void *hwdev, u16 q_id) +{ + struct hinic5_hwdev *dev = hwdev; + struct hinic5_eq *ceq = NULL; + + if (!hwdev || !dev->ceqs) + return -EINVAL; + + if (q_id >= dev->ceqs->num_ceqs) + return -EINVAL; + + ceq = &dev->ceqs->ceq[q_id]; + + hinic5_set_msix_state(ceq->hwdev, ceq->eq_irq.msix_entry_idx, + HINIC5_MSIX_DISABLE); + + return 0; +} +EXPORT_SYMBOL(hinic5_set_ceq_irq_disable); diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_hw_api.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_hw_api.c new file mode 100644 index 00000000..dbe196d3 --- /dev/null +++ 
b/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_hw_api.c @@ -0,0 +1,453 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include "ossl_knl.h" +#include "hinic5_hw.h" +#include "hinic5_common.h" +#include "hinic5_hwdev.h" +#include "hinic5_api_cmd.h" +#include "hinic5_mgmt.h" +#include "hinic5_hw_api.h" + #ifndef HTONL +#define HTONL(x) \ + ((((x) & 0x000000ff) << 24) \ + | (((x) & 0x0000ff00) << 8) \ + | (((x) & 0x00ff0000) >> 8) \ + | (((x) & 0xff000000) >> 24)) +#endif + +static void hinic5_sml_ctr_read_build_req(struct chipif_sml_ctr_rd_req *msg, + u8 instance_id, u8 op_id, + u8 ack, u32 ctr_id) +{ + msg->head.value = 0; + msg->head.bs.instance = instance_id; + msg->head.bs.op_id = op_id; + msg->head.bs.ack = ack; + msg->head.value = HTONL(msg->head.value); + msg->ctr_id = ctr_id; + msg->ctr_id = HTONL(msg->ctr_id); + msg->initial = 0; +} + +static void sml_ctr_htonl_n(u32 *node, u32 len) +{ + u32 i; + u32 *node_new = node; + + for (i = 0; i < len; i++) { + *node_new = HTONL(*node_new); + node_new++; + } +} + +/** + * hinic5_sm_ctr_rd16 - small single 16 counter read + * @hwdev: the hardware device + * @node: the node id + * @ctr_id: counter id + * @value: read counter value ptr + * Return: 0 - success, negative - failure + **/ +int hinic5_sm_ctr_rd16(void *hwdev, u8 node, u8 instance, u32 ctr_id, + u16 *value) +{ + struct chipif_sml_ctr_rd_req req; + union ctr_rd_rsp rsp; + int ret; + + if (!hwdev || !value) + return -EFAULT; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic5_hwdev *)hwdev)) + return -EPERM; + + memset(&req, 0, sizeof(req)); + + hinic5_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ, + CHIPIF_ACK, ctr_id); + + ret = hinic5_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), + (void *)&rsp, + (unsigned short)sizeof(rsp)); + if (ret != 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "Sm 16bit counter read fail, err(%d)\n", ret); + return ret; + } 
+ sml_ctr_htonl_n((u32 *)(void *)&rsp, sizeof(rsp) / sizeof(u32)); + *value = rsp.bs_ss16_rsp.value1; + + return 0; +} + +/** + * hinic5_sm_ctr_rd32 - small single 32 counter read + * @hwdev: the hardware device + * @node: the node id + * @ctr_id: counter id + * @value: read counter value ptr + * Return: 0 - success, negative - failure + **/ +int hinic5_sm_ctr_rd32(void *hwdev, u8 node, u8 instance, u32 ctr_id, + u32 *value) +{ + struct chipif_sml_ctr_rd_req req; + union ctr_rd_rsp rsp; + int ret; + + if (!hwdev || !value) + return -EFAULT; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic5_hwdev *)hwdev)) + return -EPERM; + + memset(&req, 0, sizeof(req)); + + hinic5_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ, + CHIPIF_ACK, ctr_id); + + ret = hinic5_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), + (void *)&rsp, + (unsigned short)sizeof(rsp)); + if (ret != 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "Sm 32bit counter read fail, err(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)(void *)&rsp, sizeof(rsp) / sizeof(u32)); + *value = rsp.bs_ss32_rsp.value1; + + return 0; +} + +/** + * hinic5_sm_ctr_rd32_clear - small single 32 counter read and clear to zero + * @hwdev: the hardware device + * @node: the node id + * @ctr_id: counter id + * @value: read counter value ptr + * Return: 0 - success, negative - failure + * according to ACN error code (ERR_OK, ERR_PARAM, ERR_FAILED...etc) + **/ +int hinic5_sm_ctr_rd32_clear(void *hwdev, u8 node, u8 instance, + u32 ctr_id, u32 *value) +{ + struct chipif_sml_ctr_rd_req req; + union ctr_rd_rsp rsp; + int ret; + + if (!hwdev || !value) + return -EFAULT; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic5_hwdev *)hwdev)) + return -EPERM; + + memset(&req, 0, sizeof(req)); + + hinic5_sml_ctr_read_build_req(&req, instance, + CHIPIF_SM_CTR_OP_READ_CLEAR, + CHIPIF_ACK, ctr_id); + + ret = hinic5_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), + (void 
*)&rsp, + (unsigned short)sizeof(rsp)); + if (ret != 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "Sm 32bit counter clear fail, err(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)(void *)&rsp, sizeof(rsp) / sizeof(u32)); + *value = rsp.bs_ss32_rsp.value1; + + return 0; +} + +/** + * hinic5_sm_ctr_rd64_pair - big pair 128 counter read + * @hwdev: the hardware device + * @node: the node id + * @ctr_id: counter id + * @value1: read counter value ptr + * @value2: read counter value ptr + * Return: 0 - success, negative - failure + **/ +int hinic5_sm_ctr_rd64_pair(void *hwdev, u8 node, u8 instance, + u32 ctr_id, u64 *value1, u64 *value2) +{ + struct chipif_sml_ctr_rd_req req; + union ctr_rd_rsp rsp; + int ret; + + if (!value1) { + pr_err("First value is NULL for read 64 bit pair\n"); + return -EFAULT; + } + + if (!value2) { + pr_err("Second value is NULL for read 64 bit pair\n"); + return -EFAULT; + } + + if (!hwdev || ((ctr_id & 0x1) != 0)) { + pr_err("Hwdev is NULL or ctr_id(%u) is odd number for read 64 bit pair\n", + ctr_id); + return -EFAULT; + } + + if (!COMM_SUPPORT_API_CHAIN((struct hinic5_hwdev *)hwdev)) + return -EPERM; + + memset(&req, 0, sizeof(req)); + + hinic5_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ, + CHIPIF_ACK, ctr_id); + + ret = hinic5_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), (void *)&rsp, + (unsigned short)sizeof(rsp)); + if (ret != 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "Sm 64 bit rd pair ret(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)(void *)&rsp, sizeof(rsp) / sizeof(u32)); + *value1 = ((u64)rsp.bs_bp64_rsp.val1_h << BIT_32) | rsp.bs_bp64_rsp.val1_l; + *value2 = ((u64)rsp.bs_bp64_rsp.val2_h << BIT_32) | rsp.bs_bp64_rsp.val2_l; + + return 0; +} + +/** + * hinic5_sm_ctr_rd64_pair_clear - big pair 128 counter read and clear to zero + * @hwdev: the hardware device + * @node: the node id + * @ctr_id: counter id + * @value1: read counter value ptr 
+ * @value2: read counter value ptr + * Return: 0 - success, negative - failure + **/ +int hinic5_sm_ctr_rd64_pair_clear(void *hwdev, u8 node, u8 instance, u32 ctr_id, + u64 *value1, u64 *value2) +{ + struct chipif_sml_ctr_rd_req req = {0}; + union ctr_rd_rsp rsp; + int ret; + + if (!hwdev || !value1 || !value2 || ((ctr_id & 0x1) != 0)) { + pr_err("Hwdev or value1 or value2 is NULL or ctr_id(%u) is odd number\n", ctr_id); + return -EINVAL; + } + + if (!COMM_SUPPORT_API_CHAIN((struct hinic5_hwdev *)hwdev)) + return -EPERM; + + hinic5_sml_ctr_read_build_req(&req, instance, + CHIPIF_SM_CTR_OP_READ_CLEAR, + CHIPIF_ACK, ctr_id); + + ret = hinic5_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), (void *)&rsp, + (unsigned short)sizeof(rsp)); + if (ret != 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "Sm 64 bit clear pair fail. ret(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)(void *)&rsp, sizeof(rsp) / sizeof(u32)); + *value1 = ((u64)rsp.bs_bp64_rsp.val1_h << BIT_32) | rsp.bs_bp64_rsp.val1_l; + *value2 = ((u64)rsp.bs_bp64_rsp.val2_h << BIT_32) | rsp.bs_bp64_rsp.val2_l; + + return 0; +} + +/** + * hinic5_sm_ctr_rd64 - big counter 64 read + * @hwdev: the hardware device + * @node: the node id + * @ctr_id: counter id + * @value: read counter value ptr + * Return: 0 - success, negative - failure + **/ +int hinic5_sm_ctr_rd64(void *hwdev, u8 node, u8 instance, u32 ctr_id, + u64 *value) +{ + struct chipif_sml_ctr_rd_req req; + union ctr_rd_rsp rsp; + int ret; + + if (!hwdev || !value) + return -EFAULT; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic5_hwdev *)hwdev)) + return -EPERM; + + memset(&req, 0, sizeof(req)); + + hinic5_sml_ctr_read_build_req(&req, instance, CHIPIF_SM_CTR_OP_READ, + CHIPIF_ACK, ctr_id); + + ret = hinic5_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), (void *)&rsp, + (unsigned short)sizeof(rsp)); + if (ret != 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "Sm 64bit counter 
read fail err(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)(void *)&rsp, sizeof(rsp) / sizeof(u32)); + *value = ((u64)rsp.bs_bs64_rsp.value1 << BIT_32) | rsp.bs_bs64_rsp.value2; + + return 0; +} +EXPORT_SYMBOL(hinic5_sm_ctr_rd64); + +/** + * hinic5_sm_ctr_rd64_clear - big counter 64 read and clear to zero + * @hwdev: the hardware device + * @node: the node id + * @ctr_id: counter id + * @value: read counter value ptr + * Return: 0 - success, negative - failure + **/ +int hinic5_sm_ctr_rd64_clear(void *hwdev, u8 node, u8 instance, u32 ctr_id, + u64 *value) +{ + struct chipif_sml_ctr_rd_req req = {0}; + union ctr_rd_rsp rsp; + int ret; + + if (!hwdev || !value) + return -EINVAL; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic5_hwdev *)hwdev)) + return -EPERM; + + hinic5_sml_ctr_read_build_req(&req, instance, + CHIPIF_SM_CTR_OP_READ_CLEAR, + CHIPIF_ACK, ctr_id); + + ret = hinic5_api_cmd_read_ack(hwdev, node, (u8 *)&req, + (unsigned short)sizeof(req), (void *)&rsp, + (unsigned short)sizeof(rsp)); + if (ret != 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "Sm 64bit counter clear fail err(%d)\n", ret); + return ret; + } + sml_ctr_htonl_n((u32 *)(void *)&rsp, sizeof(rsp) / sizeof(u32)); + *value = ((u64)rsp.bs_bs64_rsp.value1 << BIT_32) | rsp.bs_bs64_rsp.value2; + + return 0; +} + +int hinic5_api_csr_rd32(void *hwdev, u8 dest, u32 addr, u32 *val) +{ + struct hinic5_csr_request_api_data api_data = {0}; + u32 csr_val = 0; + u16 in_size = sizeof(api_data); + int ret; + + if (!hwdev || !val) + return -EFAULT; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic5_hwdev *)hwdev)) + return -EPERM; + + memset(&api_data, 0, sizeof(struct hinic5_csr_request_api_data)); + api_data.dw0 = 0; + api_data.dw1.bits.operation_id = HINIC5_CSR_OPERATION_READ_CSR; + api_data.dw1.bits.need_response = HINIC5_CSR_NEED_RESP_DATA; + api_data.dw1.bits.data_size = HINIC5_CSR_DATA_SZ_32; + api_data.dw1.val32 = cpu_to_be32(api_data.dw1.val32); + api_data.dw2.bits.csr_addr = addr; + 
api_data.dw2.val32 = cpu_to_be32(api_data.dw2.val32); + + ret = hinic5_api_cmd_read_ack(hwdev, dest, (u8 *)(&api_data), + in_size, &csr_val, 0x4); + if (ret != 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "Read 32 bit csr fail, dest %u addr 0x%x, ret: 0x%x\n", + dest, addr, ret); + return ret; + } + + *val = csr_val; + + return 0; +} + +int hinic5_api_csr_wr32(void *hwdev, u8 dest, u32 addr, u32 val) +{ + struct hinic5_csr_request_api_data api_data; + u16 in_size = sizeof(api_data); + int ret; + + if (!hwdev) + return -EFAULT; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic5_hwdev *)hwdev)) + return -EPERM; + + memset(&api_data, 0, sizeof(struct hinic5_csr_request_api_data)); + api_data.dw1.bits.operation_id = HINIC5_CSR_OPERATION_WRITE_CSR; + api_data.dw1.bits.need_response = HINIC5_CSR_NO_RESP_DATA; + api_data.dw1.bits.data_size = HINIC5_CSR_DATA_SZ_32; + api_data.dw1.val32 = cpu_to_be32(api_data.dw1.val32); + api_data.dw2.bits.csr_addr = addr; + api_data.dw2.val32 = cpu_to_be32(api_data.dw2.val32); + api_data.csr_write_data_h = 0xffffffff; + api_data.csr_write_data_l = val; + + ret = hinic5_api_cmd_write_nack(hwdev, dest, (u8 *)(&api_data), + in_size); + if (ret != 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "Write 32 bit csr fail! 
dest %u addr 0x%x val 0x%x\n", + dest, addr, val); + return ret; + } + + return 0; +} + +int hinic5_api_csr_rd64(void *hwdev, u8 dest, u32 addr, u64 *val) +{ + struct hinic5_csr_request_api_data api_data = {0}; + u64 csr_val = 0; + u16 in_size = sizeof(api_data); + int ret; + + if (!hwdev || !val) + return -EFAULT; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic5_hwdev *)hwdev)) + return -EPERM; + + memset(&api_data, 0, sizeof(struct hinic5_csr_request_api_data)); + api_data.dw0 = 0; + api_data.dw1.bits.operation_id = HINIC5_CSR_OPERATION_READ_CSR; + api_data.dw1.bits.need_response = HINIC5_CSR_NEED_RESP_DATA; + api_data.dw1.bits.data_size = HINIC5_CSR_DATA_SZ_64; + api_data.dw1.val32 = cpu_to_be32(api_data.dw1.val32); + api_data.dw2.bits.csr_addr = addr; + api_data.dw2.val32 = cpu_to_be32(api_data.dw2.val32); + + ret = hinic5_api_cmd_read_ack(hwdev, dest, (u8 *)(&api_data), + in_size, &csr_val, 0x8); + if (ret != 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "Read 64 bit csr fail, dest %u addr 0x%x\n", + dest, addr); + return ret; + } + + *val = csr_val; + + return 0; +} +EXPORT_SYMBOL(hinic5_api_csr_rd64); + diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_hwif.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_hwif.c new file mode 100644 index 00000000..5510a88b --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_hwif.c @@ -0,0 +1,1335 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/types.h> +#include <linux/delay.h> +#include <linux/module.h> + +#include "ossl_knl.h" +#include "hinic5_csr_inner.h" +#include "hinic5_crm.h" +#include "hinic5_hw.h" +#include "hinic5_common.h" +#include "hinic5_hwdev.h" +#include "hinic5_hwif_inner.h" + +#define WAIT_HWIF_READY_TIMEOUT 30000 +#define MAX_TS_UP_EN_RETRY_CNT 100 + +#define HINIC5_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT 180000 + 
+#define MAX_MSIX_ENTRY 2048 + +#define DB_IDX(db, db_base) \ + ((u32)(((ulong)(db) - (ulong)(db_base)) / \ + HINIC5_DB_PAGE_SIZE)) + +#define HINIC5_AF0_FUNC_GLOBAL_IDX_SHIFT 0 +#define HINIC5_AF0_P2P_IDX_SHIFT 12 +#define HINIC5_AF0_PCI_INTF_IDX_SHIFT 17 +#define HINIC5_AF0_VF_IN_PF_SHIFT 20 +#define HINIC5_AF0_FUNC_TYPE_SHIFT 28 + +#define HINIC5_AF0_FUNC_GLOBAL_IDX_MASK 0xFFF +#define HINIC5_AF0_P2P_IDX_MASK 0x1F +#define HINIC5_AF0_PCI_INTF_IDX_MASK 0x7 +#define HINIC5_AF0_VF_IN_PF_MASK 0xFF +#define HINIC5_AF0_FUNC_TYPE_MASK 0x1 + +#define HINIC5_AF0_GET(val, member) \ + (((val) >> HINIC5_AF0_##member##_SHIFT) & HINIC5_AF0_##member##_MASK) + +#define HINIC5_AF1_PPF_IDX_SHIFT 0 +#define HINIC5_AF1_AEQS_PER_FUNC_SHIFT 8 +#define HINIC5_AF1_MGMT_INIT_STATUS_SHIFT 30 +#define HINIC5_AF1_PF_INIT_STATUS_SHIFT 31 + +#define HINIC5_AF1_PPF_IDX_MASK 0x3F +#define HINIC5_AF1_AEQS_PER_FUNC_MASK 0x3 +#define HINIC5_AF1_MGMT_INIT_STATUS_MASK 0x1 +#define HINIC5_AF1_PF_INIT_STATUS_MASK 0x1 + +#define HINIC5_AF1_GET(val, member) \ + (((val) >> HINIC5_AF1_##member##_SHIFT) & HINIC5_AF1_##member##_MASK) + +#define HINIC5_AF2_CEQS_PER_FUNC_SHIFT 0 +#define HINIC5_AF2_DMA_ATTR_PER_FUNC_SHIFT 9 +#define HINIC5_AF2_IRQS_PER_FUNC_SHIFT 16 + +#define HINIC5_AF2_CEQS_PER_FUNC_MASK 0x1FF +#define HINIC5_AF2_DMA_ATTR_PER_FUNC_MASK 0x7 +#define HINIC5_AF2_IRQS_PER_FUNC_MASK 0x7FF + +#define HINIC5_AF2_GET(val, member) \ + (((val) >> HINIC5_AF2_##member##_SHIFT) & HINIC5_AF2_##member##_MASK) + +#define HINIC5_AF3_GLOBAL_VF_ID_OF_NXT_PF_SHIFT 0 +#define HINIC5_AF3_GLOBAL_VF_ID_OF_PF_SHIFT 16 + +#define HINIC5_AF3_GLOBAL_VF_ID_OF_NXT_PF_MASK 0xFFF +#define HINIC5_AF3_GLOBAL_VF_ID_OF_PF_MASK 0xFFF + +#define HINIC5_AF3_GET(val, member) \ + (((val) >> HINIC5_AF3_##member##_SHIFT) & HINIC5_AF3_##member##_MASK) + +#define HINIC5_AF4_DOORBELL_CTRL_SHIFT 0 +#define HINIC5_AF4_DOORBELL_CTRL_MASK 0x1 + +#define HINIC5_AF4_GET(val, member) \ + (((val) >> HINIC5_AF4_##member##_SHIFT) & 
HINIC5_AF4_##member##_MASK) + +#define HINIC5_AF4_SET(val, member) \ + (((val) & HINIC5_AF4_##member##_MASK) << HINIC5_AF4_##member##_SHIFT) + +#define HINIC5_AF4_CLEAR(val, member) \ + ((val) & (~(HINIC5_AF4_##member##_MASK << HINIC5_AF4_##member##_SHIFT))) + +#define HINIC5_AF5_OUTBOUND_CTRL_SHIFT 0 +#define HINIC5_AF5_OUTBOUND_CTRL_MASK 0x1 + +#define HINIC5_AF5_GET(val, member) \ + (((val) >> HINIC5_AF5_##member##_SHIFT) & HINIC5_AF5_##member##_MASK) + +#define HINIC5_AF5_SET(val, member) \ + (((val) & HINIC5_AF5_##member##_MASK) << HINIC5_AF5_##member##_SHIFT) + +#define HINIC5_AF5_CLEAR(val, member) \ + ((val) & (~(HINIC5_AF5_##member##_MASK << HINIC5_AF5_##member##_SHIFT))) + +#define HINIC5_AF6_PF_STATUS_SHIFT 0 +#define HINIC5_AF6_PF_STATUS_MASK 0xFFFF + +#define HINIC5_AF6_FUNC_MAX_SQ_SHIFT 23 +#define HINIC5_AF6_FUNC_MAX_SQ_MASK 0x1FF + +#define HINIC5_AF6_MSIX_FLEX_EN_SHIFT 22 +#define HINIC5_AF6_MSIX_FLEX_EN_MASK 0x1 + +#define HINIC5_AF6_HW_TYPE_SHIFT 17 +#define HINIC5_AF6_HW_TYPE_MASK 0x3 + +#define HINIC5_TASK1_MBOX_TIMEOUT_SHIFT 0 +#define HINIC5_TASK1_MBOX_TIMEOUT_MASK 0x1 + +#define HINIC5_AF6_SET(val, member) \ + ((((u32)(val)) & HINIC5_AF6_##member##_MASK) << \ + HINIC5_AF6_##member##_SHIFT) + +#define HINIC5_AF6_GET(val, member) \ + (((u32)(val) >> HINIC5_AF6_##member##_SHIFT) & HINIC5_AF6_##member##_MASK) + +#define HINIC5_AF6_CLEAR(val, member) \ + ((u32)(val) & (~(HINIC5_AF6_##member##_MASK << \ + HINIC5_AF6_##member##_SHIFT))) + +#define HINIC5_TASK1_SET(val, member) \ + ((((u32)(val)) & HINIC5_TASK1_##member##_MASK) << \ + HINIC5_TASK1_##member##_SHIFT) + +#define HINIC5_TASK1_CLEAR(val, member) \ + ((u32)(val) & (~(HINIC5_TASK1_##member##_MASK << \ + HINIC5_TASK1_##member##_SHIFT))) + +#define HINIC5_PPF_ELECT_PORT_IDX_SHIFT 0 + +#define HINIC5_PPF_ELECT_PORT_IDX_MASK 0x3F + +#define HINIC5_PPF_ELECT_PORT_GET(val, member) \ + (((val) >> HINIC5_PPF_ELECT_PORT_##member##_SHIFT) & \ + HINIC5_PPF_ELECT_PORT_##member##_MASK) + +#define 
HINIC5_PPF_ELECTION_IDX_SHIFT 0 + +#define HINIC5_PPF_ELECTION_IDX_MASK 0x3F + +#define HINIC5_PPF_ELECTION_SET(val, member) \ + (((val) & HINIC5_PPF_ELECTION_##member##_MASK) << \ + HINIC5_PPF_ELECTION_##member##_SHIFT) + +#define HINIC5_PPF_ELECTION_GET(val, member) \ + (((val) >> HINIC5_PPF_ELECTION_##member##_SHIFT) & \ + HINIC5_PPF_ELECTION_##member##_MASK) + +#define HINIC5_PPF_ELECTION_CLEAR(val, member) \ + ((val) & (~(HINIC5_PPF_ELECTION_##member##_MASK << \ + HINIC5_PPF_ELECTION_##member##_SHIFT))) + +#define HINIC5_MPF_ELECTION_IDX_SHIFT 0 + +#define HINIC5_MPF_ELECTION_IDX_MASK 0x1F + +#define HINIC5_MPF_ELECTION_SET(val, member) \ + (((val) & HINIC5_MPF_ELECTION_##member##_MASK) << \ + HINIC5_MPF_ELECTION_##member##_SHIFT) + +#define HINIC5_MPF_ELECTION_GET(val, member) \ + (((val) >> HINIC5_MPF_ELECTION_##member##_SHIFT) & \ + HINIC5_MPF_ELECTION_##member##_MASK) + +#define HINIC5_MPF_ELECTION_CLEAR(val, member) \ + ((val) & (~(HINIC5_MPF_ELECTION_##member##_MASK << \ + HINIC5_MPF_ELECTION_##member##_SHIFT))) + +#define HINIC5_GET_REG_FLAG(reg) ((reg) & (~(HINIC5_REGS_FLAG_MASK))) + +#define HINIC5_GET_REG_ADDR(reg) ((reg) & (HINIC5_REGS_FLAG_MASK)) + +#define HINIC5_MPU_BOOT_CAUSE_MAX_NUM 3 + +#define SPU_HOST_ID_BASE 4 +#define SPU_HOST_NUM 2 +#define SPU_HOST_ID_MAX (SPU_HOST_ID_BASE + SPU_HOST_NUM - 1) + +enum { + UBC_SW_HANDSHAKE_VALID, + UBC_SW_HANDSHAKE_NO_VALID, +}; + +u32 hinic5_hwif_read_reg(struct hinic5_hwif *hwif, u32 reg) +{ +#ifndef __UEFI__ + if (HINIC5_GET_REG_FLAG(reg) == HINIC5_MGMT_REGS_FLAG) + return be32_to_cpu(readl(hwif->mgmt_regs_base + + HINIC5_GET_REG_ADDR((u64)reg))); + else + return be32_to_cpu(readl(hwif->cfg_regs_base + + HINIC5_GET_REG_ADDR((u64)reg))); +#else + UINT8 bar_idx; + + if (HINIC5_GET_REG_FLAG(reg) == HINIC5_MGMT_REGS_FLAG) + bar_idx = HINIC5_MGMT_BAR; + else + bar_idx = HINIC5_CFG_BAR; + + return be32_to_cpu(readl_uefi(hwif->bus_dev, HINIC5_GET_REG_ADDR(reg), + bar_idx)); +#endif +} + +void 
hinic5_hwif_write_reg(struct hinic5_hwif *hwif, u32 reg, u32 val) +{ +#ifndef __UEFI__ + if (HINIC5_GET_REG_FLAG(reg) == HINIC5_MGMT_REGS_FLAG) + writel(cpu_to_be32(val), + hwif->mgmt_regs_base + HINIC5_GET_REG_ADDR((u64)reg)); + else + writel(cpu_to_be32(val), + hwif->cfg_regs_base + HINIC5_GET_REG_ADDR((u64)reg)); +#else + UINT8 bar_idx; + + if (HINIC5_GET_REG_FLAG(reg) == HINIC5_MGMT_REGS_FLAG) + bar_idx = HINIC5_MGMT_BAR; + else + bar_idx = HINIC5_CFG_BAR; + + writel_uefi(hwif->bus_dev, HINIC5_GET_REG_ADDR(reg), bar_idx, + be32_to_cpu(val)); +#endif +} + +bool get_card_present_state(struct hinic5_hwdev *hwdev) +{ + u32 attr1; + + if (!get_handshake_state(hwdev)) + return false; + + attr1 = hinic5_hwif_read_reg(hwdev->hwif, HINIC5_CSR_FUNC_ATTR1_ADDR); + if (attr1 == HINIC5_BUS_LINK_DOWN) { + sdk_warn(hwdev->dev_hdl, "Card is not present\n"); + return false; + } + + return true; +} + +u8 hinic5_get_hw_type(void *hwdev) +{ + struct hinic5_hwdev *dev = hwdev; + + if (unlikely(!dev || !dev->hwif)) + return HINIC5_HW_TYPE_INVALID; + + return dev->hwif->attr.hw_type; +} +EXPORT_SYMBOL(hinic5_get_hw_type); + +/** + * get_handshake_state - for UBC ELR, only 72 use + * @hwdev: the pointer to hw device + * Return: 0 - normal, 1 - in ELR + **/ +bool get_handshake_state(struct hinic5_hwdev *hwdev) +{ +#ifndef __UEFI__ + u32 sw_handshake_chk; + + if (!hinic5_check_htn_device_id(hwdev)) + return true; + + sw_handshake_chk = readl(hwdev->hwif->fers2_reg_base + + HINIC5_GET_REG_ADDR \ + ((u64)HINIC5_CSR_INTC_BAR_SW_HANDSHAKE_0_CSR0_REG)); + if (sw_handshake_chk == UBC_SW_HANDSHAKE_NO_VALID) + return false; +#endif + + return true; +} + +u32 hinic5_get_heartbeat_status(void *hwdev) +{ + u32 attr1; + struct hinic5_hwif *hwif = NULL; + + if (!hwdev) + return HINIC5_BUS_LINK_DOWN; + + hwif = ((struct hinic5_hwdev *)hwdev)->hwif; + if (!hwif) + return HINIC5_BUS_LINK_DOWN; + + attr1 = hinic5_hwif_read_reg(hwif, HINIC5_CSR_FUNC_ATTR1_ADDR); + if (attr1 == HINIC5_BUS_LINK_DOWN) + 
return attr1; + + return (HINIC5_AF1_GET(attr1, MGMT_INIT_STATUS) == 0); +} +EXPORT_SYMBOL(hinic5_get_heartbeat_status); + +#define MIGRATE_HOST_STATUS_CLEAR(host_id, val) ((val) & (~(1U << (host_id)))) +#define MIGRATE_HOST_STATUS_SET(host_id, enable) (((u8)(enable) & 1U) << (host_id)) +#define MIGRATE_HOST_STATUS_GET(host_id, val) (((val) & (1U << (host_id))) != 0) + +static inline int hinic5_hwdev_check(struct hinic5_hwdev *dev) +{ + if (!dev || !dev->hwif) + return -EINVAL; + if (HINIC5_FUNC_TYPE(dev) != TYPE_PPF) { + sdk_warn(dev->dev_hdl, "hwdev should be ppf\n"); + return -EINVAL; + } + return 0; +} + +int hinic5_set_host_migrate_enable(void *hwdev, u8 host_id, bool enable) +{ + struct hinic5_hwdev *dev = hwdev; + + u32 reg_val; + int ret = hinic5_hwdev_check(dev); + + if (ret != 0) + return ret; + + reg_val = hinic5_hwif_read_reg(dev->hwif, HINIC5_MULT_MIGRATE_HOST_STATUS_ADDR); + reg_val = MIGRATE_HOST_STATUS_CLEAR(host_id, reg_val); + reg_val |= MIGRATE_HOST_STATUS_SET(host_id, enable); + + hinic5_hwif_write_reg(dev->hwif, HINIC5_MULT_MIGRATE_HOST_STATUS_ADDR, reg_val); + + sdk_info(dev->dev_hdl, "Set migrate host %u status %d, reg value: 0x%x\n", + host_id, enable, reg_val); + + return 0; +} +EXPORT_SYMBOL(hinic5_set_host_migrate_enable); + +int hinic5_get_host_migrate_enable(void *hwdev, u8 host_id, u8 *migrate_en) +{ + struct hinic5_hwdev *dev = hwdev; + + u32 reg_val; + int ret = hinic5_hwdev_check(dev); + + if (ret != 0) + return ret; + + reg_val = hinic5_hwif_read_reg(dev->hwif, HINIC5_MULT_MIGRATE_HOST_STATUS_ADDR); + *migrate_en = MIGRATE_HOST_STATUS_GET(host_id, reg_val); + + return 0; +} +EXPORT_SYMBOL(hinic5_get_host_migrate_enable); + +static enum hinic5_wait_return check_hwif_ready_handler(void *priv_data) +{ + u32 status; + + status = hinic5_get_heartbeat_status(priv_data); + if (status == HINIC5_BUS_LINK_DOWN) + return WAIT_PROCESS_ERR; + else if (status == 0) + return WAIT_PROCESS_CPL; + + return WAIT_PROCESS_WAITING; +} + +static int 
wait_hwif_ready(struct hinic5_hwdev *hwdev) +{ + int ret; + + ret = hinic5_wait_for_timeout(hwdev, check_hwif_ready_handler, + WAIT_HWIF_READY_TIMEOUT, USEC_PER_MSEC); + if (ret == -ETIMEDOUT) { + hwdev->probe_fault_level = FAULT_LEVEL_FATAL; + sdk_err(hwdev->dev_hdl, "Wait for hwif timeout\n"); + } + + return ret; +} + +/** + * set_hwif_attr - set the attributes as members in hwif + * @hwif: the hardware interface of a pci function device + * @attr0: the first attribute that was read from the hw + * @attr1: the second attribute that was read from the hw + * @attr2: the third attribute that was read from the hw + * @attr3: the fourth attribute that was read from the hw + **/ +static void set_hwif_attr(struct hinic5_hwif *hwif, u32 attr0, u32 attr1, + u32 attr2, u32 attr3, u32 attr6) +{ + struct hinic5_hwdev *hwdev = hwif->hwdev; + + hwif->attr.func_global_idx = HINIC5_AF0_GET(attr0, FUNC_GLOBAL_IDX); + hwif->attr.port_to_port_idx = HINIC5_AF0_GET(attr0, P2P_IDX); + hwif->attr.pci_intf_idx = HINIC5_AF0_GET(attr0, PCI_INTF_IDX); + hwif->attr.vf_in_pf = HINIC5_AF0_GET(attr0, VF_IN_PF); + hwif->attr.func_type = HINIC5_AF0_GET(attr0, FUNC_TYPE); + + hwif->attr.ppf_idx = HINIC5_AF1_GET(attr1, PPF_IDX); + hwif->attr.num_aeqs = BIT(HINIC5_AF1_GET(attr1, AEQS_PER_FUNC)); + hwif->attr.num_ceqs = (u8)HINIC5_AF2_GET(attr2, CEQS_PER_FUNC); + hwif->attr.num_irqs = HINIC5_AF2_GET(attr2, IRQS_PER_FUNC); + if (hwif->attr.num_irqs > MAX_MSIX_ENTRY) + hwif->attr.num_irqs = MAX_MSIX_ENTRY; + + hwif->attr.num_dma_attr = BIT(HINIC5_AF2_GET(attr2, DMA_ATTR_PER_FUNC)); + + hwif->attr.global_vf_id_of_pf = HINIC5_AF3_GET(attr3, + GLOBAL_VF_ID_OF_PF); + + hwif->attr.num_sq = HINIC5_AF6_GET(attr6, FUNC_MAX_SQ); + hwif->attr.msix_flex_en = HINIC5_AF6_GET(attr6, MSIX_FLEX_EN); + hwif->attr.hw_type = HINIC5_AF6_GET(attr6, HW_TYPE); + + sdk_info(hwdev->dev_hdl, + "func_global_idx: 0x%x, port_to_port_idx: 0x%x, pci_intf_idx: 0x%x\n", + hwif->attr.func_global_idx, hwif->attr.port_to_port_idx, + 
hwif->attr.pci_intf_idx); + + sdk_info(hwdev->dev_hdl, + "vf_in_pf: 0x%x, func_type: %d msix_flex_en %u\n", + hwif->attr.vf_in_pf, hwif->attr.func_type, + hwif->attr.msix_flex_en); + + sdk_info(hwdev->dev_hdl, + "ppf_idx: 0x%x, num_aeqs: 0x%x, num_ceqs: 0x%x, num_irqs: 0x%x\n", + hwif->attr.ppf_idx, hwif->attr.num_aeqs, + hwif->attr.num_ceqs, hwif->attr.num_irqs); + + sdk_info(hwdev->dev_hdl, + "num_sq: 0x%x, num_dma_attr: 0x%x, global_vf_id_of_pf: %u, hw_type: %u\n", + hwif->attr.num_sq, hwif->attr.num_dma_attr, + hwif->attr.global_vf_id_of_pf, hwif->attr.hw_type); +} + +/** + * get_hwif_attr - read and set the attributes as members in hwif + * @hwif: the hardware interface of a pci function device + **/ +static int get_hwif_attr(struct hinic5_hwif *hwif) +{ + struct hinic5_hwdev *hwdev = hwif->hwdev; + u32 addr, attr0, attr1, attr2, attr3, attr6; + + addr = HINIC5_CSR_FUNC_ATTR0_ADDR; + attr0 = hinic5_hwif_read_reg(hwif, addr); + if (attr0 == HINIC5_BUS_LINK_DOWN) + return -EFAULT; + + addr = HINIC5_CSR_FUNC_ATTR1_ADDR; + attr1 = hinic5_hwif_read_reg(hwif, addr); + if (attr1 == HINIC5_BUS_LINK_DOWN) + return -EFAULT; + + addr = HINIC5_CSR_FUNC_ATTR2_ADDR; + attr2 = hinic5_hwif_read_reg(hwif, addr); + if (attr2 == HINIC5_BUS_LINK_DOWN) + return -EFAULT; + + addr = HINIC5_CSR_FUNC_ATTR3_ADDR; + attr3 = hinic5_hwif_read_reg(hwif, addr); + if (attr3 == HINIC5_BUS_LINK_DOWN) + return -EFAULT; + + addr = HINIC5_CSR_FUNC_ATTR6_ADDR; + attr6 = hinic5_hwif_read_reg(hwif, addr); + if (attr6 == HINIC5_BUS_LINK_DOWN) + return -EFAULT; + + sdk_info(hwdev->dev_hdl, + "attr0: 0x%08x, attr1: 0x%08x, attr2: 0x%08x, attr3: 0x%08x, attr6: 0x%08x\n", + attr0, attr1, attr2, attr3, attr6); + set_hwif_attr(hwif, attr0, attr1, attr2, attr3, attr6); + + return 0; +} + +void hinic5_set_pf_status(struct hinic5_hwif *hwif, + enum hinic5_pf_status status) +{ + u32 attr6 = hinic5_hwif_read_reg(hwif, HINIC5_CSR_FUNC_ATTR6_ADDR); + + attr6 = HINIC5_AF6_CLEAR(attr6, PF_STATUS); + attr6 |= 
HINIC5_AF6_SET(status, PF_STATUS); + + hinic5_hwif_write_reg(hwif, HINIC5_CSR_FUNC_ATTR6_ADDR, attr6); +} + +enum hinic5_pf_status hinic5_get_pf_status(struct hinic5_hwif *hwif) +{ + u32 attr6 = hinic5_hwif_read_reg(hwif, HINIC5_CSR_FUNC_ATTR6_ADDR); + + return HINIC5_AF6_GET(attr6, PF_STATUS); +} + +static inline enum doorbell_flush_state hinic5_get_doorbell_ctrl_status(struct hinic5_hwif *hwif) +{ + u32 attr4 = hinic5_hwif_read_reg(hwif, HINIC5_CSR_FUNC_ATTR4_ADDR); + + return HINIC5_AF4_GET(attr4, DOORBELL_CTRL); +} + +static inline enum outbound_flush_state hinic5_get_outbound_ctrl_status(struct hinic5_hwif *hwif) +{ + u32 attr5 = hinic5_hwif_read_reg(hwif, HINIC5_CSR_FUNC_ATTR5_ADDR); + + return HINIC5_AF5_GET(attr5, OUTBOUND_CTRL); +} + +void hinic5_enable_doorbell(struct hinic5_hwif *hwif) +{ + u32 addr, attr4; + + addr = HINIC5_CSR_FUNC_ATTR4_ADDR; + attr4 = hinic5_hwif_read_reg(hwif, addr); + + attr4 = HINIC5_AF4_CLEAR(attr4, DOORBELL_CTRL); + attr4 |= HINIC5_AF4_SET(DOORBELL_FLUSH_DISABLED, DOORBELL_CTRL); + + hinic5_hwif_write_reg(hwif, addr, attr4); +} + +void hinic5_disable_doorbell(struct hinic5_hwif *hwif) +{ + u32 addr, attr4; + + addr = HINIC5_CSR_FUNC_ATTR4_ADDR; + attr4 = hinic5_hwif_read_reg(hwif, addr); + + attr4 = HINIC5_AF4_CLEAR(attr4, DOORBELL_CTRL); + attr4 |= HINIC5_AF4_SET(DOORBELL_FLUSH_ENABLED, DOORBELL_CTRL); + + hinic5_hwif_write_reg(hwif, addr, attr4); +} + +/** + * set_ppf - try to set hwif as ppf and set the type of hwif in this case + * @hwif: the hardware interface of a pci function device + **/ +static void set_ppf(struct hinic5_hwif *hwif) +{ + struct hinic5_func_attr *attr = &hwif->attr; + u32 addr, val, ppf_election; + + /* Read Modify Write */ + addr = HINIC5_CSR_PPF_ELECTION_ADDR; + + val = hinic5_hwif_read_reg(hwif, addr); + val = HINIC5_PPF_ELECTION_CLEAR(val, IDX); + + ppf_election = HINIC5_PPF_ELECTION_SET(attr->func_global_idx, IDX); + val |= ppf_election; + + hinic5_hwif_write_reg(hwif, addr, val); + + /* Check PPF 
*/ + val = hinic5_hwif_read_reg(hwif, addr); + + attr->ppf_idx = HINIC5_PPF_ELECTION_GET(val, IDX); + if (attr->ppf_idx == attr->func_global_idx) + attr->func_type = TYPE_PPF; +} + +/** + * get_mpf - get the mpf index into the hwif + * @hwif: the hardware interface of a pci function device + **/ +static void get_mpf(struct hinic5_hwif *hwif) +{ + struct hinic5_func_attr *attr = &hwif->attr; + u32 mpf_election, addr; + + addr = HINIC5_CSR_GLOBAL_MPF_ELECTION_ADDR; + + mpf_election = hinic5_hwif_read_reg(hwif, addr); + attr->mpf_idx = HINIC5_MPF_ELECTION_GET(mpf_election, IDX); +} + +/** + * set_mpf - try to set hwif as mpf and set the mpf idx in hwif + * @hwif: the hardware interface of a pci function device + **/ +static void set_mpf(struct hinic5_hwif *hwif) +{ + struct hinic5_func_attr *attr = &hwif->attr; + u32 addr, val, mpf_election; + + /* Read Modify Write */ + addr = HINIC5_CSR_GLOBAL_MPF_ELECTION_ADDR; + + val = hinic5_hwif_read_reg(hwif, addr); + + val = HINIC5_MPF_ELECTION_CLEAR(val, IDX); + mpf_election = HINIC5_MPF_ELECTION_SET(attr->func_global_idx, IDX); + + val |= mpf_election; + hinic5_hwif_write_reg(hwif, addr, val); +} + +static int init_hwif(struct hinic5_hwdev *hwdev, void *fers2_reg_base, + void *cfg_reg_base, void *intr_reg_base, + void *mgmt_regs_base) +{ + struct hinic5_hwif *hwif = NULL; + + hwif = kzalloc(sizeof(*hwif), GFP_KERNEL); + if (!hwif) + return -ENOMEM; + + hwdev->hwif = hwif; +#ifdef __UEFI__ + hwif->bus_dev = hwdev->busdev_hdl; +#endif + hwif->hwdev = hwdev; + + hwif->fers2_reg_base = fers2_reg_base; + /* if function is VF, mgmt_regs_base will be NULL */ + hwif->cfg_regs_base = mgmt_regs_base ? 
cfg_reg_base : + (u8 *)((uintptr_t)cfg_reg_base + HINIC5_VF_CFG_REG_OFFSET); + + hwif->intr_regs_base = intr_reg_base; + hwif->mgmt_regs_base = mgmt_regs_base; + + hwif->attr.func_type = TYPE_UNKNOWN; + + return 0; +} + +static int init_db_area_idx(struct hinic5_hwif *hwif, u64 db_base_phy, u8 *db_base, + u64 db_dwqe_len) +{ + struct hinic5_free_db_area *free_db_area = &hwif->free_db_area; + u32 db_max_areas; + + hwif->db_base_phy = db_base_phy; + hwif->db_base = db_base; + hwif->db_dwqe_len = db_dwqe_len; + + db_max_areas = (db_dwqe_len > HINIC5_DB_DWQE_SIZE) ? + HINIC5_DB_MAX_AREAS : + (u32)(db_dwqe_len / HINIC5_DB_PAGE_SIZE); + free_db_area->db_bitmap_array = bitmap_zalloc(db_max_areas, GFP_KERNEL); + if (!free_db_area->db_bitmap_array) { + pr_err("Failed to allocate db area.\n"); + return -ENOMEM; + } + free_db_area->db_max_areas = db_max_areas; + spin_lock_init(&free_db_area->idx_lock); + return 0; +} + +static void free_db_area(struct hinic5_free_db_area *free_db_area) +{ + spin_lock_deinit(&free_db_area->idx_lock); + kfree(free_db_area->db_bitmap_array); +} + +static int get_db_idx(struct hinic5_hwif *hwif, u32 *idx) +{ + struct hinic5_free_db_area *free_db_area = &hwif->free_db_area; + u32 pg_idx; + + spin_lock(&free_db_area->idx_lock); + pg_idx = (u32)find_first_zero_bit(free_db_area->db_bitmap_array, + free_db_area->db_max_areas); + if (pg_idx == free_db_area->db_max_areas) { + spin_unlock(&free_db_area->idx_lock); + return -ENOMEM; + } + set_bit(pg_idx, free_db_area->db_bitmap_array); + spin_unlock(&free_db_area->idx_lock); + + *idx = pg_idx; + + return 0; +} + +static void free_db_idx(struct hinic5_hwif *hwif, u32 idx) +{ + struct hinic5_free_db_area *free_db_area = &hwif->free_db_area; + + if (idx >= free_db_area->db_max_areas) + return; + + spin_lock(&free_db_area->idx_lock); + clear_bit((int)idx, free_db_area->db_bitmap_array); + + spin_unlock(&free_db_area->idx_lock); +} + +void hinic5_free_db_addr(void *hwdev, const void __iomem *db_base, + void 
__iomem *dwqe_base) +{ + struct hinic5_hwif *hwif = NULL; + u32 idx; + + if (!hwdev || !db_base) + return; + + hwif = ((struct hinic5_hwdev *)hwdev)->hwif; + if (!hwif) + return; + idx = DB_IDX((uintptr_t)db_base, (uintptr_t)hwif->db_base); + + free_db_idx(hwif, idx); +} +EXPORT_SYMBOL(hinic5_free_db_addr); + +int hinic5_alloc_db_addr(void *hwdev, void __iomem **db_base, + void __iomem **dwqe_base) +{ + struct hinic5_hwif *hwif = NULL; + u32 idx = 0; +#ifdef __HIFC__ +#define HIFC3_DB_ADDR_RSVD 12 +#define HIFC3_DB_MASK 128 + u64 db_base_phy_fc; + + if (!hwdev || !db_base) + return -EINVAL; + + hwif = ((struct hinic5_hwdev *)hwdev)->hwif; + + db_base_phy_fc = hwif->db_base_phy >> HIFC3_DB_ADDR_RSVD; + + /* Use bitwise AND (not logical &&) to get the doorbell base offset + * within the 128-page alignment unit; logical AND yields 0/1 and + * would always compute idx = HIFC3_DB_MASK - 1 when misaligned. + */ + if (db_base_phy_fc & (HIFC3_DB_MASK - 1)) + idx = HIFC3_DB_MASK - (db_base_phy_fc & (HIFC3_DB_MASK - 1)); +#else + int err; + + if (!hwdev || !db_base) + return -EINVAL; + + hwif = ((struct hinic5_hwdev *)hwdev)->hwif; + if (!hwif) + return -EINVAL; + + err = get_db_idx(hwif, &idx); + if (err != 0) + return -EFAULT; +#endif + + *db_base = hwif->db_base + idx * HINIC5_DB_PAGE_SIZE; + + if (dwqe_base) + *dwqe_base = (u8 *)*db_base + HINIC5_DWQE_OFFSET; + + return 0; +} +EXPORT_SYMBOL(hinic5_alloc_db_addr); + +void hinic5_free_db_phy_addr(void *hwdev, u64 db_base, u64 dwqe_base) +{ + struct hinic5_hwif *hwif = NULL; + u32 idx; + + if (!hwdev) + return; + + hwif = ((struct hinic5_hwdev *)hwdev)->hwif; + if (!hwif) + return; + idx = DB_IDX(db_base, hwif->db_base_phy); + + free_db_idx(hwif, idx); +} +EXPORT_SYMBOL(hinic5_free_db_phy_addr); + +int hinic5_alloc_db_phy_addr(void *hwdev, u64 *db_base, u64 *dwqe_base) +{ + struct hinic5_hwif *hwif = NULL; + u32 idx; + int err; + + if (!hwdev || !db_base || !dwqe_base) + return -EINVAL; + + hwif = ((struct hinic5_hwdev *)hwdev)->hwif; + if (!hwif) + return -EINVAL; + + err = get_db_idx(hwif, &idx); + if (err != 0) + return -EFAULT; + + *db_base = hwif->db_base_phy + idx * HINIC5_DB_PAGE_SIZE; + *dwqe_base = *db_base + 
HINIC5_DWQE_OFFSET; + + return 0; +} +EXPORT_SYMBOL(hinic5_alloc_db_phy_addr); + +void hinic5_set_msix_auto_mask_state(void *hwdev, u16 msix_idx, + enum hinic5_msix_auto_mask flag) +{ + struct hinic5_hwif *hwif = NULL; + u32 mask_bits; + u32 addr; + + if (!hwdev) + return; + + hwif = ((struct hinic5_hwdev *)hwdev)->hwif; + if (!hwif) + return; + + if (flag != 0) + mask_bits = HINIC5_MSI_CLR_INDIR_SET(1, AUTO_MSK_SET); + else + mask_bits = HINIC5_MSI_CLR_INDIR_SET(1, AUTO_MSK_CLR); + + mask_bits = mask_bits | + HINIC5_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX); + + addr = HINIC5_CSR_FUNC_MSI_CLR_WR_ADDR; + hinic5_hwif_write_reg(hwif, addr, mask_bits); +} +EXPORT_SYMBOL(hinic5_set_msix_auto_mask_state); + +void hinic5_set_msix_state(void *hwdev, u16 msix_idx, + enum hinic5_msix_state flag) +{ + struct hinic5_hwif *hwif = NULL; + u32 mask_bits; + u32 addr; + u8 int_msk = 1; + + if (!hwdev) + return; + + hwif = ((struct hinic5_hwdev *)hwdev)->hwif; + if (!hwif) + return; + + if (flag != 0) + mask_bits = HINIC5_MSI_CLR_INDIR_SET(int_msk, INT_MSK_SET); + else + mask_bits = HINIC5_MSI_CLR_INDIR_SET(int_msk, INT_MSK_CLR); + mask_bits = mask_bits | + HINIC5_MSI_CLR_INDIR_SET(msix_idx, SIMPLE_INDIR_IDX); + + addr = HINIC5_CSR_FUNC_MSI_CLR_WR_ADDR; + hinic5_hwif_write_reg(hwif, addr, mask_bits); +} +EXPORT_SYMBOL(hinic5_set_msix_state); + +static void disable_all_msix(struct hinic5_hwdev *hwdev) +{ + u16 num_irqs = hwdev->hwif->attr.num_irqs; + u16 i; + + for (i = 0; i < num_irqs; i++) + hinic5_set_msix_state(hwdev, i, HINIC5_MSIX_DISABLE); +} + +static enum hinic5_wait_return check_db_outbound_enable_handler(void *priv_data) +{ + struct hinic5_hwif *hwif = priv_data; + enum doorbell_flush_state db_ctrl; + enum outbound_flush_state outbound_ctrl; + + db_ctrl = hinic5_get_doorbell_ctrl_status(hwif); + outbound_ctrl = hinic5_get_outbound_ctrl_status(hwif); + if (outbound_ctrl == OUTBOUND_FLUSH_DISABLED && db_ctrl == DOORBELL_FLUSH_DISABLED) + return WAIT_PROCESS_CPL; + 
return WAIT_PROCESS_WAITING; +} + +enum hinic5_wait_return check_outbound_enable_handler(struct hinic5_hwdev *hwdev) +{ + enum outbound_flush_state outbound_ctrl; + /* A non-zero bypass refcount means a flow that may drive + * outbound_ctrl_status non-zero is in progress; skip the check + * for cmdq and mbox in that case. + */ + if (atomic_read(&hwdev->check_ob_flush_bypass_ref_cnt) > 0) + return WAIT_PROCESS_CPL; + + outbound_ctrl = hinic5_get_outbound_ctrl_status(hwdev->hwif); + if (outbound_ctrl == OUTBOUND_FLUSH_DISABLED) + return WAIT_PROCESS_CPL; + return WAIT_PROCESS_WAITING; +} + +static int wait_until_doorbell_and_outbound_enabled(struct hinic5_hwif *hwif) +{ + return hinic5_wait_for_timeout(hwif, check_db_outbound_enable_handler, + HINIC5_WAIT_DOORBELL_AND_OUTBOUND_TIMEOUT, USEC_PER_MSEC); +} + +static void select_ppf_mpf(struct hinic5_hwdev *hwdev) +{ + struct hinic5_hwif *hwif = hwdev->hwif; + + if (!HINIC5_IS_VF(hwdev)) { + set_ppf(hwif); + + if (HINIC5_IS_PPF(hwdev)) + set_mpf(hwif); + + get_mpf(hwif); + } +} + +/** + * hinic5_init_hwif - initialize the hw interface + * @hwif: the hardware interface of a pci/ubus function device + * Return: 0 - success, negative - failure + **/ +int hinic5_init_hwif(struct hinic5_hwdev *hwdev, void *fers2_reg_base, void *cfg_reg_base, + void *intr_reg_base, void *mgmt_regs_base, u64 db_base_phy, + void *db_base, u64 db_dwqe_len) +{ + struct hinic5_hwif *hwif = NULL; + u32 attr1, attr4, attr5; + int err; + + err = init_hwif(hwdev, fers2_reg_base, cfg_reg_base, intr_reg_base, mgmt_regs_base); + if (err != 0) + return err; + + hwif = hwdev->hwif; + + err = init_db_area_idx(hwif, db_base_phy, db_base, db_dwqe_len); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to init db area.\n"); + goto init_db_area_err; + } + + err = wait_hwif_ready(hwdev); + if (err != 0) { + attr1 = hinic5_hwif_read_reg(hwif, HINIC5_CSR_FUNC_ATTR1_ADDR); + sdk_err(hwdev->dev_hdl, "Chip status is not ready, attr1:0x%x\n", attr1); + goto hwif_ready_err; + } + + err = get_hwif_attr(hwif); + if (err != 0) { + sdk_err(hwdev->dev_hdl, 
"Get hwif attr failed\n"); + goto hwif_ready_err; + } + + err = wait_until_doorbell_and_outbound_enabled(hwif); + if (err != 0) { + attr4 = hinic5_hwif_read_reg(hwif, HINIC5_CSR_FUNC_ATTR4_ADDR); + attr5 = hinic5_hwif_read_reg(hwif, HINIC5_CSR_FUNC_ATTR5_ADDR); + sdk_err(hwdev->dev_hdl, "Hw doorbell/outbound is disabled, attr4 0x%x attr5 0x%x\n", + attr4, attr5); + goto hwif_ready_err; + } + + select_ppf_mpf(hwdev); + + disable_all_msix(hwdev); + /* disable mgmt cpu report any event */ + hinic5_set_pf_status(hwdev->hwif, HINIC5_PF_STATUS_INIT); + + sdk_info(hwdev->dev_hdl, "global_func_idx: %u, func_type: %d, host_id: %u, ppf: %u, mpf: %u\n", + hwif->attr.func_global_idx, hwif->attr.func_type, hwif->attr.pci_intf_idx, + hwif->attr.ppf_idx, hwif->attr.mpf_idx); + + return 0; + +hwif_ready_err: + hinic5_show_chip_err_info(hwdev); + free_db_area(&hwif->free_db_area); +init_db_area_err: + kfree(hwif); + + return err; +} + +/** + * hinic5_free_hwif - free the hw interface + * @hwif: the hardware interface of a pci/ubus function device + **/ +void hinic5_free_hwif(struct hinic5_hwdev *hwdev) +{ + free_db_area(&hwdev->hwif->free_db_area); + kfree(hwdev->hwif); +} + +u16 hinic5_global_func_id(void *hwdev) +{ + struct hinic5_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct hinic5_hwdev *)hwdev)->hwif; + if (!hwif) + return 0; + + return hwif->attr.func_global_idx; +} +EXPORT_SYMBOL(hinic5_global_func_id); + +u16 hinic5_intr_num(void *hwdev) +{ + struct hinic5_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct hinic5_hwdev *)hwdev)->hwif; + if (!hwif) + return 0; + + return hwif->attr.num_irqs; +} +EXPORT_SYMBOL(hinic5_intr_num); + +u8 hinic5_pf_id_of_vf(void *hwdev) +{ + struct hinic5_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct hinic5_hwdev *)hwdev)->hwif; + if (!hwif) + return 0; + + return hwif->attr.port_to_port_idx; +} +EXPORT_SYMBOL(hinic5_pf_id_of_vf); + +u8 hinic5_pcie_itf_id(void *hwdev) +{ + struct 
hinic5_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct hinic5_hwdev *)hwdev)->hwif; + if (!hwif) + return 0; + + return hwif->attr.pci_intf_idx; +} +EXPORT_SYMBOL(hinic5_pcie_itf_id); + +bool hinic5_in_spu(void *hwdev) +{ + const u8 host_id = hinic5_pcie_itf_id(hwdev); + + return SPU_HOST_ID_BASE <= host_id && host_id <= SPU_HOST_ID_MAX; +} +EXPORT_SYMBOL(hinic5_in_spu); + +u8 hinic5_vf_in_pf(void *hwdev) +{ + struct hinic5_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct hinic5_hwdev *)hwdev)->hwif; + if (!hwif) + return 0; + + return hwif->attr.vf_in_pf; +} +EXPORT_SYMBOL(hinic5_vf_in_pf); + +enum func_type hinic5_func_type(void *hwdev) +{ + struct hinic5_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct hinic5_hwdev *)hwdev)->hwif; + if (!hwif) + return 0; + + return hwif->attr.func_type; +} +EXPORT_SYMBOL(hinic5_func_type); + +u8 hinic5_ceq_num(void *hwdev) +{ + struct hinic5_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct hinic5_hwdev *)hwdev)->hwif; + if (!hwif) + return 0; + + return hwif->attr.num_ceqs; +} +EXPORT_SYMBOL(hinic5_ceq_num); + +u16 hinic5_glb_pf_vf_offset(void *hwdev) +{ + struct hinic5_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct hinic5_hwdev *)hwdev)->hwif; + if (!hwif) + return 0; + + return hwif->attr.global_vf_id_of_pf; +} +EXPORT_SYMBOL(hinic5_glb_pf_vf_offset); + +u8 hinic5_ppf_idx(void *hwdev) +{ + struct hinic5_hwif *hwif = NULL; + + if (!hwdev) + return 0; + + hwif = ((struct hinic5_hwdev *)hwdev)->hwif; + if (!hwif) + return 0; + + return hwif->attr.ppf_idx; +} +EXPORT_SYMBOL(hinic5_ppf_idx); + +#if !defined(__UEFI__) && !defined(__VMWARE__) && !defined(__WIN__) +int hinic5_ts_up_en(void *hwdev, u32 flags) +{ + u32 retry_cnt; + struct hinic5_hwdev *dev = (struct hinic5_hwdev *)hwdev; + + if (!dev || HINIC5_IS_VF(dev)) + return -EINVAL; + + hinic5_hwif_write_reg(dev->hwif, HINIC5_PTP_REG(UP_EN), flags); + for (retry_cnt = 0; retry_cnt < 
MAX_TS_UP_EN_RETRY_CNT; retry_cnt++) { + if ((hinic5_hwif_read_reg(dev->hwif, HINIC5_PTP_REG(UP_EN)) & flags) == 0) + return 0; + + udelay(1); + } + return -EINVAL; +} +EXPORT_SYMBOL(hinic5_ts_up_en); + +void hinic5_read_ts_data(void *hwdev, struct timespec64 *ts) +{ + u32 hi, lo; + struct hinic5_hwdev *dev = (struct hinic5_hwdev *)hwdev; + + if (!dev || HINIC5_IS_VF(dev) || !ts) + return; + + ts->tv_nsec = hinic5_hwif_read_reg(dev->hwif, HINIC5_PTP_REG(RD_DATA2)); + lo = hinic5_hwif_read_reg(dev->hwif, HINIC5_PTP_REG(RD_DATA1)); + hi = hinic5_hwif_read_reg(dev->hwif, HINIC5_PTP_REG(RD_DATA0)); + ts->tv_sec = (time64_t)MAKE_64BITS(hi, lo); +} +EXPORT_SYMBOL(hinic5_read_ts_data); + +void hinic5_write_ts_data(void *hwdev, const struct timespec64 *ts) +{ + u32 retry_cnt; + u64 second; + struct hinic5_hwdev *dev = (struct hinic5_hwdev *)hwdev; + + if (!dev || !dev->hwif || HINIC5_IS_VF(dev) || !ts) + return; + + second = (u64)ts->tv_sec; + hinic5_hwif_write_reg(dev->hwif, HINIC5_PTP_REG(WR_DATA2), (u32)ts->tv_nsec); + hinic5_hwif_write_reg(dev->hwif, HINIC5_PTP_REG(WR_DATA1), second & 0xFFFFFFFF); + hinic5_hwif_write_reg(dev->hwif, HINIC5_PTP_REG(WR_DATA0), upper_32_bits(second)); + hinic5_hwif_write_reg(dev->hwif, HINIC5_PTP_REG(UP_EN), 1); + for (retry_cnt = 0; retry_cnt < MAX_TS_UP_EN_RETRY_CNT; retry_cnt++) { + if (hinic5_hwif_read_reg(dev->hwif, HINIC5_PTP_REG(UP_EN)) == 0) + break; + + udelay(1); + } +} +EXPORT_SYMBOL(hinic5_write_ts_data); + +#define PTP_INC_CFG_UP_EN_FLAG BIT(2) +#define PTP_DELTA_UP_EN_FLAG BIT(3) +void hinic5_set_ptp_inc(void *hwdev, u32 inc_val) +{ + u32 retry_cnt; + struct hinic5_hwdev *dev = (struct hinic5_hwdev *)hwdev; + + if (!dev || !dev->hwif || HINIC5_IS_VF(dev)) + return; + + hinic5_hwif_write_reg(dev->hwif, HINIC5_PTP_REG(INC_CFG), inc_val); + hinic5_hwif_write_reg(dev->hwif, HINIC5_PTP_REG(UP_EN), PTP_INC_CFG_UP_EN_FLAG); + for (retry_cnt = 0; retry_cnt < MAX_TS_UP_EN_RETRY_CNT; retry_cnt++) { + if (hinic5_hwif_read_reg(dev->hwif, 
HINIC5_PTP_REG(UP_EN)) == 0) + break; + + udelay(1); + } +} +EXPORT_SYMBOL(hinic5_set_ptp_inc); + +#define PTP_NS_DELTA_OP_ADD BIT(31) +void hinic5_ptp_ts_update(void *hwdev, s32 delta_ns) +{ + u32 retry_cnt; + u32 update_cfg_val; + struct hinic5_hwdev *dev = (struct hinic5_hwdev *)hwdev; + + if (!dev || !dev->hwif || HINIC5_IS_VF(dev)) + return; + + if (delta_ns < 0) { + update_cfg_val = (u32)(-delta_ns); + } else { + update_cfg_val = (u32)delta_ns; + update_cfg_val |= PTP_NS_DELTA_OP_ADD; + } + + hinic5_hwif_write_reg(dev->hwif, HINIC5_PTP_REG(UPDT_CFG), update_cfg_val); + hinic5_hwif_write_reg(dev->hwif, HINIC5_PTP_REG(UP_EN), PTP_DELTA_UP_EN_FLAG); + for (retry_cnt = 0; retry_cnt < MAX_TS_UP_EN_RETRY_CNT; retry_cnt++) { + if (hinic5_hwif_read_reg(dev->hwif, HINIC5_PTP_REG(UP_EN)) == 0) + break; + + udelay(1); + } +} +EXPORT_SYMBOL(hinic5_ptp_ts_update); + +#define HINIC5_N_PTP_HIGH_SHIFT 61 +#define HINIC5_N_PTP_MID_SHIFT 29 +#define HINIC5_N_PTP_HIGH_MASK 7 +#define HINIC5_N_PTP_LOW_MASK 0x1FFFFFFF +int hinic5_read_n_ptp_ts_data(struct hinic5_hwdev *hwdev, u64 *time_ns) +{ + u32 hi, mid, lo; + + if (!hwdev || HINIC5_IS_VF(hwdev) || !time_ns) + return -EINVAL; + + /* 80 bit non-ptp TimeStamp */ + /* | [79 : 64] | [63 : 32] | [31 : 29] | [28 : 0] | + * | hi | mid | rsv | lo | + */ + lo = hinic5_hwif_read_reg(hwdev->hwif, HINIC5_N_PTP_REG(RD_DATA2)); + mid = hinic5_hwif_read_reg(hwdev->hwif, HINIC5_N_PTP_REG(RD_DATA1)); + hi = hinic5_hwif_read_reg(hwdev->hwif, HINIC5_N_PTP_REG(RD_DATA0)); + + /* 64 bit nsec_lo */ + /* | [63 : 61] | [60 : 29] | [28 : 0] | + * | hi[2 : 0] | mid | lo | + */ + *time_ns = (((u64)(lo & HINIC5_N_PTP_LOW_MASK)) | (((u64)mid) << HINIC5_N_PTP_MID_SHIFT) | + (((u64)(hi & HINIC5_N_PTP_HIGH_MASK)) << HINIC5_N_PTP_HIGH_SHIFT)); + return 0; +} + +static inline int hinic5_hwif_wait_n_ptp_up_en(struct hinic5_hwif *hwif, u32 flags) +{ + u32 retry_cnt; + + for (retry_cnt = 0; retry_cnt < MAX_TS_UP_EN_RETRY_CNT; retry_cnt++) { + if 
((hinic5_hwif_read_reg(hwif, HINIC5_N_PTP_REG(UP_EN)) & flags) == 0) + return 0; + udelay(1); + } + return -EBUSY; +} + +int hinic5_n_ptp_ts_up_en(struct hinic5_hwdev *hwdev, u32 flags) +{ + if (!hwdev || HINIC5_IS_VF(hwdev)) + return -EINVAL; + + hinic5_hwif_write_reg(hwdev->hwif, HINIC5_N_PTP_REG(UP_EN), flags); + return hinic5_hwif_wait_n_ptp_up_en(hwdev->hwif, flags); +} +#endif + +u8 hinic5_host_ppf_idx(struct hinic5_hwdev *hwdev, u8 host_id) +{ + u32 ppf_elect_port_addr; + u32 val; + + if (!hwdev || !hwdev->hwif) + return 0; + + ppf_elect_port_addr = HINIC5_CSR_FUNC_PPF_ELECT(host_id); + val = hinic5_hwif_read_reg(hwdev->hwif, ppf_elect_port_addr); + + return HINIC5_PPF_ELECT_PORT_GET(val, IDX); +} + +u32 hinic5_get_self_test_result(void *hwdev) +{ + struct hinic5_hwif *hwif = ((struct hinic5_hwdev *)hwdev)->hwif; + + return hinic5_hwif_read_reg(hwif, HINIC5_MGMT_HEALTH_STATUS_ADDR); +} + +void hinic5_show_chip_err_info(struct hinic5_hwdev *hwdev) +{ + const enum func_type func_type = hinic5_func_type(hwdev); + struct hinic5_hwif *hwif = hwdev->hwif; + u32 value; + + if (func_type != TYPE_PPF && func_type != TYPE_PF) + return; + + value = hinic5_hwif_read_reg(hwif, HINIC5_CHIP_BASE_INFO_ADDR); + sdk_warn(hwdev->dev_hdl, "Chip base info: 0x%08x\n", value); + + value = hinic5_hwif_read_reg(hwif, HINIC5_MGMT_HEALTH_STATUS_ADDR); + sdk_warn(hwdev->dev_hdl, "Mgmt CPU health status: 0x%08x\n", value); + + value = hinic5_hwif_read_reg(hwif, HINIC5_CHIP_ERR_STATUS0_ADDR); + sdk_warn(hwdev->dev_hdl, "Chip fatal error status0: 0x%08x\n", value); + value = hinic5_hwif_read_reg(hwif, HINIC5_CHIP_ERR_STATUS1_ADDR); + sdk_warn(hwdev->dev_hdl, "Chip fatal error status1: 0x%08x\n", value); + + value = hinic5_hwif_read_reg(hwif, HINIC5_ERR_INFO0_ADDR); + sdk_warn(hwdev->dev_hdl, "Chip exception info0: 0x%08x\n", value); + value = hinic5_hwif_read_reg(hwif, HINIC5_ERR_INFO1_ADDR); + sdk_warn(hwdev->dev_hdl, "Chip exception info1: 0x%08x\n", value); + value = 
hinic5_hwif_read_reg(hwif, HINIC5_ERR_INFO2_ADDR); + sdk_warn(hwdev->dev_hdl, "Chip exception info2: 0x%08x\n", value); +} diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_mbox.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_mbox.c new file mode 100644 index 00000000..385507c5 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_mbox.c @@ -0,0 +1,2187 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/delay.h> +#include <linux/types.h> +#include <linux/semaphore.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> + +#include "ossl_knl.h" +#include "comm_defs.h" +#include "mpu_inband_cmd.h" +#include "hinic5_typedef_inner.h" +#include "hinic5_hw.h" +#include "hinic5_hwdev.h" +#include "hinic5_csr_inner.h" +#include "hinic5_hwif_inner.h" +#include "hinic5_eqs.h" +#include "hinic5_prof_adap.h" +#include "hinic5_common.h" +#include "hinic5_chip_info.h" +#include "hinic5_mbox.h" + +#define HINIC5_MBOX_INT_DST_AEQN_SHIFT 10 +#define HINIC5_MBOX_INT_SRC_RESP_AEQN_SHIFT 12 +#define HINIC5_MBOX_INT_STAT_DMA_SHIFT 14 +/* The size of data to be send (unit of 4 bytes) */ +#define HINIC5_MBOX_INT_TX_SIZE_SHIFT 20 +/* SO_RO(strong order, relax order) */ +#define HINIC5_MBOX_INT_STAT_DMA_SO_RO_SHIFT 25 +#define HINIC5_MBOX_INT_WB_EN_SHIFT 28 + +#define HINIC5_MBOX_INT_DST_AEQN_MASK 0x3 +#define HINIC5_MBOX_INT_SRC_RESP_AEQN_MASK 0x3 +#define HINIC5_MBOX_INT_STAT_DMA_MASK 0x3F +#define HINIC5_MBOX_INT_TX_SIZE_MASK 0x1F +#define HINIC5_MBOX_INT_STAT_DMA_SO_RO_MASK 0x3 +#define HINIC5_MBOX_INT_WB_EN_MASK 0x1 + +#define WAIT_USEC_50 50 + +#define HINIC5_MBOX_INT_SET(val, field) \ + (((u32)(val) & HINIC5_MBOX_INT_##field##_MASK) << \ + HINIC5_MBOX_INT_##field##_SHIFT) + +enum hinic5_mbox_tx_status { + TX_DONE = 0, + TX_NOT_DONE = 1, +}; + +#define HINIC5_MBOX_CTRL_TRIGGER_AEQE_SHIFT 0 +/* specifies 
the issue request for the message data. + * 0 - Tx request is done; + * 1 - Tx request is in process. + */ +#define HINIC5_MBOX_CTRL_TX_STATUS_SHIFT 1 +#define HINIC5_MBOX_CTRL_DST_FUNC_SHIFT 16 + +#define HINIC5_MBOX_CTRL_TRIGGER_AEQE_MASK 0x1 +#define HINIC5_MBOX_CTRL_TX_STATUS_MASK 0x1 +#define HINIC5_MBOX_CTRL_DST_FUNC_MASK 0x1FFF + +#define HINIC5_MBOX_CTRL_GET(val, field) \ + (((val) >> HINIC5_MBOX_CTRL_##field##_SHIFT) & \ + HINIC5_MBOX_CTRL_##field##_MASK) +#define HINIC5_MBOX_CTRL_SET(val, field) \ + (((val) & HINIC5_MBOX_CTRL_##field##_MASK) << \ + HINIC5_MBOX_CTRL_##field##_SHIFT) + +#define MBOX_MSG_WAIT_ONCE_TIME_US 10 +#define MBOX_MSG_RETRY_ACK_TIMEOUT 1000U +#define WAIT_MGMT_UNBUSY_TIMEOUT 4000U /* temp value */ + +#define MBOX_MAX_BUF_SZ 2048U +#define MBOX_HEADER_SZ 8 +#define HINIC5_MBOX_DATA_SIZE (MBOX_MAX_BUF_SZ - MBOX_HEADER_SZ) + +/* MBOX size is 64B, 8B for mbox_header, 8B reserved */ +#define MBOX_SEG_LEN 48 +#define MBOX_SEG_LEN_ALIGN 4 +#define MBOX_WB_STATUS_LEN 16UL + +#define SEQ_ID_START_VAL 0 +#define SEQ_ID_MAX_VAL 42 +#define MBOX_LAST_SEG_MAX_LEN (MBOX_MAX_BUF_SZ - \ + SEQ_ID_MAX_VAL * MBOX_SEG_LEN) + +/* Mbox write back status is 16B, only first 2B is used */ +#define MBOX_WB_STATUS_ERRCODE_MASK 0xFFFF +#define MBOX_WB_STATUS_MASK 0xFF +#define MBOX_WB_ERROR_CODE_MASK 0xFF00 +#define MBOX_WB_STATUS_FINISHED_SUCCESS 0xFF +#define MBOX_WB_STATUS_FINISHED_WITH_ERR 0xFE +#define MBOX_WB_STATUS_NOT_FINISHED 0x00 + +#define MBOX_STATUS_FINISHED(wb) \ + (((wb) & MBOX_WB_STATUS_MASK) != MBOX_WB_STATUS_NOT_FINISHED) +#define MBOX_STATUS_SUCCESS(wb) \ + (((wb) & MBOX_WB_STATUS_MASK) == MBOX_WB_STATUS_FINISHED_SUCCESS) + +/* + * The errcode is specified as: + * 0 - No errors; + * 1 - VF send the mailbox data to the wrong destination functions. + * 2 - PPF send the mailbox data to the wrong destination functions. + * 3 - PF send the mailbox data to the wrong destination functions. + * 4 - The mailbox data size is set to all zero. 
+ * 5 - The sender function attribute has not been learned by CPI hardware. + * 6 - The receiver function attribute has not been learned by CPI hardware. + */ +#define MBOX_STATUS_ERRCODE(wb) \ + ((wb) & MBOX_WB_ERROR_CODE_MASK) + +#define DST_AEQ_IDX_DEFAULT_VAL 0 +#define SRC_AEQ_IDX_DEFAULT_VAL 0 +#define NO_DMA_ATTRIBUTE_VAL 0 + +#define MBOX_MSG_NO_DATA_LEN 1 + +#define MBOX_BODY_FROM_HDR(header) ((u8 *)(header) + MBOX_HEADER_SZ) +#define MBOX_AREA(hwif) \ + ((hwif)->cfg_regs_base + HINIC5_FUNC_CSR_MAILBOX_DATA_OFF) + +#define MBOX_DMA_MSG_QUEUE_DEPTH 32 + +#define MBOX_MQ_CI_OFFSET (HINIC5_CFG_REGS_FLAG + HINIC5_FUNC_CSR_MAILBOX_DATA_OFF + \ + MBOX_HEADER_SZ + MBOX_SEG_LEN) +#define MBOX_MQ_CI_SIZE 4 + +#define MBOX_MQ_SYNC_CI_SHIFT 0 +#define MBOX_MQ_ASYNC_CI_SHIFT 8 + +#define MBOX_MQ_SYNC_CI_MASK 0xFF +#define MBOX_MQ_ASYNC_CI_MASK 0xFF + +#define MBOX_MQ_CI_SET(val, field) \ + (((val) & MBOX_MQ_##field##_CI_MASK) << MBOX_MQ_##field##_CI_SHIFT) +#define MBOX_MQ_CI_GET(val, field) \ + (((val) >> MBOX_MQ_##field##_CI_SHIFT) & MBOX_MQ_##field##_CI_MASK) +#define MBOX_MQ_CI_CLEAR(val, field) \ + ((val) & (~(MBOX_MQ_##field##_CI_MASK << MBOX_MQ_##field##_CI_SHIFT))) + +#define MBOX_EXT_CSR_OFFSET (MBOX_MQ_CI_OFFSET + MBOX_MQ_CI_SIZE) + +#define MBOX_EXT_MGMT_BUSY_SHIFT 0 + +#define MBOX_EXT_MGMT_BUSY_MASK 0x1 + +#define MBOX_EXT_SET(val, field) \ + (((val) & MBOX_EXT_##field##_MASK) << MBOX_EXT_##field##_SHIFT) +#define MBOX_EXT_GET(val, field) \ + (((val) >> MBOX_EXT_##field##_SHIFT) & MBOX_EXT_##field##_MASK) +#define MBOX_EXT_CLEAR(val, field) \ + ((val) & (~(MBOX_EXT_##field##_MASK << MBOX_EXT_##field##_SHIFT))) + +#define IS_PF_OR_PPF_SRC(hwdev, src_func_idx) \ + ((src_func_idx) < HINIC5_MAX_PF_NUM(hwdev)) + +#define MBOX_RESPONSE_ERROR 0x1 +#define MBOX_MSG_ID_MASK 0xF + +static inline u8 inc_mbox_send_msg_id(struct hinic5_mbox *func_to_func) +{ + func_to_func->send_msg_id = (func_to_func->send_msg_id + 1) & MBOX_MSG_ID_MASK; + /* 
Keep numbering consistent with the legacy implementation (IDs start from 1); otherwise the dt tool may block, + * presumably because some test cases assume the initial msg_id value — TODO confirm + */ + return func_to_func->send_msg_id; +} + +/* max message counter wait to process for one function */ +#define HINIC5_MAX_MSG_CNT_TO_PROCESS 10 + +#define MBOX_MSG_CHANNEL_STOP(func_to_func) \ + ((((func_to_func)->lock_channel_en) && \ + test_bit((func_to_func)->cur_msg_channel, \ + &(func_to_func)->channel_stop)) ? true : false) + +enum mbox_ordering_type { + STRONG_ORDER, +}; + +enum mbox_write_back_type { + WRITE_BACK = 1, +}; + +enum mbox_aeq_trig_type { + NOT_TRIGGER, + TRIGGER, +}; + +static int send_mbox_msg(struct hinic5_mbox *func_to_func, u8 mod, u16 cmd, + void *msg, u16 msg_len, u16 dst_func, + enum hinic5_msg_direction_type direction, + enum hinic5_msg_ack_type ack_type, + struct mbox_msg_info *msg_info); + +static struct hinic5_msg_desc *get_mbox_msg_desc(struct hinic5_mbox *func_to_func, + u64 dir, u64 src_func_id); + +/** + * hinic5_register_ppf_mbox_cb - register mbox callback for ppf + * @hwdev: the pointer to hw device + * @mod: specific mod that the callback will handle + * @pri_handle: specific mod's private data that will be used in callback + * @callback: callback function + * Return: 0 - success, negative - failure + */ +int hinic5_register_ppf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, + hinic5_ppf_mbox_cb callback) +{ + struct hinic5_mbox *func_to_func = NULL; + + if (mod >= HINIC5_MOD_MAX || !hwdev) + return -EFAULT; + + func_to_func = ((struct hinic5_hwdev *)hwdev)->func_to_func; + if (!func_to_func) + return -EFAULT; + + func_to_func->ppf_mbox_cb[mod] = callback; + func_to_func->ppf_mbox_data[mod] = pri_handle; + + set_bit(HINIC5_PPF_MBOX_CB_REG, &func_to_func->ppf_mbox_cb_state[mod]); + + return 0; +} +EXPORT_SYMBOL(hinic5_register_ppf_mbox_cb); + +/** + * hinic5_register_pf_mbox_cb - register mbox callback for pf + * @hwdev: the pointer to hw device + * @mod: specific mod that the callback will handle + * @pri_handle: specific mod's private data that will be used in callback + 
* @callback: callback function + * Return: 0 - success, negative - failure + */ +int hinic5_register_pf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, + hinic5_pf_mbox_cb callback) +{ + struct hinic5_mbox *func_to_func = NULL; + + if (mod >= HINIC5_MOD_MAX || !hwdev) + return -EFAULT; + + func_to_func = ((struct hinic5_hwdev *)hwdev)->func_to_func; + if (!func_to_func) + return -EFAULT; + + func_to_func->pf_mbox_cb[mod] = callback; + func_to_func->pf_mbox_data[mod] = pri_handle; + + set_bit(HINIC5_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]); + + return 0; +} +EXPORT_SYMBOL(hinic5_register_pf_mbox_cb); + +/** + * hinic5_register_vf_mbox_cb - register mbox callback for vf + * @hwdev: the pointer to hw device + * @mod: specific mod that the callback will handle + * @pri_handle specific mod's private data that will be used in callback + * @callback: callback function + * Return: 0 - success, negative - failure + */ +int hinic5_register_vf_mbox_cb(void *hwdev, u8 mod, void *pri_handle, + hinic5_vf_mbox_cb callback) +{ + struct hinic5_mbox *func_to_func = NULL; + + if (mod >= HINIC5_MOD_MAX || !hwdev) + return -EFAULT; + + func_to_func = ((struct hinic5_hwdev *)hwdev)->func_to_func; + if (!func_to_func) + return -EFAULT; + + func_to_func->vf_mbox_cb[mod] = callback; + func_to_func->vf_mbox_data[mod] = pri_handle; + + set_bit(HINIC5_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]); + + return 0; +} +EXPORT_SYMBOL(hinic5_register_vf_mbox_cb); + +/** + * hinic5_unregister_ppf_mbox_cb - unregister the mbox callback for ppf + * @hwdev: the pointer to hw device + * @mod: specific mod that the callback will handle + */ +void hinic5_unregister_ppf_mbox_cb(void *hwdev, u8 mod) +{ + struct hinic5_mbox *func_to_func = NULL; + + if (mod >= HINIC5_MOD_MAX || !hwdev) + return; + + func_to_func = ((struct hinic5_hwdev *)hwdev)->func_to_func; + if (!func_to_func) + return; + + clear_bit(HINIC5_PPF_MBOX_CB_REG, + &func_to_func->ppf_mbox_cb_state[mod]); + + while 
(test_bit(HINIC5_PPF_MBOX_CB_RUNNING, + &func_to_func->ppf_mbox_cb_state[mod])) + usleep_range(900, 1000); /* sleep 900 us ~ 1000 us */ + + func_to_func->ppf_mbox_data[mod] = NULL; + func_to_func->ppf_mbox_cb[mod] = NULL; +} +EXPORT_SYMBOL(hinic5_unregister_ppf_mbox_cb); + +/** + * hinic5_unregister_pf_mbox_cb - unregister the mbox callback for pf + * @hwdev: the pointer to hw device + * @mod: specific mod that the callback will handle + */ +void hinic5_unregister_pf_mbox_cb(void *hwdev, u8 mod) +{ + struct hinic5_mbox *func_to_func = NULL; + + if (mod >= HINIC5_MOD_MAX || !hwdev) + return; + + func_to_func = ((struct hinic5_hwdev *)hwdev)->func_to_func; + if (!func_to_func) + return; + + clear_bit(HINIC5_PF_MBOX_CB_REG, &func_to_func->pf_mbox_cb_state[mod]); + + while (test_bit(HINIC5_PF_MBOX_CB_RUNNING, &func_to_func->pf_mbox_cb_state[mod]) != 0) + usleep_range(900, 1000); /* sleep 900 us ~ 1000 us */ + + func_to_func->pf_mbox_data[mod] = NULL; + func_to_func->pf_mbox_cb[mod] = NULL; +} +EXPORT_SYMBOL(hinic5_unregister_pf_mbox_cb); + +/** + * hinic5_unregister_vf_mbox_cb - unregister the mbox callback for vf + * @hwdev: the pointer to hw device + * @mod: specific mod that the callback will handle + */ +void hinic5_unregister_vf_mbox_cb(void *hwdev, u8 mod) +{ + struct hinic5_mbox *func_to_func = NULL; + + if (mod >= HINIC5_MOD_MAX || !hwdev) + return; + + func_to_func = ((struct hinic5_hwdev *)hwdev)->func_to_func; + if (!func_to_func) + return; + + clear_bit(HINIC5_VF_MBOX_CB_REG, &func_to_func->vf_mbox_cb_state[mod]); + + while (test_bit(HINIC5_VF_MBOX_CB_RUNNING, &func_to_func->vf_mbox_cb_state[mod]) != 0) + usleep_range(900, 1000); /* sleep 900 us ~ 1000 us */ + + func_to_func->vf_mbox_data[mod] = NULL; + func_to_func->vf_mbox_cb[mod] = NULL; +} +EXPORT_SYMBOL(hinic5_unregister_vf_mbox_cb); + +/** + * hinic5_unregister_ppf_to_pf_mbox_cb - unregister the mbox callback for pf from ppf + * @hwdev: the pointer to hw device + * @mod: specific mod that the callback will 
handle + */ +void hinic5_unregister_ppf_to_pf_mbox_cb(void *hwdev, u8 mod) +{ + struct hinic5_mbox *func_to_func = NULL; + + if (mod >= HINIC5_MOD_MAX || !hwdev) + return; + + func_to_func = ((struct hinic5_hwdev *)hwdev)->func_to_func; + if (!func_to_func) + return; + + clear_bit(HINIC5_PPF_TO_PF_MBOX_CB_REG, + &func_to_func->ppf_to_pf_mbox_cb_state[mod]); + + while (test_bit(HINIC5_PPF_TO_PF_MBOX_CB_RUNNIG, + &func_to_func->ppf_to_pf_mbox_cb_state[mod])) + usleep_range(900, 1000); /* sleep 900 us ~ 1000 us */ + + func_to_func->pf_recv_ppf_mbox_data[mod] = NULL; + func_to_func->pf_recv_ppf_mbox_cb[mod] = NULL; +} + +static int recv_vf_mbox_handler(struct hinic5_mbox *func_to_func, + struct hinic5_recv_mbox *recv_mbox, + void *buf_out, u16 *out_size) +{ + hinic5_vf_mbox_cb cb = NULL; + int ret; + + if (recv_mbox->mod >= HINIC5_MOD_MAX) { + sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod = %u\n", + recv_mbox->mod); + return -EINVAL; + } + + set_bit(HINIC5_VF_MBOX_CB_RUNNING, + &func_to_func->vf_mbox_cb_state[recv_mbox->mod]); + + cb = func_to_func->vf_mbox_cb[recv_mbox->mod]; + if (cb && test_bit(HINIC5_VF_MBOX_CB_REG, + &func_to_func->vf_mbox_cb_state[recv_mbox->mod])) { + ret = cb(func_to_func->vf_mbox_data[recv_mbox->mod], + recv_mbox->cmd, recv_mbox->msg, + recv_mbox->msg_len, buf_out, out_size); + } else { + sdk_warn(func_to_func->hwdev->dev_hdl, "VF mbox cb is not registered\n"); + ret = -EINVAL; + } + + clear_bit(HINIC5_VF_MBOX_CB_RUNNING, + &func_to_func->vf_mbox_cb_state[recv_mbox->mod]); + + return ret; +} + +static int recv_pf_from_ppf_handler(struct hinic5_mbox *func_to_func, + struct hinic5_recv_mbox *recv_mbox, + void *buf_out, u16 *out_size) +{ + hinic5_pf_recv_from_ppf_mbox_cb cb = NULL; + enum hinic5_mod_type mod = recv_mbox->mod; + int ret; + + if (mod >= HINIC5_MOD_MAX) { + sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod = %d\n", + mod); + return -EINVAL; + } + + 
set_bit(HINIC5_PPF_TO_PF_MBOX_CB_RUNNIG, + &func_to_func->ppf_to_pf_mbox_cb_state[mod]); + + cb = func_to_func->pf_recv_ppf_mbox_cb[mod]; + if (cb && test_bit(HINIC5_PPF_TO_PF_MBOX_CB_REG, + &func_to_func->ppf_to_pf_mbox_cb_state[mod]) != 0) { + ret = cb(func_to_func->pf_recv_ppf_mbox_data[mod], + recv_mbox->cmd, recv_mbox->msg, recv_mbox->msg_len, + buf_out, out_size); + } else { + sdk_warn(func_to_func->hwdev->dev_hdl, "PF receive ppf mailbox callback is not registered\n"); + ret = -EINVAL; + } + + clear_bit(HINIC5_PPF_TO_PF_MBOX_CB_RUNNIG, + &func_to_func->ppf_to_pf_mbox_cb_state[mod]); + + return ret; +} + +static int recv_ppf_mbox_handler(struct hinic5_mbox *func_to_func, + struct hinic5_recv_mbox *recv_mbox, + u8 pf_id, void *buf_out, u16 *out_size) +{ + hinic5_ppf_mbox_cb cb = NULL; + u16 vf_id = 0; + int ret; + + if (recv_mbox->mod >= HINIC5_MOD_MAX) { + sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod = %u\n", + recv_mbox->mod); + return -EINVAL; + } + + set_bit(HINIC5_PPF_MBOX_CB_RUNNING, + &func_to_func->ppf_mbox_cb_state[recv_mbox->mod]); + + cb = func_to_func->ppf_mbox_cb[recv_mbox->mod]; + if (cb && test_bit(HINIC5_PPF_MBOX_CB_REG, + &func_to_func->ppf_mbox_cb_state[recv_mbox->mod])) { + ret = cb(func_to_func->ppf_mbox_data[recv_mbox->mod], + pf_id, vf_id, recv_mbox->cmd, recv_mbox->msg, + recv_mbox->msg_len, buf_out, out_size); + } else { + sdk_warn(func_to_func->hwdev->dev_hdl, "PPF mbox cb is not registered, mod = %u\n", + recv_mbox->mod); + ret = -EINVAL; + } + + clear_bit(HINIC5_PPF_MBOX_CB_RUNNING, + &func_to_func->ppf_mbox_cb_state[recv_mbox->mod]); + + return ret; +} + +static int recv_pf_from_vf_mbox_handler(struct hinic5_mbox *func_to_func, + struct hinic5_recv_mbox *recv_mbox, + u16 src_func_idx, void *buf_out, + u16 *out_size) +{ + hinic5_pf_mbox_cb cb = NULL; + u16 vf_id = 0; + int ret; + + if (recv_mbox->mod >= HINIC5_MOD_MAX) { + sdk_warn(func_to_func->hwdev->dev_hdl, "Receive illegal mbox message, mod = %u\n", 
+ recv_mbox->mod); + return -EINVAL; + } + + set_bit(HINIC5_PF_MBOX_CB_RUNNING, + &func_to_func->pf_mbox_cb_state[recv_mbox->mod]); + + cb = func_to_func->pf_mbox_cb[recv_mbox->mod]; + if (cb && test_bit(HINIC5_PF_MBOX_CB_REG, + &func_to_func->pf_mbox_cb_state[recv_mbox->mod]) != 0) { + vf_id = src_func_idx - + hinic5_glb_pf_vf_offset(func_to_func->hwdev); + ret = cb(func_to_func->pf_mbox_data[recv_mbox->mod], + vf_id, recv_mbox->cmd, recv_mbox->msg, + recv_mbox->msg_len, buf_out, out_size); + } else { + sdk_warn(func_to_func->hwdev->dev_hdl, "PF mbox mod(0x%x) cb is not registered\n", + recv_mbox->mod); + ret = -EINVAL; + } + + clear_bit(HINIC5_PF_MBOX_CB_RUNNING, + &func_to_func->pf_mbox_cb_state[recv_mbox->mod]); + + return ret; +} + +static void response_for_recv_func_mbox(struct hinic5_mbox *func_to_func, + struct hinic5_recv_mbox *recv_mbox, + int err, u16 out_size, u16 src_func_idx) +{ + struct mbox_msg_info msg_info = {0}; + u16 size = out_size; + + msg_info.msg_id = recv_mbox->msg_id; + if (err != 0) + /* when mailbox processing got an error, status bit of mailbox header is set. 
*/ + msg_info.status = HINIC5_MBOX_PF_SEND_ERR; + + /* if not data need to response, set out_size to 1 */ + if (out_size == 0 || err != 0) + size = MBOX_MSG_NO_DATA_LEN; + + if (size > HINIC5_MBOX_DATA_SIZE) { + sdk_err(func_to_func->hwdev->dev_hdl, "Response msg len(%u) exceed limit(%u)\n", + size, HINIC5_MBOX_DATA_SIZE); + size = HINIC5_MBOX_DATA_SIZE; + } + + send_mbox_msg(func_to_func, recv_mbox->mod, recv_mbox->cmd, + recv_mbox->resp_buff, size, src_func_idx, + HINIC5_MSG_RESPONSE, HINIC5_MSG_NO_ACK, &msg_info); +} + +static void recv_func_mbox_handler(struct hinic5_mbox *func_to_func, + struct hinic5_recv_mbox *recv_mbox) +{ + struct hinic5_hwdev *dev = func_to_func->hwdev; + void *buf_out = recv_mbox->resp_buff; + u16 src_func_idx = recv_mbox->src_func_idx; + u16 out_size = HINIC5_MBOX_DATA_SIZE; + int err = 0; + + if (HINIC5_IS_VF(dev)) { + err = recv_vf_mbox_handler(func_to_func, recv_mbox, buf_out, &out_size); + goto out; + } + /* pf/ppf process */ + if (IS_PF_OR_PPF_SRC(dev, src_func_idx)) { + if (HINIC5_IS_PPF(dev)) { + err = recv_ppf_mbox_handler(func_to_func, recv_mbox, + (u8)src_func_idx, buf_out, &out_size); + goto out; + } else { + err = recv_pf_from_ppf_handler(func_to_func, recv_mbox, buf_out, &out_size); + goto out; + } + /* The source is neither PF nor PPF, so it is from VF */ + } else { + err = recv_pf_from_vf_mbox_handler(func_to_func, recv_mbox, + src_func_idx, buf_out, &out_size); + } + +out: + if (recv_mbox->ack_type == HINIC5_MSG_ACK) + response_for_recv_func_mbox(func_to_func, recv_mbox, err, out_size, src_func_idx); +} + +static struct hinic5_recv_mbox *alloc_recv_mbox(void) +{ + struct hinic5_recv_mbox *recv_msg = NULL; + + recv_msg = kzalloc(sizeof(*recv_msg), GFP_KERNEL); + if (!recv_msg) + return NULL; + + recv_msg->msg = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); + if (!recv_msg->msg) + goto alloc_msg_err; + + recv_msg->resp_buff = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); + if (!recv_msg->resp_buff) + goto alloc_resp_bff_err; + + return 
recv_msg; + +alloc_resp_bff_err: + kfree(recv_msg->msg); + +alloc_msg_err: + kfree(recv_msg); + + return NULL; +} + +static void free_recv_mbox(struct hinic5_recv_mbox *recv_msg) +{ + kfree(recv_msg->resp_buff); + kfree(recv_msg->msg); + kfree(recv_msg); +} + +static void recv_func_mbox_work_handler(struct work_struct *work) +{ + struct hinic5_mbox_work *mbox_work = + container_of(work, struct hinic5_mbox_work, work); + + recv_func_mbox_handler(mbox_work->func_to_func, mbox_work->recv_mbox); + + atomic_dec(&mbox_work->msg_ch->recv_msg_cnt); + + destroy_work(&mbox_work->work); + + free_recv_mbox(mbox_work->recv_mbox); + kfree(mbox_work); +} + +static void resp_mbox_handler(struct hinic5_mbox *func_to_func, + const struct hinic5_msg_desc *msg_desc) +{ + spin_lock(&func_to_func->mbox_lock); + if (msg_desc->msg_info.msg_id == func_to_func->send_msg_id && + func_to_func->event_flag == EVENT_START) { + /* indicate that mailbox ack response is received */ + func_to_func->event_flag = EVENT_SUCCESS; + } else { + sdk_err(func_to_func->hwdev->dev_hdl, + "Unexpected mailbox response, event(%d), last send msg id(0x%x), recv msg id(0x%x), status(0x%x)\n", + func_to_func->event_flag, func_to_func->send_msg_id, + msg_desc->msg_info.msg_id, msg_desc->msg_info.status); + } + + spin_unlock(&func_to_func->mbox_lock); +} + +static void recv_mbox_msg_handler(struct hinic5_mbox *func_to_func, + struct hinic5_msg_desc *msg_desc, + u64 mbox_header) +{ + struct hinic5_hwdev *hwdev = func_to_func->hwdev; + struct hinic5_recv_mbox *recv_msg = NULL; + struct hinic5_mbox_work *mbox_work = NULL; + struct hinic5_msg_channel *msg_ch = + container_of(msg_desc, struct hinic5_msg_channel, recv_msg); + u16 src_func_idx = HINIC5_MSG_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); + u8 version; + + if (atomic_read(&msg_ch->recv_msg_cnt) > + HINIC5_MAX_MSG_CNT_TO_PROCESS) { + sdk_warn(hwdev->dev_hdl, "This function(%u) have %d message wait to process, can't add to work queue\n", + src_func_idx, 
atomic_read(&msg_ch->recv_msg_cnt)); + return; + } + + recv_msg = alloc_recv_mbox(); + if (!recv_msg) { + sdk_err(hwdev->dev_hdl, "Failed to alloc receive mbox message buffer\n"); + return; + } + recv_msg->msg_len = msg_desc->msg_len; + memcpy(recv_msg->msg, msg_desc->msg, recv_msg->msg_len); + version = hinic5_mbox_get_version(hwdev, &mbox_header); + recv_msg->msg_id = msg_desc->msg_info.msg_id; + recv_msg->mod = hinic5_mbox_get_mod_id(version, &mbox_header); + recv_msg->cmd = HINIC5_MSG_HEADER_GET(mbox_header, CMD); + recv_msg->ack_type = HINIC5_MSG_HEADER_GET(mbox_header, NO_ACK); + recv_msg->src_func_idx = src_func_idx; + + mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL); + if (!mbox_work) { + free_recv_mbox(recv_msg); + return; + } + + atomic_inc(&msg_ch->recv_msg_cnt); + + mbox_work->func_to_func = func_to_func; + mbox_work->recv_mbox = recv_msg; + mbox_work->msg_ch = msg_ch; + + INIT_WORK(&mbox_work->work, recv_func_mbox_work_handler); + queue_work_on(hisdk5_get_work_cpu_affinity(hwdev, WORK_TYPE_MBOX), + func_to_func->workq, &mbox_work->work); +} + +static bool check_mbox_segment(struct hinic5_mbox *func_to_func, + struct hinic5_msg_desc *msg_desc, + u64 mbox_header, void *mbox_body) +{ + u8 seq_id, seg_len, msg_id, mod, version; + u16 src_func_idx, cmd; + + version = hinic5_mbox_get_version(func_to_func->hwdev, &mbox_header); + seq_id = HINIC5_MSG_HEADER_GET(mbox_header, SEQID); + seg_len = hinic5_mbox_get_seg_len(version, &mbox_header); + mod = hinic5_mbox_get_mod_id(version, &mbox_header); + msg_id = HINIC5_MSG_HEADER_GET(mbox_header, MSG_ID); + cmd = HINIC5_MSG_HEADER_GET(mbox_header, CMD); + src_func_idx = HINIC5_MSG_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); + + if (seq_id > SEQ_ID_MAX_VAL || seg_len > MBOX_SEG_LEN || + (seq_id == SEQ_ID_MAX_VAL && seg_len > MBOX_LAST_SEG_MAX_LEN)) + goto seg_err; + + if (seq_id == 0) { + msg_desc->seq_id = seq_id; + msg_desc->msg_info.msg_id = msg_id; + msg_desc->mod = mod; + msg_desc->cmd = cmd; + } else { + if 
(seq_id != msg_desc->seq_id + 1 || msg_id != msg_desc->msg_info.msg_id || + mod != msg_desc->mod || cmd != msg_desc->cmd) + goto seg_err; + + msg_desc->seq_id = seq_id; + } + + return true; + +seg_err: + sdk_err(func_to_func->hwdev->dev_hdl, + "Mailbox segment check failed, src func id: 0x%x, front seg info: seq id: 0x%x, msg id: 0x%x, mod: 0x%x, cmd: 0x%x\n", + src_func_idx, msg_desc->seq_id, msg_desc->msg_info.msg_id, + msg_desc->mod, msg_desc->cmd); + sdk_err(func_to_func->hwdev->dev_hdl, + "Current seg info: seg len: 0x%x, seq id: 0x%x, msg id: 0x%x, mod: 0x%x, cmd: 0x%x\n", + seg_len, seq_id, msg_id, mod, cmd); + + return false; +} + +static void recv_mbox_handler(struct hinic5_mbox *func_to_func, + u64 *header, struct hinic5_msg_desc *msg_desc) +{ + u64 mbox_header = *header; + void *mbox_body = MBOX_BODY_FROM_HDR(((void *)header)); + u8 seq_id, seg_len, version; + int pos; + + if (!check_mbox_segment(func_to_func, msg_desc, mbox_header, mbox_body)) { + msg_desc->seq_id = SEQ_ID_MAX_VAL; + return; + } + + version = hinic5_mbox_get_version(func_to_func->hwdev, &mbox_header); + seq_id = HINIC5_MSG_HEADER_GET(mbox_header, SEQID); + seg_len = hinic5_mbox_get_seg_len(version, &mbox_header); + pos = seq_id * MBOX_SEG_LEN; + memcpy((u8 *)msg_desc->msg + pos, mbox_body, seg_len); + + if (HINIC5_MSG_HEADER_GET(mbox_header, LAST) == 0) + return; + + msg_desc->msg_len = hinic5_mbox_get_msg_len(version, &mbox_header); + msg_desc->msg_info.status = HINIC5_MSG_HEADER_GET(mbox_header, STATUS); + + if (HINIC5_MSG_HEADER_GET(mbox_header, DIRECTION) == + HINIC5_MSG_RESPONSE) { + resp_mbox_handler(func_to_func, msg_desc); + return; + } + + recv_mbox_msg_handler(func_to_func, msg_desc, mbox_header); +} + +void hinic5_mbox_func_aeqe_handler(void *handle, u8 *header, u8 size) +{ + struct hinic5_mbox *func_to_func = NULL; + struct hinic5_msg_desc *msg_desc = NULL; + u64 mbox_header = *((u64 *)header); + u64 src, dir; + + func_to_func = ((struct hinic5_hwdev *)handle)->func_to_func; 
+ if (!func_to_func) { + pr_err("func to func is null\n"); + return; + } + dir = HINIC5_MSG_HEADER_GET(mbox_header, DIRECTION); + src = HINIC5_MSG_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); + + msg_desc = get_mbox_msg_desc(func_to_func, dir, src); + if (!msg_desc) { + sdk_err(func_to_func->hwdev->dev_hdl, + "Mailbox source function id: %u is invalid for current function\n", + (u32)src); + return; + } + + recv_mbox_handler(func_to_func, (u64 *)header, msg_desc); +} + +static int init_mbox_dma_queue(struct hinic5_hwdev *hwdev, struct mbox_dma_queue *mq) +{ + u32 size; + + mq->depth = MBOX_DMA_MSG_QUEUE_DEPTH; + mq->prod_idx = 0; + mq->cons_idx = 0; + + size = mq->depth * MBOX_MAX_BUF_SZ; + mq->dma_buff_vaddr = dma_zalloc_coherent(hwdev->dev_hdl, size, &mq->dma_buff_paddr, + GFP_KERNEL); + if (!mq->dma_buff_vaddr) { + sdk_err(hwdev->dev_hdl, "Failed to alloc dma_buffer\n"); + return -ENOMEM; + } + + return 0; +} + +static void deinit_mbox_dma_queue(struct hinic5_hwdev *hwdev, struct mbox_dma_queue *mq) +{ + dma_free_coherent(hwdev->dev_hdl, mq->depth * MBOX_MAX_BUF_SZ, + mq->dma_buff_vaddr, mq->dma_buff_paddr); +} + +static int hinic5_init_mbox_dma_queue(struct hinic5_mbox *func_to_func) +{ + u32 val; + int err; + + err = init_mbox_dma_queue(func_to_func->hwdev, &func_to_func->sync_msg_queue); + if (err != 0) + return err; + + err = init_mbox_dma_queue(func_to_func->hwdev, &func_to_func->async_msg_queue); + if (err != 0) { + deinit_mbox_dma_queue(func_to_func->hwdev, &func_to_func->sync_msg_queue); + return err; + } + + val = hinic5_hwif_read_reg(func_to_func->hwdev->hwif, MBOX_MQ_CI_OFFSET); + val = MBOX_MQ_CI_CLEAR(val, SYNC); + val = MBOX_MQ_CI_CLEAR(val, ASYNC); + hinic5_hwif_write_reg(func_to_func->hwdev->hwif, MBOX_MQ_CI_OFFSET, val); + + return 0; +} + +static void hinic5_deinit_mbox_dma_queue(struct hinic5_mbox *func_to_func) +{ + deinit_mbox_dma_queue(func_to_func->hwdev, &func_to_func->sync_msg_queue); + deinit_mbox_dma_queue(func_to_func->hwdev, 
&func_to_func->async_msg_queue); +} + +#define MBOX_DMA_MSG_INIT_XOR_VAL 0x5a5a5a5a +#define MBOX_XOR_DATA_ALIGN 4 +static u32 mbox_dma_msg_xor(u32 *data, u16 msg_len) +{ + u32 xor = MBOX_DMA_MSG_INIT_XOR_VAL; + u16 dw_len = msg_len / sizeof(u32); + u16 i; + + for (i = 0; i < dw_len; i++) + xor ^= data[i]; + + return xor; +} + +#define MQ_ID_MASK(mq, idx) ((idx) & ((mq)->depth - 1)) +#define IS_MSG_QUEUE_FULL(mq) (MQ_ID_MASK(mq, (mq)->prod_idx + 1) == \ + MQ_ID_MASK(mq, (mq)->cons_idx)) + +static int mbox_prepare_dma_entry(const struct hinic5_mbox *func_to_func, struct mbox_dma_queue *mq, + struct mbox_dma_msg *dma_msg, const void *msg, u16 msg_len) +{ + u64 dma_addr, offset; + void *dma_vaddr = NULL; + + if (IS_MSG_QUEUE_FULL(mq)) { + sdk_err(func_to_func->hwdev->dev_hdl, + "Mbox sync message queue is busy, pi: %u, ci: %d\n", + mq->prod_idx, MQ_ID_MASK(mq, mq->cons_idx)); + return -EBUSY; + } + + /* copy data to DMA buffer */ + offset = mq->prod_idx * MBOX_MAX_BUF_SZ; + dma_vaddr = (u8 *)mq->dma_buff_vaddr + offset; + memcpy(dma_vaddr, msg, msg_len); + + dma_addr = mq->dma_buff_paddr + offset; + dma_msg->dma_addr_high = upper_32_bits(dma_addr); + dma_msg->dma_addr_low = lower_32_bits(dma_addr); + dma_msg->msg_len = msg_len; + /* The firmware obtains message based on 4B alignment. 
*/ + dma_msg->xor = mbox_dma_msg_xor(dma_vaddr, ALIGN(msg_len, MBOX_XOR_DATA_ALIGN)); + + mq->prod_idx++; + mq->prod_idx = MQ_ID_MASK(mq, mq->prod_idx); + + return 0; +} + +static int mbox_prepare_dma_msg(struct hinic5_mbox *func_to_func, enum hinic5_msg_ack_type ack_type, + struct mbox_dma_msg *dma_msg, void *msg, u16 msg_len) +{ + struct mbox_dma_queue *mq = NULL; + u32 val; + + val = hinic5_hwif_read_reg(func_to_func->hwdev->hwif, MBOX_MQ_CI_OFFSET); + if (ack_type == HINIC5_MSG_ACK) { + mq = &func_to_func->sync_msg_queue; + mq->cons_idx = MBOX_MQ_CI_GET(val, SYNC); + } else { + mq = &func_to_func->async_msg_queue; + mq->cons_idx = MBOX_MQ_CI_GET(val, ASYNC); + } + + return mbox_prepare_dma_entry(func_to_func, mq, dma_msg, msg, msg_len); +} + +#ifdef __UEFI__ +static void write_mbox_reg(struct hinic5_hwif *hwif, u32 offset, u32 val) +{ + BUS_IO_PROTOCOL *bus_io = hwif->bus_dev; + EFI_STATUS Status; + u32 mbox_reg = HINIC5_FUNC_CSR_MAILBOX_DATA_OFF + offset; + + if (!bus_io) { + DebugPrint(DEBUG_ERROR, "Write_reg() bus_io == NULL\n"); + return; + } + + MemoryFence(); + Status = bus_io->Mem.Write(bus_io, EfiBusIoWidthUint32, HINIC5_CFG_BAR, + mbox_reg, 1, (void *)(&val)); + MemoryFence(); + if (EFI_ERROR(Status)) + DebugPrint(DEBUG_ERROR, + "bus_io->Mem.Write() fails: %r\n", Status); +} +#endif + +static void mbox_copy_header(struct hinic5_hwdev *hwdev, + struct hinic5_send_mbox *mbox, u64 *header) +{ + u32 *data = (u32 *)(void *)header; + u32 i, idx_max = MBOX_HEADER_SZ / sizeof(u32); + + for (i = 0; i < idx_max; i++) { +#ifndef __UEFI__ + __raw_writel(cpu_to_be32(*(data + i)), + mbox->data + i * sizeof(u32)); +#else + write_mbox_reg(hwdev->hwif, i * sizeof(u32), + cpu_to_be32(*(data + i))); +#endif + } +} + +static int mbox_copy_send_data(struct hinic5_hwdev *hwdev, + struct hinic5_send_mbox *mbox, void *seg, + u16 seg_len) +{ + u32 *data = seg; + u32 data_len, chk_sz = sizeof(u32); + u32 i, idx_max; + u8 mbox_max_buf[MBOX_SEG_LEN] = {0}; + + /* The mbox message 
should be aligned in 4 bytes. */ + if ((seg_len % chk_sz) != 0) { + memcpy(mbox_max_buf, seg, seg_len); + data = (u32 *)mbox_max_buf; + } + + data_len = seg_len; + idx_max = ALIGN(data_len, chk_sz) / chk_sz; + + for (i = 0; i < idx_max; i++) { +#ifndef __UEFI__ + __raw_writel(cpu_to_be32(*(data + i)), + mbox->data + MBOX_HEADER_SZ + i * sizeof(u32)); +#else + write_mbox_reg(hwdev->hwif, MBOX_HEADER_SZ + i * sizeof(u32), + cpu_to_be32(*(data + i))); +#endif + } + + return 0; +} + +static void write_mbox_msg_attr(struct hinic5_mbox *func_to_func, + u16 dst_func, u16 dst_aeqn, u16 seg_len) +{ + u32 mbox_int, mbox_ctrl; + u16 func = dst_func; + + /* for VF to PF's message, dest func id will self-learning by HW */ + if (HINIC5_IS_VF(func_to_func->hwdev) && dst_func != HINIC5_MGMT_SRC_ID) + func = 0; /* the destination is the VF's PF */ + + mbox_int = HINIC5_MBOX_INT_SET(dst_aeqn, DST_AEQN) | + HINIC5_MBOX_INT_SET(0, SRC_RESP_AEQN) | + HINIC5_MBOX_INT_SET(NO_DMA_ATTRIBUTE_VAL, STAT_DMA) | + HINIC5_MBOX_INT_SET(ALIGN((u32)seg_len + MBOX_HEADER_SZ, + MBOX_SEG_LEN_ALIGN) >> 2, + TX_SIZE) | + HINIC5_MBOX_INT_SET(STRONG_ORDER, STAT_DMA_SO_RO) | + HINIC5_MBOX_INT_SET(WRITE_BACK, WB_EN); + + hinic5_hwif_write_reg(func_to_func->hwdev->hwif, + HINIC5_FUNC_CSR_MAILBOX_INT_OFFSET_OFF, mbox_int); + wmb(); // Ensure all previous writes are completed before signaling the hardware + + mbox_ctrl = HINIC5_MBOX_CTRL_SET(TX_NOT_DONE, TX_STATUS) | + HINIC5_MBOX_CTRL_SET(NOT_TRIGGER, TRIGGER_AEQE) | + HINIC5_MBOX_CTRL_SET(func, DST_FUNC); + + hinic5_hwif_write_reg(func_to_func->hwdev->hwif, + HINIC5_FUNC_CSR_MAILBOX_CONTROL_OFF, mbox_ctrl); +} + +static void clear_mbox_result(struct hinic5_send_mbox *mbox) +{ + *mbox->wb_status = 0; + + /* clear mailbox write back status */ + wmb(); +} + +STATIC u16 get_mbox_status(const struct hinic5_send_mbox *mbox) +{ + /* write back is 16B, but only use first 2B */ + u64 wb_val = be64_to_cpu(*mbox->wb_status); + + rmb(); /* verify reading before check */ 
+ + return (u16)(wb_val & MBOX_WB_STATUS_ERRCODE_MASK); +} + +static enum hinic5_wait_return check_mbox_wb_status(void *priv_data) +{ + struct hinic5_mbox *func_to_func = priv_data; + struct hinic5_hwdev *hwdev = func_to_func->hwdev; + u16 wb_status; + + if (MBOX_MSG_CHANNEL_STOP(func_to_func) || !hinic5_is_chip_present(hwdev)) + return WAIT_PROCESS_ERR; + + if (check_outbound_enable_handler(hwdev) != WAIT_PROCESS_CPL) + return WAIT_PROCESS_ERR; + + wb_status = get_mbox_status(&func_to_func->send_mbox); + + return MBOX_STATUS_FINISHED(wb_status) ? + WAIT_PROCESS_CPL : WAIT_PROCESS_WAITING; +} + +static int wait_mbox_completed(struct hinic5_mbox *func_to_func) +{ + struct hinic5_hwdev *hwdev = func_to_func->hwdev; + struct hinic5_hwif *hwif = hwdev->hwif; + struct hinic5_send_mbox *send_mbox = &func_to_func->send_mbox; + void *dev = hwdev->dev_hdl; + u32 timeout = hwdev->timeout_info->mbox_poll_timeout; + u32 mbox_ctrl, mbox_int_off, mbox_wb_h, mbox_wb_l, func_attr5; + u16 wb_status; + bool tx_done; + int err; + + err = hinic5_wait_for_timeout(func_to_func, check_mbox_wb_status, + timeout, MBOX_MSG_WAIT_ONCE_TIME_US); + if (likely(err == 0)) + return 0; + + mbox_ctrl = hinic5_hwif_read_reg(hwif, HINIC5_FUNC_CSR_MAILBOX_CONTROL_OFF); + if (mbox_ctrl == HINIC5_BUS_LINK_DOWN) { + sdk_err(dev, "Send mailbox segment fail, link down.\n"); + return -EIO; + } + + /* + * Cancel uncompleted mbox request. + * CPI attempts to move mailbox data when TX_STATUS (af_mb_tx_req) bit is set. + */ + tx_done = HINIC5_MBOX_CTRL_GET(mbox_ctrl, TX_STATUS) == TX_DONE; + if (!tx_done) + hinic5_hwif_write_reg(hwif, HINIC5_FUNC_CSR_MAILBOX_CONTROL_OFF, 0); + + /* Mailbox completed before request cancellation. 
*/ + wb_status = get_mbox_status(send_mbox); + if (unlikely(MBOX_STATUS_FINISHED(wb_status))) { + sdk_warn(dev, "Mailbox segment send completed at the last monent.\n"); + return 0; + } + + /* Mbox send timeout */ + mbox_int_off = hinic5_hwif_read_reg(hwif, HINIC5_FUNC_CSR_MAILBOX_INT_OFFSET_OFF); + mbox_wb_h = hinic5_hwif_read_reg(hwif, HINIC5_FUNC_CSR_MAILBOX_RESULT_H_OFF); + mbox_wb_l = hinic5_hwif_read_reg(hwif, HINIC5_FUNC_CSR_MAILBOX_RESULT_L_OFF); + func_attr5 = hinic5_hwif_read_reg(hwif, HINIC5_CSR_FUNC_ATTR5_ADDR); + + sdk_err(dev, "Send mailbox segment timeout, wb status 0x%x, tx done %d.\n", + wb_status, tx_done); + sdk_err(dev, "Mailbox control reg 0x%x\n", mbox_ctrl); + sdk_err(dev, "Mailbox interrupt offset 0x%x\n", mbox_int_off); + sdk_err(dev, "Mailbox result back 0x%x-0x%x (0x%lx)\n", + mbox_wb_h, mbox_wb_l, (uintptr_t)send_mbox->wb_paddr); + sdk_err(dev, "Function attr5 0x%x\n", func_attr5); /* for func/port outbound flush */ + + /* CPI dma write fail */ + if (tx_done) + sdk_err(dev, "Mailbox segment send completed but no result back\n"); + + /* Write back address changed unexpectedly; may be caused by FLR. */ + if (mbox_wb_h != upper_32_bits(send_mbox->wb_paddr) || + mbox_wb_l != lower_32_bits(send_mbox->wb_paddr)) + sdk_err(dev, "Mailbox result back has changed\n"); + + return -ETIMEDOUT; +} + +static int send_mbox_seg(struct hinic5_mbox *func_to_func, u64 header, + u16 dst_func, void *seg, u16 seg_len, void *msg_info) +{ + struct hinic5_send_mbox *send_mbox = &func_to_func->send_mbox; + struct hinic5_hwdev *hwdev = func_to_func->hwdev; + u8 num_aeqs = hwdev->hwif->attr.num_aeqs; + u16 dst_aeqn, wb_status = 0, errcode; + u16 seq_dir = HINIC5_MSG_HEADER_GET(header, DIRECTION); + int err; + + /* mbox to mgmt cpu, hardware don't care dst aeq id */ + if (num_aeqs > HINIC5_MBOX_RSP_MSG_AEQ) + dst_aeqn = (seq_dir == HINIC5_MSG_DIRECT_SEND) ? 
+ HINIC5_ASYNC_MSG_AEQ : HINIC5_MBOX_RSP_MSG_AEQ; + else + dst_aeqn = 0; + + clear_mbox_result(send_mbox); + + mbox_copy_header(hwdev, send_mbox, &header); + + err = mbox_copy_send_data(hwdev, send_mbox, seg, seg_len); + if (err != 0) + return err; + + write_mbox_msg_attr(func_to_func, dst_func, dst_aeqn, seg_len); + wmb(); // Ensure all previous writes are completed before signaling the hardware + + err = wait_mbox_completed(func_to_func); + if (err != 0) + return err; + + wb_status = get_mbox_status(send_mbox); + if (!MBOX_STATUS_SUCCESS(wb_status)) { + sdk_err(hwdev->dev_hdl, "Send mailbox segment to function %u unsuccess, wb status: 0x%x\n", + dst_func, wb_status); + errcode = MBOX_STATUS_ERRCODE(wb_status); + return (errcode != 0) ? errcode : -EFAULT; + } + + return 0; +} + +static void mbox_msg_header_set_pre(u64 *header, const struct mbox_msg_info *msg_info, + enum hinic5_msg_ack_type ack_type, + struct hinic5_hwdev *hwdev) +{ + *header |= + HINIC5_MSG_HEADER_SET(msg_info->msg_id, MSG_ID) | + HINIC5_MSG_HEADER_SET((msg_info->status != 0), STATUS) | + HINIC5_MSG_HEADER_SET(hinic5_global_func_id(hwdev), SRC_GLB_FUNC_IDX) | + HINIC5_MSG_HEADER_SET(ack_type, NO_ACK); +} + +static void mbox_msg_header_set_mid(u64 *header, u16 rsp_aeq_id, u16 cmd, + enum hinic5_msg_direction_type direction, + enum hinic5_data_type data_type) +{ + *header |= HINIC5_MSG_HEADER_SET(data_type, DATA_TYPE) | + HINIC5_MSG_HEADER_SET(NOT_LAST_SEGMENT, LAST) | + HINIC5_MSG_HEADER_SET(SEQ_ID_START_VAL, SEQID) | + HINIC5_MSG_HEADER_SET(direction, DIRECTION) | + HINIC5_MSG_HEADER_SET(cmd, CMD) | + HINIC5_MSG_HEADER_SET(rsp_aeq_id, AEQ_ID) | + HINIC5_MSG_HEADER_SET(HINIC5_MSG_FROM_MBOX, SOURCE); +} + +void mbox_msg_header_set_last(u64 *header, u16 msg_len, u8 mod, + u16 seg_len, struct hinic5_hwdev *hwdev) +{ + u8 version = hinic5_mbox_get_send_version(hwdev, mod); + *header |= hinic5_mbox_set_msg_len(version, msg_len) | + hinic5_mbox_set_mod_id(version, mod) | + 
hinic5_mbox_set_seg_len(version, seg_len) | + hinic5_mbox_set_version(version); +} + +static inline u16 mbox_msg_rsp_aeq(const struct hinic5_hwdev *hwdev) +{ + return (hwdev->poll || hwdev->hwif->attr.num_aeqs >= 0x2) ? + HINIC5_MBOX_RSP_MSG_AEQ : HINIC5_ASYNC_MSG_AEQ; +} + +static inline void dump_mbox_header(struct hinic5_hwdev *hwdev, u64 header) +{ + sdk_err(hwdev->dev_hdl, "Mailbox Header: %llx\n", header); +} + +/* + * DMA message is only support send from non-SPU function to the MGMT. + */ +static inline bool support_dma_msg(struct hinic5_hwdev *hwdev, u16 dst_func) +{ + return dst_func == HINIC5_MGMT_SRC_ID && !hinic5_in_spu(hwdev); +} + +static int send_mbox_msg(struct hinic5_mbox *func_to_func, u8 mod, u16 cmd, + void *msg, u16 msg_len, u16 dst_func, + enum hinic5_msg_direction_type direction, + enum hinic5_msg_ack_type ack_type, + struct mbox_msg_info *msg_info) +{ + struct hinic5_hwdev *hwdev = func_to_func->hwdev; + struct mbox_dma_msg dma_msg = {0}; + enum hinic5_data_type data_type = HINIC5_DATA_INLINE; + bool using_dma_msg = support_dma_msg(hwdev, dst_func); + int err = 0; + u32 seq_id = 0; + u16 seg_len = MBOX_SEG_LEN; + u16 rsp_aeq_id, left; + u8 *msg_seg = NULL; + u64 header = 0; + u16 msg_len_tmp = msg_len; + u8 version = hinic5_mbox_get_send_version(hwdev, mod); + + if (!COMM_SUPPORT_MBOX_HEAD_VER1(hwdev) && mod >= V0_MOD_ID_MAX) + return -EINVAL; + + if (unlikely(dst_func == HINIC5_MGMT_SRC_ID && hinic5_is_chip_error(hwdev))) { + /* Stop VF sending mailbox to the Mgmt when chip is error */ + if (HINIC5_IS_VF(hwdev)) { + sdk_err(hwdev->dev_hdl, + "Stop sending mbox to mgmt, mod %u, cmd %u\n", + mod, cmd); + return -EPERM; + } + + /* No longer support DMA msg when chip is error */ + using_dma_msg = false; + } + + rsp_aeq_id = mbox_msg_rsp_aeq(hwdev); + + mutex_lock(&func_to_func->msg_send_lock); + + if (using_dma_msg) { + err = mbox_prepare_dma_msg(func_to_func, ack_type, &dma_msg, msg, msg_len_tmp); + if (err != 0) + goto send_err; + + msg = 
&dma_msg; + msg_len_tmp = sizeof(dma_msg); + data_type = HINIC5_DATA_DMA; + } + + msg_seg = (u8 *)msg; + left = msg_len_tmp; + + mbox_msg_header_set_pre(&header, msg_info, ack_type, hwdev); + mbox_msg_header_set_mid(&header, rsp_aeq_id, cmd, direction, data_type); + mbox_msg_header_set_last(&header, msg_len_tmp, mod, seg_len, hwdev); + + while (HINIC5_MSG_HEADER_GET(header, LAST) == 0) { + if (left <= MBOX_SEG_LEN) { + header &= ~(hinic5_mbox_get_seg_len_mask(version)); + header |= (hinic5_mbox_set_seg_len(version, left) | + HINIC5_MSG_HEADER_SET(LAST_SEGMENT, LAST)); + + seg_len = left; + } + + msg_info->header = header; + err = send_mbox_seg(func_to_func, header, dst_func, msg_seg, seg_len, msg_info); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Send mbox seg unsuccess, seq_id=0x%llx\n", + HINIC5_MSG_HEADER_GET(header, SEQID)); + dump_mbox_header(hwdev, header); + goto send_err; + } + + if (left < MBOX_SEG_LEN) + goto send_err; + left -= MBOX_SEG_LEN; + msg_seg += MBOX_SEG_LEN; + + seq_id++; + header &= ~(HINIC5_MSG_HEADER_SET(HINIC5_MSG_HEADER_SEQID_MASK, SEQID)); + header |= HINIC5_MSG_HEADER_SET(seq_id, SEQID); + } + +send_err: + mutex_unlock(&func_to_func->msg_send_lock); + + return err; +} + +static void set_mbox_to_func_event(struct hinic5_mbox *func_to_func, + enum mbox_event_state event_flag) +{ + spin_lock(&func_to_func->mbox_lock); + func_to_func->event_flag = event_flag; + spin_unlock(&func_to_func->mbox_lock); +} + +/** + * Check if mgmt is in busy state. + * When link is down, this also returns true. 
+ */ +static inline bool is_mgmt_busy(struct hinic5_hwif *hwif) +{ + u32 val = hinic5_hwif_read_reg(hwif, MBOX_EXT_CSR_OFFSET); + + return MBOX_EXT_GET(val, MGMT_BUSY) != 0; +} + +static enum hinic5_wait_return check_mgmt_busy(void *priv_data) +{ + struct hinic5_mbox *func_to_func = priv_data; + struct hinic5_hwdev *hwdev = func_to_func->hwdev; + + if (unlikely(!hinic5_is_chip_present(func_to_func->hwdev))) + return WAIT_PROCESS_ERR; + + if (unlikely(check_outbound_enable_handler(hwdev) != + WAIT_PROCESS_CPL)) { + return WAIT_PROCESS_ERR; + } + + if (unlikely(is_mgmt_busy(hwdev->hwif))) + return WAIT_PROCESS_WAITING; + + return WAIT_PROCESS_CPL; +} + +static inline int wait_mgmt_unbusy(struct hinic5_mbox *func_to_func, u32 timeout) +{ + return hinic5_wait_for_timeout(func_to_func, check_mgmt_busy, timeout, WAIT_USEC_50); +} + +static enum hinic5_wait_return check_mbox_msg_finish(void *priv_data) +{ + struct hinic5_mbox *func_to_func = priv_data; + struct hinic5_hwdev *hwdev = func_to_func->hwdev; + + if (MBOX_MSG_CHANNEL_STOP(func_to_func) || !hinic5_is_chip_present(hwdev)) + return WAIT_PROCESS_ERR; + + if (check_outbound_enable_handler(hwdev) != WAIT_PROCESS_CPL) + return WAIT_PROCESS_ERR; + + if (hwdev->poll) { +#if defined(__UEFI__) || defined(__VMWARE__) + hinic5_simulated_irq_aeq(hwdev); +#endif + } + + return (func_to_func->event_flag == EVENT_SUCCESS) ? + WAIT_PROCESS_CPL : WAIT_PROCESS_WAITING; +} + +static int wait_mbox_msg_completion(struct hinic5_mbox *func_to_func, + u32 timeout) +{ + u32 wait_time; + u16 rsp_aeq_id; + int err; + + wait_time = (timeout != 0) ? 
timeout : func_to_func->hwdev->timeout_info->mbox_timeout; + err = hinic5_wait_for_timeout(func_to_func, check_mbox_msg_finish, + wait_time, WAIT_USEC_50); + if (err == 0) + goto success; + + if (!func_to_func->hwdev->poll) { + sdk_warn(func_to_func->hwdev->dev_hdl, "Retry mbox msg, timeout: %u, expect_timeout: %u\n", + wait_time, func_to_func->hwdev->timeout_info->mbox_timeout); + + rsp_aeq_id = mbox_msg_rsp_aeq(func_to_func->hwdev); + err = hinic5_reschedule_eq(func_to_func->hwdev, HINIC5_AEQ, rsp_aeq_id); + if (err != 0) + goto timeout; + + err = hinic5_wait_for_timeout(func_to_func, check_mbox_msg_finish, + MBOX_MSG_RETRY_ACK_TIMEOUT, WAIT_USEC_50); + if (err == 0) + goto success; + } + +timeout: + set_mbox_to_func_event(func_to_func, EVENT_TIMEOUT); + return -ETIMEDOUT; + +success: + set_mbox_to_func_event(func_to_func, EVENT_END); + return 0; +} + +#define TRY_MBOX_LOCK_SLEPP 1000 +static int send_mbox_msg_lock(struct hinic5_mbox *func_to_func, u16 channel) +{ + if (!func_to_func->lock_channel_en) { + mutex_lock(&func_to_func->mbox_send_lock); + return 0; + } + + while (test_bit(channel, &func_to_func->channel_stop) == 0) { + if (mutex_trylock(&func_to_func->mbox_send_lock) != 0) + return 0; + + usleep_range(TRY_MBOX_LOCK_SLEPP - 1, TRY_MBOX_LOCK_SLEPP); + } + + return -EAGAIN; +} + +static void send_mbox_msg_unlock(struct hinic5_mbox *func_to_func) +{ + mutex_unlock(&func_to_func->mbox_send_lock); +} + +static void mbox_cmd_cost_time(struct hinic5_hwdev *hwdev, u8 mod, u16 cmd, struct timeval start) +{ + struct timeval end = {0}; + u64 cost_usec; + + if (hinic5_get_perf_en(HINIC5_MAILBOX_PERF)) { + do_gettimeofday(&end); + cost_usec = (u64)((end.tv_sec - start.tv_sec) * MSEC_PER_SEC * USEC_PER_MSEC + + end.tv_usec - start.tv_usec); + sdk_info(hwdev->dev_hdl, + "Mailbox mod: %u cmd: %u, cost time: %llu us\n", mod, cmd, cost_usec); + } +} + +int hinic5_mbox_to_func(struct hinic5_mbox *func_to_func, u8 mod, u16 cmd, + u16 dst_func, void *buf_in, u16 in_size, 
void *buf_out,
+			u16 *out_size, u32 timeout, u16 channel)
+{
+	/* use mbox_resp to hold the data responded from the other function */
+	struct hinic5_hwdev *hwdev = func_to_func->hwdev;
+	struct hinic5_msg_desc *msg_desc = NULL;
+	struct mbox_msg_info msg_info = {0};
+	struct timeval start = {0};
+	int err;
+
+	if (hinic5_get_perf_en(HINIC5_MAILBOX_PERF))
+		do_gettimeofday(&start);
+
+	if (!hinic5_is_chip_present(hwdev))
+		return -EPERM;
+
+	/* expect response message */
+	msg_desc = get_mbox_msg_desc(func_to_func, HINIC5_MSG_RESPONSE, dst_func);
+	if (!msg_desc)
+		return -EFAULT;
+
+	err = send_mbox_msg_lock(func_to_func, channel);
+	if (err != 0)
+		return err;
+
+	if (dst_func == HINIC5_MGMT_SRC_ID) {
+		err = wait_mgmt_unbusy(func_to_func, WAIT_MGMT_UNBUSY_TIMEOUT);
+		if (err != 0) {
+			sdk_err(hwdev->dev_hdl,
+				"Wait for mgmt unbusy failed, err %d\n", err);
+			goto send_err;
+		}
+	}
+
+	func_to_func->cur_msg_channel = channel;
+	msg_info.msg_id = inc_mbox_send_msg_id(func_to_func);
+
+	set_mbox_to_func_event(func_to_func, EVENT_START);
+
+	err = send_mbox_msg(func_to_func, mod, cmd, buf_in, in_size, dst_func,
+			    HINIC5_MSG_DIRECT_SEND, HINIC5_MSG_ACK, &msg_info);
+	if (err != 0) {
+		sdk_err(hwdev->dev_hdl, "Send mailbox mod %u, cmd %u failed, msg_id: %u, err: %d\n",
+			mod, cmd, msg_info.msg_id, err);
+		set_mbox_to_func_event(func_to_func, EVENT_FAIL);
+		goto send_err;
+	}
+	func_to_func->hwdev->mbox_send_cnt++;
+
+	if (wait_mbox_msg_completion(func_to_func, timeout) != 0) {
+		sdk_err(hwdev->dev_hdl, "Wait for mbox mod: %u, cmd: %u msg response timeout, msg_id: %u\n",
+			mod, cmd, msg_info.msg_id);
+		dump_mbox_header(hwdev, msg_info.header);
+		hinic5_dump_aeq_info(hwdev);
+		err = -ETIMEDOUT;
+		goto send_err;
+	}
+	func_to_func->hwdev->mbox_ack_cnt++;
+
+	if (mod != msg_desc->mod || cmd != msg_desc->cmd) {
+		sdk_err(hwdev->dev_hdl,
+			"Invalid response mbox message, mod: 0x%x, cmd: 0x%x, expect mod: 0x%x, cmd: 0x%x\n",
+			msg_desc->mod, msg_desc->cmd, mod, cmd);
+		err =
-EFAULT;
+		goto send_err;
+	}
+
+	if (msg_desc->msg_info.status != 0) {
+		/* status bit of mailbox header is set */
+		sdk_err(hwdev->dev_hdl, "Mailbox processing got an error.\n");
+		err = msg_desc->msg_info.status;
+		goto send_err;
+	}
+
+	if (!buf_out || !out_size) {
+		/* caller expects no response data to be copied out */
+		goto send_err;
+	}
+
+	memcpy(buf_out, msg_desc->msg, msg_desc->msg_len);
+	*out_size = msg_desc->msg_len;
+
+send_err:
+	send_mbox_msg_unlock(func_to_func);
+	mbox_cmd_cost_time(hwdev, mod, cmd, start);
+
+	return err;
+}
+
+static int mbox_func_params_valid(struct hinic5_mbox *func_to_func,
+				  const void *buf_in, u16 in_size, u16 channel)
+{
+	if (!func_to_func || !buf_in || in_size == 0)
+		return -EINVAL;
+
+	if (in_size > HINIC5_MBOX_DATA_SIZE) {
+		sdk_err(func_to_func->hwdev->dev_hdl,
+			"Mbox msg len %u exceed limit: [1, %u]\n",
+			in_size, HINIC5_MBOX_DATA_SIZE);
+		return -EINVAL;
+	}
+
+	if (channel >= HINIC5_CHANNEL_MAX) {
+		sdk_err(func_to_func->hwdev->dev_hdl,
+			"Invalid channel id: 0x%x\n", channel);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int hinic5_mbox_to_func_no_ack(struct hinic5_hwdev *hwdev, u16 func_idx,
+				      u8 mod, u16 cmd, void *buf_in, u16 in_size,
+				      u16 channel)
+{
+	struct mbox_msg_info msg_info = {0};
+	struct timeval start = {0};
+	int err;
+
+	if (hinic5_get_perf_en(HINIC5_MAILBOX_PERF))
+		do_gettimeofday(&start);
+
+	err = mbox_func_params_valid(hwdev->func_to_func, buf_in, in_size,
+				     channel);
+	if (err != 0)
+		return err;
+
+	err = send_mbox_msg_lock(hwdev->func_to_func, channel);
+	if (err != 0)
+		return err;
+
+	err = send_mbox_msg(hwdev->func_to_func, mod, cmd, buf_in, in_size,
+			    func_idx, HINIC5_MSG_DIRECT_SEND,
+			    HINIC5_MSG_NO_ACK, &msg_info);
+	if (err != 0)
+		sdk_err(hwdev->dev_hdl, "Send mailbox no ack unsuccess\n");
+
+	send_mbox_msg_unlock(hwdev->func_to_func);
+	mbox_cmd_cost_time(hwdev, mod, cmd, start);
+
+	return err;
+}
+
+int hinic5_send_mbox_to_mgmt(struct hinic5_hwdev *hwdev, u8 mod, u16 cmd,
+			     void *buf_in, u16 in_size, void
*buf_out, + u16 *out_size, u32 timeout, u16 channel) +{ + struct hinic5_mbox *func_to_func = hwdev->func_to_func; + int err = mbox_func_params_valid(func_to_func, buf_in, in_size, + channel); + if (err != 0) + return err; + + if (mod == HINIC5_MOD_COMM && cmd == COMM_MGMT_CMD_SEND_API_ACK_BY_UP) + return 0; + + return hinic5_mbox_to_func(func_to_func, mod, cmd, HINIC5_MGMT_SRC_ID, + buf_in, in_size, buf_out, out_size, timeout, + channel); +} + +void hinic5_response_mbox_to_mgmt(struct hinic5_hwdev *hwdev, u8 mod, u16 cmd, + void *buf_in, u16 in_size, u16 msg_id) +{ + struct mbox_msg_info msg_info; + int err; + + if (!hwdev || !hwdev->func_to_func) { + pr_err("hwdev is null\n"); + return; + } + + msg_info.msg_id = (u8)msg_id; + msg_info.status = 0; + + err = send_mbox_msg(hwdev->func_to_func, mod, cmd, buf_in, in_size, + HINIC5_MGMT_SRC_ID, HINIC5_MSG_RESPONSE, + HINIC5_MSG_NO_ACK, &msg_info); + if (err != 0) + sdk_err(hwdev->dev_hdl, "Failed to send mbox msg, err: %d\n", err); +} + +int hinic5_send_mbox_to_mgmt_no_ack(struct hinic5_hwdev *hwdev, u8 mod, u16 cmd, + void *buf_in, u16 in_size, u16 channel) +{ + struct hinic5_mbox *func_to_func = hwdev->func_to_func; + int err = mbox_func_params_valid(func_to_func, buf_in, in_size, + channel); + if (err != 0) + return err; + + return hinic5_mbox_to_func_no_ack(hwdev, HINIC5_MGMT_SRC_ID, mod, cmd, + buf_in, in_size, channel); +} + +int hinic5_mbox_ppf_to_host(void *hwdev, u8 mod, u16 cmd, u8 host_id, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout, u16 channel) +{ + struct hinic5_hwdev *dev = hwdev; + u16 dst_ppf_func; + int err; + + if (!hwdev) + return -EINVAL; + + if (!hinic5_is_chip_present(dev)) + return -EPERM; + + err = mbox_func_params_valid(dev->func_to_func, buf_in, in_size, + channel); + if (err != 0) + return err; + + if (!HINIC5_IS_PPF(dev)) { + sdk_err(dev->dev_hdl, "Params error, only ppf support send mbox to ppf. 
func_type: %d\n", + hinic5_func_type(dev)); + return -EINVAL; + } + + if (host_id >= HINIC5_MAX_HOST_NUM(dev) || + host_id == HINIC5_PCI_INTF_IDX(dev->hwif)) { + sdk_err(dev->dev_hdl, "Params error, host id: %u\n", host_id); + return -EINVAL; + } + + dst_ppf_func = hinic5_host_ppf_idx(dev, host_id); + if (dst_ppf_func >= HINIC5_MAX_PF_NUM(dev)) { + sdk_err(dev->dev_hdl, "Dest host(%u) have not elect ppf(0x%x).\n", + host_id, dst_ppf_func); + return -EINVAL; + } + + return hinic5_mbox_to_func(dev->func_to_func, mod, cmd, + dst_ppf_func, buf_in, in_size, + buf_out, out_size, timeout, channel); +} +EXPORT_SYMBOL(hinic5_mbox_ppf_to_host); + +int hinic5_mbox_to_pf(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, + u32 timeout, u16 channel) +{ + struct hinic5_hwdev *dev = hwdev; + int err; + + if (!hwdev) + return -EINVAL; + + if (!hinic5_is_chip_present(dev)) + return -EPERM; + + err = mbox_func_params_valid(dev->func_to_func, buf_in, in_size, + channel); + if (err != 0) + return err; + + if (!HINIC5_IS_VF(dev)) { + sdk_err(dev->dev_hdl, "Params error, func_type: %d\n", + hinic5_func_type(dev)); + return -EINVAL; + } + + return hinic5_mbox_to_func(dev->func_to_func, mod, cmd, + hinic5_pf_id_of_vf(dev), buf_in, in_size, + buf_out, out_size, timeout, channel); +} +EXPORT_SYMBOL(hinic5_mbox_to_pf); + +int hinic5_mbox_to_vf(void *hwdev, u16 vf_id, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout, + u16 channel) +{ + struct hinic5_mbox *func_to_func = NULL; + int err = 0; + u16 dst_func_idx; + + if (!hwdev) + return -EINVAL; + + func_to_func = ((struct hinic5_hwdev *)hwdev)->func_to_func; + if (!func_to_func) + return -EINVAL; + + err = mbox_func_params_valid(func_to_func, buf_in, in_size, channel); + if (err != 0) + return err; + + if (HINIC5_IS_VF((struct hinic5_hwdev *)hwdev)) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, "Params error, func_type: %d\n", + hinic5_func_type(hwdev)); + 
return -EINVAL; + } + + if (vf_id == 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "VF id(%u) error!\n", vf_id); + return -EINVAL; + } + + /* vf_offset_to_pf + vf_id is the vf's global function id of vf in + * this pf + */ + dst_func_idx = hinic5_glb_pf_vf_offset(hwdev) + vf_id; + + return hinic5_mbox_to_func(func_to_func, mod, cmd, dst_func_idx, buf_in, + in_size, buf_out, out_size, timeout, + channel); +} +EXPORT_SYMBOL(hinic5_mbox_to_vf); + +int hinic5_mbox_to_vf_without_ack(void *hwdev, u16 vf_id, u8 mod, u16 cmd, + void *buf_in, u16 in_size, u16 channel) +{ + struct hinic5_mbox *func_to_func = NULL; + int err = 0; + u16 dst_func_idx; + + if (!hwdev) + return -EINVAL; + + func_to_func = ((struct hinic5_hwdev *)hwdev)->func_to_func; + if (!func_to_func) + return -EINVAL; + + err = mbox_func_params_valid(func_to_func, buf_in, in_size, channel); + if (err != 0) + return err; + + if (HINIC5_IS_VF((struct hinic5_hwdev *)hwdev)) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, "Params error, func_type: %d\n", + hinic5_func_type(hwdev)); + return -EINVAL; + } + + if (vf_id == 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "VF id(%u) error!\n", vf_id); + return -EINVAL; + } + + /* vf_offset_to_pf + vf_id is the vf's global function id of vf in + * this pf + */ + dst_func_idx = hinic5_glb_pf_vf_offset(hwdev) + vf_id; + + return hinic5_mbox_to_func_no_ack(hwdev, dst_func_idx, mod, cmd, + buf_in, in_size, channel); +} +EXPORT_SYMBOL(hinic5_mbox_to_vf_without_ack); + +/* This is an old API, which is to be deprecated. 
*/ +int hinic5_mbox_to_vf_no_ack(void *hwdev, u16 vf_id, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u16 channel) +{ + return hinic5_mbox_to_vf_without_ack(hwdev, vf_id, mod, cmd, buf_in, in_size, channel); +} +EXPORT_SYMBOL(hinic5_mbox_to_vf_no_ack); + +int hinic5_mbox_set_channel_status(struct hinic5_hwdev *hwdev, u16 channel, + bool enable) +{ + if (channel >= HINIC5_CHANNEL_MAX) { + sdk_err(hwdev->dev_hdl, "Invalid channel id: 0x%x\n", channel); + return -EINVAL; + } + + if (enable) + clear_bit(channel, &hwdev->func_to_func->channel_stop); + else + set_bit(channel, &hwdev->func_to_func->channel_stop); + + sdk_info(hwdev->dev_hdl, "%s mbox channel 0x%x\n", + enable ? "Enable" : "Disable", channel); + + return 0; +} + +void hinic5_mbox_enable_channel_lock(struct hinic5_hwdev *hwdev, bool enable) +{ + hwdev->func_to_func->lock_channel_en = enable; + + sdk_info(hwdev->dev_hdl, "%s mbox channel lock\n", + enable ? "Enable" : "Disable"); +} + +static int alloc_mbox_msg_channel(struct hinic5_msg_channel *msg_ch) +{ + msg_ch->resp_msg.msg = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); + if (!msg_ch->resp_msg.msg) + return -ENOMEM; + + msg_ch->recv_msg.msg = kzalloc(MBOX_MAX_BUF_SZ, GFP_KERNEL); + if (!msg_ch->recv_msg.msg) { + kfree(msg_ch->resp_msg.msg); + return -ENOMEM; + } + + msg_ch->resp_msg.seq_id = SEQ_ID_MAX_VAL; + msg_ch->recv_msg.seq_id = SEQ_ID_MAX_VAL; + atomic_set(&msg_ch->recv_msg_cnt, 0); + + return 0; +} + +static void free_mbox_msg_channel(struct hinic5_msg_channel *msg_ch) +{ + kfree(msg_ch->recv_msg.msg); + kfree(msg_ch->resp_msg.msg); +} + +static int init_mgmt_msg_channel(struct hinic5_mbox *func_to_func) +{ + int err; + + err = alloc_mbox_msg_channel(&func_to_func->mgmt_msg); + if (err != 0) { + sdk_err(func_to_func->hwdev->dev_hdl, "Failed to alloc mgmt message channel\n"); + return err; + } + + err = hinic5_init_mbox_dma_queue(func_to_func); + if (err != 0) { + sdk_err(func_to_func->hwdev->dev_hdl, "Failed to init mbox 
dma queue\n"); + free_mbox_msg_channel(&func_to_func->mgmt_msg); + } + + return err; +} + +static void deinit_mgmt_msg_channel(struct hinic5_mbox *func_to_func) +{ + hinic5_deinit_mbox_dma_queue(func_to_func); + free_mbox_msg_channel(&func_to_func->mgmt_msg); +} + +int hinic5_mbox_init_host_msg_channel(struct hinic5_hwdev *hwdev) +{ + struct hinic5_mbox *func_to_func = hwdev->func_to_func; + u8 host_num = HINIC5_MAX_HOST_NUM(hwdev); + int i, host_id, err; + + if (host_num == 0) + return 0; + + func_to_func->host_msg = kcalloc(host_num, + sizeof(*func_to_func->host_msg), + GFP_KERNEL); + if (!func_to_func->host_msg) { + sdk_err(func_to_func->hwdev->dev_hdl, "Failed to alloc host message array\n"); + return -ENOMEM; + } + + for (host_id = 0; host_id < host_num; host_id++) { + err = alloc_mbox_msg_channel(&func_to_func->host_msg[host_id]); + if (err != 0) { + sdk_err(func_to_func->hwdev->dev_hdl, + "Failed to alloc host %d message channel\n", + host_id); + goto alloc_msg_ch_err; + } + } + + func_to_func->support_h2h_msg = true; + + return 0; + +alloc_msg_ch_err: + for (i = 0; i < host_id; i++) + free_mbox_msg_channel(&func_to_func->host_msg[i]); + + kfree(func_to_func->host_msg); + func_to_func->host_msg = NULL; + + return -ENOMEM; +} + +static void deinit_host_msg_channel(struct hinic5_mbox *func_to_func) +{ + int i; + + if (!func_to_func->host_msg) + return; + + for (i = 0; i < HINIC5_MAX_HOST_NUM(func_to_func->hwdev); i++) + free_mbox_msg_channel(&func_to_func->host_msg[i]); + + kfree(func_to_func->host_msg); + func_to_func->host_msg = NULL; +} + +int hinic5_init_func_mbox_msg_channel(void *hwdev, u16 num_func) +{ + struct hinic5_hwdev *dev = hwdev; + struct hinic5_mbox *func_to_func = NULL; + u16 func_id, i; + int err; + + if (!hwdev || num_func == 0 || num_func > HINIC5_MAX_FUNCTIONS) + return -EINVAL; + + func_to_func = dev->func_to_func; + if (func_to_func->func_msg) + return (func_to_func->num_func_msg == num_func) ? 
0 : -EFAULT; + + func_to_func->func_msg = + kcalloc(num_func, sizeof(*func_to_func->func_msg), GFP_KERNEL); + if (!func_to_func->func_msg) { + sdk_err(func_to_func->hwdev->dev_hdl, "Failed to alloc func message array\n"); + return -ENOMEM; + } + + for (func_id = 0; func_id < num_func; func_id++) { + err = alloc_mbox_msg_channel(&func_to_func->func_msg[func_id]); + if (err != 0) { + sdk_err(func_to_func->hwdev->dev_hdl, + "Failed to alloc func %u message channel\n", + func_id); + goto alloc_msg_ch_err; + } + } + + func_to_func->num_func_msg = num_func; + + return 0; + +alloc_msg_ch_err: + for (i = 0; i < func_id; i++) + free_mbox_msg_channel(&func_to_func->func_msg[i]); + + kfree(func_to_func->func_msg); + func_to_func->func_msg = NULL; + + return -ENOMEM; +} + +static void hinic5_deinit_func_mbox_msg_channel(struct hinic5_hwdev *hwdev) +{ + struct hinic5_mbox *func_to_func = hwdev->func_to_func; + u16 i; + + if (!func_to_func->func_msg) + return; + + for (i = 0; i < func_to_func->num_func_msg; i++) + free_mbox_msg_channel(&func_to_func->func_msg[i]); + + kfree(func_to_func->func_msg); + func_to_func->func_msg = NULL; +} + +static struct hinic5_msg_desc *get_mbox_msg_desc(struct hinic5_mbox *func_to_func, + u64 dir, u64 src_func_id) +{ + struct hinic5_hwdev *hwdev = func_to_func->hwdev; + struct hinic5_msg_channel *msg_ch = NULL; + u16 id; + + if (src_func_id == HINIC5_MGMT_SRC_ID) { + msg_ch = &func_to_func->mgmt_msg; + } else if (HINIC5_IS_VF(hwdev)) { + /* message from pf */ + msg_ch = func_to_func->func_msg; + if (src_func_id != hinic5_pf_id_of_vf(hwdev) || !msg_ch) + return NULL; + } else if (src_func_id > hinic5_glb_pf_vf_offset(hwdev)) { + /* message from vf */ + id = (u16)(src_func_id - 1U) - hinic5_glb_pf_vf_offset(hwdev); + if (id >= func_to_func->num_func_msg) + return NULL; + + msg_ch = &func_to_func->func_msg[id]; + } else { + /* message from other host's ppf */ + if (!func_to_func->support_h2h_msg) + return NULL; + + for (id = 0; id < 
HINIC5_MAX_HOST_NUM(hwdev); id++) { + if (src_func_id == hinic5_host_ppf_idx(hwdev, (u8)id)) + break; + } + + if (id == HINIC5_MAX_HOST_NUM(hwdev) || !func_to_func->host_msg) + return NULL; + + msg_ch = &func_to_func->host_msg[id]; + } + + return (dir == HINIC5_MSG_DIRECT_SEND) ? + &msg_ch->recv_msg : &msg_ch->resp_msg; +} + +static void prepare_send_mbox(struct hinic5_mbox *func_to_func) +{ + struct hinic5_send_mbox *send_mbox = &func_to_func->send_mbox; + + send_mbox->data = MBOX_AREA(func_to_func->hwdev->hwif); +} + +static int alloc_mbox_wb_result(struct hinic5_mbox *func_to_func) +{ + struct hinic5_send_mbox *send_mbox = &func_to_func->send_mbox; + struct hinic5_hwdev *hwdev = func_to_func->hwdev; + u32 addr_h, addr_l; + + send_mbox->wb_vaddr = dma_zalloc_coherent(hwdev->dev_hdl, + MBOX_WB_STATUS_LEN, + &send_mbox->wb_paddr, + GFP_KERNEL); + if (!send_mbox->wb_vaddr) + return -ENOMEM; + + send_mbox->wb_status = send_mbox->wb_vaddr; + + addr_h = upper_32_bits(send_mbox->wb_paddr); + addr_l = lower_32_bits(send_mbox->wb_paddr); + + hinic5_hwif_write_reg(hwdev->hwif, HINIC5_FUNC_CSR_MAILBOX_RESULT_H_OFF, + addr_h); + hinic5_hwif_write_reg(hwdev->hwif, HINIC5_FUNC_CSR_MAILBOX_RESULT_L_OFF, + addr_l); + + return 0; +} + +static void free_mbox_wb_result(struct hinic5_mbox *func_to_func) +{ + struct hinic5_send_mbox *send_mbox = &func_to_func->send_mbox; + struct hinic5_hwdev *hwdev = func_to_func->hwdev; + + hinic5_hwif_write_reg(hwdev->hwif, HINIC5_FUNC_CSR_MAILBOX_RESULT_H_OFF, + 0); + hinic5_hwif_write_reg(hwdev->hwif, HINIC5_FUNC_CSR_MAILBOX_RESULT_L_OFF, + 0); + + dma_free_coherent(hwdev->dev_hdl, MBOX_WB_STATUS_LEN, + send_mbox->wb_vaddr, send_mbox->wb_paddr); +} + +int hinic5_func_to_func_init(struct hinic5_hwdev *hwdev) +{ + struct hinic5_mbox *func_to_func = NULL; + int err = -ENOMEM; + + func_to_func = kzalloc(sizeof(*func_to_func), GFP_KERNEL); + if (!func_to_func) + return -ENOMEM; + + hwdev->func_to_func = func_to_func; + func_to_func->hwdev = hwdev; + 
+ mutex_init(&func_to_func->mbox_send_lock); + mutex_init(&func_to_func->msg_send_lock); + spin_lock_init(&func_to_func->mbox_lock); + func_to_func->workq = create_singlethread_workqueue(HINIC5_MBOX_WQ_NAME); + if (!func_to_func->workq) { + sdk_err(hwdev->dev_hdl, "Failed to initialize MBOX workqueue\n"); + goto create_mbox_workq_err; + } + + err = init_mgmt_msg_channel(func_to_func); + if (err != 0) + goto init_mgmt_msg_ch_err; + + if (HINIC5_IS_VF(hwdev)) { + /* VF to PF mbox message channel */ + err = hinic5_init_func_mbox_msg_channel(hwdev, 1); + if (err != 0) + goto init_func_msg_ch_err; + } + + err = alloc_mbox_wb_result(func_to_func); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to alloc mbox write back status\n"); + goto alloc_wb_status_err; + } + + prepare_send_mbox(func_to_func); + + return 0; + +alloc_wb_status_err: + if (HINIC5_IS_VF(hwdev)) + hinic5_deinit_func_mbox_msg_channel(hwdev); + +init_func_msg_ch_err: + deinit_mgmt_msg_channel(func_to_func); + +init_mgmt_msg_ch_err: + destroy_workqueue(func_to_func->workq); + +create_mbox_workq_err: + spin_lock_deinit(&func_to_func->mbox_lock); + mutex_deinit(&func_to_func->msg_send_lock); + mutex_deinit(&func_to_func->mbox_send_lock); + kfree(func_to_func); + + return err; +} + +void hinic5_func_to_func_free(struct hinic5_hwdev *hwdev) +{ + struct hinic5_mbox *func_to_func = hwdev->func_to_func; + + /* destroy workqueue before free related mbox resources in case of + * illegal resource access + */ + destroy_workqueue(func_to_func->workq); + + free_mbox_wb_result(func_to_func); + if (HINIC5_IS_PPF(hwdev)) + deinit_host_msg_channel(func_to_func); + hinic5_deinit_func_mbox_msg_channel(hwdev); + deinit_mgmt_msg_channel(func_to_func); + spin_lock_deinit(&func_to_func->mbox_lock); + mutex_deinit(&func_to_func->mbox_send_lock); + mutex_deinit(&func_to_func->msg_send_lock); + + kfree(func_to_func); +} diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_mgmt.c 
b/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_mgmt.c new file mode 100644 index 00000000..6e11fa61 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_mgmt.c @@ -0,0 +1,1569 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/device.h> +#include <linux/spinlock.h> +#include <linux/completion.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/semaphore.h> + +#include "ossl_knl.h" +#include "hinic5_crm.h" +#include "hinic5_hw.h" +#include "hinic5_common.h" +#include "mpu_inband_cmd.h" +#include "hinic5_hwdev.h" +#include "hinic5_bus.h" +#include "hinic5_eqs.h" +#include "hinic5_mbox.h" +#include "hinic5_api_cmd.h" +#include "hinic5_prof_adap.h" +#include "hinic5_csr_inner.h" +#include "hinic5_mgmt.h" + +#define HINIC5_MSG_TO_MGMT_MAX_LEN 2016 + +#define HINIC5_API_CHAIN_AEQ_ID 2 +#define MAX_PF_MGMT_BUF_SIZE 2048UL +#define SEGMENT_LEN 48 +#define ASYNC_MSG_FLAG 0x8 +#define MGMT_MSG_MAX_SEQ_ID (ALIGN(HINIC5_MSG_TO_MGMT_MAX_LEN, \ + SEGMENT_LEN) / SEGMENT_LEN) + +#define MGMT_MSG_LAST_SEG_MAX_LEN (MAX_PF_MGMT_BUF_SIZE - \ + SEGMENT_LEN * MGMT_MSG_MAX_SEQ_ID) + +#define BUF_OUT_DEFAULT_SIZE 1 + +#define MGMT_MSG_SIZE_MIN 20 +#define MGMT_MSG_SIZE_STEP 16 +#define MGMT_MSG_RSVD_FOR_DEV 8 + +#define SYNC_MSG_ID_MASK 0x7 +#define ASYNC_MSG_ID_MASK 0x7 + +#define SYNC_FLAG 0 +#define ASYNC_FLAG 1 + +#define MSG_NO_RESP 0xFFFF + +#ifdef PLATFORM_MODE_FPGA +#define MGMT_MSG_TIMEOUT 200000 +#else +#define MGMT_MSG_TIMEOUT 20000 +#endif + +#define SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id) + +#define SYNC_MSG_ID_INC(pf_to_mgmt) (SYNC_MSG_ID(pf_to_mgmt) = \ + (SYNC_MSG_ID(pf_to_mgmt) + 1) & SYNC_MSG_ID_MASK) +#define ASYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->async_msg_id) + +#define ASYNC_MSG_ID_INC(pf_to_mgmt) 
(ASYNC_MSG_ID(pf_to_mgmt) = \ + ((ASYNC_MSG_ID(pf_to_mgmt) + 1) & ASYNC_MSG_ID_MASK) \ + | ASYNC_MSG_FLAG) + +static void pf_to_mgmt_send_event_set(struct hinic5_msg_pf_to_mgmt *pf_to_mgmt, + int event_flag) +{ + spin_lock(&pf_to_mgmt->sync_event_lock); + pf_to_mgmt->event_flag = event_flag; + spin_unlock(&pf_to_mgmt->sync_event_lock); +} + +/** + * hinic5_register_mgmt_msg_cb - register sync msg handler for a module + * @hwdev: the pointer to hw device + * @mod: module in the chip that this handler will handle its sync messages + * @pri_handle: specific mod's private data that will be used in callback + * @callback: the handler for a sync message that will handle messages + **/ +int hinic5_register_mgmt_msg_cb(void *hwdev, u8 mod, void *pri_handle, + hinic5_mgmt_msg_cb callback) +{ + struct hinic5_msg_pf_to_mgmt *pf_to_mgmt = NULL; + + if (mod >= HINIC5_MOD_HW_MAX || !hwdev) + return -EFAULT; + + pf_to_mgmt = ((struct hinic5_hwdev *)hwdev)->pf_to_mgmt; + if (!pf_to_mgmt) + return -EINVAL; + + pf_to_mgmt->recv_mgmt_msg_cb[mod] = callback; + pf_to_mgmt->recv_mgmt_msg_data[mod] = pri_handle; + + set_bit(HINIC5_MGMT_MSG_CB_REG, &pf_to_mgmt->mgmt_msg_cb_state[mod]); + + return 0; +} +EXPORT_SYMBOL(hinic5_register_mgmt_msg_cb); + +/** + * hinic5_unregister_mgmt_msg_cb - unregister sync msg handler for a module + * @hwdev: the pointer to hw device + * @mod: module in the chip that this handler will handle its sync messages + **/ +void hinic5_unregister_mgmt_msg_cb(void *hwdev, u8 mod) +{ + struct hinic5_msg_pf_to_mgmt *pf_to_mgmt = NULL; + + if (!hwdev || mod >= HINIC5_MOD_HW_MAX) + return; + + pf_to_mgmt = ((struct hinic5_hwdev *)hwdev)->pf_to_mgmt; + if (!pf_to_mgmt) + return; + + clear_bit(HINIC5_MGMT_MSG_CB_REG, &pf_to_mgmt->mgmt_msg_cb_state[mod]); + + while (test_bit(HINIC5_MGMT_MSG_CB_RUNNING, + &pf_to_mgmt->mgmt_msg_cb_state[mod])) + usleep_range(900, 1000); /* sleep 900 us ~ 1000 us */ + + pf_to_mgmt->recv_mgmt_msg_cb[mod] = NULL; + 
pf_to_mgmt->recv_mgmt_msg_data[mod] = NULL; +} +EXPORT_SYMBOL(hinic5_unregister_mgmt_msg_cb); + +/** + * mgmt_msg_len - calculate the total message length + * @msg_data_len: the length of the message data + * Return: the total message length + **/ +static u16 mgmt_msg_len(u16 msg_data_len) +{ + /* u64 - the size of the header */ + u16 msg_size; + + msg_size = (u16)(MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + msg_data_len); + + if (msg_size > MGMT_MSG_SIZE_MIN) + msg_size = MGMT_MSG_SIZE_MIN + + ALIGN((msg_size - MGMT_MSG_SIZE_MIN), + MGMT_MSG_SIZE_STEP); + else + msg_size = MGMT_MSG_SIZE_MIN; + + return msg_size; +} + +/** + * prepare_header - prepare the header of the message + * @pf_to_mgmt: PF to MGMT channel + * @header: pointer of the header to prepare + * @msg_len: the length of the message + * @mod: module in the chip that will get the message + * @direction: the direction of the original message + * @msg_id: message id + **/ +static void prepare_header(struct hinic5_msg_pf_to_mgmt *pf_to_mgmt, + u64 *header, u16 msg_len, u8 mod, + enum hinic5_msg_ack_type ack_type, + enum hinic5_mgmt_cmd cmd, u32 msg_id) +{ + struct hinic5_hwif *hwif = pf_to_mgmt->hwdev->hwif; + + mbox_msg_header_set_last(header, msg_len, mod, msg_len, pf_to_mgmt->hwdev); + + *header |= HINIC5_MSG_HEADER_SET(ack_type, NO_ACK) | + HINIC5_MSG_HEADER_SET(HINIC5_DATA_INLINE, DATA_TYPE) | + HINIC5_MSG_HEADER_SET(0, SEQID) | + HINIC5_MSG_HEADER_SET(HINIC5_API_CHAIN_AEQ_ID, AEQ_ID) | + HINIC5_MSG_HEADER_SET(LAST_SEGMENT, LAST) | + HINIC5_MSG_HEADER_SET(HINIC5_MSG_DIRECT_SEND, DIRECTION) | + HINIC5_MSG_HEADER_SET(cmd, CMD) | + HINIC5_MSG_HEADER_SET(HINIC5_MSG_FROM_MGMT, SOURCE) | + HINIC5_MSG_HEADER_SET(hwif->attr.func_global_idx, + SRC_GLB_FUNC_IDX) | + HINIC5_MSG_HEADER_SET(msg_id, MSG_ID); +} + +static void clp_prepare_header(struct hinic5_hwdev *hwdev, u64 *header, + u16 msg_len, u8 mod, + enum hinic5_mgmt_cmd cmd) +{ + struct hinic5_hwif *hwif = hwdev->hwif; + + mbox_msg_header_set_last(header, 
msg_len, mod, msg_len, hwdev);
+	*header |= HINIC5_MSG_HEADER_SET(0, NO_ACK) |
+		   HINIC5_MSG_HEADER_SET(HINIC5_DATA_INLINE, DATA_TYPE) |
+		   HINIC5_MSG_HEADER_SET(0, SEQID) |
+		   HINIC5_MSG_HEADER_SET(HINIC5_API_CHAIN_AEQ_ID, AEQ_ID) |
+		   HINIC5_MSG_HEADER_SET(LAST_SEGMENT, LAST) |
+		   HINIC5_MSG_HEADER_SET(0, DIRECTION) |
+		   HINIC5_MSG_HEADER_SET(cmd, CMD) |
+		   HINIC5_MSG_HEADER_SET(hwif->attr.func_global_idx,
+					 SRC_GLB_FUNC_IDX) |
+		   HINIC5_MSG_HEADER_SET(0, MSG_ID);
+}
+
+/**
+ * prepare_mgmt_cmd - prepare the mgmt command
+ * @mgmt_cmd: pointer to the command to prepare
+ * @header: pointer of the header to prepare
+ * @msg: the data of the message
+ * @msg_len: the length of the message
+ **/
+static int prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, const void *msg,
+			    int msg_len)
+{
+	u8 *mgmt_cmd_new = mgmt_cmd;
+
+	memset(mgmt_cmd_new, 0, MGMT_MSG_RSVD_FOR_DEV);
+
+	mgmt_cmd_new += MGMT_MSG_RSVD_FOR_DEV;
+	memcpy(mgmt_cmd_new, header, sizeof(*header));
+
+	mgmt_cmd_new += sizeof(*header);
+	memcpy(mgmt_cmd_new, msg, (size_t)(u32)msg_len);
+
+	return 0;
+}
+
+/**
+ * send_msg_to_mgmt_sync - send a synchronous message to the mgmt CPU
+ * @pf_to_mgmt: PF to MGMT channel
+ * @mod: module in the chip that will get the message
+ * @cmd: command of the message
+ * @msg: the msg data
+ * @msg_len: the msg data length
+ * Context: caller must hold pf_to_mgmt->sync_msg_lock (protects sync_msg_buf)
+ * The msg id is generated internally via SYNC_MSG_ID_INC.
+ * Return: 0 - success, negative - failure
+ **/
+static int send_msg_to_mgmt_sync(struct hinic5_msg_pf_to_mgmt *pf_to_mgmt,
+				 u8 mod, u16 cmd, const void *msg, u16 msg_len)
+{
+	void *mgmt_cmd = pf_to_mgmt->sync_msg_buf;
+	struct hinic5_api_cmd_chain *chain = NULL;
+	u8 node_id = HINIC5_MGMT_CPU_NODE_ID(pf_to_mgmt->hwdev);
+	u64 header = 0;
+	u16 cmd_size = mgmt_msg_len(msg_len);
+	int ret;
+
+	if (hinic5_get_chip_present_flag(pf_to_mgmt->hwdev) == 0)
+		return -EFAULT;
+
+	if (cmd_size > HINIC5_MSG_TO_MGMT_MAX_LEN)
+		return -EFAULT;
+
+	prepare_header(pf_to_mgmt, &header, msg_len, mod,
HINIC5_MSG_ACK, + cmd, SYNC_MSG_ID_INC(pf_to_mgmt)); + chain = pf_to_mgmt->cmd_chain[HINIC5_API_CMD_WRITE_TO_MGMT_CPU]; + + pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_START); + + ret = prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len); + if (ret != 0) + return ret; + + return hinic5_api_cmd_write(chain, node_id, mgmt_cmd, cmd_size); +} + +/** + * send_msg_to_mgmt_async - send async message + * @pf_to_mgmt: PF to MGMT channel + * @mod: module in the chip that will get the message + * @cmd: command of the message + * @msg: the data of the message + * @msg_len: the length of the message + * @direction: the direction of the original message + * Return: 0 - success, negative - failure + **/ +static int send_msg_to_mgmt_async(struct hinic5_msg_pf_to_mgmt *pf_to_mgmt, + u8 mod, u16 cmd, const void *msg, u16 msg_len) +{ + void *mgmt_cmd = pf_to_mgmt->async_msg_buf; + struct hinic5_api_cmd_chain *chain = NULL; + u8 node_id = HINIC5_MGMT_CPU_NODE_ID(pf_to_mgmt->hwdev); + u64 header = 0; + u16 cmd_size = mgmt_msg_len(msg_len); + int ret; + + if (hinic5_get_chip_present_flag(pf_to_mgmt->hwdev) == 0) + return -EFAULT; + + if (cmd_size > HINIC5_MSG_TO_MGMT_MAX_LEN) + return -EFAULT; + + prepare_header(pf_to_mgmt, &header, msg_len, mod, HINIC5_MSG_NO_ACK, + cmd, ASYNC_MSG_ID(pf_to_mgmt)); + + ret = prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len); + if (ret != 0) + return ret; + + chain = pf_to_mgmt->cmd_chain[HINIC5_API_CMD_WRITE_ASYNC_TO_MGMT_CPU]; + + return hinic5_api_cmd_write(chain, node_id, mgmt_cmd, cmd_size); +} + +static inline int msg_to_mgmt_pre(u8 mod, void *buf_in, u16 in_size) +{ + struct hinic5_msg_head *msg_head = NULL; + + /* set aeq fix num to 3, need to ensure response aeq id < 3 */ + if (mod == HINIC5_MOD_COMM || mod == HINIC5_MOD_L2NIC) { + if (in_size < sizeof(struct hinic5_msg_head)) + return -EINVAL; + + msg_head = buf_in; + + if (msg_head->resp_aeq_num >= HINIC5_MAX_AEQS) + msg_head->resp_aeq_num = 0; + } + + return 0; +} + +static int 
msg_to_mgmt_wait_completion(void *hwdev, u32 timeout) +{ + struct hinic5_msg_pf_to_mgmt *pf_to_mgmt = NULL; + void *dev = ((struct hinic5_hwdev *)hwdev)->dev_hdl; + struct hinic5_recv_msg *recv_msg = NULL; + ulong timeo; + ulong ret; + + pf_to_mgmt = ((struct hinic5_hwdev *)hwdev)->pf_to_mgmt; + recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt; + + timeo = msecs_to_jiffies((timeout != 0) ? timeout : MGMT_MSG_TIMEOUT); + + ret = wait_for_completion_timeout(&recv_msg->recv_done, timeo); + if (ret == 0) { + sdk_err(dev, "Mgmt response sync cmd timeout, sync_msg_id: %u\n", + pf_to_mgmt->sync_msg_id); + hinic5_dump_aeq_info((struct hinic5_hwdev *)hwdev); + pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_TIMEOUT); + return -ETIMEDOUT; + } + + spin_lock(&pf_to_mgmt->sync_event_lock); + if (pf_to_mgmt->event_flag == SEND_EVENT_TIMEOUT) { + spin_unlock(&pf_to_mgmt->sync_event_lock); + return -ETIMEDOUT; + } + spin_unlock(&pf_to_mgmt->sync_event_lock); + + pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_END); + + if (!hinic5_is_chip_present(hwdev)) + return -ETIMEDOUT; + + return 0; +} + +int hinic5_pf_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout) +{ + struct hinic5_msg_pf_to_mgmt *pf_to_mgmt = NULL; + void *dev = ((struct hinic5_hwdev *)hwdev)->dev_hdl; + struct hinic5_recv_msg *recv_msg = NULL; + struct completion *recv_done = NULL; + int err; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic5_hwdev *)hwdev)) + return -EPERM; + + if (!buf_in || in_size == 0) + return -EINVAL; + + err = msg_to_mgmt_pre(mod, buf_in, in_size); + if (err != 0) + return -EINVAL; + + pf_to_mgmt = ((struct hinic5_hwdev *)hwdev)->pf_to_mgmt; + + /* Lock the sync_msg_buf */ + down(&pf_to_mgmt->sync_msg_lock); + recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt; + recv_done = &recv_msg->recv_done; + + init_completion(recv_done); + + err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size); + if (err != 0) { + sdk_err(dev, 
"Failed to send sync msg to mgmt, sync_msg_id: %u\n", + pf_to_mgmt->sync_msg_id); + pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_FAIL); + goto unlock_sync_msg; + } + + err = msg_to_mgmt_wait_completion(hwdev, timeout); + if (err != 0) + goto unlock_sync_msg; + + if (buf_out && out_size) { + if (*out_size < recv_msg->msg_len) { + sdk_err(dev, "Invalid response message length: %u for mod %u cmd %u from mgmt, should less than: %u\n", + recv_msg->msg_len, mod, cmd, *out_size); + err = -EFAULT; + goto unlock_sync_msg; + } + + if (recv_msg->msg_len != 0) + memcpy(buf_out, recv_msg->msg, recv_msg->msg_len); + + *out_size = recv_msg->msg_len; + } + +unlock_sync_msg: + destroy_completion(recv_done); + up(&pf_to_mgmt->sync_msg_lock); + return err; +} + +int hinic5_pf_to_mgmt_async(void *hwdev, u8 mod, u16 cmd, const void *buf_in, + u16 in_size) +{ + struct hinic5_msg_pf_to_mgmt *pf_to_mgmt = NULL; + void *dev = ((struct hinic5_hwdev *)hwdev)->dev_hdl; + int err; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic5_hwdev *)hwdev)) + return -EPERM; + + pf_to_mgmt = ((struct hinic5_hwdev *)hwdev)->pf_to_mgmt; + + /* Lock the async_msg_buf */ + spin_lock_bh(&pf_to_mgmt->async_msg_lock); + ASYNC_MSG_ID_INC(pf_to_mgmt); + + err = send_msg_to_mgmt_async(pf_to_mgmt, mod, cmd, buf_in, in_size); + spin_unlock_bh(&pf_to_mgmt->async_msg_lock); + + if (err != 0) { + sdk_err(dev, "Failed to send async mgmt msg\n"); + return err; + } + + return 0; +} + +int hinic5_pf_msg_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, + u32 timeout) +{ + if (!hwdev) + return -EINVAL; + + if (hinic5_get_chip_present_flag(hwdev) == 0) + return -EPERM; + + if (in_size > HINIC5_MSG_TO_MGMT_MAX_LEN) + return -EINVAL; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic5_hwdev *)hwdev)) + return -EPERM; + + return hinic5_pf_to_mgmt_sync(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout); +} + +int hinic5_msg_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void 
*buf_in, + u16 in_size, void *buf_out, u16 *out_size, + u32 timeout, u16 channel) +{ + if (!hwdev) + return -EINVAL; + + if (hinic5_get_chip_present_flag(hwdev) == 0) + return -EPERM; + + return hinic5_send_mbox_to_mgmt(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout, channel); +} +EXPORT_SYMBOL(hinic5_msg_to_mgmt_sync); + +int hinic5_msg_to_mgmt_no_ack(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, u16 channel) +{ + if (!hwdev) + return -EINVAL; + + if (hinic5_get_chip_present_flag(hwdev) == 0) + return -EPERM; + + return hinic5_send_mbox_to_mgmt_no_ack(hwdev, mod, cmd, buf_in, + in_size, channel); +} +EXPORT_SYMBOL(hinic5_msg_to_mgmt_no_ack); + +int hinic5_msg_to_mgmt_async(void *hwdev, u8 mod, u16 cmd, const void *buf_in, + u16 in_size, u16 channel) +{ + return hinic5_msg_to_mgmt_api_chain_async(hwdev, mod, cmd, buf_in, + in_size); +} +EXPORT_SYMBOL(hinic5_msg_to_mgmt_async); + +int hinic5_msg_to_mgmt_api_chain_sync(void *hwdev, u8 mod, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout) +{ + if (!hwdev) + return -EINVAL; + + if (hinic5_get_chip_present_flag(hwdev) == 0) + return -EPERM; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic5_hwdev *)hwdev)) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "PF don't support api chain\n"); + return -EPERM; + } + + return hinic5_pf_msg_to_mgmt_sync(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout); +} + +int hinic5_msg_to_mgmt_api_chain_async(void *hwdev, u8 mod, u16 cmd, + const void *buf_in, u16 in_size) +{ + int err; + + if (!hwdev) + return -EINVAL; + + if (hinic5_func_type(hwdev) == TYPE_VF) { + err = -EFAULT; + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "VF don't support async cmd\n"); + } else if (!COMM_SUPPORT_API_CHAIN((struct hinic5_hwdev *)hwdev)) { + err = -EPERM; + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "PF don't support api chain\n"); + } else { + err = hinic5_pf_to_mgmt_async(hwdev, mod, cmd, buf_in, in_size); + 
} + + return err; +} +EXPORT_SYMBOL(hinic5_msg_to_mgmt_api_chain_async); + +bool hinic5_is_multi_bm(void *hwdev) +{ + struct hinic5_hwdev *hw_dev = hwdev; + + if (!hwdev) + return false; + + return ((IS_BMGW_SLAVE_HOST(hw_dev)) || (IS_BMGW_MASTER_HOST(hw_dev))) ? true : false; +} +EXPORT_SYMBOL(hinic5_is_multi_bm); + +bool hinic5_is_slave_host(void *hwdev) +{ + struct hinic5_hwdev *hw_dev = hwdev; + + if (!hwdev) { + pr_err("hwdev is null\n"); + return false; + } + + return ((IS_BMGW_SLAVE_HOST(hw_dev)) || (IS_VM_SLAVE_HOST(hw_dev))) ? true : false; +} +EXPORT_SYMBOL(hinic5_is_slave_host); + +bool hinic5_is_master_host(void *hwdev) +{ + struct hinic5_hwdev *hw_dev = hwdev; + + if (!hwdev) { + pr_err("hwdev is null\n"); + return false; + } + + return ((IS_BMGW_MASTER_HOST(hw_dev)) || (IS_VM_MASTER_HOST(hw_dev))) ? true : false; +} +EXPORT_SYMBOL(hinic5_is_master_host); + +static void send_mgmt_ack(struct hinic5_msg_pf_to_mgmt *pf_to_mgmt, + u8 mod, u16 cmd, void *buf_in, u16 in_size, + u16 msg_id) +{ + u16 buf_size; + + if (in_size == 0) + buf_size = BUF_OUT_DEFAULT_SIZE; + else + buf_size = in_size; + + hinic5_response_mbox_to_mgmt(pf_to_mgmt->hwdev, mod, cmd, buf_in, + buf_size, msg_id); +} + +static void mgmt_recv_msg_handler(struct hinic5_msg_pf_to_mgmt *pf_to_mgmt, + u8 mod, u16 cmd, void *buf_in, u16 in_size, + u16 msg_id, bool need_resp) +{ + void *dev = pf_to_mgmt->hwdev->dev_hdl; + void *buf_out = pf_to_mgmt->mgmt_ack_buf; + enum hinic5_mod_type tmp_mod = mod; + u16 out_size = 0; + + memset(buf_out, 0, MAX_PF_MGMT_BUF_SIZE); + + if (mod >= HINIC5_MOD_HW_MAX) { + sdk_warn(dev, "Receive illegal message from mgmt cpu, mod = %u\n", + mod); + goto unsupported; + } + + set_bit(HINIC5_MGMT_MSG_CB_RUNNING, + &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]); + + if (!pf_to_mgmt->recv_mgmt_msg_cb[mod] || + !test_bit(HINIC5_MGMT_MSG_CB_REG, + &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod])) { + sdk_warn(dev, "Receive mgmt callback is null, mod = %u, cmd=%u\n", mod, cmd); + 
clear_bit(HINIC5_MGMT_MSG_CB_RUNNING, + &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]); + goto unsupported; + } + + pf_to_mgmt->recv_mgmt_msg_cb[tmp_mod](pf_to_mgmt->recv_mgmt_msg_data[tmp_mod], + cmd, buf_in, in_size, + buf_out, &out_size); + + clear_bit(HINIC5_MGMT_MSG_CB_RUNNING, + &pf_to_mgmt->mgmt_msg_cb_state[tmp_mod]); + + goto resp; + +unsupported: + out_size = sizeof(struct mgmt_msg_head); + ((struct mgmt_msg_head *)buf_out)->status = HINIC5_MGMT_CMD_UNSUPPORTED; + +resp: + if (need_resp) + send_mgmt_ack(pf_to_mgmt, mod, cmd, buf_out, out_size, msg_id); +} + +/** + * mgmt_resp_msg_handler - handler for response message from mgmt cpu + * @pf_to_mgmt: PF to MGMT channel + * @recv_msg: received message details + **/ +static void mgmt_resp_msg_handler(struct hinic5_msg_pf_to_mgmt *pf_to_mgmt, + struct hinic5_recv_msg *recv_msg) +{ + void *dev = pf_to_mgmt->hwdev->dev_hdl; + + /* delete async msg */ + if ((recv_msg->msg_id & ASYNC_MSG_FLAG) != 0) + return; + + spin_lock(&pf_to_mgmt->sync_event_lock); + if (recv_msg->msg_id == pf_to_mgmt->sync_msg_id && + pf_to_mgmt->event_flag == SEND_EVENT_START) { + pf_to_mgmt->event_flag = SEND_EVENT_SUCCESS; + complete(&recv_msg->recv_done); + } else if (recv_msg->msg_id != pf_to_mgmt->sync_msg_id) { + sdk_err(dev, "Send msg id(0x%x) recv msg id(0x%x) dismatch, event state=%d\n", + pf_to_mgmt->sync_msg_id, recv_msg->msg_id, + pf_to_mgmt->event_flag); + } else { + sdk_err(dev, "Wait timeout, send msg id(0x%x) recv msg id(0x%x), event state=%d!\n", + pf_to_mgmt->sync_msg_id, recv_msg->msg_id, + pf_to_mgmt->event_flag); + } + spin_unlock(&pf_to_mgmt->sync_event_lock); +} + +static void recv_mgmt_msg_work_handler(struct work_struct *work) +{ + struct hinic5_mgmt_msg_handle_work *mgmt_work = + container_of(work, struct hinic5_mgmt_msg_handle_work, work); + + mgmt_recv_msg_handler(mgmt_work->pf_to_mgmt, mgmt_work->mod, + mgmt_work->cmd, mgmt_work->msg, + mgmt_work->msg_len, mgmt_work->msg_id, + (mgmt_work->async_mgmt_to_pf == 0)); + + 
destroy_work(&mgmt_work->work);
+
+	kfree(mgmt_work->msg);
+	kfree(mgmt_work);
+}
+
+/* Validate one mgmt message fragment's sequence id and segment length and
+ * record the accepted position in @recv_msg.
+ * Returns false when the fragment is oversized, out of order, or belongs
+ * to a different message than the one being reassembled.
+ */
+static bool check_mgmt_head_info(struct hinic5_recv_msg *recv_msg,
+				 u8 seq_id, u8 seg_len, u16 msg_id)
+{
+	if (seq_id > MGMT_MSG_MAX_SEQ_ID || seg_len > SEGMENT_LEN ||
+	    (seq_id == MGMT_MSG_MAX_SEQ_ID && seg_len > MGMT_MSG_LAST_SEG_MAX_LEN))
+		return false;
+
+	if (seq_id == 0) {
+		/* First fragment starts a new message */
+		recv_msg->seq_id = seq_id;
+		recv_msg->msg_id = msg_id;
+	} else {
+		/* Later fragments must directly follow the previous one and
+		 * carry the same message id.
+		 */
+		if (seq_id != recv_msg->seq_id + 1 || msg_id != recv_msg->msg_id)
+			return false;
+
+		recv_msg->seq_id = seq_id;
+	}
+
+	return true;
+}
+
+/* Queue a work item handling one fully reassembled mgmt message.
+ * The payload is copied so @recv_msg can be reused for the next message;
+ * on allocation failure the message is silently dropped (best effort).
+ */
+static void init_mgmt_msg_work(struct hinic5_msg_pf_to_mgmt *pf_to_mgmt,
+			       struct hinic5_recv_msg *recv_msg)
+{
+	struct hinic5_mgmt_msg_handle_work *mgmt_work = NULL;
+	struct hinic5_hwdev *hwdev = pf_to_mgmt->hwdev;
+
+	mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL);
+	if (!mgmt_work)
+		return;
+
+	if (recv_msg->msg_len != 0) {
+		mgmt_work->msg = kzalloc(recv_msg->msg_len, GFP_KERNEL);
+		if (!mgmt_work->msg)
+			goto msg_alloc_err;
+
+		/* Copy only when a buffer was actually allocated: calling
+		 * memcpy with a NULL destination is undefined behavior even
+		 * when the length is 0.
+		 */
+		memcpy(mgmt_work->msg, recv_msg->msg, recv_msg->msg_len);
+	}
+
+	mgmt_work->pf_to_mgmt = pf_to_mgmt;
+	mgmt_work->msg_len = recv_msg->msg_len;
+
+	mgmt_work->msg_id = recv_msg->msg_id;
+	mgmt_work->mod = recv_msg->mod;
+	mgmt_work->cmd = recv_msg->cmd;
+	mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf;
+
+	INIT_WORK(&mgmt_work->work, recv_mgmt_msg_work_handler);
+	queue_work_on(hisdk5_get_work_cpu_affinity(hwdev, WORK_TYPE_MGMT_MSG),
+		      pf_to_mgmt->workq, &mgmt_work->work);
+	return;
+
+msg_alloc_err:
+	kfree(mgmt_work);
+}
+
+/**
+ * recv_mgmt_msg_handler - handle a message from mgmt cpu
+ * @pf_to_mgmt: PF to MGMT channel
+ * @header: the header of the message
+ * @recv_msg: received message details
+ **/
+static void recv_mgmt_msg_handler(struct hinic5_msg_pf_to_mgmt *pf_to_mgmt,
+				  u8 *header, struct hinic5_recv_msg *recv_msg)
+{
+	struct hinic5_hwdev *hwdev = pf_to_mgmt->hwdev;
+	u64 mbox_header = *((u64 *)header);
+	void *msg_body = (void
*)((uintptr_t)header + sizeof(mbox_header)); + u8 seq_id, seq_len, version; + u16 msg_id; + u32 offset; + u64 dir; + + version = hinic5_mbox_get_version(hwdev, &mbox_header); + /* Don't need to get anything from hw when cmd is async */ + dir = HINIC5_MSG_HEADER_GET(mbox_header, DIRECTION); + if (dir == HINIC5_MSG_RESPONSE && + ((HINIC5_MSG_HEADER_GET(mbox_header, MSG_ID) & ASYNC_MSG_FLAG) != 0)) + return; + + seq_len = hinic5_mbox_get_seg_len(version, &mbox_header); + seq_id = HINIC5_MSG_HEADER_GET(mbox_header, SEQID); + msg_id = HINIC5_MSG_HEADER_GET(mbox_header, MSG_ID); + if (!check_mgmt_head_info(recv_msg, seq_id, seq_len, msg_id)) { + sdk_err(hwdev->dev_hdl, "Mgmt msg sequence id and segment length check failed\n"); + sdk_err(hwdev->dev_hdl, + "Front seq_id: 0x%x,current seq_id: 0x%x, seg len: 0x%x, front msg_id: %u, cur: %u\n", + recv_msg->seq_id, seq_id, seq_len, recv_msg->msg_id, msg_id); + /* set seq_id to invalid seq_id */ + recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID; + return; + } + + offset = seq_id * SEGMENT_LEN; + memcpy((u8 *)recv_msg->msg + offset, msg_body, seq_len); + + if (HINIC5_MSG_HEADER_GET(mbox_header, LAST) == 0) + return; + + recv_msg->cmd = HINIC5_MSG_HEADER_GET(mbox_header, CMD); + recv_msg->mod = hinic5_mbox_get_mod_id(version, &mbox_header); + recv_msg->async_mgmt_to_pf = HINIC5_MSG_HEADER_GET(mbox_header, + NO_ACK); + recv_msg->msg_len = hinic5_mbox_get_msg_len(version, &mbox_header); + recv_msg->msg_id = msg_id; + recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID; + + if (HINIC5_MSG_HEADER_GET(mbox_header, DIRECTION) == + HINIC5_MSG_RESPONSE) { + mgmt_resp_msg_handler(pf_to_mgmt, recv_msg); + return; + } + + init_mgmt_msg_work(pf_to_mgmt, recv_msg); +} + +/** + * hinic5_mgmt_msg_aeqe_handler - handler for a mgmt message event + * @handle: PF to MGMT channel + * @header: the header of the message + * @size: unused + **/ +void hinic5_mgmt_msg_aeqe_handler(void *hwdev, u8 *header, u8 size) +{ + struct hinic5_hwdev *dev = (struct hinic5_hwdev *)hwdev; 
+ struct hinic5_msg_pf_to_mgmt *pf_to_mgmt = NULL; + struct hinic5_recv_msg *recv_msg = NULL; + bool is_send_dir = false; + + if ((HINIC5_MSG_HEADER_GET(*(u64 *)header, SOURCE) == + HINIC5_MSG_FROM_MBOX)) { + hinic5_mbox_func_aeqe_handler(hwdev, header, size); + return; + } + + pf_to_mgmt = dev->pf_to_mgmt; + if (!pf_to_mgmt) + return; + + is_send_dir = (HINIC5_MSG_HEADER_GET(*(u64 *)header, DIRECTION) == + HINIC5_MSG_DIRECT_SEND) ? true : false; + + recv_msg = is_send_dir ? &pf_to_mgmt->recv_msg_from_mgmt : + &pf_to_mgmt->recv_resp_msg_from_mgmt; + + recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg); +} + +/** + * alloc_recv_msg - allocate received message memory + * @recv_msg: pointer that will hold the allocated data + * Return: 0 - success, negative - failure + **/ +static int alloc_recv_msg(struct hinic5_recv_msg *recv_msg) +{ + recv_msg->seq_id = MGMT_MSG_MAX_SEQ_ID; + + recv_msg->msg = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!recv_msg->msg) + return -ENOMEM; + + return 0; +} + +/** + * free_recv_msg - free received message memory + * @recv_msg: pointer that holds the allocated data + **/ +static void free_recv_msg(struct hinic5_recv_msg *recv_msg) +{ + kfree(recv_msg->msg); +} + +/** + * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel + * @pf_to_mgmt: PF to MGMT channel + * Return: 0 - success, negative - failure + **/ +static int alloc_msg_buf(struct hinic5_msg_pf_to_mgmt *pf_to_mgmt) +{ + int err; + void *dev = pf_to_mgmt->hwdev->dev_hdl; + + err = alloc_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt); + if (err != 0) { + sdk_err(dev, "Failed to allocate recv msg\n"); + return err; + } + + err = alloc_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt); + if (err != 0) { + sdk_err(dev, "Failed to allocate resp recv msg\n"); + goto alloc_msg_for_resp_err; + } + + pf_to_mgmt->async_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL); + if (!pf_to_mgmt->async_msg_buf) { + err = -ENOMEM; + goto async_msg_buf_err; + } + + 
pf_to_mgmt->sync_msg_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
+	if (!pf_to_mgmt->sync_msg_buf) {
+		err = -ENOMEM;
+		goto sync_msg_buf_err;
+	}
+
+	pf_to_mgmt->mgmt_ack_buf = kzalloc(MAX_PF_MGMT_BUF_SIZE, GFP_KERNEL);
+	if (!pf_to_mgmt->mgmt_ack_buf) {
+		err = -ENOMEM;
+		goto ack_msg_buf_err;
+	}
+
+	return 0;
+
+ack_msg_buf_err:
+	kfree(pf_to_mgmt->sync_msg_buf);
+
+sync_msg_buf_err:
+	kfree(pf_to_mgmt->async_msg_buf);
+
+async_msg_buf_err:
+	free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
+
+alloc_msg_for_resp_err:
+	free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
+	return err;
+}
+
+/**
+ * free_msg_buf - free all the message buffers of PF to MGMT channel
+ * @pf_to_mgmt: PF to MGMT channel
+ **/
+static void free_msg_buf(struct hinic5_msg_pf_to_mgmt *pf_to_mgmt)
+{
+	kfree(pf_to_mgmt->mgmt_ack_buf);
+	kfree(pf_to_mgmt->sync_msg_buf);
+	kfree(pf_to_mgmt->async_msg_buf);
+
+	free_recv_msg(&pf_to_mgmt->recv_resp_msg_from_mgmt);
+	free_recv_msg(&pf_to_mgmt->recv_msg_from_mgmt);
+}
+
+/**
+ * hinic5_pf_to_mgmt_init - initialize PF to MGMT channel
+ * @hwdev: the pointer to hw device
+ * Return: 0 - success, negative - failure
+ **/
+int hinic5_pf_to_mgmt_init(struct hinic5_hwdev *hwdev)
+{
+	struct hinic5_msg_pf_to_mgmt *pf_to_mgmt = NULL;
+	void *dev = hwdev->dev_hdl;
+	int err;
+
+	pf_to_mgmt = kzalloc(sizeof(*pf_to_mgmt), GFP_KERNEL);
+	if (!pf_to_mgmt)
+		return -ENOMEM;
+
+	hwdev->pf_to_mgmt = pf_to_mgmt;
+	pf_to_mgmt->hwdev = hwdev;
+	spin_lock_init(&pf_to_mgmt->async_msg_lock);
+	spin_lock_init(&pf_to_mgmt->sync_event_lock);
+	sema_init(&pf_to_mgmt->sync_msg_lock, 1);
+	pf_to_mgmt->workq = create_singlethread_workqueue(HINIC5_MGMT_WQ_NAME);
+	if (!pf_to_mgmt->workq) {
+		sdk_err(dev, "Failed to initialize MGMT workqueue\n");
+		err = -ENOMEM;
+		goto create_mgmt_workq_err;
+	}
+
+	err = alloc_msg_buf(pf_to_mgmt);
+	if (err != 0) {
+		sdk_err(dev, "Failed to allocate msg buffers\n");
+		goto alloc_msg_buf_err;
+	}
+
+	err = hinic5_api_cmd_init(hwdev, pf_to_mgmt->cmd_chain);
+	if (err != 0) {
+		sdk_err(dev, "Failed to init the api cmd chains\n");
+		goto api_cmd_init_err;
+	}
+
+	return 0;
+
+api_cmd_init_err:
+	free_msg_buf(pf_to_mgmt);
+
+alloc_msg_buf_err:
+	destroy_workqueue(pf_to_mgmt->workq);
+
+create_mgmt_workq_err:
+	spin_lock_deinit(&pf_to_mgmt->sync_event_lock);
+	spin_lock_deinit(&pf_to_mgmt->async_msg_lock);
+	sema_deinit(&pf_to_mgmt->sync_msg_lock);
+	/* The pointer was published on hwdev before init completed; clear it
+	 * so nothing can reach the channel through hwdev after the free.
+	 */
+	hwdev->pf_to_mgmt = NULL;
+	kfree(pf_to_mgmt);
+
+	return err;
+}
+
+/**
+ * hinic5_pf_to_mgmt_free - free PF to MGMT channel
+ * @hwdev: the pointer to hw device
+ **/
+void hinic5_pf_to_mgmt_free(struct hinic5_hwdev *hwdev)
+{
+	struct hinic5_msg_pf_to_mgmt *pf_to_mgmt = hwdev->pf_to_mgmt;
+
+	/* destroy workqueue before free related pf_to_mgmt resources in case of
+	 * illegal resource access
+	 */
+	destroy_workqueue(pf_to_mgmt->workq);
+	hinic5_api_cmd_free(hwdev, pf_to_mgmt->cmd_chain);
+
+	free_msg_buf(pf_to_mgmt);
+	spin_lock_deinit(&pf_to_mgmt->sync_event_lock);
+	spin_lock_deinit(&pf_to_mgmt->async_msg_lock);
+	sema_deinit(&pf_to_mgmt->sync_msg_lock);
+	/* Avoid leaving a dangling pointer on hwdev after teardown */
+	hwdev->pf_to_mgmt = NULL;
+	kfree(pf_to_mgmt);
+}
+
+void hinic5_flush_mgmt_workq(void *hwdev)
+{
+	struct hinic5_hwdev *dev = (struct hinic5_hwdev *)hwdev;
+
+	flush_workqueue(dev->aeqs->workq);
+
+	if (hinic5_func_type(dev) != TYPE_VF)
+		flush_workqueue(dev->pf_to_mgmt->workq);
+}
+
+int hinic5_api_cmd_read_ack(void *hwdev, u8 dest, const void *cmd,
+			    u16 size, void *ack, u16 ack_size)
+{
+	struct hinic5_msg_pf_to_mgmt *pf_to_mgmt = NULL;
+	struct hinic5_api_cmd_chain *chain = NULL;
+
+	if (!hwdev || !cmd || (ack_size != 0 && !ack) || size > MAX_PF_MGMT_BUF_SIZE)
+		return -EINVAL;
+
+	if (!COMM_SUPPORT_API_CHAIN((struct hinic5_hwdev *)hwdev))
+		return -EPERM;
+
+	pf_to_mgmt = ((struct hinic5_hwdev *)hwdev)->pf_to_mgmt;
+	if (!pf_to_mgmt)
+		return -EINVAL;
+
+	chain = pf_to_mgmt->cmd_chain[HINIC5_API_CMD_POLL_READ];
+
+	if (!hinic5_is_chip_present(hwdev))
+		return -EPERM;
+
+	return hinic5_api_cmd_read(chain,
dest, cmd, size, ack, ack_size); +} + +/** + * api cmd write or read bypass default use poll, if want to use aeq interrupt, + * please set wb_trigger_aeqe to 1 + **/ +int hinic5_api_cmd_write_nack(void *hwdev, u8 dest, const void *cmd, u16 size) +{ + struct hinic5_msg_pf_to_mgmt *pf_to_mgmt = NULL; + struct hinic5_api_cmd_chain *chain = NULL; + + if (!hwdev || size == 0 || !cmd || size > MAX_PF_MGMT_BUF_SIZE) + return -EINVAL; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic5_hwdev *)hwdev)) + return -EPERM; + + pf_to_mgmt = ((struct hinic5_hwdev *)hwdev)->pf_to_mgmt; + chain = pf_to_mgmt->cmd_chain[HINIC5_API_CMD_POLL_WRITE]; + + if (!hinic5_is_chip_present(hwdev)) + return -EPERM; + + return hinic5_api_cmd_write(chain, dest, cmd, size); +} + +static int get_clp_reg(void *hwdev, enum clp_data_type data_type, + enum clp_reg_type reg_type, u32 *reg_addr) +{ + switch (reg_type) { + case HINIC5_CLP_BA_HOST: + *reg_addr = (data_type == HINIC5_CLP_REQ_HOST) ? + HINIC5_CLP_REG(REQBASE) : + HINIC5_CLP_REG(RSPBASE); + break; + + case HINIC5_CLP_SIZE_HOST: + *reg_addr = HINIC5_CLP_REG(SIZE); + break; + + case HINIC5_CLP_LEN_HOST: + *reg_addr = (data_type == HINIC5_CLP_REQ_HOST) ? 
+ HINIC5_CLP_REG(REQ) : HINIC5_CLP_REG(RSP); + break; + + case HINIC5_CLP_START_REQ_HOST: + *reg_addr = HINIC5_CLP_REG(REQ); + break; + + case HINIC5_CLP_READY_RSP_HOST: + *reg_addr = HINIC5_CLP_REG(RSP); + break; + + default: + *reg_addr = 0; + break; + } + if (*reg_addr == 0) + return -EINVAL; + + return 0; +} + +static inline int clp_param_valid(struct hinic5_hwdev *hwdev, + enum clp_data_type data_type, + enum clp_reg_type reg_type) +{ + if (data_type == HINIC5_CLP_REQ_HOST && + reg_type == HINIC5_CLP_READY_RSP_HOST) + return -EINVAL; + + if (data_type == HINIC5_CLP_RSP_HOST && + reg_type == HINIC5_CLP_START_REQ_HOST) + return -EINVAL; + + return 0; +} + +static u32 get_clp_reg_value(struct hinic5_hwdev *hwdev, + enum clp_data_type data_type, + enum clp_reg_type reg_type, u32 reg_addr) +{ + u32 value; + + value = hinic5_hwif_read_reg(hwdev->hwif, reg_addr); + + switch (reg_type) { + case HINIC5_CLP_BA_HOST: + value = ((value >> HINIC5_CLP_OFFSET(BASE)) & + HINIC5_CLP_MASK(BASE)); + break; + + case HINIC5_CLP_SIZE_HOST: + if (data_type == HINIC5_CLP_REQ_HOST) + value = ((value >> HINIC5_CLP_OFFSET(REQ_SIZE)) & + HINIC5_CLP_MASK(SIZE)); + else + value = ((value >> HINIC5_CLP_OFFSET(RSP_SIZE)) & + HINIC5_CLP_MASK(SIZE)); + break; + + case HINIC5_CLP_LEN_HOST: + value = ((value >> HINIC5_CLP_OFFSET(LEN)) & + HINIC5_CLP_MASK(LEN)); + break; + + case HINIC5_CLP_START_REQ_HOST: + value = ((value >> HINIC5_CLP_OFFSET(START)) & + HINIC5_CLP_MASK(START)); + break; + + case HINIC5_CLP_READY_RSP_HOST: + value = ((value >> HINIC5_CLP_OFFSET(READY)) & + HINIC5_CLP_MASK(READY)); + break; + + default: + break; + } + + return value; +} + +static int hinic5_read_clp_reg(struct hinic5_hwdev *hwdev, + enum clp_data_type data_type, + enum clp_reg_type reg_type, u32 *read_value) +{ + u32 reg_addr; + int err; + + err = clp_param_valid(hwdev, data_type, reg_type); + if (err != 0) + return err; + + err = get_clp_reg(hwdev, data_type, reg_type, ®_addr); + if (err != 0) + return err; + + 
*read_value = get_clp_reg_value(hwdev, data_type, reg_type, reg_addr); + + return 0; +} + +static int check_data_type(enum clp_data_type data_type, + enum clp_reg_type reg_type) +{ + if (data_type == HINIC5_CLP_REQ_HOST && + reg_type == HINIC5_CLP_READY_RSP_HOST) + return -EINVAL; + if (data_type == HINIC5_CLP_RSP_HOST && + reg_type == HINIC5_CLP_START_REQ_HOST) + return -EINVAL; + + return 0; +} + +static int check_reg_value(enum clp_reg_type reg_type, u32 value) +{ + if (reg_type == HINIC5_CLP_BA_HOST && + value > HINIC5_CLP_SRAM_BASE_REG_MAX) + return -EINVAL; + + if (reg_type == HINIC5_CLP_SIZE_HOST && + value > HINIC5_CLP_SRAM_SIZE_REG_MAX) + return -EINVAL; + + if (reg_type == HINIC5_CLP_LEN_HOST && + value > HINIC5_CLP_LEN_REG_MAX) + return -EINVAL; + + if ((reg_type == HINIC5_CLP_START_REQ_HOST || + reg_type == HINIC5_CLP_READY_RSP_HOST) && + value > HINIC5_CLP_START_OR_READY_REG_MAX) + return -EINVAL; + + return 0; +} + +static int hinic5_check_clp_init_status(struct hinic5_hwdev *hwdev) +{ + int err; + u32 reg_value = 0; + + err = hinic5_read_clp_reg(hwdev, HINIC5_CLP_REQ_HOST, + HINIC5_CLP_BA_HOST, ®_value); + if (err != 0 || reg_value == 0) { + sdk_err(hwdev->dev_hdl, "Wrong req ba value: 0x%x\n", + reg_value); + return -EINVAL; + } + + err = hinic5_read_clp_reg(hwdev, HINIC5_CLP_RSP_HOST, + HINIC5_CLP_BA_HOST, ®_value); + if (err != 0 || reg_value == 0) { + sdk_err(hwdev->dev_hdl, "Wrong rsp ba value: 0x%x\n", + reg_value); + return -EINVAL; + } + + err = hinic5_read_clp_reg(hwdev, HINIC5_CLP_REQ_HOST, + HINIC5_CLP_SIZE_HOST, ®_value); + if (err != 0 || reg_value == 0) { + sdk_err(hwdev->dev_hdl, "Wrong req size\n"); + return -EINVAL; + } + + err = hinic5_read_clp_reg(hwdev, HINIC5_CLP_RSP_HOST, + HINIC5_CLP_SIZE_HOST, ®_value); + if (err != 0 || reg_value == 0) { + sdk_err(hwdev->dev_hdl, "Wrong rsp size\n"); + return -EINVAL; + } + + return 0; +} + +static void hinic5_write_clp_reg(struct hinic5_hwdev *hwdev, + enum clp_data_type data_type, + enum 
clp_reg_type reg_type, u32 value) +{ + u32 reg_addr, reg_value; + + if (check_data_type(data_type, reg_type) != 0) + return; + + if (check_reg_value(reg_type, value) != 0) + return; + + if (get_clp_reg(hwdev, data_type, reg_type, ®_addr) != 0) + return; + + reg_value = hinic5_hwif_read_reg(hwdev->hwif, reg_addr); + + switch (reg_type) { + case HINIC5_CLP_LEN_HOST: + reg_value = reg_value & + (~(HINIC5_CLP_MASK(LEN) << HINIC5_CLP_OFFSET(LEN))); + reg_value = reg_value | (value << HINIC5_CLP_OFFSET(LEN)); + break; + + case HINIC5_CLP_START_REQ_HOST: + reg_value = reg_value & + (~(HINIC5_CLP_MASK(START) << + HINIC5_CLP_OFFSET(START))); + reg_value = reg_value | (value << HINIC5_CLP_OFFSET(START)); + break; + + case HINIC5_CLP_READY_RSP_HOST: + reg_value = reg_value & + (~(HINIC5_CLP_MASK(READY) << + HINIC5_CLP_OFFSET(READY))); + reg_value = reg_value | (value << HINIC5_CLP_OFFSET(READY)); + break; + + default: + return; + } + + hinic5_hwif_write_reg(hwdev->hwif, reg_addr, reg_value); +} + +static int hinic5_read_clp_data(struct hinic5_hwdev *hwdev, + void *buf_out, u16 *out_size) +{ + int err; + u32 reg = HINIC5_CLP_DATA(RSP); + u32 ready, delay_cnt; + u32 *ptr = (u32 *)buf_out; + u32 temp_out_size = 0; + + err = hinic5_read_clp_reg(hwdev, HINIC5_CLP_RSP_HOST, + HINIC5_CLP_READY_RSP_HOST, &ready); + if (err != 0) + return err; + + delay_cnt = 0; + while (ready == 0) { + usleep_range(9000, 10000); /* sleep 9000 us ~ 10000 us */ + delay_cnt++; + err = hinic5_read_clp_reg(hwdev, HINIC5_CLP_RSP_HOST, + HINIC5_CLP_READY_RSP_HOST, &ready); + if (err != 0 || delay_cnt > HINIC5_CLP_DELAY_CNT_MAX) { + sdk_err(hwdev->dev_hdl, "Timeout with delay_cnt: %u\n", + delay_cnt); + return -EINVAL; + } + } + + err = hinic5_read_clp_reg(hwdev, HINIC5_CLP_RSP_HOST, + HINIC5_CLP_LEN_HOST, &temp_out_size); + if (err != 0) + return err; + + if (temp_out_size > HINIC5_CLP_SRAM_SIZE_REG_MAX || temp_out_size == 0) { + sdk_err(hwdev->dev_hdl, "Invalid temp_out_size: %u\n", + temp_out_size); + 
return -EINVAL; + } + + *out_size = (u16)temp_out_size; + for (; temp_out_size > 0; temp_out_size--) { + *ptr = hinic5_hwif_read_reg(hwdev->hwif, reg); + ptr++; + /* read 4 bytes every time */ + reg = reg + 4; + } + + hinic5_write_clp_reg(hwdev, HINIC5_CLP_RSP_HOST, + HINIC5_CLP_READY_RSP_HOST, (u32)0x0); + hinic5_write_clp_reg(hwdev, HINIC5_CLP_RSP_HOST, HINIC5_CLP_LEN_HOST, + (u32)0x0); + + return 0; +} + +static int hinic5_write_clp_data(struct hinic5_hwdev *hwdev, + void *buf_in, u16 in_size) +{ + int err; + u32 reg = HINIC5_CLP_DATA(REQ); + u32 start = 1; + u32 delay_cnt = 0; + u32 *ptr = (u32 *)buf_in; + u16 size_in = in_size; + + err = hinic5_read_clp_reg(hwdev, HINIC5_CLP_REQ_HOST, + HINIC5_CLP_START_REQ_HOST, &start); + if (err != 0) + return err; + + while (start == 1) { + usleep_range(9000, 10000); /* sleep 9000 us ~ 10000 us */ + delay_cnt++; + err = hinic5_read_clp_reg(hwdev, HINIC5_CLP_REQ_HOST, + HINIC5_CLP_START_REQ_HOST, &start); + if (err != 0 || delay_cnt > HINIC5_CLP_DELAY_CNT_MAX) + return -EINVAL; + } + + hinic5_write_clp_reg(hwdev, HINIC5_CLP_REQ_HOST, + HINIC5_CLP_LEN_HOST, size_in); + hinic5_write_clp_reg(hwdev, HINIC5_CLP_REQ_HOST, + HINIC5_CLP_START_REQ_HOST, (u32)0x1); + + for (; size_in > 0; size_in--) { + hinic5_hwif_write_reg(hwdev->hwif, reg, *ptr); + ptr++; + reg = reg + sizeof(u32); + } + + return 0; +} + +static void hinic5_clear_clp_data(struct hinic5_hwdev *hwdev, + enum clp_data_type data_type) +{ + u32 reg = (data_type == HINIC5_CLP_REQ_HOST) ? 
+ HINIC5_CLP_DATA(REQ) : HINIC5_CLP_DATA(RSP); + u32 count = HINIC5_CLP_INPUT_BUF_LEN_HOST / HINIC5_CLP_DATA_UNIT_HOST; + + for (; count > 0; count--) { + hinic5_hwif_write_reg(hwdev->hwif, reg, 0x0); + reg = reg + sizeof(u32); + } +} + +static int clp_to_mgmt_response(void *hwdev, void *buf_out, const u16 *out_size) +{ + struct hinic5_hwdev *dev = hwdev; + u8 *clp_msg_buf = NULL; + u64 header; + u16 real_size = 0; + int ret; + + clp_msg_buf = ((struct hinic5_hwdev *)hwdev)->clp_pf_to_mgmt->clp_msg_buf; + + memset(clp_msg_buf, 0x0, HINIC5_CLP_INPUT_BUF_LEN_HOST); + ret = hinic5_read_clp_data(hwdev, clp_msg_buf, &real_size); + hinic5_clear_clp_data(dev, HINIC5_CLP_RSP_HOST); + if (ret != 0) { + sdk_err(dev->dev_hdl, "Read clp response failed\n"); + return -EINVAL; + } + + real_size = (u16)((real_size * HINIC5_CLP_DATA_UNIT_HOST) & 0xffff); + if (real_size <= sizeof(header) || real_size > HINIC5_CLP_INPUT_BUF_LEN_HOST) { + sdk_err(dev->dev_hdl, "Invalid response size: %u", real_size); + return -EINVAL; + } + real_size = real_size - sizeof(header); + if (real_size != *out_size) { + sdk_err(dev->dev_hdl, "Invalid real_size:%u, out_size: %u\n", real_size, *out_size); + return -EINVAL; + } + + memcpy(buf_out, (clp_msg_buf + sizeof(header)), real_size); + return 0; +} + +int hinic5_pf_clp_to_mgmt(void *hwdev, u8 mod, u16 cmd, const void *buf_in, + u16 in_size, void *buf_out, const u16 *out_size) +{ + struct hinic5_clp_pf_to_mgmt *clp_pf_to_mgmt = NULL; + struct hinic5_hwdev *dev = hwdev; + u8 *clp_msg_buf = NULL; + u64 header = 0; + u16 real_size; + int err; + + if (!COMM_SUPPORT_CLP(dev)) + return -EPERM; + + clp_pf_to_mgmt = ((struct hinic5_hwdev *)hwdev)->clp_pf_to_mgmt; + if (!clp_pf_to_mgmt) + return -EPERM; + + clp_msg_buf = clp_pf_to_mgmt->clp_msg_buf; + + /* 4 bytes alignment */ + real_size = in_size + (u16)sizeof(header) + + (((in_size % HINIC5_CLP_DATA_UNIT_HOST) != 0) ? 
HINIC5_CLP_DATA_UNIT_HOST : 0); + real_size = real_size / HINIC5_CLP_DATA_UNIT_HOST; + + if (real_size > (HINIC5_CLP_INPUT_BUF_LEN_HOST / HINIC5_CLP_DATA_UNIT_HOST)) { + sdk_err(dev->dev_hdl, "Invalid real_size: %u\n", real_size); + return -EINVAL; + } + down(&clp_pf_to_mgmt->clp_msg_lock); + + err = hinic5_check_clp_init_status(dev); + if (err != 0) { + sdk_err(dev->dev_hdl, "Check clp init status failed\n"); + goto unlock_clp_msg; + } + + hinic5_clear_clp_data(dev, HINIC5_CLP_RSP_HOST); + hinic5_write_clp_reg(dev, HINIC5_CLP_RSP_HOST, HINIC5_CLP_READY_RSP_HOST, 0x0); + + /* Send request */ + memset(clp_msg_buf, 0x0, HINIC5_CLP_INPUT_BUF_LEN_HOST); + clp_prepare_header(dev, &header, in_size, mod, cmd); + + memcpy(clp_msg_buf, &header, sizeof(header)); + + clp_msg_buf += sizeof(header); + memcpy(clp_msg_buf, buf_in, in_size); + + hinic5_clear_clp_data(dev, HINIC5_CLP_REQ_HOST); + if (hinic5_write_clp_data(hwdev, clp_pf_to_mgmt->clp_msg_buf, real_size) != 0) { + sdk_err(dev->dev_hdl, "Send clp request failed\n"); + err = -EINVAL; + goto unlock_clp_msg; + } + + /* Get response */ + err = clp_to_mgmt_response(hwdev, buf_out, out_size); + +unlock_clp_msg: + up(&clp_pf_to_mgmt->clp_msg_lock); + return err; +} + +int hinic5_clp_to_mgmt(void *hwdev, u8 mod, u16 cmd, const void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) + +{ + struct hinic5_hwdev *dev = hwdev; + int err; + + if (!dev) + return -EINVAL; + + if (!hinic5_is_chip_present(hwdev)) + return -EPERM; + + if (hinic5_func_type(hwdev) == TYPE_VF) + return -EINVAL; + + if (!COMM_SUPPORT_CLP(dev)) + return -EPERM; + + err = hinic5_pf_clp_to_mgmt(dev, mod, cmd, buf_in, in_size, buf_out, + out_size); + + return err; +} + +int hinic5_clp_pf_to_mgmt_init(struct hinic5_hwdev *hwdev) +{ + struct hinic5_clp_pf_to_mgmt *clp_pf_to_mgmt = NULL; + + if (!COMM_SUPPORT_CLP(hwdev)) + return 0; + + clp_pf_to_mgmt = kzalloc(sizeof(*clp_pf_to_mgmt), GFP_KERNEL); + if (!clp_pf_to_mgmt) + return -ENOMEM; + + 
clp_pf_to_mgmt->clp_msg_buf = kzalloc(HINIC5_CLP_INPUT_BUF_LEN_HOST, + GFP_KERNEL); + if (!clp_pf_to_mgmt->clp_msg_buf) { + kfree(clp_pf_to_mgmt); + return -ENOMEM; + } + sema_init(&clp_pf_to_mgmt->clp_msg_lock, 1); + + hwdev->clp_pf_to_mgmt = clp_pf_to_mgmt; + + return 0; +} + +void hinic5_clp_pf_to_mgmt_free(struct hinic5_hwdev *hwdev) +{ + struct hinic5_clp_pf_to_mgmt *clp_pf_to_mgmt = hwdev->clp_pf_to_mgmt; + + if (!COMM_SUPPORT_CLP(hwdev)) + return; + + sema_deinit(&clp_pf_to_mgmt->clp_msg_lock); + kfree(clp_pf_to_mgmt->clp_msg_buf); + kfree(clp_pf_to_mgmt); +} diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_sm_lt.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_sm_lt.h new file mode 100644 index 00000000..67c020dc --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_sm_lt.h @@ -0,0 +1,160 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef CHIPIF_SM_LT_H +#define CHIPIF_SM_LT_H + +#include <linux/types.h> + +#define SM_LT_LOAD (0x12) +#define SM_LT_STORE (0x14) + +#define SM_LT_NUM_OFFSET 13 +#define SM_LT_ABUF_FLG_OFFSET 12 +#define SM_LT_BC_OFFSET 11 + +#define SM_LT_ENTRY_16B 16 +#define SM_LT_ENTRY_32B 32 +#define SM_LT_ENTRY_48B 48 +#define SM_LT_ENTRY_64B 64 + +#define TBL_LT_OFFSET_DEFAULT 0 + +#define SM_CACHE_LINE_SHFT 4 /* log2(16) */ +#define SM_CACHE_LINE_SIZE 16 /* the size of cache line */ + +#define MAX_SM_LT_READ_LINE_NUM 4 +#define MAX_SM_LT_WRITE_LINE_NUM 3 + +#define SM_LT_FULL_BYTEENB 0xFFFF + +#define TBL_GET_ENB3_MASK(bitmask) ((u16)(((bitmask) >> 32) & 0xFFFF)) +#define TBL_GET_ENB2_MASK(bitmask) ((u16)(((bitmask) >> 16) & 0xFFFF)) +#define TBL_GET_ENB1_MASK(bitmask) ((u16)((bitmask) & 0xFFFF)) + +enum { + SM_LT_NUM_0 = 0, /* lt num = 0, load/store 16B */ + SM_LT_NUM_1, /* lt num = 1, load/store 32B */ + SM_LT_NUM_2, /* lt num = 2, load/store 48B */ + SM_LT_NUM_3 /* lt num = 3, load 64B */ +}; + +/* lt load request */ 
+union sml_lt_req_head { + struct { + u32 offset : 8; + u32 pad : 3; + u32 bc : 1; + u32 abuf_flg : 1; + u32 num : 2; + u32 ack : 1; + u32 op_id : 5; + u32 instance : 6; + u32 src : 5; + } bs; + + u32 value; +}; + +struct sml_lt_load_req { + u32 extra; + union sml_lt_req_head head; + u32 index; + u32 pad0; + u32 pad1; +}; + +struct sml_lt_store_req { + u32 extra; + union sml_lt_req_head head; + u32 index; + u32 byte_enb[2]; + u8 write_data[48]; +}; + +enum { + SM_LT_OFFSET_1 = 1, + SM_LT_OFFSET_2, + SM_LT_OFFSET_3, + SM_LT_OFFSET_4, + SM_LT_OFFSET_5, + SM_LT_OFFSET_6, + SM_LT_OFFSET_7, + SM_LT_OFFSET_8, + SM_LT_OFFSET_9, + SM_LT_OFFSET_10, + SM_LT_OFFSET_11, + SM_LT_OFFSET_12, + SM_LT_OFFSET_13, + SM_LT_OFFSET_14, + SM_LT_OFFSET_15 +}; + +enum HINIC_CSR_API_DATA_OPERATION_ID { + HINIC_CSR_OPERATION_WRITE_CSR = 0x1E, + HINIC_CSR_OPERATION_READ_CSR = 0x1F +}; + +enum HINIC_CSR_API_DATA_NEED_RESPONSE_DATA { + HINIC_CSR_NO_RESP_DATA = 0, + HINIC_CSR_NEED_RESP_DATA = 1 +}; + +enum HINIC_CSR_API_DATA_DATA_SIZE { + HINIC_CSR_DATA_SZ_32 = 0, + HINIC_CSR_DATA_SZ_64 = 1 +}; + +struct hinic_csr_request_api_data { + u32 dw0; + + union { + struct { + u32 reserved1 : 13; + /* this field indicates the write/read data size: + * 2'b00: 32 bits + * 2'b01: 64 bits + * 2'b10~2'b11:reserved + */ + u32 data_size : 2; + /* this field indicates that requestor expect receive a + * response data or not. + * 1'b0: expect not to receive a response data. + * 1'b1: expect to receive a response data. + */ + u32 need_response : 1; + /* this field indicates the operation that the requestor + * expected. + * 5'b1_1110: write value to csr space. + * 5'b1_1111: read register from csr space. + */ + u32 operation_id : 5; + u32 reserved2 : 6; + /* this field specifies the Src node ID for this API + * request message. + */ + u32 src_node_id : 5; + } bits; + + u32 val32; + } dw1; + + union { + struct { + /* it specifies the CSR address. 
*/ + u32 csr_addr : 26; + u32 reserved3 : 6; + } bits; + + u32 val32; + } dw2; + + /* if data_size=2'b01, it is high 32 bits of write data. else, it is + * 32'hFFFF_FFFF. + */ + u32 csr_write_data_h; + /* the low 32 bits of write data. */ + u32 csr_write_data_l; +}; +#endif + diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_sml_lt.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_sml_lt.c new file mode 100644 index 00000000..e9651b09 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_sml_lt.c @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/device.h> +#include <linux/spinlock.h> +#include <linux/slab.h> +#include <linux/module.h> + +#include "ossl_knl.h" +#include "hinic5_common.h" +#include "hinic5_sm_lt.h" +#include "hinic5_hw.h" +#include "hinic5_hwdev.h" +#include "hinic5_api_cmd.h" +#include "hinic5_mgmt.h" + +#define ACK 1 +#define NOACK 0 + +#define LT_LOAD16_API_SIZE (16 + 4) +#define LT_STORE16_API_SIZE (32 + 4) + +#ifndef HTONL +#define HTONL(x) \ + ((((x) & 0x000000ff) << 24) \ + | (((x) & 0x0000ff00) << 8) \ + | (((x) & 0x00ff0000) >> 8) \ + | (((x) & 0xff000000) >> 24)) +#endif + +static inline void sm_lt_build_head(union sml_lt_req_head *head, + u8 instance_id, + u8 op_id, u8 ack) +{ + head->value = 0; + head->bs.instance = instance_id; + head->bs.op_id = op_id; + head->bs.ack = ack; + head->bs.num = 0; + head->bs.abuf_flg = 0; + head->bs.bc = 1; + head->bs.offset = 0; + head->value = HTONL((head->value)); +} + +static inline void sm_lt_load_build_req(struct sml_lt_load_req *req, + u8 instance_id, + u8 op_id, u8 ack, + u32 lt_index) +{ + sm_lt_build_head(&req->head, instance_id, op_id, ack); + req->extra = 0; + req->index = lt_index; + req->index = HTONL(req->index); + req->pad0 = 0; + req->pad1 = 0; +} + +static void sml_lt_store_data(u32 *dst, const u32 
*src, u8 num) +{ + u32 sm_lt_idx; + + if (num > SM_LT_NUM_2) + return; + for (sm_lt_idx = 0; sm_lt_idx <= num; sm_lt_idx++) { + // 16Byte each + u32 offset = sm_lt_idx * SM_LT_OFFSET_4; + *(dst + SM_LT_OFFSET_3 + offset) = *(src + SM_LT_OFFSET_3 + offset); + *(dst + SM_LT_OFFSET_2 + offset) = *(src + SM_LT_OFFSET_2 + offset); + *(dst + SM_LT_OFFSET_1 + offset) = *(src + SM_LT_OFFSET_1 + offset); + *(dst + offset) = *(src + offset); + } +} + +static inline void sm_lt_store_build_req(struct sml_lt_store_req *req, + u8 instance_id, + u8 op_id, u8 ack, + u32 lt_index, + u16 byte_enb1, + u8 *data) +{ + sm_lt_build_head(&req->head, instance_id, op_id, ack); + req->index = lt_index; + req->index = HTONL(req->index); + req->extra = 0; + req->byte_enb[0] = 0; + req->byte_enb[0] = HTONL(req->byte_enb[0]); + req->byte_enb[1] = HTONL(byte_enb1); + sml_lt_store_data((u32 *)req->write_data, (u32 *)(void *)data, 0); +} + +int hinic5_dbg_lt_rd_16byte(void *hwdev, u8 dest, u8 instance, + u32 lt_index, u8 *data) +{ + struct sml_lt_load_req req; + int ret; + + if (!hwdev) + return -EFAULT; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic5_hwdev *)hwdev)) + return -EPERM; + + sm_lt_load_build_req(&req, instance, SM_LT_LOAD, ACK, lt_index); + + ret = hinic5_api_cmd_read_ack(hwdev, dest, (u8 *)(&req), + LT_LOAD16_API_SIZE, (void *)data, 0x10); + if (ret != 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "Read linear table 16byte fail, err: %d\n", ret); + return -EFAULT; + } + + return 0; +} + +int hinic5_dbg_lt_wr_16byte_mask(void *hwdev, u8 dest, u8 instance, + u32 lt_index, u8 *data, u16 mask) +{ + struct sml_lt_store_req req; + int ret; + + if (!hwdev || !data) + return -EINVAL; + + if (!COMM_SUPPORT_API_CHAIN((struct hinic5_hwdev *)hwdev)) + return -EPERM; + + sm_lt_store_build_req(&req, instance, SM_LT_STORE, NOACK, lt_index, + mask, data); + + ret = hinic5_api_cmd_write_nack(hwdev, dest, &req, LT_STORE16_API_SIZE); + if (ret != 0) { + sdk_err(((struct hinic5_hwdev 
*)hwdev)->dev_hdl, + "Write linear table 16byte fail, err: %d\n", ret); + return -EFAULT; + } + + return 0; +} + diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_wq.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_wq.c new file mode 100644 index 00000000..3992269a --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/hwif/hinic5_wq.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/kernel.h> +#include <linux/dma-mapping.h> +#include <linux/device.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +#include "ossl_knl.h" +#include "hinic5_common.h" +#include "hinic5_hwdev.h" +#include "hinic5_wq.h" + +#define WQ_MIN_DEPTH 64 +#define WQ_MAX_DEPTH 65536 +#define WQ_MAX_NUM_PAGES (PAGE_SIZE / sizeof(u64)) + +static int wq_init_wq_block(struct hinic5_wq *wq) +{ + int i; + + if (WQ_IS_0_LEVEL_CLA(wq)) { + wq->wq_block_paddr = wq->wq_pages[0].align_paddr; + wq->wq_block_vaddr = wq->wq_pages[0].align_vaddr; + + return 0; + } + + if (wq->num_wq_pages > WQ_MAX_NUM_PAGES) { + sdk_err(wq->dev_hdl, "num_wq_pages exceed limit: %lu\n", + WQ_MAX_NUM_PAGES); + return -EFAULT; + } + + wq->wq_block_vaddr = dma_zalloc_coherent(wq->dev_hdl, PAGE_SIZE, + &wq->wq_block_paddr, + GFP_KERNEL); + if (!wq->wq_block_vaddr) { + sdk_err(wq->dev_hdl, "Failed to alloc wq block\n"); + return -ENOMEM; + } + + for (i = 0; i < wq->num_wq_pages; i++) + wq->wq_block_vaddr[i] = + cpu_to_be64(wq->wq_pages[i].align_paddr); + + return 0; +} + +static int wq_alloc_pages(struct hinic5_wq *wq) +{ + int i, page_idx, err; + u32 wqe_page_size_align = ALIGN(wq->wq_page_size, PAGE_SIZE); + + wq->wq_pages = kcalloc(wq->num_wq_pages, sizeof(*wq->wq_pages), + GFP_KERNEL); + if (!wq->wq_pages) + return -ENOMEM; + + for (page_idx = 0; page_idx < wq->num_wq_pages; page_idx++) { + err 
= hinic5_dma_zalloc_coherent_align(wq->dev_hdl, + wqe_page_size_align, + wqe_page_size_align, + GFP_KERNEL, + &wq->wq_pages[page_idx]); + if (err != 0) { + sdk_err(wq->dev_hdl, "Failed to alloc wq page\n"); + goto free_wq_pages; + } + } + + err = wq_init_wq_block(wq); + if (err != 0) + goto free_wq_pages; + + return 0; + +free_wq_pages: + for (i = 0; i < page_idx; i++) + hinic5_dma_free_coherent_align(wq->dev_hdl, &wq->wq_pages[i]); + + kfree(wq->wq_pages); + wq->wq_pages = NULL; + + return -ENOMEM; +} + +static void wq_free_pages(struct hinic5_wq *wq) +{ + int i; + + if (!WQ_IS_0_LEVEL_CLA(wq)) + dma_free_coherent(wq->dev_hdl, PAGE_SIZE, wq->wq_block_vaddr, + wq->wq_block_paddr); + + for (i = 0; i < wq->num_wq_pages; i++) + hinic5_dma_free_coherent_align(wq->dev_hdl, &wq->wq_pages[i]); + + kfree(wq->wq_pages); + wq->wq_pages = NULL; +} + +int hinic5_wq_create(void *hwdev, struct hinic5_wq *wq, u32 q_depth, + u16 wqebb_size) +{ + struct hinic5_hwdev *dev = hwdev; + u32 wq_page_size; + + if (!wq || !dev) { + pr_err("Invalid wq or dev_hdl\n"); + return -EINVAL; + } + wq_page_size = dev->wq_page_size; // make sure HINIC5_HW_WQ_PAGE_SIZE align + + if (q_depth < WQ_MIN_DEPTH || q_depth > WQ_MAX_DEPTH || + ((q_depth & (q_depth - 1)) != 0) || wqebb_size == 0 || + ((wqebb_size & (wqebb_size - 1)) != 0)) { + sdk_err(dev->dev_hdl, "Wq q_depth(%u) or wqebb_size(%u) is invalid\n", + q_depth, wqebb_size); + return -EINVAL; + } + + memset(wq, 0, sizeof(struct hinic5_wq)); + wq->dev_hdl = dev->dev_hdl; + wq->q_depth = q_depth; + wq->idx_mask = (u16)(q_depth - 1); + wq->wqebb_size = wqebb_size; + wq->wqebb_size_shift = (u16)ilog2(wq->wqebb_size); + wq->wq_page_size = wq_page_size; + + wq->wqebbs_per_page = wq_page_size / wqebb_size; + /* In case of wq_page_size is larger than q_depth * wqebb_size */ + if (wq->wqebbs_per_page > q_depth) + wq->wqebbs_per_page = q_depth; + wq->wqebbs_per_page_shift = (u16)ilog2(wq->wqebbs_per_page); + wq->wqebbs_per_page_mask = 
(u16)(wq->wqebbs_per_page - 1); + wq->num_wq_pages = (u16)(ALIGN(((u32)q_depth * wqebb_size), + wq_page_size) / wq_page_size); + + return wq_alloc_pages(wq); +} +EXPORT_SYMBOL(hinic5_wq_create); + +void hinic5_wq_destroy(struct hinic5_wq *wq) +{ + if (!wq) + return; + + wq_free_pages(wq); +} +EXPORT_SYMBOL(hinic5_wq_destroy); diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_api_cmd.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_api_cmd.h new file mode 100644 index 00000000..5476b97b --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_api_cmd.h @@ -0,0 +1,279 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_API_CMD_H +#define HINIC5_API_CMD_H + +#include <linux/semaphore.h> + +#include "hinic5_eqs.h" +#include "hinic5_hwif_inner.h" + +/* api_cmd_cell.ctrl structure */ +#define HINIC5_API_CMD_CELL_CTRL_CELL_LEN_SHIFT 0 +#define HINIC5_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_SHIFT 16 +#define HINIC5_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_SHIFT 24 +#define HINIC5_API_CMD_CELL_CTRL_XOR_CHKSUM_SHIFT 56 + +#define HINIC5_API_CMD_CELL_CTRL_CELL_LEN_MASK 0x3FU +#define HINIC5_API_CMD_CELL_CTRL_RD_DMA_ATTR_OFF_MASK 0x3FU +#define HINIC5_API_CMD_CELL_CTRL_WR_DMA_ATTR_OFF_MASK 0x3FU +#define HINIC5_API_CMD_CELL_CTRL_XOR_CHKSUM_MASK 0xFFU + +#define HINIC5_API_CMD_CELL_CTRL_SET(val, member) \ + ((((u64)(val)) & HINIC5_API_CMD_CELL_CTRL_##member##_MASK) << \ + HINIC5_API_CMD_CELL_CTRL_##member##_SHIFT) + +/* api_cmd_cell.desc structure */ +#define HINIC5_API_CMD_DESC_API_TYPE_SHIFT 0 +#define HINIC5_API_CMD_DESC_RD_WR_SHIFT 1 +#define HINIC5_API_CMD_DESC_MGMT_BYPASS_SHIFT 2 +#define HINIC5_API_CMD_DESC_RESP_AEQE_EN_SHIFT 3 +#define HINIC5_API_CMD_DESC_APICHN_RSVD_SHIFT 4 +#define HINIC5_API_CMD_DESC_APICHN_CODE_SHIFT 6 +#define HINIC5_API_CMD_DESC_PRIV_DATA_SHIFT 8 +#define HINIC5_API_CMD_DESC_DEST_SHIFT 32 +#define HINIC5_API_CMD_DESC_SIZE_SHIFT 40 
+#define HINIC5_API_CMD_DESC_XOR_CHKSUM_SHIFT 56 + +#define HINIC5_API_CMD_DESC_API_TYPE_MASK 0x1U +#define HINIC5_API_CMD_DESC_RD_WR_MASK 0x1U +#define HINIC5_API_CMD_DESC_MGMT_BYPASS_MASK 0x1U +#define HINIC5_API_CMD_DESC_RESP_AEQE_EN_MASK 0x1U +#define HINIC5_API_CMD_DESC_APICHN_RSVD_MASK 0x3U +#define HINIC5_API_CMD_DESC_APICHN_CODE_MASK 0x3U +#define HINIC5_API_CMD_DESC_PRIV_DATA_MASK 0xFFFFFFU +#define HINIC5_API_CMD_DESC_DEST_MASK 0x1FU +#define HINIC5_API_CMD_DESC_SIZE_MASK 0x7FFU +#define HINIC5_API_CMD_DESC_XOR_CHKSUM_MASK 0xFFU + +#define HINIC5_API_CMD_DESC_SET(val, member) \ + ((((u64)(val)) & HINIC5_API_CMD_DESC_##member##_MASK) << \ + HINIC5_API_CMD_DESC_##member##_SHIFT) + +/* api_cmd_status header */ +#define HINIC5_API_CMD_STATUS_HEADER_VALID_SHIFT 0 +#define HINIC5_API_CMD_STATUS_HEADER_CHAIN_ID_SHIFT 16 + +#define HINIC5_API_CMD_STATUS_HEADER_VALID_MASK 0xFFU +#define HINIC5_API_CMD_STATUS_HEADER_CHAIN_ID_MASK 0xFFU + +#define HINIC5_API_CMD_STATUS_HEADER_GET(val, member) \ + (((val) >> HINIC5_API_CMD_STATUS_HEADER_##member##_SHIFT) & \ + HINIC5_API_CMD_STATUS_HEADER_##member##_MASK) + +/* API_CHAIN_REQ CSR: 0x0020+api_idx*0x080 */ +#define HINIC5_API_CMD_CHAIN_REQ_RESTART_SHIFT 1 +#define HINIC5_API_CMD_CHAIN_REQ_WB_TRIGGER_SHIFT 2 + +#define HINIC5_API_CMD_CHAIN_REQ_RESTART_MASK 0x1U +#define HINIC5_API_CMD_CHAIN_REQ_WB_TRIGGER_MASK 0x1U + +#define HINIC5_API_CMD_CHAIN_REQ_SET(val, member) \ + (((val) & HINIC5_API_CMD_CHAIN_REQ_##member##_MASK) << \ + HINIC5_API_CMD_CHAIN_REQ_##member##_SHIFT) + +#define HINIC5_API_CMD_CHAIN_REQ_GET(val, member) \ + (((val) >> HINIC5_API_CMD_CHAIN_REQ_##member##_SHIFT) & \ + HINIC5_API_CMD_CHAIN_REQ_##member##_MASK) + +#define HINIC5_API_CMD_CHAIN_REQ_CLEAR(val, member) \ + ((val) & (~(HINIC5_API_CMD_CHAIN_REQ_##member##_MASK \ + << HINIC5_API_CMD_CHAIN_REQ_##member##_SHIFT))) + +/* API_CHAIN_CTL CSR: 0x0014+api_idx*0x080 */ +#define HINIC5_API_CMD_CHAIN_CTRL_RESTART_EN_SHIFT 1 +#define 
HINIC5_API_CMD_CHAIN_CTRL_XOR_ERR_SHIFT 2 +#define HINIC5_API_CMD_CHAIN_CTRL_AEQE_EN_SHIFT 4 +#define HINIC5_API_CMD_CHAIN_CTRL_AEQ_ID_SHIFT 8 +#define HINIC5_API_CMD_CHAIN_CTRL_XOR_CHK_EN_SHIFT 28 +#define HINIC5_API_CMD_CHAIN_CTRL_CELL_SIZE_SHIFT 30 + +#define HINIC5_API_CMD_CHAIN_CTRL_RESTART_EN_MASK 0x1U +#define HINIC5_API_CMD_CHAIN_CTRL_XOR_ERR_MASK 0x1U +#define HINIC5_API_CMD_CHAIN_CTRL_AEQE_EN_MASK 0x1U +#define HINIC5_API_CMD_CHAIN_CTRL_AEQ_ID_MASK 0x3U +#define HINIC5_API_CMD_CHAIN_CTRL_XOR_CHK_EN_MASK 0x3U +#define HINIC5_API_CMD_CHAIN_CTRL_CELL_SIZE_MASK 0x3U + +#define HINIC5_API_CMD_CHAIN_CTRL_SET(val, member) \ + (((val) & HINIC5_API_CMD_CHAIN_CTRL_##member##_MASK) << \ + HINIC5_API_CMD_CHAIN_CTRL_##member##_SHIFT) + +#define HINIC5_API_CMD_CHAIN_CTRL_CLEAR(val, member) \ + ((val) & (~(HINIC5_API_CMD_CHAIN_CTRL_##member##_MASK \ + << HINIC5_API_CMD_CHAIN_CTRL_##member##_SHIFT))) + +/* api_cmd rsp header */ +#define HINIC5_API_CMD_RESP_HEAD_VALID_SHIFT 0 +#define HINIC5_API_CMD_RESP_HEAD_STATUS_SHIFT 8 +#define HINIC5_API_CMD_RESP_HEAD_CHAIN_ID_SHIFT 16 +#define HINIC5_API_CMD_RESP_HEAD_RESP_LEN_SHIFT 24 +#define HINIC5_API_CMD_RESP_HEAD_DRIVER_PRIV_SHIFT 40 + +#define HINIC5_API_CMD_RESP_HEAD_VALID_MASK 0xFF +#define HINIC5_API_CMD_RESP_HEAD_STATUS_MASK 0xFFU +#define HINIC5_API_CMD_RESP_HEAD_CHAIN_ID_MASK 0xFFU +#define HINIC5_API_CMD_RESP_HEAD_RESP_LEN_MASK 0x1FFU +#define HINIC5_API_CMD_RESP_HEAD_DRIVER_PRIV_MASK 0xFFFFFFU + +#define HINIC5_API_CMD_RESP_HEAD_VALID_CODE 0xFF + +#define HINIC5_API_CMD_RESP_HEADER_VALID(val) \ + (((val) & HINIC5_API_CMD_RESP_HEAD_VALID_MASK) == \ + HINIC5_API_CMD_RESP_HEAD_VALID_CODE) + +#define HINIC5_API_CMD_RESP_HEAD_GET(val, member) \ + (((val) >> HINIC5_API_CMD_RESP_HEAD_##member##_SHIFT) & \ + HINIC5_API_CMD_RESP_HEAD_##member##_MASK) + +/* API_STATUS_0 CSR: 0x0030+api_idx*0x080 */ +#define HINIC5_API_CMD_STATUS_CONS_IDX_MASK 0xFFFFFFU +#define HINIC5_API_CMD_STATUS_CONS_IDX_SHIFT 0 + +#define 
HINIC5_API_CMD_STATUS_FSM_MASK 0xFU +#define HINIC5_API_CMD_STATUS_FSM_SHIFT 24 + +#define HINIC5_API_CMD_STATUS_CHKSUM_ERR_MASK 0x3U +#define HINIC5_API_CMD_STATUS_CHKSUM_ERR_SHIFT 28 + +#define HINIC5_API_CMD_STATUS_CPLD_ERR_MASK 0x1U +#define HINIC5_API_CMD_STATUS_CPLD_ERR_SHIFT 30 + +#define HINIC5_API_CMD_STATUS_CONS_IDX(val) \ + ((val) & HINIC5_API_CMD_STATUS_CONS_IDX_MASK) + +#define HINIC5_API_CMD_STATUS_CHKSUM_ERR(val) \ + (((val) >> HINIC5_API_CMD_STATUS_CHKSUM_ERR_SHIFT) & \ + HINIC5_API_CMD_STATUS_CHKSUM_ERR_MASK) + +#define HINIC5_API_CMD_STATUS_GET(val, member) \ + (((val) >> HINIC5_API_CMD_STATUS_##member##_SHIFT) & \ + HINIC5_API_CMD_STATUS_##member##_MASK) + +enum hinic5_api_cmd_chain_type { + /* write to mgmt cpu command with completion */ + HINIC5_API_CMD_WRITE_TO_MGMT_CPU = 2, + /* multi read command with completion notification - not used */ + HINIC5_API_CMD_MULTI_READ = 3, + /* write command without completion notification */ + HINIC5_API_CMD_POLL_WRITE = 4, + /* read command without completion notification */ + HINIC5_API_CMD_POLL_READ = 5, + /* read from mgmt cpu command with completion */ + HINIC5_API_CMD_WRITE_ASYNC_TO_MGMT_CPU = 6, + HINIC5_API_CMD_MAX, +}; + +struct hinic5_api_cmd_status { + u64 header; + u32 buf_desc; + u32 cell_addr_hi; + u32 cell_addr_lo; + u32 rsvd0; + u64 rsvd1; +}; + +/* HW struct */ +struct hinic5_api_cmd_cell { + u64 ctrl; + + /* address is 64 bit in HW struct */ + u64 next_cell_paddr; + + u64 desc; + + /* HW struct */ + union { + struct { + u64 hw_cmd_paddr; + } write; + + struct { + u64 hw_wb_resp_paddr; + u64 hw_cmd_paddr; + } read; + }; +}; + +struct hinic5_api_cmd_resp_fmt { + u64 header; + u64 resp_data; +}; + +struct hinic5_api_cmd_cell_ctxt { + struct hinic5_api_cmd_cell *cell_vaddr; + + void *api_cmd_vaddr; + + struct hinic5_api_cmd_resp_fmt *resp; + + struct completion done; + int status; + + u32 saved_prod_idx; + struct hinic5_hwdev *hwdev; +}; + +struct hinic5_api_cmd_chain_attr { + struct 
hinic5_hwdev *hwdev; + enum hinic5_api_cmd_chain_type chain_type; + + u32 num_cells; + u16 rsp_size; + u16 cell_size; +}; + +struct hinic5_api_cmd_chain { + struct hinic5_hwdev *hwdev; + enum hinic5_api_cmd_chain_type chain_type; + + u32 num_cells; + u16 cell_size; + u16 rsp_size; + u32 rsvd1; + + /* HW members is 24 bit format */ + u32 prod_idx; + u32 cons_idx; + + struct semaphore sem; + /* Async cmd can not be scheduling */ + spinlock_t async_lock; + + dma_addr_t wb_status_paddr; + struct hinic5_api_cmd_status *wb_status; + + dma_addr_t head_cell_paddr; + struct hinic5_api_cmd_cell *head_node; + + struct hinic5_api_cmd_cell_ctxt *cell_ctxt; + struct hinic5_api_cmd_cell *curr_node; + + struct hinic5_dma_addr_align cells_addr; + + u8 *cell_vaddr_base; + u64 cell_paddr_base; + u8 *rsp_vaddr_base; + u64 rsp_paddr_base; + u8 *buf_vaddr_base; + u64 buf_paddr_base; + u64 cell_size_align; + u64 rsp_size_align; + u64 buf_size_align; + + u64 rsvd2; +}; + +int hinic5_api_cmd_write(struct hinic5_api_cmd_chain *chain, u8 node_id, + const void *cmd, u16 size); + +int hinic5_api_cmd_read(struct hinic5_api_cmd_chain *chain, u8 node_id, + const void *cmd, u16 size, void *ack, u16 ack_size); + +int hinic5_api_cmd_init(struct hinic5_hwdev *hwdev, + struct hinic5_api_cmd_chain **chain); + +void hinic5_api_cmd_free(const struct hinic5_hwdev *hwdev, struct hinic5_api_cmd_chain **chain); + +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_cmdq.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_cmdq.h new file mode 100644 index 00000000..1e623fcc --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_cmdq.h @@ -0,0 +1,257 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_CMDQ_H +#define HINIC5_CMDQ_H + +#include <linux/types.h> +#include <linux/completion.h> +#include <linux/spinlock.h> + +#include "mpu_inband_cmd_defs.h" +#include 
"hinic5_cmdq_enhance.h" +#include "hinic5_hw.h" +#include "hinic5_wq.h" +#include "hinic5_common.h" +#include "ossl_knl.h" +#include "hinic5_hwdev.h" + +#define HINIC5_SCMD_DATA_LEN 16 + +#define HINIC5_CMDQ_DEPTH 4096 +#define HINIC5_CMDQ_MAX_BUF_SIZE 2048U +#define HINIC5_CMDQ_MIN_BUF_SIZE 4U +#define HINIC5_CMDQ_BUF_ALIGN 2048U + +#define HINIC5_CMDQ_CQE_DW0_ERR_CODE_SHIFT 29 +#define HINIC5_CMDQ_CQE_DW0_ERR_CODE_MASK 0x3 +#define HINIC5_CMDQ_CQE_DW0_ERR_STATUS_MASK 0x1fffffff /* 29 bits */ + +enum hinic5_cmdq_mode { + HINIC5_NORMAL_CMDQ, + HINIC5_ENHANCE_CMDQ, +}; + +enum hinic5_cmdq_type { + HINIC5_CMDQ_SYNC, + HINIC5_CMDQ_ASYNC, + HINIC5_CMDQ_FAST_MSG, + HINIC5_MAX_CMDQ_TYPES = 4 +}; + +enum hinic5_db_src_type { + HINIC5_DB_SRC_CMDQ_TYPE, + HINIC5_DB_SRC_L2NIC_SQ_TYPE, +}; + +enum hinic5_cmdq_db_type { + HINIC5_DB_SQ_RQ_TYPE, + HINIC5_DB_CMDQ_TYPE, +}; + +enum bufdesc_len { + BUFDESC_LCMD_LEN = 2, + BUFDESC_SCMD_LEN = 3, + BUFDESC_ENHANCE_CMD_LEN = 3, /* 64B aligned */ +}; + +/* hardware define: cmdq wqe */ +struct hinic5_cmdq_header { + u32 header_info; + u32 saved_data; +}; + +struct hinic5_scmd_bufdesc { + u32 buf_len; + u32 rsvd; + u8 data[HINIC5_SCMD_DATA_LEN]; +}; + +struct hinic5_lcmd_bufdesc { + struct hinic5_sge sge; + u32 rsvd1; + u64 saved_async_buf; + u64 rsvd3; +}; + +struct hinic5_cmdq_db { + u32 db_head; + u32 db_info; +}; + +struct hinic5_status { + u32 status_info; +}; + +struct hinic5_ctrl { + u32 ctrl_info; +}; + +struct hinic5_sge_resp { + struct hinic5_sge sge; + u32 rsvd; +}; + +struct hinic5_cmdq_completion { + union { + struct hinic5_sge_resp sge_resp; + u64 direct_resp; + }; +}; + +struct hinic5_cmdq_wqe_scmd { + struct hinic5_cmdq_header header; + u64 rsvd; + struct hinic5_status status; + struct hinic5_ctrl ctrl; + struct hinic5_cmdq_completion completion; + struct hinic5_scmd_bufdesc buf_desc; +}; + +struct hinic5_cmdq_wqe_lcmd { + struct hinic5_cmdq_header header; + struct hinic5_status status; + struct hinic5_ctrl ctrl; + struct 
hinic5_cmdq_completion completion; + struct hinic5_lcmd_bufdesc buf_desc; +}; + +struct hinic5_cmdq_inline_wqe { + struct hinic5_cmdq_wqe_scmd wqe_scmd; +}; + +struct hinic5_cmdq_wqe { + union { + struct hinic5_cmdq_inline_wqe inline_wqe; + struct hinic5_cmdq_wqe_lcmd wqe_lcmd; + struct hinic5_enhanced_cmdq_wqe enhanced_cmdq_wqe; + }; +}; + +struct hinic5_cmdq_arm_bit { + u32 q_type; + u32 q_id; +}; + +enum hinic5_cmdq_status { + HINIC5_CMDQ_ENABLE = BIT(0), +}; + +enum hinic5_cmdq_cmd_type { + HINIC5_CMD_TYPE_NONE, + HINIC5_CMD_TYPE_SET_ARM, + HINIC5_CMD_TYPE_DIRECT_RESP, + HINIC5_CMD_TYPE_SGE_RESP, + HINIC5_CMD_TYPE_ASYNC, + HINIC5_CMD_TYPE_FAKE_TIMEOUT, + HINIC5_CMD_TYPE_TIMEOUT, + HINIC5_CMD_TYPE_FORCE_STOP, + HINIC5_CMD_TYPE_INLINE_DATA, +}; + +enum data_format { + DATA_SGE, + DATA_DIRECT, +}; + +#define WQ_BLOCK_PFN_SHIFT 9 +#define CMDQ_PFN_SHIFT 12 +#define CMDQ_PFN(addr) ((addr) >> CMDQ_PFN_SHIFT) +#define WQ_BLOCK_PFN(addr) ((addr) >> WQ_BLOCK_PFN_SHIFT) + +struct hinic5_cmdq_cmd_info { + enum hinic5_cmdq_cmd_type cmd_type; + u16 channel; + u16 rsvd1; + u16 wqebb_use_num; + + struct completion *done; + int *errcode; + int *cmpt_code; + u64 *direct_resp; + u64 cmdq_msg_id; + + struct hinic5_cmd_buf *buf_in; + struct hinic5_cmd_buf *buf_out; +}; + +struct hinic5_cmdq { + struct hinic5_wq wq; + + enum hinic5_cmdq_type cmdq_type; + int wrapped; + + /* spinlock for send cmdq commands */ + spinlock_t cmdq_lock; + + struct cmdq_ctxt_info cmdq_ctxt; + struct enhance_cmdq_ctxt_info cmdq_enhance_ctxt; + + struct hinic5_cmdq_cmd_info *cmd_infos; + + struct hinic5_hwdev *hwdev; + struct hinic5_cmdqs *cmdqs; + u64 rsvd1[2]; +}; + +struct hinic5_cmdqs { + struct hinic5_hwdev *hwdev; + + struct dma_pool *cmd_buf_pool; + /* doorbell area */ + u8 __iomem *cmdqs_db_base; + + /* All cmdq's CLA of a VF occupy a PAGE when cmdq wq is 1-level CLA */ + dma_addr_t wq_block_paddr; + void *wq_block_vaddr; + struct hinic5_cmdq cmdq[HINIC5_MAX_CMDQ_TYPES]; + + u32 status; + u32 
disable_flag; + + bool lock_channel_en; + ulong channel_stop; + u8 cmdq_num; + u8 cmdq_mode; + u8 wqebb_size; + u8 wqebb_use_num; + u8 rsvd1; + u64 rsvd2; + u32 cmd_buf_size; + bool poll; /* use polling mode or int mode */ +}; + +void hinic5_cmdq_ceq_handler(void *handle, u32 ceqe_data); + +int hinic5_reinit_cmdq_ctxts(struct hinic5_hwdev *hwdev); + +bool hinic5_cmdq_idle(struct hinic5_cmdq *cmdq); + +int hinic5_cmdqs_init(struct hinic5_hwdev *hwdev); + +void hinic5_cmdqs_free(struct hinic5_hwdev *hwdev); + +void hinic5_cmdq_flush_cmd(struct hinic5_hwdev *hwdev, + struct hinic5_cmdq *cmdq); + +int hinic5_cmdq_set_channel_status(struct hinic5_hwdev *hwdev, u16 channel, + bool enable); + +void hinic5_cmdq_enable_channel_lock(struct hinic5_hwdev *hwdev, bool enable); + +void hinic5_cmdq_flush_sync_cmd(struct hinic5_hwdev *hwdev); + +void enhanced_cmdq_set_wqe(struct hinic5_cmdq_wqe *wqe, + enum hinic5_cmdq_cmd_type cmd_type, + const struct hinic5_cmdq_cmd_param *cmd_buf, + int wrapped); +void enhanced_cmdq_init_queue_ctxt(struct hinic5_cmdqs *cmdqs, struct hinic5_cmdq *cmdq); +void hinic5_cmdqs_param_init(struct hinic5_hwdev *hwdev, struct hinic5_cmdqs *cmdqs); +int hinic5_send_fast_msg_need_resp(void *hwdev, u8 mod, u8 cmd, + struct hinic5_cmd_buf *buf_in, + u64 *out_param); +int hinic5_cos_id_direct_resp(void *hwdev, u8 mod, u8 cmd, u16 cos_id, + struct hinic5_cmd_buf *buf_in, u64 *out_param, + u32 timeout, u16 channel); +#endif + diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_cmdq_enhance.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_cmdq_enhance.h new file mode 100644 index 00000000..957bd5b0 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_cmdq_enhance.h @@ -0,0 +1,187 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_ENHANCED_CMDQ_H +#define HINIC5_ENHANCED_CMDQ_H + +#include "hinic5_hw.h" + +struct hinic5_cmdq; 
+struct hinic5_cmdq_cmd_info; + +enum complete_format { + INLINE_DATA = 0, + SGE_RESPONSE = 1, +}; + +#define HINIC5_CMDQ_MAX_INLINE_DATA_SIZE 160U +#define HINIC5_CMDQ_WQE_INLINE_DATA_PI_OFFSET 2 + +/* first part 16B */ +#define ENHANCED_CMDQ_CTXT0_CI_WQE_ADDR_SHIFT 0 +#define ENHANCED_CMDQ_CTXT0_RSV1_SHIFT 52 +#define ENHANCED_CMDQ_CTXT0_EQ_SHIFT 53 +#define ENHANCED_CMDQ_CTXT0_CEQ_ARM_SHIFT 61 +#define ENHANCED_CMDQ_CTXT0_CEQ_EN_SHIFT 62 +#define ENHANCED_CMDQ_CTXT0_HW_BUSY_BIT_SHIFT 63 + +#define ENHANCED_CMDQ_CTXT0_CI_WQE_ADDR_MASK 0xFFFFFFFFFFFFFU +#define ENHANCED_CMDQ_CTXT0_RSV1_MASK 0x1U +#define ENHANCED_CMDQ_CTXT0_EQ_MASK 0xFFU +#define ENHANCED_CMDQ_CTXT0_CEQ_ARM_MASK 0x1U +#define ENHANCED_CMDQ_CTXT0_CEQ_EN_MASK 0x1U +#define ENHANCED_CMDQ_CTXT0_HW_BUSY_BIT_MASK 0x1U + +#define ENHANCED_CMDQ_CTXT1_Q_DIS_SHIFT 0 +#define ENHANCED_CMDQ_CTXT1_ERR_CODE_SHIFT 1 +#define ENHANCED_CMDQ_CTXT1_RSV1_SHIFT 3 +#define ENHANCED_CMDQ_CTXT1_PI_SHIFT 32 +#define ENHANCED_CMDQ_CTXT1_CI_SHIFT 48 + +#define ENHANCED_CMDQ_CTXT1_Q_DIS_MASK 0x1U +#define ENHANCED_CMDQ_CTXT1_ERR_CODE_MASK 0x3U +#define ENHANCED_CMDQ_CTXT1_RSV1_MASK 0x1FFFFFFFU +#define ENHANCED_CMDQ_CTXT1_PI_MASK 0xFFFFU +#define ENHANCED_CMDQ_CTXT1_CI_MASK 0xFFFFU + +/* second PART 16B */ +#define ENHANCED_CMDQ_CTXT2_PFT_CI_SHIFT 0 +#define ENHANCED_CMDQ_CTXT2_O_BIT_SHIFT 4 +#define ENHANCED_CMDQ_CTXT2_PFT_THD_SHIFT 32 +#define ENHANCED_CMDQ_CTXT2_PFT_MAX_SHIFT 46 +#define ENHANCED_CMDQ_CTXT2_PFT_MIN_SHIFT 57 + +#define ENHANCED_CMDQ_CTXT2_PFT_CI_MASK 0xFU +#define ENHANCED_CMDQ_CTXT2_O_BIT_MASK 0x1U +#define ENHANCED_CMDQ_CTXT2_PFT_THD_MASK 0x3FFFFU +#define ENHANCED_CMDQ_CTXT2_PFT_MAX_MASK 0x7FFFU +#define ENHANCED_CMDQ_CTXT2_PFT_MIN_MASK 0x7FU + +#define ENHANCED_CMDQ_CTXT3_PFT_CI_ADDR_SHIFT 0 +#define ENHANCED_CMDQ_CTXT3_PFT_CI_SHIFT 52 + +#define ENHANCED_CMDQ_CTXT3_PFT_CI_ADDR_MASK 0xFFFFFFFFFFFFFU +#define ENHANCED_CMDQ_CTXT3_PFT_CI_MASK 0xFFFFU + +/* THIRD PART 16B */ +#define 
ENHANCED_CMDQ_CTXT4_CI_CLA_ADDR_SHIFT 0 + +#define ENHANCED_CMDQ_CTXT4_CI_CLA_ADDR_MASK 0x7FFFFFFFFFFFFFU + +#define ENHANCED_CMDQ_SET(val, member) \ + (((u64)(val) & ENHANCED_CMDQ_##member##_MASK) << \ + ENHANCED_CMDQ_##member##_SHIFT) + +#define WQ_PREFETCH_MAX 4 +#define WQ_PREFETCH_MIN 1 +#define WQ_PREFETCH_THRESHOLD 256 + +#define CI_IDX_HIGH_SHIFH 12 +#define CI_HIGN_IDX(val) ((val) >> CI_IDX_HIGH_SHIFH) + +#define ENHANCE_CMDQ_WQE_HEADER_SEND_SGE_LEN_SHIFT 0 +#define ENHANCE_CMDQ_WQE_HEADER_BDSL_SHIFT 19 +#define ENHANCE_CMDQ_WQE_HEADER_DF_SHIFT 28 +#define ENHANCE_CMDQ_WQE_HEADER_DN_SHIFT 29 +#define ENHANCE_CMDQ_WQE_HEADER_EC_SHIFT 30 +#define ENHANCE_CMDQ_WQE_HEADER_HW_BUSY_BIT_SHIFT 31 + +#define ENHANCE_CMDQ_WQE_HEADER_SEND_SGE_LEN_MASK 0x3FFFFU +#define ENHANCE_CMDQ_WQE_HEADER_BDSL_MASK 0xFFU +#define ENHANCE_CMDQ_WQE_HEADER_DF_MASK 0x1U +#define ENHANCE_CMDQ_WQE_HEADER_DN_MASK 0x1U +#define ENHANCE_CMDQ_WQE_HEADER_EC_MASK 0x1U +#define ENHANCE_CMDQ_WQE_HEADER_HW_BUSY_BIT_MASK 0x1U + +#define ENHANCE_CMDQ_WQE_HEADER_SET(val, member) \ + ((((u32)(val)) & ENHANCE_CMDQ_WQE_HEADER_##member##_MASK) << \ + ENHANCE_CMDQ_WQE_HEADER_##member##_SHIFT) + +#define ENHANCE_CMDQ_WQE_HEADER_GET(val, member) \ + (((val) >> ENHANCE_CMDQ_WQE_HEADER_##member##_SHIFT) & \ + ENHANCE_CMDQ_WQE_HEADER_##member##_MASK) + +#define ENHANCE_CMDQ_WQE_CS_ERR_CODE_SHIFT 0 +#define ENHANCE_CMDQ_WQE_CS_ERR_STATUS_28_18_SHIFT 21 /* dw0 shift */ +#define ENHANCE_CMDQ_WQE_CS_ERR_STATUS_17_0_SHIFT 14 /* dw3 shift */ +#define ENHANCE_CMDQ_WQE_CS_CMD_SHIFT 4 +#define ENHANCE_CMDQ_WQE_CS_ACK_TYPE_SHIFT 12 +#define ENHANCE_CMDQ_WQE_CS_HW_BUSY_SHIFT 14 +#define ENHANCE_CMDQ_WQE_CS_RN_SHIFT 15 +#define ENHANCE_CMDQ_WQE_CS_MOD_SHIFT 16 +#define ENHANCE_CMDQ_WQE_CS_CF_SHIFT 31 + +#define ENHANCE_CMDQ_WQE_CS_ERR_CODE_MASK 0xFU +#define ENHANCE_CMDQ_WQE_CS_ERR_STATUS_28_18_MASK 0xFFFU /* dw0 mask */ +#define ENHANCE_CMDQ_WQE_CS_ERR_STATUS_17_0_MASK 0x3FFFFU /* dw3 mask */ +#define 
ENHANCE_CMDQ_WQE_CS_CMD_MASK 0xFFU +#define ENHANCE_CMDQ_WQE_CS_ACK_TYPE_MASK 0x3U +#define ENHANCE_CMDQ_WQE_CS_HW_BUSY_MASK 0x1U +#define ENHANCE_CMDQ_WQE_CS_RN_MASK 0x1U +#define ENHANCE_CMDQ_WQE_CS_MOD_MASK 0x1FU +#define ENHANCE_CMDQ_WQE_CS_CF_MASK 0x1U + +#define ENHANCE_CMDQ_WQE_CS_SET(val, member) \ + ((((u32)(val)) & ENHANCE_CMDQ_WQE_CS_##member##_MASK) << \ + ENHANCE_CMDQ_WQE_CS_##member##_SHIFT) + +#define ENHANCE_CMDQ_WQE_CS_GET(val, member) \ + (((val) >> ENHANCE_CMDQ_WQE_CS_##member##_SHIFT) & \ + ENHANCE_CMDQ_WQE_CS_##member##_MASK) + +#define ENHANCE_CMDQ_WQE_CS_ERR_STATUS_28_18_OFFSET 18 + +union hinic5_cmdq_enhance_completion { + struct { + u32 cs_format; + u32 sge_resp_hi_addr; + u32 sge_resp_lo_addr; + u32 sge_resp_len; /* bit 14~31 rsvd, soft can't use. */ + }; + + u32 dw[0x4]; +}; + +struct hinic5_cmdq_enhance_response { + u32 cs_format; + u32 resvd; + u64 direct_data; +}; + +struct sge_send_info { + u32 sge_hi_addr; + u32 sge_li_addr; + u32 seg_len; + u32 rsvd; +}; + +#define NORMAL_WQE_TYPE 0 +#define COMPACT_WQE_TYPE 1 +struct hinic5_ctrl_section { + u32 header; + u32 rsv; + u32 sge_send_hi_addr; + u32 sge_send_lo_addr; +}; + +struct hinic5_enhanced_cmd_bufdesc { + u32 len; + u32 rsv; + u32 sge_send_hi_addr; + u32 sge_send_lo_addr; +}; + +struct hinic5_enhanced_cmdq_wqe { + struct hinic5_ctrl_section ctrl_sec; /* 16B */ + union hinic5_cmdq_enhance_completion completion; /* 16B */ + union { + struct hinic5_enhanced_cmd_bufdesc buf_desc[2]; /* 32B */ + u8 inline_data[HINIC5_CMDQ_MAX_INLINE_DATA_SIZE]; /* 160B max */ + }; +}; + +void enhanced_cmdq_update_cmd_status(struct hinic5_cmdq *cmdq, + struct hinic5_cmdq_cmd_info *cmd_info, + struct hinic5_enhanced_cmdq_wqe *wqe); + +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_csr_inner.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_csr_inner.h new file mode 100644 index 00000000..69127955 --- /dev/null +++ 
b/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_csr_inner.h @@ -0,0 +1,214 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_CSR_INNER_H +#define HINIC5_CSR_INNER_H + +/* bit31/bit30 for bar index flag + * 00: bar0 + * 01: bar1 + * 10: bar2 + * 11: bar3 + */ +#define HINIC5_CFG_REGS_FLAG 0x40000000 + +#define HINIC5_MGMT_REGS_FLAG 0xC0000000 + +#define HINIC5_REGS_FLAG_MASK 0x3FFFFFFF + +#define HINIC5_VF_CFG_REG_OFFSET 0x2000 + +#define HINIC5_MISC_INTC_BAR 0x18000 + +#define HINIC5_HOST_CSR_BASE_ADDR (HINIC5_MGMT_REGS_FLAG + 0x6000) +#define HINIC5_CSR_GLOBAL_BASE_ADDR (HINIC5_MGMT_REGS_FLAG + 0x6400) + +/* HW interface registers */ +#define HINIC5_CSR_FUNC_ATTR0_ADDR (HINIC5_CFG_REGS_FLAG + 0x0) +#define HINIC5_CSR_FUNC_ATTR1_ADDR (HINIC5_CFG_REGS_FLAG + 0x4) +#define HINIC5_CSR_FUNC_ATTR2_ADDR (HINIC5_CFG_REGS_FLAG + 0x8) +#define HINIC5_CSR_FUNC_ATTR3_ADDR (HINIC5_CFG_REGS_FLAG + 0xC) +#define HINIC5_CSR_FUNC_ATTR4_ADDR (HINIC5_CFG_REGS_FLAG + 0x10) +#define HINIC5_CSR_FUNC_ATTR5_ADDR (HINIC5_CFG_REGS_FLAG + 0x14) +#define HINIC5_CSR_FUNC_ATTR6_ADDR (HINIC5_CFG_REGS_FLAG + 0x18) +#define HINIC5_CSR_FUNC_TASK1_ADDR (HINIC5_CFG_REGS_FLAG + 0x24) + +#define HINIC5_CSR_INTC_BAR_SW_HANDSHAKE_0_CSR0_REG (HINIC5_MISC_INTC_BAR + 0x44) + +#define HINIC5_FUNC_CSR_MAILBOX_DATA_OFF 0x80 +#define HINIC5_FUNC_CSR_MAILBOX_CONTROL_OFF \ + (HINIC5_CFG_REGS_FLAG + 0x0100) +#define HINIC5_FUNC_CSR_MAILBOX_INT_OFFSET_OFF \ + (HINIC5_CFG_REGS_FLAG + 0x0104) +#define HINIC5_FUNC_CSR_MAILBOX_RESULT_H_OFF \ + (HINIC5_CFG_REGS_FLAG + 0x0108) +#define HINIC5_FUNC_CSR_MAILBOX_RESULT_L_OFF \ + (HINIC5_CFG_REGS_FLAG + 0x010C) +/* CLP registers */ +#define HINIC5_BAR3_CLP_BASE_ADDR (HINIC5_MGMT_REGS_FLAG + 0x0000) + +#define HINIC5_UCPU_CLP_SIZE_REG (HINIC5_HOST_CSR_BASE_ADDR + 0x40) +#define HINIC5_UCPU_CLP_REQBASE_REG (HINIC5_HOST_CSR_BASE_ADDR + 0x44) +#define HINIC5_UCPU_CLP_RSPBASE_REG 
(HINIC5_HOST_CSR_BASE_ADDR + 0x48) +#define HINIC5_UCPU_CLP_REQ_REG (HINIC5_HOST_CSR_BASE_ADDR + 0x4c) +#define HINIC5_UCPU_CLP_RSP_REG (HINIC5_HOST_CSR_BASE_ADDR + 0x50) +#define HINIC5_CLP_REG(member) (HINIC5_UCPU_CLP_##member##_REG) + +#define HINIC5_CLP_REQ_DATA HINIC5_BAR3_CLP_BASE_ADDR +#define HINIC5_CLP_RSP_DATA (HINIC5_BAR3_CLP_BASE_ADDR + 0x1000) +#define HINIC5_CLP_DATA(member) (HINIC5_CLP_##member##_DATA) + +#define HINIC5_PTP_TS_UPDT_CFG_REG (HINIC5_CSR_GLOBAL_BASE_ADDR + 0x3AC) +#define HINIC5_PTP_TS_INC_CFG_REG (HINIC5_CSR_GLOBAL_BASE_ADDR + 0x3B0) +#define HINIC5_PTP_TS_CALIBRATION_REG (HINIC5_CSR_GLOBAL_BASE_ADDR + 0x3B4) +#define HINIC5_PTP_TS_WR_DATA0_REG (HINIC5_CSR_GLOBAL_BASE_ADDR + 0x3B8) +#define HINIC5_PTP_TS_WR_DATA1_REG (HINIC5_CSR_GLOBAL_BASE_ADDR + 0x3BC) +#define HINIC5_PTP_TS_WR_DATA2_REG (HINIC5_CSR_GLOBAL_BASE_ADDR + 0x3C0) +#define HINIC5_PTP_TS_RD_DATA0_REG (HINIC5_CSR_GLOBAL_BASE_ADDR + 0x3C4) +#define HINIC5_PTP_TS_RD_DATA1_REG (HINIC5_CSR_GLOBAL_BASE_ADDR + 0x3C8) +#define HINIC5_PTP_TS_RD_DATA2_REG (HINIC5_CSR_GLOBAL_BASE_ADDR + 0x3CC) +#define HINIC5_PTP_TS_UP_EN_REG (HINIC5_CSR_GLOBAL_BASE_ADDR + 0x3D0) +#define HINIC5_PTP_TS_DSTR_CFG_REG (HINIC5_CSR_GLOBAL_BASE_ADDR + 0x3D4) +#define HINIC5_PTP_REG(member) (HINIC5_PTP_TS_##member##_REG) + +#define HINIC5_N_PTP_TS_WR_DATA0_REG (HINIC5_CSR_GLOBAL_BASE_ADDR + 0x3E0) +#define HINIC5_N_PTP_TS_WR_DATA1_REG (HINIC5_CSR_GLOBAL_BASE_ADDR + 0x3E4) +#define HINIC5_N_PTP_TS_WR_DATA2_REG (HINIC5_CSR_GLOBAL_BASE_ADDR + 0x3E8) +#define HINIC5_N_PTP_TS_RD_DATA0_REG (HINIC5_CSR_GLOBAL_BASE_ADDR + 0x3EC) +#define HINIC5_N_PTP_TS_RD_DATA1_REG (HINIC5_CSR_GLOBAL_BASE_ADDR + 0x3F0) +#define HINIC5_N_PTP_TS_RD_DATA2_REG (HINIC5_CSR_GLOBAL_BASE_ADDR + 0x3F4) +#define HINIC5_N_PTP_TS_UP_EN_REG (HINIC5_CSR_GLOBAL_BASE_ADDR + 0x3F8) +#define HINIC5_N_PTP_REG(member) (HINIC5_N_PTP_TS_##member##_REG) + +#define HINIC5_PPF_ELECTION_OFFSET 0x0 +#define HINIC5_MPF_ELECTION_OFFSET 0x20 + +#define 
HINIC5_CSR_PPF_ELECTION_ADDR \ + (HINIC5_HOST_CSR_BASE_ADDR + HINIC5_PPF_ELECTION_OFFSET) + +#define HINIC5_CSR_GLOBAL_MPF_ELECTION_ADDR \ + (HINIC5_HOST_CSR_BASE_ADDR + HINIC5_MPF_ELECTION_OFFSET) + +#define HINIC5_CSR_FUNC_PPF_ELECT_BASE_ADDR (HINIC5_CFG_REGS_FLAG + 0x60) +#define HINIC5_CSR_FUNC_PPF_ELECT_PORT_STRIDE 0x4 + +#define HINIC5_CSR_FUNC_PPF_ELECT(host_idx) \ + (HINIC5_CSR_FUNC_PPF_ELECT_BASE_ADDR + \ + (host_idx) * HINIC5_CSR_FUNC_PPF_ELECT_PORT_STRIDE) + +#define HINIC5_CSR_DMA_ATTR_TBL_ADDR (HINIC5_CFG_REGS_FLAG + 0x380) +#define HINIC5_CSR_DMA_ATTR_INDIR_IDX_ADDR (HINIC5_CFG_REGS_FLAG + 0x390) + +/* MSI-X registers */ +#define HINIC5_CSR_MSIX_INDIR_IDX_ADDR (HINIC5_CFG_REGS_FLAG + 0x310) +#define HINIC5_CSR_MSIX_CTRL_ADDR (HINIC5_CFG_REGS_FLAG + 0x300) +#define HINIC5_CSR_MSIX_CNT_ADDR (HINIC5_CFG_REGS_FLAG + 0x304) +#define HINIC5_CSR_FUNC_MSI_CLR_WR_ADDR (HINIC5_CFG_REGS_FLAG + 0x58) + +#define HINIC5_MSI_CLR_INDIR_RESEND_TIMER_CLR_SHIFT 0 +#define HINIC5_MSI_CLR_INDIR_INT_MSK_SET_SHIFT 1 +#define HINIC5_MSI_CLR_INDIR_INT_MSK_CLR_SHIFT 2 +#define HINIC5_MSI_CLR_INDIR_AUTO_MSK_SET_SHIFT 3 +#define HINIC5_MSI_CLR_INDIR_AUTO_MSK_CLR_SHIFT 4 +#define HINIC5_MSI_CLR_INDIR_SIMPLE_INDIR_IDX_SHIFT 22 + +#define HINIC5_MSI_CLR_INDIR_RESEND_TIMER_CLR_MASK 0x1U +#define HINIC5_MSI_CLR_INDIR_INT_MSK_SET_MASK 0x1U +#define HINIC5_MSI_CLR_INDIR_INT_MSK_CLR_MASK 0x1U +#define HINIC5_MSI_CLR_INDIR_AUTO_MSK_SET_MASK 0x1U +#define HINIC5_MSI_CLR_INDIR_AUTO_MSK_CLR_MASK 0x1U +#define HINIC5_MSI_CLR_INDIR_SIMPLE_INDIR_IDX_MASK 0x3FFU + +#define HINIC5_MSI_CLR_INDIR_SET(val, member) \ + (((val) & HINIC5_MSI_CLR_INDIR_##member##_MASK) << \ + HINIC5_MSI_CLR_INDIR_##member##_SHIFT) + +/* EQ registers */ +#define HINIC5_AEQ_INDIR_IDX_ADDR (HINIC5_CFG_REGS_FLAG + 0x210) +#define HINIC5_CEQ_INDIR_IDX_ADDR (HINIC5_CFG_REGS_FLAG + 0x290) + +#define HINIC5_EQ_INDIR_IDX_ADDR(type) \ + ((type == HINIC5_AEQ) ? 
\ + HINIC5_AEQ_INDIR_IDX_ADDR : HINIC5_CEQ_INDIR_IDX_ADDR) + +#define HINIC5_AEQ_MTT_OFF_BASE_ADDR (HINIC5_CFG_REGS_FLAG + 0x240) +#define HINIC5_CEQ_MTT_OFF_BASE_ADDR (HINIC5_CFG_REGS_FLAG + 0x2C0) + +#define HINIC5_CSR_EQ_PAGE_OFF_STRIDE 8 + +#define HINIC5_AEQ_HI_PHYS_ADDR_REG(pg_num) \ + (HINIC5_AEQ_MTT_OFF_BASE_ADDR + \ + (pg_num) * HINIC5_CSR_EQ_PAGE_OFF_STRIDE) + +#define HINIC5_AEQ_LO_PHYS_ADDR_REG(pg_num) \ + (HINIC5_AEQ_MTT_OFF_BASE_ADDR + \ + (pg_num) * HINIC5_CSR_EQ_PAGE_OFF_STRIDE + 4) + +#define HINIC5_CEQ_HI_PHYS_ADDR_REG(pg_num) \ + (HINIC5_CEQ_MTT_OFF_BASE_ADDR + \ + (pg_num) * HINIC5_CSR_EQ_PAGE_OFF_STRIDE) + +#define HINIC5_CEQ_LO_PHYS_ADDR_REG(pg_num) \ + (HINIC5_CEQ_MTT_OFF_BASE_ADDR + \ + (pg_num) * HINIC5_CSR_EQ_PAGE_OFF_STRIDE + 4) + +#define HINIC5_CSR_AEQ_CTRL_0_ADDR (HINIC5_CFG_REGS_FLAG + 0x200) +#define HINIC5_CSR_AEQ_CTRL_1_ADDR (HINIC5_CFG_REGS_FLAG + 0x204) +#define HINIC5_CSR_AEQ_CONS_IDX_ADDR (HINIC5_CFG_REGS_FLAG + 0x208) +#define HINIC5_CSR_AEQ_PROD_IDX_ADDR (HINIC5_CFG_REGS_FLAG + 0x20C) +#define HINIC5_CSR_AEQ_CI_SIMPLE_INDIR_ADDR (HINIC5_CFG_REGS_FLAG + 0x50) + +#define HINIC5_CSR_CEQ_CTRL_0_ADDR (HINIC5_CFG_REGS_FLAG + 0x280) +#define HINIC5_CSR_CEQ_CTRL_1_ADDR (HINIC5_CFG_REGS_FLAG + 0x284) +#define HINIC5_CSR_CEQ_CONS_IDX_ADDR (HINIC5_CFG_REGS_FLAG + 0x288) +#define HINIC5_CSR_CEQ_PROD_IDX_ADDR (HINIC5_CFG_REGS_FLAG + 0x28c) +#define HINIC5_CSR_CEQ_CI_SIMPLE_INDIR_ADDR (HINIC5_CFG_REGS_FLAG + 0x54) + +/* API CMD registers */ +#define HINIC5_CSR_API_CMD_BASE (HINIC5_MGMT_REGS_FLAG + 0x2000) + +#define HINIC5_CSR_API_CMD_STRIDE 0x80 + +#define HINIC5_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(idx) \ + (HINIC5_CSR_API_CMD_BASE + 0x0 + (idx) * HINIC5_CSR_API_CMD_STRIDE) + +#define HINIC5_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(idx) \ + (HINIC5_CSR_API_CMD_BASE + 0x4 + (idx) * HINIC5_CSR_API_CMD_STRIDE) + +#define HINIC5_CSR_API_CMD_STATUS_HI_ADDR(idx) \ + (HINIC5_CSR_API_CMD_BASE + 0x8 + (idx) * HINIC5_CSR_API_CMD_STRIDE) + +#define 
HINIC5_CSR_API_CMD_STATUS_LO_ADDR(idx) \ + (HINIC5_CSR_API_CMD_BASE + 0xC + (idx) * HINIC5_CSR_API_CMD_STRIDE) + +#define HINIC5_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(idx) \ + (HINIC5_CSR_API_CMD_BASE + 0x10 + (idx) * HINIC5_CSR_API_CMD_STRIDE) + +#define HINIC5_CSR_API_CMD_CHAIN_CTRL_ADDR(idx) \ + (HINIC5_CSR_API_CMD_BASE + 0x14 + (idx) * HINIC5_CSR_API_CMD_STRIDE) + +#define HINIC5_CSR_API_CMD_CHAIN_PI_ADDR(idx) \ + (HINIC5_CSR_API_CMD_BASE + 0x1C + (idx) * HINIC5_CSR_API_CMD_STRIDE) + +#define HINIC5_CSR_API_CMD_CHAIN_REQ_ADDR(idx) \ + (HINIC5_CSR_API_CMD_BASE + 0x20 + (idx) * HINIC5_CSR_API_CMD_STRIDE) + +#define HINIC5_CSR_API_CMD_STATUS_0_ADDR(idx) \ + (HINIC5_CSR_API_CMD_BASE + 0x30 + (idx) * HINIC5_CSR_API_CMD_STRIDE) + +/* self test register */ +#define HINIC5_MGMT_HEALTH_STATUS_ADDR (HINIC5_MGMT_REGS_FLAG + 0x983c) + +#define HINIC5_CHIP_BASE_INFO_ADDR (HINIC5_MGMT_REGS_FLAG + 0xB02C) + +#define HINIC5_CHIP_ERR_STATUS0_ADDR (HINIC5_MGMT_REGS_FLAG + 0xC0EC) +#define HINIC5_CHIP_ERR_STATUS1_ADDR (HINIC5_MGMT_REGS_FLAG + 0xC0F0) + +#define HINIC5_ERR_INFO0_ADDR (HINIC5_MGMT_REGS_FLAG + 0xC0F4) +#define HINIC5_ERR_INFO1_ADDR (HINIC5_MGMT_REGS_FLAG + 0xC0F8) +#define HINIC5_ERR_INFO2_ADDR (HINIC5_MGMT_REGS_FLAG + 0xC0FC) + +#define HINIC5_MULT_HOST_SLAVE_STATUS_ADDR (HINIC5_MGMT_REGS_FLAG + 0xEA08) +#define HINIC5_MULT_MIGRATE_HOST_STATUS_ADDR (HINIC5_MGMT_REGS_FLAG + 0xEA0C) + +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_eqs.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_eqs.h new file mode 100644 index 00000000..330bd6aa --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_eqs.h @@ -0,0 +1,203 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_EQS_H +#define HINIC5_EQS_H + +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/workqueue.h> + +#include "hinic5_common.h" +#include "hinic5_crm.h" 
+#include "hinic5_hw.h" +#include "hinic5_hwdev.h" +#include "hinic5_hwif_inner.h" + +#define HINIC5_MAX_AEQS 4 +#define HINIC5_MAX_CEQS 32 + +#define HINIC5_AEQ_MAX_PAGES 4 +#define HINIC5_CEQ_MAX_PAGES 8 + +#define HINIC5_AEQE_SIZE 64 +#define HINIC5_CEQE_SIZE 4 + +#define HINIC5_AEQE_DESC_SIZE 4 +#define HINIC5_AEQE_DATA_SIZE \ + (HINIC5_AEQE_SIZE - HINIC5_AEQE_DESC_SIZE) + +#define HINIC5_AEQ_CPU_AFFINITY_MAX 16 + +#if defined(__UEFI__) +#define HINIC5_DEFAULT_AEQ_LEN 64 +#define HINIC5_DEFAULT_CEQ_LEN 1024 +#elif defined(__PANGEA_BINARY__) +#define HINIC5_DEFAULT_AEQ_LEN 64 +#define HINIC5_DEFAULT_CEQ_LEN 8192 +#elif (!defined(__PANGEA_BINARY__) && defined(__HIFC__)) +#define HINIC5_DEFAULT_AEQ_LEN 4096 +#define HINIC5_DEFAULT_CEQ_LEN 8192 +#else +#define HINIC5_DEFAULT_AEQ_LEN 0x10000 +#define HINIC5_DEFAULT_CEQ_LEN 0x10000 +#endif + +#define HINIC5_MIN_EQ_PAGE_SIZE 0x1000 /* min eq page size 4K Bytes */ +#define HINIC5_MAX_EQ_PAGE_SIZE 0x400000 /* max eq page size 4M Bytes */ + +#define HINIC5_MIN_AEQ_LEN 64 +#define HINIC5_MAX_AEQ_LEN \ + ((HINIC5_MAX_EQ_PAGE_SIZE / HINIC5_AEQE_SIZE) * HINIC5_AEQ_MAX_PAGES) + +#define HINIC5_MIN_CEQ_LEN 64 +#define HINIC5_MAX_CEQ_LEN \ + ((HINIC5_MAX_EQ_PAGE_SIZE / HINIC5_CEQE_SIZE) * HINIC5_CEQ_MAX_PAGES) +#define HINIC5_CEQ_ID_CMDQ 0 + +#define EQ_IRQ_NAME_LEN 64 + +#define EQ_USLEEP_LOW_BOUND 900 +#define EQ_USLEEP_HIG_BOUND 1000 + +enum hinic5_eq_type { + HINIC5_AEQ, + HINIC5_CEQ +}; + +enum hinic5_eq_intr_mode { + HINIC5_INTR_MODE_ARMED, + HINIC5_INTR_MODE_ALWAYS, +}; + +enum hinic5_eq_ci_arm_state { + HINIC5_EQ_NOT_ARMED, + HINIC5_EQ_ARMED, +}; + +struct hinic5_eq { + struct hinic5_hwdev *hwdev; + u16 q_id; + u16 rsvd1; + enum hinic5_eq_type type; + u32 page_size; + u32 orig_page_size; + u32 eq_len; + + u32 cons_idx; + u16 wrapped; + u16 rsvd2; + + u16 elem_size; + u16 num_pages; + u32 num_elem_in_pg; + + struct irq_info eq_irq; + char irq_name[EQ_IRQ_NAME_LEN]; + + struct hinic5_dma_addr_align *eq_pages; + + struct 
work_struct aeq_work; + struct tasklet_struct ceq_tasklet; + + u64 hard_intr_jif; + u64 soft_intr_jif; + + int cpu; + u64 rsvd3; +}; + +struct hinic5_aeq_elem { + u8 aeqe_data[HINIC5_AEQE_DATA_SIZE]; + u32 desc; +}; + +enum hinic5_aeq_cb_state { + HINIC5_AEQ_HW_CB_REG = 0, + HINIC5_AEQ_HW_CB_RUNNING, + HINIC5_AEQ_SW_CB_REG, + HINIC5_AEQ_SW_CB_RUNNING, +}; + +struct hinic5_stateless_aeqs { + hinic5_aeq_swe_cb stateless_aeq_swe_cb; + void *stateless_aeq_swe_cb_data; + ulong stateless_aeq_sw_cb_state; +}; + +struct hinic5_aeqs { + struct hinic5_hwdev *hwdev; + + hinic5_aeq_hwe_cb aeq_hwe_cb[HINIC5_MAX_AEQ_EVENTS]; + void *aeq_hwe_cb_data[HINIC5_MAX_AEQ_EVENTS]; + hinic5_aeq_swe_cb aeq_swe_cb[HINIC5_MAX_AEQ_SW_EVENTS]; + void *aeq_swe_cb_data[HINIC5_MAX_AEQ_SW_EVENTS]; + ulong aeq_hw_cb_state[HINIC5_MAX_AEQ_EVENTS]; + ulong aeq_sw_cb_state[HINIC5_MAX_AEQ_SW_EVENTS]; + + struct hinic5_eq aeq[HINIC5_MAX_AEQS]; + u16 num_aeqs; + u16 rsvd1; + u32 rsvd2; + + int aeq_cpu_affinity[HINIC5_AEQ_CPU_AFFINITY_MAX]; + unsigned int aeq_cpu_affinity_nargs; + + struct workqueue_struct *workq; +}; + +enum hinic5_ceq_cb_state { + HINIC5_CEQ_CB_REG = 0, + HINIC5_CEQ_CB_RUNNING, +}; + +struct hinic5_ceqs { + struct hinic5_hwdev *hwdev; + + hinic5_ceq_event_cb ceq_cb[HINIC5_MAX_CEQ_EVENTS]; + void *ceq_cb_data[HINIC5_MAX_CEQ_EVENTS]; + void *ceq_data[HINIC5_MAX_CEQ_EVENTS]; + ulong ceq_cb_state[HINIC5_MAX_CEQ_EVENTS]; + + struct hinic5_eq ceq[HINIC5_MAX_CEQS]; + u16 num_ceqs; + u16 rsvd1; + u32 rsvd2; +}; + +int hinic5_aeqs_init(struct hinic5_hwdev *hwdev, u16 num_aeqs, + struct irq_info *msix_entries); + +void hinic5_aeqs_free(struct hinic5_hwdev *hwdev); + +int hinic5_ceqs_init(struct hinic5_hwdev *hwdev, u16 num_ceqs, + struct irq_info *msix_entries); + +void hinic5_ceqs_free(struct hinic5_hwdev *hwdev); + +void hinic5_get_ceq_irqs(struct hinic5_hwdev *hwdev, struct irq_info *irqs, + u16 *num_irqs); + +void hinic5_get_aeq_irqs(struct hinic5_hwdev *hwdev, struct irq_info *irqs, + u16 
*num_irqs); + +void hinic5_dump_ceq_info(struct hinic5_hwdev *hwdev); + +void hinic5_dump_aeq_info(struct hinic5_hwdev *hwdev); + +int hinic5_reschedule_eq(struct hinic5_hwdev *hwdev, enum hinic5_eq_type type, + u16 eq_id); + +int hinic5_init_stateless_aeqs(void *hwdev); + +void hinic5_stateless_aeqs_free(void *hwdev); + +u8 hinic5_nic_sw_aeqe_handler(void *hwdev, u8 event, u8 *data); + +#if defined(__UEFI__) || defined(__VMWARE__) +void hinic5_simulated_irq_aeq(struct hinic5_hwdev *hwdev); +#elif defined(__WIN__) +bool hinic5_eq_intr_handler(void *hwdev, int msix_entry_idx); +#endif + +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_hw_api.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_hw_api.h new file mode 100644 index 00000000..cb72a6bb --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_hw_api.h @@ -0,0 +1,141 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_HW_API_H +#define HINIC5_HW_API_H + +#include <linux/types.h> + +#define CHIPIF_ACK 1 +#define CHIPIF_NOACK 0 + +#define CHIPIF_SM_CTR_OP_READ 0x2 +#define CHIPIF_SM_CTR_OP_READ_CLEAR 0x6 + +#define BIT_32 32 + +/* request head */ +union chipif_sml_ctr_req_head { + struct { + u32 pad : 15; + u32 ack : 1; + u32 op_id : 5; + u32 instance : 6; + u32 src : 5; + } bs; + + u32 value; +}; + +/* counter read request struct */ +struct chipif_sml_ctr_rd_req { + u32 extra; + union chipif_sml_ctr_req_head head; + u32 ctr_id; + u32 initial; + u32 pad; +}; + +struct hinic5_csr_request_api_data { + u32 dw0; + + union { + struct { + u32 reserved1 : 13; + /* this field indicates the write/read data size: + * 2'b00: 32 bits + * 2'b01: 64 bits + * 2'b10~2'b11:reserved + */ + u32 data_size : 2; + /* this field indicates that requestor expect receive a + * response data or not. + * 1'b0: expect not to receive a response data. + * 1'b1: expect to receive a response data. 
+ */ + u32 need_response : 1; + /* this field indicates the operation that the requestor + * expected. + * 5'b1_1110: write value to csr space. + * 5'b1_1111: read register from csr space. + */ + u32 operation_id : 5; + u32 reserved2 : 6; + /* this field specifies the Src node ID for this API + * request message. + */ + u32 src_node_id : 5; + } bits; + + u32 val32; + } dw1; + + union { + struct { + /* it specifies the CSR address. */ + u32 csr_addr : 26; + u32 reserved3 : 6; + } bits; + + u32 val32; + } dw2; + + /* if data_size=2'b01, it is high 32 bits of write data. else, it is + * 32'hFFFF_FFFF. + */ + u32 csr_write_data_h; + /* the low 32 bits of write data. */ + u32 csr_write_data_l; +}; + +/* counter read response union */ +union ctr_rd_rsp { + struct { + u32 value1 : 16; + u32 pad0 : 16; + u32 pad1[3]; + } bs_ss16_rsp; + + struct { + u32 value1; + u32 pad[3]; + } bs_ss32_rsp; + + struct { + u32 value1 : 20; + u32 pad0 : 12; + u32 value2 : 12; + u32 pad1 : 20; + u32 pad2[2]; + } bs_sp_rsp; + + struct { + u32 value1; + u32 value2; + u32 pad[2]; + } bs_bs64_rsp; + + struct { + u32 val1_h; + u32 val1_l; + u32 val2_h; + u32 val2_l; + } bs_bp64_rsp; +}; + +enum HINIC5_CSR_API_DATA_OPERATION_ID { + HINIC5_CSR_OPERATION_WRITE_CSR = 0x1E, + HINIC5_CSR_OPERATION_READ_CSR = 0x1F +}; + +enum HINIC5_CSR_API_DATA_NEED_RESPONSE_DATA { + HINIC5_CSR_NO_RESP_DATA = 0, + HINIC5_CSR_NEED_RESP_DATA = 1 +}; + +enum HINIC5_CSR_API_DATA_DATA_SIZE { + HINIC5_CSR_DATA_SZ_32 = 0, + HINIC5_CSR_DATA_SZ_64 = 1 +}; + +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_hwdev.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_hwdev.h new file mode 100644 index 00000000..49703de6 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_hwdev.h @@ -0,0 +1,355 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_HWDEV_H +#define HINIC5_HWDEV_H + +#include 
"hinic5_mt.h" +#include "hinic5_crm.h" +#include "hinic5_hw.h" +#include "hinic5_profile.h" +#include "hinic5_common.h" +#include "hinic5_chip_info.h" +#include "hinic5_vram_common.h" + +#ifndef __UEFI__ +#include <linux/mutex.h> +#include <linux/timer.h> +#include <linux/workqueue.h> +#endif + +#include <linux/device.h> + +#define sdk_err(dev, format, ...) dev_err(dev, "[COMM]" format, ##__VA_ARGS__) +#define sdk_warn(dev, format, ...) dev_warn(dev, "[COMM]" format, ##__VA_ARGS__) +#define sdk_notice(dev, format, ...) dev_notice(dev, "[COMM]" format, ##__VA_ARGS__) +#define sdk_info(dev, format, ...) dev_info(dev, "[COMM]" format, ##__VA_ARGS__) + +struct cfg_mgmt_info; + +struct hinic5_hwif; +struct hinic5_aeqs; +struct hinic5_ceqs; +struct hinic5_mbox; +struct hinic5_msg_pf_to_mgmt; +struct hinic5_hwdev; +struct hinic5_wq; +struct sdk_cmdq_wqe_desc; + +#define HINIC5_CHANNEL_DETECT_PERIOD (5 * 1000) +#define HINIC5_CHANNEL_DETECT_MAX_BUSY (3) + +/**< 系统和芯片时间同步周期单位毫秒 */ +#define HINIC5_NON_PTP_SYNC_FW_TIME_PERIOD (500) + +/** + * @brief 定义一个函数指针类型,用于处理hinic5事件 + * @param handle 设备句柄 + * @param event 事件信息 + * + * @return 无 + */ +typedef void (*hinic5_event_handler)(void *handle, struct hinic5_event_info *event); + +struct hinic5_page_addr { + void *virt_addr; + u64 phys_addr; +}; + +struct mqm_addr_trans_tbl_info { + u32 chunk_num; + u32 search_gpa_num; + u32 page_size; + u32 page_num; + struct hinic5_dma_addr_align *brm_srch_page_addr; +}; + +struct hinic5_devlink { + struct hinic5_hwdev *hwdev; + u8 activate_fw; /* 0 ~ 7 */ + u8 switch_cfg; /* 0 ~ 7 */ +}; + +enum hinic5_func_mode { + /* single host */ + FUNC_MOD_NORMAL_HOST, + /* multi host, bare-metal, sdi side */ + FUNC_MOD_MULTI_BM_MASTER, + /* multi host, bare-metal, host side */ + FUNC_MOD_MULTI_BM_SLAVE, + /* multi host, vm mode, sdi side */ + FUNC_MOD_MULTI_VM_MASTER, + /* multi host, vm mode, host side */ + FUNC_MOD_MULTI_VM_SLAVE, +}; + +enum hinic5_pcie_nosnoop { + HINIC5_PCIE_SNOOP = 0, + 
HINIC5_PCIE_NO_SNOOP = 1, +}; + +enum hinic5_pcie_tph { + HINIC5_PCIE_TPH_DISABLE = 0, + HINIC5_PCIE_TPH_ENABLE = 1, +}; + +enum hinic5_perf_bitmap { + HINIC5_CMDQ_PERF = 0, + HINIC5_MAILBOX_PERF = 1, +}; + +#define IS_BMGW_MASTER_HOST(hwdev) \ + ((hwdev)->func_mode == FUNC_MOD_MULTI_BM_MASTER) +#define IS_BMGW_SLAVE_HOST(hwdev) \ + ((hwdev)->func_mode == FUNC_MOD_MULTI_BM_SLAVE) +#define IS_VM_MASTER_HOST(hwdev) \ + ((hwdev)->func_mode == FUNC_MOD_MULTI_VM_MASTER) +#define IS_VM_SLAVE_HOST(hwdev) \ + ((hwdev)->func_mode == FUNC_MOD_MULTI_VM_SLAVE) + +#define IS_MASTER_HOST(hwdev) \ + (IS_BMGW_MASTER_HOST(hwdev) || IS_VM_MASTER_HOST(hwdev)) + +#define IS_SLAVE_HOST(hwdev) \ + (IS_BMGW_SLAVE_HOST(hwdev) || IS_VM_SLAVE_HOST(hwdev)) + +#define IS_MULTI_HOST(hwdev) \ + (IS_BMGW_MASTER_HOST(hwdev) || IS_BMGW_SLAVE_HOST(hwdev) || \ + IS_VM_MASTER_HOST(hwdev) || IS_VM_SLAVE_HOST(hwdev)) + +#define NEED_MBOX_FORWARD(hwdev) IS_BMGW_SLAVE_HOST(hwdev) + +enum hinic5_host_mode_e { + HINIC5_MODE_NORMAL = 0, + HINIC5_SDI_MODE_VM, + HINIC5_SDI_MODE_BM, + HINIC5_SDI_MODE_MAX, +}; + +struct mqm_eqm_hinic5_vram_name_s { + char hinic5_vram_name[HINIC5_VRAM_NAME_MAX_LEN]; +}; + +struct hinic5_sdk_timeout_info { + enum hinic5_hw_type hw_type; /**< 硬件类型 FPGA etc. */ + const char *hw_type_desc; /**< 硬件类型字符串表示. 
*/ + u32 mbox_poll_timeout; /* < 等待cpi回写mailbox status超时时间 */ + u32 mbox_timeout; /**< 等待mailbox ack response超时时间 */ + u32 cmdq_timeout; /**< cmdq 超时时间 */ +}; + +struct hinic5_hwdev { + void *adapter_hdl; /* pointer to hinic5_adev or NDIS_Adapter */ +#ifdef __UEFI__ + void *busdev_hdl; /* pointer to pcidev or ub dev */ +#endif + void *dev_hdl; /* pointer to pcidev->dev or Handler, for + * sdk_err() or dma_alloc() + */ + + void *service_adapter[SERVICE_T_MAX]; + void *chip_node; + void *ppf_hwdev; + + u32 wq_page_size; + int chip_present_flag; + bool poll; /* use polling mode or int mode */ + u32 rsvd1; + + struct hinic5_hwif *hwif; /* include void __iomem *bar */ + struct comm_global_attr glb_attr; + u64 features[COMM_MAX_FEATURE_QWORD]; + + struct cfg_mgmt_info *cfg_mgmt; + + struct hinic5_cmdqs *cmdqs; + struct hinic5_stateless_aeqs *stateless_aeqs; + struct hinic5_aeqs *aeqs; + struct hinic5_ceqs *ceqs; + struct hinic5_mbox *func_to_func; + struct hinic5_msg_pf_to_mgmt *pf_to_mgmt; + struct hinic5_clp_pf_to_mgmt *clp_pf_to_mgmt; + + void *fw_update_hdl; + + void *hinic5_cqm_hdl; + struct mqm_addr_trans_tbl_info mqm_att; + struct hinic5_page_addr page_pa0; + struct hinic5_page_addr page_pa1; + u32 stateful_ref_cnt; + u32 rsvd2; + + struct mqm_eqm_hinic5_vram_name_s *mqm_eqm_hinic5_vram_name; + + struct mutex stateful_mutex; /* protect hinic5_cqm init and deinit */ + + struct hinic5_hw_stats hw_stats; + u8 *chip_fault_stats; + + hinic5_event_handler event_callback; + void *event_pri_handle; + + struct hinic5_board_info board_info; + +#if !defined(__UEFI__) && !defined(__VMWARE__) && !defined(__WIN__) + struct delayed_work sync_time_task; + struct delayed_work sync_kernel_time_task; + struct delayed_work channel_detect_task; + void *non_ptp_cdev; // TODO: 待解决结构体定义引用 +#endif + + struct hinic5_prof_attr *prof_attr; + const struct hinic5_prof_adapter *prof_adap; + + struct workqueue_struct *workq; + + u32 rd_bar_err_cnt; + u32 linkdown_threshold; + u32 
heartbeat_period; + atomic_t bus_link_down; + atomic_t heartbeat_lost; + struct timer_list heartbeat_timer; + struct work_struct heartbeat_lost_work; + atomic_t check_ob_flush_bypass_ref_cnt; + + ulong func_state; + spinlock_t channel_lock; /* protect channel init and deinit */ + + u16 probe_fault_level; + + struct hinic5_devlink *devlink_dev; + + enum hinic5_func_mode func_mode; + u32 rsvd3; + + u64 cur_recv_aeq_cnt; + u64 last_recv_aeq_cnt; + u32 aeq_busy_cnt; + u32 max_aeq_busy_cnt; + u8 rsvd4[52]; + + u64 mbox_send_cnt; + u64 mbox_ack_cnt; + + u8 cmdq_mode; + u8 cmdq_cos_offset; + u8 rsvd5[5]; // 热补丁预留 + struct hisdk5_fast_msg_to_func *fast_msg_to_func; + const struct hinic5_sdk_timeout_info *timeout_info; +}; + +#define HINIC5_DRV_FEATURE_QW0 \ + (COMM_F_API_CHAIN | COMM_F_CLP | COMM_F_MBOX_SEGMENT | \ + COMM_F_CMDQ_NUM | COMM_F_VIRTIO_VQ_SIZE | COMM_F_EXTEND_CAP | \ + COMM_F_SMF_CACHE_INVALID | COMM_F_ONLY_ENHANCE_CMDQ | \ + COMM_F_USE_REAL_RX_BUF_SIZE | COMM_F_CMD_BUF_SIZE | \ + COMM_F_HTN_CMD | COMM_F_MBOX_MSG_HEAD_SUPP_VER1 | COMM_F_FAST_MSG | \ + COMM_F_UFHD | COMM_F_VIRTIO_FC_CACHE_MODE | COMM_F_NON_PTP_SYNC | \ + COMM_F_HT_GPA | COMM_F_UFHD_FLEX_SEG) + +#define HINIC5_MAX_HOST_NUM(hwdev) ((hwdev)->glb_attr.max_host_num) +#define HINIC5_MAX_PF_NUM(hwdev) ((hwdev)->glb_attr.max_pf_num) +#define HINIC5_MGMT_CPU_NODE_ID(hwdev) ((hwdev)->glb_attr.mgmt_host_node_id) + +#define COMM_FEATURE_QW0(hwdev, feature) (((hwdev)->features[0] & COMM_F_##feature) != 0) +#define COMM_SUPPORT_API_CHAIN(hwdev) COMM_FEATURE_QW0(hwdev, API_CHAIN) +#define COMM_SUPPORT_CLP(hwdev) COMM_FEATURE_QW0(hwdev, CLP) +#define COMM_SUPPORT_CHANNEL_DETECT(hwdev) COMM_FEATURE_QW0(hwdev, CHANNEL_DETECT) +#define COMM_SUPPORT_CMDQ_NUM(hwdev) COMM_FEATURE_QW0(hwdev, CMDQ_NUM) +#define COMM_SUPPORT_CMD_BUF_SIZE(hwdev) COMM_FEATURE_QW0(hwdev, CMD_BUF_SIZE) +#define COMM_SUPPORT_VIRTIO_VQ_SIZE(hwdev) COMM_FEATURE_QW0(hwdev, VIRTIO_VQ_SIZE) +#define COMM_IS_USE_REAL_RX_BUF_SIZE(hwdev) 
COMM_FEATURE_QW0(hwdev, USE_REAL_RX_BUF_SIZE) +#define COMM_SUPPORT_EXTEND_CAPBILITY(hwdev) COMM_FEATURE_QW0(hwdev, EXTEND_CAP) +#define COMM_SUPPORT_SMF_CACHE_INVALID(hwdev) COMM_FEATURE_QW0(hwdev, SMF_CACHE_INVALID) +#define COMM_SUPPORT_ONLY_ENHANCE_CMDQ(hwdev) COMM_FEATURE_QW0(hwdev, ONLY_ENHANCE_CMDQ) +#define COMM_SUPPORT_HTN_CMD(hwdev) COMM_FEATURE_QW0(hwdev, HTN_CMD) +#define COMM_SUPPORT_FAST_MSG(hwdev) COMM_FEATURE_QW0(hwdev, FAST_MSG) +#define COMM_SUPPORT_MBOX_HEAD_VER1(hwdev) COMM_FEATURE_QW0(hwdev, MBOX_MSG_HEAD_SUPP_VER1) +#define COMM_SUPPORT_UFHD(hwdev) COMM_FEATURE_QW0(hwdev, UFHD) +#define COMM_SUPPORT_VIRTIO_FC_CACHE(hwdev) COMM_FEATURE_QW0(hwdev, VIRTIO_FC_CACHE_MODE) +#define COMM_SUPPORT_NON_PTP_SYNC(hwdev) COMM_FEATURE_QW0(hwdev, NON_PTP_SYNC) +#define COMM_SUPPORT_HT_GPA(hwdev) COMM_FEATURE_QW0(hwdev, HT_GPA) +#define COMM_SUPPORT_UFHD_FLEX_SEG(hwdev) COMM_FEATURE_QW0(hwdev, UFHD_FLEX_SEG) + +bool hinic5_get_perf_en(enum hinic5_perf_bitmap perf_bit); + +#define HINIC5_CHIP_PRESENT 1 +#define HINIC5_CHIP_ABSENT 0 + +/** + * The chip will be absent when + * - link down + * - PCI shutdown + * - PCI reset done + */ +static inline bool hinic5_is_chip_present(const struct hinic5_hwdev *hwdev) +{ + return hwdev->chip_present_flag == HINIC5_CHIP_PRESENT; +} + +/** + * The chip will be error when + * - heartbeat lost + * - Level-2 or lower chip faults, see enum hinic5_fault_err_level + */ +static inline bool hinic5_is_chip_error(const struct hinic5_hwdev *hwdev) +{ + struct card_node *chip_info = (struct card_node *)hwdev->chip_node; + + return chip_info->exception_flag; +} + +static inline bool hinic5_channel_detect_should_stop(const struct hinic5_hwdev *hwdev) +{ + struct card_node *chip_node = (struct card_node *)hwdev->chip_node; + + return atomic_read(&chip_node->channel_busy_cnt) >= HINIC5_CHANNEL_DETECT_MAX_BUSY; +} + +/** + * @brief hinic5_event_register - register hardware event + * @param dev: device pointer to hwdev + * @param pri_handle: 
private data will be used by the callback + * @param callback: callback function + * + * @return 0:成功,非0:错误码 + */ +int hinic5_event_register(void *dev, void *pri_handle, hinic5_event_handler callback); + +/** + * @brief hinic5_event_unregister - unregister hardware event + * @param dev: device pointer to hwdev + */ +void hinic5_event_unregister(void *dev); + +bool hinic5_check_htn_device_id(void *hwdev); + +void *hinic5_get_ppf_dev(void); +bool hinic5_is_function_active(struct hinic5_hwdev *hwdev); + +/** + * @brief Dump CMDQ 工作队列 wqebb + * @param[in] hwdev 硬件设备 + * @param[in] cmdq_id 查询的 CMDQ id + * @param[in] wqe_idx 查询的 wqebb idx + * @param[out] wqe_desc 查询到的 wqebb 信息 + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_dump_cmdq_wqebb(struct hinic5_hwdev *hwdev, u16 cmdq_id, u16 wqe_idx, + struct sdk_cmdq_wqe_desc *wqe_desc); + +/** + * @brief Dump CMDQ 工作队列信息 + * @param[in] hwdev 硬件设备 + * @param[in] cmdq_id 查询的 CMDQ id + * @param[out] wq 查询到的 CMDQ 工作队列信息 + * + * @return 是否成功 + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_dump_cmdq_wq(struct hinic5_hwdev *hwdev, u16 cmdq_id, struct hinic5_wq *wq); +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_hwif_inner.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_hwif_inner.h new file mode 100644 index 00000000..1323eba9 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_hwif_inner.h @@ -0,0 +1,321 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_HWIF_INNER_H +#define HINIC5_HWIF_INNER_H + +#include "hinic5_hwdev.h" + +#define HINIC5_BUS_LINK_DOWN 0xFFFFFFFF +#define MAKE_64BITS(hi, lo) ((((u64)(hi)) << 32) | ((u64)((u32)(lo)))) + +struct hinic5_free_db_area { + unsigned long *db_bitmap_array; + u32 db_max_areas; + /* spinlock for allocating doorbell area */ + spinlock_t idx_lock; +}; + +struct 
hinic5_func_attr { + u16 func_global_idx; + u8 port_to_port_idx; + u8 pci_intf_idx; + u8 vf_in_pf; + u8 rsvd1; + u16 rsvd2; + enum func_type func_type; + + u8 mpf_idx; + + u8 ppf_idx; + + u16 num_irqs; /* max: 2 ^ 15 */ + u8 num_aeqs; /* max: 2 ^ 3 */ + u8 num_ceqs; /* max: 2 ^ 7 */ + + u16 num_sq; /* max: 2 ^ 8 */ + u8 num_dma_attr; /* max: 2 ^ 6 */ + u8 msix_flex_en; + + u16 global_vf_id_of_pf; + u8 hw_type; +}; + +struct hinic5_hwif { + u8 __iomem *fers2_reg_base; + u8 __iomem *cfg_regs_base; + u8 __iomem *intr_regs_base; + u8 __iomem *mgmt_regs_base; /* only for PPF/PF */ + u64 db_base_phy; + u64 db_dwqe_len; + u8 __iomem *db_base; + + struct hinic5_free_db_area free_db_area; + + struct hinic5_func_attr attr; + +#ifdef __UEFI__ + void *bus_dev; /* pcie场景下代表的是pdev ub场景下代表ub dev */ +#endif + void *hwdev; + + u64 rsvd; +}; + +enum outbound_flush_state { + OUTBOUND_FLUSH_DISABLED = 0, + OUTBOUND_FLUSH_ENABLED = 1, +}; + +enum doorbell_flush_state { + DOORBELL_FLUSH_DISABLED = 0, + DOORBELL_FLUSH_ENABLED = 1, +}; + +enum hinic5_wait_return check_outbound_enable_handler(struct hinic5_hwdev *hwdev); + +enum hinic5_pf_status { + HINIC5_PF_STATUS_INIT = 0X0, + HINIC5_PF_STATUS_ACTIVE_FLAG = 0x11, + HINIC5_PF_STATUS_FLR_START_FLAG = 0x12, + HINIC5_PF_STATUS_FLR_FINISH_FLAG = 0x13, +}; + +#define HINIC5_HWIF_NUM_AEQS(hwif) ((hwif)->attr.num_aeqs) +#define HINIC5_HWIF_NUM_CEQS(hwif) ((hwif)->attr.num_ceqs) +#define HINIC5_HWIF_NUM_IRQS(hwif) ((hwif)->attr.num_irqs) +#define HINIC5_HWIF_GLOBAL_IDX(hwif) ((hwif)->attr.func_global_idx) +#define HINIC5_HWIF_GLOBAL_VF_OFFSET(hwif) ((hwif)->attr.global_vf_id_of_pf) +#define HINIC5_HWIF_PPF_IDX(hwif) ((hwif)->attr.ppf_idx) +#define HINIC5_PCI_INTF_IDX(hwif) ((hwif)->attr.pci_intf_idx) + +#define HINIC5_FUNC_TYPE(dev) ((dev)->hwif->attr.func_type) +#define HINIC5_IS_PF(dev) (HINIC5_FUNC_TYPE(dev) == TYPE_PF) +#define HINIC5_IS_VF(dev) (HINIC5_FUNC_TYPE(dev) == TYPE_VF) +#define HINIC5_IS_PPF(dev) (HINIC5_FUNC_TYPE(dev) == 
TYPE_PPF) + +struct hinic5_health_status { + u32 rsvd : 7; + u32 fw_img_load_fail : 1; + u32 smu_lastword : 1; + u32 npu_lastword : 1; + u32 mpu_wdog : 1; + u32 mpu_lastword : 1; + u32 wr_phy_timeout : 1; + u32 wr_mem_timeout : 1; + u32 wr_reg_timeout : 1; + u32 sfp_high_temperature_port : 4; + u32 chip_low_temperature : 1; + u32 chip_high_temperature : 1; + u32 logic_except : 1; + u32 host_heart : 5; + u32 mpu_init_done : 2; + u32 mpu_boot_cause : 3; +}; + +struct hinic5_chip_base { + u32 chip_type : 2; + u32 chip_ver : 2; + u32 spu_en : 1; + u32 host_num : 3; + u32 cfg_template_id : 4; + u32 board_type : 8; + u32 board_id : 4; + u32 mpu_ver : 8; +}; + +struct hinic5_chip_info { + union { + struct hinic5_health_status health_status; + struct hinic5_chip_base chip_base; + u32 value; + }; +}; + +struct hinic5_logic_except { + u32 err_type : 16; + u32 err_level : 8; + u32 mode_id : 8; +}; + +struct hinic5_temperature_alarm { + u32 cur_temperature : 16; + u32 limit_temperature : 16; +}; + +struct hinic5_mpu_exception { + u32 abnormal_thread_id : 16; + u32 abnormal_reason : 16; +}; + +struct hinic5_sfp_high_temperature_port { + u32 front_actual_temperature : 8; + u32 front_alarm_threshold_temperature : 8; + u32 after_actual_temperature : 8; + u32 after_alarm_threshold_temperature : 8; +}; + +struct hinic5_eco0_info { + u32 stfqu_uncrt_err : 1; + u32 pqm_uncrt_err : 1; + u32 mqm_uncrt_err : 1; + u32 stlqu_uncrt_err : 1; + u32 smf_uncrt_err : 4; + u32 sml_uncrt_err : 4; + u32 stftile_uncrt_err : 4; + u32 stltile_uncrt_err : 4; + u32 mpu_uncrt_err : 1; + u32 cpi_uncrt_err : 1; + u32 lcam_uncrt_err : 1; + u32 ipsutx_uncrt_err : 1; + u32 perx_uncrt_err : 1; + u32 ipsurx_uncrt_err : 1; + u32 petx_uncrt_err : 1; + u32 cpb_uncrt_err : 1; + u32 ckd_err_int : 2; + u32 pcie_uncrt_err : 1; + u32 cryptorx_uncrt_err : 1; +}; + +struct hinic5_eco1_info { + u32 cryptotx_uncrt_err : 1; + u32 ts_uncrt_err : 1; + u32 mag_uncrt_err : 1; + u32 fc_uncrt_err : 1; + u32 hva_uncrt_err : 1; + 
u32 reserved : 27; +}; + +struct hinic5_eco2_info { + union { + struct hinic5_logic_except logic_except; + struct hinic5_temperature_alarm temperature_alarm; + struct hinic5_mpu_exception mpu_exception; + u32 value; + u16 short_value; + }; +}; + +struct hinic5_eco3_info { + union { + struct hinic5_sfp_high_temperature_port sfp_high_temperature_port; + u32 value; + }; +}; + +struct hinic5_eco4_info { + union { + struct hinic5_sfp_high_temperature_port sfp_high_temperature_port; + u32 value; + }; +}; + +u32 hinic5_hwif_read_reg(struct hinic5_hwif *hwif, u32 reg); + +void hinic5_hwif_write_reg(struct hinic5_hwif *hwif, u32 reg, u32 val); + +void hinic5_set_pf_status(struct hinic5_hwif *hwif, + enum hinic5_pf_status status); + +enum hinic5_pf_status hinic5_get_pf_status(struct hinic5_hwif *hwif); + +void hinic5_disable_doorbell(struct hinic5_hwif *hwif); + +void hinic5_enable_doorbell(struct hinic5_hwif *hwif); + +int hinic5_init_hwif(struct hinic5_hwdev *hwdev, void *fers2_reg_base, void *cfg_reg_base, + void *intr_reg_base, void *mgmt_regs_base, u64 db_base_phy, + void *db_base, u64 db_dwqe_len); + +void hinic5_free_hwif(struct hinic5_hwdev *hwdev); + +void hinic5_show_chip_err_info(struct hinic5_hwdev *hwdev); + +u8 hinic5_host_ppf_idx(struct hinic5_hwdev *hwdev, u8 host_id); + +bool get_card_present_state(struct hinic5_hwdev *hwdev); + +bool get_handshake_state(struct hinic5_hwdev *hwdev); + +int hinic5_n_ptp_ts_up_en(struct hinic5_hwdev *hwdev, u32 flags); + +int hinic5_read_n_ptp_ts_data(struct hinic5_hwdev *hwdev, u64 *time_ns); + +/** + * @brief enum hinic5_aeq_type - CPI hardware生成的AEQ事件类型 + * @details aeqe.sw 属性为0(aeqe由cpi hardware产生的)支持的事件类型 + */ +enum hinic5_aeq_type { + HINIC5_HW_INTER_INT = 0, /**< 硬件中断事件 */ + HINIC5_MBX_FROM_FUNC = 1, /**< 来自function的mailbox */ + HINIC5_MSG_FROM_MGMT_CPU = 2, /**< 来自MPU的mailbox */ + HINIC5_API_RSP = 3, /**< API response data */ + HINIC5_API_CHAIN_STS = 4, /**< API chain status data */ + HINIC5_MBX_SEND_RSLT = 5, /**< 
mailbox sending result */ + HINIC5_MAX_AEQ_EVENTS /**< 支持的事件类型个数 */ +}; + +/** + * @brief enum hinic5_aeq_sw_type - 微码(Tile)生成的AEQ事件类型 + * @details aeqe.sw 属性为1(aeqe有微码产生的)支持的事件类型 + */ +enum hinic5_aeq_sw_type { + HINIC5_STATELESS_EVENT = 0, /**< 无状态事件 */ + HINIC5_STATEFUL_EVENT = 1, /**< 有状态事件 */ + HINIC5_MAX_AEQ_SW_EVENTS /**< 支持的事件类型个数 */ +}; + +/** + * @brief 定义一个函数指针类型,用于处理AEQ中断 + * @param pri_handle 设备句柄 + * @param data 中断数据 + * @param size 中断数据大小 + * + * @return 无 + */ +typedef void (*hinic5_aeq_hwe_cb)(void *pri_handle, u8 *data, u8 size); + +/** + * @brief hinic5_aeq_register_hw_cb - register aeq hardware callback + * @param hwdev: device pointer to hwdev + * @param event: event type + * @param hwe_cb: callback function + * + * @return + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_aeq_register_hw_cb(void *hwdev, void *pri_handle, + enum hinic5_aeq_type event, hinic5_aeq_hwe_cb hwe_cb); + +/** + * @brief hinic5_aeq_unregister_hw_cb - unregister aeq hardware callback + * + * @return + * @param hwdev: device pointer to hwdev + * @param event: event type + */ +void hinic5_aeq_unregister_hw_cb(void *hwdev, enum hinic5_aeq_type event); + +/** + * @brief hinic5_aeq_register_swe_cb - register aeq soft event callback + * @param hwdev: device pointer to hwdev + * @pri_handle: the pointer to private invoker device + * @param event: event type + * @param aeq_swe_cb: callback function + * + * @return + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_aeq_register_swe_cb(void *hwdev, void *pri_handle, enum hinic5_aeq_sw_type event, + hinic5_aeq_swe_cb aeq_swe_cb); + +/** + * @brief hinic5_aeq_unregister_swe_cb - unregister aeq soft event callback + * @param hwdev: device pointer to hwdev + * @param event: event type + **/ +void hinic5_aeq_unregister_swe_cb(void *hwdev, enum hinic5_aeq_sw_type event); + +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_lld_inner.h 
b/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_lld_inner.h new file mode 100644 index 00000000..c51a7443 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_lld_inner.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2025 Huawei Technologies Co., Ltd */ + +#ifndef __HINIC5_LLD_INNER_H +#define __HINIC5_LLD_INNER_H + +#include "hinic5_crm.h" +#include "hinic5_lld.h" + +/** + * @brief 获取全部ULD名称数组 + * + * @return 返回ULD名称数组指针 + */ +const char **hinic5_get_uld_names(void); + +/** + * @brief hinic5_get_uld_info_by_type - get udl info by service type + * @param type: service type + * + * @return uld_info + **/ +const struct hinic5_uld_info *hinic5_get_uld_info_by_type(enum hinic5_service_type type); + +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_mbox.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_mbox.h new file mode 100644 index 00000000..9b5dddc3 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_mbox.h @@ -0,0 +1,379 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_MBOX_H +#define HINIC5_MBOX_H + +#include <linux/workqueue.h> +#include <linux/mutex.h> + +#include "comm_defs.h" +#include "hinic5_crm.h" +#include "mpu_mailbox_msg_header.h" +#include "hinic5_hwdev.h" + +#define HINIC5_MBOX_PF_SEND_ERR 0x1 + +#define HINIC5_MGMT_SRC_ID 0x1FFF +#define HINIC5_MAX_FUNCTIONS 4096 +#define MBOX_SEG_LEN_UNIT 0x4 +#define MBOX_MSG_LEN_UNIT 0x4 + +#define HINIC5_MSG_HEADER_SRC_GLB_FUNC_IDX_SHIFT 0 +#define HINIC5_MSG_HEADER_STATUS_SHIFT 13 +#define HINIC5_MSG_HEADER_VERSION_SHIFT 14 +#define HINIC5_MSG_HEADER_SOURCE_SHIFT 15 +#define HINIC5_MSG_HEADER_AEQ_ID_SHIFT 16 +#define HINIC5_MSG_HEADER_MSG_ID_SHIFT 18 +#define HINIC5_MSG_HEADER_CMD_SHIFT 22 + +#define HINIC5_MSG_HEADER_V0_MSG_LEN_SHIFT 32 +#define HINIC5_MSG_HEADER_V1_MSG_LEN_SHIFT 32 +#define 
HINIC5_MSG_HEADER_V0_MODULE_SHIFT 43 +#define HINIC5_MSG_HEADER_V1_MODULE_SHIFT 43 +#define HINIC5_MSG_HEADER_V0_SEG_LEN_SHIFT 48 +#define HINIC5_MSG_HEADER_V1_SEG_LEN_SHIFT 50 +#define HINIC5_MSG_HEADER_NO_ACK_SHIFT 54 +#define HINIC5_MSG_HEADER_DATA_TYPE_SHIFT 55 +#define HINIC5_MSG_HEADER_SEQID_SHIFT 56 +#define HINIC5_MSG_HEADER_LAST_SHIFT 62 +#define HINIC5_MSG_HEADER_DIRECTION_SHIFT 63 + +#define HINIC5_MSG_HEADER_SRC_GLB_FUNC_IDX_MASK 0x1FFF +#define HINIC5_MSG_HEADER_STATUS_MASK 0x1 +#define HINIC5_MSG_HEADER_VERSION_MASK 0x1 +#define HINIC5_MSG_HEADER_SOURCE_MASK 0x1 +#define HINIC5_MSG_HEADER_AEQ_ID_MASK 0x3 +#define HINIC5_MSG_HEADER_MSG_ID_MASK 0xF +#define HINIC5_MSG_HEADER_CMD_MASK 0x3FF + +#define HINIC5_MSG_HEADER_V0_MSG_LEN_MASK 0x7FF +#define HINIC5_MSG_HEADER_V1_MSG_LEN_MASK 0x1FF +#define HINIC5_MSG_HEADER_V0_MODULE_MASK 0x1F +#define HINIC5_MSG_HEADER_V1_MODULE_MASK 0x3F +#define HINIC5_MSG_HEADER_V0_SEG_LEN_MASK 0x3F +#define HINIC5_MSG_HEADER_V1_SEG_LEN_MASK 0xF +#define HINIC5_MSG_HEADER_NO_ACK_MASK 0x1 +#define HINIC5_MSG_HEADER_DATA_TYPE_MASK 0x1 +#define HINIC5_MSG_HEADER_SEQID_MASK 0x3F +#define HINIC5_MSG_HEADER_LAST_MASK 0x1 +#define HINIC5_MSG_HEADER_DIRECTION_MASK 0x1 + +#define HINIC5_MSG_HEADER_GET(val, field) \ + (((val) >> HINIC5_MSG_HEADER_##field##_SHIFT) & \ + HINIC5_MSG_HEADER_##field##_MASK) +#define HINIC5_MSG_HEADER_SET(val, field) \ + ((u64)(((u64)(val)) & HINIC5_MSG_HEADER_##field##_MASK) << \ + HINIC5_MSG_HEADER_##field##_SHIFT) + +#define HINIC5_MSG_HEADER_GET_V0(val, field) \ + (((val) >> HINIC5_MSG_HEADER_V0_##field##_SHIFT) & \ + HINIC5_MSG_HEADER_V0_##field##_MASK) +#define HINIC5_MSG_HEADER_SET_V0(val, field) \ + ((u64)(((u64)(val)) & HINIC5_MSG_HEADER_V0_##field##_MASK) << \ + HINIC5_MSG_HEADER_V0_##field##_SHIFT) + +#define HINIC5_MSG_HEADER_GET_V1(val, field) \ + (((val) >> HINIC5_MSG_HEADER_V1_##field##_SHIFT) & \ + HINIC5_MSG_HEADER_V1_##field##_MASK) +#define HINIC5_MSG_HEADER_SET_V1(val, field) \ + 
((u64)(((u64)(val)) & HINIC5_MSG_HEADER_V1_##field##_MASK) << \ + HINIC5_MSG_HEADER_V1_##field##_SHIFT) + +enum hinic5_msg_direction_type { + HINIC5_MSG_DIRECT_SEND = 0, + HINIC5_MSG_RESPONSE = 1, +}; + +enum hinic5_msg_segment_type { + NOT_LAST_SEGMENT = 0, + LAST_SEGMENT = 1, +}; + +enum hinic5_msg_ack_type { + HINIC5_MSG_ACK, + HINIC5_MSG_NO_ACK, +}; + +enum hinic5_data_type { + HINIC5_DATA_INLINE = 0, + HINIC5_DATA_DMA = 1, +}; + +enum hinic5_msg_src_type { + HINIC5_MSG_FROM_MGMT = 0, + HINIC5_MSG_FROM_MBOX = 1, +}; + +enum hinic5_msg_aeq_type { + HINIC5_ASYNC_MSG_AEQ = 0, + /* indicate dest func or mgmt cpu which aeq to response mbox message */ + HINIC5_MBOX_RSP_MSG_AEQ = 1, + /* indicate mgmt cpu which aeq to response api cmd message */ + HINIC5_MGMT_RSP_MSG_AEQ = 2, +}; + +#define HINIC5_MBOX_WQ_NAME "hinic5_mbox" + +struct mbox_msg_info { + u64 header; + u8 msg_id; + u8 status; /* can only use 1 bit */ +}; + +struct hinic5_msg_desc { + void *msg; + u16 msg_len; + u8 seq_id; + u8 mod; + u16 cmd; + struct mbox_msg_info msg_info; +}; + +struct hinic5_msg_channel { + struct hinic5_msg_desc resp_msg; + struct hinic5_msg_desc recv_msg; + + atomic_t recv_msg_cnt; +}; + +/* Receive other functions mbox message */ +struct hinic5_recv_mbox { + void *msg; + u16 msg_len; + u8 msg_id; + u8 mod; + u16 cmd; + u16 src_func_idx; + + enum hinic5_msg_ack_type ack_type; + u32 rsvd1; + + void *resp_buff; +}; + +struct hinic5_send_mbox { + u8 *data; + + u64 *wb_status; /* write back status */ + void *wb_vaddr; + dma_addr_t wb_paddr; +}; + +enum mbox_event_state { + EVENT_START = 0, + EVENT_FAIL, + EVENT_SUCCESS, + EVENT_TIMEOUT, + EVENT_END, +}; + +enum hinic5_mbox_cb_state { + HINIC5_VF_MBOX_CB_REG = 0, + HINIC5_VF_MBOX_CB_RUNNING, + HINIC5_PF_MBOX_CB_REG, + HINIC5_PF_MBOX_CB_RUNNING, + HINIC5_PPF_MBOX_CB_REG, + HINIC5_PPF_MBOX_CB_RUNNING, + HINIC5_PPF_TO_PF_MBOX_CB_REG, + HINIC5_PPF_TO_PF_MBOX_CB_RUNNIG, +}; + +struct mbox_dma_msg { + u32 xor; + u32 dma_addr_high; + u32 
dma_addr_low; + u32 msg_len; + u64 rsvd; +}; + +struct mbox_dma_queue { + void *dma_buff_vaddr; + dma_addr_t dma_buff_paddr; + + u16 depth; + u16 prod_idx; + u16 cons_idx; +}; + +struct hinic5_mbox { + struct hinic5_hwdev *hwdev; + + bool lock_channel_en; + ulong channel_stop; + u16 cur_msg_channel; + u32 rsvd1; + + /* lock for send mbox message and ack message */ + struct mutex mbox_send_lock; + /* lock for send mbox message */ + struct mutex msg_send_lock; + struct hinic5_send_mbox send_mbox; + + struct mbox_dma_queue sync_msg_queue; + struct mbox_dma_queue async_msg_queue; + + struct workqueue_struct *workq; + + struct hinic5_msg_channel mgmt_msg; /* driver and MGMT CPU */ + struct hinic5_msg_channel *host_msg; /* PPF message between hosts */ + struct hinic5_msg_channel *func_msg; /* PF to VF or VF to PF */ + u16 num_func_msg; + bool support_h2h_msg; /* host to host */ + + /* vf receive pf/ppf callback */ + hinic5_vf_mbox_cb vf_mbox_cb[HINIC5_MOD_MAX]; + void *vf_mbox_data[HINIC5_MOD_MAX]; + /* pf/ppf receive vf callback */ + hinic5_pf_mbox_cb pf_mbox_cb[HINIC5_MOD_MAX]; + void *pf_mbox_data[HINIC5_MOD_MAX]; + /* ppf receive pf/ppf callback */ + hinic5_ppf_mbox_cb ppf_mbox_cb[HINIC5_MOD_MAX]; + void *ppf_mbox_data[HINIC5_MOD_MAX]; + /* pf receive ppf callback */ + hinic5_pf_recv_from_ppf_mbox_cb pf_recv_ppf_mbox_cb[HINIC5_MOD_MAX]; + void *pf_recv_ppf_mbox_data[HINIC5_MOD_MAX]; + ulong ppf_to_pf_mbox_cb_state[HINIC5_MOD_MAX]; + ulong ppf_mbox_cb_state[HINIC5_MOD_MAX]; + ulong pf_mbox_cb_state[HINIC5_MOD_MAX]; + ulong vf_mbox_cb_state[HINIC5_MOD_MAX]; + + u8 send_msg_id; + u8 rsvd2; + u16 rsvd3; + enum mbox_event_state event_flag; + /* lock for mbox event flag */ + spinlock_t mbox_lock; + u64 rsvd4; +}; + +struct hinic5_mbox_work { + struct work_struct work; + struct hinic5_mbox *func_to_func; + struct hinic5_recv_mbox *recv_mbox; + struct hinic5_msg_channel *msg_ch; +}; + +struct vf_cmd_check_handle { + u16 cmd; + bool (*check_cmd)(struct hinic5_hwdev *hwdev, 
u16 src_func_idx, + void *buf_in, u16 in_size); +}; + +void hinic5_mbox_func_aeqe_handler(void *handle, u8 *header, u8 size); + +bool hinic5_mbox_check_cmd_valid(struct hinic5_hwdev *hwdev, + struct vf_cmd_check_handle *cmd_handle, + u16 vf_id, u16 cmd, void *buf_in, u16 in_size, + u8 size); + +int hinic5_func_to_func_init(struct hinic5_hwdev *hwdev); + +void hinic5_func_to_func_free(struct hinic5_hwdev *hwdev); + +int hinic5_send_mbox_to_mgmt(struct hinic5_hwdev *hwdev, u8 mod, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout, u16 channel); + +void hinic5_response_mbox_to_mgmt(struct hinic5_hwdev *hwdev, u8 mod, u16 cmd, + void *buf_in, u16 in_size, u16 msg_id); + +int hinic5_send_mbox_to_mgmt_no_ack(struct hinic5_hwdev *hwdev, u8 mod, u16 cmd, + void *buf_in, u16 in_size, u16 channel); +int hinic5_mbox_to_func(struct hinic5_mbox *func_to_func, u8 mod, u16 cmd, + u16 dst_func, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout, u16 channel); + +int hinic5_mbox_init_host_msg_channel(struct hinic5_hwdev *hwdev); + +int hinic5_mbox_set_channel_status(struct hinic5_hwdev *hwdev, u16 channel, + bool enable); + +void hinic5_mbox_enable_channel_lock(struct hinic5_hwdev *hwdev, bool enable); + +void mbox_msg_header_set_last(u64 *header, u16 msg_len, u8 mod, + u16 seg_len, struct hinic5_hwdev *hwdev); +#define V0_MOD_ID_MAX 32 +static inline u64 hinic5_mbox_set_msg_len(u8 version, u16 msg_len) +{ + if (version != 0) + return HINIC5_MSG_HEADER_SET_V1(msg_len / MBOX_MSG_LEN_UNIT, MSG_LEN); + else + return HINIC5_MSG_HEADER_SET_V0(msg_len, MSG_LEN); +} + +static inline u16 hinic5_mbox_get_msg_len(u8 version, u64 *header) +{ + if (version != 0) + return (u16)(HINIC5_MSG_HEADER_GET_V1(*header, MSG_LEN) * MBOX_MSG_LEN_UNIT); + else + return HINIC5_MSG_HEADER_GET_V0(*header, MSG_LEN); +} + +static inline u64 hinic5_mbox_set_seg_len(u8 version, u16 seg_len) +{ + if (version != 0) + return HINIC5_MSG_HEADER_SET_V1(seg_len / 
MBOX_SEG_LEN_UNIT, SEG_LEN); + else + return HINIC5_MSG_HEADER_SET_V0(seg_len, SEG_LEN); +} + +static inline u8 hinic5_mbox_get_seg_len(u8 version, const u64 *header) +{ + if (version != 0) + return (u8)(HINIC5_MSG_HEADER_GET_V1(*header, SEG_LEN) * MBOX_SEG_LEN_UNIT); + else + return HINIC5_MSG_HEADER_GET_V0(*header, SEG_LEN); +} + +static inline u64 hinic5_mbox_set_mod_id(u8 version, u8 mod) +{ + if (version != 0) + return HINIC5_MSG_HEADER_SET_V1(mod, MODULE); + else + return HINIC5_MSG_HEADER_SET_V0(mod, MODULE); +} + +static inline u8 hinic5_mbox_get_mod_id(u8 version, const u64 *header) +{ + if (version != 0) + return HINIC5_MSG_HEADER_GET_V1(*header, MODULE); + else + return HINIC5_MSG_HEADER_GET_V0(*header, MODULE); +} + +static inline u64 hinic5_mbox_set_version(u8 version) +{ + if (version != 0) + return HINIC5_MSG_HEADER_SET(MPU_MAILBOX_HEADER_VER_1, VERSION); + else + return HINIC5_MSG_HEADER_SET(MPU_MAILBOX_HEADER_VER_0, VERSION); +} + +static inline u8 hinic5_mbox_get_version(const struct hinic5_hwdev *hwdev, const u64 *header) +{ + return HINIC5_MSG_HEADER_GET(*header, VERSION); +} + +#define MBOX_SEGLEN_MASK_V0 \ + HINIC5_MSG_HEADER_SET_V0(HINIC5_MSG_HEADER_V0_SEG_LEN_MASK, SEG_LEN) +#define MBOX_SEGLEN_MASK_V1 \ + HINIC5_MSG_HEADER_SET_V1(HINIC5_MSG_HEADER_V1_SEG_LEN_MASK, SEG_LEN) + +static inline u64 hinic5_mbox_get_seg_len_mask(u8 version) +{ + if (version != 0) + return MBOX_SEGLEN_MASK_V1; + else + return MBOX_SEGLEN_MASK_V0; +} + +static inline u8 hinic5_mbox_get_send_version(const struct hinic5_hwdev *hwdev, u8 mod) +{ + if (COMM_SUPPORT_MBOX_HEAD_VER1(hwdev) && mod >= V0_MOD_ID_MAX) + return MPU_MAILBOX_HEADER_VER_1; + return MPU_MAILBOX_HEADER_VER_0; +} + +#endif + diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_mgmt.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_mgmt.h new file mode 100644 index 00000000..9f7ecf74 --- /dev/null +++ 
b/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_mgmt.h @@ -0,0 +1,180 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_MGMT_H +#define HINIC5_MGMT_H + +#include <linux/types.h> +#include <linux/completion.h> +#include <linux/semaphore.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> + +#include "comm_defs.h" +#include "mpu_cmd_base_defs.h" +#include "hinic5_hw.h" +#include "hinic5_api_cmd.h" +#include "hinic5_hwdev.h" + +#define HINIC5_MGMT_WQ_NAME "hinic5_mgmt" + +#define HINIC5_CLP_REG_GAP 0x20 +#define HINIC5_CLP_INPUT_BUF_LEN_HOST 4096UL +#define HINIC5_CLP_DATA_UNIT_HOST 4UL + +enum clp_data_type { + HINIC5_CLP_REQ_HOST = 0, + HINIC5_CLP_RSP_HOST = 1 +}; + +enum clp_reg_type { + HINIC5_CLP_BA_HOST = 0, + HINIC5_CLP_SIZE_HOST = 1, + HINIC5_CLP_LEN_HOST = 2, + HINIC5_CLP_START_REQ_HOST = 3, + HINIC5_CLP_READY_RSP_HOST = 4 +}; + +#define HINIC5_CLP_REQ_SIZE_OFFSET 0 +#define HINIC5_CLP_RSP_SIZE_OFFSET 16 +#define HINIC5_CLP_BASE_OFFSET 0 +#define HINIC5_CLP_LEN_OFFSET 0 +#define HINIC5_CLP_START_OFFSET 31 +#define HINIC5_CLP_READY_OFFSET 31 +#define HINIC5_CLP_OFFSET(member) (HINIC5_CLP_##member##_OFFSET) + +#define HINIC5_CLP_SIZE_MASK 0x7ffUL +#define HINIC5_CLP_BASE_MASK 0x7ffffffUL +#define HINIC5_CLP_LEN_MASK 0x7ffUL +#define HINIC5_CLP_START_MASK 0x1UL +#define HINIC5_CLP_READY_MASK 0x1UL +#define HINIC5_CLP_MASK(member) (HINIC5_CLP_##member##_MASK) + +#define HINIC5_CLP_DELAY_CNT_MAX 200UL +#define HINIC5_CLP_SRAM_SIZE_REG_MAX 0x3ff +#define HINIC5_CLP_SRAM_BASE_REG_MAX 0x7ffffff +#define HINIC5_CLP_LEN_REG_MAX 0x3ff +#define HINIC5_CLP_START_OR_READY_REG_MAX 0x1 + +struct hinic5_recv_msg { + void *msg; + + u16 msg_len; + u16 rsvd1; + enum hinic5_mod_type mod; + + u16 cmd; + u8 seq_id; + u8 rsvd2; + u16 msg_id; + u16 rsvd3; + + int async_mgmt_to_pf; + u32 rsvd4; + + struct completion recv_done; +}; + +struct hinic5_msg_head { + u8 status; + u8 version; + u8 
resp_aeq_num; + u8 rsvd0[5]; +}; + +enum comm_pf_to_mgmt_event_state { + SEND_EVENT_UNINIT = 0, + SEND_EVENT_START, + SEND_EVENT_SUCCESS, + SEND_EVENT_FAIL, + SEND_EVENT_TIMEOUT, + SEND_EVENT_END, +}; + +enum hinic5_mgmt_msg_cb_state { + HINIC5_MGMT_MSG_CB_REG = 0, + HINIC5_MGMT_MSG_CB_RUNNING, +}; + +struct hinic5_clp_pf_to_mgmt { + struct semaphore clp_msg_lock; + void *clp_msg_buf; +}; + +struct hinic5_msg_pf_to_mgmt { + struct hinic5_hwdev *hwdev; + + /* Async cmd can not be scheduling */ + spinlock_t async_msg_lock; + struct semaphore sync_msg_lock; + + struct workqueue_struct *workq; + + void *async_msg_buf; + void *sync_msg_buf; + void *mgmt_ack_buf; + + struct hinic5_recv_msg recv_msg_from_mgmt; + struct hinic5_recv_msg recv_resp_msg_from_mgmt; + + u16 async_msg_id; + u16 sync_msg_id; + u32 rsvd1; + struct hinic5_api_cmd_chain *cmd_chain[HINIC5_API_CMD_MAX]; + + hinic5_mgmt_msg_cb recv_mgmt_msg_cb[HINIC5_MOD_HW_MAX]; + void *recv_mgmt_msg_data[HINIC5_MOD_HW_MAX]; + ulong mgmt_msg_cb_state[HINIC5_MOD_HW_MAX]; + + void *async_msg_cb_data[HINIC5_MOD_HW_MAX]; + + /* lock when sending msg */ + spinlock_t sync_event_lock; + enum comm_pf_to_mgmt_event_state event_flag; + u64 rsvd2; +}; + +struct hinic5_mgmt_msg_handle_work { + struct work_struct work; + struct hinic5_msg_pf_to_mgmt *pf_to_mgmt; + + void *msg; + u16 msg_len; + u16 rsvd1; + + enum hinic5_mod_type mod; + u16 cmd; + u16 msg_id; + + int async_mgmt_to_pf; +}; + +void hinic5_mgmt_msg_aeqe_handler(void *hwdev, u8 *header, u8 size); + +int hinic5_pf_to_mgmt_init(struct hinic5_hwdev *hwdev); + +void hinic5_pf_to_mgmt_free(struct hinic5_hwdev *hwdev); + +int hinic5_pf_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, + u32 timeout); +int hinic5_pf_to_mgmt_async(void *hwdev, u8 mod, u16 cmd, const void *buf_in, + u16 in_size); + +int hinic5_pf_msg_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, + u32 
timeout); + +int hinic5_api_cmd_read_ack(void *hwdev, u8 dest, const void *cmd, u16 size, + void *ack, u16 ack_size); + +int hinic5_api_cmd_write_nack(void *hwdev, u8 dest, const void *cmd, u16 size); + +int hinic5_pf_clp_to_mgmt(void *hwdev, u8 mod, u16 cmd, const void *buf_in, + u16 in_size, void *buf_out, const u16 *out_size); + +int hinic5_clp_pf_to_mgmt_init(struct hinic5_hwdev *hwdev); + +void hinic5_clp_pf_to_mgmt_free(struct hinic5_hwdev *hwdev); + +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_typedef_inner.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_typedef_inner.h new file mode 100644 index 00000000..c2a659ea --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/include/hinic5_typedef_inner.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2025 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_TYPESDEF_INNER_H +#define HINIC5_TYPESDEF_INNER_H + +/* static methods testable */ +#ifdef EXPORT_STATIC_SYMBOL +#define STATIC (__weak noinline) +#define INLINE (__weak noinline) +#else +#define STATIC static +#define INLINE inline +#endif + +#ifndef GIT_COMMIT_ID +#define GIT_COMMIT_ID "unknown" +#endif + +#endif /* HINIC5_TYPESDEF_INNER_H */ diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/CMakeLists.txt b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/CMakeLists.txt new file mode 100644 index 00000000..ee7056e0 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/CMakeLists.txt @@ -0,0 +1,94 @@ +if("${BUILD_VERSION}" MATCHES "ub_ascend") + set(UMMU_CORE_BUILD_DIR ${UBUS_BUILD_DIR}/kernel/ummu-core-v1) +elseif("${PRODUCT}" STREQUAL "ascend910D" OR "${PRODUCT}" STREQUAL "ascend910Desl") + set(UBUS_DIR ${TOP_DIR}/drivers/ubus) + set(KDIR ${KERNEL_WORK_DIR}/../linux-4.19) + set(UMMU_CORE_BUILD_DIR ${UBUS_DIR}/kernel/ummu-core-v1) +else() + set(UMMU_CORE_BUILD_DIR ${UBUS_BUILD_DIR}/kernel/ummu-core) +endif() + +set(UBUS_UBC_BUILD_DIR 
${UBUS_BUILD_DIR}/kernel/ubus) + +if("${BUILD_VERSION}" MATCHES "b173") + set(B173_VALUE y) +else() + set(B173_VALUE n) +endif() + +if("${BUILD_VERSION}" MATCHES "b177" OR "${BUILD_VERSION}" MATCHES "b180") + set(B177_VALUE y) + set(B173_VALUE y) +else() + set(B177_VALUE n) +endif() + +# =============================== 使用KCompat自动化工具时适配1650的SDK 驱动编译 =============================== +set(SDK_KCOMPAT_GENERATOR_PATH "${TOP_DIR}/ChipSolution/build/host/linux/sdk/sdk-kcompat-generator.sh") +set(SDK_KCOMPAT_PATH "${TOP_DIR}/ChipSolution/src/dpu_develop_interface/drv_sdk_intf/ossl/sdk_kcompat.h") +if("${KDIR}" MATCHES "2403_SP2") + message(STATUS "KNL_HEADER_TYPE: UB1650") + set(KERN_VER "NULL") + set(KSRC "${KDIR}/../../../../open_source/2403_SP2") + message(STATUS "KSRC = ${KSRC}") + + if(EXISTS ${SDK_KCOMPAT_GENERATOR_PATH}) + message(STATUS "${SDK_KCOMPAT_GENERATOR_PATH} file is exist!") + endif() + + string(RANDOM LENGTH 4 RAND_NUM) + set(SDK_KCOMPAT_GENERATOR_PATH_TMP "${TOP_DIR}/ChipSolution/build/host/linux/sdk/sdk-kcompat-generator_${RAND_NUM}.sh") + + if(EXISTS ${SDK_KCOMPAT_GENERATOR_PATH_TMP}) + file(REMOVE ${SDK_KCOMPAT_GENERATOR_PATH_TMP}) + endif() + + file(COPY_FILE "${SDK_KCOMPAT_GENERATOR_PATH}" "${SDK_KCOMPAT_GENERATOR_PATH_TMP}") + if(EXISTS ${SDK_KCOMPAT_GENERATOR_PATH_TMP}) + message(STATUS "${SDK_KCOMPAT_GENERATOR_PATH_TMP} file copy succeed") + else() + message(STATUS "${SDK_KCOMPAT_GENERATOR_PATH_TMP} file copy failed") + endif() + + file(READ ${SDK_KCOMPAT_GENERATOR_PATH_TMP} FILE_CONTENTS) + string(REPLACE "KERN_VER=\$(uname -r)" "KERN_VER=${KERN_VER}" FILE_CONTENTS "${FILE_CONTENTS}") + string(REPLACE "KSRC=\"\"" "KSRC=\"${KSRC}\"" FILE_CONTENTS "${FILE_CONTENTS}") + file(WRITE ${SDK_KCOMPAT_GENERATOR_PATH_TMP} "${FILE_CONTENTS}") + + execute_process( + COMMAND bash -c "source ${SDK_KCOMPAT_GENERATOR_PATH_TMP} && gen_sdk_kcompat ${SDK_KCOMPAT_PATH}" + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} + RESULT_VARIABLE RESULT + ) + if(EXISTS 
${SDK_KCOMPAT_GENERATOR_PATH_TMP}) + file(REMOVE ${SDK_KCOMPAT_GENERATOR_PATH_TMP}) + endif() +else() + message(STATUS "KNL_HEADER_TYPE: DEFAULT") + execute_process( + COMMAND bash -c "source ${SDK_KCOMPAT_GENERATOR_PATH} && gen_sdk_kcompat ${SDK_KCOMPAT_PATH}" + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} + RESULT_VARIABLE RESULT + ) +endif() + +if("${PRODUCT}" STREQUAL "ascend910D" OR "${PRODUCT}" STREQUAL "ascend910Desl") + set(hisdk5_depends ubus) + add_device_ko(LOCAL_MODULE hisdk5 + KO_SRC_FOLDER ${CMAKE_CURRENT_SOURCE_DIR} + MAKE_ARGS "IS_ASCEND=true" + TARGETE_DPENDS "${hisdk5_depends}") +else() +add_custom_target(hisdk5_ko + COMMENT echo "build ${CMAKE_CURRENT_SOURCE_DIR} start." + COMMAND cd ${TOP_DIR}/ChipSolution && git apply build/host/linux/sdk/patch_code/knl6_6_compile.patch && cd - + COMMAND cp -f ${CMAKE_CURRENT_SOURCE_DIR}/Makefile ${CMAKE_CURRENT_BINARY_DIR} + COMMAND ${MAKE} -j64 -C ${KDIR} M=${CMAKE_CURRENT_BINARY_DIR} src=${CMAKE_CURRENT_SOURCE_DIR} UBUS_UBC_BUILD_DIR=${UBUS_UBC_BUILD_DIR} UMMU_CORE_BUILD_DIR=${UMMU_CORE_BUILD_DIR} HI1823_TRUNK_DIR=${TOP_DIR}/ChipSolution HI1823_BUILD_DIR=${TOP_DIR}/ChipSolution CONFIG_UBUS_DEVICE=y HI1823_OS_TYPE=openEuler UB_BUILD_B173=${B173_VALUE} UB_BUILD_B177=${B177_VALUE} + COMMAND cp -f *.ko ${CMAKE_INSTALL_PREFIX}/ko + COMMAND cd ${TOP_DIR}/ChipSolution && git apply --reverse build/host/linux/sdk/patch_code/knl6_6_compile.patch && cd - + DEPENDS kernel +) + +add_dependencies(hisdk5_ko ubus_ko) +add_dependencies(hisdk5_ko ummu_core_ko) +endif() \ No newline at end of file diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_bus.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_bus.c new file mode 100644 index 00000000..1a5762ed --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_bus.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + 
+#include "ossl_knl.h"
+#include "hinic5_hwdev.h"
+#include "hinic5_pcie.h"
+#ifdef __UBUS_DRIVER__
+#include "hinic5_ubus.h"
+#endif
+#include "hinic5_bus.h"
+
+#ifdef __UBUS_DRIVER__
+static struct hinic5_bus_ops g_ubus_ops = {
+	.is_virtfn = hinic5_ubus_is_virtfn,
+	.get_pf_adev = hinic5_ubus_get_pf_adev,
+	.set_func_en = hinic5_ubus_set_func_en,
+	.get_vf_adev_by_pf = hinic5_ubus_get_vf_adev_by_pf,
+	.get_device_id = hinic5_ubus_get_device_id,
+	.irq_vectors_alloc = hinic5_ubus_irq_vectors_alloc,
+	.irq_vectors_free = hinic5_ubus_irq_vectors_free,
+	.get_vf_num = hinic5_ubus_get_vf_num,
+	.irq_vector = hinic5_ubus_irq_vector,
+	.init_device_info = hinic5_ub_init_device_info,
+	.virt_configure = hinic5_ubus_numvds_store_vds_process,
+	.fault_process = hinic5_ubus_probe_fault_process,
+};
+#endif
+
+static struct hinic5_bus_ops g_pcie_ops = {
+	.is_virtfn = hinic5_pci_is_virtfn,
+	.get_pf_adev = hinic5_pdev_get_pf_adev,
+	.set_func_en = hinic5_pci_set_func_en,
+	.get_vf_adev_by_pf = hinic5_pci_get_vf_adev_by_pf,
+	.get_device_id = hinic5_pci_get_device_id,
+	.irq_vectors_alloc = hinic5_pci_irq_vectors_alloc,
+	.irq_vectors_free = hinic5_pci_irq_vectors_free,
+	.get_vf_num = hinic5_pci_get_vf_num,
+	.irq_vector = hinic5_pci_irq_vector,
+	.init_device_info = hinic5_pci_init_device_info,
+	.virt_configure = hinic5_pci_sriov_enable_ops,
+	.fault_process = hinic5_pci_probe_fault_process,
+};
+
+struct hinic5_bus_ops *hinic5_get_dev_ops(struct hinic5_adev *adev)
+{
+	switch (adev->lld_dev.dev_type) {
+	case HINIC5_DEVICE_T_PCI:
+		return &g_pcie_ops;
+#ifdef __UBUS_DRIVER__
+	case HINIC5_DEVICE_T_UB:
+		return &g_ubus_ops;
+#endif
+	default:
+		break;
+	}
+	sdk_err(adev->dev, "Failed to get valid struct hinic5_bus_ops object, dev_type=%d\n",
+		adev->lld_dev.dev_type);
+	return NULL;
+}
+
+/*
+ * The SDK driver supports both UB and PCI scenarios, so both the ub and the pci driver are registered
+ */
+int hinic5_register_driver(void)
+{
+	int err;
+
+	err = hinic5_pci_register_driver();
+	if (err != 0)
+		return err;
+
+#ifdef __UBUS_DRIVER__
+	err = hinic5_ubus_register_driver();
+	if (err != 0) {
+		hinic5_pci_unregister_driver();
+		return err;
+	}
+#endif
+	return 0;
+}
+
+void hinic5_unregister_driver(void)
+{
+#ifdef __UBUS_DRIVER__
+	hinic5_ubus_unregister_driver();
+#endif
+	hinic5_pci_unregister_driver();
+}
+
+/**
+ * @brief Check whether the current device is a VF
+ * @param adev pointer to the device adapter-layer structure
+ * @details a VF is treated as a PF inside a virtual machine, that is
+	vf in host -- return 1
+	vf in vm -- return 0
+ *
+ * @return false means PF, true means VF
+ */
+bool hinic5_adev_is_virtfn(struct hinic5_adev *adev)
+{
+	return adev->bus_ops->is_virtfn(adev);
+}
+
+struct hinic5_adev *hinic5_adev_get_pf_adev(struct hinic5_adev *adev)
+{
+	return adev->bus_ops->get_pf_adev(adev);
+}
+
+int hinic5_set_func_en(struct hinic5_adev *adev, bool en, u16 vf_func_id)
+{
+	return adev->bus_ops->set_func_en(adev, en, vf_func_id);
+}
+
+struct hinic5_adev *hinic5_get_vf_adev_by_pf(struct hinic5_adev *adev, u16 func_id)
+{
+	return adev->bus_ops->get_vf_adev_by_pf(adev, func_id);
+}
+
+int hinic5_adev_get_vf_num(struct hinic5_adev *adev)
+{
+	return adev->bus_ops->get_vf_num(adev);
+}
+EXPORT_SYMBOL(hinic5_adev_get_vf_num);
+
+u16 hinic5_adev_get_device_id(struct hinic5_adev *adev)
+{
+	if (!adev || !adev->bus_ops || !adev->bus_ops->get_device_id) {
+		pr_err("get_device_id is null\n");
+		return 0;
+	}
+
+	return adev->bus_ops->get_device_id(adev);
+}
+
+int hinic5_adev_irq_vectors_alloc(struct hinic5_adev *adev, void *entry, u32 irqs_min, u32 irqs_num)
+{
+	return adev->bus_ops->irq_vectors_alloc(adev, entry, irqs_min, irqs_num);
+}
+
+void hinic5_adev_irq_vectors_free(struct hinic5_adev *adev)
+{
+	adev->bus_ops->irq_vectors_free(adev);
+}
+
+int hinic5_adev_irq_vector(struct hinic5_adev *adev, u32 idx)
+{
+	return adev->bus_ops->irq_vector(adev, idx);
+}
diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_bus.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_bus.h
new file mode 100644
index 00000000..0d1c28ed
--- /dev/null
+++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_bus.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2021 Huawei Technologies Co., Ltd */
+
+#ifndef HINIC5_BUS_H
+#define HINIC5_BUS_H
+#include <linux/types.h>
+#include <linux/pci.h>
+
+struct hinic5_adev;
+
+struct hinic5_bus_ops {
+	bool (*is_virtfn)(struct hinic5_adev *adev);
+	struct hinic5_adev *(*get_pf_adev)(struct hinic5_adev *adev);
+	int (*set_func_en)(struct hinic5_adev *dst_adev, bool en, u16 vf_func_id);
+	struct hinic5_adev *(*get_vf_adev_by_pf)(struct hinic5_adev *adev, u16 func_id);
+	u16 (*get_device_id)(struct hinic5_adev *adev);
+	int (*irq_vectors_alloc)(struct hinic5_adev *adev, void *entry, u32 irqs_min, u32 irqs_num);
+	void (*irq_vectors_free)(struct hinic5_adev *adev);
+	int (*irq_vector)(struct hinic5_adev *adev, u32 idx);
+	int (*get_vf_num)(struct hinic5_adev *adev);
+	int (*init_device_info)(struct hinic5_adev *adev);
+	void (*virt_configure)(struct hinic5_adev *adev, int nums);
+	void (*fault_process)(struct hinic5_adev *adev);
+};
+
+enum hinic5_sriov_state {
+	HINIC5_SRIOV_DISABLE,
+	HINIC5_SRIOV_ENABLE,
+	HINIC5_FUNC_PERSENT,
+};
+
+struct hinic5_sriov_info {
+	bool sriov_enabled;
+	unsigned int num_vfs;
+	ulong state;
+	unsigned short first_ue_idx; /* ue_idx of the first VF under the current PF on ubus */
+};
+
+bool hinic5_adev_is_virtfn(struct hinic5_adev *adev);
+struct hinic5_adev *hinic5_adev_get_pf_adev(struct hinic5_adev *adev);
+struct hinic5_adev *hinic5_get_vf_adev_by_pf(struct hinic5_adev *adev, u16 func_id);
+int hinic5_set_func_en(struct hinic5_adev *adev, bool en, u16 vf_func_id);
+int hinic5_adev_get_vf_num(struct hinic5_adev *adev);
+u16 hinic5_adev_get_device_id(struct hinic5_adev *adev);
+
+#ifndef __UEFI__
+int hinic5_adev_irq_vectors_alloc(struct hinic5_adev *adev,
+				  void *entry, u32 irqs_min, u32 irqs_num);
+void hinic5_adev_irq_vectors_free(struct hinic5_adev *adev);
+int hinic5_adev_irq_vector(struct hinic5_adev *adev, u32 idx);
+#endif
+
+struct hinic5_bus_ops *hinic5_get_dev_ops(struct hinic5_adev *adev);
+int hinic5_register_driver(void);
+void hinic5_unregister_driver(void);
+
+void *hinic5_get_hwdev_by_pcidev(struct pci_dev *pdev);
+#endif
diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_dev_mgmt.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_dev_mgmt.c
new file mode 100644
index 00000000..0952c114
--- /dev/null
+++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_dev_mgmt.c
@@ -0,0 +1,1030 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 Huawei Technologies Co., Ltd */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt
+
+#include <net/addrconf.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/io-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/rtc.h>
+#include <linux/debugfs.h>
+
+#include "ossl_knl.h"
+#include "mpu_inband_cmd_defs.h"
+#include "bond_pub_cmd.h"
+#include "hinic5_mt.h"
+#include "hinic5_crm.h"
+#include "hinic5_lld.h"
+#include "hinic5_lld_inner.h"
+#include "hinic5_sriov.h"
+#include "hinic5_pci_id_tbl.h"
+#include "hinic5_hwdev.h"
+#include "hinic5_fw_update.h"
+#include "hinic5_dev_mgmt.h"
+
+#define HINIC5_WAIT_TOOL_CNT_TIMEOUT 10000
+#define HINIC5_WAIT_TOOL_MIN_USLEEP_TIME 9900
+#define HINIC5_WAIT_TOOL_MAX_USLEEP_TIME 10000
+
+static ulong card_bit_map;
+
+LIST_HEAD(g_hinic5_chip_list);
+
+inline struct list_head *get_hinic5_chip_list(void)
+{
+	return &g_hinic5_chip_list;
+}
+
+void hinic5_uld_dev_hold(struct hinic5_lld_dev *lld_dev, enum hinic5_service_type type)
+{
+	struct hinic5_adev *adev = NULL;
+
+	if (type >= SERVICE_T_MAX) {
+		pr_err("array uld_ref_cnt upper bound\n");
+		return;
+	}
+	if (!lld_dev || !to_hinic5_adev(lld_dev)) {
+		pr_err("lld_dev is null, srv_type = 0x%x, when uld dev hold\n", type);
+		return;
+	}
+	adev = to_hinic5_adev(lld_dev);
+
+	atomic_inc(&adev->uld_ref_cnt[type]);
+}
+EXPORT_SYMBOL(hinic5_uld_dev_hold);
+
+void hinic5_uld_dev_put(struct hinic5_lld_dev *lld_dev, enum hinic5_service_type type)
+{
+	struct hinic5_adev *adev = NULL;
+
+	if (!lld_dev || !to_hinic5_adev(lld_dev)) {
+		pr_err("lld_dev is null, srv_type = 0x%x, when uld dev put\n", type);
+		return;
+	}
+	if (type >= SERVICE_T_MAX) {
+		pr_err("array uld_ref_cnt upper bound\n");
+		return;
+	}
+	adev = to_hinic5_adev(lld_dev);
+
+	atomic_dec(&adev->uld_ref_cnt[type]);
+}
+EXPORT_SYMBOL(hinic5_uld_dev_put);
+
+void lld_dev_cnt_init(struct hinic5_adev *adev)
+{
+	atomic_set(&adev->ref_cnt, 0);
+}
+
+void hinic5_lld_dev_hold(struct hinic5_lld_dev *dev)
+{
+	struct hinic5_adev *adev = to_hinic5_adev(dev);
+
+	atomic_inc(&adev->ref_cnt);
+}
+EXPORT_SYMBOL(hinic5_lld_dev_hold);
+
+void hinic5_lld_dev_put(struct hinic5_lld_dev *dev)
+{
+	struct hinic5_adev *adev = to_hinic5_adev(dev);
+
+	atomic_dec(&adev->ref_cnt);
+}
+EXPORT_SYMBOL(hinic5_lld_dev_put);
+
+void wait_lld_dev_unused(struct hinic5_adev *adev)
+{
+	ulong end;
+
+	end = jiffies + msecs_to_jiffies(HINIC5_WAIT_TOOL_CNT_TIMEOUT);
+	do {
+		if (atomic_read(&adev->ref_cnt) == 0)
+			return;
+
+		/* if sleep 10ms, use usleep_range to be more precise */
+		usleep_range(HINIC5_WAIT_TOOL_MIN_USLEEP_TIME,
+			     HINIC5_WAIT_TOOL_MAX_USLEEP_TIME);
+	} while (time_before(jiffies, end));
+}
+
+enum hinic5_lld_status {
+	HINIC5_NODE_CHANGE = BIT(0),
+};
+
+struct hinic5_lld_lock {
+	/* lock for chip list */
+	struct mutex lld_mutex;
+	ulong status;
+	atomic_t dev_ref_cnt;
+};
+
+struct hinic5_lld_lock g_lld_lock;
+
+/* max mbox timeout (200s) + extra (100s) */
+#define WAIT_LLD_DEV_HOLD_TIMEOUT (300 * 1000)
+#define WAIT_LLD_DEV_NODE_CHANGED WAIT_LLD_DEV_HOLD_TIMEOUT
+#define WAIT_LLD_DEV_REF_CNT_EMPTY (2 * 60 * 1000) /* 2minutes */
+#define PRINT_TIMEOUT_INTERVAL 10000
+#define MS_PER_SEC 1000
+#define LLD_LOCK_MIN_USLEEP_TIME 900
+#define LLD_LOCK_MAX_USLEEP_TIME 1000
+
+/* nodes in chip_node will be changed; tools or drivers can't get a node
* during this situation + */ +void lld_lock_chip_node(void) +{ + ulong end; + bool timeout = true; + u32 loop_cnt; + + mutex_lock(&g_lld_lock.lld_mutex); + + loop_cnt = 0; + end = jiffies + msecs_to_jiffies(WAIT_LLD_DEV_NODE_CHANGED); + do { + if (!test_and_set_bit(HINIC5_NODE_CHANGE, &g_lld_lock.status)) { + timeout = false; + break; + } + + loop_cnt++; + if (loop_cnt % PRINT_TIMEOUT_INTERVAL == 0) + pr_warn("Wait for lld node change complete for %us\n", + loop_cnt / MS_PER_SEC); + + /* if sleep 1ms, use usleep_range to be more precise */ + usleep_range(LLD_LOCK_MIN_USLEEP_TIME, + LLD_LOCK_MAX_USLEEP_TIME); + } while (time_before(jiffies, end)); + + if (timeout && test_and_set_bit(HINIC5_NODE_CHANGE, &g_lld_lock.status)) + pr_warn("Wait for lld node change complete timeout when trying to get lld lock\n"); + + loop_cnt = 0; + timeout = true; + end = jiffies + msecs_to_jiffies(WAIT_LLD_DEV_NODE_CHANGED); + do { + if (atomic_read(&g_lld_lock.dev_ref_cnt) == 0) { + timeout = false; + break; + } + + loop_cnt++; + if (loop_cnt % PRINT_TIMEOUT_INTERVAL == 0) + pr_warn("Wait for lld dev unused for %us, reference count: %d\n", + loop_cnt / MS_PER_SEC, + atomic_read(&g_lld_lock.dev_ref_cnt)); + + /* if sleep 1ms, use usleep_range to be more precise */ + usleep_range(LLD_LOCK_MIN_USLEEP_TIME, + LLD_LOCK_MAX_USLEEP_TIME); + } while (time_before(jiffies, end)); + + if (timeout && (atomic_read(&g_lld_lock.dev_ref_cnt) != 0)) + pr_warn("Wait for lld dev unused timeout\n"); + + mutex_unlock(&g_lld_lock.lld_mutex); +} + +void lld_unlock_chip_node(void) +{ + clear_bit(HINIC5_NODE_CHANGE, &g_lld_lock.status); +} + +/* When tools or other drivers want to get node of chip_node, use this function + * to prevent node be freed + */ +void lld_hold(void) +{ + ulong end; + u32 loop_cnt = 0; + + /* ensure there have not any chip node in changing */ + mutex_lock(&g_lld_lock.lld_mutex); + + end = jiffies + msecs_to_jiffies(WAIT_LLD_DEV_HOLD_TIMEOUT); + do { + if (!test_bit(HINIC5_NODE_CHANGE, 
&g_lld_lock.status)) + break; + + loop_cnt++; + + if (loop_cnt % PRINT_TIMEOUT_INTERVAL == 0) + pr_warn("Wait lld node change complete for %us\n", + loop_cnt / MS_PER_SEC); + /* if sleep 1ms, use usleep_range to be more precise */ + usleep_range(LLD_LOCK_MIN_USLEEP_TIME, + LLD_LOCK_MAX_USLEEP_TIME); + } while (time_before(jiffies, end)); + + if (test_bit(HINIC5_NODE_CHANGE, &g_lld_lock.status)) + pr_warn("Wait lld node change complete timeout when trying to hode lld dev\n"); + + atomic_inc(&g_lld_lock.dev_ref_cnt); + mutex_unlock(&g_lld_lock.lld_mutex); +} + +void lld_put(void) +{ + atomic_dec(&g_lld_lock.dev_ref_cnt); +} + +void hinic5_lld_lock_init(void) +{ + mutex_init(&g_lld_lock.lld_mutex); + atomic_set(&g_lld_lock.dev_ref_cnt, 0); +} + +void hinic5_get_all_chip_id(void *id_info) +{ + struct nic_card_id *card_id = (struct nic_card_id *)id_info; + struct card_node *chip_node = NULL; + int i = 0; + int id, err; + + lld_hold(); + list_for_each_entry(chip_node, get_hinic5_chip_list(), node) { + err = sscanf(chip_node->chip_name, HINIC5_CHIP_NAME "%d", &id); + if (err != 1) { + pr_err("Failed to get hinic5 id\n"); + continue; + } + card_id->id[i] = (u32)id; + i++; + } + lld_put(); + card_id->num = (u32)i; +} + +static bool is_pcidev_match_chip_name(const char *ifname, struct hinic5_adev *adev, + struct card_node *chip_node, enum func_type type) +{ + if (strncmp(chip_node->chip_name, ifname, IFNAMSIZ) == 0) { + if (hinic5_func_type(adev->hwdev) != type) + return false; + return true; + } + + return false; +} + +/** + * @brief get_dst_type_lld_dev_by_chip_name - 根据设备名称获取lld 设备指针 + * + * @param[in] ifname 设备名称 hinic0 + * @param[in] type function类型 + * @param[in] check_active_flag 是否判断function的状态可用 + * + * @details NA + * + * @attention: NA + * + * @return: 返回查询到的设备 + * @retval NULL 查询无匹配 + * @retval 非NULL 匹配设备 + */ +static struct hinic5_lld_dev *get_dst_type_lld_dev_by_chip_name(const char *ifname, + enum func_type type, + bool check_active_flag) +{ + struct card_node 
*chip_node = NULL; + struct hinic5_adev *adev = NULL; + + list_for_each_entry(chip_node, get_hinic5_chip_list(), node) { + list_for_each_entry(adev, &chip_node->func_list, node) { + if ((!check_active_flag || hinic5_is_function_active(adev->hwdev)) && + is_pcidev_match_chip_name(ifname, adev, chip_node, type)) { + return &adev->lld_dev; + } + } + } + + return NULL; +} + +struct hinic5_lld_dev *hinic5_get_lld_dev_by_chip_name(const char *chip_name) +{ + struct hinic5_lld_dev *dev = NULL; + int i; + bool check_active_flag[] = {true, false}; + + lld_hold(); + + for (i = 0; i < sizeof(check_active_flag) / sizeof(check_active_flag[0]); i++) { + dev = get_dst_type_lld_dev_by_chip_name(chip_name, TYPE_PPF, check_active_flag[i]); + if (dev) + goto out; + + dev = get_dst_type_lld_dev_by_chip_name(chip_name, TYPE_PF, check_active_flag[i]); + if (dev) + goto out; + + dev = get_dst_type_lld_dev_by_chip_name(chip_name, TYPE_VF, check_active_flag[i]); + if (dev) + goto out; + } + +out: + if (dev) + hinic5_lld_dev_hold(dev); + lld_put(); + + return dev; +} +EXPORT_SYMBOL(hinic5_get_lld_dev_by_chip_name); + +static int get_dynamic_bond_uld_dev_name(struct hinic5_adev *adev, enum hinic5_service_type type, + char *ifname) +{ + const struct hinic5_uld_info *uld_info = hinic5_get_uld_info_by_type(type); + u32 out_size = IFNAMSIZ; + + if (!uld_info || !uld_info->ioctl || !adev->uld_dev[type]) + return -EFAULT; + + return uld_info->ioctl(adev->uld_dev[type], CMD_CUSTOM_BOND_GET_ULD_DEV_NAME, + NULL, 0, ifname, &out_size); +} + +static int get_dynamic_uld_dev_name(struct hinic5_adev *adev, enum hinic5_service_type type, + char *ifname) +{ + const struct hinic5_uld_info *uld_info = hinic5_get_uld_info_by_type(type); + u32 out_size = IFNAMSIZ; + + if (!uld_info || !uld_info->ioctl) + return -EFAULT; + + return uld_info->ioctl(adev->uld_dev[type], GET_ULD_DEV_NAME, + NULL, 0, ifname, &out_size); +} + +static bool is_pcidev_match_dev_name(const char *dev_name, struct hinic5_adev *adev, + 
enum hinic5_service_type type) +{ + int i; + char nic_uld_name[IFNAMSIZ] = {0}; + int err; + + if (type > SERVICE_T_MAX) + return false; + + if (type == SERVICE_T_MAX) { + for (i = SERVICE_T_OVS; i < SERVICE_T_MAX; i++) { + if (strncmp(adev->uld_dev_name[i], dev_name, IFNAMSIZ) == 0) + return true; + } + } else { + if (strncmp(adev->uld_dev_name[type], dev_name, IFNAMSIZ) == 0) + return true; + } + + if (type == SERVICE_T_CUSTOM) { + err = get_dynamic_bond_uld_dev_name(adev, SERVICE_T_CUSTOM, (char *)nic_uld_name); + if (err == 0) { + if (strncmp(nic_uld_name, dev_name, IFNAMSIZ) == 0) + return true; + } + } + + err = get_dynamic_uld_dev_name(adev, SERVICE_T_NIC, (char *)nic_uld_name); + if (err == 0) { + if (strncmp(nic_uld_name, dev_name, IFNAMSIZ) == 0) + return true; + } + + return false; +} + +static struct hinic5_lld_dev *get_lld_dev_by_dev_name(const char *dev_name, + enum hinic5_service_type type, bool hold) +{ + struct card_node *chip_node = NULL; + struct hinic5_adev *adev = NULL; + + lld_hold(); + + list_for_each_entry(chip_node, get_hinic5_chip_list(), node) { + list_for_each_entry(adev, &chip_node->func_list, node) { + if (is_pcidev_match_dev_name(dev_name, adev, type)) { + if (hold) + hinic5_lld_dev_hold(&adev->lld_dev); + lld_put(); + return &adev->lld_dev; + } + } + } + + lld_put(); + + return NULL; +} + +struct hinic5_lld_dev *hinic5_get_lld_dev_by_chip_and_port(const char *chip_name, u8 port_id) +{ + struct card_node *chip_node = NULL; + struct hinic5_adev *adev = NULL; + + lld_hold(); + list_for_each_entry(chip_node, get_hinic5_chip_list(), node) { + list_for_each_entry(adev, &chip_node->func_list, node) { + if (hinic5_func_type(adev->hwdev) == TYPE_VF) + continue; + + if ((hinic5_physical_port_id(adev->hwdev) == port_id) && + (strncmp(chip_node->chip_name, chip_name, IFNAMSIZ) == 0) && + (hinic5_support_nic(adev->hwdev, NULL) != 0)) { + hinic5_lld_dev_hold(&adev->lld_dev); + lld_put(); + + return &adev->lld_dev; + } + } + } + lld_put(); + + 
return NULL; +} + +struct hinic5_lld_dev *hinic5_get_lld_dev_with_l3i_enabled(const char *chip_name) +{ + struct card_node *chip_node = NULL; + struct hinic5_adev *adev = NULL; + struct hinic5_hwdev *hwdev = NULL; + + lld_hold(); + list_for_each_entry(chip_node, get_hinic5_chip_list(), node) { + if (strncmp(chip_node->chip_name, chip_name, IFNAMSIZ) != 0) + continue; + + list_for_each_entry(adev, &chip_node->func_list, node) { + if (hinic5_func_type(adev->hwdev) == TYPE_VF) + continue; + + hwdev = (struct hinic5_hwdev *)adev->hwdev; + + if (hinic5_fw_update_ddr_enabled(hwdev)) { + hinic5_lld_dev_hold(&adev->lld_dev); + lld_put(); + + return &adev->lld_dev; + } + } + } + lld_put(); + + return NULL; +} + +void *hinic5_get_ppf_dev(void) +{ + struct card_node *chip_node = NULL; + struct hinic5_adev *adev = NULL; + struct list_head *chip_list = NULL; + + lld_hold(); + chip_list = get_hinic5_chip_list(); + + list_for_each_entry(chip_node, chip_list, node) + list_for_each_entry(adev, &chip_node->func_list, node) + if (hinic5_func_type(adev->hwdev) == TYPE_PPF) { + pr_info("Get ppf_func_id:%u", hinic5_global_func_id(adev->hwdev)); + lld_put(); + return adev->lld_dev.hwdev; + } + + lld_put(); + return NULL; +} +EXPORT_SYMBOL(hinic5_get_ppf_dev); + +struct hinic5_lld_dev *hinic5_get_lld_dev_by_dev_name(const char *dev_name, + enum hinic5_service_type type) +{ + if (!dev_name) { + pr_err("dev_name is null\n"); + return NULL; + } + return get_lld_dev_by_dev_name(dev_name, type, true); +} + +struct hinic5_lld_dev *hinic5_get_lld_dev_by_dev_name_unsafe(const char *dev_name, + enum hinic5_service_type type) +{ + if (!dev_name) { + pr_err("dev_name is null\n"); + return NULL; + } + return get_lld_dev_by_dev_name(dev_name, type, false); +} +EXPORT_SYMBOL(hinic5_get_lld_dev_by_dev_name_unsafe); + +static void *get_uld_by_lld_dev(struct hinic5_lld_dev *lld_dev, enum hinic5_service_type type, + bool hold) +{ + struct hinic5_adev *adev = NULL; + void *uld = NULL; + + if (!lld_dev) + 
return NULL; + + adev = to_hinic5_adev(lld_dev); + if (!adev) + return NULL; + + spin_lock_bh(&adev->uld_lock); + if (!adev->uld_dev[type] || !test_bit(type, &adev->uld_state)) { + spin_unlock_bh(&adev->uld_lock); + return NULL; + } + uld = adev->uld_dev[type]; + + if (hold) + atomic_inc(&adev->uld_ref_cnt[type]); + spin_unlock_bh(&adev->uld_lock); + + return uld; +} + +void *hinic5_get_uld_dev(struct hinic5_lld_dev *lld_dev, enum hinic5_service_type type) +{ + if (!lld_dev) { + pr_err("lld_dev is null, srv_type = 0x%x, when get uld dev\n", type); + return NULL; + } + return get_uld_by_lld_dev(lld_dev, type, true); +} +EXPORT_SYMBOL(hinic5_get_uld_dev); + +void *hinic5_get_uld_dev_unsafe(struct hinic5_lld_dev *lld_dev, enum hinic5_service_type type) +{ + if (!lld_dev) { + pr_err("lld_dev is null, srv_type = 0x%x, when get uld dev unsafe\n", type); + return NULL; + } + return get_uld_by_lld_dev(lld_dev, type, false); +} +EXPORT_SYMBOL(hinic5_get_uld_dev_unsafe); + +static struct hinic5_lld_dev *get_ppf_lld_dev(struct hinic5_lld_dev *lld_dev, bool hold) +{ + struct hinic5_adev *adev = NULL; + struct card_node *chip_node = NULL; + struct hinic5_adev *temp_adev = NULL; + + if (!lld_dev) + return NULL; + + adev = to_hinic5_adev(lld_dev); + if (!adev) + return NULL; + + lld_hold(); + chip_node = adev->chip_node; + list_for_each_entry(temp_adev, &chip_node->func_list, node) { + /* 单卡多cpihost场景下,链表中存在多个ppf */ + if (temp_adev->hwdev && hinic5_func_type(temp_adev->hwdev) == TYPE_PPF && + hinic5_global_func_id(temp_adev->hwdev) == hinic5_ppf_idx(lld_dev->hwdev)) { + if (hold) + hinic5_lld_dev_hold(&temp_adev->lld_dev); + lld_put(); + return &temp_adev->lld_dev; + } + } + lld_put(); + + return NULL; +} + +struct hinic5_lld_dev *hinic5_get_ppf_lld_dev(struct hinic5_lld_dev *lld_dev) +{ + return get_ppf_lld_dev(lld_dev, true); +} +EXPORT_SYMBOL(hinic5_get_ppf_lld_dev); + +struct hinic5_lld_dev *hinic5_get_ppf_lld_dev_unsafe(struct hinic5_lld_dev *lld_dev) +{ + return 
get_ppf_lld_dev(lld_dev, false); +} +EXPORT_SYMBOL(hinic5_get_ppf_lld_dev_unsafe); + +void *hinic5_get_ppf_hw_dev_unsafe(void *hwdev) +{ + struct hinic5_hwdev *handle = hwdev; + struct hinic5_adev *adev = NULL; + struct card_node *chip_node = NULL; + + if (unlikely(!handle)) + return NULL; + + list_for_each_entry(chip_node, get_hinic5_chip_list(), node) { + list_for_each_entry(adev, &chip_node->func_list, node) { + if (adev->hwdev && hinic5_func_type(adev->hwdev) == TYPE_PPF) + return adev->lld_dev.hwdev; + } + } + + sdk_warn(handle->dev_hdl, "Current host has no PPF.\n"); + return NULL; +} +EXPORT_SYMBOL(hinic5_get_ppf_hw_dev_unsafe); + +int hinic5_get_chip_name(struct hinic5_lld_dev *lld_dev, char *chip_name, u16 max_len) +{ + struct hinic5_adev *adev = NULL; + + if (!lld_dev || !chip_name || max_len == 0) + return -EINVAL; + + adev = to_hinic5_adev(lld_dev); + if (!adev) + return -EFAULT; + + lld_hold(); + strscpy(chip_name, adev->chip_node->chip_name, max_len); + chip_name[max_len - 1] = '\0'; + + lld_put(); + + return 0; +} +EXPORT_SYMBOL(hinic5_get_chip_name); + +void *hinic5_get_sdk_hwdev_by_lld(struct hinic5_lld_dev *lld_dev) +{ + return lld_dev->hwdev; +} + +struct card_node *hinic5_get_chip_node_by_lld(struct hinic5_lld_dev *lld_dev) +{ + struct hinic5_adev *adev = to_hinic5_adev(lld_dev); + + return adev->chip_node; +} + +static struct card_node *hinic5_get_chip_node_by_hwdev(const void *hwdev) +{ + struct card_node *chip_node = NULL; + struct card_node *node_tmp = NULL; + struct hinic5_adev *adev = NULL; + + if (!hwdev) + return NULL; + + lld_hold(); + + list_for_each_entry(node_tmp, get_hinic5_chip_list(), node) { + if (!chip_node) { + list_for_each_entry(adev, &node_tmp->func_list, node) { + if (adev->hwdev == hwdev) { + chip_node = node_tmp; + break; + } + } + } + } + + lld_put(); + + return chip_node; +} + +static bool is_func_valid(struct hinic5_adev *adev) +{ + if (hinic5_func_type(adev->hwdev) == TYPE_VF) + return false; + + return true; +} + 
+void hinic5_get_card_info(const void *hwdev, const void *bufin, void *bufout) +{ + struct card_node *chip_node = NULL; + const struct card_info *info = (const struct card_info *)bufin; + struct card_info *out_info = (struct card_info *)bufout; + struct hinic5_adev *adev = NULL; + void *fun_hwdev = NULL; + u32 i = 0; + u32 j = 0; + + out_info->pf_num = 0; + + chip_node = hinic5_get_chip_node_by_hwdev(hwdev); + if (!chip_node) + return; + + lld_hold(); + + list_for_each_entry(adev, &chip_node->func_list, node) { + if (!is_func_valid(adev)) + continue; + + /* 当已获取的pf数量超过数组的大小时, + * 不再获取info信息,仅统计pf数量 + */ + if (j >= PF_MAX_SIZE) { + out_info->pf_num++; + j = out_info->pf_num; + continue; + } + + // 跳过前边已获取的pf + if (i < info->pf_num) { + i++; + continue; + } + + fun_hwdev = adev->hwdev; + + if (hinic5_support_nic(fun_hwdev, NULL)) { + if (adev->uld_dev[SERVICE_T_NIC]) { + out_info->pf[j].pf_type |= (u32)BIT(SERVICE_T_NIC); + get_dynamic_uld_dev_name(adev, SERVICE_T_NIC, out_info->pf[j].name); + } + } + + if (hinic5_support_ppa(fun_hwdev, NULL)) { + if (adev->uld_dev[SERVICE_T_PPA]) { + out_info->pf[j].pf_type |= (u32)BIT(SERVICE_T_PPA); + get_dynamic_uld_dev_name(adev, SERVICE_T_PPA, out_info->pf[j].name); + } + } + + if (hinic5_support_bifur(fun_hwdev)) + if (adev->uld_dev[SERVICE_T_BIFUR]) + get_dynamic_uld_dev_name(adev, SERVICE_T_BIFUR, + out_info->pf[j].name); + + if (hinic5_func_for_mgmt(fun_hwdev)) + strscpy(out_info->pf[j].name, "FOR_MGMT", IFNAMSIZ); + + strscpy(out_info->pf[j].bus_info, dev_name(adev->dev), + sizeof(out_info->pf[j].bus_info)); + out_info->pf_num++; + j = out_info->pf_num; + i++; + } + + lld_put(); +} + +bool hinic5_is_in_host(void) +{ + struct card_node *chip_node = NULL; + struct hinic5_adev *adev = NULL; + + lld_hold(); + list_for_each_entry(chip_node, get_hinic5_chip_list(), node) { + list_for_each_entry(adev, &chip_node->func_list, node) { + if (hinic5_func_type(adev->hwdev) != TYPE_VF) { + lld_put(); + return true; + } + } + } + + 
lld_put(); + + return false; +} + +static bool chip_node_is_exist(struct hinic5_adev *adev) +{ +#if !defined(__VMWARE__) + struct card_node *chip_node = NULL; + + /* SPU通过HVA连接, 每个function占用一个bus number, + * 需特殊处理; SPU PCI场景未适配多卡 + */ + if (adev->lld_dev.dev_type == HINIC5_DEVICE_T_PCI && + HINIC5_IS_SPU_DEV(hinic5_adev_get_device_id(adev))) { + if (!list_empty(get_hinic5_chip_list())) { + adev->chip_node = + list_first_entry(get_hinic5_chip_list(), struct card_node, node); + return true; + } + return false; + } + + list_for_each_entry(chip_node, get_hinic5_chip_list(), node) { + if (chip_node->id == adev->info.id) { + adev->chip_node = chip_node; + return true; + } + } + +#endif /* __VMWARE__ */ + + return false; +} + +int alloc_chip_node(struct hinic5_adev *adev) +{ + struct card_node *chip_node = NULL; + unsigned char i; + + if (chip_node_is_exist(adev)) + return 0; + + for (i = 0; i < CARD_MAX_SIZE; i++) { + if (test_and_set_bit(i, &card_bit_map) == 0) + break; + } + + if (i == CARD_MAX_SIZE) { + sdk_err(adev->dev, "Failed to alloc card id\n"); + return -EFAULT; + } + + chip_node = kzalloc(sizeof(*chip_node), GFP_KERNEL); + if (!chip_node) { + clear_bit(i, &card_bit_map); + sdk_err(adev->dev, + "Failed to alloc chip node\n"); + return -ENOMEM; + } + + chip_node->id = adev->info.id; + + if (snprintf(chip_node->chip_name, IFNAMSIZ, "%s%u", HINIC5_CHIP_NAME, i) < 0) { + clear_bit(i, &card_bit_map); + kfree(chip_node); + return -EINVAL; + } + + spin_lock_init(&chip_node->fw_update_context_lock); + spin_lock_init(&chip_node->dbgtool_info_lock); + + sdk_info(adev->dev, + "Add new chip %s to global list succeed\n", + chip_node->chip_name); + + list_add_tail(&chip_node->node, get_hinic5_chip_list()); + + INIT_LIST_HEAD(&chip_node->func_list); + adev->chip_node = chip_node; + + return 0; +} + +void free_chip_node(struct hinic5_adev *adev) +{ + struct card_node *chip_node = adev->chip_node; + int id, err; + + if (list_empty(&chip_node->func_list) != 0) { + 
list_del(&chip_node->node); + sdk_info(adev->dev, + "Delete chip %s from global list succeed\n", + chip_node->chip_name); + + spin_lock_deinit(&chip_node->dbgtool_info_lock); + spin_lock_deinit(&chip_node->fw_update_context_lock); + + err = sscanf(chip_node->chip_name, HINIC5_CHIP_NAME "%d", &id); + if (err != 1) + sdk_err(adev->dev, "Failed to get hinic5 id\n"); + + if (chip_node->fw_update_context) { + hinic5_fw_update_free_context(chip_node->fw_update_context); + chip_node->fw_update_context = NULL; + } + + clear_bit(id, &card_bit_map); + + kfree(chip_node); + } +} + +int hinic5_get_pf_id(struct card_node *chip_node, u32 port_id, u32 *pf_id, u32 *isvalid) +{ + struct hinic5_adev *adev = NULL; + + lld_hold(); + list_for_each_entry(adev, &chip_node->func_list, node) { + if (hinic5_func_type(adev->hwdev) == TYPE_VF) + continue; + + if (hinic5_physical_port_id(adev->hwdev) == port_id) { + *pf_id = hinic5_global_func_id(adev->hwdev); + *isvalid = 1; + break; + } + } + lld_put(); + + return 0; +} + +struct hinic5_lld_dev *hinic5_get_lld_dev_by_func_id(const char *chip_name, u32 func_id) +{ + struct hinic5_adev *adev = NULL; + struct card_node *chip_node = NULL; + + lld_hold(); + list_for_each_entry(chip_node, get_hinic5_chip_list(), node) { + if (strncmp(chip_node->chip_name, chip_name, IFNAMSIZ) != 0) + continue; + list_for_each_entry(adev, &chip_node->func_list, node) { + if (hinic5_global_func_id(adev->hwdev) == func_id) { + hinic5_lld_dev_hold(&adev->lld_dev); + lld_put(); + return &adev->lld_dev; + } + } + } + lld_put(); + + return NULL; +} + +void hinic5_get_mbox_cnt(const void *hwdev, void *buf_out) +{ + struct card_node *chip_node = NULL; + struct card_mbox_cnt_info *info = (struct card_mbox_cnt_info *)buf_out; + struct hinic5_adev *adev = NULL; + struct hinic5_hwdev *func_hwdev = NULL; + u32 i = 0; + + info->func_num = 0; + chip_node = hinic5_get_chip_node_by_hwdev(hwdev); + if (!chip_node) + return; + + lld_hold(); + + list_for_each_entry(adev, 
&chip_node->func_list, node) { + func_hwdev = (struct hinic5_hwdev *)adev->hwdev; + strscpy(info->func_info[i].bus_info, dev_name(adev->dev), + sizeof(info->func_info[i].bus_info)); + + info->func_info[i].send_cnt = func_hwdev->mbox_send_cnt; + info->func_info[i].ack_cnt = func_hwdev->mbox_ack_cnt; + info->func_num++; + i = info->func_num; + if (i >= ARRAY_SIZE(info->func_info)) { + sdk_err(adev->dev, "chip_node->func_list bigger than pf_max + vf_max\n"); + break; + } + } + + lld_put(); +} + +int hinic5_get_card_nic_uld_array(struct hinic5_lld_dev *lld_dev, u32 *dev_cnt, void *array[]) +{ + struct hinic5_adev *adev = to_hinic5_adev(lld_dev); + struct card_node *chip_node = NULL; + void *uld_temp = NULL; + u32 cnt; + + if (!lld_dev || !adev || !hinic5_support_nic(adev->hwdev, NULL) || !array) + return -EINVAL; + + lld_hold(); + + cnt = 0; + chip_node = adev->chip_node; + list_for_each_entry(adev, &chip_node->func_list, node) { + if (hinic5_func_type(adev->hwdev) == TYPE_VF) + continue; + + uld_temp = hinic5_get_uld_dev_unsafe(&adev->lld_dev, SERVICE_T_NIC); + if (!uld_temp) + continue; + + array[cnt] = uld_temp; + cnt++; + } + lld_put(); + + *dev_cnt = cnt; + + return 0; +} +EXPORT_SYMBOL(hinic5_get_card_nic_uld_array); + +int hinic5_get_device_info(struct hinic5_lld_dev *lld_dev, struct hinic5_device_info *info) +{ + struct hinic5_adev *adev = to_hinic5_adev(lld_dev); + + if (!info) + return -EINVAL; + + switch (lld_dev->dev_type) { + case HINIC5_DEVICE_T_PCI: + case HINIC5_DEVICE_T_UB: + memcpy((info), &adev->info, sizeof(adev->info)); + return 0; + default: + break; + } + return -EINVAL; +} +EXPORT_SYMBOL(hinic5_get_device_info); diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_dev_mgmt.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_dev_mgmt.h new file mode 100644 index 00000000..05d74be4 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_dev_mgmt.h @@ -0,0 +1,192 @@ +/* SPDX-License-Identifier: 
GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_DEV_MGMT_H +#define HINIC5_DEV_MGMT_H +#include <linux/types.h> +#include <linux/bitops.h> +#include <linux/proc_fs.h> + +#include "ossl_knl.h" +#include "hinic5_bus.h" +#include "hinic5_sriov.h" +#include "hinic5_chip_info.h" +#ifndef __WIN__ +#include "hinic5_lld.h" +#endif + +#define HINIC5_VF_PCI_CFG_REG_BAR 0 +#define HINIC5_PF_PCI_CFG_REG_BAR 1 + +#define HINIC5_PCI_INTR_REG_BAR 2 +#define HINIC5_PCI_MGMT_REG_BAR 3 /* Only PF have mgmt bar */ +#define HINIC5_PCI_DB_BAR 4 + +#define PRINT_ULD_DETACH_TIMEOUT_INTERVAL 5000 /* 5 second */ +#define PRINT_ULD_DETACH_TIMES 30 +#define PRINT_ULD_DETACH_TIMES_INTERVAL 5 +#define ULD_LOCK_MIN_USLEEP_TIME 900 +#define ULD_LOCK_MAX_USLEEP_TIME 1000 + +#define HINIC5_UBUS_FERS2 2 +#define HINIC5_UBUS_DB_BAR 1 +#define HINIC5_UBUS_INTR_REG_BAR 0 + +/* + * 1825 UBUS FERS2 资源空间排布 +------------------------------------------ + * CPI VF BAR0 (CFG) 8 KB + * CPI PF BAR1 (CFG) 64 KB + * CPI PF BAR3 (MGMT) 128 KB + */ + +#define HINIC5_VF_UBUS_CFG_REG_OFFSET 0 +#define HINIC5_VF_UBUS_CFG_REG_SIZE 0x2000 /* 8 KB */ + +#define HINIC5_PF_UBUS_CFG_REG_OFFSET HINIC5_VF_UBUS_CFG_REG_SIZE +#define HINIC5_PF_UBUS_CFG_REG_SIZE 0x10000 /* 64 KB */ + +#define HINIC5_PF_UBUS_MGMT_REG_OFFSET (HINIC5_PF_UBUS_CFG_REG_OFFSET + HINIC5_PF_UBUS_CFG_REG_SIZE) +#define HINIC5_PF_UBUS_MGMT_REG_SIZE 0x20000 /* 128 KB */ + +/* + * 1872 UBUS FERS2 资源空间排布 + * Ref 半导体/Hi1872 V100/Docs/KIA2/0.1.System/1.4.FS/ + * Programming User Guide/Hi1872 V100 UB 初始化.docx + + * PF FERS2 +------------------------------------------ + * UB vendor space for UBD2H 128 KB + * UB vendor space for UBG 128 KB + * UMMU non-secure 32 KB + * CPI PF BAR3 (MGMT) 96 KB + * CPI PF BAR1 (CFG) 128 KB + + * VF FERS2 +------------------------------------------ + * UB vendor space for UBD2H 128 KB + * UB vendor space for UBG 128 KB + * CPI VF BAR01 (CFG) 256 KB + */ + +#define HINIC5_HTN_VF_UBUS_CFG_REG_OFFSET 
0x40000 /* 256 KB */ +#define HINIC5_HTN_VF_UBUS_CFG_REG_SIZE 0x40000 /* 256 KB */ + +#define HINIC5_HTN_PF_UBUS_CFG_REG_OFFSET 0x60000 /* 384 KB */ +#define HINIC5_HTN_PF_UBUS_CFG_REG_SIZE 0x20000 /* 128 KB */ + +#define HINIC5_HTN_PF_UBUS_MGMT_REG_OFFSET 0x48000 /* 288 KB */ +#define HINIC5_HTN_PF_UBUS_MGMT_REG_SIZE 0x18000 /* 96 KB */ + +/* 默认ubus dma bit mask */ +#define HINIC5_UBUS_DMA_BIT_MASK_DEFAULT 48 + +/* ubus dma bit mask控制范围 */ +#define HINIC5_UBUS_DMA_BIT_MASK_MAX 64 +#define HINIC5_UBUS_DMA_BIT_MASK_MIN 32 + +enum { + HINIC5_NOT_PROBE = 1, + HINIC5_PROBE_START = 2, + HINIC5_PROBE_OK = 3, + HINIC5_IN_REMOVE = 4, +}; + +#define HINIC5_VPMD_PROC_NAME_LEN 32 +/* Structure dev private */ +struct hinic5_adev { + struct hinic5_lld_dev lld_dev; + struct device *dev; + void *hwdev; + void *bus_dev; /* pcie场景下代表pdev,ubus场景下代表ub dev */ + struct card_node *chip_node; + /* Record the service object address, + * such as hinic5_dev and toe_dev, fc_dev + */ + void *uld_dev[SERVICE_T_MAX]; + /* Record the service object name */ + char uld_dev_name[SERVICE_T_MAX][IFNAMSIZ]; + /* It is a the global variable for driver to manage + * all function device linked list + */ + struct list_head node; + + bool disable_vf_load; + bool disable_srv_load[SERVICE_T_MAX]; + + void __iomem *cfg_reg_base; + void __iomem *intr_reg_base; + void __iomem *mgmt_reg_base; + void __iomem *fers2_reg_base; + u64 db_dwqe_len; + u64 db_base_phy; + u64 cfg_base_phy; + u64 cfg_base_len; + u64 mgmt_base_phy; // 仅支持PF + u64 mgmt_base_len; + + /* 适配工具使用,暂时先在驱动手动存fers2的地址和size, + * 工具适配后删除 + */ + u64 fers2_base_phy; + u64 fers2_total_len; + void __iomem *db_base; + + /* lock for attach/detach uld */ + struct mutex adev_mutex; + int lld_state; + u32 rsvd1; + + struct hinic5_sriov_info sriov_info; + + /* setted when uld driver processing event */ + ulong state; + struct pci_device_id id; + + atomic_t ref_cnt; + + atomic_t uld_ref_cnt[SERVICE_T_MAX]; + ulong uld_state; + spinlock_t uld_lock; // Spinlock 
to protect ULD (Upper Layer Driver) operations + + u16 probe_fault_level; + u16 rsvd2; + +#ifdef __VMWARE__ + #include "vm_pci.h" +#endif + struct hinic5_bus_ops *bus_ops; + struct hinic5_device_info info; + + char vpmd_proc_name[HINIC5_VPMD_PROC_NAME_LEN]; + struct proc_dir_entry *vpmd_proc; +}; + +struct hinic_chip_info { + u8 chip_id; /* chip id within card */ + u8 card_type; /* hinic_multi_chip_card_type */ + u8 rsvd[10]; /* reserved 10 bytes */ +}; + +#define to_hinic5_adev(n) container_of(n, struct hinic5_adev, lld_dev) + +struct list_head *get_hinic5_chip_list(void); + +int alloc_chip_node(struct hinic5_adev *adev); + +void free_chip_node(struct hinic5_adev *adev); + +void lld_lock_chip_node(void); + +void lld_unlock_chip_node(void); + +void hinic5_lld_lock_init(void); + +void lld_dev_cnt_init(struct hinic5_adev *adev); +void wait_lld_dev_unused(struct hinic5_adev *adev); + +struct card_node *hinic5_get_chip_node_by_lld(struct hinic5_lld_dev *lld_dev); + +struct hinic5_lld_dev *hinic5_get_lld_dev_by_func_id(const char *chip_name, u32 func_id); + +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_lld.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_lld.c new file mode 100644 index 00000000..61413a2d --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_lld.c @@ -0,0 +1,1091 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <net/addrconf.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/io-mapping.h> +#include <linux/interrupt.h> +#include <linux/inetdevice.h> +#include <linux/time.h> +#include <linux/timex.h> +#include <linux/rtc.h> +#include <linux/aer.h> +#include <linux/debugfs.h> +#include <linux/proc_fs.h> +#include <linux/stat.h> +#include <linux/mm.h> + +#include "ossl_knl.h" +#include "hinic5_mt.h" +#include "hinic5_common.h" 
+#include "hinic5_crm.h" +#include "hinic5_pci_id_tbl.h" +#include "hinic5_sriov.h" +#include "hinic5_dev_mgmt.h" +#include "hinic5_nictool.h" +#include "hinic5_hw.h" +#include "hinic5_hinic5_vram.h" +#include "hinic5_fast_msg_init.h" +#include "hinic5_profile.h" +#include "hinic5_hwdev.h" +#include "hinic5_prof_adap.h" +#include "hinic5_fw_update.h" +#include "mpu_inband_cmd_defs.h" +#include "hinic5_bus.h" +#include "hinic5_typedef_inner.h" +#include "hinic5_lld_private.h" +#include "hinic5_hw_comm.h" +#include "hinic5_lld.h" + +static bool use_hinic5_vram; +module_param(use_hinic5_vram, bool, 0644); +MODULE_PARM_DESC(use_hinic5_vram, "use HINIC5_VRAM or not (only used in sdi_nanoos) - default is false"); + +static bool disable_attach; +module_param(disable_attach, bool, 0444); +MODULE_PARM_DESC(disable_attach, "disable_attach or not - default is false"); + +static bool disable_vf_load; +module_param(disable_vf_load, bool, 0444); +MODULE_PARM_DESC(disable_vf_load, + "Disable virtual functions probe or not - default is false"); + +bool hinic5_is_disable_vf_load(void) +{ + return disable_vf_load; +} + +#define HINIC5_WAIT_SRIOV_CFG_TIMEOUT 40000 /* same as default mbox timeout */ + +#define HINIC5_SYNC_YEAR_OFFSET 1900 +#define HINIC5_SYNC_MONTH_OFFSET 1 + +MODULE_AUTHOR("Huawei Technologies CO., Ltd"); +MODULE_DESCRIPTION(HINIC5_DRV_DESC); +MODULE_VERSION(HINIC5_DRV_VERSION); +MODULE_LICENSE("GPL"); + +#if !(defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE)) +static DEVICE_ATTR(sriov_numvfs, 0644, + hinic5_sriov_numvfs_show, hinic5_sriov_numvfs_store); +static DEVICE_ATTR(sriov_totalvfs, 0444, + sriov_totalvfs_show, NULL); +#endif /* !(HAVE_SRIOV_CONFIGURE || HAVE_RHEL6_SRIOV_CONFIGURE) */ + +struct hinic5_uld_info g_uld_info[SERVICE_T_MAX] = { {0} }; + +#define HINIC5_EVENT_PROCESS_TIMEOUT 10000 +struct mutex g_uld_mutex; // Global mutex to protect ULD operations + +#define HINIC5_PROC_DIR "hisdk5" +struct proc_dir_entry *g_proc_dir; + +void 
hinic5_uld_lock_init(void) +{ + mutex_init(&g_uld_mutex); +} + +static const char *s_uld_name[SERVICE_T_MAX] = { + "nic", "ovs", "roce", "toe", "ioe", "fc", "vbs", "ipsec", "virtio", + "migrate", "ppa", "custom", "vroce", "ub", "jbof", "macsec", "dmmu", + "cfm", "bifur", "hihtr"}; + +const char **hinic5_get_uld_names(void) +{ + return s_uld_name; +} + +const struct hinic5_uld_info *hinic5_get_uld_info_by_type(enum hinic5_service_type type) +{ + if (type >= SERVICE_T_MAX) + return NULL; + + return &g_uld_info[type]; +} + +static int attach_uld(struct hinic5_adev *adev, enum hinic5_service_type type, + const struct hinic5_uld_info *uld_info) +{ + void *uld_dev = NULL; + int err; + + mutex_lock(&adev->adev_mutex); + + if (adev->uld_dev[type]) { + sdk_err(adev->dev, + "%s driver has attached\n", + s_uld_name[type]); + err = 0; + goto out_unlock; + } + + if (!uld_info || !uld_info->probe || !uld_info->remove) { + err = 0; + goto out_unlock; + } + + atomic_set(&adev->uld_ref_cnt[type], 0); + err = uld_info->probe(&adev->lld_dev, &uld_dev, adev->uld_dev_name[type]); + if (err != 0) { + sdk_info(adev->dev, + "cannot add object for %s driver\n", + s_uld_name[type]); + goto probe_failed; + } + + adev->uld_dev[type] = uld_dev; + set_bit(type, &adev->uld_state); + mutex_unlock(&adev->adev_mutex); + + sdk_info(adev->dev, + "Attach %s driver succeed\n", s_uld_name[type]); + return 0; + +probe_failed: +out_unlock: + mutex_unlock(&adev->adev_mutex); + + return err; +} + +static void wait_uld_unused(struct hinic5_adev *adev, enum hinic5_service_type type) +{ + u32 loop_cnt = 0; + u32 print_cnt = 0; + + while (atomic_read(&adev->uld_ref_cnt[type]) != 0) { + loop_cnt++; + if ((loop_cnt % PRINT_ULD_DETACH_TIMEOUT_INTERVAL == 0) && + print_cnt < PRINT_ULD_DETACH_TIMES) { + sdk_err(adev->dev, "Wait for uld unused for %lds, reference count: %d\n", + (PRINT_ULD_DETACH_TIMES_INTERVAL * loop_cnt / MSEC_PER_SEC), + atomic_read(&adev->uld_ref_cnt[type])); + + print_cnt++; + } + + 
usleep_range(ULD_LOCK_MIN_USLEEP_TIME, ULD_LOCK_MAX_USLEEP_TIME); + } +} + +static void detach_uld(struct hinic5_adev *adev, + enum hinic5_service_type type) +{ + struct hinic5_uld_info *uld_info = &g_uld_info[type]; + ulong end; + bool timeout = true; + + mutex_lock(&adev->adev_mutex); + if (!adev->uld_dev[type]) { + mutex_unlock(&adev->adev_mutex); + return; + } + + end = jiffies + msecs_to_jiffies(HINIC5_EVENT_PROCESS_TIMEOUT); + do { + if (!test_and_set_bit(type, &adev->state)) { + timeout = false; + break; + } + usleep_range(900, 1000); /* sleep 900 us ~ 1000 us */ + } while (time_before(jiffies, end)); + + if (timeout && !test_and_set_bit(type, &adev->state)) + timeout = false; + + spin_lock_bh(&adev->uld_lock); + clear_bit(type, &adev->uld_state); + spin_unlock_bh(&adev->uld_lock); + + wait_uld_unused(adev, type); + + uld_info->remove(&adev->lld_dev, adev->uld_dev[type]); + + adev->uld_dev[type] = NULL; + if (!timeout) + clear_bit(type, &adev->state); + + sdk_info(adev->dev, + "Detach %s driver succeed\n", + s_uld_name[type]); + mutex_unlock(&adev->adev_mutex); +} + +static void attach_ulds(struct hinic5_adev *adev) +{ + int type; + + lld_hold(); + mutex_lock(&g_uld_mutex); + + for (type = SERVICE_T_NIC; type < SERVICE_T_MAX; type++) { + if (g_uld_info[type].probe) { + /* vf in VM can not disable service load */ + if ((hinic5_adev_is_virtfn(adev) != 0) && + (!hinic5_get_vf_service_load(adev, (u16)type))) { + sdk_info(adev->dev, "VF device disable service_type = %d load in host\n", + type); + continue; + } + attach_uld(adev, (enum hinic5_service_type)type, &g_uld_info[type]); + } + } + mutex_unlock(&g_uld_mutex); + lld_put(); +} + +static void detach_ulds(struct hinic5_adev *adev) +{ + int type; + + lld_hold(); + mutex_lock(&g_uld_mutex); + for (type = SERVICE_T_MAX - 1; type > SERVICE_T_NIC; type--) { + if (g_uld_info[type].probe) + detach_uld(adev, (enum hinic5_service_type)type); + } + + if (g_uld_info[SERVICE_T_NIC].probe) + detach_uld(adev, 
SERVICE_T_NIC); + mutex_unlock(&g_uld_mutex); + lld_put(); +} + +int hinic5_register_uld(enum hinic5_service_type type, + struct hinic5_uld_info *uld_info) +{ + struct card_node *chip_node = NULL; + struct hinic5_adev *adev = NULL; + struct list_head *chip_list = NULL; + + if (type >= SERVICE_T_MAX) { + pr_err("Unknown type %d of up layer driver to register\n", + type); + return -EINVAL; + } + + if (!uld_info || !uld_info->probe || !uld_info->remove) { + pr_err("Invalid information of %s driver to register\n", + s_uld_name[type]); + return -EINVAL; + } + + lld_hold(); + mutex_lock(&g_uld_mutex); + + if (g_uld_info[type].probe) { + pr_err("%s driver has registered\n", s_uld_name[type]); + mutex_unlock(&g_uld_mutex); + lld_put(); + return -EINVAL; + } + + chip_list = get_hinic5_chip_list(); + memcpy(&g_uld_info[type], uld_info, sizeof(struct hinic5_uld_info)); + list_for_each_entry(chip_node, chip_list, node) { + list_for_each_entry(adev, &chip_node->func_list, node) { + if (attach_uld(adev, type, uld_info) != 0) { + sdk_info(adev->dev, + "Cannot attach %s driver\n", + s_uld_name[type]); +#ifdef CONFIG_MODULE_PROF + adev->bus_ops->fault_process(adev, hinic5_func_max_vf(adev->hwdev)); + break; +#else + continue; +#endif + } + } + } + + mutex_unlock(&g_uld_mutex); + lld_put(); + + pr_info("Register %s driver succeed\n", s_uld_name[type]); + return 0; +} +EXPORT_SYMBOL(hinic5_register_uld); + +void hinic5_unregister_uld(enum hinic5_service_type type) +{ + struct card_node *chip_node = NULL; + struct hinic5_adev *adev = NULL; + struct hinic5_uld_info *uld_info = NULL; + struct list_head *chip_list = NULL; + + if (type >= SERVICE_T_MAX) { + pr_err("Unknown type %d of up layer driver to unregister\n", + type); + return; + } + + lld_hold(); + mutex_lock(&g_uld_mutex); + chip_list = get_hinic5_chip_list(); + list_for_each_entry(chip_node, chip_list, node) { + /* detach vf first */ + list_for_each_entry(adev, &chip_node->func_list, node) + if (hinic5_func_type(adev->hwdev) == 
TYPE_VF) + detach_uld(adev, type); + + list_for_each_entry(adev, &chip_node->func_list, node) + if (hinic5_func_type(adev->hwdev) == TYPE_PF) + detach_uld(adev, type); + + list_for_each_entry(adev, &chip_node->func_list, node) + if (hinic5_func_type(adev->hwdev) == TYPE_PPF) + detach_uld(adev, type); + } + + uld_info = &g_uld_info[type]; + memset(uld_info, 0, sizeof(struct hinic5_uld_info)); + mutex_unlock(&g_uld_mutex); + lld_put(); +} +EXPORT_SYMBOL(hinic5_unregister_uld); + +int hinic5_attach_nic(struct hinic5_lld_dev *lld_dev) +{ + struct hinic5_adev *adev = NULL; + + if (!lld_dev) + return -EINVAL; + + adev = to_hinic5_adev(lld_dev); + return attach_uld(adev, SERVICE_T_NIC, &g_uld_info[SERVICE_T_NIC]); +} +EXPORT_SYMBOL(hinic5_attach_nic); + +void hinic5_detach_nic(const struct hinic5_lld_dev *lld_dev) +{ + struct hinic5_adev *adev = NULL; + + if (!lld_dev) + return; + + adev = to_hinic5_adev(lld_dev); + detach_uld(adev, SERVICE_T_NIC); +} +EXPORT_SYMBOL(hinic5_detach_nic); + +int hinic5_attach_service(const struct hinic5_lld_dev *lld_dev, enum hinic5_service_type type) +{ + struct hinic5_adev *adev = NULL; + + if (!lld_dev || type >= SERVICE_T_MAX) + return -EINVAL; + + adev = to_hinic5_adev(lld_dev); + if (!adev) + return -EINVAL; + + return attach_uld(adev, type, &g_uld_info[type]); +} +EXPORT_SYMBOL(hinic5_attach_service); + +void hinic5_detach_service(const struct hinic5_lld_dev *lld_dev, enum hinic5_service_type type) +{ + struct hinic5_adev *adev = NULL; + + if (!lld_dev || type >= SERVICE_T_MAX) + return; + + adev = to_hinic5_adev(lld_dev); + detach_uld(adev, type); +} +EXPORT_SYMBOL(hinic5_detach_service); + +static void hinic5_sync_time_to_fmw(struct hinic5_adev *adev) +{ + struct timeval tv = {0}; + struct rtc_time rt_time = {0}; + u64 tv_msec; + int err; + + do_gettimeofday(&tv); + + tv_msec = (u64)(tv.tv_sec * MSEC_PER_SEC + tv.tv_usec / USEC_PER_MSEC); + err = hinic5_sync_time(adev->hwdev, tv_msec); + if (err != 0) { + sdk_err(adev->dev, 
"Synchronize UTC time to firmware failed, errno:%d.\n", + err); + } else { + rtc_time_to_tm(tv.tv_sec, &rt_time); + sdk_info(adev->dev, "Synchronize UTC time to firmware succeed. UTC time %d-%02d-%02d %02d:%02d:%02d.\n", + rt_time.tm_year + HINIC5_SYNC_YEAR_OFFSET, + rt_time.tm_mon + HINIC5_SYNC_MONTH_OFFSET, + rt_time.tm_mday, rt_time.tm_hour, + rt_time.tm_min, rt_time.tm_sec); + } +} + +static void send_uld_dev_event(struct hinic5_adev *adev, + struct hinic5_event_info *event) +{ + int type; + + for (type = SERVICE_T_NIC; type < SERVICE_T_MAX; type++) { + if (test_and_set_bit((u32)type, &adev->state)) { + sdk_warn(adev->dev, "Svc: 0x%x, event: 0x%x can't handler, %s is in detach\n", + event->service, event->type, s_uld_name[type]); + continue; + } + + if (g_uld_info[type].event && adev->uld_dev[type]) + g_uld_info[type].event(&adev->lld_dev, + adev->uld_dev[type], event); + clear_bit((u32)type, &adev->state); + } +} + +static void send_event_to_dst_pf(struct hinic5_adev *adev, u16 func_id, + struct hinic5_event_info *event) +{ + struct hinic5_adev *des_dev = NULL; + + lld_hold(); + list_for_each_entry(des_dev, &adev->chip_node->func_list, node) { + if (adev->lld_state == HINIC5_IN_REMOVE) + continue; + + if (hinic5_func_type(des_dev->hwdev) == TYPE_VF) + continue; + + if (hinic5_global_func_id(des_dev->hwdev) == func_id) { + send_uld_dev_event(des_dev, event); + break; + } + } + lld_put(); +} + +static void send_event_to_all_pf(struct hinic5_adev *adev, + struct hinic5_event_info *event) +{ + struct hinic5_adev *des_adev = NULL; + + lld_hold(); + list_for_each_entry(des_adev, &adev->chip_node->func_list, node) { + if (adev->lld_state == HINIC5_IN_REMOVE) + continue; + + if (hinic5_func_type(des_adev->hwdev) == TYPE_VF) + continue; + + send_uld_dev_event(des_adev, event); + } + lld_put(); +} + +static void hinic5_event_process(void *adapter, struct hinic5_event_info *event) +{ + struct hinic5_adev *adev = adapter; + struct hinic5_fault_event *fault = (void 
*)event->event_data; + u16 func_id; + + if ((event->service == EVENT_SRV_COMM && event->type == EVENT_COMM_FAULT) && + fault->fault_level == FAULT_LEVEL_SERIOUS_FLR && + fault->event.chip.func_id < hinic5_max_pf_num(adev->hwdev)) { + func_id = fault->event.chip.func_id; + send_event_to_dst_pf(adapter, func_id, event); + return; + } + + if (event->type == EVENT_COMM_MGMT_WATCHDOG) + send_event_to_all_pf(adapter, event); + else + send_uld_dev_event(adapter, event); +} + +static void uld_def_init(struct hinic5_adev *adev) +{ + int type; + + for (type = 0; type < SERVICE_T_MAX; type++) { + atomic_set(&adev->uld_ref_cnt[type], 0); + clear_bit(type, &adev->uld_state); + } + + spin_lock_init(&adev->uld_lock); +} + +#ifdef CONFIG_X86 +/** + * cfg_order_reg - when cpu model is haswell or broadwell, should configure dma + * order register to zero + * @adev: adev + **/ +static void cfg_order_reg(struct hinic5_adev *adev) +{ + u8 cpu_model[] = {0x3c, 0x3f, 0x45, 0x46, 0x3d, 0x47, 0x4f, 0x56}; + struct cpuinfo_x86 *cpuinfo = NULL; + u32 i; + + if (hinic5_func_type(adev->hwdev) == TYPE_VF) + return; + + cpuinfo = &cpu_data(0); + for (i = 0; i < sizeof(cpu_model); i++) { + if (cpu_model[i] == cpuinfo->x86_model) + hinic5_set_pcie_order_cfg(adev->hwdev); + } +} + +#endif + +int hinic5_set_vf_load_state(struct hinic5_lld_dev *lld_dev, bool vf_load_state) +{ + struct hinic5_adev *adev = NULL; + + if (!lld_dev) { + pr_err("lld_dev is null.\n"); + return -EINVAL; + } + + adev = to_hinic5_adev(lld_dev); + if (hinic5_func_type(adev->hwdev) == TYPE_VF) + return 0; + + adev->disable_vf_load = !vf_load_state; + sdk_info(adev->dev, "Current function %s vf load in host\n", + vf_load_state ? 
"enable" : "disable"); + + return 0; +} +EXPORT_SYMBOL(hinic5_set_vf_load_state); + +static void set_vf_load_state(struct hinic5_adev *adev) +{ + /* In bm mode, slave host will load vfs in default */ + if (IS_BMGW_SLAVE_HOST(((struct hinic5_hwdev *)adev->hwdev)) && + hinic5_func_type(adev->hwdev) != TYPE_VF) + hinic5_set_vf_load_state(&adev->lld_dev, false); + + if (!disable_attach) { +#ifndef __HIFC__ + if ((hinic5_func_type(adev->hwdev) != TYPE_VF) && + hinic5_is_multi_bm(adev->hwdev)) { + adev->bus_ops->virt_configure(adev, hinic5_func_max_vf(adev->hwdev)); + } +#endif + } +} + +#define HINIC5_CSR_BASIC_SIZE 0x1000 +#define HINIC5_CSR_MGMT_SIZE 0x10000 + +static int hinic5_vpmd_proc_mmap(struct file *file, struct vm_area_struct *vma) +{ + int err; + u64 pfn, vma_size, bar_size, ofst, check_size; + struct hinic5_adev *adev = NULL; + + if (!file || !vma || vma->vm_end < vma->vm_start) + return -EINVAL; + + adev = (struct hinic5_adev *)PDE_DATA(file_inode(file)); + if (!adev) + return -EINVAL; + + ofst = vma->vm_pgoff << PAGE_SHIFT; + if (ofst == 0) { + pfn = adev->cfg_base_phy >> PAGE_SHIFT; + bar_size = adev->cfg_base_len; + } else if (ofst == HINIC5_CSR_BASIC_SIZE) { + pfn = adev->mgmt_base_phy >> PAGE_SHIFT; + bar_size = adev->mgmt_base_len; + } else if (ofst == HINIC5_CSR_BASIC_SIZE + HINIC5_CSR_MGMT_SIZE) { + pfn = adev->db_base_phy >> PAGE_SHIFT; + bar_size = adev->db_dwqe_len; + } else { + pr_err("invalid offset:0x%llx", ofst); + return -EINVAL; + } + + vma_size = vma->vm_end - vma->vm_start; + /* bar_size align pagesize, check vma_size */ + check_size = ALIGN(bar_size, PAGE_SIZE); + if (vma_size > check_size) { + pr_err("invalid vma_size:0x%llx, check_size:0x%llx, bar_size:0x%llx", + vma_size, check_size, bar_size); + return -EINVAL; + } + + vm_flags_set(vma, VM_IO); + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + err = remap_pfn_range(vma, vma->vm_start, (unsigned long)pfn, + (unsigned long)vma_size, vma->vm_page_prot); + if (err != 0) { + 
pr_err("mmap vpmd failed, err %d\n", err); + return err; + } + + return 0; +} + +#ifdef HAVE_PROC_OPS +static const struct proc_ops hinic5_vpmd_proc_fops = { + .proc_open = NULL, + .proc_mmap = hinic5_vpmd_proc_mmap, + .proc_read = NULL, + .proc_release = NULL, +}; + +#else +static const struct file_operations hinic5_vpmd_proc_fops = { + .owner = THIS_MODULE, + .open = NULL, + .llseek = NULL, + .mmap = hinic5_vpmd_proc_mmap, + .read = NULL, + .release = NULL, +}; +#endif + +static int hinic5_init_vpmd_proc(struct hinic5_adev *adev) +{ + strscpy(adev->vpmd_proc_name, dev_name(adev->dev), sizeof(adev->vpmd_proc_name)); + adev->vpmd_proc = proc_create_data(&adev->vpmd_proc_name[0], 0640, g_proc_dir, + &hinic5_vpmd_proc_fops, adev); + if (!adev->vpmd_proc) { + sdk_err(adev->dev, "init vpmd proc failed, vpmd_proc_name:%s.", + adev->vpmd_proc_name); + return -ENOMEM; + } + + return 0; +} + +static bool hinic5_need_ht_gpa(struct hinic5_hwdev *hwdev) +{ + if (COMM_SUPPORT_HT_GPA(hwdev)) + return true; + return hinic5_func_type(hwdev) == TYPE_PPF; +} + +int hinic5_func_init(struct hinic5_adev *adev) +{ + struct hinic5_init_para init_para = {0}; + bool hinic5_cqm_init_en = false; + int err; + + uld_def_init(adev); + + init_para.adapter_hdl = adev; + init_para.dev_hdl = adev->dev; +#ifdef __UEFI__ + init_para.busdev_hdl = adev->bus_dev; +#endif + init_para.fers2_reg_base = adev->fers2_reg_base; + init_para.cfg_reg_base = adev->cfg_reg_base; + init_para.intr_reg_base = adev->intr_reg_base; + init_para.mgmt_reg_base = adev->mgmt_reg_base; + init_para.db_base = adev->db_base; + init_para.db_base_phy = adev->db_base_phy; + init_para.db_dwqe_len = adev->db_dwqe_len; + init_para.hwdev = &adev->hwdev; + init_para.chip_node = adev->chip_node; + init_para.probe_fault_level = adev->probe_fault_level; + + err = hinic5_init_hwdev(&init_para); + if (err != 0) { + adev->hwdev = NULL; + adev->probe_fault_level = init_para.probe_fault_level; + sdk_err(adev->dev, "Failed to initialize 
hardware device\n"); + return -EFAULT; + } + + if (COMM_SUPPORT_FAST_MSG((struct hinic5_hwdev *)adev->hwdev)) { + err = hinic5_fast_msg_init(adev->hwdev); + if (err != 0) { + sdk_err(adev->dev, "Failed to fast msg\n"); + goto fast_msg_init_err; + } + } + + if (hinic5_need_ht_gpa(adev->hwdev)) { + err = hinic5_ht_gpa_init(adev->hwdev); + if (err != 0) { + sdk_err(adev->dev, "Failed to init bank gpa\n"); + hinic5_ht_gpa_deinit(adev->hwdev); + goto ht_gpa_init_err; + } + } + + err = hinic5_fw_update_init(adev->hwdev); + if (err != 0) { + sdk_err(adev->dev, "Failed to init firmware maintenance\n"); + goto fw_update_init_err; + } + + hinic5_cqm_init_en = hinic5_need_init_stateful_default(adev->hwdev); + if (hinic5_cqm_init_en) { + err = hinic5_stateful_init(adev->hwdev); + if (err != 0) { + sdk_err(adev->dev, "Failed to init stateful\n"); + goto stateful_init_err; + } + } + + adev->lld_dev.dev = adev->dev; + adev->lld_dev.hwdev = adev->hwdev; + + if (hinic5_func_type(adev->hwdev) != TYPE_VF) + set_bit(HINIC5_FUNC_PERSENT, &adev->sriov_info.state); + + err = hinic5_event_register(adev->hwdev, adev, hinic5_event_process); + if (err != 0) { + sdk_err(adev->dev, "Failed to register callback for event, err = %d\n", err); + goto event_register_err; + } + + if (hinic5_func_type(adev->hwdev) != TYPE_VF) + hinic5_sync_time_to_fmw(adev); + + /* dbgtool init */ + lld_lock_chip_node(); + err = nictool_k_init(adev->hwdev, adev->chip_node); + if (err != 0) { + lld_unlock_chip_node(); + sdk_err(adev->dev, "Failed to initialize dbgtool\n"); + goto nictool_init_err; + } + list_add_tail(&adev->node, &adev->chip_node->func_list); + lld_unlock_chip_node(); + + set_vf_load_state(adev); + + err = hinic5_init_vpmd_proc(adev); + if (err != 0) { + sdk_err(adev->dev, "Failed to init vpmd proc\n"); + goto init_vpmd_proc_err; + } + + if (!disable_attach) { + attach_ulds(adev); +#ifdef CONFIG_X86 + cfg_order_reg(adev); +#endif + } + + return 0; + +init_vpmd_proc_err: + lld_lock_chip_node(); + 
nictool_k_uninit(adev->hwdev, adev->chip_node); + lld_unlock_chip_node(); +nictool_init_err: + hinic5_event_unregister(adev->hwdev); +event_register_err: + if (hinic5_cqm_init_en) + hinic5_stateful_deinit(adev->hwdev); +stateful_init_err: + hinic5_fw_update_deinit(adev->hwdev); +fw_update_init_err: + if (hinic5_need_ht_gpa(adev->hwdev)) + hinic5_ht_gpa_deinit(adev->hwdev); +ht_gpa_init_err: + if (COMM_SUPPORT_FAST_MSG((struct hinic5_hwdev *)adev->hwdev)) + hinic5_fast_msg_deinit(adev->hwdev); +fast_msg_init_err: + hinic5_free_hwdev(adev->hwdev); + adev->hwdev = NULL; + + return err; +} + +static void hinic5_deinit_vpmd_proc(struct hinic5_adev *adev) +{ + remove_proc_entry(adev->vpmd_proc_name, g_proc_dir); + adev->vpmd_proc = NULL; +} + +void hinic5_func_deinit(struct hinic5_adev *adev) +{ + /* When function deinit, disable mgmt initiative report events firstly, + * then flush mgmt work-queue. + */ + hinic5_disable_mgmt_msg_report(adev->hwdev); + + hinic5_flush_mgmt_workq(adev->hwdev); + + lld_lock_chip_node(); + list_del(&adev->node); + lld_unlock_chip_node(); + + detach_ulds(adev); + + wait_lld_dev_unused(adev); + + hinic5_deinit_vpmd_proc(adev); + + lld_lock_chip_node(); + nictool_k_uninit(adev->hwdev, adev->chip_node); + lld_unlock_chip_node(); + + hinic5_event_unregister(adev->hwdev); + + hinic5_free_stateful(adev->hwdev); + + hinic5_fw_update_deinit(adev->hwdev); + + if (hinic5_need_ht_gpa(adev->hwdev)) + hinic5_ht_gpa_deinit(adev->hwdev); + + if (COMM_SUPPORT_FAST_MSG((struct hinic5_hwdev *)adev->hwdev)) + hinic5_fast_msg_deinit(adev->hwdev); + + hinic5_free_hwdev(adev->hwdev); + adev->hwdev = NULL; +} + +void wait_sriov_cfg_complete(struct hinic5_adev *adev) +{ + struct hinic5_sriov_info *sriov_info = NULL; + ulong end; + + sriov_info = &adev->sriov_info; + clear_bit(HINIC5_FUNC_PERSENT, &sriov_info->state); + usleep_range(9900, 10000); /* sleep 9900 us ~ 10000 us */ + + end = jiffies + msecs_to_jiffies(HINIC5_WAIT_SRIOV_CFG_TIMEOUT); + do { + if 
(!test_bit(HINIC5_SRIOV_ENABLE, &sriov_info->state) && + !test_bit(HINIC5_SRIOV_DISABLE, &sriov_info->state)) + return; + + usleep_range(9900, 10000); /* sleep 9900 us ~ 10000 us */ + } while (time_before(jiffies, end)); +} + +bool hinic5_get_vf_service_load(struct hinic5_adev *adev, u16 service) +{ + struct hinic5_adev *pf_adev = NULL; + + if (!adev) { + pr_err("adev is null.\n"); + return false; + } + + pf_adev = hinic5_adev_get_pf_adev(adev); + if (!pf_adev) { + sdk_err(adev->dev, "pf_adev is null.\n"); + return false; + } + + if (service >= SERVICE_T_MAX) { + sdk_err(adev->dev, "service_type = %u state is error\n", + service); + return false; + } + + return !pf_adev->disable_srv_load[service]; +} + +int hinic5_set_vf_service_load(struct hinic5_lld_dev *lld_dev, u16 service, + bool vf_srv_load) +{ + struct hinic5_adev *adev = NULL; + + if (!lld_dev) { + pr_err("lld_dev is null.\n"); + return -EINVAL; + } + + adev = to_hinic5_adev(lld_dev); + + if (service >= SERVICE_T_MAX) { + sdk_err(adev->dev, "service_type = %u state is error\n", + service); + return -EFAULT; + } + + if (hinic5_func_type(adev->hwdev) == TYPE_VF) + return 0; + + adev->disable_srv_load[service] = !vf_srv_load; + sdk_info(adev->dev, "Current function %s vf load in host\n", + vf_srv_load ? 
"enable" : "disable"); + + return 0; +} +EXPORT_SYMBOL(hinic5_set_vf_service_load); + +int probe_func_param_init(struct hinic5_adev *adev) +{ + if (!adev) + return -EFAULT; + + mutex_lock(&adev->adev_mutex); + if (adev->lld_state >= HINIC5_PROBE_START) { + sdk_warn(adev->dev, "Don not probe repeat\n"); + mutex_unlock(&adev->adev_mutex); + return -EEXIST; + } + adev->lld_state = HINIC5_PROBE_START; + mutex_unlock(&adev->adev_mutex); + + return 0; +} + +static int hinic5_sdk_proc_init(void) +{ + g_proc_dir = proc_mkdir(HINIC5_PROC_DIR, NULL); + if (!g_proc_dir) + return -EPERM; + return 0; +} + +static void hinic5_sdk_proc_deinit(void) +{ + if (!g_proc_dir) + return; + + proc_remove(g_proc_dir); + g_proc_dir = NULL; +} + +int hinic5_lld_init(void) +{ + int err; + + pr_info("%s - version %s\n", HINIC5_DRV_DESC, HINIC5_DRV_VERSION); + memset(g_uld_info, 0, sizeof(g_uld_info)); + + hinic5_lld_lock_init(); + hinic5_uld_lock_init(); + set_use_hinic5_vram_flag(use_hinic5_vram); + + if (use_hinic5_vram) { + err = hisdk5_hinic5_vram_init(); + if (err != 0) + return err; + } + + err = hinic5_sdk_proc_init(); + if (err != 0) { + pr_err("create vpmd dir failed\n"); + goto dir_create_err; + } + + err = hinic5_module_pre_init(); + if (err != 0) { + pr_err("Init custom failed\n"); + goto module_pre_init_err; + } + + err = hinic5_register_driver(); + if (err != 0) + goto register_driver_err; + + return 0; + +register_driver_err: + hinic5_module_post_exit(); + +module_pre_init_err: + hinic5_sdk_proc_deinit(); + +dir_create_err: + if (use_hinic5_vram) + hisdk5_hinic5_vram_deinit(); + + return err; +} + +void hinic5_lld_exit(void) +{ + if (use_hinic5_vram) + hisdk5_hinic5_vram_deinit(); + + hinic5_unregister_driver(); + hinic5_module_post_exit(); + hinic5_sdk_proc_deinit(); +} + +static bool is_uld_with_cleanup(enum hinic5_service_type type) +{ + const enum hinic5_service_type uld_with_cleanup[] = { + SERVICE_T_IPSEC, + }; + u32 uld_with_cleanup_size = sizeof(uld_with_cleanup) / 
sizeof(enum hinic5_service_type); + u32 i; + + for (i = 0; i < uld_with_cleanup_size; i++) { + if (uld_with_cleanup[i] == type) + return true; + } + return false; +} + +void hinic5_uld_cleanup_before_unregister(enum hinic5_service_type type, void (*cleanup)(void *)) +{ + struct card_node *chip_node = NULL; + struct hinic5_adev *adev = NULL; + struct list_head *chip_list = NULL; + + if (!is_uld_with_cleanup(type)) { + pr_info("this service does not support cleanup.\n"); + return; + } + + if (!cleanup) { + pr_info("this service no need to cleanup.\n"); + return; + } + + lld_hold(); + mutex_lock(&g_uld_mutex); + chip_list = get_hinic5_chip_list(); + list_for_each_entry(chip_node, chip_list, node) { + /* detach vf first */ + list_for_each_entry(adev, &chip_node->func_list, node) + if (hinic5_func_type(adev->hwdev) == TYPE_VF) + cleanup(adev->uld_dev[type]); + + list_for_each_entry(adev, &chip_node->func_list, node) + if (hinic5_func_type(adev->hwdev) == TYPE_PF) + cleanup(adev->uld_dev[type]); + + list_for_each_entry(adev, &chip_node->func_list, node) + if (hinic5_func_type(adev->hwdev) == TYPE_PPF) + cleanup(adev->uld_dev[type]); + } + mutex_unlock(&g_uld_mutex); + lld_put(); +} +EXPORT_SYMBOL(hinic5_uld_cleanup_before_unregister); + +int hinic5_get_vf_num(struct hinic5_lld_dev *lld_dev) +{ + struct hinic5_adev *adev = NULL; + + if (!lld_dev) { + pr_err("lld_dev is null.\n"); + return -EINVAL; + } + + adev = to_hinic5_adev(lld_dev); + return hinic5_adev_get_vf_num(adev); +} +EXPORT_SYMBOL(hinic5_get_vf_num); + +int hinic5_get_chip_node_id(struct hinic5_lld_dev *lld_dev, u64 *chip_node_id) +{ + struct hinic5_adev *adev = NULL; + + if (!lld_dev) { + pr_err("lld_dev is null.\n"); + return -EINVAL; + } + + adev = to_hinic5_adev(lld_dev); + *chip_node_id = adev->chip_node->id; + + return 0; +} +EXPORT_SYMBOL(hinic5_get_chip_node_id); diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_lld_private.h 
b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_lld_private.h new file mode 100644 index 00000000..730c4548 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_lld_private.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_LLD_PRIVATE_H +#define HINIC5_LLD_PRIVATE_H +#include <linux/types.h> + +#include "hinic5_dev_mgmt.h" + +/* Module parameters */ +bool hinic5_is_disable_vf_load(void); + +bool hinic5_get_vf_service_load(struct hinic5_adev *adev, u16 service); +void wait_sriov_cfg_complete(struct hinic5_adev *adev); +void hinic5_func_deinit(struct hinic5_adev *adev); +int hinic5_func_init(struct hinic5_adev *adev); +int probe_func_param_init(struct hinic5_adev *adev); +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_pci_id_tbl.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_pci_id_tbl.h new file mode 100644 index 00000000..4d268202 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_pci_id_tbl.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_PCI_ID_TBL_H +#define HINIC5_PCI_ID_TBL_H + +#define HINIC5_VIRTIO_VNEDER_ID 0x1AF4 + +#ifdef CONFIG_SP_VID_DID +#define PCI_VENDOR_ID_SPNIC 0x1F3F +#define HINIC5_DEV_ID_STANDARD 0x9020 +#define HINIC5_DEV_ID_SDI_5_1_PF 0x9032 +#define HINIC5_DEV_ID_VF 0x9001 +#define HINIC5_DEV_ID_VF_HV 0x9002 +#define HINIC5_DEV_ID_SPU 0xAC00 +#else +#define PCI_VENDOR_ID_HUAWEI 0x19e5 +#define HINIC5_DEV_ID_STANDARD 0x0222 +#define HINIC5_DEV_ID_SDI_5_1_PF 0x0226 +#define HINIC5_DEV_ID_SDI_5_0_PF 0x0225 +#define HINIC5_DEV_ID_DPU_STORGE_PF 0x0220 +#define HINIC5_DEV_ID_SDI_6_0_PF 0x0225 +#define HINIC5_DEV_ID_VF 0x375F +#define HINIC5_DEV_ID_VF_HV 0x379F +#define HINIC5_DEV_ID_SPU 0xAC00 + +#define HINIC5_DEV_ID_TEMP 0x1823 +#define HINIC5_DEV_ID_1823_VF_TEMP 0x375E +#define 
HINIC5_DEV_ID_1823_VF_HV_TEMP 0x379E + +/* Hi1872V100, SP233, SP235, SP235-O */ +#define HINIC5_DEV_ID_72V1_PF 0x0229 +#define HINIC5_DEV_ID_72V1_VF 0x022a + +/* Hi1825V100 2X200G标卡、1X400G标卡、2X400G UBX + * UB载板 UB EXP定制卡、2X200G 4X200G天衢2.0板载卡 + */ +#define HINIC5_DEV_ID_25V1_PF 0x0230 +#define HINIC5_DEV_ID_25V1_VF 0x0231 +#endif /* CONFIG_SP_VID_DID */ + +#define HINIC5_IS_VF_DEV(dev_id) (dev_id == HINIC5_DEV_ID_VF || \ + dev_id == HINIC5_DEV_ID_72V1_VF || \ + dev_id == HINIC5_DEV_ID_1823_VF_TEMP || \ + dev_id == HINIC5_DEV_ID_25V1_VF) +#define HINIC5_IS_SPU_DEV(dev_id) (dev_id == HINIC5_DEV_ID_SPU || \ + dev_id == HINIC5_DEV_ID_SDI_5_0_PF) + +#endif /* HINIC5_PCI_ID_TBL_H */ + diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_pcie.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_pcie.c new file mode 100644 index 00000000..7eb3f69c --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_pcie.c @@ -0,0 +1,1109 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <net/addrconf.h> +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/io-mapping.h> +#include <linux/interrupt.h> +#include <linux/inetdevice.h> +#include <linux/time.h> +#include <linux/timex.h> +#include <linux/rtc.h> +#include <linux/aer.h> +#include <linux/debugfs.h> + +#include "ossl_knl.h" +#include "hinic5_mt.h" +#include "hinic5_common.h" +#include "hinic5_crm.h" +#include "hinic5_pci_id_tbl.h" +#include "hinic5_sriov.h" +#include "hinic5_dev_mgmt.h" +#include "hinic5_nictool.h" +#include "hinic5_hw.h" +#include "hinic5_hinic5_vram.h" +#include "hinic5_fast_msg_init.h" +#include "hinic5_lld.h" +#include "hinic5_lld_private.h" +#include "hinic5_profile.h" +#include "hinic5_hwdev.h" +#include "hinic5_typedef_inner.h" +#include "hinic5_prof_adap.h" +#include 
"hinic5_fw_update.h" +#include "mpu_inband_cmd_defs.h" +#include "hinic5_bus.h" +#include "hinic5_sysfs.h" +#include "hinic5_pcie.h" + +#define HINIC5_VF_TIMER_DISABLE_MAX_TIMEOUT 50 /* 50 mseconds */ +#define HINIC5_VF_TIMER_DISABLE_WAIT_TIME 5 /* 5 mseconds */ +#define HINIC5_VF_NOTIFY_FLR_BIT BIT(17) +#define HINIC5_PF_HOST_MPU_NOTIFY_BIT BIT(31) +#define HINIC5_VF_FUNC_ATTRIBUTE6_OFFSET 0x2018 +#define HINIC5_PF_HOST_MPU_NOTIFY_OFFSET 0x60C0 + +typedef struct vf_offset_info { + u8 valid; + u16 vf_offset_from_pf[CMD_MAX_MAX_PF_NUM]; +} VF_OFFSET_INFO_S; + +static VF_OFFSET_INFO_S g_vf_offset; +static DEFINE_MUTEX(g_vf_offset_lock); + +static struct attribute *hinic5_attributes[] = { +#if !(defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE)) + &dev_attr_sriov_numvfs.attr, + &dev_attr_sriov_totalvfs.attr, +#endif /* !(HAVE_SRIOV_CONFIGURE || HAVE_RHEL6_SRIOV_CONFIGURE) */ + NULL +}; + +static const struct attribute_group hinic5_attr_group = { + .attrs = hinic5_attributes, +}; + +static int mapping_bar(struct pci_dev *pdev, + struct hinic5_adev *adev) +{ + int cfg_bar; + + cfg_bar = HINIC5_IS_VF_DEV(hinic5_adev_get_device_id(adev)) ? 
+ HINIC5_VF_PCI_CFG_REG_BAR : HINIC5_PF_PCI_CFG_REG_BAR; + + adev->cfg_base_phy = pci_resource_start(pdev, cfg_bar); + adev->cfg_base_len = pci_resource_len(pdev, cfg_bar); + adev->cfg_reg_base = pci_ioremap_bar(pdev, cfg_bar); + if (!adev->cfg_reg_base) { + sdk_err(&pdev->dev, + "Failed to map configuration regs\n"); + return -ENOMEM; + } + + adev->intr_reg_base = pci_ioremap_bar(pdev, HINIC5_PCI_INTR_REG_BAR); + if (!adev->intr_reg_base) { + sdk_err(&pdev->dev, + "Failed to map interrupt regs\n"); + goto map_intr_bar_err; + } + + if (!HINIC5_IS_VF_DEV(hinic5_adev_get_device_id(adev))) { + adev->mgmt_base_phy = pci_resource_start(pdev, HINIC5_PCI_MGMT_REG_BAR); + adev->mgmt_base_len = pci_resource_len(pdev, HINIC5_PCI_MGMT_REG_BAR); + adev->mgmt_reg_base = + pci_ioremap_bar(pdev, HINIC5_PCI_MGMT_REG_BAR); + if (!adev->mgmt_reg_base) { + sdk_err(&pdev->dev, + "Failed to map mgmt regs\n"); + goto map_mgmt_bar_err; + } + } + + adev->db_base_phy = pci_resource_start(pdev, HINIC5_PCI_DB_BAR); + adev->db_dwqe_len = pci_resource_len(pdev, HINIC5_PCI_DB_BAR); + adev->db_base = pci_ioremap_bar(pdev, HINIC5_PCI_DB_BAR); + if (!adev->db_base) { + sdk_err(&pdev->dev, + "Failed to map doorbell regs\n"); + goto map_db_err; + } + + return 0; + +map_db_err: + if (!HINIC5_IS_VF_DEV(hinic5_adev_get_device_id(adev))) + iounmap(adev->mgmt_reg_base); + +map_mgmt_bar_err: + iounmap(adev->intr_reg_base); + +map_intr_bar_err: + iounmap(adev->cfg_reg_base); + + return -ENOMEM; +} + +static void unmapping_bar(struct hinic5_adev *adev) +{ + iounmap(adev->db_base); + + if (!HINIC5_IS_VF_DEV(hinic5_adev_get_device_id(adev))) + iounmap(adev->mgmt_reg_base); + + iounmap(adev->intr_reg_base); + iounmap(adev->cfg_reg_base); +} + +static int hinic5_pci_init(struct pci_dev *pdev) +{ + struct hinic5_adev *adev = NULL; + int err; + + adev = kzalloc(sizeof(*adev), GFP_KERNEL); + if (!adev) + return -ENOMEM; + adev->dev = &pdev->dev; + adev->bus_dev = pdev; + mutex_init(&adev->adev_mutex); + + 
pci_set_drvdata(pdev, adev); + + err = pci_enable_device(pdev); + if (err != 0) { + sdk_err(&pdev->dev, "Failed to enable PCI device\n"); + goto pci_enable_err; + } + + err = pci_request_regions(pdev, HINIC5_DRV_NAME); + if (err != 0) { + sdk_err(&pdev->dev, "Failed to request regions\n"); + goto pci_regions_err; + } + + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); /* 64 bit DMA mask */ + if (err != 0) { + sdk_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n"); + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); /* 32 bit DMA mask */ + if (err != 0) { + sdk_err(&pdev->dev, "Failed to set DMA mask\n"); + goto dma_mask_err; + } + } + + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); /* 64 bit DMA mask */ + if (err != 0) { + sdk_warn(&pdev->dev, "Couldn't set 64-bit coherent DMA mask\n"); + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); /* 32 bit DMA mask */ + if (err != 0) { + sdk_err(&pdev->dev, "Failed to set coherent DMA mask\n"); + goto dma_consistnet_mask_err; + } + } + + return 0; + +dma_consistnet_mask_err: +dma_mask_err: + pci_clear_master(pdev); + pci_disable_pcie_error_reporting(pdev); + pci_release_regions(pdev); + +pci_regions_err: + pci_disable_device(pdev); + +pci_enable_err: + pci_set_drvdata(pdev, NULL); + kfree(adev); + + return err; +} + +static void hinic5_pci_deinit(struct pci_dev *pdev) +{ + struct hinic5_adev *adev = pci_get_drvdata(pdev); + + pci_clear_master(pdev); + pci_release_regions(pdev); + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + kfree(adev); +} + +static int hinic5_remove_func(struct hinic5_adev *adev) +{ + struct pci_dev *pdev = to_pci_dev(adev->dev); + + mutex_lock(&adev->adev_mutex); + if (adev->lld_state != HINIC5_PROBE_OK) { + sdk_warn(&pdev->dev, "Current function don not need remove\n"); + mutex_unlock(&adev->adev_mutex); + return 0; + } + adev->lld_state = HINIC5_IN_REMOVE; 
+ mutex_unlock(&adev->adev_mutex); + + hinic5_detect_hw_present(adev->hwdev); + + hisdk5_remove_pre_process(adev->hwdev); + + if (hinic5_func_type(adev->hwdev) != TYPE_VF) { + sysfs_remove_group(&pdev->dev.kobj, &hinic5_attr_group); + wait_sriov_cfg_complete(adev); + hinic5_pci_sriov_disable(pdev); + } + + hinic5_func_deinit(adev); + + lld_lock_chip_node(); + free_chip_node(adev); + lld_unlock_chip_node(); + + unmapping_bar(adev); + + mutex_lock(&adev->adev_mutex); + adev->lld_state = HINIC5_NOT_PROBE; + mutex_unlock(&adev->adev_mutex); + + sdk_info(&pdev->dev, "Pcie device removed function\n"); + + return 0; +} + +#ifdef CONFIG_PCI_IOV +STATIC int hinic5_get_pf_device_id(struct pci_dev *pdev) +{ + struct pci_dev *pf_dev = pci_physfn(pdev); + + return pf_dev->device; +} +#endif + +static int hinic5_get_pf_info(struct hinic5_adev *dev, u16 service, + struct hinic5_hw_pf_infos **pf_infos) +{ + int err; + + if (service >= SERVICE_T_MAX) { + sdk_err(dev->dev, "Current vf do not supports set service_type = %u state in host\n", + service); + return -EFAULT; + } + + pf_infos = kzalloc(sizeof(*pf_infos), GFP_KERNEL); + if (!pf_infos) { + sdk_err(dev->dev, "Failed to allocate pf infos\n"); + return -ENOMEM; + } + err = hinic5_get_hw_pf_infos(dev->hwdev, *pf_infos, HINIC5_CHANNEL_COMM); + if (err != 0) { + kfree(*pf_infos); + sdk_err(dev->dev, "Get chipf pf info failed, ret %d\n", err); + return -EFAULT; + } + + return 0; +} + +static int get_vf_service_state_param(struct hinic5_lld_dev *lld_dev, struct hinic5_adev *dev_ptr, + u16 service, struct hinic5_hw_pf_infos **pf_infos) +{ + int err; + + if (!lld_dev || !dev_ptr) + return -EINVAL; + + err = hinic5_get_pf_info(dev_ptr, service, pf_infos); + if (err != 0) + return err; + + return 0; +} + +static int hinic5_dst_pdev_valid(struct hinic5_adev *dst_dev, struct pci_dev **des_pdev_ptr, + u16 vf_devfn, bool en) +{ + struct pci_dev *pdev = container_of(dst_dev->dev, struct pci_dev, dev); + u16 bus; + + bus = pdev->bus->number + 
vf_devfn / BUS_MAX_DEV_NUM; + *des_pdev_ptr = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus), + bus, vf_devfn % BUS_MAX_DEV_NUM); + if (!(*des_pdev_ptr)) { + pr_err("des_pdev is NULL\n"); + return -EFAULT; + } + + /* OVS sriov hw scene, when vf bind to vf_io return error. */ + if (!en && (strcmp((*des_pdev_ptr)->driver->name, HINIC5_DRV_NAME) != 0)) { + pr_err("vf bind driver:%s\n", (*des_pdev_ptr)->driver->name); + return -EFAULT; + } + + return 0; +} + +static int paramerter_is_unexpected(struct hinic5_adev *dst_dev, u16 *func_id, u16 *vf_start, + u16 *vf_end, u16 vf_func_id) +{ + if (hinic5_func_type(dst_dev->hwdev) == TYPE_VF) + return -EPERM; + + *func_id = hinic5_global_func_id(dst_dev->hwdev); + *vf_start = hinic5_glb_pf_vf_offset(dst_dev->hwdev) + 1; + *vf_end = *vf_start + hinic5_func_max_vf(dst_dev->hwdev); + if (vf_func_id < *vf_start || vf_func_id >= *vf_end) + return -EPERM; + + return 0; +} + +void hinic5_notify_vf_timer_disable(struct pci_dev *pdev) +{ + ulong timeout; + void __iomem *bar = NULL; + void __iomem *bar_physfn = NULL; + struct pci_dev *physfn = NULL; + u32 val; + + if (!pdev || pdev->vendor != PCI_VENDOR_ID_HUAWEI || + (pdev->device != HINIC5_DEV_ID_VF && pdev->device != HINIC5_DEV_ID_25V1_VF)) + return; + + sdk_warn(&pdev->dev, "Notify vf disable timer bitmap before flr\n"); + + /* VF bar space map */ + bar = pci_ioremap_bar(pdev, HINIC5_VF_PCI_CFG_REG_BAR); + if (!bar) { + sdk_err(&pdev->dev, "VF bar map failed in vf timer disable\n"); + return; + } + + /* PF bar space map */ + physfn = pci_physfn(pdev); + bar_physfn = pci_ioremap_bar(physfn, HINIC5_PCI_MGMT_REG_BAR); + if (!bar_physfn) { + sdk_err(&pdev->dev, "PF bar map failed in vf timer disable\n"); + iounmap(bar); + return; + } + + /* Set VF FUNC ATTR6 17bit to mark vf before flr */ + val = ioread32be(bar + HINIC5_VF_FUNC_ATTRIBUTE6_OFFSET); + iowrite32be(val | HINIC5_VF_NOTIFY_FLR_BIT, bar + HINIC5_VF_FUNC_ATTRIBUTE6_OFFSET); + + /* Set PF HOST_MPU_NOTIFY to cause mpu 
interrupt */ + val = ioread32be(bar_physfn + HINIC5_PF_HOST_MPU_NOTIFY_OFFSET); + iowrite32be(val | HINIC5_PF_HOST_MPU_NOTIFY_BIT, + bar_physfn + HINIC5_PF_HOST_MPU_NOTIFY_OFFSET); + + /* Wait for MPU disable vf timer bitmap */ + timeout = jiffies + msecs_to_jiffies(HINIC5_VF_TIMER_DISABLE_MAX_TIMEOUT); + do { + val = ioread32be(bar + HINIC5_VF_FUNC_ATTRIBUTE6_OFFSET); + if ((val & HINIC5_VF_NOTIFY_FLR_BIT) == 0) + break; + msleep(HINIC5_VF_TIMER_DISABLE_WAIT_TIME); + } while (time_before(jiffies, timeout)); + + iounmap(bar_physfn); + iounmap(bar); +} +EXPORT_SYMBOL(hinic5_notify_vf_timer_disable); + +int hinic5_set_vf_service_state(struct hinic5_lld_dev *lld_dev, + u16 vf_func_id, u16 service, bool en) +{ + struct hinic5_adev *dev = to_hinic5_adev(lld_dev); + struct hinic5_hw_pf_infos *pf_infos = NULL; + struct hinic5_adev *dst_dev = NULL; + struct pci_dev *des_pdev = NULL; + struct pci_dev *dst_pdev = NULL; + u16 vf_start, vf_end, vf_devfn, func_id; + int err; + bool find_dst_dev = false; + + err = get_vf_service_state_param(lld_dev, dev, service, &pf_infos); + if (err != 0 || !pf_infos) + return err; + + lld_hold(); + list_for_each_entry(dst_dev, &dev->chip_node->func_list, node) { + if (paramerter_is_unexpected(dst_dev, &func_id, &vf_start, + &vf_end, vf_func_id) != 0) + continue; + + dst_pdev = container_of(dst_dev->dev, struct pci_dev, dev); + vf_devfn = pf_infos->infos[func_id].vf_offset + (vf_func_id - vf_start) + + (u16)dst_pdev->devfn; + err = hinic5_dst_pdev_valid(dst_dev, &des_pdev, vf_devfn, en); + if (err != 0) { + sdk_err(dev->dev, "Can not get vf func_id %u from pf %u\n", + vf_func_id, func_id); + lld_put(); + goto free_pf_info; + } + + dst_dev = pci_get_drvdata(des_pdev); + /* When enable vf scene, if vf bind to vf-io, return ok */ + if ((strcmp(des_pdev->driver->name, HINIC5_DRV_NAME) != 0) || + !dst_dev || (!en && dst_dev->lld_state != HINIC5_PROBE_OK) || + (en && dst_dev->lld_state != HINIC5_NOT_PROBE)) { + lld_put(); + goto free_pf_info; + } + 
+ if (en) + pci_dev_put(des_pdev); + find_dst_dev = true; + break; + } + lld_put(); + + if (!find_dst_dev) { + err = -EFAULT; + sdk_err(dev->dev, "Invalid parameter vf_id %u\n", vf_func_id); + goto free_pf_info; + } + + err = hinic5_pci_set_func_en(dst_dev, en, vf_func_id); + +free_pf_info: + if (pf_infos) + kfree(pf_infos); + return err; +} +EXPORT_SYMBOL(hinic5_set_vf_service_state); + +int hinic5_pci_irq_vectors_alloc(struct hinic5_adev *adev, void *entry, u32 irqs_min, u32 irqs_num) +{ + struct pci_dev *pdev = to_pci_dev(adev->dev); + + return pci_enable_msix_range(pdev, entry, (int)irqs_min, (int)irqs_num); +} + +void hinic5_pci_irq_vectors_free(struct hinic5_adev *adev) +{ + struct pci_dev *pdev = to_pci_dev(adev->dev); + + pci_free_irq_vectors(pdev); +} + +int hinic5_pci_irq_vector(struct hinic5_adev *adev, u32 idx) +{ + struct pci_dev *pdev = to_pci_dev(adev->dev); + + return pci_irq_vector(pdev, idx); +} + +STATIC bool hinic5_get_vf_load_state(struct pci_dev *pdev) +{ + struct hinic5_adev *adev = NULL; + struct pci_dev *pf_pdev = NULL; + + if (!pdev) { + pr_err("pdev is null.\n"); + return false; + } + + /* vf used in vm */ + if (pci_is_root_bus(pdev->bus)) + return false; + + if (pdev->is_virtfn != 0) + pf_pdev = pdev->physfn; + else + pf_pdev = pdev; + + adev = pci_get_drvdata(pf_pdev); + if (!adev) { + sdk_err(&pdev->dev, "adev is null.\n"); + return false; + } + + return !adev->disable_vf_load; +} + +struct hinic5_sriov_info *hinic5_get_sriov_info_by_pcidev(struct pci_dev *pdev) +{ + struct hinic5_adev *adev = NULL; + + if (!pdev) + return NULL; + + adev = pci_get_drvdata(pdev); + if (!adev) + return NULL; + + return &adev->sriov_info; +} + +void *hinic5_get_hwdev_by_pcidev(struct pci_dev *pdev) +{ + struct hinic5_adev *adev = NULL; + + if (!pdev) + return NULL; + + adev = pci_get_drvdata(pdev); + if (!adev) + return NULL; + + return adev->hwdev; +} + +static void hinic5_pci_remove(struct pci_dev *pdev) +{ + struct hinic5_adev *adev = 
pci_get_drvdata(pdev); + + if (!adev) + return; + +#ifndef __HIFC__ +#ifdef CONFIG_PCI_IOV + if (pdev->is_virtfn != 0 && (hinic5_get_pf_device_id(pdev) == HINIC5_DEV_ID_SDI_6_0_PF) && + hinic5_get_vf_load_state(pdev)) + return; +#endif +#endif + + sdk_info(&pdev->dev, "Pcie device remove begin\n"); + + unregister_device_attr_groups(adev); + + hinic5_remove_func(adev); + + hinic5_pci_deinit(pdev); + hinic5_probe_pre_unprocess(pdev); + + sdk_info(&pdev->dev, "Pcie device removed\n"); +} + +#if (defined CONFIG_ARM) || (defined CONFIG_ARM64) +/* Mask the PCI_ERR_UNC_COMP_ABORT to prevent PF from handling Completer Aborts + * from the VF. On ARM platforms, Completer Aborts may occur when a VF try to + * write a non-accessible address. + */ +static void hinic5_mask_aer_comp_abort(struct pci_dev *pdev) +{ + u32 err_mask; + int pos; + + struct pci_dev *rp = pcie_find_root_port(pdev); + + if (!rp) { + sdk_warn(&pdev->dev, "Cannot find root port.\n"); + return; + } + + pos = pci_find_ext_capability(rp, PCI_EXT_CAP_ID_ERR); + if (!pos) { + sdk_err(&pdev->dev, "AER capability is not found in PCIe config space.\n"); + return; + } + + pci_read_config_dword(rp, pos + PCI_ERR_UNCOR_MASK, &err_mask); + err_mask |= PCI_ERR_UNC_COMP_ABORT; + pci_write_config_dword(rp, pos + PCI_ERR_UNCOR_MASK, err_mask); +} +#endif + +static int hinic5_probe_func(struct hinic5_adev *adev) +{ + struct pci_dev *pdev = to_pci_dev(adev->dev); + int err; + + err = probe_func_param_init(adev); + if (err == -EEXIST) + return 0; + else if (err != 0) + return err; + + err = mapping_bar(pdev, adev); + if (err != 0) { + sdk_err(&pdev->dev, "Failed to map bar\n"); + goto map_bar_failed; + } + + /* if chip information of pcie function exist, add the function into chip */ + lld_lock_chip_node(); + err = alloc_chip_node(adev); + if (err != 0) { + lld_unlock_chip_node(); + sdk_err(&pdev->dev, "Failed to add new chip node to global list\n"); + goto alloc_chip_node_fail; + } + lld_unlock_chip_node(); + + err = 
hinic5_func_init(adev); + if (err != 0) + goto func_init_err; + +#if (defined CONFIG_ARM) || (defined CONFIG_ARM64) + /* Prevent PF from being in an abnormal state + * due to illegal memory access by its VF. + */ + if (hinic5_func_type(adev->hwdev) == TYPE_PPF) { + if (!hinic5_in_spu(adev->hwdev)) + hinic5_mask_aer_comp_abort(pdev); + } +#endif /* ARM */ + + if (hinic5_func_type(adev->hwdev) != TYPE_VF) { + err = sysfs_create_group(&pdev->dev.kobj, &hinic5_attr_group); + if (err != 0) { + sdk_err(&pdev->dev, "Failed to create sysfs group\n"); + goto create_sysfs_err; + } + + err = hinic5_set_bdf_ctxt(adev->hwdev, pdev->bus->number, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); + if (err != 0) { + sdk_err(&pdev->dev, "Failed to set BDF info to MPU\n"); + sysfs_remove_group(&pdev->dev.kobj, &hinic5_attr_group); + goto set_bdf_err; + } + } + + hinic5_probe_success(adev->hwdev); + + mutex_lock(&adev->adev_mutex); + adev->lld_state = HINIC5_PROBE_OK; + mutex_unlock(&adev->adev_mutex); + + return 0; + +set_bdf_err: +create_sysfs_err: + hinic5_func_deinit(adev); + +func_init_err: + lld_lock_chip_node(); + free_chip_node(adev); + lld_unlock_chip_node(); + +alloc_chip_node_fail: + unmapping_bar(adev); + +map_bar_failed: + sdk_err(&pdev->dev, "Pcie device probe function failed\n"); + return err; +} + +bool hinic5_pci_is_virtfn(struct hinic5_adev *adev) +{ +#ifdef CONFIG_PCI_IOV + struct pci_dev *pdev = to_pci_dev(adev->dev); + + return (bool)(pdev->is_virtfn); +#else + return false; +#endif +} + +int hinic5_pci_get_vf_num(struct hinic5_adev *adev) +{ + struct pci_dev *pdev = to_pci_dev(adev->dev); + + return pci_num_vf(pdev); +} + +int hinic5_pci_init_device_info(struct hinic5_adev *adev) +{ + struct hinic5_adev *pf_adev = hinic5_pdev_get_pf_adev(adev); + struct pci_dev *pdev = to_pci_dev(pf_adev->dev); + u64 bus_domain_nr = (u64)pci_domain_nr(pdev->bus); + + adev->info.id = (bus_domain_nr << PCI_BUS_NUM_SHIFT) + pdev->bus->number; + + return 0; +} + +u16 
hinic5_pci_get_device_id(struct hinic5_adev *adev) +{ + struct pci_dev *pdev = to_pci_dev(adev->dev); + + return pdev->device; +} + +int hinic5_pci_set_func_en(struct hinic5_adev *dst_adev, bool en, u16 vf_func_id) +{ + struct pci_dev *des_pdev = container_of(dst_adev->dev, struct pci_dev, dev); + int err; + + mutex_lock(&dst_adev->adev_mutex); + /* unload invalid vf func id */ + if (!en && vf_func_id != hinic5_global_func_id(dst_adev->hwdev) && + (strcmp(des_pdev->driver->name, HINIC5_DRV_NAME) == 0)) { + pr_err("dst_adev func id:%u, vf_func_id:%u\n", + hinic5_global_func_id(dst_adev->hwdev), vf_func_id); + mutex_unlock(&dst_adev->adev_mutex); + return -EFAULT; + } + + if (!en && dst_adev->lld_state == HINIC5_PROBE_OK) { + mutex_unlock(&dst_adev->adev_mutex); + hinic5_remove_func(dst_adev); + } else if (en && dst_adev->lld_state == HINIC5_NOT_PROBE) { + mutex_unlock(&dst_adev->adev_mutex); + err = hinic5_probe_func(dst_adev); + if (err != 0) + return -EFAULT; + } else { + mutex_unlock(&dst_adev->adev_mutex); + } + + return 0; +} + +struct hinic5_adev *hinic5_pdev_get_pf_adev(struct hinic5_adev *adev) +{ + struct hinic5_adev *pf_adev = NULL; + struct pci_dev *pdev = to_pci_dev(adev->dev); + + pf_adev = (hinic5_pci_is_virtfn(adev) != 0) ? 
pci_get_drvdata(pdev->physfn) : adev; + return pf_adev; +} + +static int hinic5_pf_get_vf_offset_info(struct hinic5_adev *des_adev, u16 *vf_offset) +{ + int err, i; + struct hinic5_hw_pf_infos *pf_infos = NULL; + u16 pf_func_id; + struct hinic5_adev *pf_adev = NULL; + + pf_adev = hinic5_pdev_get_pf_adev(des_adev); + pf_func_id = hinic5_global_func_id(pf_adev->hwdev); + if (pf_func_id >= CMD_MAX_MAX_PF_NUM || !vf_offset) + return -EINVAL; + + mutex_lock(&g_vf_offset_lock); + if (g_vf_offset.valid == 0) { + pf_infos = kzalloc(sizeof(*pf_infos), GFP_KERNEL); + if (!pf_infos) { + err = -ENOMEM; + goto err_malloc; + } + + err = hinic5_get_hw_pf_infos(pf_adev->hwdev, pf_infos, HINIC5_CHANNEL_COMM); + if (err != 0) { + sdk_warn(pf_adev->dev, "Hinic5_get_hw_pf_infos fail err %d\n", err); + err = -EFAULT; + goto err_out; + } + + g_vf_offset.valid = 1; + for (i = 0; i < CMD_MAX_MAX_PF_NUM; i++) + g_vf_offset.vf_offset_from_pf[i] = pf_infos->infos[i].vf_offset; + + kfree(pf_infos); + } + + *vf_offset = g_vf_offset.vf_offset_from_pf[pf_func_id]; + + mutex_unlock(&g_vf_offset_lock); + + return 0; + +err_out: + kfree(pf_infos); +err_malloc: + mutex_unlock(&g_vf_offset_lock); + return err; +} + +struct hinic5_adev *hinic5_pci_get_vf_adev_by_pf(struct hinic5_adev *adev, u16 func_id) +{ + int err; + u16 bus_num; + u16 vf_start, vf_end; + u16 des_fn, pf_func_id, vf_offset; + struct hinic5_adev *src_adev = adev; + struct pci_dev *pdev = container_of(src_adev->dev, struct pci_dev, dev); + struct pci_dev *dst_vf_pdev = NULL; + struct hinic5_adev *dst_adev = NULL; + + vf_start = hinic5_glb_pf_vf_offset(src_adev->hwdev); + vf_end = vf_start + hinic5_func_max_vf(src_adev->hwdev); + pf_func_id = hinic5_global_func_id(src_adev->hwdev); + if (func_id <= vf_start || func_id > vf_end || pf_func_id >= CMD_MAX_MAX_PF_NUM) + return NULL; + + err = hinic5_pf_get_vf_offset_info(src_adev, &vf_offset); + if (err != 0) { + sdk_warn(src_adev->dev, "Hinic5_pf_get_vf_offset_info fail\n"); + return NULL; 
+	}
+
+	des_fn = ((func_id - vf_start) - 1) + pf_func_id + vf_offset;
+	bus_num = pdev->bus->number + des_fn / BUS_MAX_DEV_NUM;
+
+	dst_vf_pdev = pci_get_domain_bus_and_slot(0, bus_num, (des_fn % BUS_MAX_DEV_NUM));
+	dst_adev = dst_vf_pdev ? pci_get_drvdata(dst_vf_pdev) : NULL; /* lookup can miss; guard NULL */
+	pci_dev_put(dst_vf_pdev); /* NULL-safe; drops the ref from pci_get_domain_bus_and_slot() */
+	return dst_adev;
+}
+
+STATIC int hinic5_get_vfid_by_vfpci(void *hwdev, struct pci_dev *pdev, u16 *global_func_id)
+{
+	struct pci_dev *pf_pdev = NULL;
+	struct hinic5_adev *adev = NULL;
+	u16 pf_bus, vf_bus, vf_offset;
+	int err;
+
+	if (!pdev || !global_func_id || pdev->is_virtfn == 0)
+		return -EINVAL;
+
+	pf_pdev = pdev->physfn;
+
+	vf_bus = pdev->bus->number;
+	pf_bus = pf_pdev->bus->number;
+
+	if (pdev->vendor != HINIC5_VIRTIO_VNEDER_ID) {
+		adev = pci_get_drvdata(pf_pdev);
+		err = hinic5_pf_get_vf_offset_info(adev, &vf_offset);
+		if (err != 0) {
+			sdk_err(&pdev->dev, "Func hinic5_pf_get_vf_offset_info fail\n");
+			return -EFAULT;
+		}
+	} else {
+		if (g_vf_offset.valid == 0) {
+			sdk_err(&pdev->dev, "Pf offset get fail\n");
+			return -EFAULT;
+		}
+	}
+
+	*global_func_id = (u16)((vf_bus - pf_bus) * BUS_MAX_DEV_NUM) + (u16)pdev->devfn +
+			  (u16)(CMD_MAX_MAX_PF_NUM - g_vf_offset.vf_offset_from_pf[0]);
+
+	return 0;
+}
+
+STATIC bool hinic5_get_vf_nic_en_status(struct pci_dev *pdev)
+{
+	u8 nic_en;
+	u16 global_func_id;
+	struct pci_dev *pf_pdev = NULL;
+	struct hinic5_adev *adev = NULL;
+
+	if (!pdev) {
+		pr_err("pdev is null.\n");
+		return false;
+	}
+
+	if (pdev->is_virtfn != 0)
+		pf_pdev = pdev->physfn;
+	else
+		return false;
+
+	adev = pci_get_drvdata(pf_pdev);
+	if (!adev) {
+		sdk_err(&pdev->dev, "adev is null.\n");
+		return false;
+	}
+
+	if (!IS_BMGW_SLAVE_HOST((struct hinic5_hwdev *)adev->hwdev))
+		return false;
+
+	if (hinic5_get_vfid_by_vfpci(NULL, pdev, &global_func_id) != 0) {
+		sdk_err(&pdev->dev, "Get vf id by vfpci failed\n");
+		return false;
+	}
+
+	if (hisdk5_get_plug_srv_bitmap(adev->hwdev, COMM_PLUG_SRV_NIC,
+				       global_func_id, &nic_en) != 0) {
sdk_err(&pdev->dev, "Get function nic status failed\n"); + return false; + } + + sdk_info(&pdev->dev, "Func %hu %s default probe in host\n", + global_func_id, (nic_en != 0) ? "enable" : "disable"); + + return (nic_en != 0); +} + +static int hinic5_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct hinic5_adev *adev = NULL; + u16 probe_fault_level = FAULT_LEVEL_SERIOUS_FLR; + int err; + + sdk_info(&pdev->dev, "Pcie device probe begin\n"); + +#ifndef __HIFC__ +#ifdef CONFIG_PCI_IOV + if (pdev->is_virtfn != 0 && (hinic5_get_pf_device_id(pdev) == HINIC5_DEV_ID_SDI_6_0_PF) && + hinic5_get_vf_load_state(pdev)) { + sdk_info(&pdev->dev, "VFs are not binded to hinic\n"); + return -EINVAL; + } +#endif +#endif + + err = hinic5_probe_pre_process(pdev); + if (err == HINIC5_NOT_PROBE) + return 0; + + if (err != 0) + goto out; + + err = hinic5_pci_init(pdev); + if (err != 0) + goto pci_init_err; + + adev = pci_get_drvdata(pdev); + adev->disable_vf_load = hinic5_is_disable_vf_load(); + adev->id = *id; + adev->lld_state = HINIC5_NOT_PROBE; + adev->probe_fault_level = probe_fault_level; + adev->lld_dev.dev_type = HINIC5_DEVICE_T_PCI; + adev->bus_ops = hinic5_get_dev_ops(adev); + err = adev->bus_ops->init_device_info(adev); + if (err != 0) + goto init_device_info_err; + lld_dev_cnt_init(adev); + + if (pdev->is_virtfn != 0 && (!hinic5_get_vf_load_state(pdev)) && + (!hinic5_get_vf_nic_en_status(pdev))) { + sdk_info(&pdev->dev, "VF device disable load in host\n"); + return 0; + } + + err = hinic5_probe_func(adev); + if (err != 0) + goto hinic5_probe_func_fail; + + err = register_device_attr_groups(adev); + if (err != 0) + goto hinic5_register_device_attrs_fail; + + sdk_info(&pdev->dev, "Pcie device probed\n"); + return 0; + +hinic5_register_device_attrs_fail: + hinic5_remove_func(adev); + +hinic5_probe_func_fail: + probe_fault_level = adev->probe_fault_level; + +init_device_info_err: + hinic5_pci_deinit(pdev); + +pci_init_err: + hinic5_probe_pre_unprocess(pdev); 
+ +out: + hinic5_probe_fault_process(pdev, probe_fault_level); + sdk_err(&pdev->dev, "Pcie device probe failed\n"); + return err; +} + +void hinic5_pci_probe_fault_process(struct hinic5_adev *adev) +{ + struct pci_dev *pdev = to_pci_dev(adev->dev); + + hinic5_probe_fault_process(pdev, FAULT_LEVEL_HOST); +} + +static const struct pci_device_id hinic5_pci_table[] = { +#ifdef CONFIG_SP_VID_DID + {PCI_VDEVICE(SPNIC, HINIC5_DEV_ID_STANDARD), 0}, + {PCI_VDEVICE(SPNIC, HINIC5_DEV_ID_SDI_5_1_PF), 0}, + {PCI_VDEVICE(SPNIC, HINIC5_DEV_ID_VF), 0}, +#else + {PCI_VDEVICE(HUAWEI, HINIC5_DEV_ID_72V1_PF), 0}, + {PCI_VDEVICE(HUAWEI, HINIC5_DEV_ID_72V1_VF), 0}, + {PCI_VDEVICE(HUAWEI, HINIC5_DEV_ID_25V1_PF), 0}, + {PCI_VDEVICE(HUAWEI, HINIC5_DEV_ID_25V1_VF), 0}, +#endif + {0,} + +}; + +MODULE_DEVICE_TABLE(pci, hinic5_pci_table); + +/** + * hinic5_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + * + * Since we only need error detecting not error handling, so we + * always return PCI_ERS_RESULT_CAN_RECOVER to tell the AER + * driver that we don't need reset(error handling). 
+ */ +static pci_ers_result_t hinic5_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct hinic5_adev *adev = NULL; + + sdk_err(&pdev->dev, + "Uncorrectable error detected, log and cleanup error status: 0x%08x\n", + state); + + pci_cleanup_aer_uncorrect_error_status(pdev); + adev = pci_get_drvdata(pdev); + if (adev) + hinic5_record_pcie_error(adev->hwdev); + + return PCI_ERS_RESULT_CAN_RECOVER; +} + +static void hinic5_pci_shutdown(struct pci_dev *pdev) +{ + struct hinic5_adev *adev = pci_get_drvdata(pdev); + + sdk_info(&pdev->dev, "Shutdown device\n"); + + if (adev) + hinic5_shutdown_hwdev(adev->hwdev); + + pci_disable_device(pdev); + + if (adev) + hinic5_set_api_stop(adev->hwdev); +} + +#ifdef HAVE_PCIE_RESET_DONE +STATIC void hinic5_reset_done(struct pci_dev *pdev) +{ + struct hinic5_adev *adev = pci_get_drvdata(pdev); + + sdk_info(&pdev->dev, "pcie is reset done\n"); + if (adev) + hinic5_set_api_stop(adev->hwdev); +} +#endif + +#ifdef HAVE_RHEL6_SRIOV_CONFIGURE +static struct pci_driver_rh hinic5_driver_rh = { + .sriov_configure = hinic5_pci_sriov_configure, +}; +#endif + +/* Cause we only need error detecting not error handling, so only error_detected + * callback is enough. 
+ */ +static struct pci_error_handlers hinic5_err_handler = { + .error_detected = hinic5_io_error_detected, +#ifdef HAVE_PCIE_RESET_DONE + .reset_done = hinic5_reset_done, +#endif +}; + +static struct pci_driver hinic5_driver = { + .name = HINIC5_DRV_NAME, + .id_table = hinic5_pci_table, + .probe = hinic5_pci_probe, + .remove = hinic5_pci_remove, + .shutdown = hinic5_pci_shutdown, +#if defined(HAVE_SRIOV_CONFIGURE) + .sriov_configure = hinic5_pci_sriov_configure, +#elif defined(HAVE_RHEL6_SRIOV_CONFIGURE) + .rh_reserved = &hinic5_driver_rh, +#endif + .err_handler = &hinic5_err_handler, +#if (KERNEL_VERSION(3, 10, 0) != LINUX_VERSION_CODE) + .groups = hisdk5_driver_attr_groups, +#endif +}; + +int hinic5_pci_register_driver(void) +{ + return pci_register_driver(&hinic5_driver); +} + +void hinic5_pci_unregister_driver(void) +{ + pci_unregister_driver(&hinic5_driver); +} diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_pcie.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_pcie.h new file mode 100644 index 00000000..b949af7f --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_pcie.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_PCIE_H +#define HINIC5_PCIE_H +#include <linux/types.h> +#include <linux/pci.h> +#include "hinic5_dev_mgmt.h" + +#define PCI_BUS_NUM_SHIFT 8 +#define BUS_MAX_DEV_NUM 256 + +int hinic5_pci_register_driver(void); +void hinic5_pci_unregister_driver(void); +bool hinic5_pci_is_virtfn(struct hinic5_adev *adev); +struct hinic5_adev *hinic5_pdev_get_pf_adev(struct hinic5_adev *adev); +int hinic5_pci_set_func_en(struct hinic5_adev *dst_adev, bool en, u16 vf_func_id); +int hinic5_pci_get_vf_num(struct hinic5_adev *adev); +u16 hinic5_pci_get_device_id(struct hinic5_adev *adev); +struct hinic5_adev *hinic5_pci_get_vf_adev_by_pf(struct hinic5_adev *adev, u16 func_id); +int hinic5_pci_irq_vectors_alloc(struct 
hinic5_adev *adev, void *entry, u32 irqs_min, u32 irqs_num); +void hinic5_pci_irq_vectors_free(struct hinic5_adev *adev); +int hinic5_pci_irq_vector(struct hinic5_adev *adev, u32 idx); +int hinic5_pci_init_device_info(struct hinic5_adev *adev); +void hinic5_pci_probe_fault_process(struct hinic5_adev *adev); +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_sriov.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_sriov.c new file mode 100644 index 00000000..61925bb9 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_sriov.c @@ -0,0 +1,300 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/interrupt.h> + +#include "ossl_knl.h" +#include "hinic5_hwdev.h" +#include "hinic5_crm.h" +#include "hinic5_hw.h" +#ifndef __WIN__ +#include "hinic5_lld.h" +#include "hinic5_dev_mgmt.h" +#endif +#include "hinic5_sriov.h" + +int hinic5_init_vf_hw(void *hwdev, u16 start_vf_id, u16 end_vf_id) +{ + u16 i, func_idx; + int err; + + /* mbox msg channel resources will be freed during remove process */ + err = hinic5_init_func_mbox_msg_channel(hwdev, + hinic5_func_max_vf(hwdev)); + if (err != 0) + return err; + + /* vf use 256K as default wq page size, and can't change it */ + for (i = start_vf_id; i <= end_vf_id; i++) { + func_idx = hinic5_glb_pf_vf_offset(hwdev) + i; + err = hinic5_set_wq_page_size(hwdev, func_idx, + HINIC5_DEFAULT_WQ_PAGE_SIZE, + HINIC5_CHANNEL_COMM); + if (err != 0) + return err; + } + + return 0; +} + +#if !(defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE)) && !defined(__WIN__) +ssize_t hinic5_sriov_totalvfs_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + return sprintf_s(buf, PAGE_SIZE, "%d\n", pci_sriov_get_totalvfs(pdev)); +} + +ssize_t hinic5_sriov_numvfs_show(struct device *dev, + struct 
device_attribute *attr, char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + return sprintf_s(buf, PAGE_SIZE, "%d\n", pci_num_vf(pdev)); +} + +ssize_t hinic5_sriov_numvfs_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int ret; + u16 num_vfs; + int cur_vfs, total_vfs; + + ret = kstrtou16(buf, 0, &num_vfs); + if (ret < 0) + return ret; + + cur_vfs = pci_num_vf(pdev); + total_vfs = pci_sriov_get_totalvfs(pdev); + if (num_vfs > total_vfs) + return -ERANGE; + + if (num_vfs == cur_vfs) + return count; /* no change */ + + if (num_vfs == 0) { + /* disable VFs */ + ret = hinic5_pci_sriov_configure(pdev, 0); + if (ret < 0) + return ret; + return count; + } + + /* enable VFs */ + if (cur_vfs) { + nic_warn(&pdev->dev, "%d VFs already enabled. Disable before enabling %d VFs\n", + cur_vfs, num_vfs); + return -EBUSY; + } + + ret = hinic5_pci_sriov_configure(pdev, num_vfs); + if (ret < 0) + return ret; + + if (ret != num_vfs) + nic_warn(&pdev->dev, "%d VFs requested; only %d enabled\n", + num_vfs, ret); + + return count; +} +#endif /* !(HAVE_SRIOV_CONFIGURE || HAVE_RHEL6_SRIOV_CONFIGURE) */ + +#ifndef __WIN__ +static void migration_uninit_vf(struct pci_dev *pdev) +{ +#if (defined CONFIG_ARM) || (defined CONFIG_ARM64) + void (*uninit_vfs)(struct pci_dev *) = __symbol_get("migration_dev_uninit_vfs"); + + if (uninit_vfs) { + uninit_vfs(pdev); + __symbol_put("migration_dev_uninit_vfs"); + } +#endif +} + +int hinic5_pci_sriov_disable(struct pci_dev *dev) +{ +#ifdef CONFIG_PCI_IOV + struct hinic5_sriov_info *sriov_info = NULL; + struct hinic5_event_info event = {0}; + void *hwdev = NULL; + + sriov_info = hinic5_get_sriov_info_by_pcidev(dev); + hwdev = hinic5_get_hwdev_by_pcidev(dev); + if (!hwdev) { + sdk_err(&dev->dev, "SR-IOV disable is not permitted, please wait...\n"); + return -EPERM; + } + + /* if SR-IOV is already disabled then there is nothing to do */ + if 
(!sriov_info->sriov_enabled) + return 0; + + if (test_and_set_bit(HINIC5_SRIOV_DISABLE, &sriov_info->state)) { + sdk_err(&dev->dev, "SR-IOV disable in process, please wait"); + return -EPERM; + } + + /* If our VFs are assigned we cannot shut down SR-IOV + * without causing issues, so just leave the hardware + * available but disabled + */ + if (pci_vfs_assigned(dev) != 0) { + clear_bit(HINIC5_SRIOV_DISABLE, &sriov_info->state); + sdk_warn(&dev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n"); + return -EPERM; + } + + event.service = EVENT_SRV_COMM; + event.type = EVENT_COMM_SRIOV_STATE_CHANGE; + ((struct hinic5_sriov_state_info *)(void *)event.event_data)->enable = 0; + ((struct hinic5_sriov_state_info *)(void *)event.event_data)->num_vfs = 0; + hinic5_event_callback(hwdev, &event); + + sriov_info->sriov_enabled = false; + + /* disable iov and allow time for transactions to clear */ + pci_disable_sriov(dev); + + sriov_info->num_vfs = 0; + + clear_bit(HINIC5_SRIOV_DISABLE, &sriov_info->state); + + migration_uninit_vf(dev); +#endif + + return 0; +} + +#ifdef CONFIG_PCI_IOV +#if (defined CONFIG_ARM) || (defined CONFIG_ARM64) +static int migration_init_vf(struct pci_dev *dev, int num_vfs, struct hinic5_sriov_info *sriov_info) +{ + int err = 0; + int (*migration_dev_init_vfs)(struct pci_dev *dev, uint32_t num_vfs); + + migration_dev_init_vfs = __symbol_get("migration_dev_init_vfs"); + if (migration_dev_init_vfs) { + sdk_warn(&dev->dev, "migration_dev_init_vfs is not NULL\n"); + err = migration_dev_init_vfs(dev, num_vfs); + __symbol_put("migration_dev_init_vfs"); + if (err < 0) { + pci_disable_sriov(dev); + clear_bit(HINIC5_SRIOV_ENABLE, &sriov_info->state); + return err; + } + } + return err; +} +#endif + +static void hinic5_event_callback_dev(struct hinic5_sriov_info *sriov_info, + void *hwdev, int num_vfs) +{ + struct hinic5_event_info event = {0}; + sriov_info->sriov_enabled = true; + + sriov_info->num_vfs = (u32)num_vfs; + + 
event.service = EVENT_SRV_COMM; + event.type = EVENT_COMM_SRIOV_STATE_CHANGE; + ((struct hinic5_sriov_state_info *)(void *)event.event_data)->enable = 1; + ((struct hinic5_sriov_state_info *)(void *)event.event_data)->num_vfs = (u16)num_vfs; + hinic5_event_callback(hwdev, &event); + + clear_bit(HINIC5_SRIOV_ENABLE, &sriov_info->state); +} +#endif + +int hinic5_pci_sriov_enable(struct pci_dev *dev, int num_vfs) +{ +#ifdef CONFIG_PCI_IOV + int err, pre_existing_vfs; + + struct hinic5_sriov_info *sriov_info = hinic5_get_sriov_info_by_pcidev(dev); + void *hwdev = hinic5_get_hwdev_by_pcidev(dev); + + if (!hwdev) { + sdk_err(&dev->dev, "hwdev is null\n"); + return -EPERM; + } + + if (test_and_set_bit(HINIC5_SRIOV_ENABLE, &sriov_info->state)) { + sdk_err(&dev->dev, "SR-IOV enable in process, please wait, num_vfs %d\n", num_vfs); + return -EPERM; + } + + pre_existing_vfs = pci_num_vf(dev); + + if (num_vfs > pci_sriov_get_totalvfs(dev)) { + clear_bit(HINIC5_SRIOV_ENABLE, &sriov_info->state); + return -ERANGE; + } + if (pre_existing_vfs != 0 && pre_existing_vfs != num_vfs) { + err = hinic5_pci_sriov_disable(dev); + if (err != 0) { + clear_bit(HINIC5_SRIOV_ENABLE, &sriov_info->state); + return err; + } + } else if (pre_existing_vfs == num_vfs) { + clear_bit(HINIC5_SRIOV_ENABLE, &sriov_info->state); + return num_vfs; + } + + err = hinic5_init_vf_hw(hwdev, 1, (u16)num_vfs); + if (err != 0) { + sdk_err(&dev->dev, + "Failed to init vf in hardware before enable sriov, error %d\n", err); + clear_bit(HINIC5_SRIOV_ENABLE, &sriov_info->state); + return err; + } + + err = pci_enable_sriov(dev, num_vfs); + if (err != 0) { + sdk_err(&dev->dev, "Failed to enable SR-IOV, error %d\n", err); + clear_bit(HINIC5_SRIOV_ENABLE, &sriov_info->state); + return err; + } + +#if (defined CONFIG_ARM) || (defined CONFIG_ARM64) + err = migration_init_vf(dev, num_vfs, sriov_info); + if (err < 0) + return err; +#endif + hinic5_event_callback_dev(sriov_info, hwdev, num_vfs); + return num_vfs; +#else + + 
return 0; +#endif +} + +void hinic5_pci_sriov_enable_ops(struct hinic5_adev *adev, int num_vfs) +{ + struct pci_dev *pdev = to_pci_dev(adev->dev); + + (void)hinic5_pci_sriov_enable(pdev, num_vfs); +} + +int hinic5_pci_sriov_configure(struct pci_dev *dev, int num_vfs) +{ + struct hinic5_sriov_info *sriov_info = NULL; + + sriov_info = hinic5_get_sriov_info_by_pcidev(dev); + if (!sriov_info) + return -EFAULT; + + if (!test_bit(HINIC5_FUNC_PERSENT, &sriov_info->state)) + return -EFAULT; + + if (num_vfs == 0) + return hinic5_pci_sriov_disable(dev); + else + return hinic5_pci_sriov_enable(dev, num_vfs); +} +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_sriov.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_sriov.h new file mode 100644 index 00000000..034411cc --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_sriov.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_SRIOV_H +#define HINIC5_SRIOV_H +#include <linux/types.h> +#include <linux/pci.h> + +#include "hinic5_bus.h" + +#if !(defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE) || \ + defined(__WIN__) || defined(__VMWARE__)) +ssize_t hinic5_sriov_totalvfs_show(struct device *dev, + struct device_attribute *attr, char *buf); +ssize_t hinic5_sriov_numvfs_show(struct device *dev, + struct device_attribute *attr, char *buf); +ssize_t hinic5_sriov_numvfs_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); +#endif /* !(HAVE_SRIOV_CONFIGURE || HAVE_RHEL6_SRIOV_CONFIGURE || __WIN__ || __VMWARE__) */ + +struct hinic5_sriov_info *hinic5_get_sriov_info_by_pcidev(struct pci_dev *pdev); +int hinic5_pci_sriov_disable(struct pci_dev *dev); +int hinic5_pci_sriov_enable(struct pci_dev *dev, int num_vfs); +void hinic5_pci_sriov_enable_ops(struct hinic5_adev *adev, int num_vfs); +int hinic5_pci_sriov_configure(struct pci_dev 
*dev, int num_vfs); +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_sysfs.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_sysfs.c new file mode 100644 index 00000000..5baee6fa --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_sysfs.c @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/sysfs.h> + +#include "ossl_knl.h" +#include "hinic5_dev_mgmt.h" +#include "hinic5_hw.h" +#include "hinic5_hwdev.h" +#include "hinic5_typedef_inner.h" +#include "hinic5_sysfs.h" + +/* driver attributes */ +static ssize_t metadata_show(struct device_driver *dev, char *buf) +{ + return (ssize_t)sysfs_emit(buf, "commit: %s\ncompile time: %s\n", + GIT_COMMIT_ID, __TIME_STR__); +} +static DRIVER_ATTR_RO(metadata); + +static struct attribute *hisdk5_driver_attrs[] = { + &driver_attr_metadata.attr, + NULL, +}; + +static const struct attribute_group hisdk5_driver_attr_group = { + .attrs = hisdk5_driver_attrs, +}; + +const struct attribute_group *hisdk5_driver_attr_groups[] = { + &hisdk5_driver_attr_group, + NULL, +}; + +/* device attributes */ +static ssize_t timeout_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hinic5_adev *adev = dev_get_drvdata(dev); + struct hinic5_hwdev *hwdev = adev->hwdev; + ssize_t len = 0; + + if (!hwdev || !hwdev->timeout_info) + return len; + + len += sysfs_emit(buf, + "hw_type: %s\n" + "mbox_timeout: %u ms\nmailbox_poll_timeout: %u ms\n" + "cmdq_timeout: %u ms\n", + hwdev->timeout_info->hw_type_desc, + hwdev->timeout_info->mbox_timeout, + hwdev->timeout_info->mbox_poll_timeout, + hwdev->timeout_info->cmdq_timeout); + return len; +} +static DEVICE_ATTR_RO(timeout); + +static struct attribute *hisdk5_device_attrs[] = { + &dev_attr_timeout.attr, + NULL +}; + +static const struct attribute_group hisdk5_device_attr_group = { + .attrs = hisdk5_device_attrs +}; + +const struct 
attribute_group *hisdk5_device_attr_groups[] = { + &hisdk5_device_attr_group, + NULL, +}; + +const struct attribute_group *a_hisdk5_device_attr_groups[] = { + &hisdk5_device_attr_group, + NULL, +}; + +int register_device_attr_groups(struct hinic5_adev *adev) +{ + return sysfs_create_groups(&adev->dev->kobj, hisdk5_device_attr_groups); +} + +void unregister_device_attr_groups(struct hinic5_adev *adev) +{ + sysfs_remove_groups(&adev->dev->kobj, hisdk5_device_attr_groups); +} diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_sysfs.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_sysfs.h new file mode 100644 index 00000000..73342eac --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_sysfs.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef __HINIC5_SYSFS_H_ +#define __HINIC5_SYSFS_H_ + +#include <linux/sysfs.h> + +#include "hinic5_dev_mgmt.h" + +extern const struct attribute_group *hisdk5_driver_attr_groups[]; + +int register_device_attr_groups(struct hinic5_adev *adev); +void unregister_device_attr_groups(struct hinic5_adev *adev); + +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_ubus.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_ubus.c new file mode 100644 index 00000000..50b3341f --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_ubus.c @@ -0,0 +1,707 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifdef __UBUS_DRIVER__ +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt +#include <net/addrconf.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/io-mapping.h> +#include <linux/interrupt.h> +#include <linux/inetdevice.h> +#include <linux/time.h> +#include <linux/timex.h> +#include <linux/rtc.h> +#include <linux/aer.h> +#include <linux/debugfs.h> +#ifdef 
UB_SUPPORT_ENTITY +#include <linux/mod_devicetable.h> +#endif + +#include "ossl_knl.h" +#include "hinic5_mt.h" +#include "hinic5_common.h" +#include "hinic5_crm.h" +#include "hinic5_ubus_sriov.h" +#include "hinic5_dev_mgmt.h" +#include "hinic5_nictool.h" +#include "hinic5_hw.h" +#include "hinic5_hinic5_vram.h" +#include "hinic5_fast_msg_init.h" +#include "hinic5_lld.h" +#include "hinic5_lld_private.h" +#include "hinic5_ubus_id_tbl.h" +#include "hinic5_ubus.h" +#include "hinic5_bus.h" + +#include "hinic5_hwdev.h" +#include "hinic5_profile.h" +#include "hinic5_prof_adap.h" +#include "hinic5_fw_update.h" +#include "mpu_inband_cmd_defs.h" +#include "hinic5_sysfs.h" + +static u8 ubus_dma_bit_mask = HINIC5_UBUS_DMA_BIT_MASK_DEFAULT; +module_param(ubus_dma_bit_mask, byte, 0444); +MODULE_PARM_DESC(ubus_dma_bit_mask, "ubus dma addr bit mask - default is 48"); + +static enum ubus_device_type ubus_get_device_type(hinic_ub_dev *ubus_dev) +{ + if (HINIC_UB_GET_DEVICE_ID(ubus_dev) == HINIC5_UDEV_DEVICE_ID_1825_PF || + HINIC_UB_GET_DEVICE_ID(ubus_dev) == HINIC5_UDEV_DEVICE_ID_1825_VF || + HINIC_UB_GET_DEVICE_ID(ubus_dev) == HINIC5_UDEV_DEVICE_ID_1825_TEMP) { + return UBUS_DEVICE_TYPE_1825; + } else if (HINIC_UB_GET_DEVICE_ID(ubus_dev) == HINIC5_UDEV_DEVICE_ID_1872_PF || + HINIC_UB_GET_DEVICE_ID(ubus_dev) == HINIC5_UDEV_DEVICE_ID_1872_VF) { + return UBUS_DEVICE_TYPE_1872; + } else { + return UBUS_DEVICE_TYPE_INVALID; + } +} + +struct fers2_conf { + u32 cfg_reg_offset; + u32 cfg_reg_size; + u32 mgmt_reg_offset; + u32 mgmt_reg_size; +}; + +static const struct fers2_conf PF_RS2_CONF = { + HINIC5_PF_UBUS_CFG_REG_OFFSET, HINIC5_PF_UBUS_CFG_REG_SIZE, + HINIC5_PF_UBUS_MGMT_REG_OFFSET, HINIC5_PF_UBUS_MGMT_REG_SIZE +}; + +static const struct fers2_conf VF_RS2_CONF = { + HINIC5_VF_UBUS_CFG_REG_OFFSET, HINIC5_VF_UBUS_CFG_REG_SIZE, + 0, 0 +}; + +static const struct fers2_conf HTN_PF_RS2_CONF = { + HINIC5_HTN_PF_UBUS_CFG_REG_OFFSET, HINIC5_HTN_PF_UBUS_CFG_REG_SIZE, + 
HINIC5_HTN_PF_UBUS_MGMT_REG_OFFSET, HINIC5_HTN_PF_UBUS_MGMT_REG_SIZE +}; + +static const struct fers2_conf HTN_VF_RS2_CONF = { + HINIC5_HTN_VF_UBUS_CFG_REG_OFFSET, HINIC5_HTN_VF_UBUS_CFG_REG_SIZE, + 0, 0 +}; + +static const struct fers2_conf *get_fers2_config(hinic_ub_dev *ubus_dev, + struct hinic5_adev *adev) +{ + enum ubus_device_type dev_type = ubus_get_device_type(ubus_dev); + bool is_vf = hinic5_ubus_is_virtfn(adev); + + if (dev_type == UBUS_DEVICE_TYPE_1825) + return is_vf ? &VF_RS2_CONF : &PF_RS2_CONF; + if (dev_type == UBUS_DEVICE_TYPE_1872) + return is_vf ? &HTN_VF_RS2_CONF : &HTN_PF_RS2_CONF; + return NULL; +} + +static int ubus_mapping_bar(hinic_ub_dev *ubus_dev, struct hinic5_adev *adev) +{ + bool is_pf = !hinic5_ubus_is_virtfn(adev); + const struct fers2_conf *rs2 = NULL; + + rs2 = get_fers2_config(ubus_dev, adev); + if (unlikely(!rs2)) { + sdk_err(&ubus_dev->dev, "Unsupport device\n"); + return -EFAULT; + } + +#ifdef __HINIC5_UBC_DEBUG__ + sdk_info(&ubus_dev->dev, "fers2 cfg off 0x%x size 0x%x\n", + rs2->cfg_reg_offset, rs2->cfg_reg_size); + sdk_info(&ubus_dev->dev, "fers2 mgmt off 0x%x size 0x%x\n", + rs2->mgmt_reg_offset, rs2->mgmt_reg_size); +#endif + + /* resource 2 */ + adev->fers2_base_phy = ub_resource_start(ubus_dev, HINIC5_UBUS_FERS2); + adev->fers2_total_len = ub_resource_len(ubus_dev, HINIC5_UBUS_FERS2); + adev->fers2_reg_base = ub_iomap(ubus_dev, HINIC5_UBUS_FERS2, 0); + if (!adev->fers2_reg_base) { + sdk_err(&ubus_dev->dev, "Failed to map resource 2\n"); + return -ENOMEM; + } + + /* cfg reg */ + adev->cfg_reg_base = adev->fers2_reg_base + rs2->cfg_reg_offset; + adev->cfg_base_phy = adev->fers2_base_phy + rs2->cfg_reg_offset; + adev->cfg_base_len = rs2->cfg_reg_size; + + /* mgmt reg */ + if (is_pf) { + adev->mgmt_reg_base = adev->fers2_reg_base + rs2->mgmt_reg_offset; + adev->mgmt_base_phy = adev->fers2_base_phy + rs2->mgmt_reg_offset; + adev->mgmt_base_len = rs2->mgmt_reg_size; + } + + /* interrupt reg */ + adev->intr_reg_base = 
ub_iomap(ubus_dev, HINIC5_UBUS_INTR_REG_BAR, 0); + if (!adev->intr_reg_base) { + sdk_err(&ubus_dev->dev, + "Failed to map interrupt regs\n"); + goto map_intr_bar_err; + } + + /* doorbell reg */ + adev->db_base_phy = ub_resource_start(ubus_dev, HINIC5_UBUS_DB_BAR); + adev->db_dwqe_len = ub_resource_len(ubus_dev, HINIC5_UBUS_DB_BAR); + adev->db_base = devm_ioremap_wc(&ubus_dev->dev, + adev->db_base_phy, + adev->db_dwqe_len); + if (!adev->db_base) { + sdk_err(&ubus_dev->dev, + "Failed to map doorbell regs\n"); + goto map_db_err; + } + +#ifdef __HINIC5_UBC_DEBUG__ + sdk_info(&ubus_dev->dev, "cfg reg 0x%llx, mgmt reg 0x%llx\n", + (u64)adev->cfg_reg_base, (u64)adev->mgmt_reg_base); +#endif + + return 0; + +map_db_err: + ub_iounmap(adev->intr_reg_base); + +map_intr_bar_err: + ub_iounmap(adev->fers2_reg_base); + + return -ENOMEM; +} + +static void ubus_unmapping_bar(struct hinic5_adev *adev) +{ + devm_iounmap(adev->dev, adev->db_base); + + ub_iounmap(adev->intr_reg_base); + + ub_iounmap(adev->fers2_reg_base); +} + +static void hinic5_ubus_deinit(hinic_ub_dev *ubus_dev) +{ + HINIC_UB_UE_ENABLE(ubus_dev, 0); + HINIC_UB_UNSET_HOST_INFO(ubus_dev); + dev_set_drvdata(&ubus_dev->dev, NULL); +} + +static int hinic5_ubus_init(hinic_ub_dev *ubus_dev) +{ + struct hinic5_adev *adev = NULL; + int err; + + /* Write the config space; values come from the configuration set up after the ubus driver loaded */ + (void)HINIC_UB_SET_HOST_INFO(ubus_dev); + + adev = devm_kzalloc(&ubus_dev->dev, sizeof(*adev), GFP_KERNEL); + if (!adev) { + return -ENOMEM; + } + adev->dev = &ubus_dev->dev; + mutex_init(&adev->adev_mutex); + + dev_set_drvdata(&ubus_dev->dev, adev); + + /* set bus_access_en */ + HINIC_UB_UE_ENABLE(ubus_dev, 1); + + sdk_info(&ubus_dev->dev, "Ubus DMA Bit Mask is (%u).\n", ubus_dma_bit_mask); + if (ubus_dma_bit_mask < HINIC5_UBUS_DMA_BIT_MASK_MIN || + ubus_dma_bit_mask > HINIC5_UBUS_DMA_BIT_MASK_MAX) { + err = -EPERM; + sdk_err(&ubus_dev->dev, "Ubus DMA Bit Mask Illegal\n"); + goto dma_mask_err; + } + + err = dma_set_mask_and_coherent(&ubus_dev->dev, + 
DMA_BIT_MASK(ubus_dma_bit_mask)); + if (err != 0) { + sdk_warn(&ubus_dev->dev, "Couldn't set ubus DMA mask\n"); + goto dma_mask_err; + } + + return 0; + +dma_mask_err: + hinic5_ubus_deinit(ubus_dev); + + return err; +} + +static int hinic5_remove_ubus_func(struct hinic5_adev *adev) +{ + mutex_lock(&adev->adev_mutex); + if (adev->lld_state != HINIC5_PROBE_OK) { + sdk_warn(adev->dev, "Current function don not need remove\n"); + mutex_unlock(&adev->adev_mutex); + return 0; + } + adev->lld_state = HINIC5_IN_REMOVE; + mutex_unlock(&adev->adev_mutex); + + hinic5_detect_hw_present(adev->hwdev); + + if (hinic5_func_type(adev->hwdev) != TYPE_VF) + wait_sriov_cfg_complete(adev); + + hinic5_func_deinit(adev); + + lld_lock_chip_node(); + free_chip_node(adev); + lld_unlock_chip_node(); + + ubus_unmapping_bar(adev); + + mutex_lock(&adev->adev_mutex); + adev->lld_state = HINIC5_NOT_PROBE; + mutex_unlock(&adev->adev_mutex); + + sdk_info(adev->dev, "Ubus device removed function\n"); + + return 0; +} + +static void hinic5_ubus_remove(hinic_ub_dev *ubus_dev) +{ + struct hinic5_adev *adev = dev_get_drvdata(&ubus_dev->dev); + + if (!adev) + return; + + sdk_info(&ubus_dev->dev, "Ubus device remove begin\n"); + + HINIC_UB_DISABLE_FUNC(ubus_dev); + hinic5_remove_ubus_func(adev); + hinic5_ubus_deinit(ubus_dev); + sdk_info(&ubus_dev->dev, "Ubus device removed\n"); +} + +static int hinic5_probe_ubus_func(hinic_ub_dev *ubus_dev, struct hinic5_adev *adev) +{ + int err; + + err = probe_func_param_init(adev); + if (err == -EEXIST) + return 0; + else if (err != 0) + return err; + + err = ubus_mapping_bar(ubus_dev, adev); + if (err != 0) { + sdk_err(&ubus_dev->dev, "Failed to map bar\n"); + goto map_bar_failed; + } + + lld_lock_chip_node(); + err = alloc_chip_node(adev); + if (err != 0) { + lld_unlock_chip_node(); + sdk_err(&ubus_dev->dev, "Failed to add new chip node to global list\n"); + goto alloc_chip_node_fail; + } + lld_unlock_chip_node(); + + err = hinic5_func_init(adev); + if (err != 0) + 
goto func_init_err; + + hinic5_probe_success(adev->hwdev); + + mutex_lock(&adev->adev_mutex); + adev->lld_state = HINIC5_PROBE_OK; + mutex_unlock(&adev->adev_mutex); + + return 0; + +func_init_err: + lld_lock_chip_node(); + free_chip_node(adev); + lld_unlock_chip_node(); + +alloc_chip_node_fail: + ubus_unmapping_bar(adev); + +map_bar_failed: + sdk_err(adev->dev, "Ubus device probe function failed\n"); + return err; +} + +static int hinic5_ubus_probe(hinic_ub_dev *ubus_dev, const struct ub_device_id *utbl_entry) +{ + struct hinic5_adev *adev = NULL; + u16 probe_fault_level = FAULT_LEVEL_SERIOUS_FLR; + int err; + + sdk_info(&ubus_dev->dev, "Ubus device probe begin\n"); + err = hinic5_probe_pre_process(ubus_dev); + if (err == HINIC5_NOT_PROBE) + return 0; + if (err != 0) + goto out; + err = hinic5_ubus_init(ubus_dev); + if (err != 0) + goto out; + + adev = dev_get_drvdata(&ubus_dev->dev); + adev->disable_vf_load = hinic5_is_disable_vf_load(); + adev->lld_state = HINIC5_NOT_PROBE; + adev->probe_fault_level = probe_fault_level; + adev->lld_dev.dev_type = HINIC5_DEVICE_T_UB; + adev->bus_ops = hinic5_get_dev_ops(adev); + adev->bus_dev = ubus_dev; + err = adev->bus_ops->init_device_info(adev); + if (err != 0) + goto init_device_info_err; + + lld_dev_cnt_init(adev); + + err = hinic5_probe_ubus_func(ubus_dev, adev); + if (err != 0) + goto hinic5_probe_func_fail; + + sdk_info(&ubus_dev->dev, "Ubus device probed\n"); + return 0; + +hinic5_probe_func_fail: + probe_fault_level = adev->probe_fault_level; + +init_device_info_err: + hinic5_ubus_deinit(ubus_dev); + +out: + hinic5_probe_fault_process(ubus_dev, probe_fault_level); + sdk_err(&ubus_dev->dev, "Ubus device probe failed\n"); + return err; +} + +void hinic5_ubus_probe_fault_process(struct hinic5_adev *adev) +{ + hinic_ub_dev *udev = HINIC_TO_UB_DEV(adev->dev); + + hinic5_probe_fault_process(udev, FAULT_LEVEL_HOST); +} + +#ifdef UB_SUPPORT_ENTITY +#define UB_DEVICE UB_ENTITY +#endif + +#define HUAWEI_UB_DEVICE_ID(device) \ + 
UB_DEVICE(HINIC5_UDEV_VENDOR_ID_HUAWEI, device) + +/* Old Vendor ID, to be deleted */ +#define HUAWEI_UB_DEVICE_ID_OLD(device) \ + UB_DEVICE(HINIC5_UDEV_VENDOR_ID_HUAWEI_E0FC, device) + +static const struct ub_device_id hinic5_ubus_tbl[] = { + { + HUAWEI_UB_DEVICE_ID(HINIC5_UDEV_DEVICE_ID_1825_PF), + HINIC5_UDEV_CLASS_CODE_1825, HINIC5_UDEV_CLASS_CODE_MASK, + }, + { + HUAWEI_UB_DEVICE_ID(HINIC5_UDEV_DEVICE_ID_1825_VF), + HINIC5_UDEV_CLASS_CODE_1825, HINIC5_UDEV_CLASS_CODE_MASK, + }, + { + HUAWEI_UB_DEVICE_ID(HINIC5_UDEV_DEVICE_ID_1825_TEMP), + HINIC5_UDEV_CLASS_CODE_1825, HINIC5_UDEV_CLASS_CODE_MASK, + }, + { + HUAWEI_UB_DEVICE_ID(HINIC5_UDEV_DEVICE_ID_1872_PF), + HINIC5_UDEV_CLASS_CODE_1872, HINIC5_UDEV_CLASS_CODE_MASK, + }, + { + HUAWEI_UB_DEVICE_ID(HINIC5_UDEV_DEVICE_ID_1872_VF), + HINIC5_UDEV_CLASS_CODE_1872, HINIC5_UDEV_CLASS_CODE_MASK, + }, + /* Old Vendor ID, to be deleted */ + { + HUAWEI_UB_DEVICE_ID_OLD(HINIC5_UDEV_DEVICE_ID_1825_PF), + HINIC5_UDEV_CLASS_CODE_1825, HINIC5_UDEV_CLASS_CODE_MASK, + }, + { + HUAWEI_UB_DEVICE_ID_OLD(HINIC5_UDEV_DEVICE_ID_1825_VF), + HINIC5_UDEV_CLASS_CODE_1825, HINIC5_UDEV_CLASS_CODE_MASK, + }, + { + HUAWEI_UB_DEVICE_ID_OLD(HINIC5_UDEV_DEVICE_ID_1825_TEMP), + HINIC5_UDEV_CLASS_CODE_1825, HINIC5_UDEV_CLASS_CODE_MASK, + }, + { + HUAWEI_UB_DEVICE_ID_OLD(HINIC5_UDEV_DEVICE_ID_1872_PF), + HINIC5_UDEV_CLASS_CODE_1872, HINIC5_UDEV_CLASS_CODE_MASK, + }, + { + HUAWEI_UB_DEVICE_ID_OLD(HINIC5_UDEV_DEVICE_ID_1872_VF), + HINIC5_UDEV_CLASS_CODE_1872, HINIC5_UDEV_CLASS_CODE_MASK, + }, + /* required last entry */ + {0}, +}; + +u16 hinic5_ubus_get_device_id(struct hinic5_adev *adev) +{ + hinic_ub_dev *udev = HINIC_TO_UB_DEV(adev->dev); + + return HINIC_UB_GET_DEVICE_ID(udev); +} + +bool hinic5_ubus_is_virtfn(struct hinic5_adev *adev) +{ + hinic_ub_dev *udev = HINIC_TO_UB_DEV(adev->dev); + u16 dev_id = hinic5_ubus_get_device_id(adev); + + if (dev_id == HINIC5_UDEV_DEVICE_ID_1825_VF) + return true; + if (dev_id == HINIC5_UDEV_DEVICE_ID_1872_VF) + 
return true; + +#ifdef UB_SUPPORT_ENTITY + return !udev->is_mue; +#else + return !udev->is_pd; +#endif +} + +int hinic5_ub_init_device_info(struct hinic5_adev *adev) +{ + hinic_ub_dev *udev = HINIC_TO_UB_DEV(adev->dev); + + adev->info.id = (u64)((udev->guid.bits.seq_num >> HINIC5_CARD_ID_OFFSET) & + HINIC5_CARD_ID_MASK); + + if (sizeof(udev->guid) != sizeof(adev->info.guid)) { + sdk_err(adev->dev, "guid size is not matched.\n"); + return -EINVAL; + } + memcpy(&adev->info.guid, &udev->guid, sizeof(udev->guid)); + + sdk_info(adev->dev, "card_id: %lld, seq_num: %lld\n", + adev->info.id, (u64)(udev->guid.bits.seq_num)); + return 0; +} + +struct hinic5_adev *hinic5_ubus_get_pf_adev(struct hinic5_adev *adev) +{ + struct hinic5_adev *pf_adev = NULL; + hinic_ub_dev *udev = HINIC_TO_UB_DEV(adev->dev); + +#ifdef UB_SUPPORT_ENTITY + pf_adev = (hinic5_ubus_is_virtfn(adev) != 0) ? dev_get_drvdata(&udev->pue->dev) : adev; +#else + pf_adev = (hinic5_ubus_is_virtfn(adev) != 0) ? dev_get_drvdata(&udev->pdev->dev) : adev; +#endif + return pf_adev; +} + +struct hinic5_adev *hinic5_ubus_get_vf_adev_by_pf(struct hinic5_adev *adev, u16 func_id) +{ + hinic_ub_dev *udev = HINIC_TO_UB_DEV(adev->dev); + hinic_ub_dev *vd_dev = NULL; + +#ifdef UB_SUPPORT_ENTITY + list_for_each_entry(vd_dev, &udev->ue_list, node) { + if (vd_dev->entity_idx == func_id) { +#else + list_for_each_entry(vd_dev, &udev->vdevice_list, node) { + if (vd_dev->fe_idx == func_id) { +#endif + return dev_get_drvdata(&vd_dev->dev); + } + } + return NULL; +} + +int hinic5_ubus_get_vf_num(struct hinic5_adev *adev) +{ + hinic_ub_dev *udev = HINIC_TO_UB_DEV(adev->dev); + +#ifdef UB_SUPPORT_ENTITY + return udev->num_ues; +#else + return udev->num_vds; +#endif +} + +int hinic5_ubus_set_func_en(struct hinic5_adev *dst_dev, bool en, u16 vf_func_id) +{ + hinic_ub_dev *des_udev = container_of(dst_dev->dev, hinic_ub_dev, dev); + int err; + + mutex_lock(&dst_dev->adev_mutex); + /* unload invalid vf func id */ + if (!en && vf_func_id != 
hinic5_global_func_id(dst_dev->hwdev) && + (strcmp(des_udev->driver->name, HINIC5_DRV_NAME) == 0)) { + pr_err("dst_dev func id:%u, vf_func_id:%u\n", + hinic5_global_func_id(dst_dev->hwdev), vf_func_id); + mutex_unlock(&dst_dev->adev_mutex); + return -EFAULT; + } + + if (!en && dst_dev->lld_state == HINIC5_PROBE_OK) { + mutex_unlock(&dst_dev->adev_mutex); + hinic5_remove_ubus_func(dst_dev); + } else if (en && dst_dev->lld_state == HINIC5_NOT_PROBE) { + mutex_unlock(&dst_dev->adev_mutex); + err = hinic5_probe_ubus_func(des_udev, dst_dev); + if (err != 0) + return -EFAULT; + } else { + mutex_unlock(&dst_dev->adev_mutex); + } + + return 0; +} + +int hinic5_ubus_irq_vectors_alloc(struct hinic5_adev *adev, void *entry, u32 irqs_min, u32 irqs_num) +{ + hinic_ub_dev *udev = HINIC_TO_UB_DEV(adev->dev); + + return ub_alloc_irq_vectors(udev, irqs_min, irqs_num); +} + +void hinic5_ubus_irq_vectors_free(struct hinic5_adev *adev) +{ + hinic_ub_dev *udev = HINIC_TO_UB_DEV(adev->dev); + + return ub_disable_intr(udev); +} + +int hinic5_ubus_irq_vector(struct hinic5_adev *adev, u32 idx) +{ + hinic_ub_dev *udev = HINIC_TO_UB_DEV(adev->dev); + + return ub_irq_vector(udev, idx); +} + +static ub_ers_result_t hinic5_ubus_error_detected(hinic_ub_dev *ubus_dev, + ub_channel_state_t state) +{ + dev_info(&ubus_dev->dev, "UBUS error detected, state = %d.\n", state); + + switch (state) { + case ub_channel_io_normal: + return UB_ERS_RESULT_NEED_RESET; + case ub_channel_io_frozen: + return UB_ERS_RESULT_DISCONNECT; + case ub_channel_io_perm_failure: + default: + return UB_ERS_RESULT_NONE; + } +} + +static const struct ub_error_handlers hinic5_ubus_err_handler = { +#ifdef UB_SUPPORT_B177 + .ub_error_detected = hinic5_ubus_error_detected, +#else + .error_detected = hinic5_ubus_error_detected, +#endif +}; + +static int hinic5_ubus_suspend(struct device *dev) +{ + dev_info(dev, "UBUS suspend start\n"); + + return 0; +} + +static int hinic5_ubus_resume(struct device *dev) +{ + dev_info(dev, "UBUS 
resume start\n"); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(hinic5_ubus_pm_ops, hinic5_ubus_suspend, hinic5_ubus_resume); + +static struct ub_driver hinic5_ubus_driver = { + .name = HINIC5_DRV_NAME, + .id_table = hinic5_ubus_tbl, + .probe = hinic5_ubus_probe, + .remove = hinic5_ubus_remove, + .virt_configure = hinic5_ubus_virt_configure, + .err_handler = &hinic5_ubus_err_handler, + .driver = { + .pm = &hinic5_ubus_pm_ops, + }, + .groups = hisdk5_driver_attr_groups, +}; + +int hinic5_ubus_register_driver(void) +{ + return ub_register_driver(&hinic5_ubus_driver); +} + +void hinic5_ubus_unregister_driver(void) +{ + ub_unregister_driver(&hinic5_ubus_driver); +} + +void hinic5_ubus_numvds_store_vds_process(struct hinic5_adev *adev, int nums) +{ + int vd_start_idx, vd_end_idx, i, cnt, ret; + hinic_ub_dev *vdev; + hinic_ub_dev *udev = HINIC_TO_UB_DEV(adev->dev); + + /* + * The software finds "cnt" disabled vDevices for "vd_start_idx" to + * "vd_end_idx" based on the ordered vdevice_list. "vd_start_idx" is + * the ue_idx of first vDevices and "vd_end_idx" is calculated based + * on the total number(that is, "nums") vDevices to be enabled. + */ +#ifdef UB_SUPPORT_ENTITY + vd_start_idx = udev->uem.start_entity_idx; + vd_end_idx = vd_start_idx + nums - 1; + i = vd_start_idx; + cnt = nums - udev->num_ues; + list_for_each_entry(vdev, &udev->ue_list, node) { + for (; i < vdev->entity_idx; i++) { + /* + * The "vdevice_list" is sorted by ue_idx in ascending order. + * Ensure that others vDevs before this vDev are enabled. + */ + ret = hinic5_ubus_virt_configure(udev, i, 1); + if (ret != 0) + sdk_warn(adev->dev, "driver virt_configure return %d\n", ret); + if (--cnt == 0) + return; + } + /* Skip this enabled vDev. 
*/ + i++; + } +#else + vd_start_idx = udev->vd.start_fe_idx; + vd_end_idx = vd_start_idx + nums - 1; + i = vd_start_idx; + cnt = nums - udev->num_vds; + list_for_each_entry(vdev, &udev->vdevice_list, node) { + for (; i < vdev->fe_idx; i++) { + /* + * The "vdevice_list" is sorted by ue_idx in ascending order. + * Ensure that others vDevs before this vDev are enabled. + */ + ret = hinic5_ubus_virt_configure(udev, i, 1); + if (ret != 0) + sdk_warn(adev->dev, "driver virt_configure return %d\n", ret); + if (--cnt == 0) + return; + } + /* Skip this enabled vDev. */ + i++; + } +#endif + /* Ensure that the remaining vDevs enabled. */ + for (; i <= vd_end_idx; i++) { + ret = hinic5_ubus_virt_configure(udev, i, 1); + if (ret != 0) + sdk_warn(adev->dev, "driver virt_configure return %d\n", ret); + if (--cnt == 0) + return; + } +} +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_ubus.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_ubus.h new file mode 100644 index 00000000..46518d9b --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_ubus.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_UBUS_H +#define HINIC5_UBUS_H +#ifdef __UBUS_DRIVER__ +#include <linux/types.h> +#ifdef UB_SUPPORT_ENTITY +#include <ub/ubus/ubus.h> +#include <ub/ubus/ubus_regs.h> +#else +#include <linux/ubus.h> +#include <linux/ubus_regs.h> +#endif +#include "hinic5_dev_mgmt.h" + +enum ubus_device_type { + UBUS_DEVICE_TYPE_1825, + UBUS_DEVICE_TYPE_1872, + UBUS_DEVICE_TYPE_INVALID +}; + +// TODO: UB B173之后的版本接口差异, 173及以后需要define UB_SUPPORT_ENTITY +#ifdef UB_SUPPORT_ENTITY +typedef struct ub_entity hinic_ub_dev; +#define HINIC_UB_UE_ENABLE(ubus_dev, enable) ub_entity_enable(ubus_dev, enable) +#define HINIC_UB_UNSET_HOST_INFO(ubus_dev) ub_unset_user_info(ubus_dev) +#define HINIC_UB_SET_HOST_INFO(ubus_dev) ub_set_user_info(ubus_dev) +#define 
HINIC_UB_DISABLE_FUNC(ubus_dev) ub_disable_entities(ubus_dev) +#define HINIC_UB_ENABLE_VDEV(ubus_dev, ue_idx) ub_enable_ue(ubus_dev, ue_idx) +#define HINIC_UB_DISABLE_VDEV(ubus_dev, ue_idx) ub_disable_ue(ubus_dev, ue_idx) +#define HINIC_UB_GET_DEVICE_ID(udev) uent_device(udev) +#define HINIC_UB_GET_CLASS_CODE(udev) uent_class(udev) +#define HINIC_TO_UB_DEV(dev) to_ub_entity(dev) +#else +typedef struct ub_dev hinic_ub_dev; +#define HINIC_UB_UE_ENABLE(ubus_dev, enable) ub_fe_enable(ubus_dev, enable) +#define HINIC_UB_UNSET_HOST_INFO(ubus_dev) ub_unset_hostinfo(ubus_dev) +#define HINIC_UB_SET_HOST_INFO(ubus_dev) ub_set_hostinfo(ubus_dev) +#define HINIC_UB_DISABLE_FUNC(ubus_dev) ub_disable_funcs(ubus_dev) +#define HINIC_UB_ENABLE_VDEV(ubus_dev, ue_idx) ub_enable_vdev(ubus_dev, ue_idx) +#define HINIC_UB_DISABLE_VDEV(ubus_dev, ue_idx) ub_disable_vdev(ubus_dev, ue_idx) +#define HINIC_UB_GET_DEVICE_ID(udev) udev_device(udev) +#define HINIC_UB_GET_CLASS_CODE(udev) udev_class(udev) +#define HINIC_TO_UB_DEV(dev) to_ub_dev(dev) +#endif + +int hinic5_ubus_register_driver(void); +void hinic5_ubus_unregister_driver(void); +bool hinic5_ubus_is_virtfn(struct hinic5_adev *adev); +struct hinic5_adev *hinic5_ubus_get_pf_adev(struct hinic5_adev *adev); +int hinic5_ubus_set_func_en(struct hinic5_adev *dst_dev, bool en, u16 vf_func_id); +struct hinic5_adev *hinic5_ubus_get_vf_adev_by_pf(struct hinic5_adev *adev, u16 func_id); +int hinic5_ubus_get_vf_num(struct hinic5_adev *adev); +u16 hinic5_ubus_get_device_id(struct hinic5_adev *adev); +int hinic5_ubus_irq_vectors_alloc(struct hinic5_adev *adev, + void *entry, u32 irqs_min, u32 irqs_num); +void hinic5_ubus_irq_vectors_free(struct hinic5_adev *adev); +int hinic5_ubus_irq_vector(struct hinic5_adev *adev, u32 idx); +int hinic5_ub_init_device_info(struct hinic5_adev *adev); +int hinic5_ubus_virt_configure(hinic_ub_dev *ubus_dev, int ue_idx, bool is_en); +void hinic5_ubus_numvds_store_vds_process(struct hinic5_adev *adev, int nums); +void 
hinic5_ubus_probe_fault_process(struct hinic5_adev *adev); + +#endif +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_ubus_id_tbl.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_ubus_id_tbl.h new file mode 100644 index 00000000..be37e785 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_ubus_id_tbl.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_UBUS_ID_TBL_H +#define HINIC5_UBUS_ID_TBL_H + +#define HINIC5_UDEV_VENDOR_ID_HUAWEI 0xCC08 +#define HINIC5_UDEV_VENDOR_ID_HUAWEI_E0FC 0xE0FC /* Old Vendor ID, to be deleted */ + +#define HINIC5_UDEV_DEVICE_ID_1825_PF 0x8200 +#define HINIC5_UDEV_DEVICE_ID_1825_VF 0x8201 +#define HINIC5_UDEV_DEVICE_ID_1825_TEMP 0x1825 /* For development */ + +#define HINIC5_UDEV_DEVICE_ID_1872_PF 0x8100 +#define HINIC5_UDEV_DEVICE_ID_1872_VF 0x8101 + +#define HINIC5_UDEV_CLASS_CODE_1825 0x0102 +#define HINIC5_UDEV_CLASS_CODE_1872 0x0102 + +#define HINIC5_UDEV_CLASS_CODE_MASK 0xFFFF + +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_ubus_sriov.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_ubus_sriov.c new file mode 100644 index 00000000..5d4b86a3 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_ubus_sriov.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifdef __UBUS_DRIVER__ +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/interrupt.h> + +#include "ossl_knl.h" +#include "hinic5_hwdev.h" +#include "hinic5_crm.h" +#include "hinic5_hw.h" +#include "hinic5_hwdev.h" +#ifndef __WIN__ +#include "hinic5_lld.h" +#include "hinic5_dev_mgmt.h" +#endif +#include "hinic5_ubus_sriov.h" +#include "hinic5_ubus.h" + +static int ub_get_totalvds(hinic_ub_dev *udev) +{ +#ifdef UB_SUPPORT_ENTITY + if (!udev->is_mue) + return 0; + + return udev->total_ues; 
+#else + if (!udev->is_pd) + return 0; + + return udev->total_vds; +#endif +} + +static int ub_num_vdevice(hinic_ub_dev *udev) +{ +#ifdef UB_SUPPORT_ENTITY + if (!udev->is_mue) + return 0; + + return udev->num_ues; +#else + if (!udev->is_pd) + return 0; + + return udev->num_vds; +#endif +} + +static void hinic5_ubus_event_callback_dev(struct hinic5_sriov_info *sriov_info, void *hwdev, int num_vfs) +{ + struct hinic5_event_info event = {0}; + sriov_info->sriov_enabled = true; + + sriov_info->num_vfs = (u32)num_vfs; + + event.service = EVENT_SRV_COMM; + event.type = EVENT_COMM_SRIOV_STATE_CHANGE; + ((struct hinic5_sriov_state_info *)(void *)event.event_data)->enable = 1; + ((struct hinic5_sriov_state_info *)(void *)event.event_data)->num_vfs = (u16)num_vfs; + hinic5_event_callback(hwdev, &event); + + clear_bit(HINIC5_SRIOV_ENABLE, &sriov_info->state); +} + +int hinic5_ubus_init_func_mbox_channel(void *hwdev) +{ + /* mbox msg channel resources will be freed during remove process */ + return hinic5_init_func_mbox_msg_channel(hwdev, + hinic5_func_max_vf(hwdev)); +} + +int hinic5_ubus_init_vf_hw(void *hwdev, u16 start_vf_id, u16 end_vf_id) +{ + u16 i, func_idx; + int err; + + /* vf use 256K as default wq page size, and can't change it */ + for (i = start_vf_id; i <= end_vf_id; i++) { + func_idx = hinic5_glb_pf_vf_offset(hwdev) + i; + err = hinic5_set_wq_page_size(hwdev, func_idx, + HINIC5_DEFAULT_WQ_PAGE_SIZE, + HINIC5_CHANNEL_COMM); + if (err != 0) + return err; + } + + return 0; +} + +int hinic5_ubus_sriov_enable(hinic_ub_dev *ubus_dev, int ue_idx) +{ + int err, pre_existing_vfs; + struct hinic5_adev *adev = dev_get_drvdata(&ubus_dev->dev); + void *hwdev = adev->hwdev; + struct hinic5_sriov_info *sriov_info = &adev->sriov_info; + u32 tmp_vf_id = ue_idx - sriov_info->first_ue_idx; + + if (!hwdev) { + sdk_err(&ubus_dev->dev, "hwdev is null\n"); + return -EPERM; + } + + if (test_and_set_bit(HINIC5_SRIOV_ENABLE, &sriov_info->state)) { + sdk_err(&ubus_dev->dev, + "SR-IOV 
enable in process, please wait, ue_idx %d\n", ue_idx); + return -EPERM; + } + + pre_existing_vfs = ub_num_vdevice(ubus_dev); + if ((pre_existing_vfs + 1) > ub_get_totalvds(ubus_dev)) { + clear_bit(HINIC5_SRIOV_ENABLE, &sriov_info->state); + return -ERANGE; + } + + /* 如果不是第一次ENABLE,不要再次初始化FUNC MAILBOX通道 */ + if (!sriov_info->sriov_enabled) { + err = hinic5_ubus_init_func_mbox_channel(hwdev); + sriov_info->first_ue_idx = ue_idx; + if (err != 0) { + clear_bit(HINIC5_SRIOV_ENABLE, &sriov_info->state); + return err; + } + } + + err = hinic5_ubus_init_vf_hw(hwdev, tmp_vf_id, tmp_vf_id); + if (err != 0) { + sdk_err(&ubus_dev->dev, + "Failed to init vf in hardware before enable sriov, error %d\n", err); + clear_bit(HINIC5_SRIOV_ENABLE, &sriov_info->state); + return err; + } + + err = HINIC_UB_ENABLE_VDEV(ubus_dev, ue_idx); + if (err != 0) { + sdk_err(&ubus_dev->dev, "Failed to enable SR-IOV, error %d\n", err); + clear_bit(HINIC5_SRIOV_ENABLE, &sriov_info->state); + return err; + } + + hinic5_ubus_event_callback_dev(sriov_info, hwdev, (pre_existing_vfs + 1)); + return 0; +} + +int hinic5_ubus_sriov_disable(hinic_ub_dev *ubus_dev, int ue_idx) +{ + struct hinic5_adev *adev = dev_get_drvdata(&ubus_dev->dev); + struct hinic5_sriov_info *sriov_info = &adev->sriov_info; + struct hinic5_event_info event = {0}; + void *hwdev = adev->hwdev; + u32 tmp_vf_id = ue_idx - sriov_info->first_ue_idx; + int err, pre_existing_vfs; + + if (!hwdev) { + sdk_err(&ubus_dev->dev, "SR-IOV disable is not permitted, please wait...\n"); + return -EPERM; + } + + /* if SR-IOV is already disabled then there is nothing to do */ + if (!sriov_info->sriov_enabled) + return 0; + + if (test_and_set_bit(HINIC5_SRIOV_DISABLE, &sriov_info->state)) { + sdk_err(&ubus_dev->dev, "SR-IOV disable in process, please wait"); + return -EPERM; + } + + /* If our VFs are assigned we cannot shut down SR-IOV + * without causing issues, so just leave the hardware + * available but disabled + */ + // todo, VFs in VM,cannot 
disable directly + + pre_existing_vfs = ub_num_vdevice(ubus_dev); + event.service = EVENT_SRV_COMM; + event.type = EVENT_COMM_SRIOV_STATE_CHANGE; + ((struct hinic5_sriov_state_info *)(void *)event.event_data)->enable = 0; + ((struct hinic5_sriov_state_info *)(void *)event.event_data)->vf_id = tmp_vf_id; + ((struct hinic5_sriov_state_info *)(void *)event.event_data)->num_vfs = + (u16)(pre_existing_vfs - 1); + hinic5_event_callback(hwdev, &event); + + /* disable iov and allow time for transactions to clear */ + err = HINIC_UB_DISABLE_VDEV(ubus_dev, ue_idx); + if (err != 0) { + sdk_err(&ubus_dev->dev, "Failed to disable SR-IOV, error %d\n", err); + clear_bit(HINIC5_SRIOV_DISABLE, &sriov_info->state); + return err; + } + + sriov_info->num_vfs = pre_existing_vfs - 1; + + if (sriov_info->num_vfs == 0) + sriov_info->sriov_enabled = 0; + clear_bit(HINIC5_SRIOV_DISABLE, &sriov_info->state); + return 0; +} + +/* + UBUS的sriov实现与PCIE有区别,UBUS支持动态修改vf个数 + 1)pcie需要echo 2 --> echo 0 --> echo 3,ubus不需要, + ub_dev中维护了vdevice链表和start_vf_idx和end_vf_idx + 2)pcie每次echo只会调用一次驱动的接口, + 根据传参vf_num决定使能多少个vf; + ubus每次echo会根据vf_num决定调用多少次驱动的接口 + */ +int hinic5_ubus_virt_configure(hinic_ub_dev *ubus_dev, int ue_idx, bool is_en) +{ + struct hinic5_sriov_info *sriov_info = NULL; + struct hinic5_adev *adev = dev_get_drvdata(&ubus_dev->dev); + + if (!adev) + return -EFAULT; + + sriov_info = &adev->sriov_info; + + if (!sriov_info) + return -EFAULT; + + if (!test_bit(HINIC5_FUNC_PERSENT, &sriov_info->state)) + return -EFAULT; + + /* The ubus framework have ensure that only primary entity can come + * here, so we not need to check is this a primary entity again. 
+ */ + dev_info(&ubus_dev->dev, "ubase virt configure set idx = %d en = %d.\n", + ue_idx, is_en); + + if (!is_en) + return hinic5_ubus_sriov_disable(ubus_dev, ue_idx); + else + return hinic5_ubus_sriov_enable(ubus_dev, ue_idx); +} +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_ubus_sriov.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_ubus_sriov.h new file mode 100644 index 00000000..37952632 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/lld/hinic5_ubus_sriov.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_SRIOV_H +#define HINIC5_SRIOV_H +#ifdef __UBUS_DRIVER__ +#include <linux/types.h> +#include "hinic5_bus.h" + +#endif +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_devlink.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_devlink.c new file mode 100644 index 00000000..66d944d4 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_devlink.c @@ -0,0 +1,491 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/netlink.h> +#include <linux/firmware.h> + +#include "hinic5_devlink.h" +#ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS +#include "mpu_inband_cmd.h" +#include "hinic5_common.h" +#include "hinic5_api_cmd.h" +#include "hinic5_mgmt.h" +#include "hinic5_hw.h" +#include "ossl_knl.h" +#include "fw_typedef.h" + +#ifdef HAVE_DEVLINK_FLASH_UPDATE_METHOD +static bool check_image_valid(struct hinic5_hwdev *hwdev, const u8 *buf, + u32 size, struct host_image *host_image) +{ + struct firmware_image *fw_image = NULL; + u32 len = 0; + u32 i, n; + + fw_image = (struct firmware_image *)buf; + if (fw_image->fw_magic != FW_MAGIC_NUM) { + sdk_err(hwdev->dev_hdl, "Wrong fw magic read from file, fw_magic: 0x%x\n", + fw_image->fw_magic); + return false; + } + + if 
(fw_image->fw_info.section_cnt > FW_TYPE_MAX_NUM) { + sdk_err(hwdev->dev_hdl, "Wrong fw type number read from file, fw_type_num: 0x%x\n", + fw_image->fw_info.section_cnt); + return false; + } + + for (i = 0, n = 0; i < fw_image->fw_info.section_cnt; i++) { + if (fw_image->section_info[i].section_type == UP_FW_UPDATE_L0FW) { + len += fw_image->section_info[i].section_len; + memcpy(&host_image->section_info[n++], &fw_image->section_info[i], + sizeof(struct firmware_section)); + break; + } + } + + for (i = 0; i < fw_image->fw_info.section_cnt; i++) { + if (fw_image->section_info[i].section_type == UP_FW_UPDATE_L0FW) + continue; + len += fw_image->section_info[i].section_len; + memcpy(&host_image->section_info[n++], &fw_image->section_info[i], + sizeof(struct firmware_section)); + } + + if (len != fw_image->fw_len || + (u32)(fw_image->fw_len + FW_IMAGE_HEAD_SIZE) != size) { + sdk_err(hwdev->dev_hdl, "Wrong data size read from file\n"); + return false; + } + + host_image->image_info.total_len = fw_image->fw_len; + host_image->image_info.fw_version = fw_image->fw_version; + host_image->type_num = fw_image->fw_info.section_cnt; + host_image->device_id = fw_image->device_id; + + return true; +} + +static bool check_image_device_type(struct hinic5_hwdev *hwdev, u32 device_type) +{ + struct comm_cmd_board_info board_info; + + /* 冷升级取固件type为默认值0 */ + if (device_type == FW_DEFAULT_TYPE_COLD_UPDATE) + return true; + + memset(&board_info, 0, sizeof(board_info)); + if (hinic5_get_board_info(hwdev, &board_info.info, HINIC5_CHANNEL_COMM) != 0) { + sdk_err(hwdev->dev_hdl, "Failed to get board info\n"); + return false; + } + + if (device_type == board_info.info.board_type) + return true; + + sdk_err(hwdev->dev_hdl, "The image device type: 0x%x doesn't match the firmware device type: 0x%x\n", + device_type, board_info.info.board_type); + + return false; +} + +static void encapsulate_update_cmd(struct hinic5_cmd_update_firmware *msg, + struct firmware_section *section_info, + const int 
*remain_len, u32 *send_len, const u32 *send_pos) +{ + memset(msg->data, 0, sizeof(msg->data)); + msg->ctl_info.sf = (*remain_len == section_info->section_len) ? true : false; + msg->section_info.section_crc = section_info->section_crc; + msg->section_info.section_type = section_info->section_type; + msg->section_version = section_info->section_version; + msg->section_len = section_info->section_len; + msg->section_offset = *send_pos; + msg->ctl_info.bit_signed = section_info->section_flag & 0x1; + + if (*remain_len <= FW_FRAGMENT_MAX_LEN) { + msg->ctl_info.sl = true; + msg->ctl_info.fragment_len = (u32)(*remain_len); + *send_len += section_info->section_len; + } else { + msg->ctl_info.sl = false; + msg->ctl_info.fragment_len = FW_FRAGMENT_MAX_LEN; + *send_len += FW_FRAGMENT_MAX_LEN; + } +} + +static int hinic5_flash_firmware(struct hinic5_hwdev *hwdev, const u8 *data, + struct host_image *image) +{ + u32 send_pos, send_len, section_offset, i; + struct hinic5_cmd_update_firmware *update_msg = NULL; + u16 out_size = sizeof(*update_msg); + bool total_flag = false; + int remain_len, err; + + update_msg = kzalloc(sizeof(*update_msg), GFP_KERNEL); + if (!update_msg) + return -ENOMEM; + + for (i = 0; i < image->type_num; i++) { + section_offset = image->section_info[i].section_offset; + remain_len = (int)(image->section_info[i].section_len); + send_len = 0; + send_pos = 0; + + while (remain_len > 0) { + if (!total_flag) { + update_msg->total_len = image->image_info.total_len; + total_flag = true; + } else { + update_msg->total_len = 0; + } + + encapsulate_update_cmd(update_msg, &image->section_info[i], + &remain_len, &send_len, &send_pos); + + memcpy(update_msg->data, + ((data + FW_IMAGE_HEAD_SIZE) + section_offset) + send_pos, + update_msg->ctl_info.fragment_len); + + err = hinic5_msg_to_mgmt_sync(hwdev, HINIC5_MOD_COMM, + COMM_MGMT_CMD_UPDATE_FW, + update_msg, sizeof(*update_msg), + update_msg, &out_size, + FW_UPDATE_MGMT_TIMEOUT, 0); + if (err != 0 || out_size == 0 || 
update_msg->msg_head.status != 0) { + sdk_err(hwdev->dev_hdl, "Failed to update firmware, err: %d, \ + status: 0x%x, out size: 0x%x\n", + err, update_msg->msg_head.status, out_size); + err = (update_msg->msg_head.status != 0) ? + update_msg->msg_head.status : -EIO; + kfree(update_msg); + return err; + } + + send_pos = send_len; + remain_len = (int)(image->section_info[i].section_len - send_len); + } + } + + kfree(update_msg); + + return 0; +} + +static int hinic5_flash_update_notify(struct devlink *devlink, const struct firmware *fw, + struct host_image *image, struct netlink_ext_ack *extack) +{ + struct hinic5_devlink *devlink_dev = devlink_priv(devlink); + struct hinic5_hwdev *hwdev = devlink_dev->hwdev; + int err; + +#ifdef HAVE_DEVLINK_FLASH_UPDATE_BEGIN_END_NOTIFY + devlink_flash_update_begin_notify(devlink); +#endif + devlink_flash_update_status_notify(devlink, "Flash firmware begin", NULL, 0, 0); + sdk_info(hwdev->dev_hdl, "Flash firmware begin\n"); + err = hinic5_flash_firmware(hwdev, fw->data, image); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to flash firmware, err: %d\n", err); + NL_SET_ERR_MSG_MOD(extack, "Flash firmware failed"); + devlink_flash_update_status_notify(devlink, "Flash firmware failed", NULL, 0, 0); + } else { + err = hinic5_activate_firmware(hwdev, 0); + if (err != 0) { + sdk_err(hwdev->dev_hdl, " Failed to activate firmware, err: %d\n", err); + devlink_flash_update_status_notify(devlink, + "Activate firmware failed", NULL, 0, 0); + } else { + sdk_info(hwdev->dev_hdl, "Flash firmware end\n"); + devlink_flash_update_status_notify(devlink, + "Flash firmware end", NULL, 0, 0); + } + } +#ifdef HAVE_DEVLINK_FLASH_UPDATE_BEGIN_END_NOTIFY + devlink_flash_update_end_notify(devlink); +#endif + + return err; +} + +#ifdef HAVE_DEVLINK_OPS_FLASH_UPDATE_HAVE_PARAMS +static int hinic5_devlink_flash_update(struct devlink *devlink, + struct devlink_flash_update_params *params, + struct netlink_ext_ack *extack) +#else +static int 
hinic5_devlink_flash_update(struct devlink *devlink, const char *file_name, + const char *component, struct netlink_ext_ack *extack) +#endif +{ + struct hinic5_devlink *devlink_dev = devlink_priv(devlink); + struct hinic5_hwdev *hwdev = devlink_dev->hwdev; +#ifndef HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW + const struct firmware *fw = NULL; // fw和file_name是互斥的 +#else + const struct firmware *fw = params->fw; +#endif + struct host_image *image = NULL; + int err; + + image = kzalloc(sizeof(*image), GFP_KERNEL); + if (!image) { + sdk_err(hwdev->dev_hdl, "Failed to alloc host image\n"); + err = -ENOMEM; + goto devlink_param_reset; + } + +#ifndef HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW +#ifdef HAVE_DEVLINK_OPS_FLASH_UPDATE_HAVE_PARAMS +#ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FILE_NAME + err = request_firmware_direct(&fw, params->file_name, hwdev->dev_hdl); +#else + // 理论上不存在该场景 + kfree(image); + err = -EINVAL; + goto devlink_param_reset; +#endif +#else + err = request_firmware_direct(&fw, file_name, hwdev->dev_hdl); +#endif + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to request firmware\n"); + goto devlink_request_fw_err; + } +#endif + + if (!check_image_valid(hwdev, fw->data, (u32)(fw->size), image) || + !check_image_device_type(hwdev, image->device_id)) { + sdk_err(hwdev->dev_hdl, "Failed to check image\n"); + NL_SET_ERR_MSG_MOD(extack, "Check image failed"); + err = -EINVAL; + goto devlink_update_out; + } + + err = hinic5_flash_update_notify(devlink, fw, image, extack); + +devlink_update_out: +#ifndef HAVE_DEVLINK_FLASH_UPDATE_PARAMS_FW + release_firmware(fw); + +devlink_request_fw_err: +#endif + kfree(image); + +devlink_param_reset: + /* reset activate_fw and switch_cfg after flash update operation */ + devlink_dev->activate_fw = FW_CFG_DEFAULT_INDEX; + devlink_dev->switch_cfg = FW_CFG_DEFAULT_INDEX; + + return err; +} + +static int hinic5_devlink_info_get(struct devlink *dl, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct hinic5_devlink 
*devlink_dev = devlink_priv(dl); + struct hinic5_hwdev *hwdev = devlink_dev->hwdev; + u8 mgmt_ver[HINIC5_MGMT_VERSION_MAX_LEN] = {0}; + int err; + +#ifdef HAVE_DEVLINK_INFO_DRIVER_NAME_PUT + err = devlink_info_driver_name_put(req, HINIC5_DRV_NAME); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to set driver name\n"); + return err; + }; +#endif + + /* 固件版本 */ + err = hinic5_get_mgmt_version(hwdev, mgmt_ver, sizeof(mgmt_ver), HINIC5_CHANNEL_COMM); + if (err != 0) { + sdk_info(hwdev->dev_hdl, "Failed to get firmware versions\n"); + return err; + } + + err = devlink_info_version_stored_put(req, "fw.version", (char *)&mgmt_ver[0]); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to set stored fw version\n"); + return err; + } + + err = devlink_info_version_running_put(req, "fw.version", (char *)&mgmt_ver[0]); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to set running fw version\n"); + return err; + } + + return 0; +} +#endif + +static const struct devlink_ops hinic5_devlink_ops = { +#ifdef HAVE_DEVLINK_FLASH_UPDATE_METHOD + .flash_update = hinic5_devlink_flash_update, +#endif + .info_get = hinic5_devlink_info_get, +}; + +static int hinic5_devlink_get_activate_firmware_config(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct hinic5_devlink *devlink_dev = devlink_priv(devlink); + + ctx->val.vu8 = devlink_dev->activate_fw; + + return 0; +} + +#ifdef HAVE_DEVLINK_PARAM_SET_EXTACK +static int hinic5_devlink_set_activate_firmware_config(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) +#else +static int hinic5_devlink_set_activate_firmware_config(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +#endif +{ + struct hinic5_devlink *devlink_dev = devlink_priv(devlink); + struct hinic5_hwdev *hwdev = devlink_dev->hwdev; + int err; + + devlink_dev->activate_fw = ctx->val.vu8; + sdk_info(hwdev->dev_hdl, "Activate firmware begin\n"); + + err = 
hinic5_activate_firmware(hwdev, devlink_dev->activate_fw); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to activate firmware, err: %d\n", err); + return err; + } + + sdk_info(hwdev->dev_hdl, "Activate firmware end\n"); + + return 0; +} + +static int hinic5_devlink_get_switch_config(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct hinic5_devlink *devlink_dev = devlink_priv(devlink); + + ctx->val.vu8 = devlink_dev->switch_cfg; + + return 0; +} + +#ifdef HAVE_DEVLINK_PARAM_SET_EXTACK +static int hinic5_devlink_set_switch_config(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) +#else +static int hinic5_devlink_set_switch_config(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +#endif +{ + struct hinic5_devlink *devlink_dev = devlink_priv(devlink); + struct hinic5_hwdev *hwdev = devlink_dev->hwdev; + int err; + + devlink_dev->switch_cfg = ctx->val.vu8; + sdk_info(hwdev->dev_hdl, "Switch cfg begin"); + + err = hinic5_switch_config(hwdev, devlink_dev->switch_cfg); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to switch cfg, err: %d\n", err); + return err; + } + + sdk_info(hwdev->dev_hdl, "Switch cfg end\n"); + + return 0; +} + +static int hinic5_devlink_firmware_config_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct hinic5_devlink *devlink_dev = devlink_priv(devlink); + struct hinic5_hwdev *hwdev = devlink_dev->hwdev; + u8 cfg_index = val.vu8; + + if (cfg_index > FW_CFG_MAX_INDEX) { + sdk_err(hwdev->dev_hdl, "Firmware cfg index out of range [0,7]\n"); + NL_SET_ERR_MSG_MOD(extack, "Firmware cfg index out of range [0,7]"); + return -ERANGE; + } + + return 0; +} + +static const struct devlink_param hinic5_devlink_params[] = { + DEVLINK_PARAM_DRIVER(HINIC5_DEVLINK_PARAM_ID_ACTIVATE_FW, + "activate_fw", DEVLINK_PARAM_TYPE_U8, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + 
hinic5_devlink_get_activate_firmware_config, + hinic5_devlink_set_activate_firmware_config, + hinic5_devlink_firmware_config_validate), + DEVLINK_PARAM_DRIVER(HINIC5_DEVLINK_PARAM_ID_SWITCH_CFG, + "switch_cfg", DEVLINK_PARAM_TYPE_U8, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + hinic5_devlink_get_switch_config, + hinic5_devlink_set_switch_config, + hinic5_devlink_firmware_config_validate), +}; + +int hinic5_init_devlink(struct hinic5_hwdev *hwdev) +{ + struct device *dev = (struct device *)hwdev->dev_hdl; + struct devlink *devlink = NULL; + int err; + + devlink = ossl_devlink_alloc(&hinic5_devlink_ops, + sizeof(struct hinic5_devlink), dev); + if (!devlink) { + sdk_err(hwdev->dev_hdl, "Failed to alloc devlink\n"); + return -ENOMEM; + } + + hwdev->devlink_dev = devlink_priv(devlink); + hwdev->devlink_dev->hwdev = hwdev; + hwdev->devlink_dev->activate_fw = FW_CFG_DEFAULT_INDEX; + hwdev->devlink_dev->switch_cfg = FW_CFG_DEFAULT_INDEX; + + err = ossl_devlink_register(devlink, dev); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to register devlink\n"); + goto register_devlink_err; + } + + err = devlink_params_register(devlink, hinic5_devlink_params, + ARRAY_SIZE(hinic5_devlink_params)); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Failed to register devlink params\n"); + goto register_devlink_params_err; + } + + devlink_params_publish(devlink); + + return 0; + +register_devlink_params_err: + devlink_unregister(devlink); + +register_devlink_err: + devlink_free(devlink); + + return -EFAULT; +} + +void hinic5_uninit_devlink(struct hinic5_hwdev *hwdev) +{ + struct devlink *devlink = priv_to_devlink(hwdev->devlink_dev); + + devlink_params_unpublish(devlink); + devlink_params_unregister(devlink, hinic5_devlink_params, + ARRAY_SIZE(hinic5_devlink_params)); + devlink_unregister(devlink); + devlink_free(devlink); +} +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_devlink.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_devlink.h new file 
mode 100644 index 00000000..391abc94 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_devlink.h @@ -0,0 +1,84 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2021 Huawei Technologies Co., Ltd */

#ifndef HINIC5_DEVLINK_H
#define HINIC5_DEVLINK_H

#include "ossl_knl.h"
#include "hinic5_hwdev.h"

#define FW_MAGIC_NUM 0x5a5a1100
#define FW_IMAGE_HEAD_SIZE 4096
#define FW_FRAGMENT_MAX_LEN 1536
#define FW_CFG_DEFAULT_INDEX 0xFF
#define FW_TYPE_MAX_NUM 0x40
#define FW_CFG_MAX_INDEX 7
#define FW_DEFAULT_TYPE_COLD_UPDATE 0

#ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS
/* Driver-private devlink parameter ids, allocated above the generic range. */
enum hinic5_devlink_param_id {
    HINIC5_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
    HINIC5_DEVLINK_PARAM_ID_ACTIVATE_FW,
    HINIC5_DEVLINK_PARAM_ID_SWITCH_CFG,
};
#endif

/* Section types that must all be present for an MPU image. */
#define IMAGE_MPU_ALL_IN (BIT_ULL(UP_FW_UPDATE_UP_TEXT) | \
                          BIT_ULL(UP_FW_UPDATE_UP_DATA) | \
                          BIT_ULL(UP_FW_UPDATE_UP_DICT))
// BIT_ULL(UP_FW_UPDATE_MPU_CNT_DICT)) ,
// full integrity checking of the cnt index file would be needed;
// left out for now for compatibility reasons

/* Section types that must all be present for an NPU image. */
#define IMAGE_NPU_ALL_IN (BIT_ULL(UP_FW_UPDATE_TILE_PCPTR) | \
                          BIT_ULL(UP_FW_UPDATE_TILE_TEXT) | \
                          BIT_ULL(UP_FW_UPDATE_TILE_DATA) | \
                          BIT_ULL(UP_FW_UPDATE_TILE_DICT) | \
                          BIT_ULL(UP_FW_UPDATE_PPE_STATE) | \
                          BIT_ULL(UP_FW_UPDATE_PPE_BRANCH) | \
                          BIT_ULL(UP_FW_UPDATE_PPE_EXTACT))
// BIT_ULL(UP_FW_UPDATE_NPU_CNT_DICT) ,
// full integrity checking of the cnt index file would be needed;
// left out for now for compatibility reasons

#define IMAGE_COLD_SUB_MODULES_MUST_IN (IMAGE_MPU_ALL_IN | IMAGE_NPU_ALL_IN)

#define IMAGE_CFG_SUB_MODULES_MUST_IN (BIT_ULL(UP_FW_UPDATE_CFG0) | \
                                       BIT_ULL(UP_FW_UPDATE_CFG1))

/* Per-section descriptor stored in the image header. */
struct firmware_section {
    u32 section_len;
    u32 section_offset;
    u32 section_version;
    u32 section_type;
    u32 section_crc;
    u32 section_flag;
};

/* On-disk layout of the firmware image file header (4096 bytes). */
struct firmware_image {
    u32 fw_version;
    u32 fw_len;
    u32 fw_magic;
    struct {
        u32 section_cnt : 16;
        u32 rsvd : 16;
    } fw_info;
    struct firmware_section section_info[FW_TYPE_MAX_NUM];
    u32 device_id; /* cfg fw board_type value */
    u32 rsvd0[101]; /* device_id and rsvd0[101] is update_head_extend_info */
    u32 rsvd1[534]; /* big bin file total size 4096B */
    u32 bin_data; /* obtain the address for use */
};

/* In-memory view of the image built by the driver before flashing. */
struct host_image {
    struct firmware_section section_info[FW_TYPE_MAX_NUM];
    struct {
        u32 total_len;
        u32 fw_version;
    } image_info;
    u32 type_num;
    u32 device_id;
};

int hinic5_init_devlink(struct hinic5_hwdev *hwdev);
void hinic5_uninit_devlink(struct hinic5_hwdev *hwdev);

#endif
diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_fw_update.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_fw_update.c
new file mode 100644 index 00000000..c6712a72 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_fw_update.c @@ -0,0 +1,925 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2024 Huawei Technologies Co., Ltd */

#include <asm/byteorder.h>
#include "ossl_knl.h"
#include "hinic5_mt.h"
#include "hinic5_crm.h"
#include "hinic5_hw.h"
#include "hinic5_hw_cfg.h"
#include "hinic5_chip_info.h"
#include "comm_defs.h"
#include "mpu_inband_cmd.h"
#include "mpu_inband_cmd_defs.h"
#include "hinic5_hw_mt.h"
#include "hinic5_typedef_inner.h"
#include "hinic5_fw_update.h"

/* Reference for below defines: hwsdk/hinic5_cqm/hinic5_cqm_bat_cla.h */
#define SM_BAT_NO_BYPASS_CACHE 0
#define SM_BAT_ENTRY_SIZE_256 0
#define SM_CLA_LVL_0 0
#define SM_CHIP_GPA_HIMASK 0x1ffffff
#define SM_CHIP_GPA_LOMASK 0xffffffff
#define SM_MAX_INDEX_BIT 19

/* Split a 64-bit guest physical address into the chip's hi/lo register halves. */
#define SM_CHIP_GPA_HI(gpa) ((u32)(((u64)((gpa)) >> 32) & SM_CHIP_GPA_HIMASK))
#define SM_CHIP_GPA_LW(gpa) ((u32)((u64)(gpa) & SM_CHIP_GPA_LOMASK))

/* Reference: hwsdk/hinic5_cqm/hinic5_cqm_bat_cla.h#tag_hinic5_cqm_bat_entry_standerd */
struct sm_bat_entry_standerd {
    u32 entry_size : 2;
    u32 rsv1 : 6;
    u32 max_number : 22;
    u32 rsv2 : 2;

    u32 cla_gpa_h : 32;

    u32 cla_gpa_l : 32;

    u32 rsv3 : 8;
    u32 z : 5;
    u32 y : 5;
    u32 x : 5;
    u32 rsv24 : 1;
    u32 bypass : 1;
    u32 cla_level : 2;
+ u32 rsv5 : 5; +}; + +/* Reference: pfm_load_api.h */ +#define FW_SEC_TYPE_TILE_TEXT 0x4 +#define FW_SEC_TYPE_PHY 0x18 + +#define BAT_L3I_MEM_SIZE (2U * 1024 * 1024) + +#define FW_SEC_HDR_SIZE 0x2100 +#define FW_SEC_SIZE_TILE_TEXT (1U * 1024 * 1024) /* FW .text section max size: 1MB */ +#define FW_SEC_SIZE_PHY (512U * 1024) /* FW .phy section max size: 512KB */ + +static int bat_l3i_get_entry_offset(struct tag_fw_update_handle *handle, + u32 *l3i_entry_offset) +{ + struct hinic5_hwdev *hwdev = handle->hwdev; + struct hinic5_bat_entry_config l3i_config = { 0 }; + int ret; + + ret = hinic5_bat_get_l3i_entry_config(hwdev, &l3i_config); + if (unlikely(ret != 0)) { + sdk_err(hwdev->dev_hdl, "hinic5_bat_get_l3i_entry_config fail.\n"); + return ret; + } + if (unlikely(!l3i_config.mapping)) { + sdk_err(hwdev->dev_hdl, "l3i should not mapping in current function.\n"); + return -EINVAL; + } + if (unlikely(l3i_config.bat_entry_size != sizeof(struct sm_bat_entry_standerd))) { + sdk_err(hwdev->dev_hdl, "l3i entry size mismatch.\n"); + return -EINVAL; + } + + *l3i_entry_offset = l3i_config.bat_entry_offset; + return 0; +} + +static void bat_l3i_fill_bat_entry_data(struct sm_bat_entry_standerd *data, + const struct tag_fw_update_bat_l3i_entry *entry, + const struct tag_fw_update_handle *handle) +{ + u8 z = SM_MAX_INDEX_BIT; + + data->entry_size = SM_BAT_ENTRY_SIZE_256; + data->max_number = entry->buf_size / FW_UPDATE_CHIP_CACHELINE; + data->cla_gpa_h = SM_CHIP_GPA_HI(entry->buf_pa); + data->cla_gpa_l = SM_CHIP_GPA_LW(entry->buf_pa) | handle->gpa_check_enable; + data->z = z; + data->y = 0; + data->x = 0; + data->bypass = SM_BAT_NO_BYPASS_CACHE; + data->cla_level = SM_CLA_LVL_0; +} + +#ifdef __FW_UPDATE_DEBUG__ +#define FUD_PR_BYTE_MAX 16 +#define FUD_PR_BYTE_MUL 3 +#define FUD_PR_BYTE_BUF_MAX (FUD_PR_BYTE_MAX * FUD_PR_BYTE_MUL + 1) +static void mpu_set_bat_l3i_entry_print_data(struct tag_fw_update_handle *handle, + struct comm_cmd_set_bat_info *cmd) +{ + struct hinic5_hwdev 
*hwdev = handle->hwdev; + u8 buf[FUD_PR_BYTE_BUF_MAX]; + u8 *p, *pe; + u32 i, size; + + size = cmd->data_size; + if (size > FUD_PR_BYTE_MAX) + size = FUD_PR_BYTE_MAX; + + p = buf; + pe = p + FUD_PR_BYTE_BUF_MAX; + memset(buf, 0, FUD_PR_BYTE_BUF_MAX); + + for (i = 0; i < size; i++) { + (void)sprintf_s(p, pe - p, "%02X ", cmd->data[i]); + p += FUD_PR_BYTE_MUL; + } + + sdk_info(hwdev->dev_hdl, "fw_update: BAT L3I update: data %s\n", buf); +} +#endif + +static int mgmt_set_bat_l3i_entry(struct tag_fw_update_handle *handle, + u8 smf_id, u16 func_id, + struct tag_fw_update_bat_l3i_entry *entry) +{ + struct hinic5_hwdev *hwdev = handle->hwdev; + struct sm_bat_entry_standerd bat_data = {0}; + struct comm_cmd_set_bat_info buf; + u16 out_size = sizeof(buf); + u32 l3i_entry_offset = HINIC5_BAT_MAX; + int ret; + + ret = bat_l3i_get_entry_offset(handle, &l3i_entry_offset); + if (unlikely(ret != 0)) + return ret; + + bat_l3i_fill_bat_entry_data(&bat_data, entry, handle); + + memset(&buf, 0, sizeof(buf)); + buf.func_id = func_id; + buf.smf_id = smf_id; + buf.bat_offset = l3i_entry_offset; + buf.data_size = sizeof(bat_data); + memcpy(buf.data, (void *)&bat_data, sizeof(bat_data)); + +#ifdef __FW_UPDATE_DEBUG__ + sdk_info(hwdev->dev_hdl, + "fw_update: BAT L3I update: smf_id %u, func_id %u, bat_off %u\n", + smf_id, func_id, l3i_entry_offset); + mpu_set_bat_l3i_entry_print_data(handle, &buf); +#endif + + ret = hinic5_msg_to_mgmt_sync(hwdev, HINIC5_MOD_COMM, + COMM_MGMT_CMD_SET_BAT_INFO, + &buf, sizeof(buf), &buf, &out_size, + 0, HINIC5_CHANNEL_COMM); + if (ret != 0 || out_size == 0 || buf.head.status != 0) { + sdk_err(hwdev->dev_hdl, + "Failed to set bat info, err: %d, \ + status: 0x%x, out size: 0x%x, channel: 0x%x\n", + ret, buf.head.status, out_size, HINIC5_CHANNEL_COMM); + return ret; + } + + return 0; +} + +static int fw_update_init_bat_l3i_entry(struct tag_fw_update_handle *handle, + struct tag_fw_update_bat_l3i_entry *entry, + u32 page_order) +{ + struct hinic5_hwdev *hwdev = 
handle->hwdev; + struct device *dev = handle->dev; + u32 buf_size; + dma_addr_t pa; + int ret; + void *va = (void *)(uintptr_t)__get_free_pages(GFP_KERNEL | __GFP_ZERO, page_order); + + if (!va) { + sdk_err(hwdev->dev_hdl, + "BAT L3I buffer alloc failed, page_order %u\n", + page_order); + return -ENOMEM; + } + + buf_size = PAGE_SIZE << page_order; + pa = dma_map_single(dev, va, buf_size, DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, pa) != 0) { + sdk_err(hwdev->dev_hdl, + "BAT L3I buffer map failed, size 0x%x\n", buf_size); + ret = -EIO; + goto pci_map_failed; + } + + entry->page_order = page_order; + entry->buf_size = buf_size; + entry->buf_va = va; + entry->buf_pa = pa; + + return 0; + +pci_map_failed: + free_pages((uintptr_t)va, page_order); + return ret; +} + +static void fw_update_deinit_bat_l3i_entry(struct tag_fw_update_handle *handle, + struct tag_fw_update_bat_l3i_entry *entry) +{ + struct device *dev = handle->dev; + + if (entry->buf_pa != 0) { + dma_unmap_single(dev, entry->buf_pa, entry->buf_size, + DMA_BIDIRECTIONAL); + entry->buf_pa = 0; + } + + if (entry->buf_va != 0) { + free_pages((uintptr_t)entry->buf_va, entry->page_order); + entry->buf_va = 0; + } + + memset(entry, 0, sizeof(*entry)); +} + +static int fw_update_init_bat_l3i(struct tag_fw_update_handle *handle) +{ + struct hinic5_hwdev *hwdev = handle->hwdev; + struct hinic5_func_attr *func_attr = &hwdev->hwif->attr; + struct tag_fw_update_bat_l3i_entry *smf_entry = NULL; + u16 func_id = func_attr->func_global_idx; + u32 per_smf_size; + int page_order; + u8 smf_id; + int smf_idx, smf_num; + int ret = 0; + + if (handle->smf_enabled_num == 0) { + sdk_err(hwdev->dev_hdl, "smf_enabled_num is zero\n"); + return -EINVAL; + } + + per_smf_size = BAT_L3I_MEM_SIZE / handle->smf_enabled_num; + page_order = get_order(per_smf_size); + if (page_order < 0) { + sdk_err(hwdev->dev_hdl, "get_order fail, ret %d\n", page_order); + return -EINVAL; + } + + smf_num = (int)handle->smf_enabled_num; + + 
sdk_info(hwdev->dev_hdl, "fw_update: smf num %d, per size 0x%X, page order %d\n", + smf_num, per_smf_size, page_order); + + for (smf_idx = 0; smf_idx < smf_num; smf_idx++) { + smf_id = handle->smf_enabled[smf_idx]; + smf_entry = &handle->bat_l3i_entries[smf_id]; + ret = fw_update_init_bat_l3i_entry(handle, smf_entry, (u32)page_order); + if (ret != 0) + goto entry_init_error; + +#ifdef __FW_UPDATE_DEBUG__ + sdk_info(hwdev->dev_hdl, + "fw_update: BAT L3I init: smf_id %u, va 0x%lx, pa 0x%lx, size 0x%x\n", + smf_id, (uintptr_t)smf_entry->buf_va, + (uintptr_t)smf_entry->buf_pa, smf_entry->buf_size); +#endif + + ret = mgmt_set_bat_l3i_entry(handle, smf_id, func_id, smf_entry); + if (ret != 0) + goto entry_init_error; + } + + return 0; + +entry_init_error: + while (smf_idx >= 0) { + smf_id = handle->smf_enabled[smf_idx]; + smf_entry = &handle->bat_l3i_entries[smf_id]; + fw_update_deinit_bat_l3i_entry(handle, smf_entry); + smf_idx--; + } + + return ret; +} + +static void fw_update_deinit_bat_l3i(struct tag_fw_update_handle *handle) +{ + struct tag_fw_update_bat_l3i_entry *smf_entry = NULL; + u32 smf_id, i; + + for (i = 0; i < handle->smf_enabled_num; i++) { + smf_id = handle->smf_enabled[i]; + smf_entry = &handle->bat_l3i_entries[smf_id]; + fw_update_deinit_bat_l3i_entry(handle, smf_entry); + } +} + +static void fw_update_capability_init_smf(struct tag_fw_update_handle *handle) +{ + struct hinic5_hwdev *hwdev = handle->hwdev; + struct service_cap *svc_cap = &hwdev->cfg_mgmt->svc_cap; + const u32 smf_max_num = svc_cap->smf_max_num; + const u32 smf_pg = svc_cap->smf_pg; + u32 smf_id, i; + + i = 0; + for (smf_id = 0; smf_id < smf_max_num; smf_id++) { + if ((smf_pg & (1U << smf_id)) != 0) { + handle->smf_enabled[i] = (u8)smf_id; + i++; + } + } + handle->smf_enabled_num = i; +} + +static void fw_update_capability_init(struct tag_fw_update_handle *handle) +{ + struct hinic5_hwdev *hwdev = handle->hwdev; + struct service_cap *svc_cap = &hwdev->cfg_mgmt->svc_cap; + + 
fw_update_capability_init_smf(handle); + + handle->gpa_check_enable = true; + if (svc_cap->test_mode != 0) + handle->gpa_check_enable = svc_cap->test_gpa_check_enable; +} + +static int fw_update_alloc(struct hinic5_hwdev *hwdev) +{ + struct tag_fw_update_handle *handle = NULL; + + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + if (unlikely(!handle)) { + sdk_err(hwdev->dev_hdl, "fw_update_hdl alloc fail.\n"); + return -ENOMEM; + } + + handle->hwdev = hwdev; + handle->dev = hwdev->dev_hdl; + hwdev->fw_update_hdl = (void *)handle; + + return 0; +} + +int hinic5_fw_update_init(void *hwdev_hdl) +{ + struct hinic5_hwdev *hwdev = (struct hinic5_hwdev *)hwdev_hdl; + struct tag_fw_update_handle *handle = NULL; + int ret; + + if (unlikely(!hwdev_hdl)) { + pr_err("hwdev_hdl is null.\n"); + return -EINVAL; + } + + if (!hinic5_fw_update_ddr_enabled(hwdev_hdl)) + return 0; + + ret = fw_update_alloc(hwdev); + if (unlikely(ret != 0)) + return ret; + handle = (struct tag_fw_update_handle *)hwdev->fw_update_hdl; + + fw_update_capability_init(handle); + + ret = fw_update_init_bat_l3i(handle); + if (ret != 0) { + sdk_err(hwdev->dev_hdl, "init resources failed %d\n", ret); + goto init_res_err; + } + + return 0; + +init_res_err: + kfree(hwdev->fw_update_hdl); + hwdev->fw_update_hdl = NULL; + return ret; +} + +void hinic5_fw_update_deinit(void *hwdev_hdl) +{ + struct hinic5_hwdev *hwdev = (struct hinic5_hwdev *)hwdev_hdl; + struct tag_fw_update_handle *handle = NULL; + + if (likely(!hwdev || !hwdev->fw_update_hdl)) + return; + + handle = (struct tag_fw_update_handle *)hwdev->fw_update_hdl; + + fw_update_deinit_bat_l3i(handle); + + kfree(hwdev->fw_update_hdl); + hwdev->fw_update_hdl = NULL; +} + +bool hinic5_fw_update_ddr_enabled(void *hwdev_hdl) +{ + struct hinic5_hwdev *hwdev = (struct hinic5_hwdev *)hwdev_hdl; + + if (unlikely(!hwdev)) + return false; + + return COMM_SUPPORT_UFHD_FLEX_SEG(hwdev) || COMM_SUPPORT_UFHD(hwdev); +} + +static struct fw_update_context 
*hinic5_fw_update_create_context(struct hinic5_hwdev *hwdev) +{ + struct fw_update_context *context; + + context = vzalloc(sizeof(*context)); + + if (!context) + return NULL; + + context->sec_text.data_cap = get_device_capablity(hwdev)->fw_update_cap.fw_tile_text_size; + if (context->sec_text.data_cap == 0) { + /* for compatibility */ + context->sec_text.data_cap = FW_SEC_SIZE_TILE_TEXT; + } + context->sec_text.data = vzalloc(context->sec_text.data_cap); + if (!context->sec_text.data) + goto alloc_sec_text_data_failed; + + context->sec_phy.data_cap = FW_SEC_SIZE_PHY; + context->sec_phy.data = vzalloc(FW_SEC_SIZE_PHY); + if (!context->sec_phy.data) + goto alloc_sec_phy_data_failed; + + return context; + +alloc_sec_phy_data_failed: + vfree(context->sec_text.data); +alloc_sec_text_data_failed: + vfree(context); + return NULL; +} + +void hinic5_fw_update_free_context(void *update_context_hdl) +{ + struct fw_update_context *context = (struct fw_update_context *)update_context_hdl; + + if (!context) + return; + + if (context->sec_phy.data) { + vfree(context->sec_phy.data); + context->sec_phy.data = NULL; + } + + if (context->sec_text.data) { + vfree(context->sec_text.data); + context->sec_text.data = NULL; + } + + vfree(context); +} + +static int fw_update_context_get(struct hinic5_hwdev *hwdev, + struct fw_update_context **context) +{ + struct card_node *chip_node = hwdev->chip_node; + spinlock_t *lock = NULL; + int ret; + + if (unlikely(!chip_node)) { + sdk_warn(hwdev->dev_hdl, "fw_update: chip_node not init, try later.\n"); + return -EAGAIN; + } + + lock = &chip_node->fw_update_context_lock; + + ret = spin_trylock(lock); + if (unlikely(ret == 0)) { + sdk_warn(hwdev->dev_hdl, "fw_update: not allowed to concurrent update, ret %d, abort!", + ret); + return -EBUSY; + } + + if (unlikely(!chip_node->fw_update_context)) { + chip_node->fw_update_context = hinic5_fw_update_create_context(hwdev); + if (!chip_node->fw_update_context) { + pr_err("fw_update: create context 
failed.\n"); + spin_unlock(lock); + return -ENOMEM; + } + } + *context = chip_node->fw_update_context; + + return 0; +} + +static void fw_update_context_put(struct hinic5_hwdev *hwdev) +{ + struct card_node *chip_node = hwdev->chip_node; + + spin_unlock(&chip_node->fw_update_context_lock); +} + +static void fw_update_context_reset(struct fw_update_context *context) +{ + context->update_started = 0; + + context->sec_text.data_size = 0; + context->sec_text.data_off = 0; + context->sec_text.verified = 0; + + context->sec_phy.data_size = 0; + context->sec_phy.data_off = 0; + context->sec_phy.verified = 0; +} + +static int check_fw_section_update(struct fw_section_data *section, + struct fw_update_msg_st *update) +{ + u32 data_len = (u32)update->ctl_info.Fragment_Len; + + /* handle text section first transmission */ + if (update->ctl_info.SF == 1) { + if (section->data_off != 0 || section->data_size != 0) { + pr_err("fw_update: broken fw section, last off %u, size %u\n", + section->data_off, section->data_size); + return -EINVAL; + } + if (update->setion_total_len > section->data_cap) { + pr_err("fw_update: fw section size too large, type 0x%x, size 0x%x, cap 0x%x\n", + update->section_info.FW_section_type, + update->setion_total_len, section->data_cap); + return -EFBIG; + } + + memset(section->data, 0, section->data_cap); + section->data_size = update->setion_total_len; + section->verified = 0; + } + + if (section->data_off != update->section_offset) { + pr_err("fw_update: unmatched offset, data_off %u, in_off %u\n", + section->data_off, update->section_offset); + return -EINVAL; + } + + if (section->data_size - section->data_off < data_len) + return -EINVAL; + + if (update->ctl_info.SL == 1) { + if (section->data_off + data_len != section->data_size) { + pr_err("fw_update: unmatched length\n"); + return -EINVAL; + } + } + + return 0; +} + +static inline int mgmt_cmd_update_fw_op(void *hwdev, struct hinic5_mt_cmd_info *cmd_info) +{ + return 
    hinic5_msg_to_mgmt_sync(hwdev, cmd_info->mod, cmd_info->cmd,
                            cmd_info->buf_in, cmd_info->in_size, cmd_info->buf_out,
                            cmd_info->out_size, cmd_info->timeout,
                            HINIC5_CHANNEL_DEFAULT);
}

/* Forward one fragment to the MPU and, on success, accumulate it into the
 * host-side copy of the section so it can later be hot-activated from DDR.
 */
static int handle_fw_section_update(void *hwdev, struct hinic5_mt_cmd_info *cmd_info,
                                    struct fw_section_data *section)
{
    struct fw_update_msg_st *update = (struct fw_update_msg_st *)(cmd_info->buf_in);
    struct mgmt_msg_head *result_head = NULL;
    u32 data_len;
    int ret = 0;

    /* Reject fragments that are inconsistent with the session state. */
    ret = check_fw_section_update(section, update);
    if (ret != 0)
        return ret;

    ret = mgmt_cmd_update_fw_op(hwdev, cmd_info);
    if (ret != 0) {
        pr_err("fw_update failed, ret %d\n", ret);
        return ret;
    }

    if (*cmd_info->out_size < sizeof(struct mgmt_msg_head)) {
        pr_err("fw_update: incompatible protocol, out_size %u\n", *cmd_info->out_size);
        return -EINVAL;
    }
    result_head = (struct mgmt_msg_head *)(cmd_info->buf_out);

    /* MPU will not return verify result if Repeat-Msg requested
     * so we skip updating section data
     */
    if (result_head->status == MPU_FW_UPDATE_FLUSH_FLASH_REPEAT)
        return 0;

    if (update->ctl_info.SL == 1) {
        /* check fw bin verification result */
        if (result_head->status == MPU_FW_UPDATE_FW_VERIFY_ERR) {
            pr_err("fw_update: invalid update file\n");
            return MPU_FW_UPDATE_FW_VERIFY_ERR;
        }
        section->verified = 1;
    }

    data_len = (u32)update->ctl_info.Fragment_Len;

    /* Append the fragment payload to the host-side section buffer. */
    memcpy(section->data + section->data_off, update->data, data_len);
    section->data_off += data_len;

    if (update->ctl_info.SL == 1) {
        pr_info("fw_update: tile text section upload success. section type 0x%x, size 0x%x\n",
                update->section_info.FW_section_type, section->data_size);
    }

    return 0;
}

/* Dispatch one UPDATE_FW command: sections that feed hot activation
 * (.text and .phy) are captured locally, everything else goes straight
 * to the MPU. Any failure resets the accumulated session state.
 */
static int handle_cmd_update(void *hwdev, struct hinic5_mt_cmd_info *cmd_info,
                             struct fw_update_context *context)
{
    struct fw_update_msg_st *update = (struct fw_update_msg_st *)(cmd_info->buf_in);
    struct fw_section_data *section = NULL;
    int ret = 0;

    if (cmd_info->in_size < sizeof(struct fw_update_msg_st)) {
        pr_err("fw_update: invalid argument size\n");
        ret = -EINVAL;
        goto reset_update_context;
    }

#ifdef __FW_UPDATE_DEBUG__
    pr_info("fw_update: sec_type %u, off 0x%x, len %u\n",
            update->section_info.FW_section_type,
            update->section_offset,
            (u32)update->ctl_info.Fragment_Len);
#endif

    /* handle new update session */
    if (update->total_len != 0) {
        if (context->update_started != 0 &&
            (context->sec_text.data_off != context->sec_text.data_size ||
             context->sec_phy.data_off != context->sec_phy.data_size))
            pr_warn("fw_update: previous update may not completed, .text off 0x%x size 0x%x, .phy off 0x%x size 0x%x\n",
                    context->sec_text.data_off, context->sec_text.data_size,
                    context->sec_phy.data_off, context->sec_phy.data_size);
        fw_update_context_reset(context);
        context->update_started = 1;
    }

    if (context->update_started != 1) {
        pr_err("fw_update: incompatible protocol\n");
        return -EINVAL;
    }

    if (update->section_info.FW_section_type == FW_SEC_TYPE_TILE_TEXT) {
        section = &context->sec_text;
        ret = handle_fw_section_update(hwdev, cmd_info, section);
    } else if (update->section_info.FW_section_type == FW_SEC_TYPE_PHY) {
        section = &context->sec_phy;
        ret = handle_fw_section_update(hwdev, cmd_info, section);
    } else {
        ret = mgmt_cmd_update_fw_op(hwdev, cmd_info);
    }

    if (ret == 0)
        return ret;

reset_update_context:
    fw_update_context_reset(context);
    return ret;
}

int hinic5_fw_update_cmd_update(void *hwdev_hdl, struct hinic5_mt_cmd_info *cmd_info)
{
    struct fw_update_context
*context = NULL; + int ret = 0; + + /* use extended upload procedure if FW hot update via L3I + * is enabled, otherwise use normal way + */ + if (!hinic5_fw_update_ddr_enabled(hwdev_hdl)) + return mgmt_cmd_update_fw_op(hwdev_hdl, cmd_info); + + ret = fw_update_context_get((struct hinic5_hwdev *)hwdev_hdl, &context); + if (ret != 0) + return ret; + + ret = handle_cmd_update(hwdev_hdl, cmd_info, context); + + fw_update_context_put((struct hinic5_hwdev *)hwdev_hdl); + + /* this type of error is considered a successful result of ioctl() */ + if (ret == MPU_FW_UPDATE_FW_VERIFY_ERR) + ret = 0; + + return ret; +} + +/** + * @brief hinic5_bat_l3i_store - store data into L3I buffer + * @param hwdev: device pointer to hwdev + * @param data: pointer to data + * @param data_size: data size, must aligned to cache line + * @retval zero: success + * @retval non-zero: failure + */ +STATIC int hinic5_bat_l3i_store(const struct hinic5_hwdev *hwdev, const u8 *data, u32 data_size) +{ + struct tag_fw_update_handle *handle = (struct tag_fw_update_handle *)hwdev->fw_update_hdl; + struct service_cap *svc_cap = &hwdev->cfg_mgmt->svc_cap; + struct tag_fw_update_bat_l3i_entry *smf_entry = NULL; + const u32 cache_line = FW_UPDATE_CHIP_CACHELINE; + u32 smf_id, smf_enabled_num; + u32 scale, block_size; + u32 i, block_num; + u32 buf_off; + u8 *buf = NULL, *buf_end = NULL; + u8 *va = NULL, *va_end = NULL; + + if (unlikely(!data || data_size == 0 || + (data_size % cache_line != 0) || + data_size > FW_UPDATE_DDR_MAX)) + return -EINVAL; + + smf_enabled_num = handle->smf_enabled_num; + scale = svc_cap->smf_max_num / smf_enabled_num; + block_size = cache_line * scale; + block_num = (data_size + block_size - 1) / block_size; + + for (i = 0; i < block_num; i++) { + smf_id = handle->smf_enabled[i % smf_enabled_num]; + smf_entry = &handle->bat_l3i_entries[smf_id]; + buf = (u8 *)smf_entry->buf_va; + buf_end = buf + smf_entry->buf_size; + + buf_off = i / smf_enabled_num * block_size; + va = (u8 
*)smf_entry->buf_va + buf_off; + va_end = va + block_size; + if (va_end > buf_end) + va_end = buf_end; + + while (va < va_end) { + *(u32 *)va = cpu_to_be32(*(u32 *)data); + va += sizeof(u32); + data += sizeof(u32); + } + } + + return 0; +} + +/** + * @brief hinic5_bat_l3i_clean - clean data in L3I buffer + * @param hwdev: device pointer to hwdev + */ +STATIC void hinic5_bat_l3i_clean(const struct hinic5_hwdev *hwdev) +{ + struct tag_fw_update_handle *handle = (struct tag_fw_update_handle *)hwdev->fw_update_hdl; + struct tag_fw_update_bat_l3i_entry *smf_entry = NULL; + u32 i, smf_id; + + if (!handle) + return; + + for (i = 0; i < handle->smf_enabled_num; i++) { + smf_id = handle->smf_enabled[i]; + smf_entry = &handle->bat_l3i_entries[smf_id]; + memset(smf_entry->buf_va, 0, smf_entry->buf_size); + } +} + +static inline bool fw_section_data_valid(struct hinic5_hwdev *hwdev, + const struct fw_section_data *section) +{ + u32 fw_img_hdr_size = get_device_capablity(hwdev)->fw_update_cap.fw_img_hdr_size; + /* for compatibility */ + if (fw_img_hdr_size == 0) + fw_img_hdr_size = FW_SEC_HDR_SIZE; + + return section->verified == 1 && + section->data_off == section->data_size && + section->data_size >= fw_img_hdr_size; +} + +static int hot_active_fw_check(struct hinic5_hwdev *hwdev, + struct fw_update_context *context) +{ + struct fw_section_data *sec_text = &context->sec_text; + struct fw_section_data *sec_phy = &context->sec_phy; + const bool has_phy_section = sec_phy->data_size != 0; + + if (!fw_section_data_valid(hwdev, sec_text)) { + pr_err("fw_update: invalid tile text section, off %u, size %u, verify %u\n", + sec_text->data_off, sec_text->data_size, sec_text->verified); + return -EINVAL; + } + + if (has_phy_section && !fw_section_data_valid(hwdev, sec_phy)) { + pr_err("fw_update: invalid phy section, off %u, size %u, verify %u\n", + sec_phy->data_off, sec_phy->data_size, sec_phy->verified); + return -EINVAL; + } + + return 0; +} + +static int hot_active_fw_prepare(struct 
hinic5_hwdev *hwdev, + struct fw_update_context *context) +{ + struct fw_section_data *sec_text = &context->sec_text; + struct fw_section_data *sec_phy = &context->sec_phy; + const bool has_phy_section = sec_phy->data_size != 0; + const u32 cache_line = FW_UPDATE_CHIP_CACHELINE; + u32 data_len, data_len_aligned, data_off, fw_img_hdr_size; + u8 *data = NULL; + int ret = 0; + + fw_img_hdr_size = get_device_capablity(hwdev)->fw_update_cap.fw_img_hdr_size; + if (fw_img_hdr_size == 0) { + /* for compatibility */ + fw_img_hdr_size = FW_SEC_HDR_SIZE; + } + + data_len = sec_text->data_size - fw_img_hdr_size; + if (has_phy_section) + data_len += sec_phy->data_size - fw_img_hdr_size; + + if (data_len % cache_line != 0) { + data_len_aligned = (data_len + cache_line - 1) / cache_line * cache_line; + if (data_len_aligned > FW_UPDATE_DDR_MAX) { + sdk_err(hwdev->dev_hdl, "fw_update: update data too large, size 0x%x, aligned size 0x%x\n", + data_len, data_len_aligned); + return -EINVAL; + } + } else { + data_len_aligned = data_len; + } + + sdk_info(hwdev->dev_hdl, "fw_update: update data size 0x%x, aligned size 0x%x\n", + data_len, data_len_aligned); + + data = vzalloc(data_len_aligned); + if (!data) { + return -ENOMEM; + } + + memcpy(data, + sec_text->data + fw_img_hdr_size, + sec_text->data_size - fw_img_hdr_size); + + if (has_phy_section && sec_phy->data_size > fw_img_hdr_size) { + data_off = sec_text->data_size - fw_img_hdr_size; + memcpy(data + data_off, + sec_phy->data + fw_img_hdr_size, + sec_phy->data_size - fw_img_hdr_size); + } + + ret = hinic5_bat_l3i_store(hwdev, data, data_len_aligned); + if (ret != 0) + sdk_err(hwdev->dev_hdl, "fw_update: hinic5_bat_l3i_store fail, data len %u\n", + data_len_aligned); + + return ret; +} + +int hinic5_fw_update_cmd_hot_active(void *hwdev_hdl, struct hinic5_mt_cmd_info *cmd_info) +{ + struct hinic5_hwdev *hwdev = (struct hinic5_hwdev *)hwdev_hdl; + struct cmd_hot_active_fw *hot_active = (struct cmd_hot_active_fw *)(cmd_info->buf_in); + 
struct fw_update_context *context = NULL; + int ret = 0; + + if (cmd_info->in_size < sizeof(struct cmd_hot_active_fw)) { + pr_err("fw_update: invalid argument size %u\n", cmd_info->in_size); + return -EINVAL; + } + + if (hot_active->type != FW_HOW_ACTIVE_TYPE_NPU) + return mgmt_cmd_update_fw_op(hwdev, cmd_info); + + if (!hinic5_fw_update_ddr_enabled(hwdev_hdl)) { + sdk_info(hwdev->dev_hdl, "fw_update: this function does not support hot update via DDR.\n"); + return mgmt_cmd_update_fw_op(hwdev, cmd_info); + } + + if (unlikely(!hwdev->fw_update_hdl)) { + sdk_err(hwdev->dev_hdl, "fw_update: DDR not ready.\n"); + return -EINVAL; + } + + ret = fw_update_context_get(hwdev, &context); + if (ret != 0) + return ret; + + ret = hot_active_fw_check(hwdev, context); + if (ret != 0) + goto fail; + + ret = hot_active_fw_prepare(hwdev, context); + if (ret != 0) + goto fail; + + ret = mgmt_cmd_update_fw_op(hwdev, cmd_info); + if (ret == 0) + goto success; + + sdk_err(hwdev->dev_hdl, "fw update hot active failed, ret %d", ret); + +fail: + fw_update_context_reset(context); + +success: +#ifndef __FW_UPDATE_DEBUG__ + hinic5_bat_l3i_clean(hwdev); +#endif + fw_update_context_put(hwdev); + return ret; +} diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_fw_update.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_fw_update.h new file mode 100644 index 00000000..a055fd2d --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_fw_update.h @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2024 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_FW_UPDATE_H +#define HINIC5_FW_UPDATE_H + +#include "hinic5_hwif_inner.h" +#include "hinic5_hw_cfg.h" +#include "hinic5_hw_mt.h" + +#define FW_UPDATE_DDR_MAX (1536 * 1024) /* 1.5 MB */ + +#define FW_UPDATE_CHIP_CACHELINE 256 + +/* Reference: updatefw.h */ +#define FW_HOW_ACTIVE_TYPE_NPU 2 + +struct tag_fw_update_bat_l3i_entry { + u32 page_order; + u32 buf_size; + void *buf_va; + 
dma_addr_t buf_pa; +}; + +struct tag_fw_update_handle { + struct hinic5_hwdev *hwdev; + struct device *dev; + + u8 gpa_check_enable; + + u8 smf_enabled[CHIP_SMF_NUM_MAX]; + u32 smf_enabled_num; + + struct tag_fw_update_bat_l3i_entry bat_l3i_entries[CHIP_SMF_NUM_MAX]; +}; + +struct fw_section_data { + u8 *data; + u32 data_cap; + u32 data_size; + u32 data_off; + u32 verified : 1; + u32 rsvd : 31; +}; + +struct fw_update_context { + u32 update_started : 1; + u32 rsvd : 31; + struct fw_section_data sec_text; + struct fw_section_data sec_phy; +}; + +/** + * @brief hinic5_fw_update_init - init firmware update resource + * @param hwdev_hdl: device pointer to hwdev + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_fw_update_init(void *hwdev_hdl); + +/** + * @brief hinic5_fw_update_deinit - deinit firmware update resource + * @param hwdev_hdl: device pointer to hwdev + */ +void hinic5_fw_update_deinit(void *hwdev_hdl); + +/** + * @brief hinic5_fw_update_free_context - free update context + * @param context_hdl: pointer to update context + */ +void hinic5_fw_update_free_context(void *update_context_hdl); + +/** + * @brief hinic5_fw_update_ddr_enabled - + * whether L3I is enabled to support firmware's hot update + * @param hwdev_hdl: device pointer to hwdev + * @retval true: This function is enables L3I + * @retval false: This function does not enable L3I + */ +bool hinic5_fw_update_ddr_enabled(void *hwdev_hdl); + +/** + * @brief hinic5_fw_update_cmd_update - handle cmd UPDATE_FW + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_fw_update_cmd_update(void *hwdev_hdl, struct hinic5_mt_cmd_info *cmd_info); + +/** + * @brief hinic5_fw_update_cmd_update - handle cmd HOT_ACTIVE_FW + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_fw_update_cmd_hot_active(void *hwdev_hdl, struct hinic5_mt_cmd_info *cmd_info); + +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_hw_mt.c 
b/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_hw_mt.c new file mode 100644 index 00000000..2053cc64 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_hw_mt.c @@ -0,0 +1,639 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include <asm/byteorder.h> +#include "ossl_knl.h" +#include "hinic5_mt.h" +#include "hinic5_crm.h" +#include "hinic5_hw.h" +#include "comm_defs.h" +#include "mpu_inband_cmd.h" +#include "hinic5_fw_update.h" +#include "hinic5_hw_mt.h" + +#define HINIC5_CMDQ_BUF_MAX_SIZE 2048U +#define DW_WIDTH 4 + +#define MSG_MAX_IN_SIZE (2048 * 1024) +#define MSG_MAX_OUT_SIZE (2048 * 1024) +#define API_CSR_MAX_RD_LEN (4 * 1024 * 1024) + +/* completion timeout interval, unit is millisecond */ +#define MGMT_MSG_UPDATE_TIMEOUT 200000 +#define EMU_TIMEOUT_MULTIPLE 2 // emu场景下超时时间倍数 + +void free_buff_in(void *hwdev, const struct msg_module *nt_msg, void *buf_in) +{ + if (!buf_in) + return; + + if (nt_msg->module == SEND_TO_NPU) + hinic5_free_cmd_buf(hwdev, buf_in); + else + kfree(buf_in); +} + +void free_buff_out(void *hwdev, const struct msg_module *nt_msg, + void *buf_out) +{ + if (!buf_out) + return; + + if (nt_msg->module == SEND_TO_NPU && + nt_msg->npu_cmd.direct_resp == 0) + hinic5_free_cmd_buf(hwdev, buf_out); + else + kfree(buf_out); +} + +int alloc_buff_in(void *hwdev, const struct msg_module *nt_msg, + u32 in_size, void **buf_in) +{ + void *msg_buf = NULL; + + if (in_size == 0) + return 0; + + if (nt_msg->module == SEND_TO_NPU) { + struct hinic5_cmd_buf *cmd_buf = NULL; + + if (in_size > HINIC5_CMDQ_BUF_MAX_SIZE) { + pr_err("Cmdq in size(%u) more than 2KB\n", in_size); + return -ENOMEM; + } + + cmd_buf = hinic5_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + pr_err("Alloc cmdq cmd buffer failed in %s\n", + __func__); + return -ENOMEM; + } + msg_buf = cmd_buf->buf; + *buf_in = (void *)cmd_buf; + cmd_buf->size = (u16)in_size; + } else { + if (in_size > MSG_MAX_IN_SIZE) { + 
pr_err("In size(%u) more than 2M\n", in_size); + return -ENOMEM; + } + msg_buf = kzalloc(in_size, GFP_KERNEL); + *buf_in = msg_buf; + } + if (!(*buf_in)) { + pr_err("Alloc buffer in failed\n"); + return -ENOMEM; + } + + if (copy_from_user(msg_buf, nt_msg->in_buf, in_size) != 0) { + pr_err("%s:%d: Copy from user failed\n", + __func__, __LINE__); + free_buff_in(hwdev, nt_msg, *buf_in); + return -EFAULT; + } + + return 0; +} + +int alloc_buff_out(void *hwdev, const struct msg_module *nt_msg, + u32 out_size, void **buf_out) +{ + if (out_size == 0) + return 0; + + if (nt_msg->module == SEND_TO_NPU && + nt_msg->npu_cmd.direct_resp == 0) { + struct hinic5_cmd_buf *cmd_buf = NULL; + + if (out_size > HINIC5_CMDQ_BUF_MAX_SIZE) { + pr_err("Cmdq out size(%u) more than 2KB\n", out_size); + return -ENOMEM; + } + + cmd_buf = hinic5_alloc_cmd_buf(hwdev); + *buf_out = (void *)cmd_buf; + } else { + if (out_size > MSG_MAX_OUT_SIZE) { + pr_err("out size(%u) more than 2M\n", out_size); + return -ENOMEM; + } + *buf_out = kzalloc(out_size, GFP_KERNEL); + } + if (!(*buf_out)) { + pr_err("Alloc buffer out failed\n"); + return -ENOMEM; + } + + return 0; +} + +int copy_buf_out_to_user(const struct msg_module *nt_msg, + u32 out_size, void *buf_out) +{ + int ret = 0; + void *msg_out = NULL; + + if (nt_msg->module == SEND_TO_NPU && + nt_msg->npu_cmd.direct_resp == 0) + msg_out = ((struct hinic5_cmd_buf *)buf_out)->buf; + else + msg_out = buf_out; + + if (copy_to_user(nt_msg->out_buf, msg_out, out_size) != 0) + ret = -EFAULT; + + return ret; +} + +int get_func_type(struct hinic5_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, const u32 *out_size) +{ + u16 func_type; + + if (*out_size != sizeof(u16) || !buf_out) { + pr_err("Unexpect out buf size from user :%u, expect: %lu\n", + *out_size, sizeof(u16)); + return -EFAULT; + } + + func_type = hinic5_func_type(hinic5_get_sdk_hwdev_by_lld(lld_dev)); + + *(u16 *)buf_out = func_type; + return 0; +} + +int get_func_id(struct 
hinic5_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, const u32 *out_size) +{ + u16 func_id; + + if (*out_size != sizeof(u16) || !buf_out) { + pr_err("Unexpect out buf size from user :%u, expect: %lu\n", + *out_size, sizeof(u16)); + return -EFAULT; + } + + func_id = hinic5_global_func_id(hinic5_get_sdk_hwdev_by_lld(lld_dev)); + *(u16 *)buf_out = func_id; + + return 0; +} + +int get_hw_driver_stats(struct hinic5_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, const u32 *out_size) +{ + return hinic5_dbg_get_hw_stats(hinic5_get_sdk_hwdev_by_lld(lld_dev), + buf_out, out_size); +} + +int clear_hw_driver_stats(struct hinic5_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, const u32 *out_size) +{ + u16 size; + + size = hinic5_dbg_clear_hw_stats(hinic5_get_sdk_hwdev_by_lld(lld_dev)); + if (*out_size != size) { + pr_err("Unexpect out buf size from user :%u, expect: %u\n", + *out_size, size); + return -EFAULT; + } + + return 0; +} + +int get_self_test_result(struct hinic5_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, const u32 *out_size) +{ + u32 result; + + if (*out_size != sizeof(u32) || !buf_out) { + pr_err("Unexpect out buf size from user :%u, expect: %lu\n", + *out_size, sizeof(u32)); + return -EFAULT; + } + + result = hinic5_get_self_test_result(hinic5_get_sdk_hwdev_by_lld(lld_dev)); + *(u32 *)buf_out = result; + + return 0; +} + +int get_chip_faults_stats(struct hinic5_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, const u32 *out_size) +{ + u32 offset = 0; + struct nic_cmd_chip_fault_stats *fault_info = NULL; + + if (!buf_in || !buf_out || *out_size != sizeof(*fault_info) || + in_size != sizeof(*fault_info)) { + pr_err("Unexpect out buf size from user: %u, expect: %lu\n", + *out_size, sizeof(*fault_info)); + return -EFAULT; + } + fault_info = (struct nic_cmd_chip_fault_stats *)buf_in; + offset = fault_info->offset; + + fault_info = (struct nic_cmd_chip_fault_stats 
*)buf_out; + hinic5_get_chip_fault_stats(hinic5_get_sdk_hwdev_by_lld(lld_dev), + fault_info->chip_fault_stats, offset); + + return 0; +} + +static u32 get_mgmt_cmd_default_timeout(void *hwdev, u8 mod, u16 cmd) +{ + u8 hw_type = hinic5_get_hw_type(hwdev); + u32 timeout = 0; /* default mbox/apichain timeout time */ + + if (mod == HINIC5_MOD_COMM && + (cmd == COMM_MGMT_CMD_UPDATE_FW || + cmd == COMM_MGMT_CMD_UPDATE_BIOS || + cmd == COMM_MGMT_CMD_ACTIVE_FW || + cmd == COMM_MGMT_CMD_SWITCH_CFG || + cmd == COMM_MGMT_CMD_HOT_ACTIVE_FW)) + timeout = MGMT_MSG_UPDATE_TIMEOUT; + + if (unlikely(hw_type == HINIC5_HW_TYPE_EMU)) + timeout *= EMU_TIMEOUT_MULTIPLE; + + return timeout; +} + +static int api_csr_read(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + struct up_log_msg_st *up_log_msg = (struct up_log_msg_st *)buf_in; + int ret = 0; + u32 rd_cnt = 0; + u32 offset = 0; + u8 node_id; + u32 i; + + if (!buf_in || !buf_out || in_size < sizeof(*up_log_msg) || + *out_size < up_log_msg->rd_len || up_log_msg->rd_len % DW_WIDTH != 0) + return -EINVAL; + + node_id = (u8)nt_msg->mpu_cmd.mod; + + rd_cnt = up_log_msg->rd_len / DW_WIDTH; + + for (i = 0; i < rd_cnt; i++) { + ret = hinic5_api_csr_rd32(hwdev, node_id, + up_log_msg->addr + offset, + (u32 *)((u8 *)buf_out + offset)); + if (ret != 0) { + pr_err("Csr rd fail, err: %d, node_id: %u, csr addr: 0x%08x\n", + ret, node_id, up_log_msg->addr + offset); + return ret; + } + offset += DW_WIDTH; + } + *out_size = up_log_msg->rd_len; + + return ret; +} + +static int api_csr_write(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, + u32 *out_size) +{ + struct csr_write_st *csr_write_msg = (struct csr_write_st *)buf_in; + int ret = 0; + u32 rd_cnt = 0; + u32 offset = 0; + u8 node_id; + u32 i; + u8 *data = NULL; + + if (!buf_in || in_size < sizeof(*csr_write_msg) || csr_write_msg->rd_len == 0 || + csr_write_msg->rd_len > API_CSR_MAX_RD_LEN || 
csr_write_msg->rd_len % DW_WIDTH != 0) + return -EINVAL; + + node_id = (u8)nt_msg->mpu_cmd.mod; + + rd_cnt = csr_write_msg->rd_len / DW_WIDTH; + + data = kzalloc(csr_write_msg->rd_len, GFP_KERNEL); + if (!data) { + return -ENOMEM; + } + if (copy_from_user(data, (void *)csr_write_msg->data, csr_write_msg->rd_len) != 0) { + pr_err("Copy information from user failed\n"); + kfree(data); + return -EFAULT; + } + + for (i = 0; i < rd_cnt; i++) { + ret = hinic5_api_csr_wr32(hwdev, node_id, + csr_write_msg->addr + offset, + *((u32 *)(data + offset))); + if (ret != 0) { + pr_err("Csr wr fail, ret: %d, node_id: %u, csr addr: 0x%08x\n", + ret, csr_write_msg->addr + offset, node_id); + kfree(data); + return ret; + } + offset += DW_WIDTH; + } + + *out_size = 0; + kfree(data); + return ret; +} + +int hinic5_fw_update_cmd(void *hwdev, struct hinic5_mt_cmd_info *cmd_info) +{ + if (cmd_info->cmd == COMM_MGMT_CMD_UPDATE_FW) + return hinic5_fw_update_cmd_update(hwdev, cmd_info); + + return hinic5_fw_update_cmd_hot_active(hwdev, cmd_info); +} + +int send_mbox_to_mgmt(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout) +{ + struct hinic5_mt_cmd_info cmd_info = { 0 }; + + if (mod == HINIC5_MOD_COMM && (cmd == COMM_MGMT_CMD_UPDATE_FW || + cmd == COMM_MGMT_CMD_HOT_ACTIVE_FW)) { + cmd_info.mod = mod; + cmd_info.cmd = cmd; + cmd_info.buf_in = buf_in; + cmd_info.in_size = in_size; + cmd_info.buf_out = buf_out; + cmd_info.out_size = out_size; + cmd_info.timeout = timeout; + + return hinic5_fw_update_cmd(hwdev, &cmd_info); + } + + return hinic5_msg_to_mgmt_sync(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout, + HINIC5_CHANNEL_DEFAULT); +} + +int send_to_mpu(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + enum mt_api_type api_type; + enum hinic5_mod_type mod; + u32 timeout; + int ret = 0; + u16 cmd; + u16 temp_out_size; + + if (*out_size <= UINT16_MAX) + temp_out_size = 
(u16)*out_size; + else + return -EINVAL; + + api_type = (enum mt_api_type)nt_msg->mpu_cmd.api_type; + mod = (enum hinic5_mod_type)nt_msg->mpu_cmd.mod; + cmd = nt_msg->mpu_cmd.cmd; + timeout = nt_msg->timeout; + if (timeout == 0) + timeout = get_mgmt_cmd_default_timeout(hwdev, mod, cmd); + + switch (api_type) { + case API_TYPE_MBOX: + ret = send_mbox_to_mgmt(hwdev, mod, cmd, buf_in, (u16)in_size, + buf_out, &temp_out_size, timeout); + *out_size = temp_out_size; + break; + case API_TYPE_CLP: + ret = hinic5_clp_to_mgmt(hwdev, mod, cmd, buf_in, (u16)in_size, + buf_out, &temp_out_size); + *out_size = temp_out_size; + break; + case API_TYPE_API_CHAIN_BYPASS: + if (nt_msg->mpu_cmd.cmd == API_CSR_WRITE) { + ret = api_csr_write(hwdev, nt_msg, buf_in, in_size, + buf_out, out_size); + } else { + ret = api_csr_read(hwdev, nt_msg, buf_in, in_size, + buf_out, out_size); + } + break; + case API_TYPE_API_CHAIN_TO_MPU: + ret = hinic5_msg_to_mgmt_sync(hwdev, mod, cmd, buf_in, (u16)in_size, + buf_out, &temp_out_size, timeout, + HINIC5_CHANNEL_DEFAULT); + *out_size = temp_out_size; + break; + default: + pr_err("Unsupported api_type %u\n", api_type); + return -EINVAL; + } + + if (ret != 0) + pr_err("Message to mgmt cpu return fail, api_type: %d, mod: %d, cmd: %u\n", + api_type, mod, cmd); + return ret; +} + +int send_to_npu(void *hwdev, const struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + int ret = 0; + u8 cmd; + enum hinic5_mod_type mod; + u32 timeout; + + mod = (enum hinic5_mod_type)nt_msg->npu_cmd.mod; + cmd = nt_msg->npu_cmd.cmd; + timeout = nt_msg->timeout; + + if (nt_msg->npu_cmd.direct_resp != 0) { + ret = hinic5_cmdq_direct_resp(hwdev, mod, cmd, + buf_in, buf_out, timeout, + HINIC5_CHANNEL_DEFAULT); + if (ret != 0) + pr_err("Send direct cmdq failed, err: %d\n", ret); + } else { + ret = hinic5_cmdq_detail_resp(hwdev, mod, cmd, buf_in, buf_out, + NULL, timeout, HINIC5_CHANNEL_DEFAULT); + if (ret != 0) + pr_err("Send detail cmdq failed, 
err: %d\n", ret); + } + + return ret; +} + +static int sm_rd16(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out) +{ + u16 val1; + int ret; + + ret = hinic5_sm_ctr_rd16(hwdev, node, instance, id, &val1); + if (ret != 0) { + pr_err("Get sm ctr information (16 bits)failed!\n"); + val1 = 0xffff; + } + + buf_out->val1 = val1; + + return ret; +} + +static int sm_rd32(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out) +{ + u32 val1; + int ret; + + ret = hinic5_sm_ctr_rd32(hwdev, node, instance, id, &val1); + if (ret != 0) { + pr_err("Get sm ctr information (32 bits)failed!\n"); + val1 = ~0; + } + + buf_out->val1 = val1; + + return ret; +} + +static int sm_rd32_clear(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out) +{ + u32 val1; + int ret; + + ret = hinic5_sm_ctr_rd32_clear(hwdev, node, instance, id, &val1); + if (ret != 0) { + pr_err("Get sm ctr clear information(32 bits) failed!\n"); + val1 = ~0; + } + + buf_out->val1 = val1; + + return ret; +} + +static int sm_rd64_pair(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out) +{ + u64 val1 = 0, val2 = 0; + int ret; + + ret = hinic5_sm_ctr_rd64_pair(hwdev, node, instance, id, &val1, &val2); + if (ret != 0) { + pr_err("Get sm ctr information (64 bits pair)failed!\n"); + val1 = ~0; + val2 = ~0; + } + + buf_out->val1 = val1; + buf_out->val2 = val2; + + return ret; +} + +static int sm_rd64_pair_clear(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out) +{ + u64 val1 = 0; + u64 val2 = 0; + int ret; + + ret = hinic5_sm_ctr_rd64_pair_clear(hwdev, node, instance, id, &val1, + &val2); + if (ret != 0) { + pr_err("Get sm ctr clear information(64 bits pair) failed!\n"); + val1 = ~0; + val2 = ~0; + } + + buf_out->val1 = val1; + buf_out->val2 = val2; + + return ret; +} + +static int sm_rd64(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out) +{ + u64 val1; + int ret; + + ret = hinic5_sm_ctr_rd64(hwdev, node, 
instance, id, &val1); + if (ret != 0) { + pr_err("Get sm ctr information (64 bits)failed!\n"); + val1 = ~0; + } + buf_out->val1 = val1; + + return ret; +} + +static int sm_rd64_clear(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out) +{ + u64 val1; + int ret; + + ret = hinic5_sm_ctr_rd64_clear(hwdev, node, instance, id, &val1); + if (ret != 0) { + pr_err("Get sm ctr clear information(64 bits) failed!\n"); + val1 = ~0; + } + buf_out->val1 = val1; + + return ret; +} + +typedef int (*sm_module)(void *hwdev, u32 id, u8 instance, + u8 node, struct sm_out_st *buf_out); + +struct sm_module_handle { + enum sm_cmd_type sm_cmd_name; + sm_module sm_func; +}; + +const struct sm_module_handle sm_module_cmd_handle[] = { + {SM_CTR_RD16, sm_rd16}, + {SM_CTR_RD32, sm_rd32}, + {SM_CTR_RD64_PAIR, sm_rd64_pair}, + {SM_CTR_RD64, sm_rd64}, + {SM_CTR_RD32_CLEAR, sm_rd32_clear}, + {SM_CTR_RD64_PAIR_CLEAR, sm_rd64_pair_clear}, + {SM_CTR_RD64_CLEAR, sm_rd64_clear} +}; + +int send_to_sm(void *hwdev, const struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + struct sm_in_st *sm_in = buf_in; + struct sm_out_st *sm_out = buf_out; + u32 msg_formate = nt_msg->msg_formate; + int index, num_cmds = ARRAY_LEN(sm_module_cmd_handle); + int ret = 0; + + if (!buf_in || !buf_out || in_size != sizeof(*sm_in) || *out_size != sizeof(*sm_out)) { + pr_err("Unexpect out buf size :%u, in buf size: %u\n", + *out_size, in_size); + return -EINVAL; + } + + for (index = 0; index < num_cmds; index++) { + if (msg_formate != sm_module_cmd_handle[index].sm_cmd_name) + continue; + + ret = sm_module_cmd_handle[index].sm_func(hwdev, (u32)sm_in->id, + (u8)sm_in->instance, + (u8)sm_in->node, sm_out); + break; + } + + if (index == num_cmds) { + pr_err("Can't find callback for %d\n", msg_formate); + return -EINVAL; + } + + if (ret != 0) + pr_err("Get sm information fail, id:%d, instance:%d, node:%d\n", + sm_in->id, sm_in->instance, sm_in->node); + + *out_size = 
sizeof(struct sm_out_st); + + return ret; +} diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_hw_mt.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_hw_mt.h new file mode 100644 index 00000000..824a9cc0 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_hw_mt.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_HW_MT_H +#define HINIC5_HW_MT_H + +#include "hinic5_mt.h" +#include "hinic5_lld.h" + +struct sm_in_st { + int node; + int id; + int instance; +}; + +struct sm_out_st { + u64 val1; + u64 val2; +}; + +struct up_log_msg_st { + u32 rd_len; + u32 addr; +}; + +struct csr_write_st { + u32 rd_len; + u32 addr; + u8 *data; +}; + +struct hinic5_mt_cmd_info { + u8 mod; + u16 cmd; + void *buf_in; + u16 in_size; + void *buf_out; + u16 *out_size; + u32 timeout; +}; + +int get_func_type(struct hinic5_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, const u32 *out_size); + +int get_func_id(struct hinic5_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, const u32 *out_size); + +int get_hw_driver_stats(struct hinic5_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, const u32 *out_size); + +int clear_hw_driver_stats(struct hinic5_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, const u32 *out_size); + +int get_self_test_result(struct hinic5_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, const u32 *out_size); + +int get_chip_faults_stats(struct hinic5_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, const u32 *out_size); + +#ifndef __WIN__ +/** + * @brief alloc input buffer + * @param hwdev: device pointer to hwdev + * @param nt_msg: message module struct + * @param in_size: input buffer size + * @param buf_in: input buffer + * + * @return + * @retval zero: success + * @retval non-zero: failure + */ + +int alloc_buff_in(void 
*hwdev, const struct msg_module *nt_msg, u32 in_size, void **buf_in); + +/** + * @brief alloc output buffer + * @param hwdev: device pointer to hwdev + * @param nt_msg: message module struct + * @param out_size: input buffer size + * @param buf_out: output buffer + * + * @return + * @retval zero: success + * @retval non-zero: failure + */ + +int alloc_buff_out(void *hwdev, const struct msg_module *nt_msg, u32 out_size, void **buf_out); + +/** + * @brief free input buffer + * @param hwdev: device pointer to hwdev + * @param nt_msg: message module struct + * @param buf_in: input buffer + * + * @return + * @retval zero: success + * @retval non-zero: failure + */ + +void free_buff_in(void *hwdev, const struct msg_module *nt_msg, void *buf_in); + +/** + * @brief free output buffer + * @param hwdev: device pointer to hwdev + * @param nt_msg: message module struct + * @param buf_out: output buffer + * + * @return + * @retval zero: success + * @retval non-zero: failure + */ + +void free_buff_out(void *hwdev, const struct msg_module *nt_msg, void *buf_out); + +/** + * @brief copy from message buffer to user buffer + * @param nt_msg: message module struct + * @param out_size: message buffer size + * @param buf_out: message buffer + * + * @return + * @retval zero: success + * @retval non-zero: failure + */ + +int copy_buf_out_to_user(const struct msg_module *nt_msg, u32 out_size, void *buf_out); +#endif +/** + * @brief send message to mpu + * @param hwdev: device pointer to hwdev + * @param nt_msg: message module struct + * @param buf_in: input buffer + * @param in_size: input buffer size + * @param buf_out: output buffer + * @param out_size: output buffer size + * + * @return + * @retval zero: success + * @retval non-zero: failure + */ + +int send_to_mpu(void *hwdev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size); + +/** + * @brief send message to npu + * @param hwdev: device pointer to hwdev + * @param nt_msg: message module struct + * 
@param buf_in: input buffer + * @param in_size: input buffer size + * @param buf_out: output buffer + * @param out_size: output buffer size + * + * @return + * @retval zero: success + * @retval non-zero: failure + */ + +int send_to_npu(void *hwdev, const struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size); + +/** + * @brief send message to sm + * @param hwdev: device pointer to hwdev + * @param nt_msg: message module struct + * @param buf_in: input buffer + * @param in_size: input buffer size + * @param buf_out: output buffer + * @param out_size: output buffer size + * + * @return + * @retval zero: success + * @retval non-zero: failure + */ + +int send_to_sm(void *hwdev, const struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size); + +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_nictool.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_nictool.c new file mode 100644 index 00000000..f24b5e2b --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_nictool.c @@ -0,0 +1,1298 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <net/sock.h> +#include <linux/cdev.h> +#include <linux/device.h> +#include <linux/interrupt.h> + +#include "comm_defs.h" +#include "ossl_knl.h" +#include "mpu_inband_cmd.h" +#include "hinic5_mt.h" +#include "hinic5_crm.h" +#include "hinic5_hw.h" +#include "hinic5_hw_cfg.h" +#include "hinic5_hwdev.h" +#include "hinic5_lld.h" +#include "hinic5_lld_inner.h" +#include "hinic5_hw_mt.h" +#include "hinic5_dev_mgmt.h" +#include "hinic5_wq.h" +#include "sdk_pub_cmd.h" +#include "hisec_pub_cmd.h" +#include "hinic5_comm_cmd.h" +#include "hinic5_cmdq.h" +#include "hinic5_sdk_attack.h" +#include "hinic5_nictool.h" + +static int g_nictool_ref_cnt; + +static dev_t g_dev_id = {0}; +static struct class *g_nictool_class; +static 
struct cdev g_nictool_cdev; + +#define HINIC5_MAX_BUF_SIZE (2048 * 1024) +#define HINIC5_S_TO_US_UNIT 1000000 + +void *g_card_node_array[MAX_CARD_NUM] = {0}; +void *g_card_vir_addr[MAX_CARD_NUM] = {0}; +u64 g_card_phy_addr[MAX_CARD_NUM] = {0}; +int card_id; + +#ifdef __HIFC__ +#define HIADM3_DEV_PATH "/dev/hifc_dev" +#define HIADM3_DEV_CLASS "hifc_class" +#define HIADM3_DEV_NAME "hifc_dev" +#else +#define HIADM3_DEV_PATH "/dev/hinic5_nictool_dev" +#define HIADM3_DEV_CLASS "hinic5_nictool_class" +#define HIADM3_DEV_NAME "hinic5_nictool_dev" +#endif + +typedef int (*hw_driv_module)(struct hinic5_lld_dev *lld_dev, + const void *buf_in, u32 in_size, void *buf_out, u32 *out_size); +struct hw_drv_module_handle { + u32 driv_cmd_name; + hw_driv_module driv_func; +}; + +static bool check_cmd_compatible(u32 in_size, u32 expect_in_size, + u32 out_size, u32 expect_out_size) +{ + if (unlikely(in_size != expect_in_size)) { + pr_err("Incompatible hw driver cmd, in size %u, expect %u\n", + in_size, expect_in_size); + return false; + } + + if (unlikely(out_size != expect_out_size)) { + pr_err("Incompatible hw driver cmd, out size %u, expect %u\n", + out_size, expect_out_size); + return false; + } + + return true; +} + +static int get_single_card_info(struct hinic5_lld_dev *lld_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + if (!buf_in || in_size != sizeof(struct card_info)) { + pr_err("buf_in is NULL, or in_size(%u) != expect_in_size(%lu)\n", + in_size, sizeof(struct card_info)); + return -EINVAL; + } + + if (!buf_out || *out_size != sizeof(struct card_info)) { + pr_err("buf_out is NULL, or out_size(%u) != expect_out_size(%lu)\n", + *out_size, sizeof(struct card_info)); + return -EINVAL; + } + + hinic5_get_card_info(hinic5_get_sdk_hwdev_by_lld(lld_dev), buf_in, buf_out); + + return 0; +} + +static int is_driver_in_vm(struct hinic5_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + bool in_host = false; + + if 
(!buf_out || (*out_size != sizeof(u8))) { + pr_err("buf_out is NULL, or out_size != %lu\n", sizeof(u8)); + return -EINVAL; + } + + in_host = hinic5_is_in_host(); + if (in_host) + *((u8 *)buf_out) = 0; + else + *((u8 *)buf_out) = 1; + + return 0; +} + +static int get_all_chip_id_cmd(struct hinic5_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + if (*out_size != sizeof(struct nic_card_id) || !buf_out) { + pr_err("Invalid parameter: out_buf_size %u, expect %lu\n", + *out_size, sizeof(struct nic_card_id)); + return -EFAULT; + } + + hinic5_get_all_chip_id(buf_out); + + return 0; +} + +static int get_card_usr_api_chain_mem(int card_idx) +{ + void *tmp = NULL; + int i; + + card_id = card_idx; + if (!g_card_vir_addr[card_idx]) { + g_card_vir_addr[card_idx] = + (void *)(uintptr_t)__get_free_pages(GFP_KERNEL, + DBGTOOL_PAGE_ORDER); + if (!g_card_vir_addr[card_idx]) { + pr_err("Alloc api chain memory fail for card %d!\n", card_idx); + return -EFAULT; + } + + memset(g_card_vir_addr[card_idx], 0, + PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER)); + + g_card_phy_addr[card_idx] = + virt_to_phys(g_card_vir_addr[card_idx]); + if (g_card_phy_addr[card_idx] == 0) { + pr_err("phy addr for card %d is 0\n", card_idx); + free_pages((unsigned long)(uintptr_t)g_card_vir_addr[card_idx], + DBGTOOL_PAGE_ORDER); + g_card_vir_addr[card_idx] = NULL; + return -EFAULT; + } + + tmp = g_card_vir_addr[card_idx]; + for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) { + SetPageReserved(virt_to_page(tmp)); + tmp += PAGE_SIZE; + } + } + + return 0; +} + +static void dbgtool_knl_free_mem(u32 id) +{ + void *tmp = NULL; + int i; + + if (!g_card_vir_addr[id]) + return; + + tmp = g_card_vir_addr[id]; + for (i = 0; i < (1 << DBGTOOL_PAGE_ORDER); i++) { + ClearPageReserved(virt_to_page(tmp)); + tmp += PAGE_SIZE; + } + + free_pages((unsigned long)(uintptr_t)g_card_vir_addr[id], DBGTOOL_PAGE_ORDER); + g_card_vir_addr[id] = NULL; + g_card_phy_addr[id] = 0; +} + +static int 
card_info_param_valid(const char *dev_name, const void *buf_out, + u32 buf_out_size, int *id) +{ + int err; + + if (!buf_out || buf_out_size != sizeof(struct hinic5_card_func_info)) { + pr_err("Invalid parameter: out_buf_size %u, expect %lu\n", + buf_out_size, sizeof(struct hinic5_card_func_info)); + return -EINVAL; + } + + err = memcmp(dev_name, HINIC5_CHIP_NAME, strlen(HINIC5_CHIP_NAME)); + if (err != 0) { + pr_err("Invalid chip name %s\n", dev_name); + return err; + } + + err = sscanf(dev_name, HINIC5_CHIP_NAME "%d", id); + if (err != 1) { + pr_err("Failed to get card id\n"); + return err; + } + + if (*id >= MAX_CARD_NUM || *id < 0) { + pr_err("chip id %d exceed limit[0-%d]\n", + *id, MAX_CARD_NUM - 1); + return -EINVAL; + } + + return 0; +} + +static void hinic5_get_card_func_info_by_card_name(const char *chip_name, + struct hinic5_card_func_info *card_func) +{ + struct list_head *chip_list = get_hinic5_chip_list(); + struct card_node *chip_node = NULL; + struct func_dev_info *dev_info = NULL; + struct hinic5_adev *adev = NULL; + + card_func->num_pf = 0; + + lld_hold(); + + list_for_each_entry(chip_node, chip_list, node) { + if (strncmp(chip_node->chip_name, chip_name, IFNAMSIZ) != 0) + continue; + + list_for_each_entry(adev, &chip_node->func_list, node) { + if (hinic5_func_type(adev->hwdev) == TYPE_VF) + continue; + + dev_info = &card_func->dev_info[card_func->num_pf]; + dev_info->bar1_size = adev->cfg_base_len; + dev_info->bar1_phy_addr = adev->cfg_base_phy; + + dev_info->bar3_size = adev->mgmt_base_len; + dev_info->bar3_phy_addr = adev->mgmt_base_phy; + + card_func->num_pf++; + if (card_func->num_pf >= CARD_MAX_SIZE) { + lld_put(); + return; + } + } + } + + lld_put(); +} + +static int get_card_func_info(struct hinic5_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct hinic5_card_func_info *card_func_info = buf_out; + struct card_node *card_info = hinic5_get_chip_node_by_lld(lld_dev); + int err, id = 0; + + err = 
card_info_param_valid(card_info->chip_name, buf_out, *out_size, &id); + if (err != 0) + return err; + + hinic5_get_card_func_info_by_card_name(card_info->chip_name, card_func_info); + + if (card_func_info->num_pf == 0) { + pr_err("None function found for %s\n", card_info->chip_name); + return -EFAULT; + } + + err = get_card_usr_api_chain_mem(id); + if (err != 0) { + pr_err("Faile to get api chain memory for userspace %s\n", + card_info->chip_name); + return -EFAULT; + } + + card_func_info->usr_api_phy_addr = g_card_phy_addr[id]; + + return 0; +} + +static int get_pf_cap_info(struct hinic5_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct service_cap *func_cap = NULL; + struct hinic5_hwdev *hwdev = NULL; + struct card_node *card_info = hinic5_get_chip_node_by_lld(lld_dev); + struct svc_cap_info *svc_cap_info_in = (struct svc_cap_info *)buf_in; + struct svc_cap_info *svc_cap_info_out = (struct svc_cap_info *)buf_out; + + if (*out_size != sizeof(struct svc_cap_info) || in_size != sizeof(struct svc_cap_info) || + !buf_in || !buf_out) { + pr_err("Invalid parameter: out_buf_size %u, in_size: %u, expect %lu\n", + *out_size, in_size, sizeof(struct svc_cap_info)); + return -EINVAL; + } + + if (svc_cap_info_in->func_idx >= MAX_FUNCTION_NUM) { + pr_err("func_idx is illegal. 
func_idx: %u, max_num: %u\n", + svc_cap_info_in->func_idx, MAX_FUNCTION_NUM); + return -EINVAL; + } + + lld_hold(); + hwdev = (struct hinic5_hwdev *)(card_info->func_handle_array)[svc_cap_info_in->func_idx]; + if (!hwdev) { + lld_put(); + return -EINVAL; + } + + func_cap = &hwdev->cfg_mgmt->svc_cap; + memcpy(&svc_cap_info_out->cap, func_cap, sizeof(struct service_cap)); + lld_put(); + + return 0; +} + +static int get_hw_drv_version(struct hinic5_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct drv_version_info *ver_info = buf_out; + int err; + + if (!buf_out) { + pr_err("Buf_out is NULL.\n"); + return -EINVAL; + } + + if (*out_size != sizeof(*ver_info)) { + pr_err("Unexpect out buf size from user :%u, expect: %lu\n", + *out_size, sizeof(*ver_info)); + return -EINVAL; + } + + err = snprintf(ver_info->ver, sizeof(ver_info->ver), "%s %s", + HINIC5_DRV_VERSION, __TIME_STR__); + if (err < 0) + return -EINVAL; + + return 0; +} + +static int get_pf_id(struct hinic5_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct hinic5_pf_info *pf_info = NULL; + struct card_node *chip_node = hinic5_get_chip_node_by_lld(lld_dev); + u32 port_id; + int err; + + if (!chip_node) + return -ENODEV; + + if (!buf_out || (*out_size != sizeof(*pf_info)) || !buf_in || in_size != sizeof(u32)) { + pr_err("Unexpect out buf size from user :%u, expect: %lu, in size:%u\n", + *out_size, sizeof(*pf_info), in_size); + return -EINVAL; + } + + port_id = *((u32 *)buf_in); + pf_info = (struct hinic5_pf_info *)buf_out; + err = hinic5_get_pf_id(chip_node, port_id, &pf_info->pf_id, &pf_info->isvalid); + if (err != 0) + return err; + + *out_size = sizeof(*pf_info); + + return 0; +} + +static int set_frequency_reduction_ratio(struct hinic5_lld_dev *lld_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + u32 ratio; + int err; + + if (!buf_in || in_size != sizeof(u32)) { + pr_err("Unexpect out buf size 
from user in size:%u\n", in_size); + return -EINVAL; + } + + ratio = *((u32 *)buf_in); + err = hinic5_set_freq_reduce_ratio(lld_dev->hwdev, ratio); + if (err != 0) { + pr_err("set freq reduce ratio err %d\n", err); + return err; + } + + return 0; +} + +static int set_time_diff_enable(struct hinic5_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u32 enable; + int err; + + if (!lld_dev || !buf_in || in_size != sizeof(u32)) { + pr_err("Unexpect out buf size from user in size:%u\n", in_size); + return -EINVAL; + } + + enable = *((u32 *)buf_in); + err = hinic5_set_non_ptp_time_diff_en(lld_dev->hwdev, enable); + if (err != 0) { + pr_err("set time diff enable err %d\n", err); + return err; + } + + return 0; +} + +static int get_time_diff(struct hinic5_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + int err; + + if (!lld_dev || !buf_out) { + pr_err("Unexpect out buf size from user in size:%u\n", in_size); + return -EINVAL; + } + + err = hinic5_get_non_ptp_time_diff(lld_dev->hwdev, (s64 *)buf_out); + if (err != 0) { + pr_err("get time diff err %d\n", err); + return err; + } + *out_size = sizeof(s64); + return 0; +} + +static int get_cmdq_info(struct hinic5_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + u16 cmdq_id; + + if (!check_cmd_compatible(in_size, sizeof(u32), + *out_size, sizeof(struct hinic5_wq))) + return -EINVAL; + + cmdq_id = (u16)(*((u32 *)buf_in)); + return hinic5_dump_cmdq_wq(lld_dev->hwdev, cmdq_id, buf_out); +} + +typedef struct tag_cmdq_npu_dft { + u32 type; + u32 value; + u32 rsvd0; + u32 rsvd1; +} cmdq_npu_dft_s; + +static int detect_cmdq_channel(struct hinic5_lld_dev *lld_dev, + const void *buf_in, u32 in_size, void *buf_out, + u32 *out_size) +{ + struct hinic5_adev *adev = to_hinic5_adev(lld_dev); + struct hinic5_cmd_buf *cmd_buf = NULL; + int err; + u16 cmdq_id; + u64 out_param; + cmdq_npu_dft_s *cmdq_info = NULL; + + if (!buf_in || 
!buf_out || !out_size) { + sdk_err(adev->dev, "Buf_in or buf_out or out_size is NULL.\n"); + return -EINVAL; + } + + if (in_size != sizeof(u32)) { + sdk_err(adev->dev, "Unexpect in buf size from user :%u, expect: %lu\n", + in_size, sizeof(u32)); + return -EINVAL; + } + + if (*out_size != sizeof(u32)) { + sdk_err(adev->dev, "Unexpect out buf size from user :%u, expect: %lu\n", + *out_size, sizeof(struct hinic5_wq)); + return -EINVAL; + } + + cmdq_id = (u16)(*((u32 *)buf_in)); + + sdk_info(adev->dev, "debug: cmdq detect q_id=%d\n", cmdq_id); + + cmd_buf = hinic5_alloc_cmd_buf(adev->hwdev); + if (!cmd_buf) { + sdk_err(adev->hwdev, "Failed to allocate cmd buf\n"); + return -ENOMEM; + } + + /* 使用dft_npu测试命令 type = DFT_CMDQ_TYPE_NPU_CHANNEL_TEST, + * value = DFT_CMDQ_VALUE_CHANNEL_TEST_LOG 做连通性测试 + */ + cmd_buf->size = sizeof(cmdq_npu_dft_s); + cmdq_info = (cmdq_npu_dft_s *)cmd_buf->buf; + cmdq_info->type = 0; + cmdq_info->value = 0; + hinic5_cpu_to_be32(cmd_buf->buf, cmd_buf->size); + + err = hinic5_cos_id_direct_resp + (adev->hwdev, HINIC5_MOD_COMM, COMM_CMD_SEND_NPU_DFT_CMD, cmdq_id, cmd_buf, + &out_param, 0, HINIC5_CHANNEL_COMM); + if (err != 0 || out_param != 0) { + sdk_err(adev->dev, "Failed to send cmdq channel detect\n"); + err = -EFAULT; + } + + hinic5_free_cmd_buf(adev->hwdev, cmd_buf); + + *(u32 *)buf_out = 0; + *out_size = sizeof(u32); + + return err; +} + +static int sdk_attack_test(struct hinic5_lld_dev *lld_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct hinic5_adev *adev = to_hinic5_adev(lld_dev); + + if (!buf_in || !buf_out) { + sdk_err(adev->dev, "Buf_in or buf_out is NULL.\n"); + return -EINVAL; + } + + if (!out_size) { + sdk_err(adev->dev, "Unexpect in buf size from user :%u, expect: %lu\n", + in_size, sizeof(u32)); + return -EINVAL; + } + + return hinic5_sdk_attack_handler(lld_dev->hwdev, buf_in, in_size, buf_out, out_size); +} + +static int get_cmdq_wqe_desc(struct hinic5_lld_dev *lld_dev, const void *buf_in, u32 
in_size, + void *buf_out, u32 *out_size) +{ + const struct cmdq_wqe_info *info = buf_in; + + if (!check_cmd_compatible(in_size, sizeof(struct cmdq_wqe_info), + *out_size, sizeof(struct sdk_cmdq_wqe_desc))) + return -EINVAL; + + return hinic5_dump_cmdq_wqebb(lld_dev->hwdev, + (u16)info->q_id, (u16)info->wqebb_id, buf_out); +} + +/* not support fc yet */ +static int get_mbox_cnt(struct hinic5_lld_dev *lld_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + if (!buf_out) { + pr_err("buf_out is NULL"); + return -EINVAL; + } + + if (*out_size != sizeof(struct card_mbox_cnt_info)) { + pr_err("out_size != %lu\n", sizeof(struct card_mbox_cnt_info)); + return -EINVAL; + } + + hinic5_get_mbox_cnt(hinic5_get_sdk_hwdev_by_lld(lld_dev), buf_out); + + return 0; +} + +struct hw_drv_module_handle hw_driv_module_cmd_handle[] = { + {FUNC_TYPE, (hw_driv_module)get_func_type}, + {GET_FUNC_IDX, (hw_driv_module)get_func_id}, + {GET_HW_STATS, (hw_driv_module)get_hw_driver_stats}, + {CLEAR_HW_STATS, (hw_driv_module)clear_hw_driver_stats}, + {GET_SELF_TEST_RES, (hw_driv_module)get_self_test_result}, + {GET_CHIP_FAULT_STATS, (hw_driv_module)get_chip_faults_stats}, + {GET_SINGLE_CARD_INFO, (hw_driv_module)get_single_card_info}, + {IS_DRV_IN_VM, is_driver_in_vm}, + {GET_CHIP_ID, get_all_chip_id_cmd}, + {GET_CHIP_INFO, get_card_func_info}, + {GET_FUNC_CAP, get_pf_cap_info}, + {GET_DRV_VERSION, get_hw_drv_version}, + {GET_PF_ID, get_pf_id}, + {SDK_CMD_SET_FREQ_REDUCE_RATIO, set_frequency_reduction_ratio}, + {SDK_CMD_SET_TIME_DIFF_ENABLE, set_time_diff_enable}, + {SDK_CMD_GET_TIME_DIFF, get_time_diff}, + {SDK_CMD_GET_CMDQ_INFO, get_cmdq_info}, + {SDK_CMD_CMDQ_CHANNEL_DETECT, detect_cmdq_channel}, + {SDK_CMD_GET_CMDQ_WQE_DESC, get_cmdq_wqe_desc}, + {SDK_CMD_ATTACK_TEST, sdk_attack_test}, + {GET_MBOX_CNT, (hw_driv_module)get_mbox_cnt}, +}; + +static int alloc_tmp_buf(void *hwdev, struct msg_module *nt_msg, u32 in_size, + void **buf_in, u32 out_size, void **buf_out) +{ + int 
ret; + + ret = alloc_buff_in(hwdev, nt_msg, in_size, buf_in); + if (ret != 0) { + pr_err("Alloc tool cmd buff in failed\n"); + return ret; + } + + ret = alloc_buff_out(hwdev, nt_msg, out_size, buf_out); + if (ret != 0) { + pr_err("Alloc tool cmd buff out failed\n"); + goto out_free_buf_in; + } + + return 0; + +out_free_buf_in: + free_buff_in(hwdev, nt_msg, *buf_in); + + return ret; +} + +static void free_tmp_buf(void *hwdev, struct msg_module *nt_msg, + void *buf_in, void *buf_out) +{ + free_buff_out(hwdev, nt_msg, buf_out); + free_buff_in(hwdev, nt_msg, buf_in); +} + +__weak int hinic5_nictool_cmd_extend_handle(void *lld_dev, u32 cmd, + struct hinic5_mt_msg *mt_msg, bool *support) +{ + *support = false; + + return 0; +} + +static int send_to_hw_driver(struct hinic5_lld_dev *lld_dev, struct msg_module *nt_msg, + const void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + int index, num_cmds = (int)(sizeof(hw_driv_module_cmd_handle) / + sizeof(hw_driv_module_cmd_handle[0])); + enum driver_cmd_type cmd_type = + (enum driver_cmd_type)(nt_msg->msg_formate); + struct hinic5_mt_msg mt_msg; + bool support = false; + int err = 0; + + for (index = 0; index < num_cmds; index++) { + if (cmd_type != hw_driv_module_cmd_handle[index].driv_cmd_name) + continue; + err = hw_driv_module_cmd_handle[index].driv_func + (lld_dev, buf_in, in_size, buf_out, out_size); + if (err != 0) + pr_err("Hw driver cmd %u process failed, err %d\n", cmd_type, err); + return err; + } + + mt_msg.buf_in = buf_in; + mt_msg.buf_out = buf_out; + mt_msg.in_size = in_size; + mt_msg.out_size = *out_size; + err = hinic5_nictool_cmd_extend_handle((void *)lld_dev, (u32)cmd_type, &mt_msg, &support); + if (!support) { + pr_err("Can't find callback for %d\n", cmd_type); + return -EINVAL; + } + + if (err != 0) + pr_err("extend cmd %d process failed, err:%d\n", cmd_type, err); + *out_size = mt_msg.out_size; + + return err; +} + +static int send_to_service_driver(struct hinic5_lld_dev *lld_dev, struct 
msg_module *nt_msg, + const void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + const struct hinic5_uld_info *uld_info = NULL; + const char **service_name = NULL; + enum hinic5_service_type type; + void *uld_dev = NULL; + int ret = -EINVAL; + + service_name = hinic5_get_uld_names(); + type = nt_msg->module - SEND_TO_SRV_DRV_BASE; + if (type >= SERVICE_T_MAX) { + pr_err("Ioctl input module id: %u is incorrectly\n", nt_msg->module); + return -EINVAL; + } + + uld_dev = hinic5_get_uld_dev(lld_dev, type); + if (!uld_dev) { + if (nt_msg->msg_formate == GET_DRV_VERSION) + return 0; + + pr_err("Can not get the uld dev correctly: %s driver may be not register\n", + service_name[type]); + return -EINVAL; + } + + uld_info = hinic5_get_uld_info_by_type(type); + if (!uld_info || !uld_info->ioctl) + return -EFAULT; + + ret = uld_info->ioctl(uld_dev, nt_msg->msg_formate, + buf_in, in_size, buf_out, out_size); + hinic5_uld_dev_put(lld_dev, type); + + return ret; +} + +static int nictool_exec_cmd(struct hinic5_lld_dev *lld_dev, struct msg_module *nt_msg, + void *buf_in, u32 in_size, void *buf_out, u32 *out_size) +{ + int ret = 0; + + switch (nt_msg->module) { + case SEND_TO_HW_DRIVER: + ret = send_to_hw_driver(lld_dev, nt_msg, buf_in, in_size, buf_out, out_size); + break; + case SEND_TO_MPU: + ret = send_to_mpu(hinic5_get_sdk_hwdev_by_lld(lld_dev), + nt_msg, buf_in, in_size, buf_out, out_size); + break; + case SEND_TO_SM: + ret = send_to_sm(hinic5_get_sdk_hwdev_by_lld(lld_dev), + nt_msg, buf_in, in_size, buf_out, out_size); + break; + case SEND_TO_NPU: + ret = send_to_npu(hinic5_get_sdk_hwdev_by_lld(lld_dev), + nt_msg, buf_in, in_size, buf_out, out_size); + break; + default: + ret = send_to_service_driver(lld_dev, nt_msg, buf_in, in_size, buf_out, out_size); + break; + } + + return ret; +} + +static int cmd_parameter_valid(struct msg_module *nt_msg, ulong arg, + u32 *out_size_expect, u32 *in_size) +{ + if (copy_from_user(nt_msg, (void *)(uintptr_t)arg, sizeof(*nt_msg)) != 
0) { + pr_err("Copy information from user failed\n"); + return -EFAULT; + } + + *out_size_expect = nt_msg->buf_out_size; + *in_size = nt_msg->buf_in_size; + if (*out_size_expect > HINIC5_MAX_BUF_SIZE || + *in_size > HINIC5_MAX_BUF_SIZE) { + pr_err("Invalid in size: %u or out size: %u\n", + *in_size, *out_size_expect); + return -EFAULT; + } + + nt_msg->device_name[IFNAMSIZ - 1] = '\0'; + + return 0; +} + +static inline bool is_send_to_srv_drv(uint32_t module) +{ + return ((module >= SEND_TO_SRV_DRV_BASE) && (module < SEND_TO_DRIVER_MAX)); +} + +#define ROCE_DRV_SCC_CMD_MIN (SERVICE_DRV_BASE_CMD + 1) +#define ROCE_DRV_SCC_CMD_MAX (SERVICE_DRV_BASE_CMD + 4) + +struct hinic5_lld_dev *get_lld_dev_by_nt_msg(struct msg_module *nt_msg) +{ + struct hinic5_lld_dev *lld_dev = NULL; + enum mt_api_type api_type; + u8 mod = 0; + u16 cmd = 0; + + if (nt_msg->module == SEND_TO_MPU) { + api_type = (enum mt_api_type)nt_msg->mpu_cmd.api_type; + mod = nt_msg->mpu_cmd.mod; + cmd = nt_msg->mpu_cmd.cmd; + + if (api_type == API_TYPE_MBOX && mod == HINIC5_MOD_COMM && + (cmd == COMM_MGMT_CMD_UPDATE_FW || cmd == COMM_MGMT_CMD_ACTIVE_FW || + cmd == COMM_MGMT_CMD_HOT_ACTIVE_FW)) { + lld_dev = hinic5_get_lld_dev_with_l3i_enabled(nt_msg->device_name); + if (lld_dev) + return lld_dev; + } + } + + if (nt_msg->module == SEND_TO_NIC_DRIVER && + (nt_msg->msg_formate == GET_XSFP_INFO || nt_msg->msg_formate == GET_XSFP_PRESENT || + nt_msg->msg_formate == GET_XSFP_INFO_COMP_CMIS)) { + return hinic5_get_lld_dev_by_chip_and_port(nt_msg->device_name, nt_msg->port_id); + } + + if (nt_msg->module == SEND_TO_HIHTR_DRIVER && + (nt_msg->msg_formate == ROCE_CMD_SET_BYPASS || + nt_msg->msg_formate == ROCE_CMD_QUERY_BYPASS)) { + return hinic5_get_lld_dev_by_chip_name(nt_msg->device_name); + } + + if (nt_msg->module == SEND_TO_HIHTR_DRIVER && + (nt_msg->msg_formate >= ROCE_DRV_SCC_CMD_MIN && + nt_msg->msg_formate <= ROCE_DRV_SCC_CMD_MAX)) { + return hinic5_get_lld_dev_by_chip_name(nt_msg->device_name); + } + + if 
(nt_msg->module == SEND_TO_CUSTOM_DRIVER) + return hinic5_get_lld_dev_by_chip_name(nt_msg->device_name); + + if (nt_msg->module == SEND_TO_BIFUR_DRIVER) { + lld_dev = hinic5_get_lld_dev_by_chip_name(nt_msg->device_name); + if (!lld_dev) + lld_dev = + hinic5_get_lld_dev_by_dev_name(nt_msg->device_name, SERVICE_T_NIC); + return lld_dev; + } + + if (nt_msg->module == SEND_TO_IPSEC_DRIVER && + (nt_msg->msg_formate == HISEC_DRIVER_CMD_GET_TRNG || + nt_msg->msg_formate == HISEC_DRIVER_CMD_GET_IPSEC_INFO)) { + return hinic5_get_lld_dev_by_chip_name(nt_msg->device_name); + } + + if (is_send_to_srv_drv(nt_msg->module) && nt_msg->msg_formate != GET_DRV_VERSION) { + lld_dev = hinic5_get_lld_dev_by_dev_name(nt_msg->device_name, + nt_msg->module - SEND_TO_SRV_DRV_BASE); + if (!lld_dev) + lld_dev = hinic5_get_lld_dev_by_chip_name(nt_msg->device_name); + return lld_dev; + } + + /* 支持sdk 以指定function id的方式下发dfx 命令 */ + if (nt_msg->module == SEND_TO_HW_DRIVER && nt_msg->use_func_idx == 1) + return hinic5_get_lld_dev_by_func_id(nt_msg->device_name, nt_msg->func_idx); + + lld_dev = hinic5_get_lld_dev_by_chip_name(nt_msg->device_name); + if (!lld_dev) + lld_dev = hinic5_get_lld_dev_by_dev_name(nt_msg->device_name, SERVICE_T_MAX); + + return lld_dev; +} + +static long hinicadm_k_unlocked_ioctl(struct file *pfile, ulong arg) +{ + struct hinic5_lld_dev *lld_dev = NULL; + struct msg_module nt_msg; + void *buf_out = NULL; + void *buf_in = NULL; + u32 out_size_expect = 0; + u32 out_size = 0; + u32 in_size = 0; + int ret = 0; + + memset(&nt_msg, 0, sizeof(nt_msg)); + if (cmd_parameter_valid(&nt_msg, arg, &out_size_expect, &in_size) != 0) + return -EFAULT; + + lld_dev = get_lld_dev_by_nt_msg(&nt_msg); + if (!lld_dev) { + if (nt_msg.msg_formate != DEV_NAME_TEST) + pr_err("Can not find device %s for module %u\n", + nt_msg.device_name, nt_msg.module); + + return -ENODEV; + } + + if (nt_msg.msg_formate == DEV_NAME_TEST) { + hinic5_lld_dev_put(lld_dev); + return 0; + } + + ret = 
alloc_tmp_buf(hinic5_get_sdk_hwdev_by_lld(lld_dev), &nt_msg, + in_size, &buf_in, out_size_expect, &buf_out); + if (ret != 0) { + pr_err("Alloc tmp buff failed\n"); + goto out_free_lock; + } + + out_size = out_size_expect; + + ret = nictool_exec_cmd(lld_dev, &nt_msg, buf_in, in_size, buf_out, &out_size); + if (ret != 0) { + pr_err("nictool_exec_cmd failed, module: %u, ret: %d.\n", nt_msg.module, ret); + goto out_free_buf; + } + + if (out_size > out_size_expect) { + ret = -EFAULT; + pr_err("Out size is greater than expected out size from user: %u, out size: %u\n", + out_size_expect, out_size); + goto out_free_buf; + } + + ret = copy_buf_out_to_user(&nt_msg, out_size, buf_out); + if (ret != 0) + pr_err("Copy information to user failed\n"); + +out_free_buf: + free_tmp_buf(hinic5_get_sdk_hwdev_by_lld(lld_dev), &nt_msg, buf_in, buf_out); + +out_free_lock: + hinic5_lld_dev_put(lld_dev); + return (long)ret; +} + +/** + * dbgtool_knl_ffm_info_rd - Read ffm information + * @para: the dbgtool parameter + * @dbgtool_info: the dbgtool info + **/ +static int dbgtool_knl_ffm_info_rd(struct dbgtool_param *para, + struct dbgtool_k_glb_info *dbgtool_info) +{ + if (!para->param.ffm_rd || !dbgtool_info->ffm) + return -EINVAL; + + /* Copy the ffm_info to user mode */ + if (copy_to_user(para->param.ffm_rd, dbgtool_info->ffm, + (unsigned int)sizeof(struct ffm_record_info)) != 0) { + pr_err("Copy ffm_info to user fail\n"); + return -EFAULT; + } + + return 0; +} + +static long dbgtool_k_unlocked_ioctl(struct file *pfile, + unsigned int real_cmd, ulong arg) +{ + int ret; + struct dbgtool_param param; + struct dbgtool_k_glb_info *dbgtool_info = NULL; + struct card_node *card_info = NULL; + int i; + + memset(¶m, 0, sizeof(param)); + + if (copy_from_user(¶m, (void *)(uintptr_t)arg, sizeof(param)) != 0) { + pr_err("Copy param from user fail\n"); + return -EFAULT; + } + + lld_hold(); + for (i = 0; i < MAX_CARD_NUM; i++) { + card_info = (struct card_node *)g_card_node_array[i]; + if (!card_info) 
+ continue; + if (memcmp(param.chip_name, card_info->chip_name, + strlen(card_info->chip_name) + 1) == 0) + break; + } + + if (i == MAX_CARD_NUM || !card_info) { + lld_put(); + pr_err("Can't find this card %s\n", param.chip_name); + return -EFAULT; + } + + card_id = i; + dbgtool_info = (struct dbgtool_k_glb_info *)card_info->dbgtool_info; + + down(&dbgtool_info->dbgtool_sem); + + switch (real_cmd) { + case DBGTOOL_CMD_FFM_RD: + ret = dbgtool_knl_ffm_info_rd(¶m, dbgtool_info); + break; + case DBGTOOL_CMD_MSG_2_UP: + pr_err("Not suppose to use this cmd(0x%x).\n", real_cmd); + ret = 0; + break; + default: + pr_err("Dbgtool cmd(0x%x) not support now\n", real_cmd); + ret = -EFAULT; + } + + up(&dbgtool_info->dbgtool_sem); + + lld_put(); + + return (long)ret; +} + +static int nictool_k_release(struct inode *pnode, struct file *pfile) +{ + return 0; +} + +static int nictool_k_open(struct inode *pnode, struct file *pfile) +{ + return 0; +} + +static ssize_t nictool_k_read(struct file *pfile, char __user *ubuf, + size_t size, loff_t *ppos) +{ + return 0; +} + +static ssize_t nictool_k_write(struct file *pfile, const char __user *ubuf, + size_t size, loff_t *ppos) +{ + return 0; +} + +static long nictool_k_unlocked_ioctl(struct file *pfile, + unsigned int cmd, unsigned long arg) +{ + unsigned int real_cmd; + + real_cmd = _IOC_NR(cmd); + + return (real_cmd == NICTOOL_CMD_TYPE) ? 
+ hinicadm_k_unlocked_ioctl(pfile, arg) : + dbgtool_k_unlocked_ioctl(pfile, real_cmd, arg); +} + +static int hinic5_bar_mmap_param_valid(phys_addr_t phy_addr, u64 vmsize) +{ + struct list_head *chip_list = get_hinic5_chip_list(); + struct card_node *chip_node = NULL; + struct hinic5_adev *adev = NULL; + + lld_hold(); + + /* get PF bar1 or bar3 physical address to verify */ + list_for_each_entry(chip_node, chip_list, node) { + list_for_each_entry(adev, &chip_node->func_list, node) { + if (hinic5_func_type(adev->hwdev) == TYPE_VF) + continue; + + if (((phy_addr >= adev->cfg_base_phy) && + (phy_addr + vmsize <= (adev->cfg_base_phy + adev->cfg_base_len))) || + ((phy_addr >= adev->mgmt_base_phy) && + (phy_addr + vmsize <= (adev->mgmt_base_phy + adev->mgmt_base_len)))) { + lld_put(); + return 0; + } + } + } + + lld_put(); + return -EINVAL; +} + +static int hinic5_mem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + u64 vmsize = vma->vm_end - vma->vm_start; + phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; + phys_addr_t phy_addr; + int err = 0; + + if (vmsize > (PAGE_SIZE * (1 << DBGTOOL_PAGE_ORDER))) { + pr_err("Map size = %llu is bigger than alloc\n", vmsize); + return -EAGAIN; + } + + /* old version of tool set vma->vm_pgoff to 0 */ + phy_addr = (offset != 0) ? 
offset : g_card_phy_addr[card_id]; + + /* check phy_addr valid */ + if (phy_addr != g_card_phy_addr[card_id]) { + err = hinic5_bar_mmap_param_valid(phy_addr, vmsize); + if (err != 0) { + pr_err("mmap param invalid, err: %d\n", err); + return err; + } + } + + /* Disable cache and write buffer in the mapping area */ + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + if (remap_pfn_range(vma, vma->vm_start, (phy_addr >> PAGE_SHIFT), + vmsize, vma->vm_page_prot) != 0) { + pr_err("Remap pfn range failed.\n"); + return -EAGAIN; + } + + return 0; +} + +static const struct file_operations fifo_operations = { + .owner = THIS_MODULE, + .release = nictool_k_release, + .open = nictool_k_open, + .read = nictool_k_read, + .write = nictool_k_write, + .unlocked_ioctl = nictool_k_unlocked_ioctl, + .mmap = hinic5_mem_mmap, +}; + +static void free_dbgtool_info(void *hwdev, struct card_node *chip_info) +{ + struct dbgtool_k_glb_info *dbgtool_info = NULL; + int err; + u32 id; + + if (hinic5_func_type(hwdev) != TYPE_VF) + chip_info->func_handle_array[hinic5_global_func_id(hwdev)] = NULL; + + if (chip_info->func_num == 0) { + pr_err("dbgtool already free.\n"); + return; + } + + --chip_info->func_num; + if (chip_info->func_num > 0) + return; + + err = sscanf(chip_info->chip_name, HINIC5_CHIP_NAME "%u", &id); + if (err != 1) + pr_err("Failed to get card id\n"); + + if (id < MAX_CARD_NUM) + g_card_node_array[id] = NULL; + + dbgtool_info = chip_info->dbgtool_info; + /* FFM deinit */ + kfree(dbgtool_info->ffm); + dbgtool_info->ffm = NULL; + + kfree(dbgtool_info); + chip_info->dbgtool_info = NULL; + + if (id < MAX_CARD_NUM) + dbgtool_knl_free_mem(id); +} + +static int alloc_dbgtool_info(void *hwdev, struct card_node *chip_info) +{ + struct dbgtool_k_glb_info *dbgtool_info = NULL; + int err, id = 0; + + if (hinic5_func_type(hwdev) != TYPE_VF) + chip_info->func_handle_array[hinic5_global_func_id(hwdev)] = hwdev; + + // 仅首个function申请dbgtool_info, + // 后续function主需要引用计数++,无需申请内存 + if 
(chip_info->func_num != 0) { + chip_info->func_num++; + return 0; + } + + chip_info->func_num++; + dbgtool_info = (struct dbgtool_k_glb_info *) + kzalloc(sizeof(struct dbgtool_k_glb_info), GFP_KERNEL); + if (!dbgtool_info) { + pr_err("Failed to allocate dbgtool_info\n"); + goto dbgtool_info_fail; + } + + chip_info->dbgtool_info = dbgtool_info; + + /* FFM init */ + dbgtool_info->ffm = (struct ffm_record_info *) + kzalloc(sizeof(struct ffm_record_info), GFP_KERNEL); + if (!dbgtool_info->ffm) { + pr_err("Failed to allocate cell contexts for a chain\n"); + goto dbgtool_info_ffm_fail; + } + + sema_init(&dbgtool_info->dbgtool_sem, 1); + + err = sscanf(chip_info->chip_name, HINIC5_CHIP_NAME "%d", &id); + if (err < 0) { + pr_err("Failed to get card id\n"); + goto sscanf_chdev_fail; + } + + g_card_node_array[id] = chip_info; + + return 0; + +sscanf_chdev_fail: + kfree(dbgtool_info->ffm); + +dbgtool_info_ffm_fail: + kfree(dbgtool_info); + chip_info->dbgtool_info = NULL; + +dbgtool_info_fail: + if (hinic5_func_type(hwdev) != TYPE_VF) + chip_info->func_handle_array[hinic5_global_func_id(hwdev)] = NULL; + chip_info->func_num--; + return -ENOMEM; +} + +/** + * nictool_k_init - initialize the hw interface + **/ +/* temp for dbgtool_info */ +int nictool_k_init(void *hwdev, void *chip_node) +{ + struct card_node *chip_info = (struct card_node *)chip_node; + struct device *pdevice = NULL; + int err; + + err = alloc_dbgtool_info(hwdev, chip_info); + if (err != 0) + return err; + + // 仅首个function初始化创建字符设备,后续仅需引用计数+1 + if (g_nictool_ref_cnt != 0) { + /* already initialized */ + g_nictool_ref_cnt++; + return 0; + } + + g_nictool_ref_cnt++; + err = alloc_chrdev_region(&g_dev_id, 0, 1, HIADM3_DEV_NAME); + if (err != 0) { + pr_err("Register nictool_dev failed(0x%x)\n", err); + goto alloc_chdev_fail; + } + + /* Create equipment */ + g_nictool_class = class_create(THIS_MODULE, HIADM3_DEV_CLASS); + if (IS_ERR(g_nictool_class)) { + pr_err("Create nictool_class fail\n"); + err = -EFAULT; + goto 
class_create_err; + } + + /* Initializing the character device */ + cdev_init(&g_nictool_cdev, &fifo_operations); + + /* Add devices to the operating system */ + err = cdev_add(&g_nictool_cdev, g_dev_id, 1); + if (err < 0) { + pr_err("Add nictool_dev to operating system fail(0x%x)\n", err); + goto cdev_add_err; + } + + /* Export device information to user space + * (/sys/class/class name/device name) + */ + pdevice = device_create(g_nictool_class, NULL, + g_dev_id, NULL, HIADM3_DEV_NAME); + if (IS_ERR(pdevice)) { + pr_err("Export nictool device information to user space fail\n"); + err = -EFAULT; + goto device_create_err; + } + + pr_info("Register nictool_dev to system succeed\n"); + + return 0; + +device_create_err: + cdev_del(&g_nictool_cdev); + +cdev_add_err: + class_destroy(g_nictool_class); + +class_create_err: + g_nictool_class = NULL; + unregister_chrdev_region(g_dev_id, 1); + +alloc_chdev_fail: + g_nictool_ref_cnt--; + free_dbgtool_info(hwdev, chip_info); + + return err; +} + +void nictool_k_uninit(void *hwdev, void *chip_node) +{ + struct card_node *chip_info = (struct card_node *)chip_node; + + free_dbgtool_info(hwdev, chip_info); + + if (g_nictool_ref_cnt == 0) { + pr_err("Nictool Unregister.\n"); + return; + } + + --g_nictool_ref_cnt; + if (g_nictool_ref_cnt != 0) + return; + + if (IS_ERR(g_nictool_class)) { + pr_err("Nictool class is NULL.\n"); + return; + } + + device_destroy(g_nictool_class, g_dev_id); + cdev_del(&g_nictool_cdev); + class_destroy(g_nictool_class); + g_nictool_class = NULL; + + unregister_chrdev_region(g_dev_id, 1); + + pr_info("Unregister nictool_dev succeed\n"); +} + diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_nictool.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_nictool.h new file mode 100644 index 00000000..10ae5fff --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_nictool.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei 
Technologies Co., Ltd */ + +#ifndef HINIC5_NICTOOL_H +#define HINIC5_NICTOOL_H + +#include "hinic5_mt.h" +#include "hinic5_crm.h" +#include "hinic5_hw_mt.h" +#include "hinic5_chip_info.h" + +#ifndef MAX_SIZE +#define MAX_SIZE (16) +#endif + +#ifndef _LLT_TEST_ +#define DBGTOOL_PAGE_ORDER (10) +#else +#define DBGTOOL_PAGE_ORDER (1) +#endif + +#define MAX_CARD_NUM (64) + +int nictool_k_init(void *hwdev, void *chip_node); +void nictool_k_uninit(void *hwdev, void *chip_node); + +void hinic5_get_all_chip_id(void *id_info); + +void hinic5_get_card_info(const void *hwdev, const void *bufin, void *bufout); + +bool hinic5_is_in_host(void); + +int hinic5_get_pf_id(struct card_node *chip_node, u32 port_id, u32 *pf_id, u32 *isvalid); + +void hinic5_get_mbox_cnt(const void *hwdev, void *buf_out); + +extern struct hinic5_uld_info g_uld_info[SERVICE_T_MAX]; + +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_non_ptp.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_non_ptp.c new file mode 100644 index 00000000..5b0e3eb4 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_non_ptp.c @@ -0,0 +1,446 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/mm.h> + +#include "ossl_knl.h" +#include "hinic5_lld.h" +#include "hinic5_dev_mgmt.h" +#include "hinic5_chip_info.h" +#include "hinic5_hwif_inner.h" +#include "hinic5_nictool.h" +#include "hinic5_non_ptp.h" + +#if !defined(__UEFI__) && !defined(__VMWARE__) && !defined(__WIN__) +/* all chip's non ptp info saved buffer, PAGE_SIZE aligned */ +#define NON_PTP_INFO_SIZE (CARD_MAX_SIZE * sizeof(struct hinic5_non_ptp_info)) +#define NON_PTP_BUF_SIZE ((NON_PTP_INFO_SIZE / PAGE_SIZE + 1) * PAGE_SIZE) +#define NON_PTP_BUF_PAGE_ORDER (get_order(NON_PTP_BUF_SIZE)) +#define HINIC5_NON_PTP_CDEV "hinic5_non_ptp_cdev" +#define HINIC5_NON_PTP_CLASS "hinic5_non_ptp_class" +/* for calculate time diff of kernel and chip, influenced by 
EMU/FPGA/ASIC/EDA,
+ * default at ASIC condition is 1, can be modified with dfx tools
+ */
+unsigned int g_freq_reduce_ratio = 1;
+/* for multiple hosts, cdev is created on different hosts */
+/* for multiple chips, cdev is reffered by different chips */
+atomic_t g_non_ptp_cdev_ref_cnt = ATOMIC_INIT(0);
+/* create one global cdev for all chip on current host/vm */
+static struct hinic5_non_ptp_cdev *g_non_ptp_cdev;
+/* all chip's non ptp info saved page base addr, index is chip_id, on current host/vm */
+static struct hinic5_non_ptp_info *g_non_ptp_info;
+
+static int hinic5_non_ptp_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	ulong pfn;
+	ulong offset = vma->vm_pgoff << PAGE_SHIFT;
+	ulong size = vma->vm_end - vma->vm_start;
+
+	/* overflow-safe bound check: "offset + size" could wrap around */
+	if (size > NON_PTP_BUF_SIZE || offset > NON_PTP_BUF_SIZE - size)
+		return -EINVAL;
+
+	if (!g_non_ptp_info)
+		return -EFAULT;
+	/* map the non_ptp_info buffer; offset 0 corresponds to chip_id 0.
+	 * Honour vm_pgoff so that a mapping at a non-zero offset exposes
+	 * the requested pages rather than always the buffer base.
+	 */
+	pfn = (virt_to_phys(g_non_ptp_info) >> PAGE_SHIFT) + vma->vm_pgoff;
+
+	// force the mapping read-only
+	vm_flags_clear(vma, VM_WRITE);
+
+	// uncached mapping
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	return remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
+}
+
+static const struct file_operations fops = {
+	.mmap = hinic5_non_ptp_mmap,
+};
+
+static int hinic5_non_ptp_mem_alloc(struct hinic5_hwdev *hwdev)
+{
+	/* first load only: allocate the cdev and non ptp info memory */
+	if (atomic_read(&g_non_ptp_cdev_ref_cnt) > 0)
+		return 0;
+
+	g_non_ptp_cdev =
+		kzalloc(sizeof(struct hinic5_non_ptp_cdev), GFP_KERNEL);
+	if (!g_non_ptp_cdev) {
+		sdk_err(hwdev->dev_hdl, "non_ptp_cdev mem alloc fail.\n");
+		return -ENOMEM;
+	}
+
+	g_non_ptp_info =
+		(struct hinic5_non_ptp_info *)
+		(uintptr_t)__get_free_pages(GFP_KERNEL, NON_PTP_BUF_PAGE_ORDER);
+	if (!g_non_ptp_info) {
+		sdk_err(hwdev->dev_hdl, "non_ptp_info mem alloc fail.\n");
+		kfree(g_non_ptp_cdev);
+		g_non_ptp_cdev = NULL;
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void hinic5_non_ptp_mem_free(void)
+{
+	if (atomic_read(&g_non_ptp_cdev_ref_cnt) > 0)
+		return;
+
+	/*
无引用时, 释放non ptp info page和cdev内存 */ + if (g_non_ptp_info) { + free_pages((ulong)(uintptr_t)g_non_ptp_info, NON_PTP_BUF_PAGE_ORDER); + g_non_ptp_info = NULL; + } + + if (g_non_ptp_cdev) { + kfree(g_non_ptp_cdev); + g_non_ptp_cdev = NULL; + } +} + +static int hinic5_non_ptp_info_init(struct hinic5_hwdev *hwdev, u32 chip_id) +{ + struct card_node *chip_node = (struct card_node *)hwdev->chip_node; + + /* 初始化当前chip对应的 non_ptp_info */ + if (!chip_node->non_ptp_info) { + chip_node->non_ptp_info = g_non_ptp_info + chip_id; + memcpy(chip_node->non_ptp_info->name, + chip_node->chip_name, IFNAMSIZ); + atomic_set(&chip_node->non_ptp_info->ref_cnt, 0); + } + + atomic_inc(&chip_node->non_ptp_info->ref_cnt); + + return 0; +} + +static void hinic5_non_ptp_info_deinit(struct card_node *chip_node) +{ + /* 初始化当前chip对应的 non_ptp_info */ + if (!chip_node->non_ptp_info) + return; + + if (atomic_read(&chip_node->non_ptp_info->ref_cnt) == 0) + return; + + /* ref_cnt 减1后不为0, 接口返回false, 提前return */ + if (!atomic_sub_and_test(1, &chip_node->non_ptp_info->ref_cnt)) + return; + + memset(chip_node->non_ptp_info, 0, sizeof(struct hinic5_non_ptp_info)); + chip_node->non_ptp_info = NULL; +} + +static int hinic5_non_ptp_para_check(struct hinic5_hwdev *hwdev, u32 *chip_id) +{ + struct card_node *chip_node = NULL; + int err; + if ((!hwdev) || (!HINIC5_IS_PPF(hwdev))) + return -EINVAL; + + chip_node = (struct card_node *)hwdev->chip_node; + if (!chip_node) + return -EINVAL; + + /* 获取芯片号,用于索引non ptp信息 */ + err = sscanf(chip_node->chip_name, HINIC5_CHIP_NAME "%u", chip_id); + if (err != 1) { + sdk_err(hwdev->dev_hdl, "Failed to get card id err %d.\n", err); + return -EINVAL; + } + + if (*chip_id >= MAX_CARD_NUM) { + sdk_err(hwdev->dev_hdl, "Invalid card id %d.\n", *chip_id); + return -EINVAL; + } + + return 0; +} + +static int hinic5_non_ptp_ctrl_init(struct hinic5_hwdev *hwdev) +{ + int err; + + /* first chip create cdev, region, class */ + err = alloc_chrdev_region(&g_non_ptp_cdev->devid, 0, + 
HINIC5_NON_PTP_CDEV_MAX_DEVICES, + HINIC5_NON_PTP_CDEV); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Alloc non ptp cdev region failed %d.\n", + err); + return err; + } + + /* Create equipment */ + g_non_ptp_cdev->cdev_class = class_create(THIS_MODULE, HINIC5_NON_PTP_CLASS); + if (IS_ERR(g_non_ptp_cdev->cdev_class)) { + sdk_err(hwdev->dev_hdl, "Create non ptp class fail %ld.\n", + PTR_ERR(g_non_ptp_cdev->cdev_class)); + err = -EFAULT; + goto class_create_err; + } + + // 初始化cdev结构 + cdev_init(&g_non_ptp_cdev->dev, &fops); + g_non_ptp_cdev->dev.owner = THIS_MODULE; + + // 添加cdev到系统 + err = cdev_add(&g_non_ptp_cdev->dev, g_non_ptp_cdev->devid, + HINIC5_NON_PTP_CDEV_MAX_DEVICES); + if (err < 0) { + sdk_err(hwdev->dev_hdl, + "Add non ptp cdev to operating system fail %d\n", err); + goto cdev_add_err; + } + + g_non_ptp_cdev->cdev_device = device_create(g_non_ptp_cdev->cdev_class, NULL, + g_non_ptp_cdev->devid, NULL, + HINIC5_NON_PTP_CDEV); + if (IS_ERR(g_non_ptp_cdev->cdev_device)) { + sdk_err(hwdev->dev_hdl, + "Export non ptp cdev information to user space fail\n"); + err = -EFAULT; + goto device_create_err; + } + + return 0; + +device_create_err: + cdev_del(&g_non_ptp_cdev->dev); +cdev_add_err: + class_destroy(g_non_ptp_cdev->cdev_class); +class_create_err: + unregister_chrdev_region(g_non_ptp_cdev->devid, + HINIC5_NON_PTP_CDEV_MAX_DEVICES); + return err; +} + +static void hinic5_non_ptp_ctrl_deinit(void) +{ + if (atomic_read(&g_non_ptp_cdev_ref_cnt) > 0) + return; + + if (!g_non_ptp_cdev) + return; + device_destroy(g_non_ptp_cdev->cdev_class, g_non_ptp_cdev->devid); + cdev_del(&g_non_ptp_cdev->dev); + class_destroy(g_non_ptp_cdev->cdev_class); + unregister_chrdev_region(g_non_ptp_cdev->devid, + HINIC5_NON_PTP_CDEV_MAX_DEVICES); +} + +/* non ptp 功能初始化主函数 */ +int hinic5_non_ptp_cdev_init(struct hinic5_hwdev *hwdev) +{ + int err; + u32 chip_id; + struct card_node *chip_node = (struct card_node *)hwdev->chip_node; + + sdk_info(hwdev->dev_hdl, "init non ptp start %d.\n", 
atomic_read(&g_non_ptp_cdev_ref_cnt)); + + err = hinic5_non_ptp_para_check(hwdev, &chip_id); + if (err != 0) + return err; + + err = hinic5_non_ptp_mem_alloc(hwdev); + if (err != 0) + return err; + + err = hinic5_non_ptp_info_init(hwdev, chip_id); + if (err != 0) + goto info_init_err; + + if (atomic_read(&g_non_ptp_cdev_ref_cnt) > 0) { + atomic_inc(&g_non_ptp_cdev_ref_cnt); + sdk_info(hwdev->dev_hdl, + "non ptp cdev exist, cdev refcnt %d, chip refcnt %d.\n", + atomic_read(&g_non_ptp_cdev_ref_cnt), + atomic_read(&chip_node->non_ptp_info->ref_cnt)); + return 0; + } + + /* 以下流程仅首次初始化涉及 */ + atomic_inc(&g_non_ptp_cdev_ref_cnt); + + /* 初始化non ptp cdev的class region 和device */ + err = hinic5_non_ptp_ctrl_init(hwdev); + if (err != 0) { + sdk_err(hwdev->dev_hdl, "Create non ptp sys device fail.\n"); + goto ctrl_init_err; + } + + sdk_info(hwdev->dev_hdl, + "Init non ptp end cdev refcnt %d, chip refcnt %d.\n", + atomic_read(&g_non_ptp_cdev_ref_cnt), + atomic_read(&chip_node->non_ptp_info->ref_cnt)); + return 0; + +ctrl_init_err: + atomic_set(&g_non_ptp_cdev_ref_cnt, 0); + hinic5_non_ptp_info_deinit(chip_node); + +info_init_err: + hinic5_non_ptp_mem_free(); + return err; +} + +/* non ptp 功能销毁主函数 */ +void hinic5_non_ptp_cdev_deinit(struct hinic5_hwdev *hwdev) +{ + struct card_node *chip_node = NULL; + + if (!hwdev || !HINIC5_IS_PPF(hwdev)) + return; + + chip_node = (struct card_node *)hwdev->chip_node; + if (!chip_node || !chip_node->non_ptp_info) { + sdk_info(hwdev->dev_hdl, + "Exit non ptp deinit, refcnt %d.\n", atomic_read(&g_non_ptp_cdev_ref_cnt)); + return; + } + /* 销毁当前function 对应的chip 管理的non ptp info */ + hinic5_non_ptp_info_deinit(chip_node); + + /* ref_cnt 减1后不为0, 接口返回false, 提前return */ + if (!atomic_sub_and_test(1, &g_non_ptp_cdev_ref_cnt)) { + sdk_info(hwdev->dev_hdl, + "Quick deinit non ptp end %d.\n", atomic_read(&g_non_ptp_cdev_ref_cnt)); + return; + } + + /* 尝试销毁non ptp系统设备, 类, region等 */ + hinic5_non_ptp_ctrl_deinit(); + + hinic5_non_ptp_mem_free(); + 
sdk_info(hwdev->dev_hdl, "Deinit non ptp end %d.\n", atomic_read(&g_non_ptp_cdev_ref_cnt)); +} + +int hinic5_get_non_ptp_chip_time(void *dev, u64 *chip_time) +{ + struct hinic5_hwdev *hwdev = (struct hinic5_hwdev *)dev; + int err; + + if (!hwdev || !chip_time) + return -EINVAL; + + err = hinic5_n_ptp_ts_up_en(hwdev, BIT(1)); + if (err != 0) { + sdk_warn(hwdev->dev_hdl, "Failed to get n_ptp time, err: %d\n", err); + return err; + } + + err = hinic5_read_n_ptp_ts_data(hwdev, chip_time); + if (err != 0) { + sdk_warn(hwdev->dev_hdl, "Failed to read n_ptp time, err: %d\n", err); + return err; + } + + return 0; +} +EXPORT_SYMBOL(hinic5_get_non_ptp_chip_time); + +int hinic5_sync_kernel_time(struct hinic5_hwdev *hwdev) +{ + int err; + u64 kernel_time, chip_time; + struct card_node *chip_node = (struct card_node *)(hwdev->chip_node); + + if (!chip_node || !chip_node->non_ptp_info) + return 0; + + if (g_freq_reduce_ratio == 0) { + sdk_warn(hwdev->dev_hdl, "The frequency scaling ratio must be greater than zero.\n"); + return -EPERM; + } + + err = hinic5_get_non_ptp_chip_time(hwdev, &chip_time); + if (err != 0) { + sdk_warn(hwdev->dev_hdl, "Failed to read n_ptp time, err: %d\n", err); + return err; + } + + kernel_time = ktime_get_ns() / g_freq_reduce_ratio; + chip_node->non_ptp_info->non_ptp_time_diff = (kernel_time > chip_time ? 
+ (kernel_time - chip_time) : -(chip_time - kernel_time)); + return err; +} + +/* valid dev can get the chip time diff */ +int hinic5_get_non_ptp_time_diff(void *dev, s64 *time_diff) +{ + struct hinic5_hwdev *hwdev = (struct hinic5_hwdev *)dev; + struct card_node *chip_node = NULL; + + if (!hwdev || !time_diff) + return -EINVAL; + + chip_node = (struct card_node *)(hwdev->chip_node); + if (!chip_node || !chip_node->non_ptp_info) { + sdk_warn(hwdev->dev_hdl, "chip_node is NULL\n"); + return -EINVAL; + } + + if (chip_node->non_ptp_info->non_ptp_time_diff_enable != 0) { + *time_diff = chip_node->non_ptp_info->non_ptp_time_diff; + return 0; + } + sdk_warn(hwdev->dev_hdl, "non ptp time diff is disable\n"); + return -EINVAL; +} +EXPORT_SYMBOL(hinic5_get_non_ptp_time_diff); + +/* only support ppf dev to set the ratio */ +int hinic5_set_freq_reduce_ratio(void *dev, u32 ratio) +{ + struct hinic5_hwdev *hwdev = (struct hinic5_hwdev *)dev; + + if (!hwdev) + return -EINVAL; + + if (hinic5_func_type(hwdev) != TYPE_PPF) + return -EPERM; + + if (ratio == 0) + return -EINVAL; + + g_freq_reduce_ratio = ratio; + sdk_info(hwdev->dev_hdl, "set freq reduce ratio %d\n", ratio); + return 0; +} +EXPORT_SYMBOL(hinic5_set_freq_reduce_ratio); + +/* only support ppf dev to turn on/off the switch */ +int hinic5_set_non_ptp_time_diff_en(void *dev, bool enable) +{ + struct hinic5_hwdev *hwdev = (struct hinic5_hwdev *)dev; + struct card_node *chip_node = NULL; + + if (!hwdev) + return -EINVAL; + + if (hinic5_func_type(hwdev) != TYPE_PPF) + return -EPERM; + + chip_node = (struct card_node *)(hwdev->chip_node); + if (!chip_node || !chip_node->non_ptp_info) + return -EINVAL; + + if (enable == chip_node->non_ptp_info->non_ptp_time_diff_enable) + return 0; + + chip_node->non_ptp_info->non_ptp_time_diff_enable = enable; + if (enable) { + /* 从disable切换为enable时, 需要queue work */ + queue_delayed_work(hwdev->workq, &hwdev->sync_kernel_time_task, + msecs_to_jiffies(HINIC5_NON_PTP_SYNC_FW_TIME_PERIOD)); + 
sdk_info(hwdev->dev_hdl, "enable non ptp time diff\n"); + } else { + chip_node->non_ptp_info->non_ptp_time_diff = 0; + sdk_info(hwdev->dev_hdl, "disable non ptp time diff\n"); + } + return 0; +} +EXPORT_SYMBOL(hinic5_set_non_ptp_time_diff_en); + +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_non_ptp.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_non_ptp.h new file mode 100644 index 00000000..fb56b8f5 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_non_ptp.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_NON_PTP_H +#define HINIC5_NON_PTP_H + +#if !defined(__UEFI__) && !defined(__VMWARE__) && !defined(__WIN__) + +#include <linux/cdev.h> +#include "hinic5_hwdev.h" + +#define HINIC5_NON_PTP_CDEV_MAX_DEVICES 1 + +struct hinic5_non_ptp_cdev { + dev_t devid; + struct cdev dev; + struct class *cdev_class; + struct device *cdev_device; +}; + +int hinic5_non_ptp_cdev_init(struct hinic5_hwdev *hwdev); +void hinic5_non_ptp_cdev_deinit(struct hinic5_hwdev *hwdev); +int hinic5_sync_kernel_time(struct hinic5_hwdev *hwdev); +#endif +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_sdk_attack.c b/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_sdk_attack.c new file mode 100644 index 00000000..f2d5f0fa --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_sdk_attack.c @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/mm.h> + +#include "ossl_knl.h" +#include "hinic5_lld.h" +#include "hinic5_dev_mgmt.h" +#include "hinic5_chip_info.h" +#include "hinic5_hwif_inner.h" +#include "hinic5_nictool.h" +#include "hinic5_fast_msg.h" +#include "fast_msg_common_define.h" +#include "hinic5_sdk_attack.h" + +/* 当前宏&结构体&枚举类型定义需要与 工具保持一致 见sdk_attack.h */ +#define SDK_ATTACK_DW_CNT 512 +typedef struct sdk_attack_info 
{
+	u32 type;
+	u32 dw_cnt;
+	union {
+		u32 data[SDK_ATTACK_DW_CNT]; // supports attacks of up to 2K bytes by default
+		hisdk5_fast_msg_header attack_fastmsg_header; // fast_msg header type, 16 bytes
+	};
+} sdk_attack_info_t;
+
+typedef enum sdk_attack_opcode {
+	SDK_ATTACK_FASTMSG = 0,
+	SDK_ATTACK_INVALID_OPCODE = 0xFF
+} sdk_attack_opcode_e;
+
+/* Inject a crafted fast message for robustness testing.
+ * buf_in carries a tool-supplied sdk_attack_info_t; every field in it is
+ * untrusted and must be validated before it is copied into a cmd buffer.
+ */
+int hinic5_sdk_attack_handler(void *hwdev, const void *buf_in, u32 in_size,
+			      void *buf_out, u32 *out_size)
+{
+	struct hinic5_hwdev *hw_dev = (struct hinic5_hwdev *)hwdev;
+	struct hinic5_cmd_buf *cmd_buf = NULL;
+	sdk_attack_info_t *attack_info = (sdk_attack_info_t *)buf_in;
+	u32 attack_len;
+	u32 i = 0;
+	int ret = 0;
+
+	if (!hw_dev || !attack_info)
+		return -EINVAL;
+
+	attack_len = attack_info->dw_cnt * sizeof(u32);
+
+	switch (attack_info->type) {
+	case SDK_ATTACK_FASTMSG:
+		/* dw_cnt comes from userspace: cap it to the declared
+		 * union buffer so the memcpy below cannot overrun
+		 */
+		if (attack_info->dw_cnt > SDK_ATTACK_DW_CNT) {
+			sdk_err(hw_dev->dev_hdl, "attack dw cnt %u out of range\n",
+				attack_info->dw_cnt);
+			return -EINVAL;
+		}
+		cmd_buf = hinic5_alloc_cmd_buf(hwdev);
+		if (!cmd_buf) {
+			sdk_err(hw_dev->dev_hdl, "Failed to allocate cmd buf\n");
+			return -ENOMEM;
+		}
+		/* the final message length is also attacker-controlled:
+		 * bound both the raw copy and header + data_len against
+		 * the cmd buffer capacity (overflow-safe form)
+		 */
+		if (attack_len > cmd_buf->size ||
+		    cmd_buf->size < sizeof(hisdk5_fast_msg_header) ||
+		    attack_info->attack_fastmsg_header.data_len >
+		    cmd_buf->size - sizeof(hisdk5_fast_msg_header)) {
+			sdk_err(hw_dev->dev_hdl, "attack len %u exceeds cmd buf size\n",
+				attack_len);
+			hinic5_free_cmd_buf(hwdev, cmd_buf);
+			return -EINVAL;
+		}
+		memset(cmd_buf->buf, 0, cmd_buf->size);
+		memcpy(cmd_buf->buf, &attack_info->attack_fastmsg_header,
+		       attack_len);
+		sdk_info(hw_dev->dev_hdl, "attack dw cnt %d\n", attack_info->dw_cnt);
+		for (; i < attack_info->dw_cnt; i++)
+			sdk_info(hw_dev->dev_hdl,
+				 "fastmsg[dw%d]:0x%08x\n", i, ((u32 *)cmd_buf->buf)[i]);
+		cmd_buf->size = sizeof(hisdk5_fast_msg_header) +
+				attack_info->attack_fastmsg_header.data_len;
+		hinic5_cpu_to_be32(cmd_buf->buf, cmd_buf->size);
+		ret = hinic5_attack_fast_msg(hwdev, cmd_buf, buf_out);
+		if (ret != 0)
+			sdk_info(hw_dev->dev_hdl, "fastmsg err ret %d\n", ret);
+		hinic5_free_cmd_buf(hwdev, cmd_buf);
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
diff --git a/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_sdk_attack.h b/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_sdk_attack.h
new file mode 100644
index 00000000..3ac96d1f
--- /dev/null
+++ b/hinic5/src/dpu_platform_library/host/sdk/knldk/mt/hinic5_sdk_attack.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2021 Huawei Technologies Co., Ltd */
+
+#ifndef HINIC5_SDK_ATTACK_H
+#define HINIC5_SDK_ATTACK_H + +#include <linux/types.h> +#include "hinic5_hw.h" + +int hinic5_sdk_attack_handler(void *hwdev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size); + +int hinic5_attack_fast_msg(void *hwdev, struct hinic5_cmd_buf *cmd_buf, u64 *out_param); + +#endif diff --git a/hinic5/src/dpu_platform_library/host/sdk/ossl/linux/kernel/ossl_knl_linux.c b/hinic5/src/dpu_platform_library/host/sdk/ossl/linux/kernel/ossl_knl_linux.c new file mode 100644 index 00000000..88a17ad8 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/sdk/ossl/linux/kernel/ossl_knl_linux.c @@ -0,0 +1,378 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include <linux/vmalloc.h> +#include "ossl_knl_linux.h" + +#define OSSL_MINUTE_BASE (60) + +#if (KERNEL_VERSION(2, 6, 36) > LINUX_VERSION_CODE) +#ifdef __LINX_6_0_60__ +unsigned int _work_busy(struct work_struct *work) +{ + if (work_pending(work)) + return WORK_BUSY_PENDING; + else + return WORK_BUSY_RUNNING; +} +#endif /* work_busy */ +#endif + +#if (KERNEL_VERSION(3, 4, 0) > LINUX_VERSION_CODE) +void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, + int off, int size, unsigned int truesize) +{ + skb_fill_page_desc(skb, i, page, off, size); + skb->len += size; + skb->data_len += size; + skb->truesize += truesize; +} + +#endif /* < 3.4.0 */ +#ifdef NEED_PCI_SRIOV_GET_TOTALVFS +/* + * pci_sriov_get_totalvfs -- get total VFs supported on this device + * @dev: the PCI PF device + * + * For a PCIe device with SRIOV support, return the PCIe + * SRIOV capability value of TotalVFs. Otherwise 0. 
+ */ +int pci_sriov_get_totalvfs(struct pci_dev *dev) +{ + int sriov_cap_pos; + u16 total_vfs = 0; + + if (dev->is_virtfn) + return 0; + + sriov_cap_pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); + pci_read_config_word(dev, sriov_cap_pos + PCI_SRIOV_TOTAL_VF, + &total_vfs); + + return total_vfs; +} +#endif + +#if (KERNEL_VERSION(3, 10, 0) > LINUX_VERSION_CODE) +/* + * pci_vfs_assigned - returns number of VFs are assigned to a guest + * @dev: the PCI device + * + * Returns number of VFs belonging to this device that are assigned to a guest. + * If device is not a physical function returns -ENODEV. + */ +int pci_vfs_assigned(struct pci_dev *dev) +{ + unsigned int vfs_assigned = 0; +#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED + struct pci_dev *vfdev; + unsigned short dev_id = 0; + int sriov_cap_pos; + + /* only search if we are a PF. */ + if (dev->is_virtfn) + return 0; + + /* determine the device ID for the VFs, the vendor ID will be the + * same as the PF so there is no need to check for that one. + */ + sriov_cap_pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); + pci_read_config_word(dev, sriov_cap_pos + PCI_SRIOV_VF_DID, &dev_id); + + /* loop through all the VFs to see if we own any that are assigned. */ + vfdev = pci_get_device(dev->vendor, dev_id, NULL); + while (vfdev) { + /* It is considered assigned if it is a virtual function with + * our dev as the physical function and the assigned bit is set. 
+ */ + if (vfdev->is_virtfn && vfdev->physfn == dev && + (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)) + vfs_assigned++; + + vfdev = pci_get_device(dev->vendor, dev_id, vfdev); + } +#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */ + + return (int)vfs_assigned; +} + +#endif /* 3.10.0 */ +#if (KERNEL_VERSION(3, 13, 0) > LINUX_VERSION_CODE) +int kc_dma_set_mask_and_coherent(struct device *dev, u64 mask) +{ + int ret; + + ret = dma_set_mask(dev, mask); + if (!ret) + ret = dma_set_coherent_mask(dev, mask); + + return ret; +} + +#endif /* 3.13.0 */ +#if (KERNEL_VERSION(3, 14, 0) > LINUX_VERSION_CODE) +int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, + int min_vec, int max_vec) +{ + int nvec = max_vec; + int ret; + + if (max_vec < min_vec) + return -ERANGE; + + do { + ret = pci_enable_msix(dev, entries, nvec); + if (ret < 0) { + return ret; + } else if (ret > 0) { + if (ret < min_vec) + return -ENOSPC; + nvec = ret; + } + } while (ret != 0); + + return nvec; +} + +#endif + +#if (KERNEL_VERSION(3, 19, 0) > LINUX_VERSION_CODE) +#endif /* < 3.19.0 */ + +#ifdef NEED_CPUMASK_LOCAL_SPREAD +unsigned int cpumask_local_spread(unsigned int i, int node) +{ + int cpu; + unsigned int num = i; + + /* Wrap: we always want a cpu. */ + num %= (unsigned int)num_online_cpus(); + + if (node == -1) { + for_each_cpu(cpu, cpu_online_mask) { + if (num-- == 0) + return (unsigned int)cpu; + } + } else { + /* NUMA first. */ + for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask) { + if (num-- == 0) + return (unsigned int)cpu; + } + + for_each_cpu(cpu, cpu_online_mask) { + /* Skip NUMA nodes, done above. 
*/ + if (cpumask_test_cpu(cpu, cpumask_of_node(node)) != 0) + continue; + + if (num-- == 0) + return (unsigned int)cpu; + } + } + + WARN_ON(num != 0); + return 0; +} +#endif + +struct file *file_creat(const char *file_name) +{ + return filp_open(file_name, O_CREAT | O_RDWR | O_APPEND, 0); +} + +struct file *file_open(const char *file_name) +{ + return filp_open(file_name, O_RDONLY, 0); +} + +void file_close(struct file *file_handle) +{ + (void)filp_close(file_handle, NULL); +} + +u32 get_file_size(struct file *file_handle) +{ + struct inode *file_inode = NULL; + +#if (KERNEL_VERSION(3, 19, 0) > LINUX_VERSION_CODE) + file_inode = file_handle->f_dentry->d_inode; +#else + file_inode = file_handle->f_inode; +#endif + + return (u32)(file_inode->i_size); +} + +void set_file_position(struct file *file_handle, u32 position) +{ + file_handle->f_pos = position; +} + +int file_read(struct file *file_handle, char *log_buffer, u32 rd_length, + u32 *file_pos) +{ + if (!file_handle || !log_buffer || rd_length == 0) + return -EINVAL; + +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + return (int)file_handle->f_op->read(file_handle, log_buffer, rd_length, + &file_handle->f_pos); +#elif (KERNEL_VERSION(4, 14, 0) > LINUX_VERSION_CODE) + return (int)vfs_read(file_handle, log_buffer, rd_length, + &file_handle->f_pos); +#else + return (int)kernel_read(file_handle, log_buffer, rd_length, + &file_handle->f_pos); + +#endif +} + +u32 file_write(struct file *file_handle, const char *log_buffer, u32 wr_length) +{ + if (!file_handle || !log_buffer || wr_length == 0) + return -EINVAL; + +#if (KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE) + return (u32)file_handle->f_op->write(file_handle, log_buffer, + wr_length, &file_handle->f_pos); +#elif (KERNEL_VERSION(4, 14, 0) > LINUX_VERSION_CODE) + return (u32)vfs_write(file_handle, + (__force const char __user *)log_buffer, + wr_length, &file_handle->f_pos); +#else + return (u32)kernel_write(file_handle, log_buffer, wr_length, + 
&file_handle->f_pos);
+
+#endif
+}
+
+/* thread trampoline: repeatedly invoke the user callback until the
+ * thread is asked to stop; thread_fn is expected to block/sleep itself
+ * (NOTE(review): otherwise this busy-loops - confirm with callers)
+ */
+static int _linux_thread_func(void *thread)
+{
+	struct sdk_thread_info *info = (struct sdk_thread_info *)thread;
+
+	while (!kthread_should_stop())
+		info->thread_fn(info->data);
+
+	return 0;
+}
+
+int creat_thread(struct sdk_thread_info *thread_info)
+{
+	thread_info->thread_obj = kthread_run(_linux_thread_func, thread_info,
+					      thread_info->name);
+	/* kthread_run() returns ERR_PTR() on failure, never NULL; clear the
+	 * handle so stop_thread() will not act on an error pointer
+	 */
+	if (IS_ERR(thread_info->thread_obj)) {
+		thread_info->thread_obj = NULL;
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+void stop_thread(struct sdk_thread_info *thread_info)
+{
+	if (thread_info->thread_obj)
+		(void)kthread_stop(thread_info->thread_obj);
+}
+
+void utctime_to_localtime(u64 utctime, u64 *localtime)
+{
+	*localtime = utctime - (u64)(sys_tz.tz_minuteswest * OSSL_MINUTE_BASE);
+}
+
+#ifndef HAVE_TIMER_SETUP
+void initialize_timer(const void *adapter_hdl, struct timer_list *timer)
+{
+	if (!adapter_hdl || !timer)
+		return;
+
+	init_timer(timer);
+}
+#endif
+
+/* NOTE(review): 'period' is unused - the timer's expiry must already be
+ * set before this is called; confirm callers rely on that
+ */
+void add_to_timer(struct timer_list *timer, u64 period)
+{
+	if (!timer)
+		return;
+
+	add_timer(timer);
+}
+
+void stop_timer(struct timer_list *timer) {}
+
+void delete_timer(struct timer_list *timer)
+{
+	if (!timer)
+		return;
+
+	del_timer_sync(timer);
+}
+
+u64 ossl_get_real_time(void)
+{
+	struct timeval tv = {0};
+	u64 tv_msec;
+
+	do_gettimeofday(&tv);
+
+	tv_msec = (u64)tv.tv_sec * MSEC_PER_SEC + (u64)tv.tv_usec / USEC_PER_MSEC;
+	return tv_msec;
+}
+
+#ifdef NEED_MATH64_MUL_U64_U64_DIV_U64
+u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
+{
+	u64 res = 0, div, rem;
+	int shift;
+
+	/* 62: can a * b overflow ? */
+	if (ilog2(a) + ilog2(b) > 62) {
+		/*
+		 * (b * a) / c is equal to
+		 *
+		 *      (b / c) * a +
+		 *      (b % c) * a / c
+		 *
+		 * if nothing overflows. Can the 1st multiplication
+		 * overflow? Yes, but we do not care: this can only
+		 * happen if the end result can't fit in u64 anyway.
+		 *
+		 * So the code below does
+		 *
+		 *      res = (b / c) * a;
+		 *      b = b % c;
+		 */
+		div = div64_u64_rem(b, c, &rem);
+		res = div * a;
+		b = rem;
+
+		/* 62: if a * b overflow ?
b and c both move right shift until a * b not overflow */ + shift = ilog2(a) + ilog2(b) - 62; + if (shift > 0) { + /* drop precision */ + b >>= shift; + c >>= shift; + if (!c) + return res; + } + } + + return res + div64_u64(a * b, c); +} +EXPORT_SYMBOL(mul_u64_u64_div_u64); +#endif + +#if KERNEL_VERSION(5, 10, 0) > LINUX_VERSION_CODE +int sysfs_emit(char *buf, const char *fmt, ...) +{ + va_list args; + int len; + + if (WARN(!buf || offset_in_page(buf), + "invalid %s: buf:%p\n", __func__, buf)) + return 0; + + va_start(args, fmt); + len = vscnprintf(buf, PAGE_SIZE, fmt, args); + va_end(args); + + return len; +} +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/include/hinic5_srv_nic.h b/hinic5/src/dpu_platform_library/host/service/include/hinic5_srv_nic.h new file mode 100644 index 00000000..b60e7332 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/include/hinic5_srv_nic.h @@ -0,0 +1,237 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * Copyright (c) Huawei Technologies Co., Ltd. 2018-2022. All rights reserved. 
+ ****************************************************************************** + * @file hinic5_srv_nic.h + * @details nic service interface + History : + 1.Date : 2018/3/8 + Modification: Created file +***************************************************************************** +*/ + +#ifndef HINIC5_SRV_NIC_H +#define HINIC5_SRV_NIC_H + +#include "nic_cfg_comm.h" +#include "drv_nic_api.h" +#if !defined(__UEFI__) && !defined(__WIN__) +#include <linux/netdevice.h> +#include "hinic5_lld.h" +#endif + +/** + * @brief struct hinic5_event_link_info 端口link事件信息 + * @details link事件上报后获取的端口link信息 + */ +struct hinic5_event_link_info { + u8 valid; /**< 结构体数据是否有效 */ + u8 port_type; /**< 端口类型 */ + u8 autoneg_cap; /**< 自动协商能力 */ + u8 autoneg_state; /**< 自动协商状态 */ + u8 duplex; /**< 双工模式 */ + u8 speed; /**< 端口速率 */ +}; + +enum link_err_type { + LINK_ERR_MODULE_UNRECOGENIZED, /**< 未识别的模块错误类型 */ + LINK_ERR_NUM, +}; + +enum port_module_event_type { + HINIC5_PORT_MODULE_CABLE_PLUGGED, /**< 端口电缆已插入的事件 */ + HINIC5_PORT_MODULE_CABLE_UNPLUGGED, /**< 端口电缆已拔出的事件 */ + HINIC5_PORT_MODULE_LINK_ERR, /**< 端口链路出现错误的事件 */ + HINIC5_PORT_MODULE_MAX_EVENT, +}; + +/** + * @brief struct hinic5_port_module_event 端口事件信息 + * @details DCB事件上报的DCB信息 + */ +struct hinic5_port_module_event { + enum port_module_event_type type; /**< 端口电缆事件类型 */ + enum link_err_type err_type; /**< Link错误事件类型 */ +}; + +/** + * @brief struct hinic5_dcb_info DCB信息 + * @details DCB事件上报的DCB信息 + */ +struct hinic5_dcb_info { + u8 dcb_on; /**< DCB使能状态 */ + u8 default_cos; /**< 默认cos */ + u8 up_cos[NIC_DCB_COS_MAX]; /**< 优先级到cos映射 */ +}; + +enum hinic5_nic_event_type { + EVENT_NIC_LINK_DOWN, /**< Link down事件 */ + EVENT_NIC_LINK_UP, /**< Link up事件 */ + EVENT_NIC_PORT_MODULE_EVENT, /**< 线缆插拔事件 */ + EVENT_NIC_DCB_STATE_CHANGE, /**< DCB状态变化事件 */ +}; + +#if !defined(__UEFI__) && !defined(__VMWARE__) +/** + * @brief 根据netdev获取lld_dev结构体指针 + * + * @param netdev netdev结构体指针 + * + * @details 根据netdev匹配查找查找lld_dev + * + * @attention: 
该接口返回不会对lld_dev引用计数++, + * 使用过程中如果lld_dev被释放可能导致访问野指针 + * + * @return: 成功匹配到netdev的lld_dev时返回lld_dev结构体指针,否则返回NULL + */ +struct hinic5_lld_dev *hinic5_get_lld_dev_by_netdev(struct net_device *netdev); +#endif + +/** + * @brief 删除设备mac接口 + * + * @param hwdev device pointer to hwdev + * @param mac_addr mac地址 + * @param vlan_id vlan id 范围[0~4095] + * @param func_id global function index + * @param channel mailbox发送使用的channel id + * + * @details 删除对应function的mac地址 + * + * @attention: 函数内部涉及发送mailbox消息会休眠, + * 禁止中断上下文等不允许休眠的流程中调用 + * + * @return: 删除MAC返回成功或者失败. + * @retval 0 成功 + * @retval 非0 失败 + */ +int hinic5_del_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id, u16 channel); + +/** + * @brief 获取设备DCB状态 + * + * @param hwdev device pointer to hwdev + * @param dcb_state:DCB状态信息 + * + * @details 获取设备DCB状态 + * + * @attention: NA + * @return: DCB状态获取返回成功或者失败 + * @retval 0 成功 + * @retval 非0 失败 + */ +int hinic5_get_dcb_state(void *hwdev, struct hinic5_dcb_state *dcb_state); + +/** + * @brief 获取PF DCB状态 + * + * @param hwdev device pointer to hwdev + * @param dcb_state:DCB状态信息 + * + * @details VF通过mailbox信息发送给PF,获取PF DCB状态信息 + * + * @attention: 仅VF支持,PF调用返回失败;函数内部涉及发送mailbox消息会休眠, + * 禁止中断上下文等不允许休眠的流程中调用 + * + * @return: VF获取PF DCB状态返回成功或者失败 + * @retval 0 成功 + * @retval 非0 失败 + */ +int hinic5_get_pf_dcb_state(void *hwdev, struct hinic5_dcb_state *dcb_state); + +/** + * @brief 根据优先级获取对应cos值 + * + * @param hwdev device pointer to hwdev + * @param pri 优先级 PCP模式[0~7] DSCP模式[0~63] + * @param cos 输出cos值 [0~7] + * + * @details 通过用户输入的pri查询对应的cos值,PCP模式下pri合法值为0~7, + * DSCP模式下,pri合法值0~63 + * + * @attention: NA + * + * @return: pri映射cos查询成功或者失败. 
+ * @retval 0 成功 + * @retval 非0 失败 + */ +int hinic5_get_cos_by_pri(void *hwdev, u8 pri, u8 *cos); + +/* TO DO 以下接口待删除 */ +#if !defined(__UEFI__) && !defined(__VMWARE__) +typedef u8 (*hinic5_cqe_cb)(void *lld_dev, void *data); + +int hinic5_register_cqe_cb(struct hinic5_lld_dev *lld_dev, enum hinic5_service_type event, + hinic5_cqe_cb cqe_cb); +void hinic5_unregister_cqe_cb(struct hinic5_lld_dev *lld_dev, enum hinic5_service_type event); +#endif + +enum hinic5_bonding_en { + HINIC5_BONDING_OFFLOAD_DISABLE = 0, + HINIC5_BONDING_OFFLOAD_ENABLE +}; + +enum hinic5_bonding_event_e { + BOND_EVENT_LINK_DOWN = 0, + BOND_EVENT_LINK_UP = 1, + BOND_EVENT_OPEN = 2, + BOND_EVENT_CLOSE = 3 +}; + +/* * + * @brief hinic5_bonding_register_service_func - bonding event register + * @param type: hinic5 service type + * @param func: register function + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_bonding_register_service_func(enum hinic5_service_type type, void (*func)(void *netdev, + u32 bond_id, u8 new_slaves, + enum hinic5_bonding_event_e event)); + +/* * + * @brief hinic5_bonding_unregister_service_func - bonding event unregister + * @param type: hinic5 service type + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_bonding_unregister_service_func(enum hinic5_service_type type); + +/* * + * @brief hinic5_offload_bond_en_get - get bonding offload status + * @param type: void + * @retval zero: bonding offload disable + * @retval non-zero: bonding offload enable + */ +int hinic5_offload_bond_en_get(void); + +/* * + * @brief hinic5_bond_offload_get_uplink_id - get bonding uplink id + * @param type: u16 bond_id, u32 *uplink_id + * @retval zero success + * @retval non-zero failure + */ +int hinic5_bond_offload_get_uplink_id(u16 bond_id, u32 *uplink_id); + +/* * + * @brief hinic5_bond_offload_get_slaves - get bonding slaves info + * @param type: u16 bond_id, void *drv_msg, u8 *slaves + * @retval zero success + * @retval non-zero failure + */ 
+int hinic5_bond_offload_get_slaves(u16 bond_id, void *drv_msg, u8 *slaves); + +#if !defined(__UEFI__) && !defined(__WIN__) && !defined(__VMWARE__) +int hinic5_get_phy_port_id_by_netdev(struct net_device *netdev, uint8_t *phy_port_id); +#endif + +/* * + * @brief hinic5_get_phy_port_stats - get port stats + * @param hwdev: device pointer to hwdev + * @param stats: port stats + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_get_phy_port_stats(void *hwdev, struct mag_cmd_port_stats *stats); + +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/comm/182x_cmdq_adapt/182x_cmdq_ops.c b/hinic5/src/dpu_platform_library/host/service/nic/comm/182x_cmdq_adapt/182x_cmdq_ops.c new file mode 100644 index 00000000..a9de8043 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/comm/182x_cmdq_adapt/182x_cmdq_ops.c @@ -0,0 +1,210 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include "nic_npu_cmd.h" +#include "hinic5_nic_cmdq.h" +#include "182x_cmdq_ops.h" + +#define HINIC5_DEAULT_DROP_THD_OFF 0 + +#define WQ_PREFETCH_MAX 4 +#define WQ_PREFETCH_MIN 1 +#define WQ_PREFETCH_THRESHOLD 256 + +#define RQ_CTXT_CEQ_ATTR_CI_WR_SHIFT 0 +#define RQ_CTXT_CEQ_ATTR_INTR_SHIFT 21 +#define RQ_CTXT_CEQ_ATTR_CEQ_ARM_SHIFT 30 +#define RQ_CTXT_CEQ_ATTR_EN_SHIFT 31 + +#define RQ_CTXT_CEQ_ATTR_CI_WR_MASK 0x1U +#define RQ_CTXT_CEQ_ATTR_INTR_MASK 0x3FFU +#define RQ_CTXT_CEQ_ATTR_CEQ_ARM_MASK 0x1U +#define RQ_CTXT_CEQ_ATTR_EN_MASK 0x1U + +static void hinic5_qp_prepare_cmdq_header(struct hinic5_qp_ctxt_header *qp_ctxt_hdr, + enum hinic5_qp_ctxt_type ctxt_type, u16 num_queues, + u16 q_id) +{ + qp_ctxt_hdr->queue_type = ctxt_type; + qp_ctxt_hdr->num_queues = num_queues; + qp_ctxt_hdr->start_qid = q_id; + qp_ctxt_hdr->rsvd = 0; + + hinic5_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr)); +} + +static u8 prepare_cmd_buf_qp_context_multi_store(struct hinic5_nic_io *nic_io, + struct hinic5_cmd_buf *cmd_buf, + 
enum hinic5_qp_ctxt_type ctxt_type, + u16 start_qid, u16 max_ctxts) +{ + struct hinic5_qp_ctxt_block *qp_ctxt_block = NULL; + u16 i; + + qp_ctxt_block = cmd_buf->buf; + + hinic5_qp_prepare_cmdq_header(&qp_ctxt_block->cmdq_hdr, ctxt_type, + max_ctxts, start_qid); + + for (i = 0; i < max_ctxts; i++) { + if (ctxt_type == HINIC5_QP_CTXT_TYPE_RQ) + hinic5_rq_prepare_ctxt(nic_io, &nic_io->rq[start_qid + i], + &qp_ctxt_block->rq_ctxt[i]); + else + hinic5_sq_prepare_ctxt(nic_io, &nic_io->sq[start_qid + i], + start_qid + i, &qp_ctxt_block->sq_ctxt[i]); + } + + return (u8)HINIC5_UCODE_CMD_MODIFY_QUEUE_CTX; +} + +static u8 prepare_cmd_buf_clean_tso_lro_space(struct hinic5_nic_io *nic_io, + struct hinic5_cmd_buf *cmd_buf, + enum hinic5_qp_ctxt_type ctxt_type) +{ + struct hinic5_clean_queue_ctxt *ctxt_block = NULL; + + ctxt_block = cmd_buf->buf; + ctxt_block->cmdq_hdr.num_queues = nic_io->max_qps; + ctxt_block->cmdq_hdr.queue_type = ctxt_type; + ctxt_block->cmdq_hdr.start_qid = 0; + + hinic5_cpu_to_be32(ctxt_block, sizeof(*ctxt_block)); + + cmd_buf->size = sizeof(*ctxt_block); + return (u8)HINIC5_UCODE_CMD_CLEAN_QUEUE_CONTEXT; +} + +static u8 prepare_cmd_buf_set_rss_indir_table(const struct hinic5_nic_io *nic_io, + const u32 *indir_table, + struct hinic5_cmd_buf *cmd_buf) +{ + u32 i, size; + u32 *temp = NULL; + struct nic_rss_indirect_tbl *indir_tbl = NULL; + + indir_tbl = (struct nic_rss_indirect_tbl *)cmd_buf->buf; + cmd_buf->size = sizeof(struct nic_rss_indirect_tbl); + memset(indir_tbl, 0, sizeof(*indir_tbl)); + + for (i = 0; i < NIC_RSS_INDIR_SIZE; i++) + indir_tbl->entry[i] = (u16)(*(indir_table + i)); + size = sizeof(indir_tbl->entry) / sizeof(u32); + temp = (u32 *)indir_tbl->entry; + for (i = 0; i < size; i++) + temp[i] = cpu_to_be32(temp[i]); + + return (u8)HINIC5_UCODE_CMD_SET_RSS_INDIR_TABLE; +} + +static u8 prepare_cmd_buf_get_rss_indir_table(const struct hinic5_nic_io *nic_io, + const struct hinic5_cmd_buf *cmd_buf) +{ + (void)nic_io; + memset(cmd_buf->buf, 0, 
cmd_buf->size); + + return (u8)HINIC5_UCODE_CMD_GET_RSS_INDIR_TABLE; +} + +static void cmd_buf_to_rss_indir_table(const struct hinic5_cmd_buf *cmd_buf, u32 *indir_table) +{ + u32 i; + u16 *indir_tbl = NULL; + + indir_tbl = (u16 *)cmd_buf->buf; + for (i = 0; i < NIC_RSS_INDIR_SIZE; i++) + indir_table[i] = *(indir_tbl + i); +} + +static u8 prepare_cmd_buf_modify_svlan(struct hinic5_cmd_buf *cmd_buf, + u16 func_id, u16 vlan_tag, u16 q_id, u8 vlan_mode) +{ + struct nic_vlan_ctx *vlan_ctx = NULL; + + cmd_buf->size = sizeof(struct nic_vlan_ctx); + vlan_ctx = (struct nic_vlan_ctx *)cmd_buf->buf; + + vlan_ctx->func_id = func_id; + vlan_ctx->qid = q_id; + vlan_ctx->vlan_tag = vlan_tag; + vlan_ctx->vlan_sel = 0; /* TPID0 in IPSU */ + vlan_ctx->vlan_mode = vlan_mode; + + hinic5_cpu_to_be32(vlan_ctx, sizeof(struct nic_vlan_ctx)); + return (u8)HINIC5_UCODE_CMD_MODIFY_VLAN_CTX; +} + +static u8 prepare_cmd_buf_clear_vport_stats(const struct hinic5_nic_io *nic_io, + const struct hinic5_cmd_buf *cmd_buf, u16 func_id) +{ + return (u8)HINIC5_UCODE_CMD_CLEAR_VPORT_STATS; +} + +static u8 prepare_cmd_buf_get_vport_stats(const struct hinic5_nic_io *nic_io, + const struct hinic5_cmd_buf *cmd_buf, u16 func_id) +{ + (void)nic_io; + memset(cmd_buf->buf, 0, cmd_buf->size); + + return (u8)HINIC5_UCODE_CMD_GET_VPORT_STATS; +} + +static void cmd_buf_to_vport_stats(const struct hinic5_cmd_buf *cmd_buf, + struct hinic5_vport_stats *stats) +{ + /* 微码 cmdq 获取命令字复制, + 后续修改需要考虑 hinic5_vport_stats 与 nic_cmdq_vport_stats 差异 */ + + memcpy(stats, cmd_buf->buf, sizeof(struct hinic5_vport_stats)); +} + +static void prepare_sq_ctxt_drop_and_prefetch(struct hinic5_sq_ctxt *sq_ctxt) +{ + sq_ctxt->pkt_drop_thd = + SQ_CTXT_PKT_DROP_THD_SET(HINIC5_DEAULT_DROP_THD_ON, THD_ON) | + SQ_CTXT_PKT_DROP_THD_SET(HINIC5_DEAULT_DROP_THD_OFF, THD_OFF); + + sq_ctxt->pref_cache = + SQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) | + SQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) | + SQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, 
CACHE_THRESHOLD); +} + +static void prepare_rq_ctxt_ceq_and_prefetch + (struct hinic5_io_queue *rq, struct hinic5_rq_ctxt *rq_ctxt, bool support_rq_sw_compact_wqe) +{ + u16 wqe_type = rq->wqe_type; + + rq_ctxt->ceq_attr = RQ_CTXT_CEQ_ATTR_SET(0, EN) | + RQ_CTXT_CEQ_ATTR_SET(rq->msix_entry_idx, INTR); + + if (wqe_type == HINIC5_COMPACT_RQ_WQE && support_rq_sw_compact_wqe) { + rq_ctxt->ceq_attr |= RQ_CTXT_CEQ_ATTR_SET(1, EN); + rq_ctxt->ceq_attr |= RQ_CTXT_CEQ_ATTR_SET(1, CI_WR); + rq_ctxt->ceq_attr |= RQ_CTXT_CEQ_ATTR_SET(1, CEQ_ARM); + } + + rq_ctxt->pref_cache = + RQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) | + RQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) | + RQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); +} + +struct hinic5_nic_cmdq_ops *hinic5_nic_cmdq_get_182x_ops(void) +{ + static struct hinic5_nic_cmdq_ops cmdq_182x_ops = { + .prepare_cmd_buf_clean_tso_lro_space = prepare_cmd_buf_clean_tso_lro_space, + .prepare_cmd_buf_qp_context_multi_store = prepare_cmd_buf_qp_context_multi_store, + .prepare_cmd_buf_modify_svlan = prepare_cmd_buf_modify_svlan, + .prepare_cmd_buf_set_rss_indir_table = prepare_cmd_buf_set_rss_indir_table, + .prepare_cmd_buf_get_rss_indir_table = prepare_cmd_buf_get_rss_indir_table, + .prepare_cmd_buf_get_vport_stats = prepare_cmd_buf_get_vport_stats, + .prepare_cmd_buf_clear_vport_stats = prepare_cmd_buf_clear_vport_stats, + .cmd_buf_to_vport_stats = cmd_buf_to_vport_stats, + .cmd_buf_to_rss_indir_table = cmd_buf_to_rss_indir_table, + .prepare_sq_ctxt_drop_and_prefetch = prepare_sq_ctxt_drop_and_prefetch, + .prepare_rq_ctxt_ceq_and_prefetch = prepare_rq_ctxt_ceq_and_prefetch, + }; + + return &cmdq_182x_ops; +} diff --git a/hinic5/src/dpu_platform_library/host/service/nic/comm/182x_cmdq_adapt/182x_cmdq_ops.h b/hinic5/src/dpu_platform_library/host/service/nic/comm/182x_cmdq_adapt/182x_cmdq_ops.h new file mode 100644 index 00000000..57bb96e1 --- /dev/null +++ 
b/hinic5/src/dpu_platform_library/host/service/nic/comm/182x_cmdq_adapt/182x_cmdq_ops.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef _182X_CMDQ_PRIVATE_H_ +#define _182X_CMDQ_PRIVATE_H_ + +#include "ossl_knl.h" +#include "hinic5_nic_cmdq.h" + +struct hinic5_qp_ctxt_header { + u16 num_queues; + u16 queue_type; + u16 start_qid; + u16 rsvd; +}; + +struct hinic5_clean_queue_ctxt { + struct hinic5_qp_ctxt_header cmdq_hdr; + u32 rsvd; +}; + +struct hinic5_qp_ctxt_block { + struct hinic5_qp_ctxt_header cmdq_hdr; + union { + struct hinic5_sq_ctxt sq_ctxt[HINIC5_Q_CTXT_MAX]; + struct hinic5_rq_ctxt rq_ctxt[HINIC5_Q_CTXT_MAX]; + }; +}; + +struct hinic5_vlan_ctx { + u32 func_id; + u32 qid; /* if qid = 0xFFFF, config current function all queue */ + u32 vlan_id; + u32 vlan_mode; + u32 vlan_sel; +}; + +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/comm/187x_cmdq_adapt/187x_cmdq_ops.c b/hinic5/src/dpu_platform_library/host/service/nic/comm/187x_cmdq_adapt/187x_cmdq_ops.c new file mode 100644 index 00000000..a7f4bb55 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/comm/187x_cmdq_adapt/187x_cmdq_ops.c @@ -0,0 +1,192 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include "hinic5_nic_cmdq.h" +#include "187x_cmdq_ops.h" + +#define HINIC5_DEAULT_DROP_THD_OFF 0xFFFF + +#define SQ_PREFETCH_MAX 5 +#define SQ_PREFETCH_MIN 4 +#define SQ_PREFETCH_THRESHOLD 48 + +#define RQ_PREFETCH_MAX 4 +#define RQ_PREFETCH_MIN 2 +#define RQ_PREFETCH_THRESHOLD 32 + +#define RQ_PFH_TH 7 + +#define RQ_CTXT_CEQ_ATTR_PFH_TH_SHIFT 0 +#define RQ_CTXT_CEQ_ATTR_INTR_SHIFT 21 +#define RQ_CTXT_CEQ_ATTR_EN_SHIFT 31 + +#define RQ_CTXT_CEQ_ATTR_PFH_TH_MASK 0x1FU +#define RQ_CTXT_CEQ_ATTR_EN_MASK 0x1U +#define RQ_CTXT_CEQ_ATTR_INTR_MASK 0x3FFU + +#define HI187X_BASE_VF_QUE_ID(nic_io) (4 * (nic_io)->max_qps) + +static void 
hinic5_qp_prepare_cmdq_header(struct hinic5_qp_ctxt_header *qp_ctxt_hdr, + enum hinic5_qp_ctxt_type ctxt_type, u16 num_queues, + u16 q_id, u16 func_id) +{ + qp_ctxt_hdr->queue_type = ctxt_type; + qp_ctxt_hdr->num_queues = num_queues; + qp_ctxt_hdr->start_qid = q_id; + qp_ctxt_hdr->dest_func_id = func_id; + + hinic5_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr)); +} + +static u8 prepare_cmd_buf_qp_context_multi_store(struct hinic5_nic_io *nic_io, + struct hinic5_cmd_buf *cmd_buf, + enum hinic5_qp_ctxt_type ctxt_type, + u16 start_qid, u16 max_ctxts) +{ + struct hinic5_qp_ctxt_block *qp_ctxt_block = NULL; + u16 func_id; + u16 i; + + qp_ctxt_block = cmd_buf->buf; + func_id = hinic5_global_func_id(nic_io->hwdev); + hinic5_qp_prepare_cmdq_header(&qp_ctxt_block->cmdq_hdr, ctxt_type, + max_ctxts, start_qid, func_id); + + for (i = 0; i < max_ctxts; i++) { + if (ctxt_type == HINIC5_QP_CTXT_TYPE_RQ) + hinic5_rq_prepare_ctxt(nic_io, &nic_io->rq[start_qid + i], + &qp_ctxt_block->rq_ctxt[i]); + else + hinic5_sq_prepare_ctxt(nic_io, &nic_io->sq[start_qid + i], + start_qid + i, + &qp_ctxt_block->sq_ctxt[i]); + } + + return (u8)HINIC5_HTN_CMD_SQ_RQ_CONTEXT_MULTI_ST; +} + +static u8 prepare_cmd_buf_clean_tso_lro_space(struct hinic5_nic_io *nic_io, + struct hinic5_cmd_buf *cmd_buf, + enum hinic5_qp_ctxt_type ctxt_type) +{ + struct hinic5_clean_queue_ctxt *ctxt_block = NULL; + + ctxt_block = cmd_buf->buf; + ctxt_block->cmdq_hdr.dest_func_id = hinic5_global_func_id(nic_io->hwdev); + ctxt_block->cmdq_hdr.num_queues = nic_io->max_qps; + ctxt_block->cmdq_hdr.queue_type = ctxt_type; + ctxt_block->cmdq_hdr.start_qid = 0; + + hinic5_cpu_to_be32(ctxt_block, sizeof(*ctxt_block)); + + cmd_buf->size = sizeof(*ctxt_block); + return (u8)HINIC5_HTN_CMD_TSO_LRO_SPACE_CLEAN; +} + +static void prepare_rss_indir_table_cmd_header(const struct hinic5_nic_io *nic_io, + const struct hinic5_cmd_buf *cmd_buf) +{ + struct hinic5_rss_cmd_header *header = cmd_buf->buf; + + header->dest_func_id = 
hinic5_global_func_id(nic_io->hwdev); + hinic5_cpu_to_be32(header, sizeof(*header)); +} + +static u8 prepare_cmd_buf_set_rss_indir_table(const struct hinic5_nic_io *nic_io, + const u32 *indir_table, + struct hinic5_cmd_buf *cmd_buf) +{ + u32 i; + u8 *indir_tbl = NULL; + + indir_tbl = (u8 *)cmd_buf->buf + sizeof(struct hinic5_rss_cmd_header); + cmd_buf->size = sizeof(struct hinic5_rss_cmd_header) + NIC_RSS_INDIR_SIZE; + memset(indir_tbl, 0, NIC_RSS_INDIR_SIZE); + + prepare_rss_indir_table_cmd_header(nic_io, cmd_buf); + + for (i = 0; i < NIC_RSS_INDIR_SIZE; i++) + indir_tbl[i] = (u8)(*(indir_table + i)); + hinic5_cpu_to_be32(indir_tbl, NIC_RSS_INDIR_SIZE); + + return (u8)HINIC5_HTN_CMD_SET_RSS_INDIR_TABLE; +} + +static u8 prepare_cmd_buf_get_rss_indir_table(const struct hinic5_nic_io *nic_io, + const struct hinic5_cmd_buf *cmd_buf) +{ + memset(cmd_buf->buf, 0, cmd_buf->size); + prepare_rss_indir_table_cmd_header(nic_io, cmd_buf); + + return (u8)HINIC5_HTN_CMD_GET_RSS_INDIR_TABLE; +} + +static void cmd_buf_to_rss_indir_table(const struct hinic5_cmd_buf *cmd_buf, u32 *indir_table) +{ + u32 i; + u8 *indir_tbl = NULL; + + indir_tbl = (u8 *)cmd_buf->buf; + hinic5_be32_to_cpu(cmd_buf->buf, NIC_RSS_INDIR_SIZE); + for (i = 0; i < NIC_RSS_INDIR_SIZE; i++) + indir_table[i] = *(indir_tbl + i); +} + +static u8 prepare_cmd_buf_modify_svlan(struct hinic5_cmd_buf *cmd_buf, + u16 func_id, u16 vlan_tag, u16 q_id, u8 vlan_mode) +{ + struct hinic5_vlan_ctx *vlan_ctx = NULL; + + cmd_buf->size = sizeof(struct hinic5_vlan_ctx); + vlan_ctx = (struct hinic5_vlan_ctx *)cmd_buf->buf; + + vlan_ctx->dest_func_id = func_id; + vlan_ctx->start_qid = q_id; + vlan_ctx->vlan_tag = vlan_tag; + vlan_ctx->vlan_sel = 0; /* TPID0 in IPSU */ + vlan_ctx->vlan_mode = vlan_mode; + + hinic5_cpu_to_be32(vlan_ctx, sizeof(struct hinic5_vlan_ctx)); + return (u8)HINIC5_HTN_CMD_SVLAN_MODIFY; +} + +static void prepare_sq_ctxt_drop_and_prefetch(struct hinic5_sq_ctxt *sq_ctxt) +{ + sq_ctxt->pkt_drop_thd = + 
SQ_CTXT_PKT_DROP_THD_SET(HINIC5_DEAULT_DROP_THD_ON, THD_ON) | + SQ_CTXT_PKT_DROP_THD_SET(HINIC5_DEAULT_DROP_THD_OFF, THD_OFF); + + sq_ctxt->pref_cache = + SQ_CTXT_PREF_SET(SQ_PREFETCH_MIN, CACHE_MIN) | + SQ_CTXT_PREF_SET(SQ_PREFETCH_MAX, CACHE_MAX) | + SQ_CTXT_PREF_SET(SQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); +} + +static void prepare_rq_ctxt_ceq_and_prefetch + (struct hinic5_io_queue *rq, struct hinic5_rq_ctxt *rq_ctxt, bool support_rq_sw_compact_wqe) +{ + rq_ctxt->ceq_attr = RQ_CTXT_CEQ_ATTR_SET(0, EN) | + RQ_CTXT_CEQ_ATTR_SET(RQ_PFH_TH, PFH_TH) | + RQ_CTXT_CEQ_ATTR_SET(rq->msix_entry_idx, INTR); + + rq_ctxt->pref_cache = + RQ_CTXT_PREF_SET(RQ_PREFETCH_MIN, CACHE_MIN) | + RQ_CTXT_PREF_SET(RQ_PREFETCH_MAX, CACHE_MAX) | + RQ_CTXT_PREF_SET(RQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); +} + +struct hinic5_nic_cmdq_ops *hinic5_nic_cmdq_get_187x_ops(void) +{ + static struct hinic5_nic_cmdq_ops cmdq_187x_ops = { + .prepare_cmd_buf_clean_tso_lro_space = prepare_cmd_buf_clean_tso_lro_space, + .prepare_cmd_buf_qp_context_multi_store = prepare_cmd_buf_qp_context_multi_store, + .prepare_cmd_buf_modify_svlan = prepare_cmd_buf_modify_svlan, + .prepare_cmd_buf_set_rss_indir_table = prepare_cmd_buf_set_rss_indir_table, + .prepare_cmd_buf_get_rss_indir_table = prepare_cmd_buf_get_rss_indir_table, + .cmd_buf_to_rss_indir_table = cmd_buf_to_rss_indir_table, + .prepare_sq_ctxt_drop_and_prefetch = prepare_sq_ctxt_drop_and_prefetch, + .prepare_rq_ctxt_ceq_and_prefetch = prepare_rq_ctxt_ceq_and_prefetch, + }; + + return &cmdq_187x_ops; +} diff --git a/hinic5/src/dpu_platform_library/host/service/nic/comm/187x_cmdq_adapt/187x_cmdq_ops.h b/hinic5/src/dpu_platform_library/host/service/nic/comm/187x_cmdq_adapt/187x_cmdq_ops.h new file mode 100644 index 00000000..0f381cb2 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/comm/187x_cmdq_adapt/187x_cmdq_ops.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef 
_187X_CMDQ_PRIVATE_H_ +#define _187X_CMDQ_PRIVATE_H_ + +#include "ossl_knl.h" +#include "hinic5_nic_cmdq.h" + +struct hinic5_qp_ctxt_header { + u32 rsvd[2]; + u16 num_queues; + u16 queue_type; + u16 start_qid; + u16 dest_func_id; +}; + +struct hinic5_clean_queue_ctxt { + struct hinic5_qp_ctxt_header cmdq_hdr; +}; + +struct hinic5_qp_ctxt_block { + struct hinic5_qp_ctxt_header cmdq_hdr; + union { + struct hinic5_sq_ctxt sq_ctxt[HINIC5_Q_CTXT_MAX]; + struct hinic5_rq_ctxt rq_ctxt[HINIC5_Q_CTXT_MAX]; + }; +}; + +struct hinic5_rss_cmd_header { + u32 rsv[3]; + u16 rsv1; + u16 dest_func_id; +}; + +/* NIC HTN CMD */ +enum hinic5_htn_cmd { + HINIC5_HTN_CMD_SQ_RQ_CONTEXT_MULTI_ST = 0x20, + HINIC5_HTN_CMD_SQ_RQ_CONTEXT_MULTI_LD, + HINIC5_HTN_CMD_TSO_LRO_SPACE_CLEAN, + HINIC5_HTN_CMD_SVLAN_MODIFY, + HINIC5_HTN_CMD_SET_RSS_INDIR_TABLE, + HINIC5_HTN_CMD_GET_RSS_INDIR_TABLE, +}; + +struct hinic5_vlan_ctx { + u32 rsv[2]; + u16 vlan_tag; + u8 vlan_sel; + u8 vlan_mode; + u16 start_qid; + u16 dest_func_id; +}; + +struct hinic5_car_cmd_header { + u32 rsv[2]; + u32 op_num; /* 配置的car_id个数,一次最少1个,最多32个 */ + u16 rsv1; + u16 index; /* 配置的起始index,16B为单位,index必须32B对齐 */ +}; + +struct hinic5_car_cmd_payload { + u32 context[4]; /* 硬件写,驱动读 */ + u32 profile[4]; /* 驱动写,硬件读 */ +}; + +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_cmdq_adapt.c b/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_cmdq_adapt.c new file mode 100644 index 00000000..0b94238b --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_cmdq_adapt.c @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include "hinic5_nic_cmdq.h" + +void hinic5_nic_cmdq_adapt_init(struct hinic5_nic_io *nic_io) +{ + if (!HINIC5_SUPPORT_FEATURE(nic_io->hwdev, HTN_CMDQ)) + nic_io->cmdq_ops = hinic5_nic_cmdq_get_182x_ops(); + else + nic_io->cmdq_ops = hinic5_nic_cmdq_get_187x_ops(); +} diff --git 
a/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_mag_cfg.c b/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_mag_cfg.c new file mode 100644 index 00000000..c0fb0385 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_mag_cfg.c @@ -0,0 +1,1557 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/ethtool.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/netdevice.h> +#include <linux/module.h> + +#include "ossl_knl.h" +#include "comm_defs.h" +#include "hinic5_crm.h" +#include "hinic5_hw.h" +#include "hinic5_nic_io.h" +#include "hinic5_nic_cfg.h" +#include "hinic5_srv_nic.h" +#include "hinic5_nic.h" +#include "cfm_cmd.h" +#include "hinic5_common.h" +#include "mag_mpu_cmd.h" +#include "nic_mpu_cmd.h" +#include "hinic5_nic_event.h" +#include "mag_mpu_cmd_defs.h" +#include "hinic5_mag_cfg.h" + +static int mag_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); +static int mag_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, + u16 channel); + +int hinic5_set_port_enable(void *hwdev, bool enable, u16 channel) +{ + struct mag_cmd_set_port_enable en_state; + u16 out_size = sizeof(en_state); + struct hinic5_nic_io *nic_io = NULL; + int err; + + if (!hwdev) + return -EINVAL; + + if (hinic5_func_type(hwdev) == TYPE_VF) + return 0; + + memset(&en_state, 0, sizeof(en_state)); + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + en_state.function_id = hinic5_global_func_id(hwdev); + en_state.state = enable ? 
+				MAG_CMD_TX_ENABLE | MAG_CMD_RX_ENABLE :
+				MAG_CMD_PORT_DISABLE;
+
+	err = mag_msg_to_mgmt_sync_ch(hwdev, MAG_CMD_SET_PORT_ENABLE, &en_state,
+				      sizeof(en_state), &en_state, &out_size,
+				      channel);
+	if (err != 0 || out_size == 0 || en_state.head.status != 0) {
+		nic_err(nic_io->dev_hdl, "Failed to set port state, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n",
+			err, en_state.head.status, out_size, channel);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/* Query the physical port (MAC) statistics from the management CPU.
+ * @hwdev: device handle; @stats: caller-supplied buffer for the counters.
+ * Returns 0 on success, -EINVAL on bad arguments, -ENOMEM on allocation
+ * failure, -EIO on a management-channel error.
+ */
+int hinic5_get_phy_port_stats(void *hwdev, struct mag_cmd_port_stats *stats)
+{
+	struct mag_cmd_get_port_stat *port_stats = NULL;
+	struct mag_cmd_port_stats_info stats_info;
+	u16 out_size = sizeof(*port_stats);
+	struct hinic5_nic_io *nic_io = NULL;
+	int err;
+
+	/* invalid arguments are -EINVAL; -ENOMEM is reserved for the
+	 * allocation failure below
+	 */
+	if (!hwdev || !stats)
+		return -EINVAL;
+
+	port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL);
+	if (!port_stats)
+		return -ENOMEM;
+
+	nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC);
+	if (!nic_io) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	memset(&stats_info, 0, sizeof(stats_info));
+	stats_info.port_id = hinic5_physical_port_id(hwdev);
+
+	err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_GET_PORT_STAT,
+				   &stats_info, sizeof(stats_info),
+				   port_stats, &out_size);
+	if (err != 0 || out_size == 0 || port_stats->head.status != 0) {
+		nic_err(nic_io->dev_hdl,
+			"Failed to get port statistics, err: %d, status: 0x%x, out size: 0x%x\n",
+			err, port_stats->head.status, out_size);
+		err = -EIO;
+		goto out;
+	}
+
+	memcpy(stats, &port_stats->counter, sizeof(*stats));
+
+out:
+	kfree(port_stats);
+
+	return err;
+}
+EXPORT_SYMBOL(hinic5_get_phy_port_stats);
+
+int hinic5_set_port_funcs_state(void *hwdev, bool enable)
+{
+	return 0;
+}
+
+int hinic5_reset_port_link_cfg(void *hwdev)
+{
+	return 0;
+}
+
+int hinic5_force_port_relink(void *hwdev)
+{
+	return 0;
+}
+
+/* Enable/disable port autonegotiation via the link-settings interface. */
+int hinic5_set_autoneg(void *hwdev, bool enable)
+{
+	struct hinic5_link_ksettings settings = {0};
+	struct hinic5_nic_io *nic_io = NULL;
+	u32 set_settings = 0;
+
+	if (!hwdev)
+		return -EINVAL;
+
+ nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + set_settings |= HILINK_LINK_SET_AUTONEG; + settings.valid_bitmap = set_settings; + settings.autoneg = enable ? PORT_CFG_AN_ON : PORT_CFG_AN_OFF; + + return hinic5_set_link_settings(hwdev, &settings); +} + +static int hinic5_cfg_loopback_mode(struct hinic5_nic_io *nic_io, u8 opcode, + u8 *mode, u8 *enable) +{ + struct mag_cmd_cfg_loopback_mode lp; + u16 out_size = sizeof(lp); + int err; + + memset(&lp, 0, sizeof(lp)); + lp.port_id = hinic5_physical_port_id(nic_io->hwdev); + lp.opcode = opcode; + if (opcode == MGMT_MSG_CMD_OP_SET) { + lp.lp_mode = *mode; + lp.lp_en = *enable; + } + + err = mag_msg_to_mgmt_sync(nic_io->hwdev, MAG_CMD_CFG_LOOPBACK_MODE, + &lp, sizeof(lp), &lp, &out_size); + if (err != 0 || out_size == 0 || lp.head.status != 0) { + nic_err(nic_io->dev_hdl, + "Failed to %s loopback mode, err: %d, status: 0x%x, out size: 0x%x\n", + opcode == MGMT_MSG_CMD_OP_SET ? "set" : "get", + err, lp.head.status, out_size); + return -EIO; + } + + if (opcode == MGMT_MSG_CMD_OP_GET) { + *mode = lp.lp_mode; + *enable = lp.lp_en; + } + + return 0; +} + +int hinic5_get_loopback_mode(void *hwdev, u8 *mode, u8 *enable) +{ + struct hinic5_nic_io *nic_io = NULL; + + if (!hwdev || !mode || !enable) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + return hinic5_cfg_loopback_mode(nic_io, MGMT_MSG_CMD_OP_GET, mode, + enable); +} + +int hinic5_set_loopback_mode(void *hwdev, u8 mode, u8 enable) +{ + struct hinic5_nic_io *nic_io = NULL; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + if (mode < LOOP_MODE_MIN || mode > LOOP_MODE_MAX) { + nic_err(nic_io->dev_hdl, "Invalid loopback mode %u to set\n", + mode); + return -EINVAL; + } + + return hinic5_cfg_loopback_mode(nic_io, MGMT_MSG_CMD_OP_SET, &mode, + &enable); +} + +int 
hinic5_set_led_status(void *hwdev, enum mag_led_type type, + enum mag_led_mode mode) +{ + struct hinic5_nic_io *nic_io = NULL; + struct mag_cmd_set_led_cfg led_info; + u16 out_size = sizeof(led_info); + int err; + + if (!hwdev) + return -EFAULT; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + memset(&led_info, 0, sizeof(led_info)); + + led_info.function_id = hinic5_global_func_id(hwdev); + led_info.type = type; + led_info.mode = mode; + + err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_SET_LED_CFG, &led_info, + sizeof(led_info), &led_info, &out_size); + if (err != 0 || led_info.head.status != 0 || out_size == 0) { + nic_err(nic_io->dev_hdl, "Failed to set led status, err: %d, status: 0x%x, out size: 0x%x\n", + err, led_info.head.status, out_size); + return -EIO; + } + + return 0; +} + +int hinic5_set_link_settings(void *hwdev, + struct hinic5_link_ksettings *settings) +{ + struct mag_cmd_set_port_cfg info; + u16 out_size = sizeof(info); + struct hinic5_nic_io *nic_io = NULL; + int err; + + if (!hwdev || !settings) + return -EINVAL; + + memset(&info, 0, sizeof(info)); + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + info.port_id = hinic5_physical_port_id(hwdev); + info.config_bitmap = settings->valid_bitmap; + info.autoneg = settings->autoneg; + info.speed = settings->speed; + info.fec = settings->fec; + + err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_SET_PORT_CFG, &info, + sizeof(info), &info, &out_size); + if (err != 0 || out_size == 0 || info.head.status != 0) { + nic_err(nic_io->dev_hdl, "Failed to set link settings, err: %d, status: 0x%x, out size: 0x%x\n", + err, info.head.status, out_size); + return -EIO; + } + + return info.head.status; +} + +int hinic5_get_bond_link_state(void *hwdev, struct hinic5_nic_io *nic_io, u8 *link_state) +{ + int err; + struct hinic5_bond_link_info bond_info = {0}; + u16 out_size = sizeof(bond_info); + + bond_info.port_id = 
hinic5_physical_port_id(hwdev); + + err = hinic5_msg_to_mgmt_sync(hwdev, HINIC5_MOD_CFM, + CFM_MPU_CMD_BOND_LINK_INFO_GET, &bond_info, + sizeof(bond_info), &bond_info, &out_size, + HINIC5_BOND_MSG_TIMEOUT_MS, + HINIC5_CHANNEL_NIC); + if (err != 0 || out_size == 0 || bond_info.head.status != 0) { + nic_err(nic_io->dev_hdl, "Failed to get bond link state, err: %d, status: 0x%x, out size: 0x%x\n", + err, bond_info.head.status, out_size); + return -EIO; + } + + *link_state = bond_info.link_status; + + return 0; +} + +int hinic5_get_link_state(void *hwdev, u8 *link_state) +{ + struct mag_cmd_get_link_status get_link; + u16 out_size = sizeof(get_link); + struct hinic5_nic_io *nic_io = NULL; + int err; + + if (!hwdev || !link_state) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + /* 如果此设备开启半卸载bond 那么需要从bond获取状态 */ + if ((nic_io->feature_cap & NIC_F_HALF_BOND_OFFLOAD) != 0) + return hinic5_get_bond_link_state(hwdev, nic_io, link_state); + + memset(&get_link, 0, sizeof(get_link)); + get_link.port_id = hinic5_physical_port_id(hwdev); + + err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_GET_LINK_STATUS, &get_link, + sizeof(get_link), &get_link, &out_size); + if (err != 0 || out_size == 0 || get_link.head.status != 0) { + nic_err(nic_io->dev_hdl, "Failed to get link state, err: %d, status: 0x%x, out size: 0x%x\n", + err, get_link.head.status, out_size); + return -EIO; + } + + *link_state = get_link.status; + + return 0; +} + +void hinic5_notify_vf_link_status(struct hinic5_nic_io *nic_io, + u16 vf_id, u8 link_status) +{ + struct mag_cmd_get_link_status link; + struct vf_data_storage *vf_infos = nic_io->vf_infos; + u16 out_size = sizeof(link); + int err; + + memset(&link, 0, sizeof(link)); + if (vf_infos[HW_VF_ID_TO_OS(vf_id)].registered) { + link.status = link_status; + link.port_id = hinic5_physical_port_id(nic_io->hwdev); + err = hinic5_mbox_to_vf_without_ack(nic_io->hwdev, vf_id, HINIC5_MOD_HILINK, + 
MAG_CMD_GET_LINK_STATUS, &link, + sizeof(link), HINIC5_CHANNEL_NIC); + if (err == MBOX_ERRCODE_UNKNOWN_DES_FUNC) { + nic_warn(nic_io->dev_hdl, "VF%d not initialized, disconnect it\n", + HW_VF_ID_TO_OS(vf_id)); + hinic5_unregister_vf(nic_io, vf_id); + return; + } + if (err != 0 || out_size == 0 || link.head.status != 0) + nic_err(nic_io->dev_hdl, + "Send link change event to VF %d failed, err: %d, status: 0x%x, out_size: 0x%x\n", + HW_VF_ID_TO_OS(vf_id), err, link.head.status, out_size); + } +} + +void hinic5_notify_all_vfs_link_changed(void *hwdev, u8 link_status) +{ + struct hinic5_nic_io *nic_io = NULL; + u16 i; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return; + + nic_io->link_status = link_status; + for (i = 1; i <= nic_io->max_vfs; i++) { + if (!nic_io->vf_infos[HW_VF_ID_TO_OS(i)].link_forced) + hinic5_notify_vf_link_status(nic_io, i, link_status); + } +} + +static char *g_hw_to_char_fec[HILINK_FEC_MAX_TYPE] = {"not set", "rsfec", "basefec", + "nofec", "llrsfec"}; +static char *g_hw_to_speed_info[PORT_SPEED_UNKNOWN] = {"not set", "10MB", "100MB", "1GB", "10GB", + "25GB", "40GB", "50GB", "100GB", "200GB", + "400GB", "800GB"}; +static char *g_hw_to_an_state_info[PORT_CFG_AN_OFF + 1] = {"not set", "on", "off"}; + +struct port_type_table { + u32 port_type; + char *port_type_name; +}; + +static const struct port_type_table port_optical_type_table_s[] = { + {LINK_PORT_UNKNOWN, "UNKNOWN"}, + {LINK_PORT_OPTICAL_MM, "optical_sr"}, + {LINK_PORT_OPTICAL_SM, "optical_lr"}, + {LINK_PORT_PAS_COPPER, "copper"}, + {LINK_PORT_ACC, "ACC"}, + {LINK_PORT_BASET, "baset"}, + {LINK_PORT_AOC, "AOC"}, + {LINK_PORT_ELECTRIC, "electric"}, + {LINK_PORT_BACKBOARD_INTERFACE, "interface"}, +}; + +static char *get_port_type_name(u32 type) +{ + int i; + + for (i = 0; i < ARRAY_LEN(port_optical_type_table_s); i++) { + if (type == port_optical_type_table_s[i].port_type) + return port_optical_type_table_s[i].port_type_name; + } + return "UNKNOWN TYPE"; +} + 
+static void get_port_type(struct hinic5_nic_io *nic_io,
+			  struct mag_cmd_event_port_info *info, char **port_type)
+{
+	/* Map the reported port type to a printable name; leave *port_type
+	 * at the caller's default when the value is out of range.
+	 */
+	if (info->port_type <= LINK_PORT_BACKBOARD_INTERFACE)
+		*port_type = get_port_type_name(info->port_type);
+	else
+		nic_info(nic_io->dev_hdl, "Unknown port type: %u\n", info->port_type);
+}
+
+/* Append transceiver temperature and optical power readings to @str
+ * (buffer capacity @str_len). QSFP modules report four rx lanes; SFP
+ * modules report a single rx/tx power pair.
+ * Returns 0 on success, -EINVAL on a format error.
+ */
+static int get_port_temperature_power(const struct mag_cmd_event_port_info *info,
+				      char *str, u16 str_len)
+{
+	char arr[CAP_INFO_MAX_LEN] = {0};
+	int err;
+
+	err = snprintf(arr, CAP_INFO_MAX_LEN, "%s, %s, Temperature: %u", str,
+		       (info->sfp_type != 0) ? "QSFP" : "SFP", info->cable_temp);
+	if (err < 0)
+		return -EINVAL;
+
+	if (info->sfp_type != 0) {
+		/* QSFP: return here so the SFP format below does not
+		 * overwrite the four-lane rx power output.
+		 */
+		err = snprintf(str, str_len, "%s, rx power: %uuW %uuW %uuW %uuW",
+			       arr, info->power[0x0], info->power[0x1],
+			       info->power[0x2], info->power[0x3]);
+		return (err < 0) ? -EINVAL : 0;
+	}
+
+	err = snprintf(str, str_len, "%s, rx power: %uuW, tx power: %uuW",
+		       arr, info->power[0x0], info->power[0x1]);
+	if (err < 0)
+		return -EINVAL;
+	return 0;
+}
+
+struct speed_mode_map_s speed_mode_map[] = {
+	{PORT_SPEED_MODE_400G, PORT_SPEED_400G},
+	{PORT_SPEED_MODE_800G, PORT_SPEED_800G},
+};
+
+/* Translate an internal speed-mode encoding to the real port speed. */
+u32 get_real_port_speed_from_inner_speed(u8 speed)
+{
+	u32 i;
+
+	if (speed <= PORT_SPEED_MODE_START) { /* speeds at or below 200G are not remapped */
+		return speed;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(speed_mode_map); i++)
+		if (speed_mode_map[i].speed_mode == speed)
+			return speed_mode_map[i].real_speed;
+
+	pr_err("unsupported port speed mode: 0x%x\n", speed);
+	return speed;
+}
+
+static void print_cable_info(struct hinic5_nic_io *nic_io, struct mag_cmd_event_port_info *info)
+{
+	char tmp_str[CAP_INFO_MAX_LEN] = {0};
+	char tmp_vendor[VENDOR_MAX_LEN] = {0};
+	char tmp_vendor_sn[VENDOR_MAX_LEN] = {0};
+	char *port_type = "Unknown port type";
+	int i;
+	int err = 0;
+
+	if (info->gpio_insert != 0) {
+		nic_info(nic_io->dev_hdl, "Cable unpresent\n");
+		return;
+	}
+
+	get_port_type(nic_io, info, &port_type);
+
+	/* strip trailing spaces from the vendor name */
+	for (i = 
(int)sizeof(info->vendor_name) - 1; i >= 0; i--) { + if (info->vendor_name[i] == ' ') + info->vendor_name[i] = '\0'; + else + break; + } + + memcpy(tmp_vendor, info->vendor_name, sizeof(info->vendor_name)); + memcpy(tmp_vendor_sn, info->vendor_sn, sizeof(info->vendor_sn)); + + err = snprintf(tmp_str, CAP_INFO_MAX_LEN, + "Vendor: %s, %s, %s, length: %um, max_speed: %uGbps", + tmp_vendor, tmp_vendor_sn, port_type, info->cable_length, + get_real_port_speed_from_inner_speed(info->max_speed)); + if (err <= 0) { + nic_info(nic_io->dev_hdl, "Print vendor failed.\n"); + return; + } + + if (info->port_type == LINK_PORT_OPTICAL_MM || info->port_type == LINK_PORT_OPTICAL_SM || + info->port_type == LINK_PORT_AOC) { + err = get_port_temperature_power(info, tmp_str, CAP_INFO_MAX_LEN); + if (err != 0) + return; + } + + nic_info(nic_io->dev_hdl, "Cable information: %s\n", tmp_str); +} + +static void print_link_info(struct hinic5_nic_io *nic_io, + const struct mag_cmd_event_port_info *info, + enum hinic5_nic_event_type type) +{ + char *fec = "None"; + char *speed = "None"; + char *an_state = "None"; + + if (info->fec < HILINK_FEC_MAX_TYPE) + fec = g_hw_to_char_fec[info->fec]; + else + nic_info(nic_io->dev_hdl, "Unknown fec type: %u\n", info->fec); + + if (info->an_state > PORT_CFG_AN_OFF) { + nic_info(nic_io->dev_hdl, "an_state %u is invalid", info->an_state); + return; + } + + an_state = g_hw_to_an_state_info[info->an_state]; + + if (info->speed >= PORT_SPEED_UNKNOWN) { + nic_info(nic_io->dev_hdl, "speed %u is invalid", info->speed); + return; + } + + speed = g_hw_to_speed_info[info->speed]; + nic_info(nic_io->dev_hdl, "Link information: speed %s, %s, autoneg %s", + speed, fec, an_state); +} + +void print_port_info(struct hinic5_nic_io *nic_io, struct mag_cmd_event_port_info *port_info, + enum hinic5_nic_event_type type) +{ + print_cable_info(nic_io, port_info); + + print_link_info(nic_io, port_info, type); + + if (type == EVENT_NIC_LINK_UP) + return; + + nic_info(nic_io->dev_hdl, 
"PMA ctrl: %s, tx %s, rx %s, PMA fifo reg: 0x%x, PMA signal ok reg: 0x%x, RF/LF status reg: 0x%x\n", + port_info->pma_ctrl == 1 ? "off" : "on", + (port_info->tx_enable != 0) ? "enable" : "disable", + (port_info->rx_enable != 0) ? "enable" : "disable", port_info->pma_fifo_reg, + port_info->pma_signal_ok_reg, port_info->rf_lf); + nic_info(nic_io->dev_hdl, "alos: 0x%x, rx_los: %u, PCS 64 66b reg: 0x%x, PCS link: 0x%x, MAC link: 0x%x PCS_err_cnt: 0x%x\n", + port_info->alos, port_info->rx_los, port_info->pcs_64_66b_reg, + port_info->pcs_link, port_info->pcs_mac_link, port_info->pcs_err_cnt); + nic_info(nic_io->dev_hdl, "his_link_machine_state = 0x%08x, cur_link_machine_state = 0x%08x\n", + port_info->his_link_machine_state, port_info->cur_link_machine_state); +} + +static int hinic5_get_vf_link_status_msg_handler(struct hinic5_nic_io *nic_io, + u16 vf_id, void *buf_in, + u16 in_size, void *buf_out, + u16 *out_size) +{ + struct vf_data_storage *vf_infos = nic_io->vf_infos; + struct mag_cmd_get_link_status *get_link = buf_out; + bool link_forced, link_up; + + link_forced = vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced; + link_up = vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up; + + if (link_forced) + get_link->status = link_up ? 
+ HINIC5_LINK_UP : HINIC5_LINK_DOWN; + else + get_link->status = nic_io->link_status; + + get_link->head.status = 0; + *out_size = sizeof(*get_link); + + return 0; +} + +int hinic5_refresh_nic_cfg(void *hwdev, struct mag_port_info *port_info) +{ + int err = 0; + struct hinic5_nic_io *nic_io = NULL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("Nic io is null\n"); + return -ENODEV; + } + + if (HINIC5_SUPPORT_RATE_LIMIT(hwdev)) { + err = hinic5_set_pf_rate(hwdev, port_info->speed); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to refresh tx pf bandwidth limit\n"); + return err; + } + } + + return err; +} + +static void get_port_info(void *hwdev, + const struct mag_cmd_get_link_status *link_status, + struct hinic5_event_link_info *link_info) +{ + struct mag_port_info port_info = {0}; + struct hinic5_nic_io *nic_io = NULL; + int err; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("Nic io is null\n"); + return; + } + if ((hinic5_func_type(hwdev) != TYPE_VF) && link_status->status != 0) { + err = hinic5_get_port_info(hwdev, &port_info, HINIC5_CHANNEL_NIC); + if (err != 0) { + nic_warn(nic_io->dev_hdl, "Failed to get port info\n"); + } else { + link_info->valid = 1; + link_info->port_type = port_info.port_type; + link_info->autoneg_cap = port_info.autoneg_cap; + link_info->autoneg_state = port_info.autoneg_state; + link_info->duplex = port_info.duplex; + link_info->speed = port_info.speed; + hinic5_refresh_nic_cfg(hwdev, &port_info); + } + } +} + +static void link_status_event_handler(void *hwdev, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct mag_cmd_get_link_status *link_status = buf_in; + struct mag_cmd_get_link_status *ret_link_status = NULL; + struct hinic5_event_info event_info = {0}; + struct hinic5_event_link_info *link_info = (void *)event_info.event_data; + struct hinic5_nic_io *nic_io = NULL; + + nic_io = hinic5_get_service_adapter(hwdev, 
SERVICE_T_NIC);
+	/* After bonding is enabled, link status is reported by the bond driver instead */
+	if (!nic_io || ((nic_io->feature_cap & NIC_F_HALF_BOND_OFFLOAD) != 0))
+		return;
+
+	if (in_size != sizeof(*link_status)) {
+		nic_err(nic_io->dev_hdl, "Invalid link status event cmd, length: %u, should be %lu\n",
+			in_size, sizeof(*link_status));
+		return;
+	}
+
+	nic_info(nic_io->dev_hdl, "Link status report received, func_id: %u, status: %u\n",
+		 hinic5_global_func_id(hwdev), link_status->status);
+
+	hinic5_link_event_stats(hwdev, link_status->status);
+
+	/* link event reported only after set vport enable */
+	get_port_info(hwdev, link_status, link_info);
+
+	event_info.service = EVENT_SRV_NIC;
+	event_info.type = (link_status->status != 0) ?
+			  EVENT_NIC_LINK_UP : EVENT_NIC_LINK_DOWN;
+
+	hinic5_event_callback(hwdev, &event_info);
+
+	if (hinic5_func_type(hwdev) != TYPE_VF) {
+		hinic5_notify_all_vfs_link_changed(hwdev, link_status->status);
+		ret_link_status = buf_out;
+		ret_link_status->head.status = 0;
+		*out_size = sizeof(*ret_link_status);
+	}
+}
+
+static void port_info_event_printf(void *hwdev, void *buf_in, u16 in_size,
+				   void *buf_out, u16 *out_size)
+{
+	struct mag_cmd_event_port_info *port_info = buf_in;
+	struct hinic5_nic_io *nic_io = NULL;
+	struct hinic5_event_info event_info;
+	enum hinic5_nic_event_type type;
+
+	if (!hwdev) {
+		pr_err("hwdev is NULL\n");
+		return;
+	}
+
+	nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC);
+	if (!nic_io) {
+		pr_err("Nic io is null\n");
+		return;
+	}
+	if (in_size != sizeof(*port_info)) {
+		nic_info(nic_io->dev_hdl, "Invalid port info message size %u, should be %lu\n",
+			 in_size, sizeof(*port_info));
+		return;
+	}
+
+	/* Do not handle the event when bonding is enabled */
+	if ((nic_io->feature_cap & NIC_F_HALF_BOND_OFFLOAD) != 0) {
+		nic_info(nic_io->dev_hdl, "bond enable ignore port event type: %d\n",
+			 port_info->event_type);
+		return;
+	}
+
+	((struct mag_cmd_event_port_info *)buf_out)->head.status = 0;
+
+	type = port_info->event_type;
+	if (type < EVENT_NIC_LINK_DOWN || type >
EVENT_NIC_LINK_UP) { + nic_info(nic_io->dev_hdl, "Invalid hilink info report, type: %d\n", + type); + return; + } + + print_port_info(nic_io, port_info, type); + + memset(&event_info, 0, sizeof(event_info)); + event_info.service = EVENT_SRV_NIC; + event_info.type = type; + + *out_size = sizeof(*port_info); + + hinic5_event_callback(hwdev, &event_info); +} + +static void cable_plug_event(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct mag_cmd_wire_event *plug_event = buf_in; + struct hinic5_port_routine_cmd *rt_cmd = NULL; + struct hinic5_port_routine_cmd_extern *rt_cmd_ext = NULL; + struct hinic5_nic_io *nic_io = NULL; + struct hinic5_event_info event_info; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return; + + if (in_size != sizeof(*plug_event)) { + nic_err(nic_io->dev_hdl, "Invalid cable plug cmd, length: %u, should be %lu\n", + in_size, sizeof(*plug_event)); + return; + } + + /* 如果开启bond则不处理 */ + if ((nic_io->feature_cap & NIC_F_HALF_BOND_OFFLOAD) != 0) { + nic_info(nic_io->dev_hdl, "bond enable ignore cable plug event\n"); + return; + } + + rt_cmd = &nic_io->nic_cfg.rt_cmd; + rt_cmd_ext = &nic_io->nic_cfg.rt_cmd_ext; + + mutex_lock(&nic_io->nic_cfg.sfp_mutex); + rt_cmd->mpu_send_sfp_abs = false; + rt_cmd->mpu_send_sfp_info = false; + rt_cmd_ext->mpu_send_xsfp_tlv_info = false; + mutex_unlock(&nic_io->nic_cfg.sfp_mutex); + + memset(&event_info, 0, sizeof(event_info)); + event_info.service = EVENT_SRV_NIC; + event_info.type = EVENT_NIC_PORT_MODULE_EVENT; + ((struct hinic5_port_module_event *)(void *)event_info.event_data)->type = + (plug_event->status != 0) ? 
HINIC5_PORT_MODULE_CABLE_PLUGGED : + HINIC5_PORT_MODULE_CABLE_UNPLUGGED; + + *out_size = sizeof(*plug_event); + plug_event = buf_out; + plug_event->head.status = 0; + + hinic5_event_callback(hwdev, &event_info); +} + +static void port_sfp_info_event(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct mag_cmd_get_xsfp_info *sfp_info = buf_in; + struct hinic5_port_routine_cmd *rt_cmd = NULL; + struct hinic5_port_routine_cmd_extern *rt_cmd_ext = NULL; + struct hinic5_nic_io *nic_io = NULL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return; + if (in_size != sizeof(*sfp_info)) { + nic_err(nic_io->dev_hdl, "Invalid sfp info cmd, length: %u, should be %lu\n", + in_size, sizeof(*sfp_info)); + return; + } + + rt_cmd = &nic_io->nic_cfg.rt_cmd; + rt_cmd_ext = &nic_io->nic_cfg.rt_cmd_ext; + mutex_lock(&nic_io->nic_cfg.sfp_mutex); + memcpy(&rt_cmd->std_sfp_info, sfp_info, sizeof(struct mag_cmd_get_xsfp_info)); + rt_cmd->mpu_send_sfp_info = true; + rt_cmd_ext->mpu_send_xsfp_tlv_info = false; + mutex_unlock(&nic_io->nic_cfg.sfp_mutex); +} + +#define xsfp_tlv_pre_info_len 4 +static void port_xsfp_tlv_info_event(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, const u16 *out_size) +{ + struct tag_mag_cmd_get_xsfp_tlv_rsp *xsfp_tlv_info = buf_in; + struct hinic5_port_routine_cmd *rt_cmd = NULL; + struct hinic5_port_routine_cmd_extern *rt_cmd_ext = NULL; + struct hinic5_nic_io *nic_io = NULL; + size_t cpy_len = in_size - sizeof(struct mgmt_msg_head) - xsfp_tlv_pre_info_len; + + if (in_size <= sizeof(struct mgmt_msg_head) + xsfp_tlv_pre_info_len) + return; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return; + + rt_cmd = &nic_io->nic_cfg.rt_cmd; + rt_cmd_ext = &nic_io->nic_cfg.rt_cmd_ext; + mutex_lock(&nic_io->nic_cfg.sfp_mutex); + rt_cmd_ext->std_xsfp_tlv_info.port_id = xsfp_tlv_info->port_id; + + memcpy(rt_cmd_ext->std_xsfp_tlv_info.tlv_buf, xsfp_tlv_info->tlv_buf, 
cpy_len); + + rt_cmd->mpu_send_sfp_info = false; + rt_cmd_ext->mpu_send_xsfp_tlv_info = true; + mutex_unlock(&nic_io->nic_cfg.sfp_mutex); +} + +static void port_sfp_abs_event(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct mag_cmd_get_xsfp_present *sfp_abs = buf_in; + struct hinic5_port_routine_cmd *rt_cmd = NULL; + struct hinic5_nic_io *nic_io = NULL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return; + if (in_size != sizeof(*sfp_abs)) { + nic_err(nic_io->dev_hdl, "Invalid sfp absent cmd, length: %u, should be %lu\n", + in_size, sizeof(*sfp_abs)); + return; + } + + rt_cmd = &nic_io->nic_cfg.rt_cmd; + mutex_lock(&nic_io->nic_cfg.sfp_mutex); + memcpy(&rt_cmd->abs, sfp_abs, sizeof(struct mag_cmd_get_xsfp_present)); + rt_cmd->mpu_send_sfp_abs = true; + mutex_unlock(&nic_io->nic_cfg.sfp_mutex); +} + +bool hinic5_if_sfp_absent(void *hwdev) +{ + struct hinic5_nic_io *nic_io = NULL; + struct hinic5_port_routine_cmd *rt_cmd = NULL; + struct mag_cmd_get_xsfp_present sfp_abs; + u8 port_id = hinic5_physical_port_id(hwdev); + u16 out_size = sizeof(sfp_abs); + int err; + bool sfp_abs_status = 0; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return true; + + memset(&sfp_abs, 0, sizeof(sfp_abs)); + + rt_cmd = &nic_io->nic_cfg.rt_cmd; + mutex_lock(&nic_io->nic_cfg.sfp_mutex); + if (rt_cmd->mpu_send_sfp_abs) { + if (rt_cmd->abs.head.status != 0) { + mutex_unlock(&nic_io->nic_cfg.sfp_mutex); + return true; + } + + sfp_abs_status = (bool)rt_cmd->abs.abs_status; + mutex_unlock(&nic_io->nic_cfg.sfp_mutex); + return sfp_abs_status; + } + mutex_unlock(&nic_io->nic_cfg.sfp_mutex); + + sfp_abs.port_id = port_id; + err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_GET_XSFP_PRESENT, + &sfp_abs, sizeof(sfp_abs), &sfp_abs, + &out_size); + if (sfp_abs.head.status != 0 || err != 0 || out_size == 0) { + nic_err(nic_io->dev_hdl, + "Failed to get port%u sfp absent status, err: %d, status: 0x%x, out 
size: 0x%x\n", + port_id, err, sfp_abs.head.status, out_size); + return true; + } + + return (sfp_abs.abs_status == 0 ? false : true); +} + +int hinic5_get_sfp_tlv_info(void *hwdev, struct drv_tag_mag_cmd_get_xsfp_tlv_rsp *sfp_tlv_info, + const struct tag_mag_cmd_get_xsfp_tlv_req *sfp_tlv_info_req) +{ + struct hinic5_nic_io *nic_io = NULL; + struct hinic5_port_routine_cmd_extern *rt_cmd_ext = NULL; + u16 out_size = sizeof(*sfp_tlv_info); + int err; + + if (!hwdev || !sfp_tlv_info) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + rt_cmd_ext = &nic_io->nic_cfg.rt_cmd_ext; + mutex_lock(&nic_io->nic_cfg.sfp_mutex); + if (rt_cmd_ext->mpu_send_xsfp_tlv_info) { + if (rt_cmd_ext->std_xsfp_tlv_info.head.status != 0) { + mutex_unlock(&nic_io->nic_cfg.sfp_mutex); + return -EIO; + } + + memcpy(sfp_tlv_info, &rt_cmd_ext->std_xsfp_tlv_info, sizeof(*sfp_tlv_info)); + mutex_unlock(&nic_io->nic_cfg.sfp_mutex); + return 0; + } + + mutex_unlock(&nic_io->nic_cfg.sfp_mutex); + + err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_GET_XSFP_TLV_INFO, (void *)sfp_tlv_info_req, + sizeof(*sfp_tlv_info_req), sfp_tlv_info, &out_size); + if (sfp_tlv_info->head.status != 0 || err != 0 || out_size == 0) { + nic_err(nic_io->dev_hdl, + "Failed to get port%u sfp eeprom information, err: %d, status: 0x%x, out size: 0x%x\n", + hinic5_physical_port_id(hwdev), err, + sfp_tlv_info->head.status, out_size); + return -EIO; + } + + return 0; +} + +int hinic5_get_sfp_info(void *hwdev, struct mag_cmd_get_xsfp_info *sfp_info) +{ + struct hinic5_nic_io *nic_io = NULL; + struct hinic5_port_routine_cmd *rt_cmd = NULL; + u16 out_size = sizeof(*sfp_info); + int err = 0; + + if (!hwdev || !sfp_info) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + rt_cmd = &nic_io->nic_cfg.rt_cmd; + mutex_lock(&nic_io->nic_cfg.sfp_mutex); + if (rt_cmd->mpu_send_sfp_info) { + if 
(rt_cmd->std_sfp_info.head.status != 0) {
+			mutex_unlock(&nic_io->nic_cfg.sfp_mutex);
+			return -EIO;
+		}
+
+		memcpy(sfp_info, &rt_cmd->std_sfp_info, sizeof(*sfp_info));
+		mutex_unlock(&nic_io->nic_cfg.sfp_mutex);
+		/* err is always 0 on this cached path; the old
+		 * "(err == 0) ? 0 : -ENOMEM" conditional was dead code.
+		 */
+		return 0;
+	}
+	mutex_unlock(&nic_io->nic_cfg.sfp_mutex);
+
+	sfp_info->port_id = hinic5_physical_port_id(hwdev);
+	err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_GET_XSFP_INFO, sfp_info,
+				   sizeof(*sfp_info), sfp_info, &out_size);
+
+	if (sfp_info->head.status == HINIC5_MGMT_CMD_UNSUPPORTED)
+		return -EOPNOTSUPP;
+
+	if (sfp_info->head.status != 0 || err != 0 || out_size == 0) {
+		nic_err(nic_io->dev_hdl,
+			"Failed to get port%u sfp eeprom information, err: %d, status: 0x%x, out size: 0x%x\n",
+			hinic5_physical_port_id(hwdev), err,
+			sfp_info->head.status, out_size);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/* Copy @len bytes of the module eeprom starting at @offset into @data. */
+int hinic5_get_sfp_eeprom(void *hwdev, u8 *data, u32 len, u32 offset)
+{
+	struct mag_cmd_get_xsfp_info sfp_info;
+	int err = 0;
+
+	if (!hwdev || !data || len > PAGE_SIZE)
+		return -EINVAL;
+
+	/* @offset was previously unchecked: reject ranges that would read
+	 * past the end of sfp_info.sfp_info (out-of-bounds stack read).
+	 */
+	if (offset >= sizeof(sfp_info.sfp_info) ||
+	    len > sizeof(sfp_info.sfp_info) - offset)
+		return -EINVAL;
+
+	if (hinic5_if_sfp_absent(hwdev))
+		return -ENXIO;
+
+	memset(&sfp_info, 0, sizeof(sfp_info));
+
+	err = hinic5_get_sfp_info(hwdev, &sfp_info);
+	if (err != 0)
+		return err;
+
+	memcpy(data, sfp_info.sfp_info + offset, len);
+
+	/* err == 0 is guaranteed here; the "-ENOMEM" branch was unreachable */
+	return 0;
+}
+
+/* Reassemble the raw CMIS pages from the MPU TLV response, then copy
+ * @len bytes starting at @offset into @data. Only bytes actually
+ * reassembled are copied, so uninitialized stack data is never leaked.
+ */
+static void hinic5_prase_cmis_tlp_info(u8 *data, u32 len, u8 *sfp_tlv_info, u32 offset)
+{
+	struct mgmt_tlv_info *tlv_info = NULL;
+	u8 *tlv_buf = sfp_tlv_info;
+	bool need_continue = true;
+	u8 temp_tlv_info[XSFP_CMIS_INFO_MAX_SIZE];
+	u32 temp_offset = 0;
+	u32 page_len;
+
+	while (need_continue) {
+		tlv_info = (struct mgmt_tlv_info *)tlv_buf;
+		switch (tlv_info->type) {
+		case MAG_XSFP_TYPE_PAGE:
+			if (tlv_info->length < MGMT_TLV_U32_SIZE ||
+			    tlv_info->length >= XSFP_CMIS_INFO_MAX_SIZE) {
+				need_continue = false;
+				break;
+			}
+
+			page_len = tlv_info->length - MGMT_TLV_U32_SIZE;
+			/* never overflow the reassembly buffer */
+			if (page_len > sizeof(temp_tlv_info) - temp_offset) {
+				need_continue = false;
+				break;
+			}
+
+			memcpy(temp_tlv_info + temp_offset,
+			       tlv_buf + MGMT_TLV_U32_SIZE + sizeof(struct mgmt_tlv_info),
+			       page_len);
+			temp_offset += page_len;
+			/* previously fell through into the next case; exit explicitly */
+			break;
+		case MAG_XSFP_TYPE_WIRE_TYPE:
+			break;
+		case MAG_XSFP_TYPE_END:
+		default:
+			need_continue = false;
+			break;
+		}
+
+		tlv_buf += (sizeof(struct mgmt_tlv_info) + tlv_info->length);
+	}
+
+	/* clamp the copy to the bytes that were actually reassembled */
+	if (offset >= temp_offset)
+		return;
+	if (len > temp_offset - offset)
+		len = temp_offset - offset;
+	memcpy(data, temp_tlv_info + offset, len);
+}
+
+/* Fetch the CMIS eeprom image via the TLV interface and copy @len bytes
+ * starting at @offset into @data.
+ */
+int hinic5_get_cmis_eeprom(void *hwdev, u8 *data, u32 len, u32 offset)
+{
+	struct drv_tag_mag_cmd_get_xsfp_tlv_rsp sfp_tlv_info;
+	struct tag_mag_cmd_get_xsfp_tlv_req sfp_tlv_info_req;
+	int err;
+
+	/* @data was previously not NULL-checked although it is written below */
+	if (!hwdev || !data)
+		return -EINVAL;
+
+	if (hinic5_if_sfp_absent(hwdev))
+		return -ENXIO;
+
+	memset(&sfp_tlv_info, 0, sizeof(sfp_tlv_info));
+	memset(&sfp_tlv_info_req, 0, sizeof(sfp_tlv_info_req));
+
+	sfp_tlv_info_req.port_id = hinic5_physical_port_id(hwdev);
+	sfp_tlv_info_req.rsp_buf_len = XSFP_CMIS_INFO_MAX_SIZE;
+
+	err = hinic5_get_sfp_tlv_info(hwdev, &sfp_tlv_info, &sfp_tlv_info_req);
+	if (err != 0)
+		return err;
+	hinic5_prase_cmis_tlp_info(data, len, sfp_tlv_info.tlv_buf, offset);
+
+	/* err == 0 is guaranteed here */
+	return 0;
+}
+
+u8 support_page[CMIS_MAX_PAGES] = {
+	HINIC5_PAGE_L00_H00_OFFSET, HINIC5_PAGE_H01_OFFSET,
+	HINIC5_PAGE_H02_OFFSET, HINIC5_PAGE_INVALID_OFFSET,
+	HINIC5_PAGE_INVALID_OFFSET, HINIC5_PAGE_INVALID_OFFSET,
+	HINIC5_PAGE_INVALID_OFFSET, HINIC5_PAGE_INVALID_OFFSET,
+	HINIC5_PAGE_INVALID_OFFSET,
HINIC5_PAGE_INVALID_OFFSET, + HINIC5_PAGE_INVALID_OFFSET, HINIC5_PAGE_INVALID_OFFSET, + HINIC5_PAGE_INVALID_OFFSET, HINIC5_PAGE_INVALID_OFFSET, + HINIC5_PAGE_INVALID_OFFSET, HINIC5_PAGE_INVALID_OFFSET, + HINIC5_PAGE_H10_OFFSET, HINIC5_PAGE_H11_OFFSET +}; + +int hinic5_eeprom_page_check(u8 page_id, u32 offset, u32 len) +{ + u8 page_offset; + + if (page_id >= CMIS_MAX_PAGES) + return -EINVAL; + + page_offset = support_page[page_id]; + + if (page_offset == HINIC5_PAGE_INVALID_OFFSET) + return -EOPNOTSUPP; + + if (len == 0 || + page_offset * QSFP_CMIS_PAGE_SIZE + offset + len >= XSFP_CMIS_INFO_MAX_SIZE) + return -EINVAL; + + return 0; +} + +int hinic5_get_cmis_eeprom_by_page(void *hwdev, u8 page_id, u32 offset, u8 *data, u32 len) +{ + u32 data_offset; + + data_offset = support_page[page_id] * QSFP_CMIS_PAGE_SIZE + offset; + + return hinic5_get_cmis_eeprom(hwdev, data, len, data_offset); +} + +#define CMIS_UPPER_PAGE_00H_EXT_ID_OFFSET 0x81 +static void process_sfp_data(u8 *sfp_data, u8 *sfp_type, u8 *sfp_type_ext) +{ + *sfp_type = sfp_data[0x0]; + + if (*sfp_type == MODULE_TYPE_SFF8024_ID_QSFP_PLUS_CMIS) + *sfp_type_ext = sfp_data[CMIS_UPPER_PAGE_00H_EXT_ID_OFFSET]; + else + *sfp_type_ext = sfp_data[0x1]; +} + +int hinic5_get_sfp_cmis_type(void *hwdev, u8 *sfp_type, u8 *sfp_type_ext) +{ + struct hinic5_nic_io *nic_io = NULL; + struct hinic5_port_routine_cmd_extern *rt_cmd_ext = NULL; + u8 sfp_data[XSFP_CMIS_INFO_MAX_SIZE] = {0}; + int err; + + if (!hwdev || !sfp_type || !sfp_type_ext) + return -EINVAL; + + if (hinic5_if_sfp_absent(hwdev)) + return -ENXIO; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + rt_cmd_ext = &nic_io->nic_cfg.rt_cmd_ext; + + mutex_lock(&nic_io->nic_cfg.sfp_mutex); + if (rt_cmd_ext->mpu_send_xsfp_tlv_info) { + if (rt_cmd_ext->std_xsfp_tlv_info.head.status != 0) { + mutex_unlock(&nic_io->nic_cfg.sfp_mutex); + return -EIO; + } + + hinic5_prase_cmis_tlp_info(sfp_data, XSFP_CMIS_INFO_MAX_SIZE, + 
rt_cmd_ext->std_xsfp_tlv_info.tlv_buf, 0); + process_sfp_data(sfp_data, sfp_type, sfp_type_ext); + + mutex_unlock(&nic_io->nic_cfg.sfp_mutex); + return 0; + } + + mutex_unlock(&nic_io->nic_cfg.sfp_mutex); + + err = hinic5_get_cmis_eeprom(hwdev, (u8 *)sfp_data, + CMIS_UPPER_PAGE_00H_EXT_ID_OFFSET, 0); + if (err != 0) + return err; + + process_sfp_data(sfp_data, sfp_type, sfp_type_ext); + + return 0; +} + +int hinic5_get_sfp_type(void *hwdev, u8 *sfp_type, u8 *sfp_type_ext) +{ + struct hinic5_nic_io *nic_io = NULL; + struct hinic5_port_routine_cmd *rt_cmd = NULL; + u8 sfp_data[STD_SFP_INFO_MAX_SIZE]; + int err; + + if (!hwdev || !sfp_type || !sfp_type_ext) + return -EINVAL; + + if (hinic5_if_sfp_absent(hwdev)) + return -ENXIO; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + rt_cmd = &nic_io->nic_cfg.rt_cmd; + + mutex_lock(&nic_io->nic_cfg.sfp_mutex); + if (rt_cmd->mpu_send_sfp_info) { + if (rt_cmd->std_sfp_info.head.status != 0) { + mutex_unlock(&nic_io->nic_cfg.sfp_mutex); + return -EIO; + } + + *sfp_type = rt_cmd->std_sfp_info.sfp_info[0]; + *sfp_type_ext = rt_cmd->std_sfp_info.sfp_info[1]; + mutex_unlock(&nic_io->nic_cfg.sfp_mutex); + return 0; + } + mutex_unlock(&nic_io->nic_cfg.sfp_mutex); + + err = hinic5_get_sfp_eeprom(hwdev, (u8 *)sfp_data, + STD_SFP_INFO_MAX_SIZE, 0); + if (err != 0) + return err; + + *sfp_type = sfp_data[0]; + *sfp_type_ext = sfp_data[1]; + + return 0; +} + +int hinic5_set_link_status_follow(void *hwdev, enum hinic5_link_follow_status status) +{ + struct mag_cmd_set_link_follow follow; + struct hinic5_nic_io *nic_io = NULL; + u16 out_size = sizeof(follow); + int err; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + if (status >= HINIC5_LINK_FOLLOW_STATUS_MAX) { + nic_err(nic_io->dev_hdl, "Invalid link follow status: %d\n", status); + return -EINVAL; + } + + memset(&follow, 0, sizeof(follow)); + 
follow.function_id = hinic5_global_func_id(hwdev); + follow.follow = status; + + err = mag_msg_to_mgmt_sync(hwdev, MAG_CMD_SET_LINK_FOLLOW, &follow, + sizeof(follow), &follow, &out_size); + if ((follow.head.status != HINIC5_MGMT_CMD_UNSUPPORTED && follow.head.status != 0) || + err != 0 || out_size == 0) { + nic_err(nic_io->dev_hdl, "Failed to set link status follow port status, err: %d, status: 0x%x, out size: 0x%x\n", + err, follow.head.status, out_size); + return -EFAULT; + } + + return follow.head.status; +} + +int hinic5_update_pf_bw(void *hwdev) +{ + struct mag_port_info port_info = {0}; + struct hinic5_nic_io *nic_io = NULL; + int err; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + if (hinic5_func_type(hwdev) == TYPE_VF || !HINIC5_SUPPORT_RATE_LIMIT(hwdev)) { + nic_err(nic_io->dev_hdl, "Current function doesn't support to set rate limit\n"); + return -EINVAL; + } + + err = hinic5_get_port_info(hwdev, &port_info, HINIC5_CHANNEL_NIC); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to get port info\n"); + return -EIO; + } + + err = hinic5_set_pf_rate(hwdev, port_info.speed); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to set pf bandwidth\n"); + return err; + } + + return 0; +} + +int hinic5_set_pf_bw_limit(void *hwdev, u32 bw_limit) +{ + struct hinic5_nic_io *nic_io = NULL; + u32 old_bw_limit; + u8 link_state = 0; + int err; + + if (!hwdev) + return -EINVAL; + + if (hinic5_func_type(hwdev) == TYPE_VF) + return 0; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + if (bw_limit > MAX_LIMIT_BW) { + nic_err(nic_io->dev_hdl, "Invalid bandwidth: %u\n", bw_limit); + return -EINVAL; + } + + err = hinic5_get_link_state(hwdev, &link_state); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to get link state\n"); + return -EIO; + } + + if (link_state == 0) { + nic_err(nic_io->dev_hdl, "Link status must be up when setting pf tx rate\n"); + return -EINVAL; 
+ } + + old_bw_limit = nic_io->nic_cfg.pf_bw_limit; + nic_io->nic_cfg.pf_bw_limit = bw_limit; + + err = hinic5_update_pf_bw(hwdev); + if (err != 0) { + nic_io->nic_cfg.pf_bw_limit = old_bw_limit; + return err; + } + + return 0; +} + +int hinic5_get_pf_bw_limit(void *hwdev, u32 *bw_limit) +{ + struct hinic5_nic_io *nic_io = NULL; + + if (!hwdev || !bw_limit) + return -EINVAL; + + if (hinic5_func_type(hwdev) == TYPE_VF) + return 0; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + *bw_limit = nic_io->nic_cfg.pf_bw_limit; + + return 0; +} + +static const struct vf_msg_handler vf_mag_cmd_handler[] = { + { + .cmd = MAG_CMD_GET_LINK_STATUS, + .handler = hinic5_get_vf_link_status_msg_handler, + }, +}; + +/* pf/ppf handler mbox msg from vf */ +int hinic5_pf_mag_mbox_handler(void *hwdev, u16 vf_id, + u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + int index, cmd_size = ARRAY_LEN(vf_mag_cmd_handler); + struct hinic5_nic_io *nic_io = NULL; + const struct vf_msg_handler *handler = NULL; + + if (!hwdev) + return -EFAULT; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EFAULT; + + for (index = 0; index < cmd_size; index++) { + handler = &vf_mag_cmd_handler[index]; + if (cmd == handler->cmd) + return handler->handler(nic_io, vf_id, buf_in, in_size, + buf_out, out_size); + } + + nic_warn(nic_io->dev_hdl, "NO handler for mag cmd: %u received from vf id: %u\n", + cmd, vf_id); + + return -EINVAL; +} + +static struct nic_event_handler mag_cmd_handler[] = { + { + .cmd = MAG_CMD_GET_LINK_STATUS, + .handler = link_status_event_handler, + }, + + { + .cmd = MAG_CMD_EVENT_PORT_INFO, + .handler = port_info_event_printf, + }, + + { + .cmd = MAG_CMD_WIRE_EVENT, + .handler = cable_plug_event, + }, + + { + .cmd = MAG_CMD_GET_XSFP_INFO, + .handler = port_sfp_info_event, + }, + + { + .cmd = MAG_CMD_GET_XSFP_PRESENT, + .handler = port_sfp_abs_event, + }, + + { + .cmd = 
MAG_CMD_GET_XSFP_TLV_INFO, + .handler = (void (*)(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size))port_xsfp_tlv_info_event, + }, +}; + +static int hinic5_mag_event_handler(void *hwdev, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size) +{ + struct hinic5_nic_io *nic_io = NULL; + int size = ARRAY_LEN(mag_cmd_handler); + int i; + + if (!hwdev) + return -EINVAL; + + *out_size = 0; + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EFAULT; + for (i = 0; i < size; i++) { + if (cmd == mag_cmd_handler[i].cmd) { + mag_cmd_handler[i].handler(hwdev, buf_in, in_size, + buf_out, out_size); + return 0; + } + } + + /* can't find this event cmd */ + nic_warn(nic_io->dev_hdl, "Unsupported mag event, cmd: %u\n", cmd); + *out_size = sizeof(struct mgmt_msg_head); + ((struct mgmt_msg_head *)buf_out)->status = HINIC5_MGMT_CMD_UNSUPPORTED; + + return 0; +} + +int hinic5_vf_mag_event_handler(void *hwdev, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size) +{ + return hinic5_mag_event_handler(hwdev, cmd, buf_in, in_size, + buf_out, out_size); +} + +/* pf/ppf handler mgmt cpu report hilink event */ +void hinic5_pf_mag_event_handler(void *pri_handle, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size) +{ + hinic5_mag_event_handler(pri_handle, cmd, buf_in, in_size, + buf_out, out_size); +} + +static int _mag_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, + u16 channel) +{ + int i, cmd_cnt = ARRAY_LEN(vf_mag_cmd_handler); + + if (hinic5_func_type(hwdev) == TYPE_VF && (!hinic5_is_slave_host(hwdev)) && + (!hinic5_is_vf_isolation(hwdev))) { + for (i = 0; i < cmd_cnt; i++) { + if (cmd == vf_mag_cmd_handler[i].cmd) { + return hinic5_mbox_to_pf(hwdev, HINIC5_MOD_HILINK, cmd, buf_in, + in_size, buf_out, out_size, 0, channel); + } + } + } + + return hinic5_msg_to_mgmt_sync(hwdev, HINIC5_MOD_HILINK, cmd, buf_in, + in_size, buf_out, 
out_size, 0, channel); +} + +static int mag_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + return _mag_msg_to_mgmt_sync(hwdev, cmd, buf_in, in_size, buf_out, + out_size, HINIC5_CHANNEL_NIC); +} + +static int mag_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, + u16 channel) +{ + return _mag_msg_to_mgmt_sync(hwdev, cmd, buf_in, in_size, buf_out, + out_size, channel); +} + +int hinic5_set_fec(void *hwdev, u8 advertised_fec) +{ + struct mag_cmd_cfg_fec_mode fec_msg = {0}; + struct hinic5_nic_io *nic_io = NULL; + u16 out_size = sizeof(fec_msg); + int err; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + fec_msg.opcode = MAG_CMD_OPCODE_SET; + fec_msg.port_id = hinic5_physical_port_id(hwdev); + fec_msg.advertised_fec = advertised_fec; + err = mag_msg_to_mgmt_sync_ch(hwdev, MAG_CMD_CFG_FEC_MODE, &fec_msg, sizeof(fec_msg), + &fec_msg, &out_size, HINIC5_CHANNEL_NIC); + if (err != 0 || fec_msg.head.status != 0 || out_size == 0) { + nic_err(nic_io->dev_hdl, "Set FEC mode failed, err: %d, status: 0x%x, out size: 0x%x\n", + err, fec_msg.head.status, out_size); + return -EINVAL; + } + return 0; +} + +int hinic5_get_fec(void *hwdev, u8 *advertised_fec, u8 *supported_fec) +{ + struct mag_cmd_cfg_fec_mode fec_msg = {0}; + struct hinic5_nic_io *nic_io = NULL; + u16 out_size = sizeof(fec_msg); + int err; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + fec_msg.opcode = MAG_CMD_OPCODE_GET; + fec_msg.port_id = hinic5_physical_port_id(hwdev); + err = mag_msg_to_mgmt_sync_ch(hwdev, MAG_CMD_CFG_FEC_MODE, &fec_msg, sizeof(fec_msg), + &fec_msg, &out_size, HINIC5_CHANNEL_NIC); + if (err != 0 || fec_msg.head.status != 0 || out_size == 0) { + nic_err(nic_io->dev_hdl, "Get FEC mode failed, err: %d, status: 0x%x, out size: 0x%x\n", + err, 
fec_msg.head.status, out_size); + return -EINVAL; + } + + *advertised_fec = fec_msg.advertised_fec; + *supported_fec = fec_msg.supported_fec; + + return 0; +} diff --git a/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_nic.h b/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_nic.h new file mode 100644 index 00000000..23e8adfd --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_nic.h @@ -0,0 +1,262 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_NIC_H +#define HINIC5_NIC_H + +#include <linux/types.h> +#include <linux/semaphore.h> + +#include "hinic5_hw.h" +#include "hinic5_mt.h" +#include "hinic5_common.h" +#include "hinic5_nic_io.h" +#include "hinic5_nic_cfg.h" +#include "mag_mpu_cmd_defs.h" + +/* ************************ array index define ********************* */ +#define ARRAY_INDEX_0 0 +#define ARRAY_INDEX_1 1 +#define ARRAY_INDEX_2 2 +#define ARRAY_INDEX_3 3 +#define ARRAY_INDEX_4 4 +#define ARRAY_INDEX_5 5 +#define ARRAY_INDEX_6 6 +#define ARRAY_INDEX_7 7 + +#define SQ_CI_ADDR_SHIFT 2 +#define RQ_CI_ADDR_SHIFT 4 + +#define HW_VF_ID_TO_OS_CO(vf_infos, vf) \ + ((struct vf_data_storage *)((u64)(vf_infos) + \ + (HW_VF_ID_TO_OS(vf) * sizeof(struct vf_data_storage)))) + +enum hinic5_link_port_type { + LINK_PORT_UNKNOWN, + LINK_PORT_OPTICAL_MM, + LINK_PORT_OPTICAL_SM, + LINK_PORT_PAS_COPPER, + LINK_PORT_ACC, + LINK_PORT_BASET, + LINK_PORT_AOC = 0x40, + LINK_PORT_ELECTRIC, + LINK_PORT_BACKBOARD_INTERFACE, +}; + +enum hilink_fibre_subtype { + FIBRE_SUBTYPE_SR = 1, + FIBRE_SUBTYPE_LR, + FIBRE_SUBTYPE_MAX, +}; + +enum hilink_fec_type { + HILINK_FEC_NOT_SET, + HILINK_FEC_RSFEC, + HILINK_FEC_BASEFEC, + HILINK_FEC_NOFEC, + HILINK_FEC_LLRSFE, + HILINK_FEC_MAX_TYPE, +}; + +struct hinic5_sq_attr { + u8 dma_attr_off; + u8 pending_limit; + u8 coalescing_time; + u8 intr_en; + u16 intr_idx; + u32 l2nic_sqn; + u64 ci_dma_base; +}; + +struct hinic5_rq_attr { + 
u8 cqe_type;
+	u8 pending_limit;
+	u8 coalescing_time;
+	u8 rsv;
+	u16 intr_idx;
+	u32 l2nic_rqn;
+	u64 ci_dma_base;
+};
+
+struct vf_data_storage {
+	u8 drv_mac_addr[ETH_ALEN];
+	u8 user_mac_addr[ETH_ALEN];
+	bool registered;
+	bool use_specified_mac;
+	u16 pf_vlan;
+	u8 pf_qos;
+	u8 rsvd2;
+	u32 max_rate;
+	u32 min_rate;
+
+	bool link_forced;
+	bool link_up; /* only valid if VF link is forced */
+	bool spoofchk;
+	bool trust;
+	u16 num_qps;
+	u32 support_extra_feature;
+};
+
+struct hinic5_port_routine_cmd {
+	bool mpu_send_sfp_info;
+	bool mpu_send_sfp_abs;
+
+	struct mag_cmd_get_xsfp_info std_sfp_info;
+	struct mag_cmd_get_xsfp_present abs;
+};
+
+struct hinic5_port_routine_cmd_extern {
+	bool mpu_send_xsfp_tlv_info;
+
+	struct drv_tag_mag_cmd_get_xsfp_tlv_rsp std_xsfp_tlv_info;
+};
+
+struct hinic5_nic_cfg {
+	struct semaphore cfg_lock;
+
+	/* Valid when pfc is disable */
+	bool pause_set;
+	struct nic_pause_config nic_pause;
+
+	u8 pfc_en;
+	u8 pfc_bitmap;
+
+	struct mag_port_info port_info;
+
+	/* percentage of pf link bandwidth */
+	u32 pf_bw_limit;
+	u32 rsvd2;
+
+	struct hinic5_port_routine_cmd rt_cmd;
+
+	struct hinic5_port_routine_cmd_extern rt_cmd_ext;
+
+	struct mutex sfp_mutex; /* mutex used for copy sfp info */
+};
+
+struct hinic5_nic_cmdq_ops;
+
+struct hinic5_nic_aeqs {
+	hinic5_aeq_swe_cb nic_aeq_swe_cb[HINIC5_NIC_FATAL_ERROR_MAX];
+	void *nic_aeq_swe_data[HINIC5_NIC_FATAL_ERROR_MAX];
+	unsigned long nic_aeq_sw_cb_state[HINIC5_NIC_FATAL_ERROR_MAX];
+};
+
+struct hinic5_nic_io {
+	void *hwdev;
+	void *dev_hdl;
+
+	u8 link_status;
+	u8 rsvd1;
+	u32 rsvd2;
+
+	struct hinic5_io_queue *sq;
+	struct hinic5_io_queue *rq;
+
+	u16 xdp_qps;
+	u16 num_qps;
+	u16 max_qps;
+
+	/* TX direction consumer index (CI) */
+	void *sq_ci_vaddr_base;
+	dma_addr_t sq_ci_dma_base;
+
+	/* RX direction consumer index (CI) */
+	void *rq_ci_vaddr_base;
+	dma_addr_t rq_ci_dma_base;
+
+	u8 __iomem *sqs_db_addr;
+	u8 __iomem *rqs_db_addr;
+
+	u16 max_vfs;
+	u8 enable_queue_pooling;
+	u8 first_enable_queue_pooling;
+	u32
rsvd4; + + struct vf_data_storage *vf_infos; + struct hinic5_dcb_state dcb_state; + struct hinic5_nic_cfg nic_cfg; + + u16 rx_buff_len; + u16 rsvd5; + u32 rsvd6; + u64 feature_cap; + u64 rsvd7; + struct hinic5_nic_cmdq_ops *cmdq_ops; + + struct hinic5_nic_aeqs *nic_aeqs; +}; + +struct vf_msg_handler { + u16 cmd; + int (*handler)(struct hinic5_nic_io *nic_io, u16 vf, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); +}; + +struct nic_event_handler { + u16 cmd; + void (*handler)(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); +}; + +int hinic5_set_sq_ci_ctx(struct hinic5_nic_io *nic_io, struct hinic5_sq_attr *attr); + +int hinic5_set_rq_ci_ctx(struct hinic5_nic_io *nic_io, struct hinic5_rq_attr *attr); + +int l2nic_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +int l2nic_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u16 channel); + +int hinic5_cfg_vf_vlan(struct hinic5_nic_io *nic_io, u8 opcode, u16 vid, + u8 qos, int vf_id); + +int hinic5_vf_event_handler(void *hwdev, + u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +void hinic5_mgmt_event_handler(void *hwdev, u16 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +int hinic5_pf_mbox_handler(void *hwdev, + u16 vf_id, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +int hinic5_vf_func_init(struct hinic5_nic_io *nic_io); + +void hinic5_vf_func_free(struct hinic5_nic_io *nic_io); + +void hinic5_notify_dcb_state_event(struct hinic5_nic_io *nic_io, + struct hinic5_dcb_state *dcb_state); + +int hinic5_save_dcb_state(struct hinic5_nic_io *nic_io, + struct hinic5_dcb_state *dcb_state); + +void hinic5_notify_vf_link_status(struct hinic5_nic_io *nic_io, + u16 vf_id, u8 link_status); + +int hinic5_vf_mag_event_handler(void *hwdev, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size); + +void 
hinic5_pf_mag_event_handler(void *pri_handle, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size); + +int hinic5_pf_mag_mbox_handler(void *hwdev, u16 vf_id, + u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + +u8 hinic5_nic_aeqe_handler(void *hwdev, u8 event, u8 *data); + +int hinic5_nic_aeqs_init(struct hinic5_nic_io *nic_io); + +void hinic5_nic_aeqs_free(struct hinic5_nic_io *nic_io); + +void hinic5_unregister_vf(struct hinic5_nic_io *nic_io, u16 vf_id); + +u8 hinic5_nic_sw_aeqe_cnt_handler(void *dev, u8 event, u8 *data); +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_nic_cfg.c b/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_nic_cfg.c new file mode 100644 index 00000000..ae80c8ae --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_nic_cfg.c @@ -0,0 +1,2054 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/ethtool.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/netdevice.h> +#include <linux/module.h> + +#include "comm_defs.h" +#include "ossl_knl.h" +#include "hinic5_crm.h" +#include "hinic5_hw.h" +#include "hinic5_vram_common.h" +#include "hinic5_nic_io.h" +#include "hinic5_srv_nic.h" +#include "hinic5_nic.h" +#include "cfm_cmd.h" +#include "hinic5_nic_cmdq.h" +#include "nic_mpu_cmd.h" +#include "nic_mpu_cmd_extend.h" +#include "nic_npu_cmd.h" +#include "hinic5_common.h" +#include "hinic5_nic_event.h" +#include "hinic5_nic_cfg.h" + +#ifdef __UEFI__ +#define memcmp CompareMem +#endif + +int hinic5_set_sq_ci_ctx(struct hinic5_nic_io *nic_io, struct hinic5_sq_attr *attr) +{ + struct hinic5_cmd_cons_idx_attr cons_idx_attr; + u16 out_size = sizeof(cons_idx_attr); + int err; + + if (!nic_io || !attr) + 
return -EINVAL; + + memset(&cons_idx_attr, 0, sizeof(cons_idx_attr)); + cons_idx_attr.func_idx = hinic5_global_func_id(nic_io->hwdev); + cons_idx_attr.dma_attr_off = attr->dma_attr_off; + cons_idx_attr.pending_limit = attr->pending_limit; + cons_idx_attr.coalescing_time = attr->coalescing_time; + + if (attr->intr_en != 0) { + cons_idx_attr.intr_en = attr->intr_en; + cons_idx_attr.intr_idx = attr->intr_idx; + } + + cons_idx_attr.l2nic_sqn = attr->l2nic_sqn; + cons_idx_attr.ci_addr = attr->ci_dma_base >> SQ_CI_ADDR_SHIFT; + + err = l2nic_msg_to_mgmt_sync(nic_io->hwdev, HINIC5_NIC_CMD_SQ_CI_ATTR_SET, + &cons_idx_attr, sizeof(cons_idx_attr), + &cons_idx_attr, &out_size); + if (err != 0 || out_size == 0 || cons_idx_attr.msg_head.status != 0) { + nic_err(nic_io->dev_hdl, + "Failed to set ci attribute table, err: %d, status: 0x%x, out_size: 0x%x\n", + err, cons_idx_attr.msg_head.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hinic5_set_rq_ci_ctx(struct hinic5_nic_io *nic_io, struct hinic5_rq_attr *attr) +{ + struct hinic5_rq_cqe_ctx cons_idx_ctx; + u16 out_size = sizeof(cons_idx_ctx); + int err; + + if (!nic_io || !attr) + return -EINVAL; + + memset(&cons_idx_ctx, 0, sizeof(cons_idx_ctx)); + cons_idx_ctx.cqe_type = attr->cqe_type; + cons_idx_ctx.rq_id = (u8)(attr->l2nic_rqn & 0xff); + cons_idx_ctx.timer_loop = attr->coalescing_time; + cons_idx_ctx.threshold_cqe_num = attr->pending_limit; + cons_idx_ctx.msix_entry_idx = attr->intr_idx; + cons_idx_ctx.ci_addr_hi = upper_32_bits(attr->ci_dma_base >> RQ_CI_ADDR_SHIFT); + cons_idx_ctx.ci_addr_lo = lower_32_bits(attr->ci_dma_base >> RQ_CI_ADDR_SHIFT); + + err = l2nic_msg_to_mgmt_sync(nic_io->hwdev, HINIC5_NIC_CMD_SET_RQ_CI_CTX, + &cons_idx_ctx, sizeof(cons_idx_ctx), + &cons_idx_ctx, &out_size); + if (err != 0 || out_size == 0 || + (cons_idx_ctx.msg_head.status != 0 && + cons_idx_ctx.msg_head.status != HINIC5_MGMT_CMD_UNSUPPORTED)) { + nic_err(nic_io->dev_hdl, "Set rq cqe ctx fail, qid: %d, err: %d, status: 
0x%x, out_size: 0x%x", + attr->l2nic_rqn, err, cons_idx_ctx.msg_head.status, out_size); + return -EFAULT; + } + + return 0; +} + +#define PF_SET_VF_MAC(hwdev, status) \ + (hinic5_func_type(hwdev) == TYPE_VF && \ + (status) == HINIC5_PF_SET_VF_ALREADY) + +static int hinic5_check_mac_info(void *hwdev, u8 status, u16 vlan_id) +{ + if ((status != 0 && status != HINIC5_MGMT_STATUS_EXIST) || + (((vlan_id & CHECK_IPSU_15BIT) != 0) && + status == HINIC5_MGMT_STATUS_EXIST)) { + if (PF_SET_VF_MAC(hwdev, status)) + return 0; + + return -EINVAL; + } + + return 0; +} + +#define HINIC_VLAN_ID_MASK 0x7FFF + +int hinic5_set_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id, + u16 channel) +{ + struct hinic5_port_mac_set mac_info; + u16 out_size = sizeof(mac_info); + struct hinic5_nic_io *nic_io = NULL; + int err; + + if (!hwdev || !mac_addr) + return -EINVAL; + + memset(&mac_info, 0, sizeof(mac_info)); + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + if ((vlan_id & HINIC_VLAN_ID_MASK) >= VLAN_N_VID) { + nic_err(nic_io->dev_hdl, "Invalid VLAN number: %d\n", + (vlan_id & HINIC_VLAN_ID_MASK)); + return -EINVAL; + } + + mac_info.func_id = func_id; + mac_info.vlan_id = vlan_id; + ether_addr_copy(mac_info.mac, mac_addr); + + err = l2nic_msg_to_mgmt_sync_ch(hwdev, HINIC5_NIC_CMD_SET_MAC, + &mac_info, sizeof(mac_info), + &mac_info, &out_size, channel); + if (err != 0 || out_size == 0 || + (hinic5_check_mac_info(hwdev, mac_info.msg_head.status, + mac_info.vlan_id) != 0)) { + nic_err(nic_io->dev_hdl, + "Failed to update MAC, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", + err, mac_info.msg_head.status, out_size, channel); + return -EIO; + } + + if (PF_SET_VF_MAC(hwdev, mac_info.msg_head.status)) { + nic_warn(nic_io->dev_hdl, "PF has already set VF mac, Ignore set operation\n"); + return HINIC5_PF_SET_VF_ALREADY; + } + + if (mac_info.msg_head.status == HINIC5_MGMT_STATUS_EXIST) { + nic_warn(nic_io->dev_hdl, "MAC is 
repeated. Ignore update operation\n"); + return 0; + } + + return 0; +} +EXPORT_SYMBOL(hinic5_set_mac); + +int hinic5_del_mac(void *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id, + u16 channel) +{ + struct hinic5_port_mac_set mac_info; + u16 out_size = sizeof(struct hinic5_port_mac_set); + struct hinic5_nic_io *nic_io = NULL; + int err = 0; + + if (!hwdev || !mac_addr) + return -EINVAL; + + memset(&mac_info, 0, sizeof(mac_info)); + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("Nic io is null\n"); + return -EINVAL; + } + + if ((vlan_id & HINIC_VLAN_ID_MASK) >= VLAN_N_VID) { + nic_err(nic_io->dev_hdl, "Invalid VLAN number: %d\n", + (vlan_id & HINIC_VLAN_ID_MASK)); + return -EINVAL; + } + + mac_info.func_id = func_id; + mac_info.vlan_id = vlan_id; + ether_addr_copy(mac_info.mac, mac_addr); + + err = l2nic_msg_to_mgmt_sync_ch(hwdev, HINIC5_NIC_CMD_DEL_MAC, + &mac_info, sizeof(mac_info), &mac_info, + &out_size, channel); + if (err != 0 || out_size == 0) + goto ERR_DEL_MAC; + + switch (mac_info.msg_head.status) { + case 0: + break; + case HINIC5_PF_SET_VF_ALREADY: + if (hinic5_func_type(hwdev) == TYPE_VF) { + nic_warn(nic_io->dev_hdl, "PF has already set VF mac, Ignore delete operation.\n"); + return HINIC5_PF_SET_VF_ALREADY; + } + break; + case HINIC5_DEL_MAC_NO_MATCH: + nic_warn(nic_io->dev_hdl, "Del mac no match, Ignore delete operation.\n"); + break; + default: + goto ERR_DEL_MAC; + } + + return 0; + +ERR_DEL_MAC: + nic_err(nic_io->dev_hdl, + "Failed to delete MAC, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", + err, mac_info.msg_head.status, out_size, channel); + return -EIO; +} +EXPORT_SYMBOL(hinic5_del_mac); + +int hinic5_update_mac(void *hwdev, const u8 *old_mac, u8 *new_mac, u16 vlan_id, + u16 func_id) +{ + struct hinic5_port_mac_update mac_info; + u16 out_size = sizeof(mac_info); + struct hinic5_nic_io *nic_io = NULL; + int err; + + if (!hwdev || !old_mac || !new_mac) + return -EINVAL; + + 
memset(&mac_info, 0, sizeof(mac_info)); + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + if ((vlan_id & HINIC_VLAN_ID_MASK) >= VLAN_N_VID) { + nic_err(nic_io->dev_hdl, "Invalid VLAN number: %d\n", + (vlan_id & HINIC_VLAN_ID_MASK)); + return -EINVAL; + } + + mac_info.func_id = func_id; + mac_info.vlan_id = vlan_id; + ether_addr_copy(mac_info.old_mac, old_mac); + ether_addr_copy(mac_info.new_mac, new_mac); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_UPDATE_MAC, + &mac_info, sizeof(mac_info), + &mac_info, &out_size); + if (err != 0 || out_size == 0 || + (hinic5_check_mac_info(hwdev, mac_info.msg_head.status, + mac_info.vlan_id) != 0)) { + nic_err(nic_io->dev_hdl, + "Failed to update MAC, err: %d, status: 0x%x, out size: 0x%x\n", + err, mac_info.msg_head.status, out_size); + return -EIO; + } + + if (PF_SET_VF_MAC(hwdev, mac_info.msg_head.status)) { + nic_warn(nic_io->dev_hdl, "PF or Tool has already set VF MAC. Ignore update operation\n"); + return HINIC5_PF_SET_VF_ALREADY; + } + + if (mac_info.msg_head.status == HINIC5_MGMT_STATUS_EXIST) { + nic_warn(nic_io->dev_hdl, "MAC is repeated. 
Ignore update operation\n"); + return 0; + } + + return 0; +} + +int hinic5_get_default_mac(void *hwdev, u8 *mac_addr) +{ + struct hinic5_port_mac_set mac_info; + u16 out_size = sizeof(mac_info); + struct hinic5_nic_io *nic_io = NULL; + int err; + + if (!hwdev || !mac_addr) + return -EINVAL; + + memset(&mac_info, 0, sizeof(mac_info)); + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + mac_info.func_id = hinic5_global_func_id(hwdev); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_GET_MAC, + &mac_info, sizeof(mac_info), + &mac_info, &out_size); + if (err != 0 || out_size == 0 || mac_info.msg_head.status != 0) { + nic_err(nic_io->dev_hdl, + "Failed to get mac, err: %d, status: 0x%x, out size: 0x%x\n", + err, mac_info.msg_head.status, out_size); + return -EINVAL; + } + + ether_addr_copy(mac_addr, mac_info.mac); + + return 0; +} + +static int hinic5_config_vlan(struct hinic5_nic_io *nic_io, u8 opcode, + u16 vlan_id, u16 func_id) +{ + struct hinic5_cmd_vlan_config vlan_info; + u16 out_size = sizeof(vlan_info); + int err; + + memset(&vlan_info, 0, sizeof(vlan_info)); + vlan_info.opcode = opcode; + vlan_info.func_id = func_id; + vlan_info.vlan_id = vlan_id; + + err = l2nic_msg_to_mgmt_sync(nic_io->hwdev, + HINIC5_NIC_CMD_CFG_FUNC_VLAN, + &vlan_info, sizeof(vlan_info), + &vlan_info, &out_size); + if (err != 0 || out_size == 0 || vlan_info.msg_head.status != 0) { + nic_err(nic_io->dev_hdl, + "Failed to %s vlan, err: %d, status: 0x%x, out size: 0x%x\n", + opcode == HINIC5_CMD_OP_ADD ? 
"add" : "delete", + err, vlan_info.msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic5_add_vlan(void *hwdev, u16 vlan_id, u16 func_id) +{ + struct hinic5_nic_io *nic_io = NULL; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + return hinic5_config_vlan(nic_io, HINIC5_CMD_OP_ADD, vlan_id, func_id); +} + +int hinic5_del_vlan(void *hwdev, u16 vlan_id, u16 func_id) +{ + struct hinic5_nic_io *nic_io = NULL; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + return hinic5_config_vlan(nic_io, HINIC5_CMD_OP_DEL, vlan_id, func_id); +} + +int hinic5_set_vport_enable(void *hwdev, u16 func_id, bool enable, u16 channel) +{ + struct hinic5_vport_state en_state; + u16 out_size = sizeof(en_state); + struct hinic5_nic_io *nic_io = NULL; + int err; + + if (!hwdev) + return -EINVAL; + + memset(&en_state, 0, sizeof(en_state)); + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + en_state.func_id = func_id; + en_state.state = enable ? 
1 : 0; + en_state.num_qps = (u8)nic_io->num_qps; + en_state.rx_compact_wqe_en = + (hinic5_get_rq_wqe_type(nic_io->hwdev) == HINIC5_COMPACT_RQ_WQE); + + err = l2nic_msg_to_mgmt_sync_ch(hwdev, HINIC5_NIC_CMD_SET_VPORT_ENABLE, + &en_state, sizeof(en_state), + &en_state, &out_size, channel); + if (err != 0 || out_size == 0 || en_state.msg_head.status != 0) { + nic_err(nic_io->dev_hdl, "Failed to set vport state, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", + err, en_state.msg_head.status, out_size, channel); + return -EINVAL; + } + + return 0; +} + +int hinic5_set_dcb_state(void *hwdev, struct hinic5_dcb_state *dcb_state) +{ + struct hinic5_nic_io *nic_io = NULL; + + if (!hwdev || !dcb_state) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + if (memcmp(&nic_io->dcb_state, dcb_state, sizeof(nic_io->dcb_state)) == 0) + return 0; + + /* save in sdk, vf will get dcb state when probing */ + hinic5_save_dcb_state(nic_io, dcb_state); + + /* notify stateful in pf, than notify all vf */ + hinic5_notify_dcb_state_event(nic_io, dcb_state); + + return 0; +} + +int hinic5_get_dcb_state(void *hwdev, struct hinic5_dcb_state *dcb_state) +{ + struct hinic5_nic_io *nic_io = NULL; + + if (!hwdev || !dcb_state) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + memcpy(dcb_state, &nic_io->dcb_state, sizeof(*dcb_state)); + + return 0; +} +EXPORT_SYMBOL(hinic5_get_dcb_state); + +int hinic5_get_cos_by_pri(void *hwdev, u8 pri, u8 *cos) +{ + struct hinic5_nic_io *nic_io = NULL; + + if (!hwdev || !cos) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + if (pri >= NIC_DCB_UP_MAX && nic_io->dcb_state.trust == HINIC5_DCB_PCP) + return -EINVAL; + + if (pri >= NIC_DCB_IP_PRI_MAX && nic_io->dcb_state.trust == HINIC5_DCB_DSCP) + return -EINVAL; + + if (nic_io->dcb_state.dcb_on != 0) { + if 
(nic_io->dcb_state.trust == HINIC5_DCB_PCP) + *cos = nic_io->dcb_state.pcp2cos[pri]; + else + *cos = nic_io->dcb_state.dscp2cos[pri]; + } else { + *cos = nic_io->dcb_state.default_cos; + } + + return 0; +} +EXPORT_SYMBOL(hinic5_get_cos_by_pri); + +int hinic5_save_dcb_state(struct hinic5_nic_io *nic_io, + struct hinic5_dcb_state *dcb_state) +{ + memcpy(&nic_io->dcb_state, dcb_state, sizeof(*dcb_state)); + + return 0; +} + +int hinic5_get_pf_dcb_state(void *hwdev, struct hinic5_dcb_state *dcb_state) +{ + struct hinic5_cmd_vf_dcb_state vf_dcb; + struct hinic5_nic_io *nic_io = NULL; + u16 out_size = sizeof(vf_dcb); + int err; + + if (!hwdev || !dcb_state) + return -EINVAL; + + memset(&vf_dcb, 0, sizeof(vf_dcb)); + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + if (hinic5_func_type(hwdev) != TYPE_VF) { + nic_err(nic_io->dev_hdl, "Only vf need to get pf dcb state\n"); + return -EINVAL; + } + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_VF_COS, &vf_dcb, + sizeof(vf_dcb), &vf_dcb, &out_size); + if (err != 0 || out_size == 0 || vf_dcb.msg_head.status != 0) { + nic_err(nic_io->dev_hdl, "Failed to get vf default cos, err: %d, status: 0x%x, out size: 0x%x\n", + err, vf_dcb.msg_head.status, out_size); + return -EFAULT; + } + + memcpy(dcb_state, &vf_dcb.state, sizeof(*dcb_state)); + /* Save dcb_state in hw for stateful module */ + hinic5_save_dcb_state(nic_io, dcb_state); + + return 0; +} +EXPORT_SYMBOL(hinic5_get_pf_dcb_state); + +#define UNSUPPORT_SET_PAUSE 0x10 +static int hinic5_cfg_hw_pause(struct hinic5_nic_io *nic_io, u8 opcode, + struct nic_pause_config *nic_pause) +{ + struct hinic5_cmd_pause_config pause_info; + u16 out_size = sizeof(pause_info); + int err; + + memset(&pause_info, 0, sizeof(pause_info)); + + pause_info.port_id = hinic5_physical_port_id(nic_io->hwdev); + pause_info.opcode = opcode; + if (opcode == HINIC5_CMD_OP_SET) { + pause_info.auto_neg = nic_pause->auto_neg; + pause_info.rx_pause = 
nic_pause->rx_pause; + pause_info.tx_pause = nic_pause->tx_pause; + } + + err = l2nic_msg_to_mgmt_sync(nic_io->hwdev, + HINIC5_NIC_CMD_CFG_PAUSE_INFO, + &pause_info, sizeof(pause_info), + &pause_info, &out_size); + if (err != 0 || out_size == 0 || pause_info.msg_head.status != 0) { + if (pause_info.msg_head.status == UNSUPPORT_SET_PAUSE) { + err = -EOPNOTSUPP; + nic_err(nic_io->dev_hdl, "Can not set pause when pfc is enable\n"); + } else { + err = -EFAULT; + nic_err(nic_io->dev_hdl, "Failed to %s pause info, err: %d, status: 0x%x, out size: 0x%x\n", + opcode == HINIC5_CMD_OP_SET ? "set" : "get", + err, pause_info.msg_head.status, out_size); + } + return err; + } + + if (opcode == HINIC5_CMD_OP_GET) { + nic_pause->auto_neg = pause_info.auto_neg; + nic_pause->rx_pause = pause_info.rx_pause; + nic_pause->tx_pause = pause_info.tx_pause; + } + + return 0; +} + +int hinic5_set_pause_info(void *hwdev, struct nic_pause_config nic_pause) +{ + struct hinic5_nic_cfg *nic_cfg = NULL; + struct hinic5_nic_io *nic_io = NULL; + int err; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + nic_cfg = &nic_io->nic_cfg; + + down(&nic_cfg->cfg_lock); + + err = hinic5_cfg_hw_pause(nic_io, HINIC5_CMD_OP_SET, &nic_pause); + if (err != 0) { + up(&nic_cfg->cfg_lock); + return err; + } + + nic_cfg->pfc_en = 0; + nic_cfg->pfc_bitmap = 0; + nic_cfg->pause_set = true; + nic_cfg->nic_pause.auto_neg = nic_pause.auto_neg; + nic_cfg->nic_pause.rx_pause = nic_pause.rx_pause; + nic_cfg->nic_pause.tx_pause = nic_pause.tx_pause; + + up(&nic_cfg->cfg_lock); + + return 0; +} + +int hinic5_get_pause_info(void *hwdev, struct nic_pause_config *nic_pause) +{ + struct hinic5_nic_io *nic_io = NULL; + int err = 0; + + if (!hwdev || !nic_pause) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + err = hinic5_cfg_hw_pause(nic_io, HINIC5_CMD_OP_GET, nic_pause); + if 
(err != 0) + return err; + + return 0; +} + +int hinic5_sync_dcb_state(void *hwdev, u8 op_code, u8 state) +{ + struct hinic5_cmd_set_dcb_state dcb_state; + struct hinic5_nic_io *nic_io = NULL; + u16 out_size = sizeof(dcb_state); + int err; + + if (!hwdev) + return -EINVAL; + + if (hinic5_func_type(hwdev) == TYPE_VF) + return 0; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + memset(&dcb_state, 0, sizeof(dcb_state)); + + dcb_state.op_code = op_code; + dcb_state.state = state; + dcb_state.func_id = hinic5_global_func_id(hwdev); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_QOS_DCB_STATE, + &dcb_state, sizeof(dcb_state), &dcb_state, &out_size); + if (err != 0 || dcb_state.head.status != 0 || out_size == 0) { + nic_err(nic_io->dev_hdl, + "Failed to set dcb state, err: %d, status: 0x%x, out size: 0x%x\n", + err, dcb_state.head.status, out_size); + return -EFAULT; + } + + return 0; +} + +int hinic5_dcb_set_rq_iq_mapping(void *hwdev, u32 num_rqs, u8 *map, + u32 max_map_num) +{ + return 0; +} + +int hinic5_flush_qps_res(void *hwdev) +{ + struct hinic5_cmd_clear_qp_resource sq_res; + u16 out_size = sizeof(sq_res); + struct hinic5_nic_io *nic_io = NULL; + int err; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + memset(&sq_res, 0, sizeof(sq_res)); + + sq_res.func_id = hinic5_global_func_id(hwdev); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_CLEAR_QP_RESOURCE, + &sq_res, sizeof(sq_res), &sq_res, + &out_size); + if (err != 0 || out_size == 0 || sq_res.msg_head.status != 0) { + nic_err(nic_io->dev_hdl, "Failed to clear sq resources, err: %d, status: 0x%x, out size: 0x%x\n", + err, sq_res.msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic5_flush_qps_res_by_nums(void *hwdev, u16 qp_num) +{ + struct hinic5_cmd_clear_assign_qp_res sq_res; + u16 out_size = sizeof(sq_res); + u16 i; + struct 
hinic5_nic_io *nic_io = NULL; + int err; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + memset(&sq_res, 0, sizeof(sq_res)); + + sq_res.func_id = hinic5_global_func_id(hwdev); + sq_res.qp_num = qp_num; + for (i = 0; i < qp_num; i++) + sq_res.qp[i] = i; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_CLEAR_ASSIGN_QP_RES, + &sq_res, sizeof(sq_res), &sq_res, + &out_size); + if (err != 0 || out_size == 0 || sq_res.msg_head.status != 0) { + nic_err(nic_io->dev_hdl, "Failed to clear sq resources by num, err: %d, status: 0x%x, out size: 0x%x\n", + err, sq_res.msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic5_cache_out_qps_res(void *hwdev) +{ + struct hinic5_cmd_cache_out_qp_resource qp_res; + u16 out_size = sizeof(qp_res); + struct hinic5_nic_io *nic_io = NULL; + int err; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + memset(&qp_res, 0, sizeof(qp_res)); + + qp_res.func_id = hinic5_global_func_id(hwdev); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_CACHE_OUT_QP_RES, + &qp_res, sizeof(qp_res), &qp_res, &out_size); + if (err != 0 || out_size == 0 || qp_res.msg_head.status != 0) { + nic_err(nic_io->dev_hdl, "Failed to cache out qp resources, err: %d, status: 0x%x, out size: 0x%x\n", + err, qp_res.msg_head.status, out_size); + return -EIO; + } + + return 0; +} + +static int hinic5_get_vport_stats_by_cmdq(void *hwdev, u16 func_id, + struct hinic5_vport_stats *stats) +{ + struct hinic5_cmd_buf *cmd_buf = NULL; + struct hinic5_nic_io *nic_io = NULL; + u8 cmd; + int err; + + if (!hwdev || !stats) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + cmd_buf = hinic5_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + nic_err(nic_io->dev_hdl, "Failed to allocate cmd_buf.\n"); + return -ENOMEM; + } + + cmd = 
nic_io->cmdq_ops->prepare_cmd_buf_get_vport_stats(nic_io, cmd_buf, func_id); + err = hinic5_cmdq_detail_resp(hwdev, HINIC5_MOD_L2NIC, + cmd, cmd_buf, cmd_buf, NULL, 0, + HINIC5_CHANNEL_NIC); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to get vport stats\n"); + goto get_indir_tbl_failed; + } + + nic_io->cmdq_ops->cmd_buf_to_vport_stats(cmd_buf, stats); + +get_indir_tbl_failed: + hinic5_free_cmd_buf(hwdev, cmd_buf); + + return err; +} + +static int hinic5_get_vport_stats_by_mailbox(void *hwdev, u16 func_id, + struct hinic5_vport_stats *stats) +{ + struct hinic5_port_stats_info stats_info; + struct hinic5_cmd_vport_stats vport_stats; + u16 out_size = sizeof(vport_stats); + struct hinic5_nic_io *nic_io = NULL; + int err; + + if (!hwdev || !stats) + return -EINVAL; + + memset(&stats_info, 0, sizeof(stats_info)); + memset(&vport_stats, 0, sizeof(vport_stats)); + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + stats_info.func_id = func_id; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_GET_VPORT_STAT, + &stats_info, sizeof(stats_info), + &vport_stats, &out_size); + if (err != 0 || out_size == 0 || vport_stats.msg_head.status != 0) { + nic_err(nic_io->dev_hdl, + "Failed to get function statistics, err: %d, status: 0x%x, out size: 0x%x\n", + err, vport_stats.msg_head.status, out_size); + return -EFAULT; + } + + memcpy(stats, &vport_stats.stats, sizeof(*stats)); + + return (err == 0) ? 
0 : -ENOMEM; +} + +int hinic5_get_vport_stats(void *hwdev, u16 func_id, struct hinic5_vport_stats *stats) +{ + if (HINIC5_SUPPORT_GET_COUNTER_BY_CMDQ(hwdev)) + return hinic5_get_vport_stats_by_cmdq(hwdev, func_id, stats); + else + return hinic5_get_vport_stats_by_mailbox(hwdev, func_id, stats); +} + +static int hinic5_set_function_table(struct hinic5_nic_io *nic_io, u32 cfg_bitmap, + const struct hinic5_func_tbl_cfg *cfg) +{ + struct hinic5_cmd_set_func_tbl cmd_func_tbl; + u16 out_size = sizeof(cmd_func_tbl); + int err; + + memset(&cmd_func_tbl, 0, sizeof(cmd_func_tbl)); + cmd_func_tbl.func_id = hinic5_global_func_id(nic_io->hwdev); + cmd_func_tbl.cfg_bitmap = cfg_bitmap; + cmd_func_tbl.tbl_cfg = *cfg; + + err = l2nic_msg_to_mgmt_sync(nic_io->hwdev, + HINIC5_NIC_CMD_SET_FUNC_TBL, + &cmd_func_tbl, sizeof(cmd_func_tbl), + &cmd_func_tbl, &out_size); + if (err != 0 || cmd_func_tbl.msg_head.status != 0 || out_size == 0) { + nic_err(nic_io->dev_hdl, + "Failed to set func table, bitmap: 0x%x, err: %d, status: 0x%x, out size: 0x%x\n", + cfg_bitmap, err, cmd_func_tbl.msg_head.status, + out_size); + return -EFAULT; + } + + return 0; +} + +static int hinic5_init_function_table(struct hinic5_nic_io *nic_io) +{ + struct hinic5_func_tbl_cfg func_tbl_cfg = {0}; + u32 cfg_bitmap = BIT(FUNC_CFG_INIT) | BIT(FUNC_CFG_MTU) | + BIT(FUNC_CFG_RX_BUF_SIZE); + + if (hinic5_vram_get_kexec_flag() != 0) + return 0; + + func_tbl_cfg.mtu = 0x3FFF; /* default, max mtu */ + func_tbl_cfg.rx_wqe_buf_size = nic_io->rx_buff_len; + + return hinic5_set_function_table(nic_io, cfg_bitmap, &func_tbl_cfg); +} + +int hinic5_set_port_mtu(void *hwdev, u16 new_mtu) +{ + struct hinic5_func_tbl_cfg func_tbl_cfg = {0}; + struct hinic5_nic_io *nic_io = NULL; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + if (new_mtu < HINIC5_MIN_MTU_SIZE) { + nic_err(nic_io->dev_hdl, + "Invalid mtu size: %ubytes, mtu size < %ubytes", + new_mtu, 
HINIC5_MIN_MTU_SIZE); + return -EINVAL; + } + + if (new_mtu > HINIC5_MAX_JUMBO_FRAME_SIZE) { + nic_err(nic_io->dev_hdl, "Invalid mtu size: %ubytes, mtu size > %ubytes", + new_mtu, HINIC5_MAX_JUMBO_FRAME_SIZE); + return -EINVAL; + } + + func_tbl_cfg.mtu = new_mtu; + return hinic5_set_function_table(nic_io, BIT(FUNC_CFG_MTU), + &func_tbl_cfg); +} + +static int nic_feature_nego(void *hwdev, u8 opcode, u64 *s_feature, u16 size) +{ + struct hinic5_nic_io *nic_io = NULL; + struct hinic5_cmd_feature_nego feature_nego; + u16 out_size = sizeof(feature_nego); + int err; + + if (!hwdev || !s_feature || size > NIC_MAX_FEATURE_QWORD) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + memset(&feature_nego, 0, sizeof(feature_nego)); + feature_nego.func_id = hinic5_global_func_id(hwdev); + feature_nego.opcode = opcode; + if (opcode == HINIC5_CMD_OP_SET) + memcpy(feature_nego.s_feature, s_feature, size * sizeof(u64)); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_FEATURE_NEGO, + &feature_nego, sizeof(feature_nego), + &feature_nego, &out_size); + if (err != 0 || out_size == 0 || feature_nego.msg_head.status != 0) { + nic_err(nic_io->dev_hdl, "Failed to negotiate nic feature, err:%d, status: 0x%x, out_size: 0x%x\n", + err, feature_nego.msg_head.status, out_size); + return -EIO; + } + + if (opcode == HINIC5_CMD_OP_GET) + memcpy(s_feature, feature_nego.s_feature, size * sizeof(u64)); + + return 0; +} + +static int hinic5_get_bios_pf_bw_limit(void *hwdev, u32 *pf_bw_limit) +{ + struct hinic5_nic_io *nic_io = NULL; + struct nic_cmd_bios_cfg cfg = {{0}}; + u16 out_size = sizeof(cfg); + int err; + + if (!hwdev || !pf_bw_limit) + return -EINVAL; + + if (hinic5_func_type(hwdev) == TYPE_VF || !HINIC5_SUPPORT_RATE_LIMIT(hwdev)) + return 0; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + cfg.bios_cfg.func_id = (u8)hinic5_global_func_id(hwdev); + cfg.bios_cfg.func_valid = 
1; + cfg.op_code = 0 | NIC_NVM_DATA_PF_SPEED_LIMIT; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_BIOS_CFG, &cfg, sizeof(cfg), + &cfg, &out_size); + if (err != 0 || out_size == 0 || cfg.head.status != 0) { + nic_err(nic_io->dev_hdl, + "Failed to get bios pf bandwidth limit, err: %d, status: 0x%x, out size: 0x%x\n", + err, cfg.head.status, out_size); + return -EIO; + } + + /* check data is valid or not */ + if (cfg.bios_cfg.signature != BIOS_CFG_SIGNATURE) + nic_warn(nic_io->dev_hdl, "Invalid bios configuration data, signature: 0x%x\n", + cfg.bios_cfg.signature); + + if (cfg.bios_cfg.pf_bw > MAX_LIMIT_BW) { + nic_err(nic_io->dev_hdl, "Invalid bios cfg pf bandwidth limit: %u\n", + cfg.bios_cfg.pf_bw); + return -EINVAL; + } + + *pf_bw_limit = cfg.bios_cfg.pf_bw; + + return 0; +} + +int hinic5_set_pf_rate(void *hwdev, u8 speed_level) +{ + struct hinic5_cmd_rate_cfg rate_cfg = {{0}}; + struct hinic5_cmd_rate_cfg_ret rate_cfg_ret = {{0}}; + struct hinic5_nic_io *nic_io = NULL; + u16 out_size = sizeof(rate_cfg_ret); + u32 pf_rate; + int err; + u32 speed_convert[PORT_SPEED_UNKNOWN] = { + 0, 10, 100, 1000, 10000, 25000, 40000, 50000, 100000, 200000, 400000, 800000 + }; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + if (speed_level >= PORT_SPEED_UNKNOWN) { + nic_err(nic_io->dev_hdl, "Invalid speed level: %u\n", speed_level); + return -EINVAL; + } + + if (nic_io->nic_cfg.pf_bw_limit == MAX_LIMIT_BW) { + pf_rate = 0; + } else { + /* divided by 100 to convert to percentage */ + pf_rate = (speed_convert[speed_level] / MAX_LIMIT_BW) * nic_io->nic_cfg.pf_bw_limit; + /* bandwidth limit is very small but not unlimit in this case */ + if (pf_rate == 0 && speed_level != PORT_SPEED_NOT_SET) + pf_rate = 1; + } + + rate_cfg.func_id = hinic5_global_func_id(hwdev); + rate_cfg.cir = 0; + rate_cfg.pir = pf_rate; + rate_cfg.direct = NIC_RATE_DIRECT_TX_BW; + rate_cfg.cfg_mode = NIC_RATE_OP_UNUSE | NIC_RATE_OP_SET; + err = 
l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_SET_MAX_MIN_RATE, &rate_cfg, + sizeof(rate_cfg), &rate_cfg_ret, &out_size); + if (err != 0 || out_size == 0 || rate_cfg_ret.msg_head.status != 0) { + nic_err(nic_io->dev_hdl, "Failed to set rate(%u), err: %d, status: 0x%x, out size: 0x%x\n", + pf_rate, err, rate_cfg_ret.msg_head.status, out_size); + return (rate_cfg_ret.msg_head.status != 0) ? rate_cfg_ret.msg_head.status : -EIO; + } + + return 0; +} + +int hinic5_get_nic_feature_from_hw(void *hwdev, u64 *s_feature, u16 size) +{ + return nic_feature_nego(hwdev, HINIC5_CMD_OP_GET, s_feature, size); +} + +int hinic5_set_nic_feature_to_hw(void *hwdev) +{ + struct hinic5_nic_io *nic_io = NULL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + return nic_feature_nego(hwdev, HINIC5_CMD_OP_SET, &nic_io->feature_cap, 1); +} + +u64 hinic5_get_feature_cap(void *hwdev) +{ + struct hinic5_nic_io *nic_io = NULL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return 0; + + return nic_io->feature_cap; +} + +void hinic5_update_nic_feature(void *hwdev, u64 s_feature) +{ + struct hinic5_nic_io *nic_io = NULL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("Nic io is null\n"); + return; + } + nic_io->feature_cap = s_feature; + nic_info(nic_io->dev_hdl, "Update nic feature to 0x%llx\n", nic_io->feature_cap); +} + +static int hinic5_init_nic_io(void *hwdev, void *dev_hdl, + struct hinic5_nic_io **nic_io) +{ + if (!hwdev || !dev_hdl) + return -EINVAL; + + *nic_io = kzalloc(sizeof(**nic_io), GFP_KERNEL); + if ((*nic_io) == NULL) + return -ENOMEM; + + (*nic_io)->dev_hdl = dev_hdl; + (*nic_io)->hwdev = hwdev; + + sema_init(&((*nic_io)->nic_cfg.cfg_lock), 1); + mutex_init(&((*nic_io)->nic_cfg.sfp_mutex)); + + (*nic_io)->nic_cfg.rt_cmd.mpu_send_sfp_abs = false; + (*nic_io)->nic_cfg.rt_cmd.mpu_send_sfp_info = false; + (*nic_io)->nic_cfg.rt_cmd_ext.mpu_send_xsfp_tlv_info = 
false; + + return 0; +} + +/* + * hinic5_init_nic_hwdev - init nic hwdev + * @hwdev: pointer to hwdev + * @dev_hdl: pointer to pcidev->dev or handler, for nic_err() or dma_alloc() + * @rx_buff_len: rx_buff_len is receive buffer length + */ +int hinic5_init_nic_hwdev(void *hwdev, void *dev_hdl, u16 rx_buff_len) +{ + struct hinic5_nic_io *nic_io = NULL; + int err; + + err = hinic5_init_nic_io(hwdev, dev_hdl, &nic_io); + if (err != 0) + return err; + + nic_io->rx_buff_len = rx_buff_len; + + err = hinic5_register_service_adapter(hwdev, nic_io, SERVICE_T_NIC); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to register service adapter\n"); + goto register_sa_err; + } + + err = hinic5_nic_aeqs_init(nic_io); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to init nic aeqs\n"); + goto nic_aeqs_init_err; + } + + err = hinic5_set_func_svc_used_state(hwdev, SVC_T_NIC, 1, HINIC5_CHANNEL_NIC); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to set function svc used state\n"); + goto set_used_state_err; + } + + err = hinic5_init_function_table(nic_io); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to init function table\n"); + goto err_out; + } + + err = hinic5_get_nic_feature_from_hw(hwdev, &nic_io->feature_cap, 1); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to get nic features\n"); + goto err_out; + } + + nic_info(dev_hdl, "nic features: 0x%llx\n", nic_io->feature_cap); + hinic5_nic_cmdq_adapt_init(nic_io); + + err = hinic5_get_bios_pf_bw_limit(hwdev, &nic_io->nic_cfg.pf_bw_limit); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to get pf bandwidth limit\n"); + goto err_out; + } + + err = hinic5_vf_func_init(nic_io); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to init vf info\n"); + goto err_out; + } + + return 0; + +err_out: + hinic5_set_func_svc_used_state(hwdev, SVC_T_NIC, 0, HINIC5_CHANNEL_NIC); + +set_used_state_err: + hinic5_nic_aeqs_free(nic_io); + +nic_aeqs_init_err: + hinic5_unregister_service_adapter(hwdev, 
SERVICE_T_NIC); + +register_sa_err: + mutex_deinit(&nic_io->nic_cfg.sfp_mutex); + sema_deinit(&nic_io->nic_cfg.cfg_lock); + + kfree(nic_io); + + return err; +} + +void hinic5_free_nic_hwdev(void *hwdev) +{ + struct hinic5_nic_io *nic_io = NULL; + + if (!hwdev) + return; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return; + + hinic5_vf_func_free(nic_io); + + hinic5_set_func_svc_used_state(hwdev, SVC_T_NIC, 0, HINIC5_CHANNEL_NIC); + + hinic5_nic_aeqs_free(nic_io); + + hinic5_unregister_service_adapter(hwdev, SERVICE_T_NIC); + + mutex_deinit(&nic_io->nic_cfg.sfp_mutex); + sema_deinit(&nic_io->nic_cfg.cfg_lock); + + kfree(nic_io); +} + +int hinic5_force_drop_tx_pkt(void *hwdev) +{ + struct hinic5_nic_io *nic_io = NULL; + struct hinic5_force_pkt_drop pkt_drop; + u16 out_size = sizeof(pkt_drop); + int err; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + memset(&pkt_drop, 0, sizeof(pkt_drop)); + pkt_drop.port = hinic5_physical_port_id(hwdev); + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_FORCE_PKT_DROP, + &pkt_drop, sizeof(pkt_drop), + &pkt_drop, &out_size); + if ((pkt_drop.msg_head.status != HINIC5_MGMT_CMD_UNSUPPORTED && + pkt_drop.msg_head.status != 0) || err != 0 || out_size == 0) { + nic_err(nic_io->dev_hdl, + "Failed to set force tx packets drop, err: %d, status: 0x%x, out size: 0x%x\n", + err, pkt_drop.msg_head.status, out_size); + return -EFAULT; + } + + return pkt_drop.msg_head.status; +} + +int hinic5_set_rx_mode(void *hwdev, u32 enable) +{ + struct hinic5_nic_io *nic_io = NULL; + struct hinic5_rx_mode_config rx_mode_cfg; + u16 out_size = sizeof(rx_mode_cfg); + int err; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + memset(&rx_mode_cfg, 0, sizeof(rx_mode_cfg)); + rx_mode_cfg.func_id = hinic5_global_func_id(hwdev); + rx_mode_cfg.rx_mode = enable; + + 
err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_SET_RX_MODE, + &rx_mode_cfg, sizeof(rx_mode_cfg), + &rx_mode_cfg, &out_size); + if (err != 0 || out_size == 0 || rx_mode_cfg.msg_head.status != 0) { + nic_err(nic_io->dev_hdl, "Failed to set rx mode, err: %d, status: 0x%x, out size: 0x%x\n", + err, rx_mode_cfg.msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic5_set_rx_vlan_offload(void *hwdev, u8 en) +{ + struct hinic5_nic_io *nic_io = NULL; + struct hinic5_cmd_vlan_offload vlan_cfg; + u16 out_size = sizeof(vlan_cfg); + int err; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + memset(&vlan_cfg, 0, sizeof(vlan_cfg)); + vlan_cfg.func_id = hinic5_global_func_id(hwdev); + vlan_cfg.vlan_offload = en; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_SET_RX_VLAN_OFFLOAD, + &vlan_cfg, sizeof(vlan_cfg), + &vlan_cfg, &out_size); + if (err != 0 || out_size == 0 || vlan_cfg.msg_head.status != 0) { + nic_err(nic_io->dev_hdl, "Failed to set rx vlan offload, err: %d, status: 0x%x, out size: 0x%x\n", + err, vlan_cfg.msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic5_update_mac_vlan(void *hwdev, u16 old_vlan, u16 new_vlan, int vf_id) +{ + struct vf_data_storage *vf_info = NULL; + struct hinic5_nic_io *nic_io = NULL; + u16 func_id; + int err; + + if (!hwdev || old_vlan >= VLAN_N_VID || new_vlan >= VLAN_N_VID) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + vf_info = HW_VF_ID_TO_OS_CO(nic_io->vf_infos, vf_id); + if (!nic_io->vf_infos || is_zero_ether_addr(vf_info->drv_mac_addr)) + return 0; + + func_id = hinic5_glb_pf_vf_offset(nic_io->hwdev) + (u16)vf_id; + + err = hinic5_del_mac(nic_io->hwdev, vf_info->drv_mac_addr, + old_vlan, func_id, HINIC5_CHANNEL_NIC); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to delete VF %d MAC %pM vlan %u\n", + 
HW_VF_ID_TO_OS(vf_id), vf_info->drv_mac_addr, old_vlan); + return err; + } + + err = hinic5_set_mac(nic_io->hwdev, vf_info->drv_mac_addr, + new_vlan, func_id, HINIC5_CHANNEL_NIC); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to add VF %d MAC %pM vlan %u\n", + HW_VF_ID_TO_OS(vf_id), vf_info->drv_mac_addr, new_vlan); + hinic5_set_mac(nic_io->hwdev, vf_info->drv_mac_addr, + old_vlan, func_id, HINIC5_CHANNEL_NIC); + return err; + } + + return 0; +} + +static int hinic5_set_rx_lro(void *hwdev, u8 ipv4_en, u8 ipv6_en, + u8 lro_max_pkt_len) +{ + struct hinic5_nic_io *nic_io = NULL; + struct hinic5_cmd_lro_config lro_cfg; + u16 out_size = sizeof(lro_cfg); + int err; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + memset(&lro_cfg, 0, sizeof(lro_cfg)); + lro_cfg.func_id = hinic5_global_func_id(hwdev); + lro_cfg.opcode = HINIC5_CMD_OP_SET; + lro_cfg.lro_ipv4_en = ipv4_en; + lro_cfg.lro_ipv6_en = ipv6_en; + lro_cfg.lro_max_pkt_len = lro_max_pkt_len; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_CFG_RX_LRO, + &lro_cfg, sizeof(lro_cfg), + &lro_cfg, &out_size); + if (err != 0 || out_size == 0 || lro_cfg.msg_head.status != 0) { + nic_err(nic_io->dev_hdl, "Failed to set lro offload, err: %d, status: 0x%x, out size: 0x%x\n", + err, lro_cfg.msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +static int hinic5_set_rx_lro_timer(void *hwdev, u32 timer_value) +{ + struct hinic5_nic_io *nic_io = NULL; + struct hinic5_cmd_lro_timer lro_timer; + u16 out_size = sizeof(lro_timer); + int err; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + memset(&lro_timer, 0, sizeof(lro_timer)); + lro_timer.opcode = HINIC5_CMD_OP_SET; + lro_timer.timer = timer_value; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_CFG_LRO_TIMER, + &lro_timer, sizeof(lro_timer), + &lro_timer, &out_size); + if (err 
!= 0 || out_size == 0 || lro_timer.msg_head.status != 0) { + nic_err(nic_io->dev_hdl, "Failed to set lro timer, err: %d, status: 0x%x, out size: 0x%x\n", + err, lro_timer.msg_head.status, out_size); + + return -EINVAL; + } + + return 0; +} + +int hinic5_set_rx_lro_state(void *hwdev, u8 lro_en, u32 lro_timer, + u32 lro_max_pkt_len) +{ + struct hinic5_nic_io *nic_io = NULL; + u8 ipv4_en = 0, ipv6_en = 0; + int err; + + if (!hwdev) + return -EINVAL; + + ipv4_en = (lro_en != 0) ? 1 : 0; + ipv6_en = (lro_en != 0) ? 1 : 0; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + nic_info(nic_io->dev_hdl, "Set LRO max coalesce packet size to %uK\n", + lro_max_pkt_len); + + err = hinic5_set_rx_lro(hwdev, ipv4_en, ipv6_en, (u8)lro_max_pkt_len); + if (err != 0) + return err; + + /* we don't set LRO timer for VF */ + if (hinic5_func_type(hwdev) == TYPE_VF) + return 0; + + nic_info(nic_io->dev_hdl, "Set LRO timer to %u\n", lro_timer); + + return hinic5_set_rx_lro_timer(hwdev, lro_timer); +} + +int hinic5_get_veb_offload(void *hwdev, u16 *veb_offload_status) +{ + struct hinic5_nic_io *nic_io = NULL; + struct hinic5_veb_set veb_set_info; + u16 out_size; + int err; + + if (!hwdev) + return -EINVAL; + + out_size = sizeof(veb_set_info); + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + memset(&veb_set_info, 0, sizeof(veb_set_info)); + veb_set_info.opcode = VEB_OFFLOAD_QUERY; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_SET_VEB, + &veb_set_info, sizeof(veb_set_info), + &veb_set_info, &out_size); + if (err != 0 || out_size == 0 || + veb_set_info.msg_head.status != 0 || + veb_set_info.cur_status == VEB_OFFLOAD_STATUS_INVALID) { + nic_err(nic_io->dev_hdl, "Failed to get veb offload status, err: %d, status: 0x%x, out size: 0x%x\n", + err, veb_set_info.msg_head.status, out_size); + return -EINVAL; + } + + *veb_offload_status = veb_set_info.cur_status; + + return 0; +} + +int 
hinic5_set_veb_offload(void *hwdev, u16 veb_offload_status) +{ + struct hinic5_nic_io *nic_io = NULL; + struct hinic5_veb_set veb_set_info; + u16 out_size; + int err; + + if (!hwdev) + return -EINVAL; + + out_size = sizeof(veb_set_info); + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + memset(&veb_set_info, 0, sizeof(veb_set_info)); + veb_set_info.opcode = VEB_OFFLOAD_SET; + veb_set_info.set_status = veb_offload_status; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_SET_VEB, + &veb_set_info, sizeof(veb_set_info), + &veb_set_info, &out_size); + if (err != 0 || out_size == 0 || veb_set_info.msg_head.status != 0) { + nic_err(nic_io->dev_hdl, "Failed to set veb offload status, err: %d, status: 0x%x, out size: 0x%x\n", + err, veb_set_info.msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic5_set_vlan_fliter(void *hwdev, u32 vlan_filter_ctrl) +{ + struct hinic5_nic_io *nic_io = NULL; + struct hinic5_cmd_set_vlan_filter vlan_filter; + u16 out_size = sizeof(vlan_filter); + int err; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + memset(&vlan_filter, 0, sizeof(vlan_filter)); + vlan_filter.func_id = hinic5_global_func_id(hwdev); + vlan_filter.vlan_filter_ctrl = vlan_filter_ctrl; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_SET_VLAN_FILTER_EN, + &vlan_filter, sizeof(vlan_filter), + &vlan_filter, &out_size); + if (err != 0 || out_size == 0 || vlan_filter.msg_head.status != 0) { + nic_err(nic_io->dev_hdl, "Failed to set vlan filter, err: %d, status: 0x%x, out size: 0x%x\n", + err, vlan_filter.msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic5_set_func_capture_en(void *hwdev, u16 func_id, bool cap_en) +{ + struct nic_cmd_capture_info cap_info = {{0}}; + u16 out_size = sizeof(cap_info); + int err; + + if (!hwdev) + return -EINVAL; + + cap_info.is_en_trx = cap_en; + 
cap_info.func_port = func_id; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_SET_UCAPTURE_OPT, + &cap_info, sizeof(cap_info), + &cap_info, &out_size); + if (err != 0 || out_size == 0 || cap_info.msg_head.status != 0) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL(hinic5_set_func_capture_en); + +int hinic5_add_tcam_rule(void *hwdev, struct nic_tcam_cfg_rule *tcam_rule) +{ + u16 out_size = sizeof(struct nic_cmd_fdir_add_rule); + struct nic_cmd_fdir_add_rule tcam_cmd; + struct hinic5_nic_io *nic_io = NULL; + int err; + + if (!hwdev || !tcam_rule) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + if (tcam_rule->index >= HINIC5_MAX_TCAM_RULES_NUM) { + nic_err(nic_io->dev_hdl, "Tcam rules num to add is invalid\n"); + return -EINVAL; + } + + memset(&tcam_cmd, 0, sizeof(struct nic_cmd_fdir_add_rule)); + memcpy((void *)&tcam_cmd.rule, (void *)tcam_rule, sizeof(struct nic_tcam_cfg_rule)); + tcam_cmd.func_id = hinic5_global_func_id(hwdev); + tcam_cmd.type = TCAM_RULE_FDIR_TYPE; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_ADD_TC_FLOW, + &tcam_cmd, sizeof(tcam_cmd), + &tcam_cmd, &out_size); + if (err != 0 || tcam_cmd.head.status != 0 || out_size == 0) { + nic_err(nic_io->dev_hdl, + "Add tcam rule failed, err: %d, status: 0x%x, out size: 0x%x\n", + err, tcam_cmd.head.status, out_size); + return -EIO; + } + + return 0; +} + +int hinic5_del_tcam_rule(void *hwdev, u32 index) +{ + u16 out_size = sizeof(struct nic_cmd_fdir_del_rules); + struct nic_cmd_fdir_del_rules tcam_cmd; + struct hinic5_nic_io *nic_io = NULL; + int err; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + if (index >= HINIC5_MAX_TCAM_RULES_NUM) { + nic_err(nic_io->dev_hdl, "Tcam rules num to del is invalid\n"); + return -EINVAL; + } + + memset(&tcam_cmd, 0, sizeof(struct nic_cmd_fdir_del_rules)); + tcam_cmd.index_start = index; + 
tcam_cmd.index_num = 1; + tcam_cmd.func_id = hinic5_global_func_id(hwdev); + tcam_cmd.type = TCAM_RULE_FDIR_TYPE; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_DEL_TC_FLOW, + &tcam_cmd, sizeof(tcam_cmd), + &tcam_cmd, &out_size); + if (err != 0 || tcam_cmd.head.status != 0 || out_size == 0) { + nic_err(nic_io->dev_hdl, + "Del tcam rule failed, err: %d, status: 0x%x, out size: 0x%x\n", + err, tcam_cmd.head.status, out_size); + return -EIO; + } + + return 0; +} + +/** + * hinic5_mgmt_tcam_block - alloc or free tcam block for IO packet. + * + * @param hwdev + * The hardware interface of a nic device. + * @param alloc_en + * 1 alloc block. + * 0 free block. + * @param index + * block index from firmware. + * @return + * 0 on success, + * negative error value otherwise. + */ +static int hinic5_mgmt_tcam_block(void *hwdev, u8 alloc_en, u16 *index) +{ + struct nic_cmd_ctrl_tcam_block_out tcam_block_info; + u16 out_size = sizeof(struct nic_cmd_ctrl_tcam_block_out); + struct hinic5_nic_io *nic_io = NULL; + int err; + + if (!hwdev || !index) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + memset(&tcam_block_info, 0, sizeof(struct nic_cmd_ctrl_tcam_block_out)); + + tcam_block_info.func_id = hinic5_global_func_id(hwdev); + tcam_block_info.alloc_en = alloc_en; + tcam_block_info.tcam_type = NIC_TCAM_BLOCK_TYPE_LARGE; + tcam_block_info.tcam_block_index = *index; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_CFG_TCAM_BLOCK, + &tcam_block_info, sizeof(tcam_block_info), + &tcam_block_info, &out_size); + if (err != 0 || tcam_block_info.head.status != 0 || out_size == 0) { + nic_err(nic_io->dev_hdl, + "Set tcam block failed, err: %d, status: 0x%x, out size: 0x%x\n", + err, tcam_block_info.head.status, out_size); + return -EIO; + } + + if (alloc_en != 0) + *index = tcam_block_info.tcam_block_index; + + return 0; +} + +int hinic5_alloc_tcam_block(void *hwdev, u16 *index) +{ + return 
hinic5_mgmt_tcam_block(hwdev, HINIC5_TCAM_BLOCK_ENABLE, index); +} + +int hinic5_free_tcam_block(void *hwdev, u16 *index) +{ + return hinic5_mgmt_tcam_block(hwdev, HINIC5_TCAM_BLOCK_DISABLE, index); +} + +int hinic5_set_fdir_tcam_rule_filter(void *hwdev, bool enable) +{ + struct nic_cmd_set_tcam_enable port_tcam_cmd; + u16 out_size = sizeof(port_tcam_cmd); + struct hinic5_nic_io *nic_io = NULL; + int err; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + memset(&port_tcam_cmd, 0, sizeof(port_tcam_cmd)); + port_tcam_cmd.func_id = hinic5_global_func_id(hwdev); + port_tcam_cmd.tcam_enable = (u8)enable; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_ENABLE_TCAM, + &port_tcam_cmd, sizeof(port_tcam_cmd), + &port_tcam_cmd, &out_size); + if (err != 0 || port_tcam_cmd.head.status != 0 || out_size == 0) { + nic_err(nic_io->dev_hdl, "Set fdir tcam filter failed, err: %d, status: 0x%x, out size: 0x%x, enable: 0x%x\n", + err, port_tcam_cmd.head.status, out_size, + enable); + return -EIO; + } + + return 0; +} + +int hinic5_flush_tcam_rule(void *hwdev) +{ + struct nic_cmd_flush_tcam_rules tcam_flush; + u16 out_size = sizeof(struct nic_cmd_flush_tcam_rules); + struct hinic5_nic_io *nic_io = NULL; + int err; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + memset(&tcam_flush, 0, sizeof(struct nic_cmd_flush_tcam_rules)); + tcam_flush.func_id = hinic5_global_func_id(hwdev); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_FLUSH_TCAM, + &tcam_flush, + sizeof(struct nic_cmd_flush_tcam_rules), + &tcam_flush, &out_size); + if (err != 0 || tcam_flush.head.status != 0 || out_size == 0) { + nic_err(nic_io->dev_hdl, + "Flush tcam fdir rules failed, err: %d, status: 0x%x, out size: 0x%x\n", + err, tcam_flush.head.status, out_size); + return -EIO; + } + + return 0; +} + +int hinic5_get_rxq_hw_info(void *hwdev, 
struct rxq_check_info *rxq_info, u16 num_qps, u16 wqe_type) +{ + struct hinic5_cmd_buf *cmd_buf = NULL; + struct hinic5_nic_io *nic_io = NULL; + struct hinic5_rxq_hw *rxq_hw = NULL; + struct rxq_check_info *rxq_info_out = NULL; + int err; + u16 i; + + if (!hwdev || !rxq_info) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + cmd_buf = hinic5_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + nic_err(nic_io->dev_hdl, "Failed to allocate cmd_buf.\n"); + return -ENOMEM; + } + + rxq_hw = cmd_buf->buf; + rxq_hw->func_id = hinic5_global_func_id(hwdev); + rxq_hw->num_queues = num_qps; + + hinic5_cpu_to_be32(rxq_hw, sizeof(struct hinic5_rxq_hw)); + + cmd_buf->size = sizeof(struct hinic5_rxq_hw); + + err = hinic5_cmdq_detail_resp(hwdev, HINIC5_MOD_L2NIC, HINIC5_UCODE_CMD_RXQ_INFO_GET, + cmd_buf, cmd_buf, NULL, 0, HINIC5_CHANNEL_NIC); + if (err != 0) + goto get_rxq_info_failed; + + rxq_info_out = cmd_buf->buf; + for (i = 0; i < num_qps; i++) { + rxq_info[i].hw_pi = rxq_info_out[i].hw_pi >> wqe_type; + rxq_info[i].hw_ci = rxq_info_out[i].hw_ci >> wqe_type; + } + +get_rxq_info_failed: + hinic5_free_cmd_buf(hwdev, cmd_buf); + + return err; +} + +int hinic5_pf_set_vf_link_state(void *hwdev, bool vf_link_forced, bool link_state) +{ + struct hinic5_nic_io *nic_io = NULL; + struct vf_data_storage *vf_infos = NULL; + int vf_id; + + if (!hwdev) { + pr_err("hwdev is null.\n"); + return -EINVAL; + } + + if (hinic5_func_type(hwdev) == TYPE_VF) { + pr_err("VF are not supported to set link state.\n"); + return -EINVAL; + } + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("nic_io is null.\n"); + return -EINVAL; + } + + vf_infos = nic_io->vf_infos; + for (vf_id = 0; vf_id < nic_io->max_vfs; vf_id++) { + vf_infos[vf_id].link_up = link_state; + vf_infos[vf_id].link_forced = vf_link_forced; + } + + return 0; +} +EXPORT_SYMBOL(hinic5_pf_set_vf_link_state); + +int hinic5_add_tc_flow_rule(void *hwdev, 
struct hinic5_tc_cfg_info *tc_flow_rule, bool default_rule) +{ + u16 out_size = sizeof(struct hinic5_tc_cfg_info); + struct hinic5_nic_io *nic_io = NULL; + int err; + + if (!hwdev || !tc_flow_rule) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + tc_flow_rule->index = default_rule ? TCAM_INVLD_INDEX : 0; + tc_flow_rule->opcode = HINIC5_TC_CFG_RULE_OPS_ADD; + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_CFG_TC_FLOW_RULE, + tc_flow_rule, sizeof(*tc_flow_rule), + tc_flow_rule, &out_size); + if (err != 0 || tc_flow_rule->head.status != 0 || out_size == 0) { + nic_err(nic_io->dev_hdl, + "Add tc flow rule failed, err: %d, status: 0x%x, out size: 0x%x\n", + err, tc_flow_rule->head.status, out_size); + return -EIO; + } + + return 0; +} + +int hinic5_del_tc_flow_rule(void *hwdev, u16 rule_id) +{ + u16 out_size = sizeof(struct hinic5_tc_cfg_info); + struct hinic5_tc_cfg_info cmd; + struct hinic5_nic_io *nic_io = NULL; + int err; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + memset(&cmd, 0, sizeof(struct hinic5_tc_cfg_info)); + cmd.index = rule_id; + cmd.opcode = HINIC5_TC_CFG_RULE_OPS_DEL; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_CFG_TC_FLOW_RULE, + &cmd, sizeof(cmd), &cmd, &out_size); + if (err != 0 || cmd.head.status != 0 || out_size == 0) { + nic_err(nic_io->dev_hdl, + "Del tc flow rule failed, err: %d, status: 0x%x, out size: 0x%x\n", + err, cmd.head.status, out_size); + return -EIO; + } + + return 0; +} + +int hinic5_flush_tc_flow_rule(void *hwdev, ulong *bitmap) +{ + u16 out_size = sizeof(struct hinic5_tc_flush_info); + struct hinic5_tc_flush_info cmd; + struct hinic5_nic_io *nic_io = NULL; + int err; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + memset(&cmd, 0, sizeof(cmd)); + memcpy(cmd.active_bitmap, 
bitmap, sizeof(cmd.active_bitmap)); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_FLUSH_TC_FLOW, + &cmd, sizeof(cmd), &cmd, &out_size); + if (err != 0 || cmd.head.status != 0 || out_size == 0) { + nic_err(nic_io->dev_hdl, + "Flush tc flow rule failed, err: %d, status: 0x%x, out size: 0x%x\n", + err, cmd.head.status, out_size); + return -EIO; + } + + return 0; +} + +int hinic5_get_pfe_cfg(void *hwdev, struct hinic5_tc_pfe_cfg_reg_info *cfg_info) +{ + u16 out_size = sizeof(struct hinic5_tc_pfe_cfg_reg_info); + int err; + + if (!hwdev || !cfg_info) + return -EINVAL; + + memset(cfg_info, 0, out_size); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_GET_PFE_CFG, + cfg_info, sizeof(struct hinic5_tc_pfe_cfg_reg_info), + cfg_info, &out_size); + if (err != 0 || cfg_info->head.status != 0 || out_size == 0) { + pr_err("Get pfe cfg failed, err: %d, status: 0x%x, out size: 0x%x\n", + err, cfg_info->head.status, out_size); + return -EIO; + } + + return 0; +} + +int hinic5_move_tc_tcam_table(void *hwdev, struct hinic5_tc_move_info *acl_move_info) +{ + u16 out_size = sizeof(struct hinic5_tc_move_info); + struct hinic5_nic_io *nic_io = NULL; + int err; + + if (!hwdev || !acl_move_info) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_MOVE_TC_TBL, + acl_move_info, sizeof(*acl_move_info), + acl_move_info, &out_size); + if (err != 0 || acl_move_info->head.status != 0 || out_size == 0) { + nic_err(nic_io->dev_hdl, + "Move tcam table failed, err: %d, status: 0x%x, out size: 0x%x\n", + err, acl_move_info->head.status, out_size); + return -EIO; + } + + return 0; +} + +int hinic5_send_arp_to_mpu(void *hwdev, struct hinic5_arp_pkt_info *info) +{ + int err; + u16 out_size = sizeof(struct hinic5_arp_pkt_info); + + err = hinic5_msg_to_mgmt_sync(hwdev, HINIC5_MOD_CFM, + CFM_MPU_CMD_PASS_ARP_PKT, info, + sizeof(struct hinic5_arp_pkt_info), info, + &out_size, 
HINIC5_BOND_MSG_TIMEOUT_MS, + HINIC5_CHANNEL_NIC); + if (err != 0 || info->head.status != 0 || out_size == 0) { + pr_err("Send ARP failed, err: %d, status: 0x%x, out size: 0x%x\n", + err, info->head.status, out_size); + return -EIO; + } + return 0; +} + +bool hinic5_check_dev_need_dual_send(void *hwdev) +{ + struct hinic5_nic_io *nic_io = NULL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("Nic io is null\n"); + return false; + } + + if ((nic_io->feature_cap & NIC_F_ARP_DUAL) != 0 && + (nic_io->feature_cap & NIC_F_HALF_BOND_OFFLOAD) != 0) + return true; + + return false; +} + +int hinic5_vxlan_port_config(void *hwdev, u16 func_id, u16 port, u8 action, u8 pkt_fmt) +{ + struct hinic5_cmd_vxlan_port_info vxlan_port_info = { + .opcode = action, + .cfg_mode = 0, + .func_id = func_id, + .vxlan_port = port, + .pkt_fmt = pkt_fmt + }; + u16 out_size = sizeof(vxlan_port_info); + struct hinic5_nic_io *nic_io = NULL; + int err; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + if (hinic5_func_type(hwdev) == TYPE_VF) + return 0; + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_CFG_VXLAN_PORT, + &vxlan_port_info, sizeof(vxlan_port_info), + &vxlan_port_info, &out_size); + if (err != 0 || out_size == 0 || vxlan_port_info.msg_head.status != 0) { + if (vxlan_port_info.msg_head.status == HINIC5_VXLAN_DPORT_SET_UNSUPPORT && + err == 0) + return -EOPNOTSUPP; + if (vxlan_port_info.msg_head.status == HINIC5_VXLAN_DPORT_SET_BY_HINICADM && + err == 0) { // other tool set failed + nic_warn(nic_io->dev_hdl, + "Dst port has already been set\n"); + } else { + nic_err(nic_io->dev_hdl, + "Failed to %s vxlan dst port, cmd: 0x%x, err: %d, status: 0x%x, out size: 0x%x\n", + action == HINIC5_CMD_OP_ADD ? 
"add" : "delete", + HINIC5_NIC_CMD_CFG_VXLAN_PORT, + err, vxlan_port_info.msg_head.status, out_size); + } + return err; + } + + return 0; +} + +int hinic5_set_queue_pooling(void *hwdev, u8 enable_queue_pooling) +{ + struct hinic5_nic_io *nic_io = NULL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("Nic io is null\n"); + return false; + } + + if (nic_io->enable_queue_pooling == 0 && enable_queue_pooling != 0) + nic_io->first_enable_queue_pooling = 1; + nic_io->enable_queue_pooling = enable_queue_pooling; + return 0; +} diff --git a/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_nic_cfg_vf.c b/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_nic_cfg_vf.c new file mode 100644 index 00000000..83207596 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_nic_cfg_vf.c @@ -0,0 +1,696 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/ethtool.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/netdevice.h> +#include <linux/module.h> + +#include "comm_defs.h" +#include "ossl_knl.h" +#include "hinic5_crm.h" +#include "hinic5_hw.h" +#include "hinic5_nic_io.h" +#include "hinic5_nic_cfg.h" +#include "hinic5_srv_nic.h" +#include "hinic5_nic.h" +#include "hinic5_nic_cmdq.h" +#include "nic_mpu_cmd.h" +#include "hinic5_nic_cfg_vf.h" + +static unsigned char set_vf_link_state; +module_param(set_vf_link_state, byte, 0444); +MODULE_PARM_DESC(set_vf_link_state, "Set vf link state, 0: link auto, 1: link always up, 2: link always down (default=0)"); + +static int hinic5_set_vlan_ctx(struct hinic5_nic_io *nic_io, u16 func_id, + u16 vlan_tag, u16 q_id, bool add) +{ + struct hinic5_cmd_buf *cmd_buf = NULL; + u64 out_param = 0; + int err; + u8 cmd, 
vlan_mode; + + cmd_buf = hinic5_alloc_cmd_buf(nic_io->hwdev); + if (!cmd_buf) { + nic_err(nic_io->dev_hdl, "Failed to allocate cmd buf\n"); + return -ENOMEM; + } + + vlan_mode = add ? NIC_QINQ_INSERT_ENABLE : NIC_CVLAN_INSERT_ENABLE; + + cmd = nic_io->cmdq_ops->prepare_cmd_buf_modify_svlan(cmd_buf, func_id, + vlan_tag, q_id, vlan_mode); + + err = hinic5_cmdq_direct_resp(nic_io->hwdev, HINIC5_MOD_L2NIC, + cmd, cmd_buf, &out_param, 0, HINIC5_CHANNEL_NIC); + + hinic5_free_cmd_buf(nic_io->hwdev, cmd_buf); + + if (err != 0 || out_param != 0) { + nic_err(nic_io->dev_hdl, "Failed to set vlan context, err: %d, out_param: 0x%llx\n", + err, out_param); + return -EFAULT; + } + + return err; +} + +int hinic5_cfg_vf_vlan(struct hinic5_nic_io *nic_io, u8 opcode, u16 vid, + u8 qos, int vf_id) +{ + struct hinic5_cmd_vf_vlan_config vf_vlan; + u16 out_size = sizeof(vf_vlan); + u16 glb_func_id; + int err; + u16 vlan_tag; + + /* VLAN 0 is a special case, don't allow it to be removed */ + if (vid == 0 && opcode == HINIC5_CMD_OP_DEL) + return 0; + + memset(&vf_vlan, 0, sizeof(vf_vlan)); + + vf_vlan.opcode = opcode; + vf_vlan.func_id = hinic5_glb_pf_vf_offset(nic_io->hwdev) + (u16)vf_id; + vf_vlan.vlan_id = vid; + vf_vlan.qos = qos; + + err = l2nic_msg_to_mgmt_sync(nic_io->hwdev, HINIC5_NIC_CMD_CFG_VF_VLAN, + &vf_vlan, sizeof(vf_vlan), + &vf_vlan, &out_size); + if (err != 0 || out_size == 0 || vf_vlan.msg_head.status != 0) { + nic_err(nic_io->dev_hdl, "Failed to set VF %d vlan, err: %d, status: 0x%x,out size: 0x%x\n", + HW_VF_ID_TO_OS(vf_id), err, vf_vlan.msg_head.status, + out_size); + return -EFAULT; + } + + vlan_tag = vid + (u16)(qos << VLAN_PRIO_SHIFT); + + glb_func_id = hinic5_glb_pf_vf_offset(nic_io->hwdev) + (u16)vf_id; + err = hinic5_set_vlan_ctx(nic_io, glb_func_id, vlan_tag, + NIC_CONFIG_ALL_QUEUE_VLAN_CTX, + opcode == HINIC5_CMD_OP_ADD); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to set VF %d vlan ctx, err: %d\n", + HW_VF_ID_TO_OS(vf_id), err); + + /* rollback vlan 
config */ + if (opcode == HINIC5_CMD_OP_DEL) + vf_vlan.opcode = HINIC5_CMD_OP_ADD; + else + vf_vlan.opcode = HINIC5_CMD_OP_DEL; + l2nic_msg_to_mgmt_sync(nic_io->hwdev, + HINIC5_NIC_CMD_CFG_VF_VLAN, &vf_vlan, + sizeof(vf_vlan), &vf_vlan, &out_size); + return err; + } + + return 0; +} + +/* + * this function just be called by hinic5_ndo_set_vf_mac, + * others are not permitted. + */ +int hinic5_set_vf_mac(void *hwdev, int vf_id, const unsigned char *mac_addr) +{ + struct vf_data_storage *vf_info = NULL; + struct hinic5_nic_io *nic_io = NULL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + vf_info = HW_VF_ID_TO_OS_CO(nic_io->vf_infos, vf_id); +#ifndef __VMWARE__ + /* duplicate request, so just return success */ + if (ether_addr_equal(vf_info->user_mac_addr, mac_addr)) + return 0; + +#else + if (ether_addr_equal(vf_info->user_mac_addr, mac_addr)) + return 0; +#endif + ether_addr_copy(vf_info->user_mac_addr, mac_addr); + + return 0; +} + +int hinic5_add_vf_vlan(void *hwdev, int vf_id, u16 vlan, u8 qos) +{ + struct hinic5_nic_io *nic_io = NULL; + int err; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + err = hinic5_cfg_vf_vlan(nic_io, HINIC5_CMD_OP_ADD, vlan, qos, vf_id); + if (err != 0) + return err; + + nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan = vlan; + nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos = qos; + + nic_info(nic_io->dev_hdl, "Setting VLAN %u, QOS 0x%x on VF %d\n", + vlan, qos, HW_VF_ID_TO_OS(vf_id)); + + return 0; +} + +int hinic5_kill_vf_vlan(void *hwdev, int vf_id) +{ + struct vf_data_storage *vf_infos = NULL; + struct hinic5_nic_io *nic_io = NULL; + int err; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + vf_infos = nic_io->vf_infos; + + err = hinic5_cfg_vf_vlan(nic_io, HINIC5_CMD_OP_DEL, + vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan, + vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos, vf_id); + if (err != 0) 
+		return err;
+
+	nic_info(nic_io->dev_hdl, "Remove VLAN %u on VF %d\n",
+		 vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan,
+		 HW_VF_ID_TO_OS(vf_id));
+
+	vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan = 0;
+	vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos = 0;
+
+	return 0;
+}
+
+u16 hinic5_vf_info_vlanprio(void *hwdev, int vf_id)
+{
+	struct hinic5_nic_io *nic_io = NULL;
+	u16 pf_vlan, vlanprio;
+	u8 pf_qos;
+
+	nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC);
+	if (!nic_io)
+		return 0;
+	pf_vlan = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_vlan;
+	pf_qos = nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].pf_qos;
+	vlanprio = (u16)(pf_vlan | (pf_qos << HINIC5_VLAN_PRIORITY_SHIFT));
+
+	return vlanprio;
+}
+
+int hinic5_set_vf_link_state(void *hwdev, u16 vf_id, int link)
+{
+	u8 link_status = 0;
+	struct vf_data_storage *vf_infos = NULL;
+	struct hinic5_nic_io *nic_io =
+		hinic5_get_service_adapter(hwdev, SERVICE_T_NIC);
+	if (!nic_io)
+		return -EINVAL;
+	vf_infos = nic_io->vf_infos;
+
+	switch (link) {
+	case HINIC5_IFLA_VF_LINK_STATE_AUTO:
+		vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = false;
+		vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = (nic_io->link_status != 0) ?
+ true : false; + link_status = nic_io->link_status; + break; + case HINIC5_IFLA_VF_LINK_STATE_ENABLE: + vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true; + vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = true; + link_status = HINIC5_LINK_UP; + break; + case HINIC5_IFLA_VF_LINK_STATE_DISABLE: + vf_infos[HW_VF_ID_TO_OS(vf_id)].link_forced = true; + vf_infos[HW_VF_ID_TO_OS(vf_id)].link_up = false; + link_status = HINIC5_LINK_DOWN; + break; + default: + return -EINVAL; + } + + /* Notify the VF of its new link state */ + hinic5_notify_vf_link_status(nic_io, vf_id, link_status); + + return 0; +} + +int hinic5_set_vf_spoofchk(void *hwdev, u16 vf_id, bool spoofchk) +{ + struct hinic5_cmd_spoofchk_set spoofchk_cfg; + struct vf_data_storage *vf_infos = NULL; + u16 out_size = sizeof(spoofchk_cfg); + struct hinic5_nic_io *nic_io = NULL; + int err; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + vf_infos = nic_io->vf_infos; + + memset(&spoofchk_cfg, 0, sizeof(spoofchk_cfg)); + + spoofchk_cfg.func_id = hinic5_glb_pf_vf_offset(hwdev) + vf_id; + spoofchk_cfg.state = spoofchk ? 
1 : 0; + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_SET_SPOOFCHK_STATE, + &spoofchk_cfg, + sizeof(spoofchk_cfg), &spoofchk_cfg, + &out_size); + if (err != 0 || out_size == 0 || spoofchk_cfg.msg_head.status != 0) { + nic_err(nic_io->dev_hdl, "Failed to set VF(%d) spoofchk, err: %d, status: 0x%x, out size: 0x%x\n", + HW_VF_ID_TO_OS(vf_id), err, + spoofchk_cfg.msg_head.status, out_size); + err = -EINVAL; + } + + vf_infos[HW_VF_ID_TO_OS(vf_id)].spoofchk = spoofchk; + + return err; +} + +bool hinic5_vf_info_spoofchk(void *hwdev, int vf_id) +{ + struct hinic5_nic_io *nic_io = NULL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return false; + + return nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].spoofchk; +} + +#ifdef HAVE_NDO_SET_VF_TRUST +int hinic5_set_vf_trust(void *hwdev, u16 vf_id, bool trust) +{ + struct hinic5_nic_io *nic_io = NULL; + struct hinic5_cmd_vf_trust_config vf_trust = {0}; + u16 out_size = sizeof(vf_trust); + int err; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io || vf_id > nic_io->max_vfs) + return -EINVAL; + + vf_trust.func_id = hinic5_glb_pf_vf_offset(nic_io->hwdev) + vf_id; + vf_trust.trust = (u8)trust; + + nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].trust = trust; + + err = l2nic_msg_to_mgmt_sync(nic_io->hwdev, + HINIC5_NIC_CMD_CFG_VF_TRUST, + &vf_trust, out_size, &vf_trust, + &out_size); + if (vf_trust.msg_head.status == NIC_VF_TRUST_UNSUPPORT && err == 0) { + nic_info(nic_io->dev_hdl, "Succeeded to set vf trust to driver, did not set vf trust to chip\n"); + return 0; + } + if (err != 0 || out_size == 0 || vf_trust.msg_head.status != 0) + nic_warn(nic_io->dev_hdl, "Failed to set vf trust, err: %d, out_size: 0x%x, status:0x%x\n", + err, out_size, vf_trust.msg_head.status); + + return 0; +} + +bool hinic5_get_vf_trust(void *hwdev, int vf_id) +{ + struct hinic5_nic_io *nic_io = NULL; + + if (!hwdev) + return false; + + nic_io = 
hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io || vf_id > nic_io->max_vfs) + return false; + + return nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].trust; +} +#endif + +static int hinic5_set_vf_tx_rate_max_min(struct hinic5_nic_io *nic_io, + u16 vf_id, u32 max_rate, u32 min_rate) +{ + struct hinic5_cmd_rate_cfg rate_cfg; + struct hinic5_cmd_rate_cfg_ret rate_cfg_ret = {0}; + u16 out_size = sizeof(rate_cfg_ret); + int err; + + memset(&rate_cfg, 0, sizeof(rate_cfg)); + + rate_cfg.func_id = hinic5_glb_pf_vf_offset(nic_io->hwdev) + vf_id; + rate_cfg.pir = max_rate; + rate_cfg.cir = min_rate; + rate_cfg.direct = NIC_RATE_DIRECT_TX_BW; + rate_cfg.cfg_mode = NIC_RATE_OP_SET; + err = l2nic_msg_to_mgmt_sync(nic_io->hwdev, + HINIC5_NIC_CMD_SET_MAX_MIN_RATE, + &rate_cfg, sizeof(rate_cfg), &rate_cfg_ret, + &out_size); + if (rate_cfg_ret.msg_head.status != 0 || err != 0 || out_size == 0) { + nic_err(nic_io->dev_hdl, "Failed to set VF %d max rate %u, min rate %u, err: %d, status: 0x%x, out size: 0x%x\n", + HW_VF_ID_TO_OS(vf_id), max_rate, min_rate, err, + rate_cfg_ret.msg_head.status, out_size); + return -EIO; + } + + return 0; +} + +int hinic5_set_vf_tx_rate(void *hwdev, u16 vf_id, u32 max_rate, u32 min_rate) +{ + struct hinic5_nic_io *nic_io = NULL; + int err; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + if (!HINIC5_SUPPORT_RATE_LIMIT(hwdev)) { + nic_err(nic_io->dev_hdl, "Current function doesn't support to set vf rate limit\n"); + return -EOPNOTSUPP; + } + + err = hinic5_set_vf_tx_rate_max_min(nic_io, vf_id, max_rate, min_rate); + if (err != 0) + return err; + + nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].max_rate = max_rate; + nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].min_rate = min_rate; + + return 0; +} + +void hinic5_get_vf_config(void *hwdev, u16 vf_id, struct ifla_vf_info *ivi) +{ + struct vf_data_storage *vfinfo = NULL; + struct hinic5_nic_io *nic_io = NULL; + + nic_io = hinic5_get_service_adapter(hwdev, 
SERVICE_T_NIC); + if (!nic_io) + return; + + vfinfo = HW_VF_ID_TO_OS_CO(nic_io->vf_infos, vf_id); + if (!vfinfo) + return; + + ivi->vf = HW_VF_ID_TO_OS(vf_id); + ether_addr_copy(ivi->mac, vfinfo->user_mac_addr); + ivi->vlan = vfinfo->pf_vlan; + ivi->qos = vfinfo->pf_qos; + +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + ivi->spoofchk = vfinfo->spoofchk; +#endif + +#ifdef HAVE_NDO_SET_VF_TRUST + ivi->trusted = vfinfo->trust; +#endif + +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + ivi->max_tx_rate = vfinfo->max_rate; + ivi->min_tx_rate = vfinfo->min_rate; +#else + ivi->tx_rate = vfinfo->max_rate; +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ + +#ifdef HAVE_NDO_SET_VF_LINK_STATE + if (!vfinfo->link_forced) + ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; + else if (vfinfo->link_up) + ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; + else + ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; +#endif +} + +static int hinic5_init_vf_infos(struct hinic5_nic_io *nic_io, u16 vf_id) +{ + struct vf_data_storage *vf_infos = nic_io->vf_infos; + u8 vf_link_state; + + if (set_vf_link_state > HINIC5_IFLA_VF_LINK_STATE_DISABLE) { + nic_warn(nic_io->dev_hdl, "Module Parameter set_vf_link_state value %u is out of range, resetting to %d\n", + set_vf_link_state, HINIC5_IFLA_VF_LINK_STATE_AUTO); + set_vf_link_state = HINIC5_IFLA_VF_LINK_STATE_AUTO; + } + + vf_link_state = set_vf_link_state; + + switch (vf_link_state) { + case HINIC5_IFLA_VF_LINK_STATE_AUTO: + vf_infos[vf_id].link_forced = false; + break; + case HINIC5_IFLA_VF_LINK_STATE_ENABLE: + vf_infos[vf_id].link_forced = true; + vf_infos[vf_id].link_up = true; + break; + case HINIC5_IFLA_VF_LINK_STATE_DISABLE: + vf_infos[vf_id].link_forced = true; + vf_infos[vf_id].link_up = false; + break; + default: + nic_err(nic_io->dev_hdl, "Input parameter set_vf_link_state error: %u\n", + vf_link_state); + return -EINVAL; + } + + return 0; +} + +static int vf_func_register(struct hinic5_nic_io *nic_io) +{ + struct hinic5_cmd_register_vf register_info; + u16 out_size = 
sizeof(register_info); + int err; + + err = hinic5_register_vf_mbox_cb(nic_io->hwdev, HINIC5_MOD_L2NIC, + nic_io->hwdev, hinic5_vf_event_handler); + if (err != 0) + return err; + + err = hinic5_register_vf_mbox_cb(nic_io->hwdev, HINIC5_MOD_HILINK, + nic_io->hwdev, hinic5_vf_mag_event_handler); + if (err != 0) + goto reg_hilink_err; + + if (hinic5_is_slave_host(nic_io->hwdev)) { + nic_info(nic_io->dev_hdl, "The VF(slave host) does not need to register with the PF."); + return 0; + } + + if (hinic5_is_vf_isolation(nic_io->hwdev)) { + nic_info(nic_io->dev_hdl, "The isolated VF does not need to register with the PF."); + return 0; + } + + memset(®ister_info, 0, sizeof(register_info)); + register_info.op_register = 1; + register_info.support_extra_feature = 0; + err = hinic5_mbox_to_pf(nic_io->hwdev, HINIC5_MOD_L2NIC, + HINIC5_NIC_CMD_VF_REGISTER, + ®ister_info, sizeof(register_info), + ®ister_info, &out_size, 0, + HINIC5_CHANNEL_NIC); + if (err != 0 || out_size == 0 || register_info.msg_head.status != 0) { + nic_err(nic_io->dev_hdl, "Failed to register VF, err: %d, status: 0x%x, out size: 0x%x\n", + err, register_info.msg_head.status, out_size); + err = -EIO; + goto register_err; + } + + return 0; + +register_err: + hinic5_unregister_vf_mbox_cb(nic_io->hwdev, HINIC5_MOD_HILINK); + +reg_hilink_err: + hinic5_unregister_vf_mbox_cb(nic_io->hwdev, HINIC5_MOD_L2NIC); + + return err; +} + +static int pf_init_vf_infos(struct hinic5_nic_io *nic_io) +{ + u32 size; + int err; + u16 i; + + nic_io->max_vfs = hinic5_func_max_vf(nic_io->hwdev); + size = sizeof(*nic_io->vf_infos) * nic_io->max_vfs; + if (size == 0) + return 0; + + nic_io->vf_infos = kzalloc(size, GFP_KERNEL); + if (!nic_io->vf_infos) + return -ENOMEM; + + for (i = 0; i < nic_io->max_vfs; i++) { + err = hinic5_init_vf_infos(nic_io, i); + if (err != 0) + goto init_vf_infos_err; + } + + err = hinic5_register_pf_mbox_cb(nic_io->hwdev, HINIC5_MOD_L2NIC, + nic_io->hwdev, hinic5_pf_mbox_handler); + if (err != 0) + goto 
register_pf_mbox_cb_err; + + err = hinic5_register_pf_mbox_cb(nic_io->hwdev, HINIC5_MOD_HILINK, + nic_io->hwdev, hinic5_pf_mag_mbox_handler); + if (err != 0) + goto register_pf_mag_mbox_cb_err; + + return 0; + +register_pf_mag_mbox_cb_err: + hinic5_unregister_pf_mbox_cb(nic_io->hwdev, HINIC5_MOD_L2NIC); + +register_pf_mbox_cb_err: +init_vf_infos_err: + kfree(nic_io->vf_infos); + + return err; +} + +int hinic5_vf_func_init(struct hinic5_nic_io *nic_io) +{ + int err; + + err = hinic5_register_mgmt_msg_cb(nic_io->hwdev, HINIC5_MOD_L2NIC, + nic_io->hwdev, hinic5_mgmt_event_handler); + if (err != 0) + return err; + + if (hinic5_func_type(nic_io->hwdev) == TYPE_VF) + return vf_func_register(nic_io); + + err = hinic5_register_mgmt_msg_cb(nic_io->hwdev, HINIC5_MOD_HILINK, + nic_io->hwdev, hinic5_pf_mag_event_handler); + if (err != 0) + goto register_mgmt_msg_cb_err; + + err = pf_init_vf_infos(nic_io); + if (err != 0) + goto pf_init_vf_infos_err; + + return 0; + +pf_init_vf_infos_err: + hinic5_unregister_mgmt_msg_cb(nic_io->hwdev, HINIC5_MOD_HILINK); +register_mgmt_msg_cb_err: + hinic5_unregister_mgmt_msg_cb(nic_io->hwdev, HINIC5_MOD_L2NIC); + + return err; +} + +void hinic5_vf_func_free(struct hinic5_nic_io *nic_io) +{ + struct hinic5_cmd_register_vf unregister; + u16 out_size = sizeof(unregister); + int err; + + memset(&unregister, 0, sizeof(unregister)); + unregister.op_register = 0; + if (hinic5_func_type(nic_io->hwdev) == TYPE_VF) { + do { + if (hinic5_is_slave_host(nic_io->hwdev)) { + nic_info(nic_io->dev_hdl, "The VF(slave host) does not need to unregister with the PF."); + break; + } + if (hinic5_is_vf_isolation(nic_io->hwdev)) { + nic_info(nic_io->dev_hdl, "The isolated VF does not need to unregister with the PF."); + break; + } + err = hinic5_mbox_to_pf(nic_io->hwdev, HINIC5_MOD_L2NIC, + HINIC5_NIC_CMD_VF_REGISTER, + &unregister, sizeof(unregister), + &unregister, &out_size, 0, + HINIC5_CHANNEL_NIC); + if (err != 0 || out_size == 0 || unregister.msg_head.status != 
0) { + nic_err(nic_io->dev_hdl, "Failed to unregister VF, err: %d, status: 0x%x, out_size: 0x%x\n", + err, unregister.msg_head.status, out_size); + } + } while (0); + hinic5_unregister_vf_mbox_cb(nic_io->hwdev, HINIC5_MOD_HILINK); + hinic5_unregister_vf_mbox_cb(nic_io->hwdev, HINIC5_MOD_L2NIC); + } else { + if (nic_io->vf_infos) { + hinic5_unregister_pf_mbox_cb(nic_io->hwdev, HINIC5_MOD_HILINK); + hinic5_unregister_pf_mbox_cb(nic_io->hwdev, HINIC5_MOD_L2NIC); + hinic5_clear_vfs_info(nic_io->hwdev, 0, nic_io->max_vfs); + kfree(nic_io->vf_infos); + nic_io->vf_infos = NULL; + } + hinic5_unregister_mgmt_msg_cb(nic_io->hwdev, HINIC5_MOD_HILINK); + hinic5_unregister_mgmt_msg_cb(nic_io->hwdev, HINIC5_MOD_L2NIC); + } +} + +static void clear_vf_infos(void *hwdev, u16 vf_id) +{ + struct vf_data_storage *vf_infos = NULL; + struct hinic5_nic_io *nic_io = NULL; + u16 func_id; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("Nic io is null\n"); + return; + } + + func_id = hinic5_glb_pf_vf_offset(hwdev) + vf_id; + vf_infos = HW_VF_ID_TO_OS_CO(nic_io->vf_infos, vf_id); + if (vf_infos->use_specified_mac) + hinic5_del_mac(hwdev, vf_infos->drv_mac_addr, + vf_infos->pf_vlan, func_id, HINIC5_CHANNEL_NIC); + + if (hinic5_vf_info_vlanprio(hwdev, vf_id) != 0) + hinic5_kill_vf_vlan(hwdev, vf_id); + + if (vf_infos->max_rate != 0) + hinic5_set_vf_tx_rate(hwdev, vf_id, 0, 0); + + if (vf_infos->spoofchk) + hinic5_set_vf_spoofchk(hwdev, vf_id, false); + +#ifdef HAVE_NDO_SET_VF_TRUST + if (vf_infos->trust) + hinic5_set_vf_trust(hwdev, vf_id, false); +#endif + + memset(vf_infos, 0, sizeof(*vf_infos)); + /* set vf_infos to default */ + hinic5_init_vf_infos(nic_io, HW_VF_ID_TO_OS(vf_id)); +} + +void hinic5_clear_vfs_info(void *hwdev, u32 start_vf_id, u32 end_vf_id) +{ + struct hinic5_nic_io *nic_io = + hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + u16 i; + + if (!nic_io) { + pr_err("Nic io is null\n"); + return; + } + + for (i = 0; i < nic_io->max_vfs; 
i++) + clear_vf_infos(hwdev, OS_VF_ID_TO_HW(i)); +} diff --git a/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_nic_cmdq.h b/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_nic_cmdq.h new file mode 100644 index 00000000..be2b44ad --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_nic_cmdq.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_NIC_CMDQ_H +#define HINIC5_NIC_CMDQ_H + +#include "ossl_knl.h" +#include "hinic5_hw.h" +#include "hinic5_nic.h" + +#define HINIC5_Q_CTXT_MAX 31U /* (2048 - 8) / 64 */ +#define HINIC5_QP_CTXT_HEADER_SIZE 16U + +#define HINIC5_DEAULT_DROP_THD_ON (0xFFFF) + +#define SQ_CTXT_PKT_DROP_THD_ON_SHIFT 0 +#define SQ_CTXT_PKT_DROP_THD_OFF_SHIFT 16 + +#define SQ_CTXT_PKT_DROP_THD_ON_MASK 0xFFFFU +#define SQ_CTXT_PKT_DROP_THD_OFF_MASK 0xFFFFU + +#define SQ_CTXT_PKT_DROP_THD_SET(val, member) (((val) & \ + SQ_CTXT_PKT_DROP_##member##_MASK) \ + << SQ_CTXT_PKT_DROP_##member##_SHIFT) + +#define SQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0 +#define SQ_CTXT_PREF_CACHE_MAX_SHIFT 14 +#define SQ_CTXT_PREF_CACHE_MIN_SHIFT 25 + +#define SQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFFU +#define SQ_CTXT_PREF_CACHE_MAX_MASK 0x7FFU +#define SQ_CTXT_PREF_CACHE_MIN_MASK 0x7FU + +#define SQ_CTXT_PREF_SET(val, member) (((val) & \ + SQ_CTXT_PREF_##member##_MASK) \ + << SQ_CTXT_PREF_##member##_SHIFT) + +#define RQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0 +#define RQ_CTXT_PREF_CACHE_MAX_SHIFT 14 +#define RQ_CTXT_PREF_CACHE_MIN_SHIFT 25 + +#define RQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFFU +#define RQ_CTXT_PREF_CACHE_MAX_MASK 0x7FFU +#define RQ_CTXT_PREF_CACHE_MIN_MASK 0x7FU + +#define RQ_CTXT_PREF_SET(val, member) (((val) & \ + RQ_CTXT_PREF_##member##_MASK) << \ + RQ_CTXT_PREF_##member##_SHIFT) + +#define RQ_CTXT_CEQ_ATTR_SET(val, member) (((val) & \ + RQ_CTXT_CEQ_ATTR_##member##_MASK) \ + << RQ_CTXT_CEQ_ATTR_##member##_SHIFT) + +enum 
hinic5_qp_ctxt_type { + HINIC5_QP_CTXT_TYPE_SQ, + HINIC5_QP_CTXT_TYPE_RQ, +}; + +struct hinic5_sq_ctxt { + u32 ci_pi; + u32 drop_mode_sp; + u32 wq_pfn_hi_owner; + u32 wq_pfn_lo; + + u32 rsvd0; + u32 pkt_drop_thd; + u32 global_sq_id; + u32 vlan_ceq_attr; + + u32 pref_cache; + u32 pref_ci_owner; + u32 pref_wq_pfn_hi_ci; + u32 pref_wq_pfn_lo; + + u32 rsvd8; + u32 rsvd9; + u32 wq_block_pfn_hi; + u32 wq_block_pfn_lo; +}; + +struct hinic5_rq_ctxt { + u32 ci_pi; + u32 ceq_attr; + u32 wq_pfn_hi_type_owner; + u32 wq_pfn_lo; + + u32 rsvd[3]; + u32 cqe_sge_len; + + u32 pref_cache; + u32 pref_ci_owner; + u32 pref_wq_pfn_hi_ci; + u32 pref_wq_pfn_lo; + + u32 pi_paddr_hi; + u32 pi_paddr_lo; + u32 wq_block_pfn_hi; + u32 wq_block_pfn_lo; +}; + +struct hinic5_nic_cmdq_ops { + u8 (*prepare_cmd_buf_clean_tso_lro_space)(struct hinic5_nic_io *nic_io, + struct hinic5_cmd_buf *cmd_buf, + enum hinic5_qp_ctxt_type ctxt_type); + u8 (*prepare_cmd_buf_qp_context_multi_store)(struct hinic5_nic_io *nic_io, + struct hinic5_cmd_buf *cmd_buf, + enum hinic5_qp_ctxt_type ctxt_type, + u16 start_qid, u16 max_ctxts); + u8 (*prepare_cmd_buf_modify_svlan)(struct hinic5_cmd_buf *cmd_buf, + u16 func_id, u16 vlan_tag, u16 q_id, u8 vlan_mode); + u8 (*prepare_cmd_buf_set_rss_indir_table)(const struct hinic5_nic_io *nic_io, + const u32 *indir_table, + struct hinic5_cmd_buf *cmd_buf); + u8 (*prepare_cmd_buf_get_rss_indir_table)(const struct hinic5_nic_io *nic_io, + const struct hinic5_cmd_buf *cmd_buf); + void (*cmd_buf_to_rss_indir_table)(const struct hinic5_cmd_buf *cmd_buf, u32 *indir_table); + void (*cmd_buf_to_vport_stats)(const struct hinic5_cmd_buf *cmd_buf, + struct hinic5_vport_stats *stats); + u8 (*prepare_cmd_buf_get_vport_stats)(const struct hinic5_nic_io *nic_io, + const struct hinic5_cmd_buf *cmd_buf, u16 func_id); + u8 (*prepare_cmd_buf_clear_vport_stats)(const struct hinic5_nic_io *nic_io, + const struct hinic5_cmd_buf *cmd_buf, u16 func_id); + void (*prepare_sq_ctxt_drop_and_prefetch)(struct 
hinic5_sq_ctxt *sq_ctxt); + void (*prepare_rq_ctxt_ceq_and_prefetch)(struct hinic5_io_queue *rq, + struct hinic5_rq_ctxt *rq_ctxt, + bool support_rq_sw_compact_wqe); +}; + +struct hinic5_nic_cmdq_ops *hinic5_nic_cmdq_get_182x_ops(void); +struct hinic5_nic_cmdq_ops *hinic5_nic_cmdq_get_187x_ops(void); + +void hinic5_nic_cmdq_adapt_init(struct hinic5_nic_io *nic_io); +void hinic5_sq_prepare_ctxt(struct hinic5_nic_io *nic_io, struct hinic5_io_queue *sq, u16 sq_id, + struct hinic5_sq_ctxt *sq_ctxt); +void hinic5_rq_prepare_ctxt(struct hinic5_nic_io *nic_io, struct hinic5_io_queue *rq, + struct hinic5_rq_ctxt *rq_ctxt); +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_nic_dbg.c b/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_nic_dbg.c new file mode 100644 index 00000000..e2769d5f --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_nic_dbg.c @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/kernel.h> +#include <linux/types.h> + +#include "ossl_knl.h" +#include "hinic5_crm.h" +#include "hinic5_hw.h" +#include "hinic5_nic_sq.h" +#include "hinic5_nic_rq.h" +#include "hinic5_nic_io.h" +#include "hinic5_srv_nic.h" +#include "hinic5_nic.h" +#include "hinic5_nic_dbg.h" + +int hinic5_dbg_get_wqe_info(void *hwdev, u16 q_id, u16 idx, u16 wqebb_cnt, + u8 *wqe, const u16 *wqe_size, enum hinic5_queue_type q_type) +{ + struct hinic5_io_queue *queue = NULL; + struct hinic5_nic_io *nic_io = NULL; + u8 *wqe_tmp = wqe; + void *src_wqebb = NULL; + u32 i, offset; + + if (!hwdev) { + pr_err("hwdev is NULL.\n"); + return -EINVAL; + } + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + if (q_id >= nic_io->num_qps) { + pr_err("q_id[%u] > num_qps_cfg[%u].\n", q_id, nic_io->num_qps); + return -EINVAL; + } + + queue = (q_type == HINIC5_SQ) ? 
&nic_io->sq[q_id] : &nic_io->rq[q_id]; + + if ((idx + wqebb_cnt) > queue->wq.q_depth) { + pr_err("(idx[%u] + idx[%u]) > q_depth[%u].\n", idx, wqebb_cnt, queue->wq.q_depth); + return -EINVAL; + } + + if (*wqe_size != (queue->wq.wqebb_size * wqebb_cnt)) { + pr_err("Unexpect out buf size from user :%u, expect: %d\n", + *wqe_size, (queue->wq.wqebb_size * wqebb_cnt)); + return -EINVAL; + } + + for (i = 0; i < wqebb_cnt; i++) { + src_wqebb = hinic5_wq_wqebb_addr(&queue->wq, (u16)WQ_MASK_IDX(&queue->wq, idx + i)); + offset = queue->wq.wqebb_size * i; + memcpy(wqe_tmp + offset, src_wqebb, queue->wq.wqebb_size); + } + + return 0; +} + +int hinic5_dbg_get_sq_info(void *hwdev, u16 q_id, struct nic_sq_info *sq_info, + u32 msg_size) +{ + struct hinic5_nic_io *nic_io = NULL; + struct hinic5_io_queue *sq = NULL; + + if (!hwdev || !sq_info) { + pr_err("hwdev or sq_info is NULL.\n"); + return -EINVAL; + } + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + if (q_id >= nic_io->num_qps) { + nic_err(nic_io->dev_hdl, "Input queue id(%u) is larger than the actual queue number\n", + q_id); + return -EINVAL; + } + + if (msg_size != sizeof(*sq_info)) { + nic_err(nic_io->dev_hdl, "Unexpect out buf size from user :%u, expect: %lu\n", + msg_size, sizeof(*sq_info)); + return -EINVAL; + } + + sq = &nic_io->sq[q_id]; + + sq_info->q_id = q_id; + sq_info->pi = hinic5_get_sq_local_pi(sq); + sq_info->ci = hinic5_get_sq_local_ci(sq); + sq_info->fi = hinic5_get_sq_hw_ci(sq); + sq_info->q_depth = sq->wq.q_depth; + sq_info->wqebb_size = sq->wq.wqebb_size; + sq_info->ci_wqe_page_addr = hinic5_wq_get_first_wqe_page_addr(&sq->wq); + sq_info->cla_addr = sq->wq.wq_block_paddr; + sq_info->slq_handle = sq; + + sq_info->doorbell.map_addr = (u64 *)sq->db_addr; + + return 0; +} + +int hinic5_dbg_get_rq_info(void *hwdev, u16 q_id, struct nic_rq_info *rq_info, + u32 msg_size) +{ + struct hinic5_nic_io *nic_io = NULL; + struct hinic5_io_queue *rq = NULL; + + if (!hwdev 
|| !rq_info) { + pr_err("hwdev or rq_info is NULL.\n"); + return -EINVAL; + } + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + if (q_id >= nic_io->num_qps) { + nic_err(nic_io->dev_hdl, "Input queue id(%u) is larger than the actual queue number\n", + q_id); + return -EINVAL; + } + + if (msg_size != sizeof(*rq_info)) { + nic_err(nic_io->dev_hdl, "Unexpect out buf size from user: %u, expect: %lu\n", + msg_size, sizeof(*rq_info)); + return -EINVAL; + } + + rq = &nic_io->rq[q_id]; + + rq_info->q_id = q_id; + rq_info->hw_ci = hinic5_get_rq_hw_ci(rq); + rq_info->wqebb_size = rq->wq.wqebb_size; + rq_info->q_depth = (u16)rq->wq.q_depth; + rq_info->buf_len = nic_io->rx_buff_len; + rq_info->slq_handle = rq; + rq_info->ci_wqe_page_addr = hinic5_wq_get_first_wqe_page_addr(&rq->wq); + rq_info->ci_cla_tbl_addr = rq->wq.wq_block_paddr; + rq_info->msix_idx = rq->msix_entry_idx; + + return 0; +} diff --git a/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_nic_event.c b/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_nic_event.c new file mode 100644 index 00000000..bf8474f1 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_nic_event.c @@ -0,0 +1,961 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/ethtool.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/netdevice.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/workqueue.h> +#include <linux/dma-mapping.h> +#include <linux/spinlock.h> +#ifndef __UEFI__ +#include <linux/cpumask.h> +#endif + +#include "comm_defs.h" +#include "ossl_knl.h" +#include "hinic5_crm.h" +#include "hinic5_hw.h" +#include "hinic5_nic_io.h" +#include "hinic5_nic_cfg.h" +#include 
"hinic5_srv_nic.h" +#include "hinic5_nic.h" +#include "nic_mpu_cmd.h" +#include "hinic5_nic_event.h" + +enum hinic5_aeq_cb_state { + HINIC5_NIC_AEQ_SW_CB_REG, + HINIC5_NIC_AEQ_SW_CB_RUNNING, +}; + +#define AEQ_USLEEP_LOW_BOUND 900 +#define AEQ_USLEEP_HIG_BOUND 1000 + +static int hinic5_init_vf_config(struct hinic5_nic_io *nic_io, u16 vf_id) +{ + struct vf_data_storage *vf_info = NULL; + u16 func_id; + int err = 0; + + vf_info = HW_VF_ID_TO_OS_CO(nic_io->vf_infos, vf_id); + ether_addr_copy(vf_info->drv_mac_addr, vf_info->user_mac_addr); + if (!is_zero_ether_addr(vf_info->drv_mac_addr)) { + vf_info->use_specified_mac = true; + func_id = hinic5_glb_pf_vf_offset(nic_io->hwdev) + vf_id; + + err = hinic5_set_mac(nic_io->hwdev, vf_info->drv_mac_addr, + vf_info->pf_vlan, func_id, + HINIC5_CHANNEL_NIC); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to set VF %d MAC\n", + HW_VF_ID_TO_OS(vf_id)); + return err; + } + } else { + vf_info->use_specified_mac = false; + } + + if (hinic5_vf_info_vlanprio(nic_io->hwdev, vf_id) != 0) { + err = hinic5_cfg_vf_vlan(nic_io, HINIC5_CMD_OP_ADD, + vf_info->pf_vlan, vf_info->pf_qos, + vf_id); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to add VF %d VLAN_QOS\n", + HW_VF_ID_TO_OS(vf_id)); + return err; + } + } + + if (vf_info->max_rate != 0) { + err = hinic5_set_vf_tx_rate(nic_io->hwdev, vf_id, + vf_info->max_rate, + vf_info->min_rate); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to set VF %d max rate %u, min rate %u\n", + HW_VF_ID_TO_OS(vf_id), vf_info->max_rate, + vf_info->min_rate); + return err; + } + } + + return 0; +} + +static int register_vf_msg_handler(struct hinic5_nic_io *nic_io, u16 vf_id) +{ + int err; + + if (vf_id > nic_io->max_vfs) { + nic_err(nic_io->dev_hdl, "Register VF id %d exceed limit[0-%d]\n", + HW_VF_ID_TO_OS(vf_id), HW_VF_ID_TO_OS(nic_io->max_vfs)); + return -EFAULT; + } + + err = hinic5_init_vf_config(nic_io, vf_id); + if (err != 0) + return err; + + 
nic_io->vf_infos[HW_VF_ID_TO_OS(vf_id)].registered = true; + + return 0; +} + +static int unregister_vf_msg_handler(struct hinic5_nic_io *nic_io, u16 vf_id) +{ + struct vf_data_storage *vf_info = + HW_VF_ID_TO_OS_CO(nic_io->vf_infos, vf_id); + struct hinic5_port_mac_set mac_info; + u16 out_size = sizeof(mac_info); + int err; + + if (vf_id > nic_io->max_vfs) + return -EFAULT; + + vf_info->registered = false; + + memset(&mac_info, 0, sizeof(mac_info)); + mac_info.func_id = hinic5_glb_pf_vf_offset(nic_io->hwdev) + (u16)vf_id; + mac_info.vlan_id = vf_info->pf_vlan; + ether_addr_copy(mac_info.mac, vf_info->drv_mac_addr); + + if (vf_info->use_specified_mac || vf_info->pf_vlan != 0) { + err = l2nic_msg_to_mgmt_sync(nic_io->hwdev, + HINIC5_NIC_CMD_DEL_MAC, + &mac_info, sizeof(mac_info), + &mac_info, &out_size); + if (err != 0 || out_size == 0) + goto ERR_DEL_MAC; + + switch (mac_info.msg_head.status) { + case 0: + break; + case HINIC5_DEL_MAC_NO_MATCH: + nic_warn(nic_io->dev_hdl, "Del mac no match, Ignore delete operation.\n"); + break; + default: + goto ERR_DEL_MAC; + } + } + + memset(vf_info->drv_mac_addr, 0, ETH_ALEN); + + return 0; + +ERR_DEL_MAC: + nic_err(nic_io->dev_hdl, "Failed to delete VF %d MAC, err: %d, status: 0x%x, out size: 0x%x\n", + HW_VF_ID_TO_OS(vf_id), err, + mac_info.msg_head.status, out_size); + return -EFAULT; +} + +static int hinic5_register_vf_msg_handler(struct hinic5_nic_io *nic_io, + u16 vf_id, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic5_cmd_register_vf *register_vf = buf_in; + struct hinic5_cmd_register_vf *register_info = buf_out; + struct vf_data_storage *vf_info = HW_VF_ID_TO_OS_CO(nic_io->vf_infos, vf_id); + int err; + + if (!vf_info) + return -EINVAL; + + if (register_vf->op_register != 0) { + vf_info->support_extra_feature = register_vf->support_extra_feature; + err = register_vf_msg_handler(nic_io, vf_id); + } else { + err = unregister_vf_msg_handler(nic_io, vf_id); + vf_info->support_extra_feature = 0; 
+ } + + if (err != 0) + register_info->msg_head.status = EFAULT; + + *out_size = sizeof(*register_info); + + return 0; +} + +void hinic5_unregister_vf(struct hinic5_nic_io *nic_io, u16 vf_id) +{ + struct vf_data_storage *vf_info = HW_VF_ID_TO_OS_CO(nic_io->vf_infos, vf_id); + + if (!vf_info) + return; + unregister_vf_msg_handler(nic_io, vf_id); + vf_info->support_extra_feature = 0; +} + +static int hinic5_get_vf_cos_msg_handler(struct hinic5_nic_io *nic_io, + u16 vf_id, void *buf_in, + u16 in_size, void *buf_out, + u16 *out_size) +{ + struct hinic5_cmd_vf_dcb_state *dcb_state = buf_out; + + memcpy(&dcb_state->state, &nic_io->dcb_state, sizeof(nic_io->dcb_state)); + dcb_state->msg_head.status = 0; + *out_size = sizeof(*dcb_state); + return 0; +} + +static int hinic5_get_vf_mac_msg_handler(struct hinic5_nic_io *nic_io, u16 vf, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct vf_data_storage *vf_info = HW_VF_ID_TO_OS_CO(nic_io->vf_infos, vf); + struct hinic5_port_mac_set *mac_in = (struct hinic5_port_mac_set *)buf_in; + struct hinic5_port_mac_set *mac_info = buf_out; + int err; + + if (!mac_info || !vf_info) + return -EINVAL; + + mac_in->func_id = vf + hinic5_glb_pf_vf_offset(nic_io->hwdev); + + if (HINIC5_SUPPORT_VF_MAC(nic_io->hwdev) != 0) { + err = l2nic_msg_to_mgmt_sync(nic_io->hwdev, HINIC5_NIC_CMD_GET_MAC, buf_in, + in_size, buf_out, out_size); + if (err == 0) { + if (is_zero_ether_addr(mac_info->mac)) + ether_addr_copy(mac_info->mac, vf_info->drv_mac_addr); + } + return err; + } + + ether_addr_copy(mac_info->mac, vf_info->drv_mac_addr); + mac_info->msg_head.status = 0; + *out_size = sizeof(*mac_info); + + return 0; +} + +static int hinic5_set_vf_mac_msg_handler(struct hinic5_nic_io *nic_io, u16 vf, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct vf_data_storage *vf_info = HW_VF_ID_TO_OS_CO(nic_io->vf_infos, vf); + struct hinic5_port_mac_set *mac_in = buf_in; + struct hinic5_port_mac_set *mac_out = buf_out; + int 
err; + + if (!vf_info) + return -EINVAL; + + mac_in->func_id = vf + hinic5_glb_pf_vf_offset(nic_io->hwdev); + + if (vf_info->use_specified_mac && !vf_info->trust && + is_valid_ether_addr(mac_in->mac)) { + nic_warn(nic_io->dev_hdl, "PF has already set VF %d MAC address, and vf trust is off.\n", + HW_VF_ID_TO_OS(vf)); + mac_out->msg_head.status = HINIC5_PF_SET_VF_ALREADY; + *out_size = sizeof(*mac_out); + return 0; + } + + if (is_valid_ether_addr(mac_in->mac)) + mac_in->vlan_id = vf_info->pf_vlan; + + err = l2nic_msg_to_mgmt_sync(nic_io->hwdev, HINIC5_NIC_CMD_SET_MAC, + buf_in, in_size, buf_out, out_size); + if (err != 0 || (*out_size) == 0) { + nic_err(nic_io->dev_hdl, "Failed to set VF %d MAC address, err: %d,status: 0x%x, out size: 0x%x\n", + HW_VF_ID_TO_OS(vf), err, mac_out->msg_head.status, + *out_size); + return -EFAULT; + } + + if (is_valid_ether_addr(mac_in->mac) && mac_out->msg_head.status == 0) + ether_addr_copy(vf_info->drv_mac_addr, mac_in->mac); + + return err; +} + +static int hinic5_del_vf_mac_msg_handler(struct hinic5_nic_io *nic_io, u16 vf, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct vf_data_storage *vf_info = HW_VF_ID_TO_OS_CO(nic_io->vf_infos, vf); + struct hinic5_port_mac_set *mac_in = buf_in; + struct hinic5_port_mac_set *mac_out = buf_out; + int err; + + if (!vf_info) + return -EINVAL; + + mac_in->func_id = vf + hinic5_glb_pf_vf_offset(nic_io->hwdev); + + if (vf_info->use_specified_mac && !vf_info->trust && + is_valid_ether_addr(mac_in->mac)) { + nic_warn(nic_io->dev_hdl, "PF has already set VF %d MAC address, and vf trust is off.\n", + HW_VF_ID_TO_OS(vf)); + mac_out->msg_head.status = HINIC5_PF_SET_VF_ALREADY; + *out_size = sizeof(*mac_out); + return 0; + } + + if (is_valid_ether_addr(mac_in->mac)) + mac_in->vlan_id = vf_info->pf_vlan; + + err = l2nic_msg_to_mgmt_sync(nic_io->hwdev, HINIC5_NIC_CMD_DEL_MAC, + buf_in, in_size, buf_out, out_size); + if (err != 0 || (*out_size) == 0) + goto ERR_DEL_MAC; + + switch 
(mac_out->msg_head.status) { + case 0: + break; + case HINIC5_DEL_MAC_NO_MATCH: + nic_warn(nic_io->dev_hdl, "Del mac no match, Ignore delete operation.\n"); + break; + default: + goto ERR_DEL_MAC; + } + + if (is_valid_ether_addr(mac_in->mac)) + eth_zero_addr(vf_info->drv_mac_addr); + + return err; + +ERR_DEL_MAC: + nic_err(nic_io->dev_hdl, "Failed to delete VF %d MAC, err: %d, status: 0x%x, out size: 0x%x\n", + HW_VF_ID_TO_OS(vf), err, mac_out->msg_head.status, + *out_size); + return -EFAULT; +} + +static int hinic5_update_vf_mac_msg_handler(struct hinic5_nic_io *nic_io, + u16 vf, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct vf_data_storage *vf_info = HW_VF_ID_TO_OS_CO(nic_io->vf_infos, vf); + struct hinic5_port_mac_update *mac_in = buf_in; + struct hinic5_port_mac_update *mac_out = buf_out; + int err; + + if (!vf_info) + return -EINVAL; + if (!is_valid_ether_addr(mac_in->new_mac)) { + nic_err(nic_io->dev_hdl, "Update VF MAC is invalid.\n"); + return -EINVAL; + } + + mac_in->func_id = vf + hinic5_glb_pf_vf_offset(nic_io->hwdev); + +#ifndef __VMWARE__ + if (vf_info->use_specified_mac && !vf_info->trust) { + nic_warn(nic_io->dev_hdl, "PF has already set VF %d MAC address, and vf trust is off.\n", + HW_VF_ID_TO_OS(vf)); + mac_out->msg_head.status = HINIC5_PF_SET_VF_ALREADY; + *out_size = sizeof(*mac_out); + return 0; + } +#else + err = hinic_config_vf_request(((struct hinic5_hwdev *)nic_io->hwdev)->pcidev_hdl, + HW_VF_ID_TO_OS(vf), + HINIC_CFG_VF_MAC_CHANGED, + (void *)mac_in->new_mac); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to config VF %d MAC request, err: %d\n", + HW_VF_ID_TO_OS(vf), err); + return err; + } +#endif + mac_in->vlan_id = vf_info->pf_vlan; + err = l2nic_msg_to_mgmt_sync(nic_io->hwdev, HINIC5_NIC_CMD_UPDATE_MAC, + buf_in, in_size, buf_out, out_size); + if (err != 0 || (*out_size) == 0) { + nic_warn(nic_io->dev_hdl, "Failed to update VF %d MAC, err: %d,status: 0x%x, out size: 0x%x\n", + HW_VF_ID_TO_OS(vf), err, 
mac_out->msg_head.status, + *out_size); + return -EFAULT; + } + + if (mac_out->msg_head.status == 0) + ether_addr_copy(vf_info->drv_mac_addr, mac_in->new_mac); + + return err; +} + +const struct vf_msg_handler vf_cmd_handler[] = { + { + .cmd = HINIC5_NIC_CMD_VF_REGISTER, + .handler = hinic5_register_vf_msg_handler, + }, + + { + .cmd = HINIC5_NIC_CMD_GET_MAC, + .handler = hinic5_get_vf_mac_msg_handler, + }, + + { + .cmd = HINIC5_NIC_CMD_SET_MAC, + .handler = hinic5_set_vf_mac_msg_handler, + }, + + { + .cmd = HINIC5_NIC_CMD_DEL_MAC, + .handler = hinic5_del_vf_mac_msg_handler, + }, + + { + .cmd = HINIC5_NIC_CMD_UPDATE_MAC, + .handler = hinic5_update_vf_mac_msg_handler, + }, + + { + .cmd = HINIC5_NIC_CMD_VF_COS, + .handler = hinic5_get_vf_cos_msg_handler + }, +}; + +static int _l2nic_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, + u16 channel) +{ + int i, cmd_cnt = ARRAY_LEN(vf_cmd_handler); + + if (hinic5_func_type(hwdev) == TYPE_VF && (!hinic5_is_slave_host(hwdev)) + && (!hinic5_is_vf_isolation(hwdev))) { + for (i = 0; i < cmd_cnt; i++) { + if (cmd == vf_cmd_handler[i].cmd) + return hinic5_mbox_to_pf(hwdev, HINIC5_MOD_L2NIC, cmd, buf_in, + in_size, buf_out, out_size, 0, channel); + } + } + + return hinic5_msg_to_mgmt_sync(hwdev, HINIC5_MOD_L2NIC, cmd, buf_in, + in_size, buf_out, out_size, 0, channel); +} + +int l2nic_msg_to_mgmt_sync(void *hwdev, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + return _l2nic_msg_to_mgmt_sync(hwdev, cmd, buf_in, in_size, buf_out, + out_size, HINIC5_CHANNEL_NIC); +} + +int l2nic_msg_to_mgmt_sync_ch(void *hwdev, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u16 channel) +{ + return _l2nic_msg_to_mgmt_sync(hwdev, cmd, buf_in, in_size, buf_out, + out_size, channel); +} + +/* pf/ppf handler mbox msg from vf */ +int hinic5_pf_mbox_handler(void *hwdev, + u16 vf_id, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + int 
index, cmd_size = ARRAY_LEN(vf_cmd_handler); + struct hinic5_nic_io *nic_io = NULL; + + if (!hwdev) + return -EFAULT; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + for (index = 0; index < cmd_size; index++) { + if (cmd == vf_cmd_handler[index].cmd) + return vf_cmd_handler[index].handler(nic_io, vf_id, + buf_in, in_size, + buf_out, out_size); + } + + nic_warn(nic_io->dev_hdl, "NO handler for nic cmd(%u) received from vf id: %u\n", + cmd, vf_id); + + return -EINVAL; +} + +void hinic5_notify_dcb_state_event(struct hinic5_nic_io *nic_io, + struct hinic5_dcb_state *dcb_state) +{ + struct hinic5_event_info event_info = {0}; + int i; + + if (dcb_state->trust == HINIC5_DCB_PCP) { + /* This is 8 user priority to cos mapping relationships */ + nic_info(nic_io->dev_hdl, "DCB %s, default cos %u, pcp2cos %u%u%u%u%u%u%u%u\n", + (dcb_state->dcb_on != 0) ? "on" : "off", dcb_state->default_cos, + dcb_state->pcp2cos[ARRAY_INDEX_0], dcb_state->pcp2cos[ARRAY_INDEX_1], + dcb_state->pcp2cos[ARRAY_INDEX_2], dcb_state->pcp2cos[ARRAY_INDEX_3], + dcb_state->pcp2cos[ARRAY_INDEX_4], dcb_state->pcp2cos[ARRAY_INDEX_5], + dcb_state->pcp2cos[ARRAY_INDEX_6], dcb_state->pcp2cos[ARRAY_INDEX_7]); + } else { + for (i = 0; i < NIC_DCB_DSCP_NUM; i++) { + nic_info(nic_io->dev_hdl, + "DCB %s, default cos %u, dscp2cos %u%u%u%u%u%u%u%u\n", + (dcb_state->dcb_on != 0) ? 
"on" : "off", dcb_state->default_cos, + dcb_state->dscp2cos[ARRAY_INDEX_0 + i * NIC_DCB_DSCP_NUM], + dcb_state->dscp2cos[ARRAY_INDEX_1 + i * NIC_DCB_DSCP_NUM], + dcb_state->dscp2cos[ARRAY_INDEX_2 + i * NIC_DCB_DSCP_NUM], + dcb_state->dscp2cos[ARRAY_INDEX_3 + i * NIC_DCB_DSCP_NUM], + dcb_state->dscp2cos[ARRAY_INDEX_4 + i * NIC_DCB_DSCP_NUM], + dcb_state->dscp2cos[ARRAY_INDEX_5 + i * NIC_DCB_DSCP_NUM], + dcb_state->dscp2cos[ARRAY_INDEX_6 + i * NIC_DCB_DSCP_NUM], + dcb_state->dscp2cos[ARRAY_INDEX_7 + i * NIC_DCB_DSCP_NUM]); + } + } + /* Saved in sdk for stateful module */ + hinic5_save_dcb_state(nic_io, dcb_state); + + event_info.service = EVENT_SRV_NIC; + event_info.type = EVENT_NIC_DCB_STATE_CHANGE; + memcpy((void *)event_info.event_data, dcb_state, sizeof(*dcb_state)); + hinic5_event_callback(nic_io->hwdev, &event_info); +} + +static void tx_pause_excp_event_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct nic_cmd_tx_pause_notice *excp_info = buf_in; + struct hinic5_nic_io *nic_io = NULL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("Nic io is null\n"); + return; + } + + if (in_size != sizeof(*excp_info)) { + nic_err(nic_io->dev_hdl, "Invalid in_size: %u, should be %lu\n", + in_size, sizeof(*excp_info)); + return; + } + + nic_warn(nic_io->dev_hdl, "Receive tx pause exception event, excp: %u, level: %u\n", + excp_info->tx_pause_except, excp_info->except_level); + + hinic5_fault_event_report(hwdev, HINIC5_FAULT_SRC_TX_PAUSE_EXCP, + (u16)excp_info->except_level); +} + +static void bond_active_event_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic5_bond_active_report_info *active_info = buf_in; + struct hinic5_nic_io *nic_io = NULL; + struct hinic5_event_info event_info = {0}; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("Nic io is null\n"); + return; + } + + if (in_size != 
sizeof(*active_info)) { + nic_err(nic_io->dev_hdl, "Invalid in_size: %u, should be %lu\n", + in_size, sizeof(*active_info)); + return; + } + + event_info.service = EVENT_SRV_NIC; + event_info.type = HINIC5_NIC_CMD_BOND_ACTIVE_NOTICE; + memcpy((void *)event_info.event_data, active_info, sizeof(*active_info)); + + hinic5_event_callback(nic_io->hwdev, &event_info); +} + +int bond_link_event_handler(struct hinic5_nic_io *nic_io, struct hinic5_bond_link_info *bond_info) +{ + int err; + u8 link_state; + struct mag_port_info port_info = {0}; + struct hinic5_event_info event_info = {0}; + struct hinic5_event_link_info *link_info = (void *)event_info.event_data; + + /* 删除bond后需要根据mag获取link状态 */ + event_info.service = EVENT_SRV_NIC; + if (bond_info->bond_en != 0) { + nic_info(nic_io->dev_hdl, "bond link event, link_status: %u\n", + bond_info->link_status); + nic_io->feature_cap |= NIC_F_HALF_BOND_OFFLOAD; + event_info.type = (bond_info->link_status != 0) ? + EVENT_NIC_LINK_UP : EVENT_NIC_LINK_DOWN; + } else { + nic_io->feature_cap &= ~NIC_F_HALF_BOND_OFFLOAD; + err = hinic5_get_link_state(nic_io->hwdev, &link_state); + if (err != 0) + return err; + event_info.type = (link_state != 0) ? 
EVENT_NIC_LINK_UP : EVENT_NIC_LINK_DOWN; + } + + err = hinic5_get_port_info(nic_io->hwdev, &port_info, HINIC5_CHANNEL_NIC); + if (err != 0) { + nic_warn(nic_io->dev_hdl, "Failed to get port info\n"); + return err; + } + link_info->valid = 1; + link_info->autoneg_cap = port_info.autoneg_cap; + link_info->port_type = port_info.port_type; + link_info->duplex = port_info.duplex; + link_info->speed = port_info.speed; + link_info->autoneg_state = port_info.autoneg_state; + + hinic5_event_callback(nic_io->hwdev, &event_info); + + return 0; +} + +void half_bond_link_event_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + int err; + struct hinic5_bond_link_info *bond_info = buf_in; + struct hinic5_nic_io *nic_io = NULL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("Nic io is null\n"); + return; + } + + if (in_size != sizeof(struct hinic5_bond_link_info)) { + nic_err(nic_io->dev_hdl, "Invalid in_size: %u, should be %lu\n", + in_size, sizeof(struct hinic5_bond_link_info)); + return; + } + + err = bond_link_event_handler(nic_io, bond_info); + if (err != 0) + nic_err(nic_io->dev_hdl, "Failed to handle bond pf link event\n"); +} + +void macsec_pn_expired_msg_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct macsec_pn_expired_report_cmd *cmd_in = (struct macsec_pn_expired_report_cmd *)buf_in; + struct hinic5_nic_io *nic_io = NULL; + u8 index = 0; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("Nic io is null\n"); + return; + } + + if (!buf_in) { + nic_err(nic_io->dev_hdl, "MACsec event process error, in buf is null"); + return; + } + + if (in_size != sizeof(struct macsec_pn_expired_report_cmd)) { + nic_err(nic_io->dev_hdl, "MACsec event process error, in size(0x%x) is invalid", + in_size); + return; + } + + for (; index < cmd_in->info.pn_expired_size; index++) { + nic_info(nic_io->dev_hdl, "MACsec pn exceeding 
threshold, sci=0x%llx, an=0x%x", + cmd_in->info.sci[index], cmd_in->info.an[index]); + /* TODO 上报MKA软件 */ + } +} + +void offload_bond_cfg_event_handler(void *hwdev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic5_cmd_cfg_bond *bond_info = buf_in; + struct hinic5_nic_io *nic_io = NULL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("Nic io is null\n"); + return; + } + + if (in_size != sizeof(struct hinic5_cmd_cfg_bond)) { + nic_err(nic_io->dev_hdl, "Invalid in_size: %u, should be %lu\n", + in_size, sizeof(struct hinic5_cmd_cfg_bond)); + return; + } + + /* 获取卸载bond arp双发使能情况 */ + if (bond_info->arp_en != 0) + nic_io->feature_cap |= NIC_F_ARP_DUAL; + else + nic_io->feature_cap &= ~NIC_F_ARP_DUAL; + + nic_info(nic_io->dev_hdl, "Arp dual status: %s\n", + (bond_info->arp_en != 0) ? "Enable" : "Disable"); +} + +static const struct nic_event_handler nic_cmd_handler[] = { + { + .cmd = HINIC5_NIC_CMD_TX_PAUSE_EXCP_NOTICE, + .handler = tx_pause_excp_event_handler, + }, + + { + .cmd = HINIC5_NIC_CMD_BOND_ACTIVE_NOTICE, + .handler = bond_active_event_handler, + }, + + { + .cmd = HINIC5_NIC_CMD_BOND_LINK_INFO_GET, + .handler = half_bond_link_event_handler, + }, + + { + .cmd = HINIC5_NIC_CMD_MACSEC_PN_EXPIRED_NOTICE, + .handler = macsec_pn_expired_msg_handler, + }, + + { + .cmd = HINIC5_NIC_CMD_BOND_DEV_CFG, + .handler = offload_bond_cfg_event_handler, + }, +}; + +static int _event_handler(void *hwdev, u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic5_nic_io *nic_io = NULL; + u32 size = sizeof(nic_cmd_handler) / sizeof(struct nic_event_handler); + u32 i; + + if (!hwdev) + return -EINVAL; + + *out_size = 0; + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + for (i = 0; i < size; i++) { + if (cmd == nic_cmd_handler[i].cmd) { + nic_cmd_handler[i].handler(hwdev, buf_in, in_size, + buf_out, out_size); + return 0; + } + } + + /* 
can't find this event cmd */ + nic_warn(nic_io->dev_hdl, "Unsupported nic event, cmd: %u\n", cmd); + *out_size = sizeof(struct mgmt_msg_head); + ((struct mgmt_msg_head *)buf_out)->status = HINIC5_MGMT_CMD_UNSUPPORTED; + + return 0; +} + +/* vf handler mbox msg from ppf/pf */ +/* vf link change event + * vf fault report event, TBD + */ +int hinic5_vf_event_handler(void *hwdev, + u16 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + return _event_handler(hwdev, cmd, buf_in, in_size, buf_out, out_size); +} + +/* pf/ppf handler mgmt cpu report nic event */ +void hinic5_mgmt_event_handler(void *hwdev, u16 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + _event_handler(hwdev, cmd, buf_in, in_size, buf_out, out_size); +} + +/** + * hinic5_nic_sw_aeqe_cnt_handler - count ucode aeq callback for sw event + * @dev: the pointer to nic_io + * @event: soft event for the handler + * @data: cqe data + **/ +u8 hinic5_nic_sw_aeqe_cnt_handler(void *dev, u8 event, u8 *data) +{ + struct hinic5_nic_io *nic_io = NULL; + + if (!dev) + return -EINVAL; + + nic_io = (struct hinic5_nic_io *)dev; + return hinic5_nic_sw_aeqe_stats(nic_io->hwdev, event, data); +} + +/** + * hinic5_nic_aeq_register_swe_cb - register nic aeq callback for sw event + * @hwdev: the pointer to hwdev + * @pri_handle: the pointer to private handler + * @event: soft event for the handler + * @sw_cb: callback function + **/ +int hinic5_nic_aeq_register_swe_cb(void *hwdev, void *pri_handle, + enum hinic5_ucode_event_type event, + hinic5_aeq_swe_cb nic_aeq_swe_cb) +{ + struct hinic5_nic_aeqs *nic_aeqs = NULL; + struct hinic5_nic_io *nic_io = NULL; + + if (!hwdev || !pri_handle || !nic_aeq_swe_cb || event >= HINIC5_NIC_FATAL_ERROR_MAX) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + nic_aeqs = nic_io->nic_aeqs; + if (!nic_aeqs) { + nic_err(nic_io->dev_hdl, "nic_aeqs is null\n"); + return -EINVAL; + } + + 
nic_aeqs->nic_aeq_swe_cb[event] = nic_aeq_swe_cb; + nic_aeqs->nic_aeq_swe_data[event] = pri_handle; + + set_bit(HINIC5_NIC_AEQ_SW_CB_REG, &nic_aeqs->nic_aeq_sw_cb_state[event]); + + return 0; +} + +/** + * hinic5_nic_aeq_unregister_swe_cb - unregister the nic aeq callback for sw event + * @hwdev: the pointer to hwdev + * @event: soft event for the handler + **/ +void hinic5_nic_aeq_unregister_swe_cb(void *hwdev, enum hinic5_ucode_event_type event) +{ + struct hinic5_nic_aeqs *nic_aeqs = NULL; + struct hinic5_nic_io *nic_io = NULL; + + if (!hwdev || event >= HINIC5_NIC_FATAL_ERROR_MAX) + return; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return; + + nic_aeqs = nic_io->nic_aeqs; + if (!nic_aeqs) + return; + + clear_bit(HINIC5_NIC_AEQ_SW_CB_REG, &nic_aeqs->nic_aeq_sw_cb_state[event]); + + while (test_bit(HINIC5_NIC_AEQ_SW_CB_RUNNING, + &nic_aeqs->nic_aeq_sw_cb_state[event])) + usleep_range(AEQ_USLEEP_LOW_BOUND, AEQ_USLEEP_HIG_BOUND); + + nic_aeqs->nic_aeq_swe_cb[event] = NULL; +} + +/** + * hinic5_nic_aeqe_handler - callback for nic aeqe event + * @hwdev: the pointer to hwdev + * @event: soft event for the handler + * @data: cqe data + **/ +u8 hinic5_nic_aeqe_handler(void *hwdev, u8 event, u8 *data) +{ + struct hinic5_nic_aeqs *nic_aeqs = NULL; + struct hinic5_nic_io *nic_io = NULL; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + nic_aeqs = nic_io->nic_aeqs; + + if (!nic_aeqs) { + nic_err(nic_io->dev_hdl, "nic_aeqs is null\n"); + return -EINVAL; + } + + set_bit(HINIC5_NIC_AEQ_SW_CB_RUNNING, + &nic_aeqs->nic_aeq_sw_cb_state[event]); + if (test_bit(HINIC5_NIC_AEQ_SW_CB_REG, &nic_aeqs->nic_aeq_sw_cb_state[event])) + nic_aeqs->nic_aeq_swe_cb[event](nic_aeqs->nic_aeq_swe_data[event], event, data); + + clear_bit(HINIC5_NIC_AEQ_SW_CB_RUNNING, &nic_aeqs->nic_aeq_sw_cb_state[event]); + + return 0; +} + +/** + * hinic5_nic_aeqs_init - init all the nic_aeqs 
+ * @nic_io: the pointer to nic_io + * Return: 0 - Success, Negative - failure + **/ +int hinic5_nic_aeqs_init(struct hinic5_nic_io *nic_io) +{ + struct hinic5_nic_aeqs *nic_aeqs = NULL; + int err; + + if (!nic_io) + return -EINVAL; + + nic_aeqs = kzalloc(sizeof(*nic_aeqs), GFP_KERNEL); + if (!nic_aeqs) + return -ENOMEM; + + nic_io->nic_aeqs = nic_aeqs; + + err = hinic5_nic_aeq_register_swe_cb(nic_io->hwdev, nic_io, + HINIC5_INTERNAL_OTHER_FATAL_ERROR, + hinic5_nic_sw_aeqe_cnt_handler); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to register HINIC5_INTERNAL_OTHER_FATAL_ERROR\n"); + goto err_out; + } + err = hinic5_nic_aeq_register_swe_cb(nic_io->hwdev, nic_io, HINIC5_CHANNEL_BUSY, + hinic5_nic_sw_aeqe_cnt_handler); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to register HINIC5_CHANNEL_BUSY\n"); + goto err_out; + } + + err = hinic5_register_stateless_aeqs(nic_io->hwdev, nic_io->hwdev, + (hinic5_aeq_swe_cb)hinic5_nic_aeqe_handler); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to register stateless aeqs\n"); + goto err_out; + } + + return 0; + +err_out: + hinic5_nic_aeqs_free(nic_io); + + return err; +} + +/** + * hinic5_nic_aeqs_free - free all the nic_aeqs + * @nic_io: the pointer to nic_io + **/ +void hinic5_nic_aeqs_free(struct hinic5_nic_io *nic_io) +{ + struct hinic5_nic_aeqs *nic_aeqs = NULL; + u32 stateless_aeq_event; + + if (!nic_io) + return; + + hinic5_unregister_stateless_aeqs(nic_io->hwdev); + + stateless_aeq_event = (u32)HINIC5_INTERNAL_OTHER_FATAL_ERROR; + nic_aeqs = nic_io->nic_aeqs; + + if (!nic_aeqs) + return; + + for (; stateless_aeq_event < (u32)HINIC5_NIC_FATAL_ERROR_MAX; stateless_aeq_event++) + hinic5_nic_aeq_unregister_swe_cb(nic_io->hwdev, + (enum hinic5_ucode_event_type)stateless_aeq_event); + + kfree(nic_aeqs); +} diff --git a/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_nic_io.c b/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_nic_io.c new file mode 100644 index 00000000..6586c916 --- 
/dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_nic_io.c @@ -0,0 +1,1172 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/module.h> + +#include "comm_defs.h" +#include "ossl_knl.h" +#include "hinic5_crm.h" +#include "hinic5_hw.h" +#include "hinic5_common.h" +#include "hinic5_nic_sq.h" +#include "hinic5_nic_rq.h" +#include "hinic5_nic_cfg.h" +#include "hinic5_srv_nic.h" +#include "hinic5_nic.h" +#include "hinic5_nic_cmdq.h" +#include "hinic5_nic_io.h" + +#define HINIC5_DEAULT_TX_CI_PENDING_LIMIT 1 +#define HINIC5_DEAULT_TX_CI_COALESCING_TIME 1 +#define HINIC5_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG 7 +#define RX_CQE_TIMER_LOOP 0 +#define RX_CQE_COALESCE_NUM 0 + +static unsigned char tx_pending_limit = HINIC5_DEAULT_TX_CI_PENDING_LIMIT; +module_param(tx_pending_limit, byte, 0444); +MODULE_PARM_DESC(tx_pending_limit, "TX CI coalescing parameter pending_limit (default=1, unit=16 pkts)"); + +static unsigned char tx_coalescing_time = HINIC5_DEAULT_TX_CI_COALESCING_TIME; +module_param(tx_coalescing_time, byte, 0444); +MODULE_PARM_DESC(tx_coalescing_time, "TX CI coalescing parameter coalescing_time (default=1, unit=5 us)"); + +static unsigned char rq_wqe_type = HINIC5_COMPACT_RQ_WQE; +module_param(rq_wqe_type, byte, 0444); +MODULE_PARM_DESC(rq_wqe_type, "RQ WQE type, 0: COMPACT, 1: NORMAL, 2: EXTEND (default=0)"); + +#define HINIC5_MAX_CQE_AGGREGATE_NUM 1023 +#define HINIC5_DEFAULT_CQE_AGGREGATE_NUM 64 + +static unsigned short cqe_aggregate_num = HINIC5_DEFAULT_CQE_AGGREGATE_NUM; +module_param(cqe_aggregate_num, ushort, 0444); +MODULE_PARM_DESC(cqe_aggregate_num, "CQE aggregate num, 0-1023 (default=64)"); + +inline void hinic5_nic_io_param_validate(void) +{ + if (cqe_aggregate_num > HINIC5_MAX_CQE_AGGREGATE_NUM) + cqe_aggregate_num = HINIC5_DEFAULT_CQE_AGGREGATE_NUM; +} + +/* 
performance: ci addr RTE_CACHE_SIZE(64B) alignment */ +#define HINIC5_CI_Q_ADDR_SIZE (64U) + +#define CI_TABLE_SIZE(num_qps, pg_sz) \ + (ALIGN((num_qps) * HINIC5_CI_Q_ADDR_SIZE, pg_sz)) + +#define HINIC5_CI_VADDR(base_addr, q_id) ((u8 *)(base_addr) + \ + (u32)((q_id) * HINIC5_CI_Q_ADDR_SIZE)) + +#define HINIC5_CI_PADDR(base_paddr, q_id) ((base_paddr) + \ + (q_id) * HINIC5_CI_Q_ADDR_SIZE) + +#define CI_IDX_HIGH_SHIFH 12 + +#define CI_HIGN_IDX(val) ((val) >> CI_IDX_HIGH_SHIFH) + +#define SQ_CTXT_PI_IDX_SHIFT 0 +#define SQ_CTXT_CI_IDX_SHIFT 16 + +#define SQ_CTXT_PI_IDX_MASK 0xFFFFU +#define SQ_CTXT_CI_IDX_MASK 0xFFFFU + +#define SQ_CTXT_CI_PI_SET(val, member) (((val) & \ + SQ_CTXT_##member##_MASK) \ + << SQ_CTXT_##member##_SHIFT) + +#define SQ_CTXT_MODE_SP_FLAG_SHIFT 0 +#define SQ_CTXT_MODE_PKT_DROP_SHIFT 1 + +#define SQ_CTXT_MODE_SP_FLAG_MASK 0x1U +#define SQ_CTXT_MODE_PKT_DROP_MASK 0x1U + +#define SQ_CTXT_MODE_SET(val, member) (((val) & \ + SQ_CTXT_MODE_##member##_MASK) \ + << SQ_CTXT_MODE_##member##_SHIFT) + +#define SQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0 +#define SQ_CTXT_WQ_PAGE_OWNER_SHIFT 23 + +#define SQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFFU +#define SQ_CTXT_WQ_PAGE_OWNER_MASK 0x1U + +#define SQ_CTXT_WQ_PAGE_SET(val, member) (((val) & \ + SQ_CTXT_WQ_PAGE_##member##_MASK) \ + << SQ_CTXT_WQ_PAGE_##member##_SHIFT) + +#define SQ_CTXT_GLOBAL_SQ_ID_SHIFT 0 + +#define SQ_CTXT_GLOBAL_SQ_ID_MASK 0x1FFFU + +#define SQ_CTXT_GLOBAL_QUEUE_ID_SET(val, member) (((val) & \ + SQ_CTXT_##member##_MASK) \ + << SQ_CTXT_##member##_SHIFT) + +#define SQ_CTXT_VLAN_TAG_SHIFT 0 +#define SQ_CTXT_VLAN_TYPE_SEL_SHIFT 16 +#define SQ_CTXT_VLAN_INSERT_MODE_SHIFT 19 +#define SQ_CTXT_VLAN_CEQ_EN_SHIFT 23 + +#define SQ_CTXT_VLAN_TAG_MASK 0xFFFFU +#define SQ_CTXT_VLAN_TYPE_SEL_MASK 0x7U +#define SQ_CTXT_VLAN_INSERT_MODE_MASK 0x3U +#define SQ_CTXT_VLAN_CEQ_EN_MASK 0x1U + +#define SQ_CTXT_VLAN_CEQ_SET(val, member) (((val) & \ + SQ_CTXT_VLAN_##member##_MASK) \ + << SQ_CTXT_VLAN_##member##_SHIFT) + +#define 
SQ_CTXT_PREF_CI_HI_SHIFT 0 +#define SQ_CTXT_PREF_OWNER_SHIFT 4 + +#define SQ_CTXT_PREF_CI_HI_MASK 0xFU +#define SQ_CTXT_PREF_OWNER_MASK 0x1U + +#define SQ_CTXT_PREF_WQ_PFN_HI_SHIFT 0 +#define SQ_CTXT_PREF_CI_LOW_SHIFT 20 + +#define SQ_CTXT_PREF_WQ_PFN_HI_MASK 0xFFFFFU +#define SQ_CTXT_PREF_CI_LOW_MASK 0xFFFU + +#define SQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT 0 + +#define SQ_CTXT_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU + +#define SQ_CTXT_WQ_BLOCK_SET(val, member) (((val) & \ + SQ_CTXT_WQ_BLOCK_##member##_MASK) \ + << SQ_CTXT_WQ_BLOCK_##member##_SHIFT) + +#define RQ_CTXT_PI_IDX_SHIFT 0 +#define RQ_CTXT_CI_IDX_SHIFT 16 + +#define RQ_CTXT_PI_IDX_MASK 0xFFFFU +#define RQ_CTXT_CI_IDX_MASK 0xFFFFU + +#define RQ_CTXT_CI_PI_SET(val, member) (((val) & \ + RQ_CTXT_##member##_MASK) \ + << RQ_CTXT_##member##_SHIFT) + +#define RQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0 +#define RQ_CTXT_WQ_PAGE_WQE_TYPE_SHIFT 28 +#define RQ_CTXT_WQ_PAGE_OWNER_SHIFT 31 + +#define RQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFFU +#define RQ_CTXT_WQ_PAGE_WQE_TYPE_MASK 0x3U +#define RQ_CTXT_WQ_PAGE_OWNER_MASK 0x1U + +#define RQ_CTXT_WQ_PAGE_SET(val, member) (((val) & \ + RQ_CTXT_WQ_PAGE_##member##_MASK) << \ + RQ_CTXT_WQ_PAGE_##member##_SHIFT) + +#define RQ_CTXT_CQE_LEN_SHIFT 28 +#define RQ_CTXT_MAX_COUNT_SHIFT 18 + +#define RQ_CTXT_CQE_LEN_MASK 0x3U +#define RQ_CTXT_MAX_COUNT_MASK 0x3FFU + +#define RQ_CTXT_CQE_LEN_SET(val, member) (((val) & \ + RQ_CTXT_##member##_MASK) << \ + RQ_CTXT_##member##_SHIFT) + +#define RQ_CTXT_PREF_CI_HI_SHIFT 0 +#define RQ_CTXT_PREF_OWNER_SHIFT 4 + +#define RQ_CTXT_PREF_CI_HI_MASK 0xFU +#define RQ_CTXT_PREF_OWNER_MASK 0x1U + +#define RQ_CTXT_PREF_WQ_PFN_HI_SHIFT 0 +#define RQ_CTXT_PREF_CI_LOW_SHIFT 20 + +#define RQ_CTXT_PREF_WQ_PFN_HI_MASK 0xFFFFFU +#define RQ_CTXT_PREF_CI_LOW_MASK 0xFFFU + +#define RQ_CTXT_WQ_BLOCK_PFN_HI_SHIFT 0 + +#define RQ_CTXT_WQ_BLOCK_PFN_HI_MASK 0x7FFFFFU + +#define RQ_CTXT_WQ_BLOCK_SET(val, member) (((val) & \ + RQ_CTXT_WQ_BLOCK_##member##_MASK) << \ + RQ_CTXT_WQ_BLOCK_##member##_SHIFT) + 
+#define SIZE_16BYTES(size) (ALIGN((size), 16) >> 4) + +#define WQ_PAGE_PFN_SHIFT 12 +#define WQ_BLOCK_PFN_SHIFT 9 + +#define WQ_PAGE_PFN(page_addr) ((page_addr) >> WQ_PAGE_PFN_SHIFT) +#define WQ_BLOCK_PFN(page_addr) ((page_addr) >> WQ_BLOCK_PFN_SHIFT) + +/* sq and rq */ +#define TOTAL_DB_NUM(num_qps) ((u16)(2 * (num_qps))) + +static int hinic5_create_sq(struct hinic5_nic_io *nic_io, struct hinic5_io_queue *sq, + u16 q_id, u32 sq_depth, u16 sq_msix_idx) +{ + int err; + + /* sq used & hardware request init 1 */ + sq->owner = 1; + + sq->q_id = q_id; + sq->msix_entry_idx = sq_msix_idx; + + err = hinic5_wq_create(nic_io->hwdev, &sq->wq, sq_depth, + (u16)BIT(HINIC5_SQ_WQEBB_SHIFT)); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to create tx queue(%u) wq\n", + q_id); + return err; + } + + sq->head_addr = (u64)hinic5_wq_wqebb_addr(&sq->wq, 0); + sq->tail_addr = (u64)(sq->head_addr + sq_depth * BIT(HINIC5_SQ_WQEBB_SHIFT)); + + return 0; +} + +static void hinic5_destroy_sq(struct hinic5_nic_io *nic_io, struct hinic5_io_queue *sq) +{ + hinic5_wq_destroy(&sq->wq); +} + +int hinic5_get_rq_wqe_type(void *hwdev) +{ +#ifdef __UEFI__ + return HINIC5_NORMAL_RQ_WQE; +#endif + + struct hinic5_hwdev *dev = hwdev; + + /* rq_wqe_type is the configuration when the driver is installed, + * but it may not be the actual configuration. + */ + if (HINIC5_SUPPORT_RX_HW_COMPACT_CQE(hwdev) || HINIC5_SUPPORT_RX_SW_COMPACT_CQE(hwdev)) { + if (rq_wqe_type != HINIC5_COMPACT_RQ_WQE && rq_wqe_type != HINIC5_NORMAL_RQ_WQE && + rq_wqe_type != HINIC5_EXTEND_RQ_WQE) { + return HINIC5_NORMAL_RQ_WQE; + } + } else { + if (rq_wqe_type != HINIC5_NORMAL_RQ_WQE && rq_wqe_type != HINIC5_EXTEND_RQ_WQE) + return HINIC5_NORMAL_RQ_WQE; + } + + if (HINIC5_SUPPORT_FEATURE(dev, TC_FLOWER_OFFLOAD)) + return (rq_wqe_type != HINIC5_COMPACT_RQ_WQE) ? 
rq_wqe_type : HINIC5_NORMAL_RQ_WQE; + + return rq_wqe_type; +} + +static int hinic5_create_rq(struct hinic5_nic_io *nic_io, struct hinic5_io_queue *rq, + u16 q_id, u32 rq_depth, u16 rq_msix_idx) +{ + int err; + + rq->wqe_type = (u8)(hinic5_get_rq_wqe_type(nic_io->hwdev)); + + rq->q_id = q_id; + rq->msix_entry_idx = rq_msix_idx; + + err = hinic5_wq_create(nic_io->hwdev, &rq->wq, rq_depth, + (u16)BIT(HINIC5_RQ_WQEBB_SHIFT + rq->wqe_type)); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to create rx queue(%u) wq\n", + q_id); + return err; + } + + return 0; +} + +static void hinic5_destroy_rq(struct hinic5_nic_io *nic_io, struct hinic5_io_queue *rq) +{ + hinic5_wq_destroy(&rq->wq); +} + +static int create_qp(struct hinic5_nic_io *nic_io, struct hinic5_io_queue *sq, + struct hinic5_io_queue *rq, u16 q_id, u32 sq_depth, + u32 rq_depth, u16 qp_msix_idx) +{ + int err; + + err = hinic5_create_sq(nic_io, sq, q_id, sq_depth, qp_msix_idx); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to create sq, qid: %u\n", + q_id); + return err; + } + + err = hinic5_create_rq(nic_io, rq, q_id, rq_depth, qp_msix_idx); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to create rq, qid: %u\n", + q_id); + goto create_rq_err; + } + + return 0; + +create_rq_err: + hinic5_destroy_sq(nic_io, sq); + + return err; +} + +static void destroy_qp(struct hinic5_nic_io *nic_io, struct hinic5_io_queue *sq, + struct hinic5_io_queue *rq) +{ + hinic5_destroy_sq(nic_io, sq); + hinic5_destroy_rq(nic_io, rq); +} + +int hinic5_init_nicio_res(void *hwdev, u16 usr_qps_num) +{ + struct hinic5_nic_io *nic_io = NULL; + void __iomem *db_base = NULL; + int err; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("Failed to get nic service adapter\n"); + goto fail_to_out; + } + + nic_io->max_qps = hinic5_func_max_qnum(hwdev) - usr_qps_num; + + err = hinic5_alloc_db_addr(hwdev, &db_base, NULL); + if (err != 0) { + 
nic_err(nic_io->dev_hdl, "Failed to allocate doorbell for sqs\n"); + goto alloc_sq_db_fail; + } + nic_io->sqs_db_addr = (u8 *)db_base; + + err = hinic5_alloc_db_addr(hwdev, &db_base, NULL); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to allocate doorbell for rqs\n"); + goto alloc_rq_db_fail; + } + nic_io->rqs_db_addr = (u8 *)db_base; + + nic_io->sq_ci_vaddr_base = + dma_zalloc_coherent(nic_io->dev_hdl, CI_TABLE_SIZE(nic_io->max_qps, PAGE_SIZE), + &nic_io->sq_ci_dma_base, GFP_KERNEL); + if (!nic_io->sq_ci_vaddr_base) { + nic_err(nic_io->dev_hdl, "Failed to allocate sq ci area\n"); + goto alloc_tx_vaddr_base_fail; + } + + nic_io->rq_ci_vaddr_base = + dma_zalloc_coherent(nic_io->dev_hdl, CI_TABLE_SIZE(nic_io->max_qps, PAGE_SIZE), + &nic_io->rq_ci_dma_base, GFP_KERNEL); + if (!nic_io->rq_ci_vaddr_base) { + nic_err(nic_io->dev_hdl, "Failed to allocate rq ci area\n"); + goto alloc_rx_vaddr_base_fail; + } + + return 0; + +alloc_rx_vaddr_base_fail: + dma_free_coherent(nic_io->dev_hdl, CI_TABLE_SIZE(nic_io->max_qps, PAGE_SIZE), + nic_io->sq_ci_vaddr_base, nic_io->sq_ci_dma_base); + +alloc_tx_vaddr_base_fail: + hinic5_free_db_addr(hwdev, nic_io->rqs_db_addr, NULL); + +alloc_rq_db_fail: + hinic5_free_db_addr(hwdev, nic_io->sqs_db_addr, NULL); + +alloc_sq_db_fail: + return -ENOMEM; + +fail_to_out: + return -EFAULT; +} + +void hinic5_deinit_nicio_res(void *hwdev) +{ + struct hinic5_nic_io *nic_io = NULL; + + if (!hwdev) + return; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("Failed to get nic service adapter\n"); + return; + } + + dma_free_coherent(nic_io->dev_hdl, + CI_TABLE_SIZE(nic_io->max_qps, PAGE_SIZE), + nic_io->sq_ci_vaddr_base, nic_io->sq_ci_dma_base); + + dma_free_coherent(nic_io->dev_hdl, + CI_TABLE_SIZE(nic_io->max_qps, PAGE_SIZE), + nic_io->rq_ci_vaddr_base, nic_io->rq_ci_dma_base); +/* free all doorbell */ + hinic5_free_db_addr(hwdev, nic_io->sqs_db_addr, NULL); + hinic5_free_db_addr(hwdev, nic_io->rqs_db_addr, 
NULL); +} + +int hinic5_alloc_qps(void *hwdev, struct irq_info *qps_msix_arry, + struct hinic5_dyna_qp_params *qp_params) +{ + struct hinic5_io_queue *sqs = NULL; + struct hinic5_io_queue *rqs = NULL; + struct hinic5_nic_io *nic_io = NULL; + u16 q_id, i, total_num_qps; + int err; + + if (!hwdev || !qps_msix_arry || !qp_params) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("Failed to get nic service adapter\n"); + return -EFAULT; + } + + total_num_qps = qp_params->num_qps + qp_params->xdp_qps; + if (total_num_qps > nic_io->max_qps || qp_params->num_qps == 0) + return -EINVAL; + + sqs = kcalloc(total_num_qps, sizeof(*sqs), GFP_KERNEL); + if (!sqs) { + err = -ENOMEM; + goto alloc_sqs_err; + } + + rqs = kcalloc(total_num_qps, sizeof(*rqs), GFP_KERNEL); + if (!rqs) { + err = -ENOMEM; + goto alloc_rqs_err; + } + + for (q_id = 0; q_id < total_num_qps; q_id++) { + err = create_qp(nic_io, &sqs[q_id], &rqs[q_id], q_id, qp_params->sq_depth, + qp_params->rq_depth, qps_msix_arry[q_id].msix_entry_idx); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to allocate qp %u, err: %d\n", q_id, err); + goto create_qp_err; + } + } + + qp_params->sqs = sqs; + qp_params->rqs = rqs; + + return 0; + +create_qp_err: + for (i = 0; i < q_id; i++) + destroy_qp(nic_io, &sqs[i], &rqs[i]); + + kfree(rqs); + +alloc_rqs_err: + kfree(sqs); + +alloc_sqs_err: + + return err; +} + +void hinic5_free_qps(void *hwdev, struct hinic5_dyna_qp_params *qp_params) +{ + struct hinic5_nic_io *nic_io = NULL; + u16 q_id, total_num_qps; + + if (!hwdev || !qp_params) + return; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("Failed to get nic service adapter\n"); + return; + } + + total_num_qps = qp_params->num_qps + qp_params->xdp_qps; + + for (q_id = 0; q_id < total_num_qps; q_id++) + destroy_qp(nic_io, &qp_params->sqs[q_id], + &qp_params->rqs[q_id]); + + kfree(qp_params->sqs); + kfree(qp_params->rqs); +} + 
+static void init_qps_info(struct hinic5_nic_io *nic_io, + struct hinic5_dyna_qp_params *qp_params) +{ + struct hinic5_io_queue *sqs = qp_params->sqs; + struct hinic5_io_queue *rqs = qp_params->rqs; + u16 q_id; + u16 total_num_qps = qp_params->num_qps + qp_params->xdp_qps; + + nic_io->xdp_qps = qp_params->xdp_qps; + nic_io->num_qps = qp_params->num_qps; + nic_io->sq = qp_params->sqs; + nic_io->rq = qp_params->rqs; + for (q_id = 0; q_id < total_num_qps; q_id++) { + sqs[q_id].cons_idx_addr = HINIC5_CI_VADDR(nic_io->sq_ci_vaddr_base, q_id); + /* clear ci value */ + *(u16 *)sqs[q_id].cons_idx_addr = 0; + sqs[q_id].db_addr = nic_io->sqs_db_addr; + + rqs[q_id].cons_idx_addr = HINIC5_CI_VADDR(nic_io->rq_ci_vaddr_base, q_id); + *(u32 *)rqs[q_id].cons_idx_addr = 0; + /* The first num_qps doorbell is used by sq */ + rqs[q_id].db_addr = nic_io->rqs_db_addr; + } +} + +int hinic5_init_qps(void *hwdev, struct hinic5_dyna_qp_params *qp_params) +{ + struct hinic5_nic_io *nic_io = NULL; + + if (!hwdev || !qp_params) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("Failed to get nic service adapter\n"); + return -EFAULT; + } + + init_qps_info(nic_io, qp_params); + + return hinic5_init_qp_ctxts(hwdev); +} + +void hinic5_deinit_qps(void *hwdev, struct hinic5_dyna_qp_params *qp_params) +{ + struct hinic5_nic_io *nic_io = NULL; + + if (!hwdev || !qp_params) + return; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("Failed to get nic service adapter\n"); + return; + } + + qp_params->sqs = nic_io->sq; + qp_params->rqs = nic_io->rq; + qp_params->num_qps = nic_io->num_qps; + qp_params->xdp_qps = nic_io->xdp_qps; + + if (nic_io->enable_queue_pooling == 0) + hinic5_free_qp_ctxts(hwdev); +} + +int hinic5_create_qps(void *hwdev, u16 num_qp, u32 sq_depth, u32 rq_depth, + struct irq_info *qps_msix_arry) +{ + struct hinic5_nic_io *nic_io = NULL; + struct hinic5_dyna_qp_params qp_params = {0}; + int 
err; + + if (!hwdev || !qps_msix_arry) + return -EFAULT; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) { + pr_err("Failed to get nic service adapter\n"); + return -EFAULT; + } + + err = hinic5_init_nicio_res(hwdev, 0); + if (err != 0) + return err; + + qp_params.num_qps = num_qp; + qp_params.sq_depth = sq_depth; + qp_params.rq_depth = rq_depth; + err = hinic5_alloc_qps(hwdev, qps_msix_arry, &qp_params); + if (err != 0) { + hinic5_deinit_nicio_res(hwdev); + nic_err(nic_io->dev_hdl, + "Failed to allocate qps, err: %d\n", err); + return err; + } + + init_qps_info(nic_io, &qp_params); + + return 0; +} + +void hinic5_destroy_qps(void *hwdev) +{ + struct hinic5_nic_io *nic_io = NULL; + struct hinic5_dyna_qp_params qp_params = {0}; + + if (!hwdev) + return; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return; + + hinic5_deinit_qps(hwdev, &qp_params); + hinic5_free_qps(hwdev, &qp_params); + hinic5_deinit_nicio_res(hwdev); +} + +void *hinic5_get_nic_queue(void *hwdev, u16 q_id, enum hinic5_queue_type q_type) +{ + struct hinic5_nic_io *nic_io = NULL; + + if (!hwdev || q_type >= HINIC5_MAX_QUEUE_TYPE) + return NULL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return NULL; + if (q_id >= nic_io->max_qps) + return NULL; + + return ((q_type == HINIC5_SQ) ? 
&nic_io->sq[q_id] : &nic_io->rq[q_id]); +} + +void hinic5_sq_prepare_ctxt(struct hinic5_nic_io *nic_io, struct hinic5_io_queue *sq, + u16 sq_id, struct hinic5_sq_ctxt *sq_ctxt) +{ + u64 wq_page_addr; + u64 wq_page_pfn, wq_block_pfn; + u32 wq_page_pfn_hi, wq_page_pfn_lo; + u32 wq_block_pfn_hi, wq_block_pfn_lo; + u16 pi_start, ci_start; + + nic_io->cmdq_ops->prepare_sq_ctxt_drop_and_prefetch(sq_ctxt); + + ci_start = hinic5_get_sq_local_ci(sq); + pi_start = hinic5_get_sq_local_pi(sq); + + wq_page_addr = hinic5_wq_get_first_wqe_page_addr(&sq->wq); + + wq_page_pfn = WQ_PAGE_PFN(wq_page_addr); + wq_page_pfn_hi = upper_32_bits(wq_page_pfn); + wq_page_pfn_lo = lower_32_bits(wq_page_pfn); + + wq_block_pfn = WQ_BLOCK_PFN(sq->wq.wq_block_paddr); + wq_block_pfn_hi = upper_32_bits(wq_block_pfn); + wq_block_pfn_lo = lower_32_bits(wq_block_pfn); + + sq_ctxt->ci_pi = + SQ_CTXT_CI_PI_SET(ci_start, CI_IDX) | + SQ_CTXT_CI_PI_SET(pi_start, PI_IDX); + + sq_ctxt->drop_mode_sp = + SQ_CTXT_MODE_SET(0, SP_FLAG) | + SQ_CTXT_MODE_SET(0, PKT_DROP); + + sq_ctxt->wq_pfn_hi_owner = + SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) | + SQ_CTXT_WQ_PAGE_SET(1, OWNER); + + sq_ctxt->wq_pfn_lo = wq_page_pfn_lo; + + sq_ctxt->global_sq_id = + SQ_CTXT_GLOBAL_QUEUE_ID_SET(sq_id, GLOBAL_SQ_ID); + + /* enable insert c-vlan in default */ + sq_ctxt->vlan_ceq_attr = + SQ_CTXT_VLAN_CEQ_SET(0, CEQ_EN) | + SQ_CTXT_VLAN_CEQ_SET(1, INSERT_MODE); + + sq_ctxt->rsvd0 = 0; + + sq_ctxt->pref_ci_owner = + SQ_CTXT_PREF_SET(CI_HIGN_IDX(ci_start), CI_HI) | + SQ_CTXT_PREF_SET(1, OWNER); + + sq_ctxt->pref_wq_pfn_hi_ci = + SQ_CTXT_PREF_SET(ci_start, CI_LOW) | + SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI); + + sq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo; + + sq_ctxt->wq_block_pfn_hi = + SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI); + + sq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo; + + hinic5_cpu_to_be32(sq_ctxt, sizeof(*sq_ctxt)); +} + +static void hinic5_rq_prepare_ctxt_get_wq_info(struct hinic5_io_queue *rq, + u32 *wq_page_pfn_hi, 
u32 *wq_page_pfn_lo, + u32 *wq_block_pfn_hi, u32 *wq_block_pfn_lo) +{ + u64 wq_page_addr; + u64 wq_page_pfn, wq_block_pfn; + + wq_page_addr = hinic5_wq_get_first_wqe_page_addr(&rq->wq); + + wq_page_pfn = WQ_PAGE_PFN(wq_page_addr); + *wq_page_pfn_hi = upper_32_bits(wq_page_pfn); + *wq_page_pfn_lo = lower_32_bits(wq_page_pfn); + + wq_block_pfn = WQ_BLOCK_PFN(rq->wq.wq_block_paddr); + *wq_block_pfn_hi = upper_32_bits(wq_block_pfn); + *wq_block_pfn_lo = lower_32_bits(wq_block_pfn); +} + +void hinic5_rq_prepare_ctxt(struct hinic5_nic_io *nic_io, + struct hinic5_io_queue *rq, + struct hinic5_rq_ctxt *rq_ctxt) +{ + u32 wq_page_pfn_hi, wq_page_pfn_lo; + u32 wq_block_pfn_hi, wq_block_pfn_lo; + u16 pi_start, ci_start; + u16 wqe_type = rq->wqe_type; + u64 ci_dma_base; + bool support_rq_sw_compact_wqe = false; + + /* RQ depth is in unit of 8Bytes */ + ci_start = (u16)((u32)hinic5_get_rq_local_ci(rq) << wqe_type); + pi_start = (u16)((u32)hinic5_get_rq_local_pi(rq) << wqe_type); + + hinic5_rq_prepare_ctxt_get_wq_info(rq, &wq_page_pfn_hi, &wq_page_pfn_lo, + &wq_block_pfn_hi, &wq_block_pfn_lo); + + support_rq_sw_compact_wqe = HINIC5_SUPPORT_RX_SW_COMPACT_CQE(nic_io->hwdev); + nic_io->cmdq_ops->prepare_rq_ctxt_ceq_and_prefetch(rq, rq_ctxt, support_rq_sw_compact_wqe); + + rq_ctxt->ci_pi = + RQ_CTXT_CI_PI_SET(ci_start, CI_IDX) | + RQ_CTXT_CI_PI_SET(pi_start, PI_IDX); + + rq_ctxt->wq_pfn_hi_type_owner = + RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) | + RQ_CTXT_WQ_PAGE_SET(1, OWNER); + + switch (wqe_type) { + case HINIC5_EXTEND_RQ_WQE: + /* use 32Byte WQE with SGE for CQE */ + rq_ctxt->wq_pfn_hi_type_owner |= RQ_CTXT_WQ_PAGE_SET(0, WQE_TYPE); + break; + case HINIC5_NORMAL_RQ_WQE: + /* use 16Byte WQE with 32Bytes SGE for CQE */ + rq_ctxt->wq_pfn_hi_type_owner |= RQ_CTXT_WQ_PAGE_SET(2, WQE_TYPE); + rq_ctxt->cqe_sge_len = RQ_CTXT_CQE_LEN_SET(1, CQE_LEN); + break; + case HINIC5_COMPACT_RQ_WQE: + /* use 8Byte WQE */ + rq_ctxt->wq_pfn_hi_type_owner |= RQ_CTXT_WQ_PAGE_SET(3, WQE_TYPE); + if 
(support_rq_sw_compact_wqe) { + rq_ctxt->cqe_sge_len |= RQ_CTXT_CQE_LEN_SET(cqe_aggregate_num, MAX_COUNT); + ci_dma_base = HINIC5_CI_PADDR(nic_io->rq_ci_dma_base, rq->q_id); + rq_ctxt->pi_paddr_hi = upper_32_bits(ci_dma_base >> RQ_CI_ADDR_SHIFT); + rq_ctxt->pi_paddr_lo = lower_32_bits(ci_dma_base >> RQ_CI_ADDR_SHIFT); + } + break; + default: + pr_err("Invalid rq wqe type: %u", wqe_type); + } + + rq_ctxt->wq_pfn_lo = wq_page_pfn_lo; + + rq_ctxt->pref_ci_owner = + RQ_CTXT_PREF_SET(CI_HIGN_IDX(ci_start), CI_HI) | + RQ_CTXT_PREF_SET(1, OWNER); + + rq_ctxt->pref_wq_pfn_hi_ci = + RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI) | + RQ_CTXT_PREF_SET(ci_start, CI_LOW); + + rq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo; + + rq_ctxt->wq_block_pfn_hi = + RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI); + + rq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo; + + hinic5_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt)); +} + +static inline u16 hinic5_get_max_ctxts(u16 num_qps, u16 cmd_buf_size) +{ + u16 max_ctxts = (cmd_buf_size - HINIC5_QP_CTXT_HEADER_SIZE) / sizeof(struct hinic5_rq_ctxt); + + max_ctxts = min_t(u16, HINIC5_Q_CTXT_MAX, max_ctxts); + return (u16)min(max_ctxts, num_qps); +} + +static int init_sq_ctxts(struct hinic5_nic_io *nic_io) +{ + struct hinic5_cmd_buf *cmd_buf = NULL; + u64 out_param = 0; + u16 q_id, max_ctxts, use_buf_size; + int err = 0; + u8 cmd; + u32 qp_nums = nic_io->num_qps + nic_io->xdp_qps; + + cmd_buf = hinic5_alloc_cmd_buf(nic_io->hwdev); + if (!cmd_buf) { + nic_err(nic_io->dev_hdl, "Failed to allocate cmd buf\n"); + return -ENOMEM; + } + + q_id = 0; + while (q_id < qp_nums) { + max_ctxts = hinic5_get_max_ctxts(qp_nums - q_id, cmd_buf->size); + use_buf_size = + HINIC5_QP_CTXT_HEADER_SIZE + max_ctxts * sizeof(struct hinic5_sq_ctxt); + memset(cmd_buf->buf, 0, use_buf_size); + + cmd = nic_io->cmdq_ops->prepare_cmd_buf_qp_context_multi_store(nic_io, cmd_buf, + HINIC5_QP_CTXT_TYPE_SQ, q_id, max_ctxts); + + err = hinic5_cmdq_direct_resp(nic_io->hwdev, HINIC5_MOD_L2NIC, + cmd, 
cmd_buf, &out_param, 0, HINIC5_CHANNEL_NIC); + if (err != 0 || out_param != 0) { + nic_err(nic_io->dev_hdl, "Failed to set SQ ctxts, err: %d, out_param: 0x%llx\n", + err, out_param); + + err = -EFAULT; + break; + } + + q_id += max_ctxts; + } + + hinic5_free_cmd_buf(nic_io->hwdev, cmd_buf); + + return err; +} + +static int init_rq_ctxts(struct hinic5_nic_io *nic_io) +{ + struct hinic5_cmd_buf *cmd_buf = NULL; + u64 out_param = 0; + u16 q_id, max_ctxts, use_buf_size; + u8 cmd; + int err = 0; + + cmd_buf = hinic5_alloc_cmd_buf(nic_io->hwdev); + if (!cmd_buf) { + nic_err(nic_io->dev_hdl, "Failed to allocate cmd buf\n"); + return -ENOMEM; + } + + q_id = 0; + while (q_id < nic_io->num_qps) { + max_ctxts = hinic5_get_max_ctxts(nic_io->num_qps - q_id, cmd_buf->size); + use_buf_size = + HINIC5_QP_CTXT_HEADER_SIZE + max_ctxts * sizeof(struct hinic5_rq_ctxt); + memset(cmd_buf->buf, 0, use_buf_size); + + cmd = nic_io->cmdq_ops->prepare_cmd_buf_qp_context_multi_store(nic_io, cmd_buf, + HINIC5_QP_CTXT_TYPE_RQ, q_id, max_ctxts); + err = hinic5_cmdq_direct_resp(nic_io->hwdev, HINIC5_MOD_L2NIC, + cmd, cmd_buf, &out_param, 0, + HINIC5_CHANNEL_NIC); + if (err != 0 || out_param != 0) { + nic_err(nic_io->dev_hdl, "Failed to set RQ ctxts, err: %d, out_param: 0x%llx\n", + err, out_param); + + err = -EFAULT; + break; + } + + q_id += max_ctxts; + } + + hinic5_free_cmd_buf(nic_io->hwdev, cmd_buf); + + return err; +} + +static int init_qp_ctxts(struct hinic5_nic_io *nic_io) +{ + int err; + + err = init_sq_ctxts(nic_io); + if (err != 0) + return err; + + err = init_rq_ctxts(nic_io); + if (err != 0) + return err; + + return 0; +} + +static int clean_queue_offload_ctxt(struct hinic5_nic_io *nic_io, + enum hinic5_qp_ctxt_type ctxt_type) +{ + struct hinic5_cmd_buf *cmd_buf = NULL; + u64 out_param = 0; + u8 cmd; + int err; + + /* check ops before allocating so the error path cannot leak cmd_buf */ + if (!nic_io->cmdq_ops) + return -EINVAL; + + cmd_buf = hinic5_alloc_cmd_buf(nic_io->hwdev); + if (!cmd_buf) { + nic_err(nic_io->dev_hdl, "Failed to allocate cmd buf\n"); + return -ENOMEM; + }
+ + cmd = nic_io->cmdq_ops->prepare_cmd_buf_clean_tso_lro_space(nic_io, cmd_buf, + ctxt_type); + + err = hinic5_cmdq_direct_resp(nic_io->hwdev, HINIC5_MOD_L2NIC, + cmd, cmd_buf, &out_param, 0, + HINIC5_CHANNEL_NIC); + if (err != 0 || out_param != 0) { + nic_err(nic_io->dev_hdl, "Failed to clean queue offload ctxts, err: %d,out_param: 0x%llx\n", + err, out_param); + + err = -EFAULT; + } + + hinic5_free_cmd_buf(nic_io->hwdev, cmd_buf); + + return err; +} + +static int clean_qp_offload_ctxt(struct hinic5_nic_io *nic_io) +{ + /* clean LRO/TSO context space */ + return ((clean_queue_offload_ctxt(nic_io, HINIC5_QP_CTXT_TYPE_SQ) != 0) || + (clean_queue_offload_ctxt(nic_io, HINIC5_QP_CTXT_TYPE_RQ) != 0)); +} + +static int init_sq_ci_ctxts(struct hinic5_nic_io *nic_io) +{ + struct hinic5_sq_attr sq_attr; + u16 q_id; + int err; + u32 qp_nums; + + qp_nums = nic_io->num_qps + nic_io->xdp_qps; + + for (q_id = 0; q_id < qp_nums; q_id++) { + sq_attr.ci_dma_base = + HINIC5_CI_PADDR(nic_io->sq_ci_dma_base, q_id); + sq_attr.pending_limit = tx_pending_limit; + sq_attr.coalescing_time = tx_coalescing_time; + sq_attr.intr_en = 1; + sq_attr.intr_idx = nic_io->sq[q_id].msix_entry_idx; + sq_attr.l2nic_sqn = q_id; + sq_attr.dma_attr_off = 0; + err = hinic5_set_sq_ci_ctx(nic_io, &sq_attr); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to set sq ci context\n"); + return -EFAULT; + } + } + + return 0; +} + +static int init_rq_ci_ctxts(struct hinic5_nic_io *nic_io) +{ + struct hinic5_rq_attr rq_attr; + u16 q_id; + int err; + + for (q_id = 0; q_id < nic_io->num_qps; q_id++) { + rq_attr.ci_dma_base = 0; + rq_attr.pending_limit = 0; + rq_attr.coalescing_time = 0; + rq_attr.intr_idx = nic_io->rq[q_id].msix_entry_idx; + rq_attr.l2nic_rqn = q_id; + rq_attr.cqe_type = 0; + if (hinic5_get_rq_wqe_type(nic_io->hwdev) == HINIC5_COMPACT_RQ_WQE) { + rq_attr.cqe_type = 1; + rq_attr.ci_dma_base = HINIC5_CI_PADDR(nic_io->rq_ci_dma_base, q_id); + rq_attr.coalescing_time = 
RX_CQE_TIMER_LOOP; + rq_attr.pending_limit = RX_CQE_COALESCE_NUM; + } + + err = hinic5_set_rq_ci_ctx(nic_io, &rq_attr); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to set rq ci context\n"); + return -EFAULT; + } + } + + return 0; +} + +/* init qps ctxt and set sq ci attr and arm all sq */ +int hinic5_init_qp_ctxts(void *hwdev) +{ + struct hinic5_nic_io *nic_io = NULL; + u32 rq_depth; + int err; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EFAULT; + + err = init_qp_ctxts(nic_io); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to init QP ctxts\n"); + return err; + } + + /* clean LRO/TSO context space */ + err = clean_qp_offload_ctxt(nic_io); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to clean qp offload ctxts\n"); + return err; + } + + if (nic_io->enable_queue_pooling == 0 || nic_io->first_enable_queue_pooling != 0) { + nic_io->first_enable_queue_pooling = 0; + rq_depth = nic_io->rq[0].wq.q_depth << nic_io->rq[0].wqe_type; + err = hinic5_set_root_ctxt(hwdev, rq_depth, nic_io->sq[0].wq.q_depth, + nic_io->rx_buff_len, HINIC5_CHANNEL_NIC); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to set root context\n"); + return err; + } + } + + err = init_sq_ci_ctxts(nic_io); + if (err != 0) + goto clean_root_ctxt; + + if (HINIC5_SUPPORT_RX_HW_COMPACT_CQE(hwdev)) { + /* init rxq cqe context */ + err = init_rq_ci_ctxts(nic_io); + if (err != 0) + goto clean_root_ctxt; + } + + return 0; + +clean_root_ctxt: + hinic5_clean_root_ctxt(hwdev, HINIC5_CHANNEL_NIC); + + return err; +} + +void hinic5_free_qp_ctxts(void *hwdev) +{ + if (!hwdev) + return; + + hinic5_clean_root_ctxt(hwdev, HINIC5_CHANNEL_NIC); +} + +static int hinic5_update_sq_coalesce(struct hinic5_nic_io *nic_io, u32 sq_id, u8 num, u8 time) +{ + struct hinic5_sq_attr sq_attr = {0}; + int err; + + sq_attr.ci_dma_base = HINIC5_CI_PADDR(nic_io->sq_ci_dma_base, sq_id); + sq_attr.pending_limit = num; + 
sq_attr.coalescing_time = time; + sq_attr.intr_en = 1; + sq_attr.intr_idx = nic_io->sq[sq_id].msix_entry_idx; + sq_attr.l2nic_sqn = sq_id; + err = hinic5_set_sq_ci_ctx(nic_io, &sq_attr); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to update sq coalesce\n"); + return -EFAULT; + } + + return 0; +} + +static int hinic5_update_rq_coalesce(struct hinic5_nic_io *nic_io, u32 rq_id, u8 num, u8 time) +{ + struct hinic5_rq_attr rq_attr = {0}; + int err; + + rq_attr.ci_dma_base = 0; + rq_attr.pending_limit = num; + rq_attr.coalescing_time = time; + rq_attr.intr_idx = nic_io->rq[rq_id].msix_entry_idx; + rq_attr.l2nic_rqn = rq_id; + rq_attr.cqe_type = 0; + if (hinic5_get_rq_wqe_type(nic_io->hwdev) == HINIC5_COMPACT_RQ_WQE) { + rq_attr.cqe_type = 1; + rq_attr.ci_dma_base = HINIC5_CI_PADDR(nic_io->rq_ci_dma_base, rq_id); + } + err = hinic5_set_rq_ci_ctx(nic_io, &rq_attr); + if (err != 0) { + nic_err(nic_io->dev_hdl, "Failed to update rq coalesce\n"); + return -EFAULT; + } + + return 0; +} + +int hinic5_set_sq_rq_coalesce_cfg(void *hwdev, u32 q_id, u32 type, + struct hinic5_qp_coalesce_info *coal_info) +{ + struct hinic5_nic_io *nic_io = NULL; + struct interrupt_info info = {0}; + int err; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + if (!HINIC5_SUPPORT_SQ_RQ_CI_COALESCE(hwdev)) { + info.coalesc_timer_cfg = coal_info->coalesce_timer_cfg; + info.pending_limt = coal_info->pending_limt; + info.interrupt_coalesc_set = 1; + info.msix_index = nic_io->sq[q_id].msix_entry_idx; + info.resend_timer_cfg = HINIC5_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG; + return hinic5_set_interrupt_cfg(hwdev, info, HINIC5_CHANNEL_NIC); + } + + if ((type & HINIC5_SQ_COALESCE) != 0) { + err = hinic5_update_sq_coalesce(nic_io, q_id, coal_info->tx_pending_limt, + coal_info->tx_coalesce_timer_cfg); + if (err != 0) + return err; + } + + if ((type & HINIC5_RQ_COALESCE) != 0) { + err = hinic5_update_rq_coalesce(nic_io, q_id, 
coal_info->rx_pending_limt, + coal_info->rx_coalesce_timer_cfg); + if (err != 0) + return err; + } + return 0; +} + +#ifdef __UEFI__ +void hinic5_write_db(void *pcidev, struct hinic5_io_queue *queue, int cos, + u8 cflag, u16 pi) +{ + struct hinic5_nic_db db; + BUS_IO_PROTOCOL *BusIo = pcidev; + EFI_STATUS Status; + + db.db_info = + DB_INFO_SET(SRC_TYPE, TYPE) | DB_INFO_SET(cflag, CFLAG) | + DB_INFO_SET(cos, COS) | DB_INFO_SET(queue->q_id, QID); + db.pi_hi = DB_PI_HIGH(pi); + /* Data should be written to HW in Big Endian Format */ + db.db_info = hinic5_hw_be32(db.db_info); + db.pi_hi = hinic5_hw_be32(db.pi_hi); + + MemoryFence(); + Status = BusIo->Mem.Write(BusIo, EfiBusIoWidthUint64, HINIC5_DB_BAR, + (u64)(DB_ADDR(queue, pi)), 1, + (void *)(&db.db_info)); + MemoryFence(); + + if (EFI_ERROR(Status)) + DEBUGPRINT(CRITICAL, "Write doorbell fails: %r\n", Status); +} +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_rss_cfg.c b/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_rss_cfg.c new file mode 100644 index 00000000..fc597043 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/comm/hinic5_rss_cfg.c @@ -0,0 +1,360 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/kernel.h> +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/dcbnl.h> + +#include "comm_defs.h" +#include "ossl_knl.h" +#include "hinic5_crm.h" +#include "hinic5_nic_cfg.h" +#include "nic_mpu_cmd.h" +#include "nic_npu_cmd.h" +#include "hinic5_hw.h" +#include "hinic5_nic.h" +#include "hinic5_nic_cmdq.h" +#include "hinic5_common.h" + +static int hinic5_rss_cfg_hash_key(struct hinic5_nic_io *nic_io, u8 opcode, + u8 *key, u16 key_size) +{ + struct hinic5_cmd_rss_hash_key hash_key; + u16 out_size = 
sizeof(hash_key); + int err; + + memset(&hash_key, 0, out_size); + hash_key.func_id = hinic5_global_func_id(nic_io->hwdev); + hash_key.opcode = opcode; + + if (opcode == HINIC5_CMD_OP_SET) + memcpy(hash_key.key, key, key_size); + + err = l2nic_msg_to_mgmt_sync(nic_io->hwdev, + HINIC5_NIC_CMD_CFG_RSS_HASH_KEY, + &hash_key, sizeof(hash_key), + &hash_key, &out_size); + if (err != 0 || out_size == 0 || hash_key.msg_head.status != 0) { + nic_err(nic_io->dev_hdl, "Failed to %s hash key, err: %d, status: 0x%x, out size: 0x%x\n", + opcode == HINIC5_CMD_OP_SET ? "set" : "get", + err, hash_key.msg_head.status, out_size); + return -EINVAL; + } + + if (opcode == HINIC5_CMD_OP_GET) + memcpy(key, hash_key.key, NIC_RSS_KEY_SIZE); + + return 0; +} + +int hinic5_rss_set_hash_key(void *hwdev, const u8 *key) +{ + struct hinic5_nic_io *nic_io = NULL; + u8 hash_key[NIC_RSS_KEY_SIZE]; + + if (!hwdev || !key) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + memcpy(hash_key, key, NIC_RSS_KEY_SIZE); + return hinic5_rss_cfg_hash_key(nic_io, HINIC5_CMD_OP_SET, hash_key, NIC_RSS_KEY_SIZE); +} + +int hinic5_rss_get_hash_key(void *hwdev, u8 *key) +{ + struct hinic5_nic_io *nic_io = NULL; + + if (!hwdev || !key) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + return hinic5_rss_cfg_hash_key(nic_io, HINIC5_CMD_OP_GET, key, NIC_RSS_KEY_SIZE); +} + +int hinic5_rss_set_indir_tbl(void *hwdev, const u32 *indir_table) +{ + struct hinic5_cmd_buf *cmd_buf = NULL; + struct hinic5_nic_io *nic_io = NULL; + u8 cmd; + u64 out_param = 0; + int err; + + if (!hwdev || !indir_table) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + cmd_buf = hinic5_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + nic_err(nic_io->dev_hdl, "Failed to allocate cmd buf\n"); + return -ENOMEM; + } + + cmd = 
nic_io->cmdq_ops->prepare_cmd_buf_set_rss_indir_table(nic_io, indir_table, cmd_buf); + + err = hinic5_cmdq_direct_resp(hwdev, HINIC5_MOD_L2NIC, + cmd, cmd_buf, &out_param, 0, HINIC5_CHANNEL_NIC); + if (err != 0 || out_param != 0) { + nic_err(nic_io->dev_hdl, "Failed to set rss indir table\n"); + err = -EFAULT; + } + + hinic5_free_cmd_buf(hwdev, cmd_buf); + return err; +} + +static int hinic5_cmdq_set_rss_type(void *hwdev, struct nic_rss_type rss_type) +{ + struct nic_rss_context_tbl *ctx_tbl = NULL; + struct hinic5_cmd_buf *cmd_buf = NULL; + struct hinic5_nic_io *nic_io = NULL; + u32 ctx = 0; + u64 out_param = 0; + int err; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + cmd_buf = hinic5_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + nic_err(nic_io->dev_hdl, "Failed to allocate cmd buf\n"); + return -ENOMEM; + } + + ctx |= HINIC5_RSS_TYPE_SET(1, VALID) | + HINIC5_RSS_TYPE_SET(rss_type.ipv4, IPV4) | + HINIC5_RSS_TYPE_SET(rss_type.ipv6, IPV6) | + HINIC5_RSS_TYPE_SET(rss_type.ipv6_ext, IPV6_EXT) | + HINIC5_RSS_TYPE_SET(rss_type.tcp_ipv4, TCP_IPV4) | + HINIC5_RSS_TYPE_SET(rss_type.tcp_ipv6, TCP_IPV6) | + HINIC5_RSS_TYPE_SET(rss_type.tcp_ipv6_ext, TCP_IPV6_EXT) | + HINIC5_RSS_TYPE_SET(rss_type.udp_ipv4, UDP_IPV4) | + HINIC5_RSS_TYPE_SET(rss_type.udp_ipv6, UDP_IPV6); + + cmd_buf->size = sizeof(struct nic_rss_context_tbl); + ctx_tbl = (struct nic_rss_context_tbl *)cmd_buf->buf; + memset(ctx_tbl, 0, sizeof(*ctx_tbl)); + ctx_tbl->ctx = cpu_to_be32(ctx); + + /* cfg the rss context table by command queue */ + err = hinic5_cmdq_direct_resp(hwdev, HINIC5_MOD_L2NIC, + HINIC5_UCODE_CMD_SET_RSS_CONTEXT_TABLE, + cmd_buf, &out_param, 0, + HINIC5_CHANNEL_NIC); + + hinic5_free_cmd_buf(hwdev, cmd_buf); + + if (err != 0 || out_param != 0) { + nic_err(nic_io->dev_hdl, "cmdq set rss context table failed, err: %d\n", + err); + return -EFAULT; + } + + return 0; +} + +static int hinic5_mgmt_set_rss_type(void 
*hwdev, struct nic_rss_type rss_type) +{ + struct hinic5_nic_io *nic_io = NULL; + struct hinic5_rss_context_table ctx_tbl; + u32 ctx = 0; + u16 out_size = sizeof(ctx_tbl); + int err; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + memset(&ctx_tbl, 0, sizeof(ctx_tbl)); + ctx_tbl.func_id = hinic5_global_func_id(hwdev); + ctx |= HINIC5_RSS_TYPE_SET(1, VALID) | + HINIC5_RSS_TYPE_SET(rss_type.ipv4, IPV4) | + HINIC5_RSS_TYPE_SET(rss_type.ipv6, IPV6) | + HINIC5_RSS_TYPE_SET(rss_type.ipv6_ext, IPV6_EXT) | + HINIC5_RSS_TYPE_SET(rss_type.tcp_ipv4, TCP_IPV4) | + HINIC5_RSS_TYPE_SET(rss_type.tcp_ipv6, TCP_IPV6) | + HINIC5_RSS_TYPE_SET(rss_type.tcp_ipv6_ext, TCP_IPV6_EXT) | + HINIC5_RSS_TYPE_SET(rss_type.udp_ipv4, UDP_IPV4) | + HINIC5_RSS_TYPE_SET(rss_type.udp_ipv6, UDP_IPV6); + ctx_tbl.context = ctx; + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_SET_RSS_CTX_TBL_INTO_FUNC, + &ctx_tbl, sizeof(ctx_tbl), + &ctx_tbl, &out_size); + + if (ctx_tbl.msg_head.status == HINIC5_MGMT_CMD_UNSUPPORTED) { + return HINIC5_MGMT_CMD_UNSUPPORTED; + } else if ((err != 0) || (out_size == 0) || (ctx_tbl.msg_head.status != 0)) { + nic_err(nic_io->dev_hdl, "mgmt Failed to set rss context offload, err: %d, status: 0x%x, out size: 0x%x\n", + err, ctx_tbl.msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + +int hinic5_set_rss_type(void *hwdev, struct nic_rss_type rss_type) +{ + int err; + + err = hinic5_mgmt_set_rss_type(hwdev, rss_type); + if (err == HINIC5_MGMT_CMD_UNSUPPORTED) + err = hinic5_cmdq_set_rss_type(hwdev, rss_type); + + return err; +} + +int hinic5_get_rss_type(void *hwdev, struct nic_rss_type *rss_type) +{ + struct hinic5_rss_context_table ctx_tbl; + u16 out_size = sizeof(ctx_tbl); + struct hinic5_nic_io *nic_io = NULL; + int err; + + if (!hwdev || !rss_type) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + 
memset(&ctx_tbl, 0, out_size); + ctx_tbl.func_id = hinic5_global_func_id(hwdev); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_GET_RSS_CTX_TBL, + &ctx_tbl, sizeof(ctx_tbl), + &ctx_tbl, &out_size); + if (err != 0 || out_size == 0 || ctx_tbl.msg_head.status != 0) { + nic_err(nic_io->dev_hdl, "Failed to get hash type, err: %d, status: 0x%x, out size: 0x%x\n", + err, ctx_tbl.msg_head.status, out_size); + return -EINVAL; + } + + rss_type->ipv4 = HINIC5_RSS_TYPE_GET(ctx_tbl.context, IPV4); + rss_type->ipv6 = HINIC5_RSS_TYPE_GET(ctx_tbl.context, IPV6); + rss_type->ipv6_ext = HINIC5_RSS_TYPE_GET(ctx_tbl.context, IPV6_EXT); + rss_type->tcp_ipv4 = HINIC5_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV4); + rss_type->tcp_ipv6 = HINIC5_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV6); + rss_type->tcp_ipv6_ext = HINIC5_RSS_TYPE_GET(ctx_tbl.context, + TCP_IPV6_EXT); + rss_type->udp_ipv4 = HINIC5_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV4); + rss_type->udp_ipv6 = HINIC5_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV6); + + return 0; +} + +static int hinic5_rss_cfg_hash_engine(struct hinic5_nic_io *nic_io, u8 opcode, + u8 *type) +{ + struct hinic5_cmd_rss_engine_type hash_type; + u16 out_size = sizeof(hash_type); + int err; + + if (!nic_io) + return -EINVAL; + + memset(&hash_type, 0, out_size); + + hash_type.func_id = hinic5_global_func_id(nic_io->hwdev); + hash_type.opcode = opcode; + + if (opcode == HINIC5_CMD_OP_SET) + hash_type.hash_engine = *type; + + err = l2nic_msg_to_mgmt_sync(nic_io->hwdev, + HINIC5_NIC_CMD_CFG_RSS_HASH_ENGINE, + &hash_type, sizeof(hash_type), + &hash_type, &out_size); + if (err != 0 || out_size == 0 || hash_type.msg_head.status != 0) { + nic_err(nic_io->dev_hdl, "Failed to %s hash engine, err: %d, status: 0x%x, out size: 0x%x\n", + opcode == HINIC5_CMD_OP_SET ? 
"set" : "get", + err, hash_type.msg_head.status, out_size); + return -EIO; + } + + if (opcode == HINIC5_CMD_OP_GET) + *type = hash_type.hash_engine; + + return 0; +} + +int hinic5_rss_set_hash_engine(void *hwdev, u8 type) +{ + struct hinic5_nic_io *nic_io = NULL; + + if (!hwdev) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + return hinic5_rss_cfg_hash_engine(nic_io, HINIC5_CMD_OP_SET, &type); +} + +int hinic5_rss_get_hash_engine(void *hwdev, u8 *type) +{ + struct hinic5_nic_io *nic_io = NULL; + + if (!hwdev || !type) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + return hinic5_rss_cfg_hash_engine(nic_io, HINIC5_CMD_OP_GET, type); +} + +int hinic5_rss_cfg(void *hwdev, u8 rss_en, u8 cos_num, u8 *prio_tc, u16 num_qps) +{ + struct hinic5_cmd_rss_config rss_cfg; + u16 out_size = sizeof(rss_cfg); + struct hinic5_nic_io *nic_io = NULL; + int err; + + /* micro code required: number of TC should be power of 2 */ + if (!hwdev || !prio_tc || ((cos_num & (cos_num - 1)) != 0)) + return -EINVAL; + + nic_io = hinic5_get_service_adapter(hwdev, SERVICE_T_NIC); + if (!nic_io) + return -EINVAL; + + memset(&rss_cfg, 0, out_size); + rss_cfg.func_id = hinic5_global_func_id(hwdev); + rss_cfg.rss_en = rss_en; + rss_cfg.rq_priority_number = (cos_num != 0) ? 
(u8)ilog2(cos_num) : 0; + rss_cfg.num_qps = num_qps; + + memcpy(rss_cfg.prio_tc, prio_tc, NIC_DCB_UP_MAX); + + err = l2nic_msg_to_mgmt_sync(hwdev, HINIC5_NIC_CMD_RSS_CFG, + &rss_cfg, sizeof(rss_cfg), + &rss_cfg, &out_size); + if (err != 0 || out_size == 0 || rss_cfg.msg_head.status != 0) { + nic_err(nic_io->dev_hdl, "Failed to set rss cfg, err: %d, status: 0x%x, out size: 0x%x\n", + err, rss_cfg.msg_head.status, out_size); + return -EINVAL; + } + + return 0; +} + diff --git a/hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_mag_cfg.h b/hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_mag_cfg.h new file mode 100644 index 00000000..8961c129 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_mag_cfg.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_MAG_CFG_H +#define HINIC5_MAG_CFG_H + +#include <linux/types.h> + +#define CAP_INFO_MAX_LEN 512 +#define VENDOR_MAX_LEN 17 + +#define LOOP_MODE_MIN 1 +#define LOOP_MODE_MAX 6 + +/** + * @brief 设置设备物理端口状态 + * + * @param hwdev device pointer to hwdev + * @param enable 设置端口状态值,true--使能,false--去使能 + * @param channel, mailbox发送使用的channel id + * + * @details 设置该设备关联的物理端口状态,通过mailbox发给MPU设置MAG端口状态 + * + * @attention: 仅PF支持,VF调用返回0;函数内部涉及发送mailbox消息会休眠, + * 禁止中断上下文等不允许休眠的流程中调用 + * @return: 设备物理端口状态设置返回成功或者失败. 
+ * @retval 0 成功 + * @retval 非0 失败 + */ +int hinic5_set_port_enable(void *hwdev, bool enable, u16 channel); +int hinic5_get_fec(void *hwdev, u8 *advertised_fec, u8 *supported_fec); +int hinic5_set_fec(void *hwdev, u8 advertised_fec); + +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_nic_cfg.h b/hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_nic_cfg.h new file mode 100644 index 00000000..6bb61d72 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_nic_cfg.h @@ -0,0 +1,776 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_NIC_CFG_H +#define HINIC5_NIC_CFG_H + +#include <linux/types.h> +#include <linux/netdevice.h> + +#include "nic_cfg_comm.h" +#include "nic_mpu_tc_cmd_defs.h" +#include "mag_mpu_cmd_defs.h" + +#define nic_err(dev, format, ...) dev_err(dev, "[NIC]" format, ##__VA_ARGS__) +#define nic_warn(dev, format, ...) dev_warn(dev, "[NIC]" format, ##__VA_ARGS__) +#define nic_notice(dev, format, ...) dev_notice(dev, "[NIC]" format, ##__VA_ARGS__) +#define nic_info(dev, format, ...) 
dev_info(dev, "[NIC]" format, ##__VA_ARGS__) + +#define OS_VF_ID_TO_HW(os_vf_id) ((os_vf_id) + 1) +#define HW_VF_ID_TO_OS(hw_vf_id) ((hw_vf_id) - 1) + +#define HINIC5_VLAN_PRIORITY_SHIFT 13 + +#define HINIC5_RSS_INDIR_4B_UNIT 3 +#define HINIC5_RSS_INDIR_NUM 2 + +#define HINIC5_RSS_KEY_RSV_NUM 2 +#define HINIC5_MAX_NUM_RQ 256 + +#define HINIC5_MIN_MTU_SIZE 256 +#define HINIC5_MAX_JUMBO_FRAME_SIZE 9600 + +#define HINIC5_DEL_MAC_NO_MATCH 0x5 +#define HINIC5_MGMT_STATUS_EXIST 0x6 +#define CHECK_IPSU_15BIT 0x8000 + +#define HINIC5_MGMT_STATUS_TABLE_EMPTY 0xB /* Table empty */ +#define HINIC5_MGMT_STATUS_TABLE_FULL 0xC /* Table full */ + +#define HINIC5_LOWEST_LATENCY 3 +#define HINIC5_MULTI_VM_LATENCY 32 +#define HINIC5_MULTI_VM_PENDING_LIMIT 4 + +#define HINIC5_RX_RATE_LOW 200000 +#define HINIC5_RX_COAL_TIME_LOW 25 +#define HINIC5_RX_PENDING_LIMIT_LOW 2 + +#define HINIC5_RX_RATE_HIGH 700000 +#define HINIC5_RX_COAL_TIME_HIGH 225 +#define HINIC5_RX_PENDING_LIMIT_HIGH 8 + +#define HINIC5_RX_RATE_THRESH 50000 +#define HINIC5_TX_RATE_THRESH 50000 +#define HINIC5_RX_RATE_LOW_VM 100000 +#define HINIC5_RX_PENDING_LIMIT_HIGH_VM 87 + +#define HINIC5_DCB_PCP 0 +#define HINIC5_DCB_DSCP 1 + +#define HINIC5_VXLAN_DPORT_SET_BY_HINICADM 0x2 /* mpu return ERR_FAILED */ +#define HINIC5_VXLAN_DPORT_SET_UNSUPPORT 0xff /* mpu return ERR_UNSUPPORT */ + +#define MAX_LIMIT_BW 100 +#define MIN_LIMIT_BW 0 + +enum hinic5_valid_link_settings { + HILINK_LINK_SET_SPEED = 0x1, + HILINK_LINK_SET_AUTONEG = 0x2, + HILINK_LINK_SET_FEC = 0x4, +}; + +enum hinic5_link_follow_status { + HINIC5_LINK_FOLLOW_DEFAULT, + HINIC5_LINK_FOLLOW_PORT, + HINIC5_LINK_FOLLOW_SEPARATE, + HINIC5_LINK_FOLLOW_STATUS_MAX, +}; + +struct hinic5_link_ksettings { + u32 valid_bitmap; + u8 speed; /* enum nic_speed_level */ + u8 autoneg; /* 0 - off; 1 - on */ + u8 fec; /* 0 - RSFEC; 1 - BASEFEC; 2 - NOFEC */ +}; + +u64 hinic5_get_feature_cap(void *hwdev); + +#define HINIC5_SUPPORT_FEATURE(hwdev, feature) \ + 
((hinic5_get_feature_cap(hwdev) & NIC_F_##feature) != 0) +#define HINIC5_SUPPORT_CSUM(hwdev) HINIC5_SUPPORT_FEATURE(hwdev, CSUM) +#define HINIC5_SUPPORT_SCTP_CRC(hwdev) HINIC5_SUPPORT_FEATURE(hwdev, SCTP_CRC) +#define HINIC5_SUPPORT_TSO(hwdev) HINIC5_SUPPORT_FEATURE(hwdev, TSO) +#define HINIC5_SUPPORT_UFO(hwdev) HINIC5_SUPPORT_FEATURE(hwdev, UFO) +#define HINIC5_SUPPORT_LRO(hwdev) HINIC5_SUPPORT_FEATURE(hwdev, LRO) +#define HINIC5_SUPPORT_RSS(hwdev) HINIC5_SUPPORT_FEATURE(hwdev, RSS) +#define HINIC5_SUPPORT_RXVLAN_FILTER(hwdev) \ + HINIC5_SUPPORT_FEATURE(hwdev, RX_VLAN_FILTER) +#define HINIC5_SUPPORT_VLAN_OFFLOAD(hwdev) \ + (HINIC5_SUPPORT_FEATURE(hwdev, RX_VLAN_STRIP) && \ + HINIC5_SUPPORT_FEATURE(hwdev, TX_VLAN_INSERT)) +#define HINIC5_SUPPORT_VXLAN_OFFLOAD(hwdev) \ + HINIC5_SUPPORT_FEATURE(hwdev, VXLAN_OFFLOAD) +#define HINIC5_SUPPORT_GENEVE_OFFLOAD(hwdev) \ + HINIC5_SUPPORT_FEATURE(hwdev, GENEVE_OFFLOAD) +#define HINIC5_SUPPORT_IPXIP_OFFLOAD(hwdev) \ + HINIC5_SUPPORT_FEATURE(hwdev, IPXIP_OFFLOAD) +#define HINIC5_SUPPORT_IPSEC_OFFLOAD(hwdev) \ + HINIC5_SUPPORT_FEATURE(hwdev, IPSEC_OFFLOAD) +#define HINIC5_SUPPORT_FDIR(hwdev) HINIC5_SUPPORT_FEATURE(hwdev, FDIR) +#define HINIC5_SUPPORT_PROMISC(hwdev) HINIC5_SUPPORT_FEATURE(hwdev, PROMISC) +#define HINIC5_SUPPORT_ALLMULTI(hwdev) HINIC5_SUPPORT_FEATURE(hwdev, ALLMULTI) +#define HINIC5_SUPPORT_VF_MAC(hwdev) HINIC5_SUPPORT_FEATURE(hwdev, VF_MAC) +#define HINIC5_SUPPORT_RATE_LIMIT(hwdev) HINIC5_SUPPORT_FEATURE(hwdev, RATE_LIMIT) +#define HINIC5_SUPPORT_TX_WQE_COMPACT_TASK(hwdev) HINIC5_SUPPORT_FEATURE(hwdev, TX_WQE_COMPACT_TASK) +#define HINIC5_SUPPORT_RX_HW_COMPACT_CQE(hwdev) HINIC5_SUPPORT_FEATURE(hwdev, RX_HW_COMPACT_CQE) +#define HINIC5_SUPPORT_RX_SW_COMPACT_CQE(hwdev) HINIC5_SUPPORT_FEATURE(hwdev, RX_SW_COMPACT_CQE) + +#define HINIC5_SUPPORT_RXQ_RECOVERY(hwdev) HINIC5_SUPPORT_FEATURE(hwdev, RXQ_RECOVERY) +#define HINIC5_SUPPORT_PTP_1588_V2(hwdev) HINIC5_SUPPORT_FEATURE(hwdev, PTP_1588_V2) +#define 
HINIC5_SUPPORT_SQ_RQ_CI_COALESCE(hwdev) HINIC5_SUPPORT_FEATURE(hwdev, SQ_RQ_CI_COALESCE) +#define HINIC5_SUPPORT_GET_COUNTER_BY_CMDQ(hwdev) HINIC5_SUPPORT_FEATURE(hwdev, GET_COUNTER_BY_CMDQ) + +struct nic_rss_type { + u8 tcp_ipv6_ext; + u8 ipv6_ext; + u8 tcp_ipv6; + u8 ipv6; + u8 tcp_ipv4; + u8 ipv4; + u8 udp_ipv6; + u8 udp_ipv4; +}; + +enum hinic5_rss_hash_type { + HINIC5_RSS_HASH_ENGINE_TYPE_XOR = 0, + HINIC5_RSS_HASH_ENGINE_TYPE_TOEP, + HINIC5_RSS_HASH_ENGINE_TYPE_MAX, +}; + +/* rss */ +struct nic_rss_indirect_tbl { + u32 user_data[2]; + u32 rsvd[2]; /* Make sure that 16B beyond entry[] */ + u16 entry[NIC_RSS_INDIR_SIZE]; +}; + +struct nic_rss_context_tbl { + u32 rsvd[4]; + u32 ctx; +}; + +struct nic_vlan_ctx { + u32 func_id; + u32 qid; /* if qid = 0xFFFF, config current function all queue */ + u32 vlan_tag; + u32 vlan_mode; + u32 vlan_sel; +}; + +enum hinic5_link_status { + HINIC5_LINK_DOWN = 0, + HINIC5_LINK_UP +}; + +struct nic_pause_config { + u8 auto_neg; + u8 rx_pause; + u8 tx_pause; +}; + +struct rxq_check_info { + u16 hw_pi; + u16 hw_ci; +}; + +struct hinic5_rxq_hw { + u32 func_id; + u32 num_queues; + + u32 rsvd[14]; +}; + +#define MODULE_TYPE_SFP 0x3 +#define MODULE_TYPE_QSFP28 0x11 +#define MODULE_TYPE_QSFP 0x0C +#define MODULE_TYPE_QSFP_PLUS 0x0D +#define MODULE_TYPE_SFF8024_ID_QSFP_PLUS_CMIS 0x1E +#define MODULE_TYPE_SFF8024_ID_DSFP 0x1B + +#define TCAM_IP_TYPE_MASK 0x1 +#define TCAM_TUNNEL_TYPE_MASK 0xF +#define TCAM_FUNC_ID_MASK 0x7FFF + +int hinic5_add_tcam_rule(void *hwdev, struct nic_tcam_cfg_rule *tcam_rule); +int hinic5_del_tcam_rule(void *hwdev, u32 index); + +int hinic5_alloc_tcam_block(void *hwdev, u16 *index); +int hinic5_free_tcam_block(void *hwdev, u16 *index); + +int hinic5_set_fdir_tcam_rule_filter(void *hwdev, bool enable); + +int hinic5_flush_tcam_rule(void *hwdev); + +/* * + * @brief hinic5_update_mac - update mac address to hardware + * @param hwdev: device pointer to hwdev + * @param old_mac: old mac to delete + * @param new_mac: 
new mac to update + * @param vlan_id: vlan id + * @param func_id: function index + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_update_mac(void *hwdev, const u8 *old_mac, u8 *new_mac, u16 vlan_id, + u16 func_id); + +/* * + * @brief hinic5_get_default_mac - get default mac address + * @param hwdev: device pointer to hwdev + * @param mac_addr: mac address from hardware + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_get_default_mac(void *hwdev, u8 *mac_addr); + +/* * + * @brief hinic5_set_port_mtu - set function mtu + * @param hwdev: device pointer to hwdev + * @param new_mtu: mtu + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_set_port_mtu(void *hwdev, u16 new_mtu); + +/* * + * @brief hinic5_get_link_state - get link state + * @param hwdev: device pointer to hwdev + * @param link_state: link state, 0-link down, 1-link up + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_get_link_state(void *hwdev, u8 *link_state); + +/* * + * @brief hinic5_get_vport_stats - get function stats + * @param hwdev: device pointer to hwdev + * @param func_id: function index + * @param stats: function stats + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_get_vport_stats(void *hwdev, u16 func_id, struct hinic5_vport_stats *stats); + +/* * + * @brief hinic5_notify_all_vfs_link_changed - notify to all vfs link changed + * @param hwdev: device pointer to hwdev + * @param link_status: link state, 0-link down, 1-link up + */ +void hinic5_notify_all_vfs_link_changed(void *hwdev, u8 link_status); + +/* * + * @brief hinic5_force_drop_tx_pkt - force drop tx packet + * @param hwdev: device pointer to hwdev + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_force_drop_tx_pkt(void *hwdev); + +/* * + * @brief hinic5_set_rx_mode - set function rx mode + * @param hwdev: device pointer to hwdev + * @param enable: rx mode state + * @retval zero: success + * 
@retval non-zero: failure + */ +int hinic5_set_rx_mode(void *hwdev, u32 enable); + +/* * + * @brief hinic5_set_rx_vlan_offload - set function vlan offload valid state + * @param hwdev: device pointer to hwdev + * @param en: 0-disable, 1-enable + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_set_rx_vlan_offload(void *hwdev, u8 en); + +/* * + * @brief hinic5_set_rx_lro_state - set rx LRO configuration + * @param hwdev: device pointer to hwdev + * @param lro_en: 0-disable, 1-enable + * @param lro_timer: LRO aggregation timeout + * @param lro_max_pkt_len: LRO coalesce packet size(unit is 1K) + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_set_rx_lro_state(void *hwdev, u8 lro_en, u32 lro_timer, + u32 lro_max_pkt_len); + +/* * + * @brief hinic5_get_veb_offload - get veb offload + * @param hwdev: device pointer to hwdev + * @param veb_offload_status: veb offload status + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_get_veb_offload(void *hwdev, u16 *veb_offload_status); + +/* * + * @brief hinic5_set_veb_offload - set veb offload + * @param hwdev: device pointer to hwdev + * @param veb_offload_status: veb offload status + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_set_veb_offload(void *hwdev, u16 veb_offload_status); + +/* * + * @brief hinic5_set_vf_spoofchk - set vf spoofchk + * @param hwdev: device pointer to hwdev + * @param vf_id: vf id + * @param spoofchk: spoofchk + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_set_vf_spoofchk(void *hwdev, u16 vf_id, bool spoofchk); + +/* * + * @brief hinic5_vf_info_spoofchk - get vf spoofchk info + * @param hwdev: device pointer to hwdev + * @param vf_id: vf id + * @retval spoofchk state + */ +bool hinic5_vf_info_spoofchk(void *hwdev, int vf_id); + +/* * + * @brief hinic5_add_vf_vlan - add vf vlan id + * @param hwdev: device pointer to hwdev + * @param vf_id: vf id + * @param vlan: vlan id + * @param qos: 
qos + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_add_vf_vlan(void *hwdev, int vf_id, u16 vlan, u8 qos); + +/* * + * @brief hinic5_kill_vf_vlan - kill vf vlan + * @param hwdev: device pointer to hwdev + * @param vf_id: vf id + * @details remove the vlan configured for this vf + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_kill_vf_vlan(void *hwdev, int vf_id); + +/* * + * @brief hinic5_set_vf_mac - set vf mac + * @param hwdev: device pointer to hwdev + * @param vf_id: vf id + * @param mac_addr: vf mac address + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_set_vf_mac(void *hwdev, int vf_id, const unsigned char *mac_addr); + +/* * + * @brief hinic5_vf_info_vlanprio - get vf vlan priority + * @param hwdev: device pointer to hwdev + * @param vf_id: vf id + * @retval zero: vlan priority + */ +u16 hinic5_vf_info_vlanprio(void *hwdev, int vf_id); + +/* * + * @brief hinic5_set_vf_tx_rate - set vf tx rate + * @param hwdev: device pointer to hwdev + * @param vf_id: vf id + * @param max_rate: max rate + * @param min_rate: min rate + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_set_vf_tx_rate(void *hwdev, u16 vf_id, u32 max_rate, u32 min_rate); + +/* * + * @brief hinic5_get_vf_config - get vf config + * @param hwdev: device pointer to hwdev + * @param vf_id: vf id + * @param ivi: vf info + * @details fills @ivi with the vf configuration + * @retval none + */ +void hinic5_get_vf_config(void *hwdev, u16 vf_id, struct ifla_vf_info *ivi); + +/* * + * @brief hinic5_set_vf_link_state - set vf link state + * @param hwdev: device pointer to hwdev + * @param vf_id: vf id + * @param link: link state + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_set_vf_link_state(void *hwdev, u16 vf_id, int link); + +/* * + * @brief hinic5_set_rss_type - set rss type + * @param hwdev: device pointer to hwdev + * @param rss_type: rss type + * @retval zero: success + * @retval non-zero: failure + */ +int 
hinic5_set_rss_type(void *hwdev, struct nic_rss_type rss_type); + +/* * + * @brief hinic5_get_rss_type - get rss type + * @param hwdev: device pointer to hwdev + * @param rss_type: rss type + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_get_rss_type(void *hwdev, struct nic_rss_type *rss_type); + +/* * + * @brief hinic5_rss_get_hash_engine - get rss hash engine + * @param hwdev: device pointer to hwdev + * @param type: hash engine + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_rss_get_hash_engine(void *hwdev, u8 *type); + +/* * + * @brief hinic5_rss_set_hash_engine - set rss hash engine + * @param hwdev: device pointer to hwdev + * @param type: hash engine + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_rss_set_hash_engine(void *hwdev, u8 type); + +/* * + * @brief hinic5_rss_cfg - set rss configuration + * @param hwdev: device pointer to hwdev + * @param rss_en: enable rss flag + * @param cos_num: cos num + * @param prio_tc: priority to tc map + * @param num_qps: number of queue + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_rss_cfg(void *hwdev, u8 rss_en, u8 cos_num, u8 *prio_tc, + u16 num_qps); + +/* * + * @brief hinic5_rss_set_hash_key - set rss hash key + * @param hwdev: device pointer to hwdev + * @param key: rss key + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_rss_set_hash_key(void *hwdev, const u8 *key); + +/* * + * @brief hinic5_rss_get_hash_key - get rss hash key + * @param hwdev: device pointer to hwdev + * @param key: rss key + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_rss_get_hash_key(void *hwdev, u8 *key); + +/* * + * @brief hinic5_refresh_nic_cfg - refresh port cfg + * @param hwdev: device pointer to hwdev + * @param port_info: port information + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_refresh_nic_cfg(void *hwdev, struct mag_port_info *port_info); + +/* * + * 
@brief hinic5_add_vlan - add vlan + * @param hwdev: device pointer to hwdev + * @param vlan_id: vlan id + * @param func_id: function id + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_add_vlan(void *hwdev, u16 vlan_id, u16 func_id); + +/* * + * @brief hinic5_del_vlan - delete vlan + * @param hwdev: device pointer to hwdev + * @param vlan_id: vlan id + * @param func_id: function id + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_del_vlan(void *hwdev, u16 vlan_id, u16 func_id); + +/* * + * @brief hinic5_rss_set_indir_tbl - set rss indirect table + * @param hwdev: device pointer to hwdev + * @param indir_table: rss indirect table + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_rss_set_indir_tbl(void *hwdev, const u32 *indir_table); + +/* * + * @brief hinic5_get_phy_port_stats - get port stats + * @param hwdev: device pointer to hwdev + * @param stats: port stats + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_get_phy_port_stats(void *hwdev, struct mag_cmd_port_stats *stats); + +int hinic5_set_port_funcs_state(void *hwdev, bool enable); + +int hinic5_reset_port_link_cfg(void *hwdev); + +int hinic5_force_port_relink(void *hwdev); + +int hinic5_set_dcb_state(void *hwdev, struct hinic5_dcb_state *dcb_state); + +int hinic5_dcb_set_pfc(void *hwdev, u8 pfc_en, u8 pfc_bitmap); + +int hinic5_dcb_get_pfc(void *hwdev, u8 *pfc_en_bitmap); + +int hinic5_dcb_set_ets(void *hwdev, u8 *cos_tc, u8 *cos_bw, u8 *cos_prio, + u8 *tc_bw, u8 *tc_prio); + +int hinic5_dcb_set_cos_up_map(void *hwdev, u8 cos_valid_bitmap, u8 *cos_up, + u8 max_cos_num); + +int hinic5_dcb_set_rq_iq_mapping(void *hwdev, u32 num_rqs, u8 *map, + u32 max_map_num); + +int hinic5_sync_dcb_state(void *hwdev, u8 op_code, u8 state); + +int hinic5_get_pause_info(void *hwdev, struct nic_pause_config *nic_pause); + +int hinic5_set_pause_info(void *hwdev, struct nic_pause_config nic_pause); + +int hinic5_set_link_settings(void 
*hwdev, + struct hinic5_link_ksettings *settings); + +int hinic5_set_vlan_fliter(void *hwdev, u32 vlan_filter_ctrl); + +void hinic5_clear_vfs_info(void *hwdev, u32 start_vf_id, u32 end_vf_id); + +int hinic5_update_mac_vlan(void *hwdev, u16 old_vlan, u16 new_vlan, int vf_id); + +int hinic5_set_led_status(void *hwdev, enum mag_led_type type, + enum mag_led_mode mode); + +int hinic5_set_func_capture_en(void *hwdev, u16 func_id, bool cap_en); + +int hinic5_set_loopback_mode(void *hwdev, u8 mode, u8 enable); +int hinic5_get_loopback_mode(void *hwdev, u8 *mode, u8 *enable); + +#ifdef HAVE_NDO_SET_VF_TRUST +bool hinic5_get_vf_trust(void *hwdev, int vf_id); +int hinic5_set_vf_trust(void *hwdev, u16 vf_id, bool trust); +#endif + +int hinic5_set_autoneg(void *hwdev, bool enable); + +int hinic5_get_sfp_type(void *hwdev, u8 *sfp_type, u8 *sfp_type_ext); +int hinic5_get_sfp_cmis_type(void *hwdev, u8 *sfp_type, u8 *sfp_type_ext); +int hinic5_get_sfp_eeprom(void *hwdev, u8 *data, u32 len, u32 offset); +int hinic5_get_cmis_eeprom(void *hwdev, u8 *data, u32 len, u32 offset); +int hinic5_eeprom_page_check(u8 page_id, u32 offset, u32 len); +int hinic5_get_cmis_eeprom_by_page(void *hwdev, u8 page_id, u32 offset, u8 *data, u32 len); + +bool hinic5_if_sfp_absent(void *hwdev); +int hinic5_get_sfp_info(void *hwdev, struct mag_cmd_get_xsfp_info *sfp_info); +int hinic5_get_sfp_tlv_info(void *hwdev, struct drv_tag_mag_cmd_get_xsfp_tlv_rsp *sfp_tlv_info, + const struct tag_mag_cmd_get_xsfp_tlv_req *sfp_tlv_info_req); + +/* * + * @brief hinic5_set_nic_feature_to_hw - sync nic feature to hardware + * @param hwdev: device pointer to hwdev + */ +int hinic5_set_nic_feature_to_hw(void *hwdev); + +/* * + * @brief hinic5_update_nic_feature - update nic feature + * @param hwdev: device pointer to hwdev + * @param s_feature: nic features + * @param size: @s_feature's array size + */ +void hinic5_update_nic_feature(void *hwdev, u64 s_feature); + +/* * + * @brief hinic5_set_link_status_follow - set link 
follow status + * @param hwdev: device pointer to hwdev + * @param status: link follow status + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_set_link_status_follow(void *hwdev, enum hinic5_link_follow_status status); + +/* * + * @brief hinic5_update_pf_bw - update pf bandwidth + * @param hwdev: device pointer to hwdev + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_update_pf_bw(void *hwdev); + +/* * + * @brief hinic5_set_pf_bw_limit - set pf bandwidth limit + * @param hwdev: device pointer to hwdev + * @param bw_limit: pf bandwidth limit + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_set_pf_bw_limit(void *hwdev, u32 bw_limit); + +/* * + * @brief hinic5_get_pf_bw_limit - get pf bandwidth limit + * @param hwdev: device pointer to hwdev + * @param bw_limit: pf bandwidth limit + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_get_pf_bw_limit(void *hwdev, u32 *bw_limit); + +/* * + * @brief hinic5_set_pf_rate - set pf rate + * @param hwdev: device pointer to hwdev + * @param speed_level: speed level + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_set_pf_rate(void *hwdev, u8 speed_level); + +int hinic5_get_rxq_hw_info(void *hwdev, struct rxq_check_info *rxq_info, u16 num_qps, u16 wqe_type); + +/* * + * @brief hinic5_add_tc_flow_rule - add tc flow rule + * @param hwdev: device pointer to hwdev + * @param hinic5_tc_cfg_info: tc flow rule + * @param default_rule: is default rule + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_add_tc_flow_rule(void *hwdev, struct hinic5_tc_cfg_info *tc_flow_rule, + bool default_rule); + +/* * + * @brief hinic5_del_tc_flow_rule - del tc flow rule + * @param hwdev: device pointer to hwdev + * @param rule_id: tc flow rule id + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_del_tc_flow_rule(void *hwdev, u16 rule_id); + +/* * + * @brief hinic5_flush_tc_flow_rule - flush tc 
flow rule + * @param hwdev: device pointer to hwdev + * @param bitmap: tc rules bitmap + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_flush_tc_flow_rule(void *hwdev, ulong *bitmap); + +/* * + * @brief hinic5_get_pfe_cfg - get pfe cfg + * @param hwdev: device pointer to hwdev + * @param cfg_info: pfe cfg reg info + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_get_pfe_cfg(void *hwdev, struct hinic5_tc_pfe_cfg_reg_info *cfg_info); + +/* * + * @brief hinic5_move_tc_tcam_table - move tcam table + * @param hwdev: device pointer to hwdev + * @param hinic5_tc_move_info: tc move info + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_move_tc_tcam_table(void *hwdev, struct hinic5_tc_move_info *acl_move_info); + +/* * + * @brief hinic5_cache_out_qps_res - cache out queue pairs wqe resource in hardware + * @param hwdev: device pointer to hwdev + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_cache_out_qps_res(void *hwdev); + +/* * + * @brief hinic5_init_nic_hwdev - init nic hwdev + * @param hwdev: device pointer to hwdev + * @param dev_hdl: pointer to pcidev->dev or handler, for sdk_err() or + * dma_alloc() + * @param rx_buff_len: rx_buff_len is receive buffer length + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_init_nic_hwdev(void *hwdev, void *dev_hdl, u16 rx_buff_len); + +/* * + * @brief hinic5_free_nic_hwdev - free nic hwdev + * @param hwdev: device pointer to hwdev + * @retval zero: success + * @retval non-zero: failure + */ +void hinic5_free_nic_hwdev(void *hwdev); + +/* * + * @brief hinic5_flush_qps_res - flush queue pairs resource in hardware + * @param hwdev: device pointer to hwdev + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_flush_qps_res(void *hwdev); + +/* * + * @brief hinic5_flush_qps_res_by_nums - flush queue pairs resource by nums in hardware + * @param hwdev: device pointer to hwdev + * @param qp_num: flush 
queue num + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_flush_qps_res_by_nums(void *hwdev, u16 qp_num); + +/** + * @brief set vport state + * + * @param hwdev device pointer to hwdev + * @param func_id global function index + * @param enable vport state, true--enable, false--disable + * @param channel mailbox channel id used for sending + * + * @details set the vport enable state (valid flag of the function table) for the + * given function, sent to the MPU via mailbox to set the MAG port state + * @attention: sending the mailbox message inside this function may sleep, + * do not call it from interrupt context or any context that cannot sleep + * @return: whether setting the device vport state succeeded. + * @retval 0 success + * @retval non-zero failure + */ +int hinic5_set_vport_enable(void *hwdev, u16 func_id, bool enable, u16 channel); + +/* modify vxlan dport */ +int hinic5_vxlan_port_config(void *hwdev, u16 func_id, u16 port, u8 action, u8 pkt_fmt); + +/** + * @brief get chip nic feature capabilities + * + * @param hwdev device pointer to hwdev + * @param s_feature: returned chip capability bitmap + * @param size: number of bitmap entries + + * + * @details send mbox message to query the chip nic feature capabilities + * + * @attention: N/A + * + * @return: whether the query succeeded + * @retval 0 success + * @retval non-zero failure + */ +int hinic5_get_nic_feature_from_hw(void *hwdev, u64 *s_feature, u16 size); + +int hinic5_send_arp_to_mpu(void *hwdev, struct hinic5_arp_pkt_info *info); + +bool hinic5_check_dev_need_dual_send(void *hwdev); + +int hinic5_set_queue_pooling(void *hwdev, u8 enable_queue_pooling); +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_nic_cfg_vf.h b/hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_nic_cfg_vf.h new file mode 100644 index 00000000..17e668cf --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_nic_cfg_vf.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_NIC_CFG_VF_H +#define HINIC5_NIC_CFG_VF_H + +/* In order to adapt different linux version */ +enum { + HINIC5_IFLA_VF_LINK_STATE_AUTO, /* link state of the uplink */ + HINIC5_IFLA_VF_LINK_STATE_ENABLE, /* link always up */ + 
HINIC5_IFLA_VF_LINK_STATE_DISABLE, /* link always down */ +}; + +#define NIC_CVLAN_INSERT_ENABLE 0x1 +#define NIC_QINQ_INSERT_ENABLE 0x3 +#define NIC_VF_TRUST_UNSUPPORT 0xFF + +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_nic_dbg.h b/hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_nic_dbg.h new file mode 100644 index 00000000..395bbdce --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_nic_dbg.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_NIC_DBG_H +#define HINIC5_NIC_DBG_H + +#include "nic_pub_cmd.h" +#include "hinic5_nic_io.h" +#include "hinic5_srv_nic.h" + +int hinic5_dbg_get_sq_info(void *hwdev, u16 q_id, struct nic_sq_info *sq_info, + u32 msg_size); + +int hinic5_dbg_get_rq_info(void *hwdev, u16 q_id, struct nic_rq_info *rq_info, + u32 msg_size); + +int hinic5_dbg_get_wqe_info(void *hwdev, u16 q_id, u16 idx, u16 wqebb_cnt, + u8 *wqe, const u16 *wqe_size, + enum hinic5_queue_type q_type); + +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_nic_event.h b/hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_nic_event.h new file mode 100644 index 00000000..242275e5 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_nic_event.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_NIC_EVENT_H +#define HINIC5_NIC_EVENT_H + +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/workqueue.h> + +#include "hinic5_common.h" +#include "hinic5_mt.h" +#include "hinic5_hw.h" + +int hinic5_nic_aeq_register_swe_cb(void *hwdev, void *pri_handle, + enum hinic5_ucode_event_type event, + hinic5_aeq_swe_cb nic_aeq_swe_cb); + +void hinic5_nic_aeq_unregister_swe_cb(void *hwdev, enum hinic5_ucode_event_type event); + +#endif diff --git 
a/hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_nic_io.h b/hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_nic_io.h new file mode 100644 index 00000000..d6c3d261 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_nic_io.h @@ -0,0 +1,415 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_NIC_IO_H +#define HINIC5_NIC_IO_H + +#include "hinic5_crm.h" +#include "hinic5_common.h" +#include "hinic5_wq.h" +#include "hinic5_nic_sq.h" +#include "hinic5_nic_rq.h" + +#define HINIC5_MAX_TX_QUEUE_DEPTH 16384 +#define HINIC5_MAX_RX_QUEUE_DEPTH 16384 + +#define HINIC5_MIN_QUEUE_DEPTH 128 + +#define HINIC5_SQ_WQEBB_SHIFT 4 +#define HINIC5_RQ_WQEBB_SHIFT 3 + +#define HINIC5_SQ_WQEBB_SIZE BIT(HINIC5_SQ_WQEBB_SHIFT) +#define HINIC5_CQE_SIZE_SHIFT 4 + +enum hinic5_rq_wqe_type { + HINIC5_COMPACT_RQ_WQE, + HINIC5_NORMAL_RQ_WQE, + HINIC5_EXTEND_RQ_WQE, +}; + +#define HINIC5_SQ_COALESCE BIT(0) +#define HINIC5_RQ_COALESCE BIT(1) +#define HINIC5_SQ_RQ_COALESCE (HINIC5_SQ_COALESCE | HINIC5_RQ_COALESCE) + +struct hinic5_io_queue { + struct hinic5_wq wq; + + u64 head_addr; + u64 tail_addr; + + union { + u8 wqe_type; /* for rq */ + u8 owner; /* for sq */ + }; + u8 rsvd1; + u16 rsvd2; + + u16 q_id; + u16 msix_entry_idx; + + u8 __iomem *db_addr; + void *cons_idx_addr; +} ____cacheline_aligned; + +struct hinic5_nic_db { + u32 db_info; + u32 pi_hi; +}; + +struct hinic5_rq_ci_wb { + union { + struct { + u16 cqe_num; + u16 hw_ci; + } bs; + u32 value; + } dw0; +}; + +/* * + * @brief hinic5_get_sq_free_wqebbs - get send queue free wqebb + * @param sq: send queue + * @retval : number of free wqebb + */ +static inline u16 hinic5_get_sq_free_wqebbs(struct hinic5_io_queue *sq) +{ + return hinic5_wq_free_wqebbs(&sq->wq); +} + +/* * + * @brief hinic5_update_sq_local_ci - update send queue local consumer index + * @param sq: send queue + * @param wqe_cnt: number of wqebb + */ 
+static inline void hinic5_update_sq_local_ci(struct hinic5_io_queue *sq, + u16 wqebb_cnt) +{ + hinic5_wq_put_wqebbs(&sq->wq, wqebb_cnt); +} + +/* * + * @brief hinic5_get_sq_local_ci - get send queue local consumer index + * @param sq: send queue + * @retval : local consumer index + */ +static inline u16 hinic5_get_sq_local_ci(const struct hinic5_io_queue *sq) +{ + return WQ_MASK_IDX(&sq->wq, sq->wq.cons_idx); +} + +/* * + * @brief hinic5_get_sq_local_pi - get send queue local producer index + * @param sq: send queue + * @retval : local producer index + */ +static inline u16 hinic5_get_sq_local_pi(const struct hinic5_io_queue *sq) +{ + return WQ_MASK_IDX(&sq->wq, sq->wq.prod_idx); +} + +/* * + * @brief hinic5_get_sq_hw_ci - get send queue hardware consumer index + * @param sq: send queue + * @retval : hardware consumer index + */ +static inline u16 hinic5_get_sq_hw_ci(const struct hinic5_io_queue *sq) +{ + return WQ_MASK_IDX(&sq->wq, + hinic5_hw_cpu16(*(u16 *)sq->cons_idx_addr)); +} + +static inline u16 hinic5_get_rq_hw_ci(const struct hinic5_io_queue *rq) +{ + struct hinic5_rq_ci_wb rq_ci_wb; + + rq_ci_wb.dw0.value = hinic5_hw_cpu32(*(u32 *)rq->cons_idx_addr); + + return WQ_MASK_IDX(&rq->wq, rq_ci_wb.dw0.bs.hw_ci); +} + +/* * + * @brief hinic5_get_sq_one_wqebb - get send queue wqe with single wqebb + * @param sq: send queue + * @param pi: return current pi + * @retval : wqe base address + */ +static inline void *hinic5_get_sq_one_wqebb(struct hinic5_io_queue *sq, u16 *pi) +{ + return hinic5_wq_get_one_wqebb(&sq->wq, pi); +} + +static inline void *hinic5_get_sq_wqebbs(struct hinic5_io_queue *sq, u16 wqebb_cnt, u16 *pi) +{ + return hinic5_wq_get_wqebbs(&sq->wq, wqebb_cnt, pi); +} + +/* * + * @brief hinic5_get_sq_multi_wqebb - get send queue wqe with multiple wqebbs + * @param sq: send queue + * @param wqebb_cnt: wqebb counter + * @param pi: return current pi + * @param second_part_wqebbs_addr: second part wqebbs base address + * @param first_part_wqebbs_num: number 
wqebbs of first part + * @retval : first part wqebbs base address + */ +static inline void *hinic5_get_sq_multi_wqebbs(struct hinic5_io_queue *sq, + u16 wqebb_cnt, u16 *pi, + void **second_part_wqebbs_addr, + u16 *first_part_wqebbs_num) +{ + return hinic5_wq_get_multi_wqebbs(&sq->wq, wqebb_cnt, pi, + second_part_wqebbs_addr, + first_part_wqebbs_num); +} + +/* * + * @brief hinic5_get_and_update_sq_owner - get and update send queue owner bit + * @param sq: send queue + * @param curr_pi: current pi + * @param wqebb_cnt: wqebb counter + * @retval : owner bit + */ +static inline u16 hinic5_get_and_update_sq_owner(struct hinic5_io_queue *sq, + u16 curr_pi, u16 wqebb_cnt) +{ + u16 owner = sq->owner; + + if (unlikely(curr_pi + wqebb_cnt >= sq->wq.q_depth)) + sq->owner = (sq->owner != 0) ? 0 : 1; + + return owner; +} + +/* * + * @brief hinic5_get_sq_wqe_with_owner - get send queue wqe with owner + * @param sq: send queue + * @param wqebb_cnt: wqebb counter + * @param pi: return current pi + * @param owner: return owner bit + * @param second_part_wqebbs_addr: second part wqebbs base address + * @param first_part_wqebbs_num: number wqebbs of first part + * @retval : first part wqebbs base address + */ +static inline void *hinic5_get_sq_wqe_with_owner(struct hinic5_io_queue *sq, + u16 wqebb_cnt, u16 *pi, + u16 *owner, + void **second_part_wqebbs_addr, + u16 *first_part_wqebbs_num) +{ + void *wqe = hinic5_wq_get_multi_wqebbs(&sq->wq, wqebb_cnt, pi, + second_part_wqebbs_addr, + first_part_wqebbs_num); + + *owner = sq->owner; + if (unlikely(*pi + wqebb_cnt >= sq->wq.q_depth)) + sq->owner = (sq->owner != 0) ? 
0 : 1; + + return wqe; +} + +/* * + * @brief hinic5_rollback_sq_wqebbs - rollback send queue wqe + * @param sq: send queue + * @param wqebb_cnt: wqebb counter + * @param owner: owner bit + */ +static inline void hinic5_rollback_sq_wqebbs(struct hinic5_io_queue *sq, + u16 wqebb_cnt, u16 owner) +{ + if (owner != sq->owner) + sq->owner = (u8)owner; + sq->wq.prod_idx -= wqebb_cnt; +} + +/* * + * @brief hinic5_rq_wqe_addr - get receive queue wqe address by queue index + * @param rq: receive queue + * @param idx: wq index + * @retval: wqe base address + */ +static inline void *hinic5_rq_wqe_addr(struct hinic5_io_queue *rq, u16 idx) +{ + return hinic5_wq_wqebb_addr(&rq->wq, idx); +} + +/* * + * @brief hinic5_update_rq_local_ci - update receive queue local consumer index + * @param rq: receive queue + * @param wqebb_cnt: number of wqebbs + */ +static inline void hinic5_update_rq_local_ci(struct hinic5_io_queue *rq, + u16 wqebb_cnt) +{ + hinic5_wq_put_wqebbs(&rq->wq, wqebb_cnt); +} + +/* * + * @brief hinic5_get_rq_local_ci - get receive queue local ci + * @param rq: receive queue + * @retval: receive queue local ci + */ +static inline u16 hinic5_get_rq_local_ci(const struct hinic5_io_queue *rq) +{ + return WQ_MASK_IDX(&rq->wq, rq->wq.cons_idx); +} + +/* * + * @brief hinic5_get_rq_local_pi - get receive queue local pi + * @param rq: receive queue + * @retval: receive queue local pi + */ +static inline u16 hinic5_get_rq_local_pi(const struct hinic5_io_queue *rq) +{ + return WQ_MASK_IDX(&rq->wq, rq->wq.prod_idx); +} + +/* ******************** DB INFO ******************** */ +#define DB_INFO_QID_SHIFT 0 +#define DB_INFO_NON_FILTER_SHIFT 22 +#define DB_INFO_CFLAG_SHIFT 23 +#define DB_INFO_COS_SHIFT 24 +#define DB_INFO_TYPE_SHIFT 27 + +#define DB_INFO_QID_MASK 0x1FFFU +#define DB_INFO_NON_FILTER_MASK 0x1U +#define DB_INFO_CFLAG_MASK 0x1U +#define DB_INFO_COS_MASK 0x7U +#define DB_INFO_TYPE_MASK 0x1FU +#define DB_INFO_SET(val, member) \ + (((u32)(val) & DB_INFO_##member##_MASK) << \ 
+ DB_INFO_##member##_SHIFT) + +#define DB_PI_LOW_MASK 0xFFU +#define DB_PI_HIGH_MASK 0xFFU +#define DB_PI_LOW(pi) ((pi) & DB_PI_LOW_MASK) +#define DB_PI_HI_SHIFT 8 +#define DB_PI_HIGH(pi) (((pi) >> DB_PI_HI_SHIFT) & DB_PI_HIGH_MASK) +#define DB_ADDR(queue, pi) ((u64 *)((queue)->db_addr) + (u16)DB_PI_LOW(pi)) +#define SRC_TYPE 1 + +/* CFLAG_DATA_PATH */ +#define SQ_CFLAG_DP 0U +#define RQ_CFLAG_DP 1U +/* * + * @brief hinic5_write_db - write doorbell + * @param queue: nic io queue + * @param cos: cos index + * @param cflag: 0--sq, 1--rq + * @param pi: product index + */ +#ifndef __UEFI__ +static inline void hinic5_write_db(struct hinic5_io_queue *queue, int cos, + u8 cflag, u16 pi) +{ + struct hinic5_nic_db db; + + db.db_info = DB_INFO_SET(SRC_TYPE, TYPE) | DB_INFO_SET(cflag, CFLAG) | + DB_INFO_SET(cos, COS) | DB_INFO_SET(queue->q_id, QID); + db.pi_hi = DB_PI_HIGH(pi); + /* Data should be written to HW in Big Endian Format */ + db.db_info = hinic5_hw_be32(db.db_info); + db.pi_hi = hinic5_hw_be32(db.pi_hi); + + wmb(); /* Write all before the doorbell */ + + writeq(*((u64 *)&db), DB_ADDR(queue, pi)); +} +#else +void hinic5_write_db(void *pcidev, struct hinic5_io_queue *queue, int cos, + u8 cflag, u16 pi); +#endif + +struct hinic5_dyna_qp_params { + u16 num_qps; + u16 xdp_qps; + u32 sq_depth; + u32 rq_depth; + + struct hinic5_io_queue *sqs; + struct hinic5_io_queue *rqs; +}; + +struct hinic5_qp_coalesce_info { + union { + struct { + u8 rx_pending_limt; + u8 rx_coalesce_timer_cfg; + u8 tx_pending_limt; + u8 tx_coalesce_timer_cfg; + }; + + struct { + u8 pending_limt; + u8 coalesce_timer_cfg; + }; + }; + + u64 pkt_rate_low; + u8 rx_usecs_low; + u8 rx_pending_limt_low; + + u64 pkt_rate_high; + u8 rx_usecs_high; + u8 rx_pending_limt_high; +}; + +enum hinic5_queue_type { + HINIC5_SQ, + HINIC5_RQ, + HINIC5_MAX_QUEUE_TYPE +}; + +int hinic5_alloc_qps(void *hwdev, struct irq_info *qps_msix_arry, + struct hinic5_dyna_qp_params *qp_params); +void hinic5_free_qps(void *hwdev, struct 
hinic5_dyna_qp_params *qp_params); +int hinic5_init_qps(void *hwdev, struct hinic5_dyna_qp_params *qp_params); +void hinic5_deinit_qps(void *hwdev, struct hinic5_dyna_qp_params *qp_params); +int hinic5_init_nicio_res(void *hwdev, u16 usr_qps_num); +void hinic5_deinit_nicio_res(void *hwdev); +int hinic5_set_sq_rq_coalesce_cfg(void *hwdev, u32 q_id, u32 type, + struct hinic5_qp_coalesce_info *coal_info); +int hinic5_get_rq_wqe_type(void *hwdev); +void hinic5_nic_io_param_validate(void); + +/* * + * @brief hinic5_create_qps - create queue pairs + * @param hwdev: device pointer to hwdev + * @param num_qp: number of queue pairs + * @param sq_depth: sq depth + * @param rq_depth: rq depth + * @param qps_msix_arry: msix info + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_create_qps(void *hwdev, u16 num_qp, u32 sq_depth, u32 rq_depth, + struct irq_info *qps_msix_arry); + +/* * + * @brief hinic5_destroy_qps - destroy queue pairs + * @param hwdev: device pointer to hwdev + */ +void hinic5_destroy_qps(void *hwdev); + +/* * + * @brief hinic5_get_nic_queue - get nic queue + * @param hwdev: device pointer to hwdev + * @param q_id: queue index + * @param q_type: queue type + * @retval queue address + */ +void *hinic5_get_nic_queue(void *hwdev, u16 q_id, enum hinic5_queue_type q_type); + +/* * + * @brief hinic5_init_qp_ctxts - init queue pair context + * @param hwdev: device pointer to hwdev + * @retval zero: success + * @retval non-zero: failure + */ +int hinic5_init_qp_ctxts(void *hwdev); + +/* * + * @brief hinic5_free_qp_ctxts - free queue pairs + * @param hwdev: device pointer to hwdev + */ +void hinic5_free_qp_ctxts(void *hwdev); +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_nic_rq.h b/hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_nic_rq.h new file mode 100644 index 00000000..aba03cf5 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_nic_rq.h @@ -0,0 +1,276 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_NIC_RQ_H +#define HINIC5_NIC_RQ_H + +#include "hinic5_common.h" + +#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_SHIFT 0 +#define RQ_CQE_OFFOLAD_TYPE_IP_TYPE_SHIFT 5 +#define RQ_CQE_OFFOLAD_TYPE_ENC_L3_TYPE_SHIFT 7 +#define RQ_CQE_OFFOLAD_TYPE_TUNNEL_PKT_FORMAT_SHIFT 8 +#define RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_SHIFT 19 +#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_SHIFT 21 +#define RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_SHIFT 24 + +#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK 0x1FU +#define RQ_CQE_OFFOLAD_TYPE_IP_TYPE_MASK 0x3U +#define RQ_CQE_OFFOLAD_TYPE_ENC_L3_TYPE_MASK 0x1U +#define RQ_CQE_OFFOLAD_TYPE_TUNNEL_PKT_FORMAT_MASK 0xFU +#define RQ_CQE_OFFOLAD_TYPE_PKT_UMBCAST_MASK 0x3U +#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_MASK 0x1U +#define RQ_CQE_OFFOLAD_TYPE_RSS_TYPE_MASK 0xFFU + +#define RQ_CQE_OFFOLAD_TYPE_GET(val, member) \ + (((val) >> RQ_CQE_OFFOLAD_TYPE_##member##_SHIFT) & \ + RQ_CQE_OFFOLAD_TYPE_##member##_MASK) + +#define HINIC5_GET_RX_PKT_TYPE(offload_type) \ + RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE) +#define HINIC5_GET_RX_IP_TYPE(offload_type) \ + RQ_CQE_OFFOLAD_TYPE_GET(offload_type, IP_TYPE) +#define HINIC5_GET_RX_ENC_L3_TYPE(offload_type) \ + RQ_CQE_OFFOLAD_TYPE_GET(offload_type, ENC_L3_TYPE) +#define HINIC5_GET_RX_TUNNEL_PKT_FORMAT(offload_type) \ + RQ_CQE_OFFOLAD_TYPE_GET(offload_type, TUNNEL_PKT_FORMAT) + +#define HINIC5_GET_RX_PKT_UMBCAST(offload_type) \ + RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_UMBCAST) + +#define HINIC5_GET_RX_VLAN_OFFLOAD_EN(offload_type) \ + RQ_CQE_OFFOLAD_TYPE_GET(offload_type, VLAN_EN) + +#define HINIC5_GET_RSS_TYPES(offload_type) \ + RQ_CQE_OFFOLAD_TYPE_GET(offload_type, RSS_TYPE) + +#define RQ_CQE_SGE_VLAN_SHIFT 0 +#define RQ_CQE_SGE_LEN_SHIFT 16 + +#define RQ_CQE_SGE_VLAN_MASK 0xFFFFU +#define RQ_CQE_SGE_LEN_MASK 0xFFFFU + +#define RQ_CQE_SGE_GET(val, member) \ + (((val) >> RQ_CQE_SGE_##member##_SHIFT) & RQ_CQE_SGE_##member##_MASK) + 
+#define HINIC5_GET_RX_VLAN_TAG(vlan_len) RQ_CQE_SGE_GET(vlan_len, VLAN) + +#define HINIC5_GET_RX_PKT_LEN(vlan_len) RQ_CQE_SGE_GET(vlan_len, LEN) + +#define RQ_CQE_STATUS_CSUM_ERR_SHIFT 0 +#define RQ_CQE_STATUS_NUM_LRO_SHIFT 16 +#define RQ_CQE_STATUS_LRO_PUSH_SHIFT 25 +#define RQ_CQE_STATUS_LRO_ENTER_SHIFT 26 +#define RQ_CQE_STATUS_LRO_INTR_SHIFT 27 + +#define RQ_CQE_STATUS_BP_EN_SHIFT 30 +#define RQ_CQE_STATUS_RXDONE_SHIFT 31 +#define RQ_CQE_STATUS_DECRY_PKT_SHIFT 29 +#define RQ_CQE_STATUS_FLUSH_SHIFT 28 + +#define RQ_CQE_STATUS_CSUM_ERR_MASK 0xFFFFU +#define RQ_CQE_STATUS_NUM_LRO_MASK 0xFFU +#define RQ_CQE_STATUS_LRO_PUSH_MASK 0X1U +#define RQ_CQE_STATUS_LRO_ENTER_MASK 0X1U +#define RQ_CQE_STATUS_LRO_INTR_MASK 0X1U +#define RQ_CQE_STATUS_BP_EN_MASK 0X1U +#define RQ_CQE_STATUS_RXDONE_MASK 0x1U +#define RQ_CQE_STATUS_FLUSH_MASK 0x1U +#define RQ_CQE_STATUS_DECRY_PKT_MASK 0x1U + +#define RQ_CQE_STATUS_GET(val, member) \ + (((val) >> RQ_CQE_STATUS_##member##_SHIFT) & \ + RQ_CQE_STATUS_##member##_MASK) + +#define HINIC5_GET_RX_CSUM_ERR(status) RQ_CQE_STATUS_GET(status, CSUM_ERR) + +#define HINIC5_GET_RX_DONE(status) RQ_CQE_STATUS_GET(status, RXDONE) + +#define HINIC5_GET_RX_FLUSH(status) RQ_CQE_STATUS_GET(status, FLUSH) + +#define HINIC5_GET_RX_BP_EN(status) RQ_CQE_STATUS_GET(status, BP_EN) + +#define HINIC5_GET_RX_NUM_LRO(status) RQ_CQE_STATUS_GET(status, NUM_LRO) + +#define HINIC5_RX_IS_DECRY_PKT(status) RQ_CQE_STATUS_GET(status, DECRY_PKT) + +#define RQ_CQE_SUPER_CQE_EN_SHIFT 0 +#define RQ_CQE_PKT_NUM_SHIFT 1 +#define RQ_CQE_PKT_LAST_LEN_SHIFT 6 +#define RQ_CQE_PKT_FIRST_LEN_SHIFT 19 + +#define RQ_CQE_SUPER_CQE_EN_MASK 0x1 +#define RQ_CQE_PKT_NUM_MASK 0x1FU +#define RQ_CQE_PKT_FIRST_LEN_MASK 0x1FFFU +#define RQ_CQE_PKT_LAST_LEN_MASK 0x1FFFU + +#define RQ_CQE_PKT_NUM_GET(val, member) \ + (((val) >> RQ_CQE_PKT_##member##_SHIFT) & RQ_CQE_PKT_##member##_MASK) +#define HINIC5_GET_RQ_CQE_PKT_NUM(pkt_info) RQ_CQE_PKT_NUM_GET(pkt_info, NUM) + +#define 
RQ_CQE_SUPER_CQE_EN_GET(val, member) \ + (((val) >> RQ_CQE_##member##_SHIFT) & RQ_CQE_##member##_MASK) +#define HINIC5_GET_SUPER_CQE_EN(pkt_info) \ + RQ_CQE_SUPER_CQE_EN_GET(pkt_info, SUPER_CQE_EN) + +#define RQ_CQE_PKT_LEN_GET(val, member) \ + (((val) >> RQ_CQE_PKT_##member##_SHIFT) & RQ_CQE_PKT_##member##_MASK) + +#define RQ_CQE_DECRY_INFO_DECRY_STATUS_SHIFT 8 +#define RQ_CQE_DECRY_INFO_ESP_NEXT_HEAD_SHIFT 0 + +#define RQ_CQE_DECRY_INFO_DECRY_STATUS_MASK 0xFFU +#define RQ_CQE_DECRY_INFO_ESP_NEXT_HEAD_MASK 0xFFU + +#define RQ_CQE_DECRY_INFO_GET(val, member) \ + (((val) >> RQ_CQE_DECRY_INFO_##member##_SHIFT) & \ + RQ_CQE_DECRY_INFO_##member##_MASK) + +#define HINIC5_GET_DECRYPT_STATUS(decry_info) \ + RQ_CQE_DECRY_INFO_GET(decry_info, DECRY_STATUS) + +#define HINIC5_GET_ESP_NEXT_HEAD(decry_info) \ + RQ_CQE_DECRY_INFO_GET(decry_info, ESP_NEXT_HEAD) + +/* compact cqe field */ +/* cqe dw0 */ +#define RQ_COMPACT_CQE_STATUS_RXDONE_SHIFT 31 +#define RQ_COMPACT_CQE_STATUS_CQE_TYPE_SHIFT 30 +#define RQ_COMPACT_CQE_STATUS_TS_FLAG_SHIFT 29 +#define RQ_COMPACT_CQE_STATUS_VLAN_EN_SHIFT 28 +#define RQ_COMPACT_CQE_STATUS_PKT_FORMAT_SHIFT 25 +#define RQ_COMPACT_CQE_STATUS_IP_TYPE_SHIFT 24 +#define RQ_COMPACT_CQE_STATUS_CQE_LEN_SHIFT 23 +#define RQ_COMPACT_CQE_STATUS_PKT_MC_SHIFT 21 +#define RQ_COMPACT_CQE_STATUS_CSUM_ERR_SHIFT 19 +#define RQ_COMPACT_CQE_STATUS_PKT_TYPE_SHIFT 16 +#define RQ_COMPACT_CQE_STATUS_PKT_LEN_SHIFT 0 + +#define RQ_COMPACT_CQE_STATUS_RXDONE_MASK 0x1U +#define RQ_COMPACT_CQE_STATUS_CQE_TYPE_MASK 0x1U +#define RQ_COMPACT_CQE_STATUS_TS_FLAG_MASK 0x1U +#define RQ_COMPACT_CQE_STATUS_VLAN_EN_MASK 0x1U +#define RQ_COMPACT_CQE_STATUS_PKT_FORMAT_MASK 0x7U +#define RQ_COMPACT_CQE_STATUS_IP_TYPE_MASK 0x1U +#define RQ_COMPACT_CQE_STATUS_PKT_MC_MASK 0x3U +#define RQ_COMPACT_CQE_STATUS_CQE_LEN_MASK 0x1U +#define RQ_COMPACT_CQE_STATUS_CSUM_ERR_MASK 0x3U +#define RQ_COMPACT_CQE_STATUS_PKT_TYPE_MASK 0x7U +#define RQ_COMPACT_CQE_STATUS_PKT_LEN_MASK 0xFFFFU + +#define 
RQ_COMPACT_CQE_STATUS_GET(val, member) \ + ((((val) >> RQ_COMPACT_CQE_STATUS_##member##_SHIFT) & \ + RQ_COMPACT_CQE_STATUS_##member##_MASK)) + +/* cqe dw2 */ +#define RQ_COMPACT_CQE_OFFLOAD_NUM_LRO_SHIFT 24 +#define RQ_COMPACT_CQE_OFFLOAD_VLAN_SHIFT 8 +#define RQ_COMPACT_CQE_OFFLOAD_PFE_PKT_SRC_SHIFT 5 +#define RQ_COMPACT_CQE_OFFLOAD_PFE_PORT_ID_SHIFT 3 +#define RQ_COMPACT_CQE_OFFLOAD_FLOW_MARK_VLD_SHIFT 2 +#define RQ_COMPACT_CQE_OFFLOAD_SRC_FUNC_ID_HIGH_SHIFT 0 +#define RQ_COMPACT_CQE_OFFLOAD_SRC_FUNC_ID_SHIFT 8 + +/* cqe dw3 */ +#define RQ_COMPACT_CQE_OFFLOAD_SRC_FUNC_ID_LOW_SHIFT 24 +#define RQ_COMPACT_CQE_OFFLOAD_FLOW_MARK_SHIFT 0 + +#define RQ_COMPACT_CQE_OFFLOAD_NUM_LRO_MASK 0xFFU +#define RQ_COMPACT_CQE_OFFLOAD_VLAN_MASK 0xFFFFU +#define RQ_COMPACT_CQE_OFFLOAD_PFE_PKT_SRC_MASK 0x1U +#define RQ_COMPACT_CQE_OFFLOAD_PFE_PORT_ID_MASK 0x3U +#define RQ_COMPACT_CQE_OFFLOAD_FLOW_MARK_VLD_MASK 0x1U +#define RQ_COMPACT_CQE_OFFLOAD_SRC_FUNC_ID_HIGH_MASK 0x3U +#define RQ_COMPACT_CQE_OFFLOAD_SRC_FUNC_ID_LOW_MASK 0xFFU +#define RQ_COMPACT_CQE_OFFLOAD_FLOW_MARK_MASK 0xFFFFFFU + +#define RQ_COMPACT_CQE_OFFLOAD_GET(val, member) \ + (((val) >> RQ_COMPACT_CQE_OFFLOAD_##member##_SHIFT) & \ + RQ_COMPACT_CQE_OFFLOAD_##member##_MASK) + +#define RQ_COMPACT_CQE_16BYTE 0 +#define RQ_COMPACT_CQE_8BYTE 1 + +enum RQ_CAST_TYPE { + UNICAST = 0, + BROADCAST, + MULTICAST, + RESERVED, +}; + +struct hinic5_rq_cqe { + u32 status; + u32 vlan_len; + + u32 offload_type; + u32 hash_val; + u32 xid; + u32 decrypt_info; + u32 rsvd6; + u32 pkt_info; +}; + +struct hinic5_cqe_info { + u8 packet_offset; + + u8 pkt_mc; + u8 pfe_pkt_src; + u8 pfe_port_id; + + u8 lro_num; + u8 vlan_offload; + u8 pkt_fmt; + u8 ip_type; + + u8 pkt_type; + u8 cqe_len; + u8 cqe_type; + u8 ts_flag; + + u16 csum_err; + u16 vlan_tag; + + u16 pkt_len; + u16 rss_type; + + u32 rss_hash_value; + + /* CQE info for PFE */ + u16 src_func_id; + u8 flow_mark_vld; + u8 rsvd0; + + u32 flow_mark; +} __aligned(32); + +struct hinic5_sge_sect { 
+ struct hinic5_sge sge; + u32 rsvd; +}; + +struct hinic5_rq_extend_wqe { + struct hinic5_sge_sect buf_desc; + struct hinic5_sge_sect cqe_sect; +}; + +struct hinic5_rq_normal_wqe { + u32 buf_hi_addr; + u32 buf_lo_addr; + u32 cqe_hi_addr; + u32 cqe_lo_addr; +}; + +struct hinic5_rq_compact_wqe { + u32 buf_hi_addr; + u32 buf_lo_addr; +}; + +struct hinic5_rq_wqe { + union { + struct hinic5_rq_compact_wqe compact_wqe; + struct hinic5_rq_normal_wqe normal_wqe; + struct hinic5_rq_extend_wqe extend_wqe; + }; +}; + +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_nic_sq.h b/hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_nic_sq.h new file mode 100644 index 00000000..00824f61 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/include/hinic5_nic_sq.h @@ -0,0 +1,279 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_NIC_SQ_H +#define HINIC5_NIC_SQ_H + +#include "hinic5_common.h" + +#define TX_MSS_DEFAULT 0x3E00 +#define TX_MSS_MIN 0x50 + +#define HINIC5_MAX_SQ_SGE 18 + +struct hinic5_sq_wqe_desc { + u32 ctrl_len; + u32 queue_info; + u32 hi_addr; + u32 lo_addr; +}; + +/* Engine only pass first 12B TS field directly to uCode through metadata + * vlan_offoad is used for hardware when vlan insert in tx + */ +struct hinic5_sq_task { + u32 pkt_info0; + u32 ip_identify; + u32 pkt_info2; /* ipsec used as spi */ + u32 vlan_offload; +}; + +struct hinic5_sq_bufdesc { + u32 len; /* 31-bits Length, L2NIC only use length[17:0] */ + u32 rsvd; + u32 hi_addr; + u32 lo_addr; +}; + +struct hinic5_sq_compact_wqe { + struct hinic5_sq_wqe_desc wqe_desc; +}; + +struct hinic5_sq_extend_wqe { + struct hinic5_sq_wqe_desc wqe_desc; + struct hinic5_sq_task task; + struct hinic5_sq_bufdesc buf_desc[]; +}; + +struct hinic5_sq_wqe { + union { + struct hinic5_sq_compact_wqe compact_wqe; + struct hinic5_sq_extend_wqe extend_wqe; + }; +}; + +/* use section pointer for support 
non continuous wqe */ +struct hinic5_sq_wqe_combo { + struct hinic5_sq_wqe_desc *ctrl_bd0; + struct hinic5_sq_task *task; + struct hinic5_sq_bufdesc *bds_head; + struct hinic5_sq_bufdesc *bds_sec2; + + u16 first_bds_num; + u8 wqe_type; + u8 task_type; + + u16 wqebb_cnt; + u8 offload; + u8 rsvd; +}; + +/* ************* SQ_CTRL ************** */ +enum sq_wqe_data_format { + SQ_NORMAL_WQE = 0, +}; + +enum sq_wqe_ec_type { + SQ_WQE_COMPACT_TYPE = 0, + SQ_WQE_EXTENDED_TYPE = 1, +}; + +enum sq_wqe_tasksect_len_type { + SQ_WQE_TASKSECT_4BYTES = 0, + SQ_WQE_TASKSECT_16BYTES = 1, +}; + +struct hinic5_offload_info { + u8 encapsulation; + u8 esp_next_proto; + u8 inner_l4_en; + u8 inner_l3_en; + u8 out_l4_en; + u8 out_l3_en; + u8 ipsec_offload; + u8 pkt_1588; + u8 vlan_sel; + u8 vlan_valid; + u16 vlan1_tag; + u32 ip_identify; +}; + +struct hinic5_queue_info { + u8 pri; + u8 uc; + u8 sctp; + u8 udp_dp_en; + u8 tso; + u8 ufo; + u8 payload_offset; + u8 pkt_type; + u16 mss; +}; + +#define SQ_CTRL_BD0_LEN_SHIFT 0 +#define SQ_CTRL_RSVD_SHIFT 18 +#define SQ_CTRL_BUFDESC_NUM_SHIFT 19 +#define SQ_CTRL_TASKSECT_LEN_SHIFT 27 +#define SQ_CTRL_DATA_FORMAT_SHIFT 28 +#define SQ_CTRL_DIRECT_SHIFT 29 +#define SQ_CTRL_EXTENDED_SHIFT 30 +#define SQ_CTRL_OWNER_SHIFT 31 + +#define SQ_CTRL_BD0_LEN_MASK 0x3FFFFU +#define SQ_CTRL_RSVD_MASK 0x1U +#define SQ_CTRL_BUFDESC_NUM_MASK 0xFFU +#define SQ_CTRL_TASKSECT_LEN_MASK 0x1U +#define SQ_CTRL_DATA_FORMAT_MASK 0x1U +#define SQ_CTRL_DIRECT_MASK 0x1U +#define SQ_CTRL_EXTENDED_MASK 0x1U +#define SQ_CTRL_OWNER_MASK 0x1U + +#define SQ_CTRL_SET(val, member) \ + (((u32)(val) & SQ_CTRL_##member##_MASK) << SQ_CTRL_##member##_SHIFT) + +#define SQ_CTRL_GET(val, member) \ + (((val) >> SQ_CTRL_##member##_SHIFT) & SQ_CTRL_##member##_MASK) + +#define SQ_CTRL_CLEAR(val, member) \ + ((val) & (~(SQ_CTRL_##member##_MASK << SQ_CTRL_##member##_SHIFT))) + +#define SQ_CTRL_QUEUE_INFO_PKT_TYPE_SHIFT 0 +#define SQ_CTRL_QUEUE_INFO_PLDOFF_SHIFT 2 +#define 
SQ_CTRL_QUEUE_INFO_UFO_SHIFT 10 +#define SQ_CTRL_QUEUE_INFO_TSO_SHIFT 11 +#define SQ_CTRL_QUEUE_INFO_UDP_DP_EN_SHIFT 12 +#define SQ_CTRL_QUEUE_INFO_MSS_SHIFT 13 +#define SQ_CTRL_QUEUE_INFO_SCTP_SHIFT 27 +#define SQ_CTRL_QUEUE_INFO_UC_SHIFT 28 +#define SQ_CTRL_QUEUE_INFO_PRI_SHIFT 29 + +#define SQ_CTRL_QUEUE_INFO_PKT_TYPE_MASK 0x3U +#define SQ_CTRL_QUEUE_INFO_PLDOFF_MASK 0xFFU +#define SQ_CTRL_QUEUE_INFO_UFO_MASK 0x1U +#define SQ_CTRL_QUEUE_INFO_TSO_MASK 0x1U +#define SQ_CTRL_QUEUE_INFO_UDP_DP_EN_MASK 0x1U +#define SQ_CTRL_QUEUE_INFO_MSS_MASK 0x3FFFU +#define SQ_CTRL_QUEUE_INFO_SCTP_MASK 0x1U +#define SQ_CTRL_QUEUE_INFO_UC_MASK 0x1U +#define SQ_CTRL_QUEUE_INFO_PRI_MASK 0x7U + +#define SQ_CTRL_QUEUE_INFO_SET(val, member) \ + (((u32)(val) & SQ_CTRL_QUEUE_INFO_##member##_MASK) << \ + SQ_CTRL_QUEUE_INFO_##member##_SHIFT) + +#define SQ_CTRL_QUEUE_INFO_GET(val, member) \ + (((val) >> SQ_CTRL_QUEUE_INFO_##member##_SHIFT) & \ + SQ_CTRL_QUEUE_INFO_##member##_MASK) + +#define SQ_CTRL_QUEUE_INFO_CLEAR(val, member) \ + ((val) & (~(SQ_CTRL_QUEUE_INFO_##member##_MASK << \ + SQ_CTRL_QUEUE_INFO_##member##_SHIFT))) + +#define SQ_TASK_INFO0_TUNNEL_FLAG_SHIFT 19 +#define SQ_TASK_INFO0_ESP_NEXT_PROTO_SHIFT 22 +#define SQ_TASK_INFO0_INNER_L4_EN_SHIFT 24 +#define SQ_TASK_INFO0_INNER_L3_EN_SHIFT 25 +#define SQ_TASK_INFO0_INNER_L4_PSEUDO_SHIFT 26 +#define SQ_TASK_INFO0_OUT_L4_EN_SHIFT 27 +#define SQ_TASK_INFO0_OUT_L3_EN_SHIFT 28 +#define SQ_TASK_INFO0_OUT_L4_PSEUDO_SHIFT 29 +#define SQ_TASK_INFO0_ESP_OFFLOAD_SHIFT 30 +#define SQ_TASK_INFO0_IPSEC_PROTO_SHIFT 31 + +#define SQ_TASK_INFO0_TUNNEL_FLAG_MASK 0x1U +#define SQ_TASK_INFO0_ESP_NEXT_PROTO_MASK 0x3U +#define SQ_TASK_INFO0_INNER_L4_EN_MASK 0x1U +#define SQ_TASK_INFO0_INNER_L3_EN_MASK 0x1U +#define SQ_TASK_INFO0_INNER_L4_PSEUDO_MASK 0x1U +#define SQ_TASK_INFO0_OUT_L4_EN_MASK 0x1U +#define SQ_TASK_INFO0_OUT_L3_EN_MASK 0x1U +#define SQ_TASK_INFO0_OUT_L4_PSEUDO_MASK 0x1U +#define SQ_TASK_INFO0_ESP_OFFLOAD_MASK 0x1U +#define 
SQ_TASK_INFO0_IPSEC_PROTO_MASK 0x1U + +#define SQ_TASK_INFO0_SET(val, member) \ + (((u32)(val) & SQ_TASK_INFO0_##member##_MASK) << \ + SQ_TASK_INFO0_##member##_SHIFT) +#define SQ_TASK_INFO0_GET(val, member) \ + (((val) >> SQ_TASK_INFO0_##member##_SHIFT) & \ + SQ_TASK_INFO0_##member##_MASK) + +#define SQ_TASK_INFO1_SET(val, member) \ + (((val) & SQ_TASK_INFO1_##member##_MASK) << \ + SQ_TASK_INFO1_##member##_SHIFT) +#define SQ_TASK_INFO1_GET(val, member) \ + (((val) >> SQ_TASK_INFO1_##member##_SHIFT) & \ + SQ_TASK_INFO1_##member##_MASK) + +#define SQ_TASK_INFO3_VLAN_TAG_SHIFT 0 +#define SQ_TASK_INFO3_VLAN_TYPE_SHIFT 16 +#define SQ_TASK_INFO3_VLAN_TAG_VALID_SHIFT 19 + +#define SQ_TASK_INFO3_VLAN_TAG_MASK 0xFFFFU +#define SQ_TASK_INFO3_VLAN_TYPE_MASK 0x7U +#define SQ_TASK_INFO3_VLAN_TAG_VALID_MASK 0x1U + +#define SQ_TASK_INFO3_SET(val, member) \ + (((val) & SQ_TASK_INFO3_##member##_MASK) << \ + SQ_TASK_INFO3_##member##_SHIFT) +#define SQ_TASK_INFO3_GET(val, member) \ + (((val) >> SQ_TASK_INFO3_##member##_SHIFT) & \ + SQ_TASK_INFO3_##member##_MASK) + +/* the task section format in compact wqe */ +#define SQ_TASK_INFO_PKT_1588_SHIFT 31 +#define SQ_TASK_INFO_IPSEC_PROTO_SHIFT 30 +#define SQ_TASK_INFO_OUT_L3_EN_SHIFT 28 +#define SQ_TASK_INFO_OUT_L4_EN_SHIFT 27 +#define SQ_TASK_INFO_INNER_L3_EN_SHIFT 25 +#define SQ_TASK_INFO_INNER_L4_EN_SHIFT 24 +#define SQ_TASK_INFO_ESP_NEXT_PROTO_SHIFT 22 +#define SQ_TASK_INFO_VLAN_VALID_SHIFT 19 +#define SQ_TASK_INFO_VLAN_SEL_SHIFT 16 +#define SQ_TASK_INFO_VLAN_TAG_SHIFT 0 + +#define SQ_TASK_INFO_PKT_1588_MASK 0x1U +#define SQ_TASK_INFO_IPSEC_PROTO_MASK 0x1U +#define SQ_TASK_INFO_OUT_L3_EN_MASK 0x1U +#define SQ_TASK_INFO_OUT_L4_EN_MASK 0x1U +#define SQ_TASK_INFO_INNER_L3_EN_MASK 0x1U +#define SQ_TASK_INFO_INNER_L4_EN_MASK 0x1U +#define SQ_TASK_INFO_ESP_NEXT_PROTO_MASK 0x3U +#define SQ_TASK_INFO_VLAN_VALID_MASK 0x1U +#define SQ_TASK_INFO_VLAN_SEL_MASK 0x7U +#define SQ_TASK_INFO_VLAN_TAG_MASK 0xFFFFU + +#define SQ_TASK_INFO_SET(val, 
member) \ + (((u32)(val) & SQ_TASK_INFO_##member##_MASK) << \ + SQ_TASK_INFO_##member##_SHIFT) +#define SQ_TASK_INFO_GET(val, member) \ + (((val) >> SQ_TASK_INFO_##member##_SHIFT) & \ + SQ_TASK_INFO_##member##_MASK) + +#define SQ_CTRL_15BIT_QUEUE_INFO_PKT_TYPE_SHIFT 14 +#define SQ_CTRL_15BIT_QUEUE_INFO_PLDOFF_SHIFT 16 +#define SQ_CTRL_15BIT_QUEUE_INFO_UFO_SHIFT 24 +#define SQ_CTRL_15BIT_QUEUE_INFO_TSO_SHIFT 25 +#define SQ_CTRL_15BIT_QUEUE_INFO_UDP_DP_EN_SHIFT 26 +#define SQ_CTRL_15BIT_QUEUE_INFO_SCTP_SHIFT 27 + +#define SQ_CTRL_15BIT_QUEUE_INFO_PKT_TYPE_MASK 0x3U +#define SQ_CTRL_15BIT_QUEUE_INFO_PLDOFF_MASK 0xFFU +#define SQ_CTRL_15BIT_QUEUE_INFO_UFO_MASK 0x1U +#define SQ_CTRL_15BIT_QUEUE_INFO_TSO_MASK 0x1U +#define SQ_CTRL_15BIT_QUEUE_INFO_UDP_DP_EN_MASK 0x1U +#define SQ_CTRL_15BIT_QUEUE_INFO_SCTP_MASK 0x1U + +#define SQ_CTRL_15BIT_QUEUE_INFO_SET(val, member) \ + (((u32)(val) & SQ_CTRL_15BIT_QUEUE_INFO_##member##_MASK) << \ + SQ_CTRL_15BIT_QUEUE_INFO_##member##_SHIFT) + +#define SQ_CTRL_15BIT_QUEUE_INFO_GET(val, member) \ + (((val) >> SQ_CTRL_15BIT_QUEUE_INFO_##member##_SHIFT) & \ + SQ_CTRL_15BIT_QUEUE_INFO_##member##_MASK) + +#define SQ_CTRL_15BIT_QUEUE_INFO_CLEAR(val, member) \ + ((val) & (~(SQ_CTRL_15BIT_QUEUE_INFO_##member##_MASK << \ + SQ_CTRL_15BIT_QUEUE_INFO_##member##_SHIFT))) + +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/1588/hinic5_ptp.c b/hinic5/src/dpu_platform_library/host/service/nic/linux/1588/hinic5_ptp.c new file mode 100644 index 00000000..47cbd0bf --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/1588/hinic5_ptp.c @@ -0,0 +1,301 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved. 
*/ + +#include <linux/skbuff.h> +#include <linux/ptp_clock_kernel.h> +#include "ossl_knl.h" +#include "hinic5_nic_dev.h" +#include "hinic5_mt.h" +#include "hinic5_tx.h" +#include "hinic5_hw.h" +#include "hinic5_nic_event.h" +#include "hinic5_ptp.h" + +static unsigned int ptp_clock_pf; +module_param(ptp_clock_pf, uint, 0444); +MODULE_PARM_DESC(ptp_clock_pf, "ptp_clock_pf, 0: pf0, 1: pf1, 2: pf2, 3: pf3 (default=0)"); + +static int hinic5_ptp_gettime64(struct ptp_clock_info *ptp_info, struct timespec64 *ts) +{ + int ret; + struct hinic5_ptp_ctrl *ptp_ctrl = container_of(ptp_info, struct hinic5_ptp_ctrl, ptp_info); + + spin_lock_bh(&ptp_ctrl->ptp_clock_lock); + ret = hinic5_ts_up_en(ptp_ctrl->hwdev, PTP_RD_UP_EN_FLAG); + if (ret == 0) + hinic5_read_ts_data(ptp_ctrl->hwdev, ts); + + spin_unlock_bh(&ptp_ctrl->ptp_clock_lock); + return ret; +} + +#ifdef HAVE_PTP_INFO_GETTIMEX64 +static int hinic5_ptp_gettimex64(struct ptp_clock_info *ptp_info, struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + int ret; + struct hinic5_ptp_ctrl *ptp_ctrl = container_of(ptp_info, struct hinic5_ptp_ctrl, ptp_info); + + spin_lock_bh(&ptp_ctrl->ptp_clock_lock); + ptp_read_system_prets(sts); + ret = hinic5_ts_up_en(ptp_ctrl->hwdev, PTP_RD_UP_EN_FLAG); + ptp_read_system_postts(sts); + + if (ret == 0) + hinic5_read_ts_data(ptp_ctrl->hwdev, ts); + + spin_unlock_bh(&ptp_ctrl->ptp_clock_lock); + return ret; +} +#endif + +static int hinic5_ptp_settime64(struct ptp_clock_info *ptp_info, const struct timespec64 *ts) +{ + struct hinic5_ptp_ctrl *ptp_ctrl = container_of(ptp_info, struct hinic5_ptp_ctrl, ptp_info); + + spin_lock_bh(&ptp_ctrl->ptp_clock_lock); + hinic5_write_ts_data(ptp_ctrl->hwdev, ts); + spin_unlock_bh(&ptp_ctrl->ptp_clock_lock); + return 0; +} + +static int hinic5_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta) +{ + int ret; + s32 update_val; + struct timespec64 ts, delta_ts; + struct hinic5_ptp_ctrl *ptp_ctrl = container_of(ptp_info, struct hinic5_ptp_ctrl, 
ptp_info); + + if (delta > PTP_CLOCK_MAX_ADJ_TIME_VALUE || -delta > PTP_CLOCK_MAX_ADJ_TIME_VALUE) { + delta_ts = ns_to_timespec64(delta); + ret = hinic5_ptp_gettime64(ptp_info, &ts); + if (ret != 0) + return ret; + + ts = timespec64_add(ts, delta_ts); + hinic5_ptp_settime64(ptp_info, &ts); + return 0; + } + + if (delta >= 0) + update_val = (s32)delta; + else + update_val = -(s32)(-delta); + + spin_lock_bh(&ptp_ctrl->ptp_clock_lock); + hinic5_ptp_ts_update(ptp_ctrl->hwdev, update_val); + spin_unlock_bh(&ptp_ctrl->ptp_clock_lock); + return 0; +} + +static void hinic5_ptp_set_inc_per_cycle(struct hinic5_ptp_ctrl *ptp_ctrl, u32 inc_val) +{ + spin_lock_bh(&ptp_ctrl->ptp_clock_lock); + hinic5_set_ptp_inc(ptp_ctrl->hwdev, inc_val); + spin_unlock_bh(&ptp_ctrl->ptp_clock_lock); +} + +static int hinic5_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm) +{ + struct hinic5_ptp_ctrl *ptp_ctrl = container_of(ptp_info, struct hinic5_ptp_ctrl, ptp_info); + u32 adj_inc_val = (u32)adjust_by_scaled_ppm(ptp_ctrl->inc_val, scaled_ppm); + + hinic5_ptp_set_inc_per_cycle(ptp_ctrl, adj_inc_val); + return 0; +} + +static void hinic5_ptp_tx_time_out(struct hinic5_nic_dev *nic_dev) +{ + struct sk_buff *skb = nic_dev->ptp_ctrl.tx_saved_skb; + + if (time_is_after_jiffies(nic_dev->ptp_ctrl.tx_start + HZ) != 0) + return; + + nic_dev->ptp_ctrl.tx_saved_skb = NULL; + clear_bit(HINIC5_PTP_TX_BUSY, &nic_dev->ptp_ctrl.flags); + dev_kfree_skb_any(skb); +} + +int hinic5_ptp_tx_process(struct hinic5_nic_dev *nic_dev, struct sk_buff *skb) +{ + if (!nic_dev->hwdev || test_bit(HINIC5_PTP_CLOCK, &nic_dev->flags) == 0 || + nic_dev->ptp_ctrl.tx_enable == 0) + return -EINVAL; + + if (test_and_set_bit(HINIC5_PTP_TX_BUSY, &nic_dev->ptp_ctrl.flags)) { + hinic5_ptp_tx_time_out(nic_dev); + if (test_and_set_bit(HINIC5_PTP_TX_BUSY, &nic_dev->ptp_ctrl.flags)) + return -EBUSY; + } + + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + nic_dev->ptp_ctrl.tx_saved_skb = skb_get(skb); + nic_dev->ptp_ctrl.tx_start 
= jiffies;
+	return 0;
+}
+
+/* AEQ software-event callback for PTP TX timestamps: splice the 2-bit
+ * hardware seconds + 30-bit nanoseconds carried in @data onto the full
+ * PHC time, report the timestamp on the saved skb and release the
+ * TX-busy slot.  Always returns 0.
+ */
+u8 hinic5_ptp_tx_event_handle(void *dev, u8 event, const u8 *data)
+{
+	struct hinic5_nic_dev *nic_dev = (struct hinic5_nic_dev *)dev;
+	struct timespec64 ts = {0};
+	struct skb_shared_hwtstamps shhwtstamps;
+	union hinic5_hw_ts32 hw_ts = { .val = *(const u32 *)data };
+	struct sk_buff *skb = nic_dev->ptp_ctrl.tx_saved_skb;
+
+	if (test_bit(HINIC5_PTP_CLOCK, &nic_dev->flags) == 0 || !skb)
+		return 0;
+
+	/* NOTE(review): the RX path converts the raw timestamp with
+	 * be32_to_cpu() before use; confirm whether the AEQ event data
+	 * needs the same conversion here.
+	 */
+	hinic5_ptp_gettime64(&nic_dev->ptp_ctrl.ptp_info, &ts);
+	ts.tv_nsec = hw_ts.time_ns;
+	/* 0x3: hardware reports only the lower 2 bits of the seconds.
+	 * If the PHC seconds already rolled past the hardware snapshot,
+	 * step back one second before recombining.
+	 */
+	if (((u32)ts.tv_sec & 0x3) < hw_ts.time_s && ts.tv_sec != 0)
+		ts.tv_sec--;
+	ts.tv_sec = (u32)(ts.tv_sec - ((u32)ts.tv_sec & 0x3)) + hw_ts.time_s;
+	nic_dev->ptp_ctrl.tx_saved_skb = NULL;
+	clear_bit(HINIC5_PTP_TX_BUSY, &nic_dev->ptp_ctrl.flags);
+	/* zero the whole struct so no stale stack data is reported to
+	 * user space through skb_tstamp_tx()
+	 */
+	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+	shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
+	skb_tstamp_tx(skb, &shhwtstamps);
+	dev_kfree_skb_any(skb);
+	return 0;
+}
+
+/* Attach the hardware RX timestamp (appended in the last 4 bytes of the
+ * frame) to @skb and strip it from the packet.
+ */
+void hinic5_ptp_rx_hwtstamp(struct hinic5_nic_dev *nic_dev, struct sk_buff *skb)
+{
+	struct timespec64 ts = {0};
+	union hinic5_hw_ts32 hw_ts32;
+
+	if (test_bit(HINIC5_PTP_CLOCK, &nic_dev->flags) == 0 || nic_dev->ptp_ctrl.rx_enable == 0)
+		return;
+
+	hinic5_ptp_gettime64(&nic_dev->ptp_ctrl.ptp_info, &ts);
+
+	hw_ts32 = *(union hinic5_hw_ts32 *)(skb_tail_pointer(skb) - sizeof(union hinic5_hw_ts32));
+	/* the timestamp is written at the packet tail; drop it once read */
+	skb->len = skb->len - PTP_SKB_HWTSTAMPS_LENGTH;
+	skb_set_tail_pointer(skb, (int)(skb->len));
+	hw_ts32.val = be32_to_cpu(hw_ts32.val);
+	/* 0x3: hardware reports only the lower 2 bits of the seconds */
+	if (((u32)ts.tv_sec & 0x3) < hw_ts32.time_s && ts.tv_sec != 0)
+		ts.tv_sec--;
+	ts.tv_sec = (u32)(ts.tv_sec - ((u32)ts.tv_sec & 0x3)) + hw_ts32.time_s;
+	ts.tv_nsec = hw_ts32.time_ns;
+
+	skb_hwtstamps(skb)->hwtstamp = timespec64_to_ktime(ts);
+}
+
+int hinic5_ptp_get_ts_config(struct hinic5_nic_dev *nic_dev, struct ifreq *ifr)
+{
+	if (test_bit(HINIC5_PTP_CLOCK, &nic_dev->flags) == 0)
+		return -EOPNOTSUPP;
+
+	
return copy_to_user(ifr->ifr_data, &nic_dev->ptp_ctrl.config, + sizeof(struct hwtstamp_config)) != 0 ? -EFAULT : 0; +} + +static int hinic5_ptp_set_config(struct hinic5_nic_dev *nic_dev, struct hwtstamp_config *config) +{ + if (config->tx_type == HWTSTAMP_TX_ON) + nic_dev->ptp_ctrl.tx_enable = 1; + else if (config->tx_type == HWTSTAMP_TX_OFF) + nic_dev->ptp_ctrl.tx_enable = 0; + else + return -ERANGE; + + if (config->rx_filter == HWTSTAMP_FILTER_NONE) { + nic_dev->ptp_ctrl.rx_enable = 0; + } else { + nic_dev->ptp_ctrl.rx_enable = 1; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + } + + // TBM : call MPU API to turn off rx hw PTP offload + return 0; +} + +int hinic5_ptp_set_ts_config(struct hinic5_nic_dev *nic_dev, struct ifreq *ifr) +{ + struct hwtstamp_config config; + int err; + + if (test_bit(HINIC5_PTP_CLOCK, &nic_dev->flags) == 0) + return -EOPNOTSUPP; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config)) != 0) + return -EFAULT; + + err = hinic5_ptp_set_config(nic_dev, &config); + if (err != 0) + return err; + + nic_dev->ptp_ctrl.config = config; + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) != 0 ? 
+	       -EFAULT : 0;
+}
+
+/* Register the PHC and enable 1588 support.  Runs only on the PF
+ * selected by the ptp_clock_pf module parameter and only when the
+ * device advertises 1588v2 capability.
+ */
+void hinic5_ptp_init(struct hinic5_nic_dev *nic_dev)
+{
+	struct timespec64 ts;
+	struct ptp_clock_info *ptp_info = NULL;
+
+	if (!nic_dev->hwdev || hinic5_global_func_id(nic_dev->hwdev) != ptp_clock_pf ||
+	    !HINIC5_SUPPORT_PTP_1588_V2(nic_dev->hwdev))
+		return;
+
+	ptp_info = &nic_dev->ptp_ctrl.ptp_info;
+	nic_dev->ptp_ctrl.hwdev = nic_dev->hwdev;
+	/* strscpy() always NUL-terminates, so pass the full buffer size;
+	 * the former "- 1" plus manual termination wasted one byte
+	 */
+	strscpy(ptp_info->name, HINIC5_CHIP_NAME, sizeof(ptp_info->name));
+
+	ptp_info->owner = THIS_MODULE;
+	ptp_info->max_adj = PTP_CLOCK_MAX_ADJ_TIME_VALUE;
+	ptp_info->settime64 = hinic5_ptp_settime64;
+	ptp_info->gettime64 = hinic5_ptp_gettime64;
+#ifdef HAVE_PTP_INFO_GETTIMEX64
+	ptp_info->gettimex64 = hinic5_ptp_gettimex64;
+#endif
+	ptp_info->adjfine = hinic5_ptp_adjfine;
+	ptp_info->adjtime = hinic5_ptp_adjtime;
+	spin_lock_init(&nic_dev->ptp_ctrl.ptp_clock_lock);
+
+	nic_dev->ptp_ctrl.ptp_clock = ptp_clock_register(ptp_info, nic_dev->lld_dev->dev);
+	/* ptp_clock_register() returns NULL when kernel PTP clock support
+	 * is compiled out, and an ERR_PTR on failure - bail out in both
+	 * cases so HINIC5_PTP_CLOCK is never set with no clock behind it
+	 */
+	if (IS_ERR_OR_NULL(nic_dev->ptp_ctrl.ptp_clock)) {
+		nic_dev->ptp_ctrl.ptp_clock = NULL;
+		return;
+	}
+
+	nic_dev->ptp_ctrl.flags = 0;
+	nic_dev->ptp_ctrl.config.flags = 0;
+	nic_dev->ptp_ctrl.config.rx_filter = HWTSTAMP_FILTER_NONE;
+	nic_dev->ptp_ctrl.config.tx_type = HWTSTAMP_TX_OFF;
+	/* lower 16 bits hold a 0.xx ns fraction; 2 << 16 == 2 ns per cycle */
+	nic_dev->ptp_ctrl.inc_val = 2 << 16;
+	nic_dev->ptp_ctrl.tx_saved_skb = NULL;
+	hinic5_ptp_set_config(nic_dev, &nic_dev->ptp_ctrl.config);
+	hinic5_ptp_set_inc_per_cycle(&nic_dev->ptp_ctrl, nic_dev->ptp_ctrl.inc_val);
+
+	ktime_get_real_ts64(&ts);
+	hinic5_ptp_settime64(&nic_dev->ptp_ctrl.ptp_info, &ts);
+	hinic5_nic_aeq_register_swe_cb(nic_dev->hwdev, nic_dev, HINIC5_HTN_PTP_EVENT,
+				       (hinic5_aeq_swe_cb)hinic5_ptp_tx_event_handle);
+	set_bit(HINIC5_PTP_CLOCK, &nic_dev->flags);
+}
+
+void hinic5_ptp_deinit(struct hinic5_nic_dev *nic_dev)
+{
+	struct sk_buff *skb = NULL;
+
+	if (nic_dev->ptp_ctrl.tx_saved_skb) {
+		skb = nic_dev->ptp_ctrl.tx_saved_skb;
+		nic_dev->ptp_ctrl.tx_saved_skb = NULL;
+		
clear_bit(HINIC5_PTP_TX_BUSY, &nic_dev->ptp_ctrl.flags); + dev_kfree_skb_any(skb); + } + if (nic_dev->ptp_ctrl.ptp_clock) { + nic_dev->ptp_ctrl.tx_enable = 0; + nic_dev->ptp_ctrl.rx_enable = 0; + ptp_clock_unregister(nic_dev->ptp_ctrl.ptp_clock); + hinic5_nic_aeq_unregister_swe_cb(nic_dev->hwdev, HINIC5_HTN_PTP_EVENT); + nic_dev->ptp_ctrl.ptp_clock = NULL; + } +} diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/1588/hinic5_ptp.h b/hinic5/src/dpu_platform_library/host/service/nic/linux/1588/hinic5_ptp.h new file mode 100644 index 00000000..c87484a9 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/1588/hinic5_ptp.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_PTP_H +#define HINIC5_PTP_H +#define PTP_DEBUG + +#include "ossl_knl.h" +#include "hinic5_nic_dev.h" + +union hinic5_hw_ts32 { + struct { + u32 time_ns : 30; + u32 time_s : 2; + }; + u32 val; +}; + +enum hinic5_ptp_flags { + HINIC5_PTP_TX_BUSY, +}; + +#ifdef PTP_DEBUG +#define ptp_dbg(format, ...) pr_info("[PTP]" format, ##__VA_ARGS__) +#else +#define ptp_dbg(format, ...) 
+#endif + +#define PTP_CLOCK_MAX_ADJ_TIME_VALUE 0x3FFFFFFF +#define PTP_SKB_HWTSTAMPS_LENGTH 0x4 +#define PTP_WR_UP_EN_FLAG BIT(0) +#define PTP_RD_UP_EN_FLAG BIT(1) + +int hinic5_ptp_tx_process(struct hinic5_nic_dev *nic_dev, struct sk_buff *skb); +void hinic5_ptp_rx_hwtstamp(struct hinic5_nic_dev *nic_dev, struct sk_buff *skb); +int hinic5_ptp_set_ts_config(struct hinic5_nic_dev *nic_dev, struct ifreq *ifr); +int hinic5_ptp_get_ts_config(struct hinic5_nic_dev *nic_dev, struct ifreq *ifr); +void hinic5_ptp_init(struct hinic5_nic_dev *nic_dev); +void hinic5_ptp_deinit(struct hinic5_nic_dev *nic_dev); +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/CMakeLists.txt b/hinic5/src/dpu_platform_library/host/service/nic/linux/CMakeLists.txt new file mode 100644 index 00000000..10be8753 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/CMakeLists.txt @@ -0,0 +1,82 @@ +if("${BUILD_VERSION}" MATCHES "ub_ascend") + set(UMMU_CORE_BUILD_DIR ${UBUS_BUILD_DIR}/kernel/ummu-core-v1) +elseif("${PRODUCT}" STREQUAL "ascend910D" OR "${PRODUCT}" STREQUAL "ascend910Desl") + set(UBUS_DIR ${TOP_DIR}/drivers/ubus) + set(KDIR ${KERNEL_WORK_DIR}/../linux-4.19) + set(UMMU_CORE_BUILD_DIR ${UBUS_DIR}/kernel/ummu-core-v1) +else() + set(UMMU_CORE_BUILD_DIR ${UBUS_BUILD_DIR}/kernel/ummu-core) +endif() + +set(HISDK5_BUILD_DIR ${BUILD_CACHE_TOP_DIR}/cmake/ChipSolution/src/dpu_platform_library/host/sdk/knldk/lld) + +# =============================== 使用KCompat自动化工具时临时适配1650的NIC驱动编译(TODO)=============================== +message(STATUS "KDIR = ${KDIR}") +set(HI1823_TRUNK_DIR ${TOP_DIR}/ChipSolution) +set(NIC_KCOMPAT_GENERATOR_PATH "${HI1823_TRUNK_DIR}/build/host/linux/nic/nic-kcompat-generator.sh") +set(NIC_KCOMPAT_PATH "${HI1823_TRUNK_DIR}/src/dpu_develop_interface/drv_sdk_intf/ossl/nic_kcompat.h") + +if("${KDIR}" MATCHES "2403_SP2") + message(STATUS "KNL_HEADER_TYPE: UB1650") + set(KERN_VER "NULL") + set(KSRC "${KDIR}/../../../../open_source/2403_SP2") 
+ message(STATUS "KSRC = ${KSRC}") + + if(EXISTS ${NIC_KCOMPAT_GENERATOR_PATH}) + message(STATUS "${NIC_KCOMPAT_GENERATOR_PATH} file is exist!") + endif() + + string(RANDOM LENGTH 4 RAND_NUM) + set(NIC_KCOMPAT_GENERATOR_PATH_TMP "${HI1823_TRUNK_DIR}/build/host/linux/nic/nic-kcompat-generator_${RAND_NUM}.sh") + + if(EXISTS ${NIC_KCOMPAT_GENERATOR_PATH_TMP}) + file(REMOVE ${NIC_KCOMPAT_GENERATOR_PATH_TMP}) + endif() + + file(COPY_FILE "${NIC_KCOMPAT_GENERATOR_PATH}" "${NIC_KCOMPAT_GENERATOR_PATH_TMP}") + if(EXISTS ${NIC_KCOMPAT_GENERATOR_PATH_TMP}) + message(STATUS "${NIC_KCOMPAT_GENERATOR_PATH_TMP} file copy succeed") + else() + message(STATUS "${NIC_KCOMPAT_GENERATOR_PATH_TMP} file copy failed") + endif() + + file(READ ${NIC_KCOMPAT_GENERATOR_PATH_TMP} FILE_CONTENTS) + string(REPLACE "KERN_VER=\$(uname -r)" "KERN_VER=${KERN_VER}" FILE_CONTENTS "${FILE_CONTENTS}") + string(REPLACE "KSRC=\"\"" "KSRC=\"${KSRC}\"" FILE_CONTENTS "${FILE_CONTENTS}") + file(WRITE ${NIC_KCOMPAT_GENERATOR_PATH_TMP} "${FILE_CONTENTS}") + + execute_process( + COMMAND bash -c "source ${NIC_KCOMPAT_GENERATOR_PATH_TMP} && gen_nic_kcompat ${NIC_KCOMPAT_PATH}" + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} + RESULT_VARIABLE RESULT + ) + if(EXISTS ${NIC_KCOMPAT_GENERATOR_PATH_TMP}) + file(REMOVE ${NIC_KCOMPAT_GENERATOR_PATH_TMP}) + endif() +else() + message(STATUS "KNL_HEADER_TYPE: DEFAULT") + execute_process( + COMMAND bash -c "source ${NIC_KCOMPAT_GENERATOR_PATH} && gen_nic_kcompat ${NIC_KCOMPAT_PATH}" + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} + RESULT_VARIABLE RESULT + ) +endif() + +if(EXISTS ${NIC_KCOMPAT_PATH}) + message(STATUS "${NIC_KCOMPAT_PATH} file is exist!") +else() + message(STATUS "${NIC_KCOMPAT_PATH} file is not exist!") +endif() +# ===================================================================================================== + +add_custom_target(hinic5_ko + COMMENT echo "build ${CMAKE_CURRENT_SOURCE_DIR} start." 
+ COMMAND cd ${TOP_DIR}/ChipSolution && git apply build/host/linux/sdk/patch_code/knl6_6_compile.patch && cd - + COMMAND cp -f ${CMAKE_CURRENT_SOURCE_DIR}/Makefile ${CMAKE_CURRENT_BINARY_DIR} + COMMAND ${MAKE} -j64 -C ${KDIR} M=${CMAKE_CURRENT_BINARY_DIR} src=${CMAKE_CURRENT_SOURCE_DIR} HI1823_TRUNK_DIR=${TOP_DIR}/ChipSolution HI1823_BUILD_DIR=${TOP_DIR}/ChipSolution HI1823_OS_RELEASE=openEuler24.03 HISDK5_SYMVERS=${HISDK5_BUILD_DIR}/Module.symvers + COMMAND cp -f *.ko ${CMAKE_INSTALL_PREFIX}/ko + COMMAND cd ${TOP_DIR}/ChipSolution && git apply --reverse build/host/linux/sdk/patch_code/knl6_6_compile.patch && cd - + DEPENDS kernel +) + +add_dependencies(hinic5_ko hisdk5_ko) \ No newline at end of file diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/Makefile b/hinic5/src/dpu_platform_library/host/service/nic/linux/Makefile new file mode 100644 index 00000000..4f94c7f0 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/Makefile @@ -0,0 +1,245 @@ +# NIC Makefile +EXPORT_SYMBOL := true + +EXTRA_CFLAGS += -DSECUREC_EXPORT_KERNEL_SYMBOL=0 + +TRUNK_DIR = $(srctree)/drivers/net/ethernet/huawei/hinic5 +HWSDK_SRC_PATH = $(TRUNK_DIR)/src/dpu_platform_library/host/sdk/knldk +SDK3_DIR := ../../../sdk/knldk/lld + +# HWSDK INNER +EXTRA_CFLAGS += -I$(HWSDK_SRC_PATH)/hinic5_cqm +EXTRA_CFLAGS += -I$(HWSDK_SRC_PATH)/crm +EXTRA_CFLAGS += -I$(HWSDK_SRC_PATH)/lld +EXTRA_CFLAGS += -I$(HWSDK_SRC_PATH)/hwif +EXTRA_CFLAGS += -I$(HWSDK_SRC_PATH)/mt +EXTRA_CFLAGS += -I$(HWSDK_SRC_PATH)/include + +# HWSDK HEADER +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_platform_library/include +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_platform_library/include/drv_tool_msg +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_platform_library/include/drv_fw_msg/mpu +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_platform_library/host/include/sdk/knldk +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_platform_library/host/include/cfm/fast_msg +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_develop_interface 
+EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_develop_interface/drv_sdk_intf/hisdk +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_develop_interface/drv_sdk_intf/ossl + +# DBGTOOL +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_develop_interface/fw_msg_intf/public +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_develop_interface/fw_msg_intf/mag +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_develop_interface/fw_msg_intf/mpu +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_develop_interface/fw_msg_intf/hinic5_cqm +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_develop_interface/fw_msg_intf/bond +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_develop_interface/fw_msg_intf/cfm +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_develop_interface/fw_msg_intf/cfg_mgmt +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/tools/micro_log + +#SDK +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_develop_interface +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_develop_interface/drv_srvc_intf +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_develop_interface/drv_sdk_intf/ossl +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_develop_interface/fw_msg_intf/public +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_develop_interface/fw_msg_intf/cfg_mgmt +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_develop_interface/fw_msg_intf/mpu +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_platform_library/host/include/sdk/knldk +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_develop_interface/drv_sdk_intf/hisdk +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_platform_library/host/sdk/knldk/lld + +#NIC +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_platform_library/include/drv_tool_msg +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_platform_library/host/service/nic/linux/1588 +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_platform_library/host/service/nic/linux/ctrl +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_platform_library/host/service/nic/linux/ethtool +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_platform_library/host/service/nic/linux/ioctl +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_platform_library/host/service/nic/linux/netdev +EXTRA_CFLAGS += 
-I$(TRUNK_DIR)/src/dpu_platform_library/host/service/nic/linux/nicio +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_platform_library/host/service/nic/include +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_platform_library/host/service/include +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/common/nic +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_platform_library/host/service/nic/comm +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_develop_interface/fw_msg_intf/nic +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_develop_interface/fw_msg_intf/bond +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_develop_interface/fw_msg_intf/mag +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_develop_interface/fw_msg_intf/cfm +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_develop_interface/drv_srvc_intf/nic +#BOND +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_platform_library/include/drv_fw_msg/cfm +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_platform_library/host/include/cfm/bond + +#MACsec +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_develop_interface/fw_msg_intf/macsec +EXTRA_CFLAGS += -I$(TRUNK_DIR)/src/dpu_platform_library/host/service/nic/linux/macsec + +EXTRA_CFLAGS += -Wno-implicit-fallthrough +EXTRA_CFLAGS += -Wframe-larger-than=2048 + +ifeq ($(CONFIG_SP_DEVICE), y) +EXTRA_CFLAGS += -DCONFIG_SP_VID_DID +endif + +ifndef __TIME_STR__ +SYS_TIME=$(shell date +%Y-%m-%d_%H:%M:%S) +ccflags-y += -D __TIME_STR__=\"$(SYS_TIME)\" +endif +GLOBAL_VERSION=$(shell cat $(TRUNK_DIR)/src/GLOBAL_VERSION_NEW | grep driver | awk -F ':' '{print $$2}') +ccflags-y += -DGLOBAL_VERSION_STR=\"$(GLOBAL_VERSION)\" + +$(warning cflags, $(ccflags-y)) +V ?= 0 + +ifeq ($(RELEASE_TYPE), LLT) + ccflags-y += -D_LLT_TEST_ +else + ccflags-y += -DHW_CONVERT_ENDIAN +endif + +ccflags-y += -D__LINUX__ + +spsdk3-objs := \ + $(SDK3_DIR)/../hinic5_vram/hinic5_vram_common.o \ + $(SDK3_DIR)/../hinic5_vram/hinic5_hinic5_vram.o \ + $(SDK3_DIR)/../../../cfm/fast_msg/hinic5_fast_msg_init.o \ + $(SDK3_DIR)/../../../cfm/fast_msg/hinic5_fast_msg.o \ + $(SDK3_DIR)/../crm/hinic5_hwdev.o \ + 
$(SDK3_DIR)/../crm/hinic5_hw_cfg.o \ + $(SDK3_DIR)/../crm/hinic5_hw_comm.o \ + $(SDK3_DIR)/../crm/hinic5_prof_adap.o \ + $(SDK3_DIR)/hinic5_sriov.o \ + $(SDK3_DIR)/hinic5_lld.o \ + $(SDK3_DIR)/hinic5_bus.o \ + $(SDK3_DIR)/hinic5_pcie.o \ + $(SDK3_DIR)/hinic5_sysfs.o \ + $(SDK3_DIR)/hinic5_dev_mgmt.o \ + $(SDK3_DIR)/../hwif/hinic5_common.o \ + $(SDK3_DIR)/../hwif/hinic5_hwif.o \ + $(SDK3_DIR)/../hwif/hinic5_wq.o \ + $(SDK3_DIR)/../hwif/hinic5_cmdq.o \ + $(SDK3_DIR)/../hwif/hinic5_enhance_cmdq.o \ + $(SDK3_DIR)/../hwif/hinic5_eqs.o \ + $(SDK3_DIR)/../hwif/hinic5_mbox.o \ + $(SDK3_DIR)/../hwif/hinic5_mgmt.o \ + $(SDK3_DIR)/../hwif/hinic5_api_cmd.o \ + $(SDK3_DIR)/../hwif/hinic5_hw_api.o \ + $(SDK3_DIR)/../hwif/hinic5_sml_lt.o \ + $(SDK3_DIR)/../../../../../../src/tools/micro_log/micro_log_comm.o \ + $(SDK3_DIR)/../../../../../../src/tools/micro_log/micro_log_procfs_cmd.o \ + $(SDK3_DIR)/../../../../../../src/tools/micro_log/micro_log_index.o \ + $(SDK3_DIR)/../../../../../../src/tools/micro_log/hinic5_micro_log.o \ + $(SDK3_DIR)/../mt/hinic5_fw_update.o \ + $(SDK3_DIR)/../mt/hinic5_hw_mt.o \ + $(SDK3_DIR)/../mt/hinic5_nictool.o \ + $(SDK3_DIR)/../mt/hinic5_non_ptp.o \ + $(SDK3_DIR)/../mt/hinic5_devlink.o \ + $(SDK3_DIR)/../mt/hinic5_sdk_attack.o \ + $(SDK3_DIR)/../../ossl/linux/kernel/ossl_knl_linux.o \ + $(SDK3_DIR)/../hinic5_cqm/hinic5_cqm_bat_cla.o \ + $(SDK3_DIR)/../hinic5_cqm/hinic5_cqm_bitmap_table.o \ + $(SDK3_DIR)/../hinic5_cqm/hinic5_cqm_object_intern.o \ + $(SDK3_DIR)/../hinic5_cqm/hinic5_cqm_bloomfilter.o \ + $(SDK3_DIR)/../hinic5_cqm/hinic5_cqm_cmd.o \ + $(SDK3_DIR)/../hinic5_cqm/hinic5_cqm_db.o \ + $(SDK3_DIR)/../hinic5_cqm/hinic5_cqm_fast_msg.o \ + $(SDK3_DIR)/../hinic5_cqm/hinic5_cqm_object.o \ + $(SDK3_DIR)/../hinic5_cqm/hinic5_cqm_main.o \ + $(SDK3_DIR)/../hinic5_cqm/hinic5_cqm_cmdq_adapt.o \ + $(SDK3_DIR)/../hinic5_cqm/hinic5_cqm_182x_cmdq_adapt/hinic5_cqm_182x_cmdq_ops.o \ + 
$(SDK3_DIR)/../hinic5_cqm/hinic5_cqm_187x_cmdq_adapt/hinic5_cqm_187x_cmdq_ops.o \ + +hinic5-objs := \ + ./1588/hinic5_ptp.o \ + ./ctrl/hinic5_main.o \ + ./ctrl/hinic5_irq.o \ + ./ethtool/hinic5_rss.o \ + ./ethtool/hinic5_ntuple.o \ + ./ethtool/hinic5_ethtool.o \ + ./ethtool/hinic5_ethtool_coalesce.o \ + ./ethtool/hinic5_ethtool_lb_test.o \ + ./ethtool/hinic5_ethtool_priv_flags.o \ + ./ethtool/hinic5_ethtool_port_stats.o \ + ./ethtool/hinic5_ethtool_link_stats.o \ + ./ethtool/hinic5_tc.o \ + ./ioctl/hinic5_dcb.o \ + ./ioctl/hinic5_dbg.o \ + ./netdev/hinic5_netdev_ops.o \ + ./netdev/hinic5_filter.o \ + ./nicio/hinic5_tx.o \ + ./nicio/hinic5_rx.o \ + ./nicio/hinic5_xdp.o \ + ./macsec/hinic5_macsec_dfx.o \ + ./macsec/hinic5_macsec_main.o \ + ./macsec/hinic5_macsec_mgmt.o \ + ./macsec/hinic5_macsec_nictool.o \ + ./macsec/hinic5_macsec_protocol.o \ + ./macsec/hinic5_macsec_service.o \ + ../comm/hinic5_nic_cfg.o \ + ../comm/hinic5_mag_cfg.o \ + ../comm/hinic5_nic_cfg_vf.o \ + ../comm/hinic5_rss_cfg.o \ + ../comm/hinic5_nic_event.o \ + ../comm/hinic5_nic_io.o \ + ../comm/hinic5_nic_dbg.o \ + ../comm/hinic5_cmdq_adapt.o \ + ../comm/182x_cmdq_adapt/182x_cmdq_ops.o \ + ../comm/187x_cmdq_adapt/187x_cmdq_ops.o \ + ../../../cfm/bond/hinic5_bond_event.o \ + ../../../cfm/bond/hinic5_bond.o \ + $(spsdk3-objs) + +# UB release support +ifeq ($(CONFIG_UB), y) +hisdk5-objs += hinic5_ubus.o hinic5_ubus_sriov.o +EXTRA_CFLAGS += -D__UBUS_DRIVER__ -DUB_SUPPORT_ENTITY -DUB_SUPPORT_B177 +endif + +# UB devel support +ifeq ($(CONFIG_UBUS_DEVICE), y) +KBUILD_EXTRA_SYMBOLS += $(UBUS_UBC_BUILD_DIR)/Module.symvers +KBUILD_EXTRA_SYMBOLS += $(UMMU_CORE_BUILD_DIR)/Module.symvers +EXTRA_CFLAGS += -D__UBUS_DRIVER__ +hisdk5-objs += hinic5_ubus.o +hisdk5-objs += hinic5_ubus_sriov.o +ifeq ($(UB_BUILD_B173), y) +EXTRA_CFLAGS += -DUB_SUPPORT_ENTITY +endif +ifeq ($(UB_BUILD_B177), y) +EXTRA_CFLAGS += -DUB_SUPPORT_B177 +endif +endif + +# Driver Extension +ifneq ($(DPU_HISDK5_DRV_EXTEND_MK),) +$(info Using 
hisdk5 extension $(DPU_HISDK5_DRV_EXTEND_MK)) +export DPU_DRV_MK_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) +include $(DPU_HISDK5_DRV_EXTEND_MK) +hisdk5-objs += $(DPU_DRV_HISDK5_EXTEND_OBJS) +endif +ifneq ($(DPU_HINIC5_DRV_EXTEND_MK),) +$(info Using hinic5 extension $(DPU_HINIC5_DRV_EXTEND_MK)) +# Dirctory of NIC makefile +export DPU_DRV_MK_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) +include $(DPU_HINIC5_DRV_EXTEND_MK) +hinic5-objs += $(DPU_DRV_HINIC5_EXTEND_OBJS) +endif + +obj-m += hinic5.o + +all:build_info default + +build_info: + mkdir -p build + @echo "CURDIR=$(CURDIR)" > build/build_src.txt + @for obj in $(hinic5-objs); do \ + echo $$(realpath $$obj) >> build/build_src.txt; \ + done + +KERNEL_VER := $(shell uname -r 2>/dev/null) +KERNEL_DIR := /lib/modules/$(KERNEL_VER)/build + +obj-m += hinic5.o + +default: + $(MAKE) -C $(KERNEL_DIR) M=$(shell pwd) -W modules + +clean: + rm -rf *.o *.ko *.order .*.cmd *.mod.* .H* .tm* .tmp_versions Module.symvers *.ko.unsigned null \ No newline at end of file diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/ctrl/hinic5_irq.c b/hinic5/src/dpu_platform_library/host/service/nic/linux/ctrl/hinic5_irq.c new file mode 100644 index 00000000..d35d328d --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/ctrl/hinic5_irq.c @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/debugfs.h> + +#include "ossl_knl.h" +#include "hinic5_hw.h" +#include "hinic5_crm.h" +#include "hinic5_nic_io.h" +#include "hinic5_nic_dev.h" +#include "hinic5_tx.h" +#include "hinic5_rx.h" + +int hinic5_poll(struct napi_struct *napi, int budget) +{ + int tx_pkts, rx_pkts; + struct 
hinic5_irq *irq_cfg = + container_of(napi, struct hinic5_irq, napi); + struct hinic5_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev); + + rx_pkts = hinic5_rx_poll(irq_cfg->rxq, budget); + + tx_pkts = hinic5_tx_poll(irq_cfg->txq, budget); + if (tx_pkts >= budget || rx_pkts >= budget) + return budget; + + napi_complete(napi); + + hinic5_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, + HINIC5_MSIX_ENABLE); + + return max(tx_pkts, rx_pkts); +} + +static void qp_add_napi(struct hinic5_irq *irq_cfg) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev); + + netif_napi_add(nic_dev->netdev, &irq_cfg->napi, hinic5_poll, nic_dev->poll_weight); + napi_enable(&irq_cfg->napi); +} + +static void qp_del_napi(struct hinic5_irq *irq_cfg) +{ + napi_disable(&irq_cfg->napi); + netif_napi_del(&irq_cfg->napi); +} + +static irqreturn_t qp_irq(int irq, void *data) +{ + struct hinic5_irq *irq_cfg = (struct hinic5_irq *)data; + struct hinic5_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev); + + hinic5_misx_intr_clear_resend_bit(nic_dev->hwdev, irq_cfg->msix_entry_idx, 1); + + napi_schedule(&irq_cfg->napi); + + return IRQ_HANDLED; +} + +static int hinic5_request_irq(struct hinic5_irq *irq_cfg, u16 q_id) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev); + struct interrupt_info info = {0}; + int err; + + qp_add_napi(irq_cfg); + info.msix_index = irq_cfg->msix_entry_idx; + /* bind the msix_entry to this function */ + err = hinic5_set_interrupt_cfg(nic_dev->hwdev, info, HINIC5_CHANNEL_NIC); + if (err != 0) { + nicif_err(nic_dev, drv, irq_cfg->netdev, "Failed to set RX interrupt cfg.\n"); + qp_del_napi(irq_cfg); + return err; + } + + nic_dev->rxqs[q_id].last_coalesc_timer_cfg = + nic_dev->intr_coalesce[q_id].rx_coalesce_timer_cfg; + nic_dev->rxqs[q_id].last_pending_limt = + nic_dev->intr_coalesce[q_id].rx_pending_limt; + err = hinic5_set_sq_rq_coalesce_cfg(nic_dev->hwdev, q_id, HINIC5_SQ_RQ_COALESCE, + &nic_dev->intr_coalesce[q_id]); + if (err != 0) { + 
nicif_err(nic_dev, drv, irq_cfg->netdev,
+			  "Failed to set RX interrupt coalescing attribute.\n");
+		qp_del_napi(irq_cfg);
+		return err;
+	}
+
+	err = request_irq(irq_cfg->irq_id, &qp_irq, 0, irq_cfg->irq_name, irq_cfg);
+	if (err != 0) {
+		nicif_err(nic_dev, drv, irq_cfg->netdev, "Failed to request Rx irq\n");
+		qp_del_napi(irq_cfg);
+		return err;
+	}
+
+	irq_set_affinity_hint(irq_cfg->irq_id, &irq_cfg->affinity_mask);
+
+	return 0;
+}
+
+/* Release one queue pair's IRQ resources: drop the affinity hint, free
+ * the IRQ line (free_irq() itself synchronizes against in-flight
+ * handlers) and, unless the device is in a preserved state, tear down
+ * NAPI as well.
+ */
+static void hinic5_release_irq(struct hinic5_irq *irq_cfg, u32 nic_dev_state)
+{
+	irq_set_affinity_hint(irq_cfg->irq_id, NULL);
+	synchronize_irq(irq_cfg->irq_id);
+	free_irq(irq_cfg->irq_id, irq_cfg);
+
+	/*
+	 * During hot-replace the netif NAPI context must not be deleted
+	 * (the ETH device is disabled by netif_carrier_off in the
+	 * 'hinic5_vport_down' interface).
+	 * NOTE(review): original comment said "sdinanoos-hotreplace" —
+	 * confirm the intended term with the authors.
+	 */
+	if (nic_dev_state == 0)
+		qp_del_napi(irq_cfg);
+}
+
+/* Set up IRQs, NAPI and MSI-X state for every data queue pair
+ * (regular + XDP queues). On failure, everything requested so far is
+ * rolled back. Returns 0 on success, negative error code otherwise.
+ */
+int hinic5_qps_irq_init(struct hinic5_nic_dev *nic_dev)
+{
+	struct irq_info *qp_irq_info = NULL;
+	struct hinic5_irq *irq_cfg = NULL;
+	u16 q_id, i;
+	u32 local_cpu;
+	int err;
+
+	for (q_id = 0; q_id < nic_dev->q_params.num_qps + nic_dev->q_params.xdp_qps; q_id++) {
+		qp_irq_info = &nic_dev->qps_irq_info[q_id];
+		irq_cfg = &nic_dev->q_params.irq_cfg[q_id];
+
+		irq_cfg->irq_id = qp_irq_info->irq_id;
+		irq_cfg->msix_entry_idx = qp_irq_info->msix_entry_idx;
+		irq_cfg->netdev = nic_dev->netdev;
+		irq_cfg->txq = &nic_dev->txqs[q_id];
+		irq_cfg->rxq = &nic_dev->rxqs[q_id];
+		nic_dev->rxqs[q_id].irq_cfg = irq_cfg;
+
+		/* Spread queue interrupts across CPUs local to the device's NUMA node */
+		local_cpu = cpumask_local_spread(q_id, dev_to_node(nic_dev->lld_dev->dev));
+		cpumask_set_cpu(local_cpu, &irq_cfg->affinity_mask);
+
+		err = snprintf(irq_cfg->irq_name, sizeof(irq_cfg->irq_name),
+			       "%s_qp%u", nic_dev->netdev->name, q_id);
+		/* Reject truncation too: a truncated name could collide with
+		 * another queue's IRQ name in /proc/interrupts.
+		 */
+		if (err < 0 || err >= (int)sizeof(irq_cfg->irq_name)) {
+			err = -EINVAL;
+			goto req_tx_irq_err;
+		}
+
+		err = hinic5_request_irq(irq_cfg, q_id);
+		if (err != 0) {
+			nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to request Rx irq\n");
+			goto req_tx_irq_err;
+		}
+
+		
hinic5_set_msix_auto_mask_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, + HINIC5_SET_MSIX_AUTO_MASK); + hinic5_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, HINIC5_MSIX_ENABLE); + } + + INIT_DELAYED_WORK(&nic_dev->moderation_task, hinic5_auto_moderation_work); + + return 0; + +req_tx_irq_err: + for (i = 0; i < q_id; i++) { + irq_cfg = &nic_dev->q_params.irq_cfg[i]; + hinic5_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, HINIC5_MSIX_DISABLE); + hinic5_set_msix_auto_mask_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, + HINIC5_CLR_MSIX_AUTO_MASK); + hinic5_release_irq(irq_cfg, nic_dev->state); + } + + return err; +} + +void hinic5_qps_irq_deinit(struct hinic5_nic_dev *nic_dev) +{ + struct hinic5_irq *irq_cfg = NULL; + u16 q_id; + + for (q_id = 0; q_id < nic_dev->q_params.num_qps + nic_dev->q_params.xdp_qps; q_id++) { + irq_cfg = &nic_dev->q_params.irq_cfg[q_id]; + hinic5_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, + HINIC5_MSIX_DISABLE); + hinic5_set_msix_auto_mask_state(nic_dev->hwdev, + irq_cfg->msix_entry_idx, + HINIC5_CLR_MSIX_AUTO_MASK); + hinic5_release_irq(irq_cfg, nic_dev->state); + } +} diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/ctrl/hinic5_irq.h b/hinic5/src/dpu_platform_library/host/service/nic/linux/ctrl/hinic5_irq.h new file mode 100644 index 00000000..b35040e7 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/ctrl/hinic5_irq.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved. 
*/ + +#ifndef HINIC5_IRQ_H +#define HINIC5_IRQ_H + +#include "hinic5_nic_dev.h" + +int hinic5_qps_irq_init(struct hinic5_nic_dev *nic_dev); + +void hinic5_qps_irq_deinit(struct hinic5_nic_dev *nic_dev); + +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/ctrl/hinic5_main.c b/hinic5/src/dpu_platform_library/host/service/nic/linux/ctrl/hinic5_main.c new file mode 100644 index 00000000..bcf7dce0 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/ctrl/hinic5_main.c @@ -0,0 +1,1578 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/if_vlan.h> +#include <linux/ethtool.h> +#include <linux/dcbnl.h> +#include <linux/tcp.h> +#include <linux/ip.h> +#include <linux/debugfs.h> +#include <linux/notifier.h> +#include <linux/skbuff.h> + +#include "ossl_knl.h" +#include "drv_nic_api.h" +#include "hinic5_bond.h" +#include "hinic5_hw.h" +#include "hinic5_crm.h" +#include "hinic5_mt.h" +#include "hinic5_hinic5_vram.h" +#include "hinic5_nic_cfg.h" +#include "hinic5_srv_nic.h" +#include "hinic5_nic_io.h" +#include "hinic5_nic_dev.h" +#include "hinic5_tx.h" +#include "hinic5_rx.h" +#include "hinic5_tc.h" +#include "hinic5_lld.h" +#include "hinic5_rss.h" +#include "hinic5_dcb.h" +#include "hinic5_ptp.h" +#include "hinic5_nic_event.h" +#include "hinic5_hinic5_vram_api.h" +#include "hinic5_macsec_api.h" +#include "hinic5_main.h" +#include "hinic5_lld.h" + +#if defined(HAVE_NDO_UDP_TUNNEL_ADD) || defined(HAVE_UDP_TUNNEL_NIC_INFO) +#include <net/udp_tunnel.h> +#endif /* HAVE_NDO_UDP_TUNNEL_ADD || HAVE_UDP_TUNNEL_NIC_INFO */ + +#define CFM_BOND_FULL + +#define DEFAULT_POLL_WEIGHT 64 
+static unsigned int poll_weight = DEFAULT_POLL_WEIGHT; +module_param(poll_weight, uint, 0444); +MODULE_PARM_DESC(poll_weight, "Number packets for NAPI budget (default=64)"); + +#define HINIC5_DEAULT_TXRX_MSIX_PENDING_LIMIT 2 +#define HINIC5_MAX_TXRX_MSIX_PENDING_LIMIT 255 + +#define HINIC5_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG 25 +#define HINIC5_MAX_TXRX_MSIX_COALESC_TIMER_CFG 255 + +static unsigned char qp_pending_limit = HINIC5_DEAULT_TXRX_MSIX_PENDING_LIMIT; +module_param(qp_pending_limit, byte, 0444); +MODULE_PARM_DESC(qp_pending_limit, "QP MSI-X Interrupt coalescing parameter pending_limit, 0-255 (default=2, unit=8)"); + +static unsigned char qp_coalesc_timer_cfg = + HINIC5_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG; +module_param(qp_coalesc_timer_cfg, byte, 0444); +MODULE_PARM_DESC(qp_coalesc_timer_cfg, "QP MSI-X Interrupt coalescing parameter coalesc_timer_cfg, 0-255 (default=25, unit=5us)"); + +#define DEFAULT_RX_BUFF_LEN 2 +u16 rx_buff = DEFAULT_RX_BUFF_LEN; +module_param(rx_buff, ushort, 0444); +MODULE_PARM_DESC(rx_buff, "Set rx_buff size, 2,4,8 (unit: KB, default=2KB)"); + +static unsigned int lro_replenish_thld = 256; +module_param(lro_replenish_thld, uint, 0444); +MODULE_PARM_DESC(lro_replenish_thld, "Number wqe for lro replenish buffer, 0-16384 (unit: wqebb, default=256)"); + +static unsigned char set_link_status_follow = HINIC5_LINK_FOLLOW_STATUS_MAX; +module_param(set_link_status_follow, byte, 0444); +MODULE_PARM_DESC(set_link_status_follow, "Set link status follow port status, 0: DEFAULT, 1: PORT, 2: SEPARATE, 3: UNSET (default=3)"); + +static bool page_pool_enabled = true; +module_param(page_pool_enabled, bool, 0444); +MODULE_PARM_DESC(page_pool_enabled, "Set page_pool feature state, 0: DISABLE, 1: ENABLE (default=1)"); + +static bool macsec_enabled; +module_param(macsec_enabled, bool, 0444); +MODULE_PARM_DESC(macsec_enabled, "Set macsec module state, 0: DISABLE, 1: ENABLE (default=0)"); + +#define HINIC5_MAX_POLL_WEIGHT 16384 + +#define 
HINIC5_MAX_LRO_REPLENISH_THLD 16384 +#define HINIC5_DEFAULT_LRO_REPLENISH_THLD 255 + +#define HINIC5_MAX_RX_BUFF 8 +#define HINIC5_MIN_RX_BUFF 2 + +static inline void hinic5_main_param_validate(void) +{ + if (poll_weight == 0 || poll_weight > HINIC5_MAX_POLL_WEIGHT) { + poll_weight = DEFAULT_POLL_WEIGHT; + pr_warn("[NIC] poll_weight is out of range(0-%u), reset to default %u\n", + HINIC5_MAX_POLL_WEIGHT, DEFAULT_POLL_WEIGHT); + } + + if (qp_pending_limit > HINIC5_MAX_TXRX_MSIX_PENDING_LIMIT) { + qp_pending_limit = HINIC5_DEAULT_TXRX_MSIX_PENDING_LIMIT; + pr_warn("[NIC] qp_pending_limit is out of range(0-255), reset to default %u\n", + HINIC5_DEAULT_TXRX_MSIX_PENDING_LIMIT); + } + + if (qp_coalesc_timer_cfg > HINIC5_MAX_TXRX_MSIX_COALESC_TIMER_CFG) { + qp_coalesc_timer_cfg = HINIC5_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG; + pr_warn("[NIC] qp_coalesc_timer_cfg is out of range(0-255), reset to default %u\n", + HINIC5_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG); + } + + if (rx_buff != RX_BUFF_VALID_2KB && rx_buff != RX_BUFF_VALID_4KB && + rx_buff != RX_BUFF_VALID_8KB) { + rx_buff = DEFAULT_RX_BUFF_LEN; + pr_warn("[NIC] rx_buff is invalid(%u), only 2/4/8KB supported, reset to default %uKB\n", + rx_buff, DEFAULT_RX_BUFF_LEN); + } + + if (lro_replenish_thld > HINIC5_MAX_LRO_REPLENISH_THLD) { + lro_replenish_thld = HINIC5_DEFAULT_LRO_REPLENISH_THLD; + pr_warn("[NIC] lro_replenish_thld is out of range(0-%u), reset to default %u\n", + HINIC5_MAX_LRO_REPLENISH_THLD, HINIC5_DEFAULT_LRO_REPLENISH_THLD); + } + + if (set_link_status_follow > HINIC5_LINK_FOLLOW_STATUS_MAX) { + set_link_status_follow = HINIC5_LINK_FOLLOW_STATUS_MAX; + pr_warn("[NIC] set_link_status_follow is out of range(0-3), reset to default %u\n", + HINIC5_LINK_FOLLOW_STATUS_MAX); + } +} + +#ifdef HAVE_MULTI_VLAN_OFFLOAD_EN +static int hinic5_netdev_event(struct notifier_block *notifier, unsigned long event, void *ptr); + +#define HINIC5_ASIC_WAIT_FLUSH_QP_RESOURCE_TIMEOUT 100 +#define 
HINIC5_FPGA_WAIT_FLUSH_QP_RESOURCE_TIMEOUT 2000 +#define HINIC5_EMU_WAIT_FLUSH_QP_RESOURCE_TIMEOUT 2000 +#define HINIC5_EDA_WAIT_FLUSH_QP_RESOURCE_TIMEOUT 2000 + +#define HINIC5_GET_TIMEOUT(board, type) HINIC5_##board##_##type##_TIMEOUT + +/* used for netdev notifier register/unregister */ +static DEFINE_MUTEX(hinic5_netdev_notifiers_mutex); +static int hinic5_netdev_notifiers_ref_cnt; +static struct notifier_block hinic5_netdev_notifier = { + .notifier_call = hinic5_netdev_event, +}; + +#ifdef HAVE_UDP_TUNNEL_NIC_INFO +static const struct udp_tunnel_nic_info hinic5_udp_tunnels = { + .set_port = hinic5_udp_tunnel_set_port, + .unset_port = hinic5_udp_tunnel_unset_port, + .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP, + .tables = { + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, + }, +}; +#endif /* HAVE_UDP_TUNNEL_NIC_INFO */ + +static void hinic5_register_notifier(struct hinic5_nic_dev *nic_dev) +{ + int err; + + mutex_lock(&hinic5_netdev_notifiers_mutex); + hinic5_netdev_notifiers_ref_cnt++; + if (hinic5_netdev_notifiers_ref_cnt == 1) { + err = register_netdevice_notifier(&hinic5_netdev_notifier); + if (err != 0) { + nic_info(nic_dev->lld_dev->dev, "Register netdevice notifier failed, err: %d\n", + err); + hinic5_netdev_notifiers_ref_cnt--; + } + } + mutex_unlock(&hinic5_netdev_notifiers_mutex); +} + +static void hinic5_unregister_notifier(struct hinic5_nic_dev *nic_dev) +{ + mutex_lock(&hinic5_netdev_notifiers_mutex); + if (hinic5_netdev_notifiers_ref_cnt == 1) + unregister_netdevice_notifier(&hinic5_netdev_notifier); + + if (hinic5_netdev_notifiers_ref_cnt != 0) + hinic5_netdev_notifiers_ref_cnt--; + mutex_unlock(&hinic5_netdev_notifiers_mutex); +} + +#define HINIC5_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT 1 +#define HINIC5_VLAN_CLEAR_OFFLOAD (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \ + NETIF_F_SCTP_CRC | NETIF_F_RXCSUM | \ + NETIF_F_ALL_TSO) + +static int hinic5_netdev_event(struct notifier_block *notifier, unsigned long event, void *ptr) +{ + struct net_device *ndev 
= netdev_notifier_info_to_dev(ptr); + struct net_device *real_dev = NULL; + struct net_device *ret = NULL; + u16 vlan_depth; + + if (!virt_addr_valid((const void *)ndev)) + return NOTIFY_DONE; + + if (!is_vlan_dev(ndev)) + return NOTIFY_DONE; + + dev_hold(ndev); + + switch (event) { + case NETDEV_REGISTER: + real_dev = vlan_dev_real_dev(ndev); + if (!hinic5_is_netdev_ops_match(real_dev)) + goto out; + + vlan_depth = 1; + ret = vlan_dev_priv(ndev)->real_dev; + while (is_vlan_dev(ret)) { + ret = vlan_dev_priv(ret)->real_dev; + vlan_depth++; + } + + if (vlan_depth == HINIC5_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT) { + ndev->vlan_features &= (~HINIC5_VLAN_CLEAR_OFFLOAD); + } else if (vlan_depth > HINIC5_MAX_VLAN_DEPTH_OFFLOAD_SUPPORT) { +#ifdef HAVE_NDO_SET_FEATURES +#ifdef HAVE_NDO_SET_U32_FEATURES + set_netdev_hw_features(ndev, + get_netdev_hw_features(ndev) & + (~HINIC5_VLAN_CLEAR_OFFLOAD)); +#else + ndev->hw_features &= (~HINIC5_VLAN_CLEAR_OFFLOAD); +#endif +#endif + ndev->features &= (~HINIC5_VLAN_CLEAR_OFFLOAD); + } + + break; + + default: + break; + }; + +out: + dev_put(ndev); + + return NOTIFY_DONE; +} +#endif + +void hinic5_link_status_change(struct hinic5_nic_dev *nic_dev, bool status) +{ + struct net_device *netdev = nic_dev->netdev; + + if (!HINIC5_CHANNEL_RES_VALID(nic_dev) || + test_bit(HINIC5_LP_TEST, &nic_dev->flags) || + test_bit(HINIC5_FORCE_LINK_UP, &nic_dev->flags)) + return; + + if (status) { + if (netif_carrier_ok(netdev)) + return; + + nic_dev->link_status = status; + netif_carrier_on(netdev); + nicif_info(nic_dev, link, netdev, "Link is up\n"); + } else { + if (!netif_carrier_ok(netdev)) + return; + + nic_dev->link_status = status; + netif_carrier_off(netdev); + nicif_info(nic_dev, link, netdev, "Link is down\n"); + } +} + +static void netdev_vlan_feature_init(struct hinic5_nic_dev *nic_dev, netdev_features_t *vlan_fts) +{ + if (HINIC5_SUPPORT_VLAN_OFFLOAD(nic_dev->hwdev) != 0) { +#if defined(NETIF_F_HW_VLAN_CTAG_TX) + *vlan_fts |= 
NETIF_F_HW_VLAN_CTAG_TX; +#elif defined(NETIF_F_HW_VLAN_TX) + *vlan_fts |= NETIF_F_HW_VLAN_TX; +#endif + +#if defined(NETIF_F_HW_VLAN_CTAG_RX) + *vlan_fts |= NETIF_F_HW_VLAN_CTAG_RX; +#elif defined(NETIF_F_HW_VLAN_RX) + *vlan_fts |= NETIF_F_HW_VLAN_RX; +#endif + } + + if (HINIC5_SUPPORT_RXVLAN_FILTER(nic_dev->hwdev) != 0) { +#if defined(NETIF_F_HW_VLAN_CTAG_FILTER) + *vlan_fts |= NETIF_F_HW_VLAN_CTAG_FILTER; +#elif defined(NETIF_F_HW_VLAN_FILTER) + *vlan_fts |= NETIF_F_HW_VLAN_FILTER; +#endif + } +} + +static void netdev_hw_feature_init(struct net_device *netdev, + struct hinic5_nic_dev *nic_dev, netdev_features_t *hw_features) +{ + /* LRO is disable in default, only set hw features */ + if (HINIC5_SUPPORT_LRO(nic_dev->hwdev) != 0) + *hw_features |= NETIF_F_LRO; + +#if (KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE) + if (HINIC5_SUPPORT_UFO(nic_dev->hwdev) != 0) { + /* UFO is disable in default */ + *hw_features |= NETIF_F_UFO; + netdev->vlan_features |= NETIF_F_UFO; + } +#endif +} + +static void netdev_tso_feature_init(struct hinic5_nic_dev *nic_dev, netdev_features_t *tso_fts) +{ +#ifdef HAVE_ENCAPSULATION_TSO + if (HINIC5_SUPPORT_VXLAN_OFFLOAD(nic_dev->hwdev) || + HINIC5_SUPPORT_GENEVE_OFFLOAD(nic_dev->hwdev)) + *tso_fts |= NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM; + + if (HINIC5_SUPPORT_IPXIP_OFFLOAD(nic_dev->hwdev)) + *tso_fts |= NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_IPXIP6; +#endif /* HAVE_ENCAPSULATION_TSO */ +} + +static void netdev_feature_init(struct net_device *netdev) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + netdev_features_t cso_fts = 0; + netdev_features_t vlan_fts = 0; + netdev_features_t tso_fts = 0; + netdev_features_t hw_features = 0; + netdev_features_t dft_fts = NETIF_F_SG | NETIF_F_HIGHDMA; + + if (HINIC5_SUPPORT_CSUM(nic_dev->hwdev) != 0) + cso_fts |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; + if (HINIC5_SUPPORT_SCTP_CRC(nic_dev->hwdev) != 0) + cso_fts |= NETIF_F_SCTP_CRC; + + if 
(HINIC5_SUPPORT_TSO(nic_dev->hwdev) != 0) + tso_fts |= NETIF_F_TSO | NETIF_F_TSO6; + + netdev_vlan_feature_init(nic_dev, &vlan_fts); + + netdev_tso_feature_init(nic_dev, &tso_fts); + + if (HINIC5_SUPPORT_FEATURE(nic_dev->hwdev, TC_FLOWER_OFFLOAD)) { + netdev->features |= NETIF_F_HW_TC; + hw_features |= NETIF_F_HW_TC; + } + netdev_hw_feature_init(netdev, nic_dev, &hw_features); + + netdev->features |= dft_fts | cso_fts | tso_fts | vlan_fts; + netdev->vlan_features |= dft_fts | cso_fts | tso_fts; + +#ifdef HAVE_NDO_SET_U32_FEATURES + hw_features |= get_netdev_hw_features(netdev); +#else + hw_features |= netdev->hw_features; +#endif + + hw_features |= netdev->features; + +#ifdef HAVE_NDO_SET_U32_FEATURES + set_netdev_hw_features(netdev, hw_features); +#else + netdev->hw_features = hw_features; +#endif + +#ifdef IFF_UNICAST_FLT + netdev->priv_flags |= IFF_UNICAST_FLT; +#endif + +#ifdef HAVE_ENCAPSULATION_CSUM + netdev->hw_enc_features |= dft_fts; + if (HINIC5_SUPPORT_VXLAN_OFFLOAD(nic_dev->hwdev) || + HINIC5_SUPPORT_GENEVE_OFFLOAD(nic_dev->hwdev)) { + netdev->hw_enc_features |= cso_fts & (~NETIF_F_SCTP_CRC); +#ifdef HAVE_ENCAPSULATION_TSO + netdev->hw_enc_features |= tso_fts | NETIF_F_TSO_ECN; +#endif /* HAVE_ENCAPSULATION_TSO */ + } + + /* When the chip does not support parsing IPinIP tunnel packets, + * disable the checksum offloading for inner SCTP. 
+	 */
+	if (HINIC5_SUPPORT_IPXIP_OFFLOAD(nic_dev->hwdev))
+		netdev->hw_enc_features |= NETIF_F_SCTP_CRC;
+#endif /* HAVE_ENCAPSULATION_CSUM */
+#ifdef HAVE_NETDEV_XDP_ACT_NDO_XMIT
+	netdev->xdp_features = NETDEV_XDP_ACT_NDO_XMIT;
+#endif /* HAVE_NETDEV_XDP_ACT_NDO_XMIT */
+}
+
+/* Seed per-queue interrupt coalescing parameters from the module
+ * parameters and the driver's adaptive-moderation thresholds.
+ */
+static void init_intr_coal_param(struct hinic5_nic_dev *nic_dev)
+{
+	struct hinic5_qp_coalesce_info *info = NULL;
+	u16 i;
+
+	for (i = 0; i < nic_dev->max_qps; i++) {
+		info = &nic_dev->intr_coalesce[i];
+
+		info->tx_pending_limt = qp_pending_limit;
+		info->tx_coalesce_timer_cfg = qp_coalesc_timer_cfg;
+		info->rx_pending_limt = qp_pending_limit;
+		info->rx_coalesce_timer_cfg = qp_coalesc_timer_cfg;
+
+		info->pkt_rate_high = HINIC5_RX_RATE_HIGH;
+		info->rx_usecs_high = HINIC5_RX_COAL_TIME_HIGH;
+		info->rx_pending_limt_high = HINIC5_RX_PENDING_LIMIT_HIGH;
+
+		info->pkt_rate_low = HINIC5_RX_RATE_LOW;
+		info->rx_usecs_low = HINIC5_RX_COAL_TIME_LOW;
+		info->rx_pending_limt_low = HINIC5_RX_PENDING_LIMIT_LOW;
+	}
+}
+
+/* Allocate and initialize the per-queue coalescing table.
+ * Returns 0 on success, -EINVAL when there are no queues, -ENOMEM on
+ * allocation failure.
+ */
+static int hinic5_init_intr_coalesce(struct hinic5_nic_dev *nic_dev)
+{
+	if (qp_pending_limit != HINIC5_DEAULT_TXRX_MSIX_PENDING_LIMIT ||
+	    qp_coalesc_timer_cfg != HINIC5_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG)
+		nic_dev->intr_coal_set_flag = 1;
+	else
+		nic_dev->intr_coal_set_flag = 0;
+
+	/* Check the queue count directly instead of the derived size */
+	if (nic_dev->max_qps == 0) {
+		nic_err(nic_dev->lld_dev->dev, "Cannot allocate zero size intr coalesce\n");
+		return -EINVAL;
+	}
+
+	/* kcalloc() zero-fills and guards the count*size multiplication
+	 * against overflow, unlike the open-coded kzalloc(n * size).
+	 */
+	nic_dev->intr_coalesce = kcalloc(nic_dev->max_qps,
+					 sizeof(*nic_dev->intr_coalesce), GFP_KERNEL);
+	if (!nic_dev->intr_coalesce)
+		return -ENOMEM;
+
+	init_intr_coal_param(nic_dev);
+
+	if (test_bit(HINIC5_INTR_ADAPT, &nic_dev->flags) != 0)
+		nic_dev->adaptive_rx_coal = 1;
+	else
+		nic_dev->adaptive_rx_coal = 0;
+
+	return 0;
+}
+
+/* Free the per-queue coalescing table; kfree(NULL) is a no-op. */
+static void hinic5_free_intr_coalesce(struct hinic5_nic_dev *nic_dev)
+{
+	kfree(nic_dev->intr_coalesce);
+	nic_dev->intr_coalesce = NULL;
+}
+
+static int hinic5_alloc_txrxqs(struct hinic5_nic_dev *nic_dev)
+{
+	struct 
net_device *netdev = nic_dev->netdev; + int err; + + err = hinic5_alloc_txqs(netdev); + if (err != 0) { + nic_err(nic_dev->lld_dev->dev, "Failed to alloc txqs\n"); + return err; + } + + err = hinic5_alloc_rxqs(netdev); + if (err != 0) { + nic_err(nic_dev->lld_dev->dev, "Failed to alloc rxqs\n"); + goto alloc_rxqs_err; + } + + err = hinic5_init_intr_coalesce(nic_dev); + if (err != 0) { + nic_err(nic_dev->lld_dev->dev, "Failed to init_intr_coalesce\n"); + goto init_intr_err; + } + + return 0; + +init_intr_err: + hinic5_free_rxqs(netdev); + +alloc_rxqs_err: + hinic5_free_txqs(netdev); + + return err; +} + +static void hinic5_free_txrxqs(struct hinic5_nic_dev *nic_dev) +{ + hinic5_free_intr_coalesce(nic_dev); + hinic5_free_rxqs(nic_dev->netdev); + hinic5_free_txqs(nic_dev->netdev); +} + +static void hinic5_sw_deinit(struct hinic5_nic_dev *nic_dev) +{ + hinic5_free_txrxqs(nic_dev); + + hinic5_clean_mac_list_filter(nic_dev); + + hinic5_del_mac(nic_dev->hwdev, nic_dev->netdev->dev_addr, 0, + hinic5_global_func_id(nic_dev->hwdev), + HINIC5_CHANNEL_NIC); + + hinic5_clear_rss_config(nic_dev); + + if (test_bit(HINIC5_DCB_ENABLE, &nic_dev->flags)) + hinic5_sync_dcb_state(nic_dev->hwdev, 1, 0); +} + +static inline int invalid_mac_address(struct hinic5_nic_dev *nic_dev) +{ + if (!is_valid_ether_addr(nic_dev->netdev->dev_addr)) { + if (!HINIC5_FUNC_IS_VF(nic_dev->hwdev)) + return -EIO; + nic_info(nic_dev->lld_dev->dev, "Invalid MAC address %pM, using random\n", + nic_dev->netdev->dev_addr); + eth_hw_addr_random(nic_dev->netdev); + } + return 0; +} + +static void hinic5_sw_mtu_range_init(struct net_device *netdev) +{ + /* MTU range: 256 - 9600 */ +#ifdef HAVE_NETDEVICE_MIN_MAX_MTU + netdev->min_mtu = HINIC5_MIN_MTU_SIZE; + netdev->max_mtu = HINIC5_MAX_JUMBO_FRAME_SIZE; +#endif + +#ifdef HAVE_NETDEVICE_EXTENDED_MIN_MAX_MTU + netdev->extended->min_mtu = HINIC5_MIN_MTU_SIZE; + netdev->extended->max_mtu = HINIC5_MAX_JUMBO_FRAME_SIZE; +#endif +} + +static void 
hinic5_tx_rx_ops_init(struct hinic5_nic_dev *nic_dev)
+{
+	nic_dev->tx_wqe_compact_task = HINIC5_SUPPORT_TX_WQE_COMPACT_TASK(nic_dev->hwdev);
+
+	/* Pick the TX WQE offload builder matching the hardware capability */
+	if (HINIC5_SUPPORT_TX_WQE_COMPACT_TASK(nic_dev->hwdev))
+		nic_dev->tx_rx_ops.tx_set_wqe_offload = hinic5_tx_set_compact_task_offload;
+	else
+		nic_dev->tx_rx_ops.tx_set_wqe_offload = hinic5_tx_set_normal_task_offload;
+
+	if (hinic5_get_rq_wqe_type(nic_dev->hwdev) == HINIC5_COMPACT_RQ_WQE) {
+		/* 1825/1872: integrated (combined) CQE */
+		nic_dev->tx_rx_ops.rx_get_cqe_info = hinic5_rx_get_compact_cqe_info;
+		nic_dev->cqe_mode = HINIC5_RQ_CQE_INTEGRATE;
+		nic_dev->tx_rx_ops.rx_cqe_done = hinic5_rx_integrated_cqe_done;
+	} else if (HINIC5_SUPPORT_RX_HW_COMPACT_CQE(nic_dev->hwdev)) {
+		/* 1872: separate CQE */
+		nic_dev->tx_rx_ops.rx_get_cqe_info = hinic5_rx_get_compact_cqe_info;
+		nic_dev->cqe_mode = HINIC5_RQ_CQE_SEPARATE;
+		nic_dev->tx_rx_ops.rx_cqe_done = hinic5_rx_separate_cqe_done;
+	} else {
+		/* 1823/1825: separate CQE */
+		nic_dev->tx_rx_ops.rx_get_cqe_info = hinic5_rx_get_cqe_info;
+		nic_dev->cqe_mode = HINIC5_RQ_CQE_SEPARATE;
+		nic_dev->tx_rx_ops.rx_cqe_done = hinic5_rx_separate_cqe_done;
+	}
+}
+
+/* Read the firmware's default CoS for this function and record whether
+ * it is valid plus its masked value.
+ */
+static void hinic5_set_hw_default_cos(struct hinic5_nic_dev *nic_dev)
+{
+	u8 hw_default_cos;
+
+	hw_default_cos = hinic5_func_dev_default_cos(nic_dev->hwdev);
+	nic_dev->hw_default_cos_valid = HW_DEFAULT_COS_IS_VALID(hw_default_cos);
+	nic_dev->hw_default_cos = hw_default_cos & HW_DEFAULT_COS_VALID_BIT;
+}
+
+/* Software-side device init: feature negotiation, DCB, RSS, MAC setup,
+ * MTU range and queue allocation. Returns 0 on success or a negative
+ * error code with partial state rolled back.
+ */
+static int hinic5_sw_init(struct hinic5_nic_dev *nic_dev)
+{
+	struct net_device *netdev = nic_dev->netdev;
+	u64 nic_features;
+	u8 mac_temp[ETH_ALEN];
+	int err = 0;
+
+	nic_features = hinic5_get_feature_cap(nic_dev->hwdev);
+	/* You can update the features supported by the driver according to the
+	 * scenario here
+	 */
+	hinic5_update_nic_feature(nic_dev->hwdev, nic_features & NIC_DRV_DEFAULT_FEATURE);
+
+	sema_init(&nic_dev->port_state_sem, 1);
+
+	nic_dev->cos_mask_mode = hinic5_func_cos_mask_mode(nic_dev->hwdev);
+
+	
hinic5_set_hw_default_cos(nic_dev); + + err = hinic5_dcb_init(nic_dev); + if (err != 0) { + nic_err(nic_dev->lld_dev->dev, "Failed to init dcb\n"); + return -EFAULT; + } + + nic_dev->q_params.sq_depth = HINIC5_SQ_DEPTH; + nic_dev->q_params.rq_depth = HINIC5_RQ_DEPTH; + + hinic5_try_to_enable_rss(nic_dev); + + err = hinic5_get_default_mac(nic_dev->hwdev, mac_temp); + if (err != 0) { + nic_err(nic_dev->lld_dev->dev, "Failed to get MAC address\n"); + goto err_mac; + } + + hinic5_eth_hw_addr_set(nic_dev->netdev, mac_temp); + + err = invalid_mac_address(nic_dev); + if (err != 0) { + nic_err(nic_dev->lld_dev->dev, "Invalid MAC address %pM\n", netdev->dev_addr); + goto err_mac; + } + + err = hinic5_set_mac(nic_dev->hwdev, netdev->dev_addr, 0, + hinic5_global_func_id(nic_dev->hwdev), HINIC5_CHANNEL_NIC); + /* When this is VF driver, we must consider that PF has already set VF + * MAC, and we can't consider this condition is error status during + * driver probe procedure. + */ + if (err != 0 && err != HINIC5_PF_SET_VF_ALREADY) { + nic_err(nic_dev->lld_dev->dev, "Failed to set default MAC\n"); + goto err_mac; + } + + hinic5_sw_mtu_range_init(netdev); + + err = hinic5_alloc_txrxqs(nic_dev); + if (err != 0) { + nic_err(nic_dev->lld_dev->dev, "Failed to alloc qps\n"); + goto alloc_qps_err; + } + + hinic5_tx_rx_ops_init(nic_dev); + + return 0; + +alloc_qps_err: + hinic5_del_mac(nic_dev->hwdev, netdev->dev_addr, 0, + hinic5_global_func_id(nic_dev->hwdev), + HINIC5_CHANNEL_NIC); + +err_mac: + hinic5_clear_rss_config(nic_dev); + + return err; +} + +static void hinic5_assign_netdev_ops(struct hinic5_nic_dev *adapter) +{ + hinic5_set_netdev_ops(adapter); + if (!HINIC5_FUNC_IS_VF(adapter->hwdev)) + hinic5_set_ethtool_ops(adapter->netdev); + else + hinic5vf_set_ethtool_ops(adapter->netdev); + + adapter->netdev->watchdog_timeo = WATCHDOG_TIMEOUT * HZ; +} + +static int hinic5_validate_parameters(struct hinic5_lld_dev *lld_dev) +{ + /* If weight exceeds the queue depth, the queue 
resources will be + * exhausted, and increasing it has no effect. + */ + + hinic5_main_param_validate(); + hinic5_nic_io_param_validate(); + + return 0; +} + +static void decide_intr_cfg(struct hinic5_nic_dev *nic_dev) +{ + set_bit(HINIC5_INTR_ADAPT, &nic_dev->flags); +} + +static void adaptive_configuration_init(struct hinic5_nic_dev *nic_dev) +{ + decide_intr_cfg(nic_dev); +} + +static int set_interrupt_moder(struct hinic5_nic_dev *nic_dev, u16 q_id, + u8 coalesc_timer_cfg, u8 pending_limt) +{ + struct hinic5_qp_coalesce_info coalesce_info; + int err; + + if (coalesc_timer_cfg == nic_dev->rxqs[q_id].last_coalesc_timer_cfg && + pending_limt == nic_dev->rxqs[q_id].last_pending_limt) + return 0; + + /* netdev not running or qp not in using, + * don't need to set coalesce to hw + */ + if (!HINIC5_CHANNEL_RES_VALID(nic_dev) || + q_id >= nic_dev->q_params.num_qps) + return 0; + + memset(&coalesce_info, 0, sizeof(coalesce_info)); + coalesce_info.rx_coalesce_timer_cfg = coalesc_timer_cfg; + coalesce_info.rx_pending_limt = pending_limt; + coalesce_info.tx_coalesce_timer_cfg = coalesc_timer_cfg; + coalesce_info.tx_pending_limt = pending_limt; + + err = hinic5_set_sq_rq_coalesce_cfg(nic_dev->hwdev, q_id, HINIC5_SQ_RQ_COALESCE, + &coalesce_info); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to modify moderation for Queue: %u\n", q_id); + } else { + nic_dev->rxqs[q_id].last_coalesc_timer_cfg = coalesc_timer_cfg; + nic_dev->rxqs[q_id].last_pending_limt = pending_limt; + } + + return err; +} + +static void calc_coal_para(struct hinic5_nic_dev *nic_dev, + struct hinic5_qp_coalesce_info *q_coal, u64 rx_rate, + u8 *coalesc_timer_cfg, u8 *pending_limt) +{ + if (rx_rate < q_coal->pkt_rate_low) { + *coalesc_timer_cfg = q_coal->rx_usecs_low; + *pending_limt = q_coal->rx_pending_limt_low; + } else if (rx_rate > q_coal->pkt_rate_high) { + *coalesc_timer_cfg = q_coal->rx_usecs_high; + *pending_limt = q_coal->rx_pending_limt_high; + } else { + *coalesc_timer_cfg 
= + (u8)((rx_rate - q_coal->pkt_rate_low) * + (q_coal->rx_usecs_high - q_coal->rx_usecs_low) / + (q_coal->pkt_rate_high - q_coal->pkt_rate_low) + + q_coal->rx_usecs_low); + + *pending_limt = + (u8)((rx_rate - q_coal->pkt_rate_low) * + (q_coal->rx_pending_limt_high - q_coal->rx_pending_limt_low) / + (q_coal->pkt_rate_high - q_coal->pkt_rate_low) + + q_coal->rx_pending_limt_low); + } +} + +static void update_queue_coal(struct hinic5_nic_dev *nic_dev, u16 qid, + u64 rx_rate, u64 avg_pkt_size, u64 tx_rate) +{ + struct hinic5_qp_coalesce_info *q_coal = NULL; + u8 coalesc_timer_cfg, pending_limt; + + q_coal = &nic_dev->intr_coalesce[qid]; + + if (rx_rate > HINIC5_RX_RATE_THRESH && avg_pkt_size > HINIC5_AVG_PKT_SMALL) { + calc_coal_para(nic_dev, q_coal, rx_rate, &coalesc_timer_cfg, &pending_limt); + } else { + coalesc_timer_cfg = HINIC5_LOWEST_LATENCY; + pending_limt = q_coal->rx_pending_limt_low; + } + + set_interrupt_moder(nic_dev, qid, coalesc_timer_cfg, pending_limt); +} + +void hinic5_auto_moderation_work(struct work_struct *work) +{ + struct delayed_work *delay = to_delayed_work(work); + struct hinic5_nic_dev *nic_dev = container_of(delay, + struct hinic5_nic_dev, + moderation_task); + unsigned long period = (unsigned long)(jiffies - + nic_dev->last_moder_jiffies); + u64 rx_packets, rx_bytes, rx_pkt_diff, rx_rate, avg_pkt_size; + u64 tx_packets, tx_bytes, tx_pkt_diff, tx_rate; + u16 qid; + + if (test_bit(HINIC5_INTF_UP, &nic_dev->flags) == 0) + return; + + queue_delayed_work(nic_dev->workq, &nic_dev->moderation_task, + HINIC5_MODERATONE_DELAY); + + if (nic_dev->adaptive_rx_coal == 0 || period == 0) + return; + + for (qid = 0; qid < nic_dev->q_params.num_qps; qid++) { + rx_packets = nic_dev->rxqs[qid].rxq_stats.packets; + rx_bytes = nic_dev->rxqs[qid].rxq_stats.bytes; + tx_packets = nic_dev->txqs[qid].txq_stats.packets; + tx_bytes = nic_dev->txqs[qid].txq_stats.bytes; + + rx_pkt_diff = + rx_packets - nic_dev->rxqs[qid].last_moder_packets; + avg_pkt_size = 
(rx_pkt_diff != 0) ? + ((unsigned long)(rx_bytes - + nic_dev->rxqs[qid].last_moder_bytes)) / + rx_pkt_diff : 0; + + rx_rate = rx_pkt_diff * HZ / period; + tx_pkt_diff = + tx_packets - nic_dev->txqs[qid].last_moder_packets; + tx_rate = tx_pkt_diff * HZ / period; + + update_queue_coal(nic_dev, qid, rx_rate, avg_pkt_size, + tx_rate); + + nic_dev->rxqs[qid].last_moder_packets = rx_packets; + nic_dev->rxqs[qid].last_moder_bytes = rx_bytes; + nic_dev->txqs[qid].last_moder_packets = tx_packets; + nic_dev->txqs[qid].last_moder_bytes = tx_bytes; + } + + nic_dev->last_moder_jiffies = jiffies; +} + +static void hinic5_periodic_work_handler(struct work_struct *work) +{ + struct delayed_work *delay = to_delayed_work(work); + struct hinic5_nic_dev *nic_dev = container_of(delay, struct hinic5_nic_dev, periodic_work); + + if (test_and_clear_bit(EVENT_WORK_TX_TIMEOUT, &nic_dev->event_flag) != 0) + hinic5_fault_event_report(nic_dev->hwdev, HINIC5_FAULT_SRC_TX_TIMEOUT, + FAULT_LEVEL_SERIOUS_FLR); + + queue_delayed_work(nic_dev->workq, &nic_dev->periodic_work, HZ); +} + +void hinic5_arp_dual_work(struct work_struct *work) +{ + int ret; + struct hinic5_nic_dev *nic_dev = + container_of(work, struct hinic5_nic_dev, arp_dual_work); + struct sk_buff *skb = NULL; + struct hinic5_arp_pkt_info info = {0}; + struct hinic5_txq *txq = NULL; + u16 q_id; + u32 pkt_len; + + while ((skb = skb_dequeue(&nic_dev->arp_queue)) != NULL) { + q_id = skb_get_queue_mapping(skb); + pkt_len = skb->len; + info.pkt_length = (u16)skb->len; + info.origin_queue_id = q_id; + info.func_id = hinic5_global_func_id(nic_dev->hwdev); + + if (pkt_len > HINIC5_ARP_PKT_MAX_LEN) { + kfree_skb(skb); + txq = &nic_dev->txqs[q_id]; + u64_stats_update_begin(&txq->txq_stats.syncp); + txq->txq_stats.dropped++; + u64_stats_update_end(&txq->txq_stats.syncp); + continue; + } + ret = skb_copy_bits(skb, 0, info.pkt_buf, (int)pkt_len); + if (ret < 0) { + kfree_skb(skb); + nic_err(nic_dev->lld_dev->dev, "Copy skb failed, ret:%d.\n", ret); 
+ continue; + } + kfree_skb(skb); + + ret = hinic5_send_arp_to_mpu(nic_dev->hwdev, &info); + if (ret < 0) + nic_err(nic_dev->lld_dev->dev, "Send ARP to mpu failed, ret:%d.\n", ret); + } +} + +static int init_nic_dev_hinic5_vram(struct hinic5_nic_dev *nic_dev) +{ + int is_in_kexec = hinic5_vram_get_kexec_flag(); + int is_use_hinic5_vram = get_use_hinic5_vram_flag(); + u16 func_id; + int ret; + + if (is_use_hinic5_vram != 0) { + func_id = hinic5_global_func_id(nic_dev->hwdev); + ret = snprintf(nic_dev->nic_hinic5_vram_name, HINIC5_VRAM_NAME_MAX_LEN, + "%s%hu", HINIC5_VRAM_NIC_HINIC5_VRAM, func_id); + if (ret < 0) { + nic_err(nic_dev->lld_dev->dev, + "NIC hinic5_vram name snprintf_s failed, ret:%d.\n", ret); + return -EINVAL; + } + + nic_dev->nic_hinic5_vram = + (struct hinic5_hinic5_vram *) + hinic5_hinic5_vram_kalloc(nic_dev->nic_hinic5_vram_name, + sizeof(struct hinic5_hinic5_vram)); + if (!nic_dev->nic_hinic5_vram) { + nic_err(nic_dev->lld_dev->dev, "Failed to allocate nic hinic5_vram\n"); + return -ENOMEM; + } + + if (is_in_kexec == 0) + nic_dev->nic_hinic5_vram->hinic5_vram_mtu = nic_dev->netdev->mtu; + else + nic_dev->netdev->mtu = nic_dev->nic_hinic5_vram->hinic5_vram_mtu; + } else { + nic_dev->nic_hinic5_vram = kzalloc(sizeof(struct hinic5_hinic5_vram), GFP_KERNEL); + if (!nic_dev->nic_hinic5_vram) + return -ENOMEM; + nic_dev->nic_hinic5_vram->hinic5_vram_mtu = nic_dev->netdev->mtu; + } + + return 0; +} + +static void free_nic_dev_hinic5_vram(struct hinic5_nic_dev *nic_dev) +{ + int is_use_hinic5_vram = get_use_hinic5_vram_flag(); + + if (is_use_hinic5_vram != 0) + hinic5_hinic5_vram_kfree((void *)nic_dev->nic_hinic5_vram, + nic_dev->nic_hinic5_vram_name, + sizeof(struct hinic5_hinic5_vram)); + else + kfree(nic_dev->nic_hinic5_vram); + nic_dev->nic_hinic5_vram = NULL; +} + +static void free_nic_dev(struct hinic5_nic_dev *nic_dev) +{ + destroy_workqueue(nic_dev->workq); + kfree(nic_dev->vlan_bitmap); + nic_dev->vlan_bitmap = NULL; + 
free_nic_dev_hinic5_vram(nic_dev); +} + +static void nic_dev_init(struct hinic5_nic_dev *nic_dev, struct net_device *netdev, + struct hinic5_lld_dev *lld_dev) +{ + u8 rx_buff_per_page = RX_BUFF_NUM_PER_PAGE; + u32 page_num; + +#ifdef HAVE_PAGE_POOL_SUPPORT + /* If page pool is enabled, page reuse not supported */ + rx_buff_per_page = page_pool_enabled ? 1 : RX_BUFF_NUM_PER_PAGE; +#endif + + nic_dev->netdev = netdev; + SET_NETDEV_DEV(netdev, lld_dev->dev); + nic_dev->lld_dev = lld_dev; + nic_dev->hwdev = lld_dev->hwdev; + nic_dev->poll_weight = (int)poll_weight; + nic_dev->msg_enable = DEFAULT_MSG_ENABLE; + nic_dev->lro_replenish_thld = lro_replenish_thld; + nic_dev->rx_buff_len = (u16)(rx_buff * CONVERT_UNIT); + nic_dev->dma_rx_buff_size = rx_buff_per_page * nic_dev->rx_buff_len; + page_num = nic_dev->dma_rx_buff_size / PAGE_SIZE; + nic_dev->page_order = (page_num > 0) ? ilog2(page_num) : 0; + nic_dev->page_pool_enabled = page_pool_enabled; + nic_dev->support_htn = hinic5_support_htn(nic_dev->hwdev); +} + +static void init_list_head(struct hinic5_nic_dev *nic_dev) +{ + INIT_LIST_HEAD(&nic_dev->uc_filter_list); + INIT_LIST_HEAD(&nic_dev->mc_filter_list); + INIT_LIST_HEAD(&nic_dev->rx_flow_rule.rules); + INIT_LIST_HEAD(&nic_dev->tcam.tcam_list); + INIT_LIST_HEAD(&nic_dev->tcam.tcam_dynamic_info.tcam_dynamic_list); +} + +static int setup_nic_dev(struct net_device *netdev, + struct hinic5_lld_dev *lld_dev) +{ + struct hinic5_nic_dev *nic_dev = (struct hinic5_nic_dev *)netdev_priv(netdev); + int ret; + + nic_dev_init(nic_dev, netdev, lld_dev); + + ret = init_nic_dev_hinic5_vram(nic_dev); + if (ret != 0) + return ret; + + mutex_init(&nic_dev->nic_mutex); + + nic_dev->vlan_bitmap = kzalloc(VLAN_BITMAP_SIZE(nic_dev), GFP_KERNEL); + if (nic_dev->vlan_bitmap == 0) { + nic_err(lld_dev->dev, "Failed to allocate vlan bitmap\n"); + ret = -ENOMEM; + goto vlan_bitmap_error; + } + + nic_dev->workq = create_singlethread_workqueue(HINIC5_NIC_DEV_WQ_NAME); + if (!nic_dev->workq) { + 
nic_err(lld_dev->dev, "Failed to initialize nic workqueue\n"); + ret = -ENOMEM; + goto create_workq_error; + } + + INIT_DELAYED_WORK(&nic_dev->periodic_work, hinic5_periodic_work_handler); + INIT_DELAYED_WORK(&nic_dev->rxq_check_work, hinic5_rxq_check_work_handler); + init_list_head(nic_dev); + skb_queue_head_init(&nic_dev->arp_queue); + INIT_WORK(&nic_dev->rx_mode_work, hinic5_set_rx_mode_work); + INIT_WORK(&nic_dev->arp_dual_work, hinic5_arp_dual_work); + + return 0; + +create_workq_error: + kfree(nic_dev->vlan_bitmap); + nic_dev->vlan_bitmap = NULL; +vlan_bitmap_error: + free_nic_dev_hinic5_vram(nic_dev); + return ret; +} + +static int hinic5_set_default_hw_feature(struct hinic5_nic_dev *nic_dev) +{ + int err; + + if (!HINIC5_FUNC_IS_VF(nic_dev->hwdev)) { + hinic5_dcb_reset_hw_config(nic_dev); + + if (set_link_status_follow < HINIC5_LINK_FOLLOW_STATUS_MAX) { + err = hinic5_set_link_status_follow(nic_dev->hwdev, + set_link_status_follow); + if (err == HINIC5_MGMT_CMD_UNSUPPORTED) + nic_warn(nic_dev->lld_dev->dev, + "Current version of firmware doesn't support to set link status follow port status\n"); + } + } + + err = hinic5_set_nic_feature_to_hw(nic_dev->hwdev); + if (err != 0) { + nic_err(nic_dev->lld_dev->dev, "Failed to set nic features\n"); + return err; + } + + /* enable all hw features in netdev->features */ + err = hinic5_set_hw_features(nic_dev); + if (err != 0) { + hinic5_update_nic_feature(nic_dev->hwdev, 0); + hinic5_set_nic_feature_to_hw(nic_dev->hwdev); + return err; + } + + if (HINIC5_SUPPORT_RXQ_RECOVERY(nic_dev->hwdev) != 0) + set_bit(HINIC5_RXQ_RECOVERY, &nic_dev->flags); + + return 0; +} + +static int nic_init_for_hotreplace(struct hinic5_lld_dev *lld_dev, struct hinic5_nic_dev *nic_dev) +{ + int is_use_hinic5_vram = get_use_hinic5_vram_flag(); + int is_in_kexec = hinic5_vram_get_kexec_flag(); + int err; + + /* register netdev flush ops, required only in sdinanoos hotreplace */ + if (is_use_hinic5_vram != 0) { + err = 
hiudk5_register_flush_fn(lld_dev, hinic5_flush_nic_dev); + if (err != 0) { + nic_err(lld_dev->dev, "Failed to register netdev flush ops, err:%d.\n", + err); + return err; + } + } + + if (is_in_kexec != 0 && + test_bit(HINIC5_DCB_ENABLE, &nic_dev->nic_hinic5_vram->flags) != 0) { + err = hinic5_configure_dcb_hw(nic_dev, 1); + if (err != 0) { + nic_err(lld_dev->dev, "Failed to enable dcb during sdinanoos-hotreplace\n"); + hiudk5_unregister_flush_fn(lld_dev); + return err; + } + nic_info(lld_dev->dev, "Enable dcb success during sdinanoos-hotreplace\n"); + } + + return 0; +} + +#define hinic5_set_dpath_timeout(nic_dev, hw_type) { \ + (nic_dev)->timeout.wait_flush_qp_res_timeout = \ + HINIC5_GET_TIMEOUT(hw_type, WAIT_FLUSH_QP_RESOURCE); \ +} + +static void hinic5_init_dpath_timeout(struct hinic5_nic_dev *nic_dev) +{ + u8 hw_type; + + hw_type = hinic5_get_hw_type(nic_dev->hwdev); + if (hw_type == HINIC5_HW_TYPE_FPGA) { + hinic5_set_dpath_timeout(nic_dev, FPGA); + } else if (hw_type == HINIC5_HW_TYPE_ASIC) { + hinic5_set_dpath_timeout(nic_dev, ASIC); + } else if (hw_type == HINIC5_HW_TYPE_EMU) { + hinic5_set_dpath_timeout(nic_dev, EMU); + } else if (hw_type == HINIC5_HW_TYPE_EDA) { + hinic5_set_dpath_timeout(nic_dev, EDA); + } else { + hinic5_set_dpath_timeout(nic_dev, FPGA); + } +} + +__weak int hinic5_probe_extend_hook(struct net_device *netdev) +{ + return 0; +} + +__weak void hinic5_remove_extend_hook(struct net_device *netdev) +{ +} + +static int nic_probe(struct hinic5_lld_dev *lld_dev, void **uld_dev, + char *uld_dev_name) +{ + struct hinic5_nic_dev *nic_dev = NULL; + struct net_device *netdev = NULL; + u16 glb_func_id; + int err; + + if (!hinic5_support_nic(lld_dev->hwdev, NULL)) { + nic_info(lld_dev->dev, "Hw don't support nic\n"); + return 0; + } + + nic_info(lld_dev->dev, "NIC service probe begin\n"); + + err = hinic5_validate_parameters(lld_dev); + if (err != 0) + goto err_out; + + glb_func_id = hinic5_global_func_id(lld_dev->hwdev); + err = 
hinic5_func_reset(lld_dev->hwdev, glb_func_id, HINIC5_NIC_RES, HINIC5_CHANNEL_NIC); + if (err != 0) { + nic_err(lld_dev->dev, "Failed to reset function\n"); + goto err_out; + } + + netdev = alloc_etherdev_mq(sizeof(*nic_dev), hinic5_func_max_nic_qnum(lld_dev->hwdev)); + if (!netdev) { + nic_err(lld_dev->dev, "Failed to allocate ETH device\n"); + err = -ENOMEM; + goto err_out; + } + + nic_dev = (struct hinic5_nic_dev *)netdev_priv(netdev); + err = setup_nic_dev(netdev, lld_dev); + if (err != 0) + goto setup_dev_err; + + adaptive_configuration_init(nic_dev); + + hinic5_init_dpath_timeout(nic_dev); + + /* get nic cap from hw */ + hinic5_support_nic(lld_dev->hwdev, &nic_dev->nic_cap); + + err = hinic5_init_nic_hwdev(nic_dev->hwdev, lld_dev->dev, nic_dev->rx_buff_len); + if (err != 0) { + nic_err(lld_dev->dev, "Failed to init nic hwdev\n"); + goto init_nic_hwdev_err; + } + + err = hinic5_sw_init(nic_dev); + if (err != 0) + goto sw_init_err; + hinic5_assign_netdev_ops(nic_dev); + netdev_feature_init(netdev); +#ifdef HAVE_UDP_TUNNEL_NIC_INFO + netdev->udp_tunnel_nic_info = &hinic5_udp_tunnels; +#endif /* HAVE_UDP_TUNNEL_NIC_INFO */ + err = hinic5_set_default_hw_feature(nic_dev); + if (err != 0) + goto set_features_err; + + err = hinic5_probe_extend_hook(netdev); + if (err != 0) + goto probe_hook_err; + + if (register_netdev(netdev) != 0) { + nic_err(lld_dev->dev, "Failed to register netdev\n"); + err = -ENOMEM; + goto netdev_err; + } + +#ifdef HAVE_MULTI_VLAN_OFFLOAD_EN + hinic5_register_notifier(nic_dev); +#endif + + queue_delayed_work(nic_dev->workq, &nic_dev->periodic_work, HZ); + netif_carrier_off(netdev); + + hinic5_ptp_init(nic_dev); + +#if (KERNEL_VERSION(5, 1, 1) <= LINUX_VERSION_CODE) + if (HINIC5_SUPPORT_FEATURE(nic_dev->hwdev, TC_FLOWER_OFFLOAD)) { + err = hinic5_init_tc(nic_dev); + if (err != 0) + goto hinic5_init_tc_err; + } +#endif + + if (macsec_enabled && HINIC5_SUPPORT_FEATURE(nic_dev->hwdev, MACSEC_OFFLOAD)) { + err = macsec_init_offload(nic_dev); + if 
(err != 0) + goto hinic5_init_macsec_err; + } + + err = nic_init_for_hotreplace(lld_dev, nic_dev); + if (err != 0) + goto init_hotreplace_err; + + *uld_dev = nic_dev; + + nicif_info(nic_dev, probe, netdev, "Register netdev succeed\n"); + nic_info(lld_dev->dev, "NIC service probed\n"); + + return 0; + +init_hotreplace_err: + if (macsec_enabled && HINIC5_SUPPORT_FEATURE(nic_dev->hwdev, MACSEC_OFFLOAD)) + macsec_cleanup_offload(nic_dev); + +hinic5_init_macsec_err: +#if (KERNEL_VERSION(5, 1, 1) <= LINUX_VERSION_CODE) + if (HINIC5_SUPPORT_FEATURE(nic_dev->hwdev, TC_FLOWER_OFFLOAD)) + hinic5_deinit_tc(nic_dev); +hinic5_init_tc_err: +#endif + hinic5_ptp_deinit(nic_dev); + + unregister_netdev(netdev); +netdev_err: + hinic5_remove_extend_hook(netdev); +probe_hook_err: +#ifdef HAVE_MULTI_VLAN_OFFLOAD_EN + hinic5_unregister_notifier(nic_dev); +#endif + hinic5_update_nic_feature(nic_dev->hwdev, 0); + hinic5_set_nic_feature_to_hw(nic_dev->hwdev); + +set_features_err: + hinic5_sw_deinit(nic_dev); + +sw_init_err: + hinic5_free_nic_hwdev(nic_dev->hwdev); + +init_nic_hwdev_err: + free_nic_dev(nic_dev); +setup_dev_err: + free_netdev(netdev); + +err_out: + nic_err(lld_dev->dev, "NIC service probe failed\n"); + + return err; +} + +static void nic_remove(struct hinic5_lld_dev *lld_dev, void *adapter) +{ + struct hinic5_nic_dev *nic_dev = adapter; + struct net_device *netdev = NULL; + int is_use_hinic5_vram = get_use_hinic5_vram_flag(); + + if (!nic_dev || !hinic5_support_nic(lld_dev->hwdev, NULL)) + return; + + nic_info(lld_dev->dev, "NIC service remove begin\n"); + + hinic5_ptp_deinit(nic_dev); + + netdev = nic_dev->netdev; + + if (macsec_enabled && HINIC5_SUPPORT_FEATURE(nic_dev->hwdev, MACSEC_OFFLOAD)) + macsec_cleanup_offload(nic_dev); + +#if (KERNEL_VERSION(5, 1, 1) <= LINUX_VERSION_CODE) + if (HINIC5_SUPPORT_FEATURE(nic_dev->hwdev, TC_FLOWER_OFFLOAD)) + hinic5_deinit_tc(nic_dev); +#endif + +#ifdef HAVE_XDP_SUPPORT + nic_dev->remove_flag = true; +#endif + /* 内核函数注销该网络设备, + * 
并释放队列、挂载的xdp程序等相关资源 + */ + unregister_netdev(netdev); + +#ifdef HAVE_XDP_SUPPORT + nic_dev->remove_flag = false; +#endif + + hinic5_remove_extend_hook(netdev); + +#ifdef HAVE_MULTI_VLAN_OFFLOAD_EN + hinic5_unregister_notifier(nic_dev); +#endif + + cancel_delayed_work_sync(&nic_dev->periodic_work); + cancel_delayed_work_sync(&nic_dev->rxq_check_work); + cancel_work_sync(&nic_dev->rx_mode_work); + cancel_work_sync(&nic_dev->arp_dual_work); + skb_queue_purge(&nic_dev->arp_queue); + destroy_workqueue(nic_dev->workq); + + hinic5_flush_rx_flow_rule(nic_dev); + + hinic5_update_nic_feature(nic_dev->hwdev, 0); + hinic5_set_nic_feature_to_hw(nic_dev->hwdev); + + hinic5_sw_deinit(nic_dev); + + hinic5_free_nic_hwdev(nic_dev->hwdev); + + kfree(nic_dev->vlan_bitmap); + nic_dev->vlan_bitmap = NULL; + + if (is_use_hinic5_vram != 0) + hinic5_hinic5_vram_kfree((void *)nic_dev->nic_hinic5_vram, + nic_dev->nic_hinic5_vram_name, + sizeof(struct hinic5_hinic5_vram)); + else + kfree(nic_dev->nic_hinic5_vram); + + free_netdev(netdev); + + if (is_use_hinic5_vram != 0) + hiudk5_unregister_flush_fn(lld_dev); + + nic_info(lld_dev->dev, "NIC service removed\n"); +} + +static void sriov_state_change(struct hinic5_nic_dev *nic_dev, + const struct hinic5_sriov_state_info *info) +{ + /* todo: ubus场景支持单独disable某一个vf, + * nic_dev中记录真实active的vf数目,pcie和ubus逻辑归一 + */ + if (info->enable == 0) + hinic5_clear_vfs_info(nic_dev->hwdev, info->vf_id, info->vf_id); +} + +static void hinic5_port_module_event_handler(struct hinic5_nic_dev *nic_dev, + struct hinic5_event_info *event) +{ + const char *g_hinic5_module_link_err[LINK_ERR_NUM] = { "Unrecognized module" }; + struct hinic5_port_module_event *module_event = (void *)event->event_data; + enum port_module_event_type type = module_event->type; + enum link_err_type err_type = module_event->err_type; + + switch (type) { + case HINIC5_PORT_MODULE_CABLE_PLUGGED: + case HINIC5_PORT_MODULE_CABLE_UNPLUGGED: + nicif_info(nic_dev, link, nic_dev->netdev, + "Port 
module event: Cable %s\n", + type == HINIC5_PORT_MODULE_CABLE_PLUGGED ? + "plugged" : "unplugged"); + break; + case HINIC5_PORT_MODULE_LINK_ERR: + if (err_type >= LINK_ERR_NUM) { + nicif_info(nic_dev, link, nic_dev->netdev, + "Link failed, Unknown error type: 0x%x\n", + err_type); + } else { + nicif_info(nic_dev, link, nic_dev->netdev, + "Link failed, error type: 0x%x: %s\n", + err_type, + g_hinic5_module_link_err[err_type]); + } + break; + default: + nicif_err(nic_dev, link, nic_dev->netdev, + "Unknown port module type %d\n", type); + break; + } +} + +static void nic_event(struct hinic5_lld_dev *lld_dev, void *adapter, + struct hinic5_event_info *event) +{ + struct hinic5_nic_dev *nic_dev = adapter; + struct hinic5_fault_event *fault = NULL; + + if (!nic_dev || !event || !hinic5_support_nic(lld_dev->hwdev, NULL)) + return; + + switch (HINIC5_SRV_EVENT_TYPE(event->service, event->type)) { + case HINIC5_SRV_EVENT_TYPE(EVENT_SRV_NIC, EVENT_NIC_LINK_DOWN): + hinic5_link_status_change(nic_dev, false); + break; + case HINIC5_SRV_EVENT_TYPE(EVENT_SRV_NIC, EVENT_NIC_LINK_UP): + hinic5_link_status_change(nic_dev, true); + break; + case HINIC5_SRV_EVENT_TYPE(EVENT_SRV_NIC, EVENT_NIC_PORT_MODULE_EVENT): + hinic5_port_module_event_handler(nic_dev, event); + break; + case HINIC5_SRV_EVENT_TYPE(EVENT_SRV_COMM, EVENT_COMM_SRIOV_STATE_CHANGE): + sriov_state_change(nic_dev, (void *)event->event_data); + break; + case HINIC5_SRV_EVENT_TYPE(EVENT_SRV_COMM, EVENT_COMM_FAULT): + fault = (void *)event->event_data; + if (fault->fault_level == FAULT_LEVEL_SERIOUS_FLR && + fault->event.chip.func_id == hinic5_global_func_id(lld_dev->hwdev)) + hinic5_link_status_change(nic_dev, false); + break; + case HINIC5_SRV_EVENT_TYPE(EVENT_SRV_COMM, EVENT_COMM_PCIE_LINK_DOWN): + case HINIC5_SRV_EVENT_TYPE(EVENT_SRV_COMM, EVENT_COMM_HEART_LOST): + case HINIC5_SRV_EVENT_TYPE(EVENT_SRV_COMM, EVENT_COMM_MGMT_WATCHDOG): + hinic5_link_status_change(nic_dev, false); + break; + default: + break; + } +} + 
+struct net_device *hinic5_get_netdev_by_lld(struct hinic5_lld_dev *lld_dev) +{ + struct hinic5_nic_dev *nic_dev = NULL; + + if (!lld_dev || !hinic5_support_nic(lld_dev->hwdev, NULL)) + return NULL; + + nic_dev = hinic5_get_uld_dev_unsafe(lld_dev, SERVICE_T_NIC); + if (!nic_dev) { + nic_err(lld_dev->dev, + "There's no net device attached on the pci device"); + return NULL; + } + + return nic_dev->netdev; +} +EXPORT_SYMBOL(hinic5_get_netdev_by_lld); + +struct hinic5_lld_dev *hinic5_get_lld_dev_by_netdev(struct net_device *netdev) +{ + struct hinic5_nic_dev *nic_dev = NULL; + + if (!netdev || !hinic5_is_netdev_ops_match(netdev)) + return NULL; + + nic_dev = netdev_priv(netdev); + if (!nic_dev) + return NULL; + + return nic_dev->lld_dev; +} +EXPORT_SYMBOL(hinic5_get_lld_dev_by_netdev); + +int hinic5_get_phy_port_id_by_netdev(struct net_device *netdev, uint8_t *phy_port_id) +{ + struct hinic5_lld_dev *lld_dev = NULL; + + if (!netdev || !phy_port_id) + return -EINVAL; + + lld_dev = hinic5_get_lld_dev_by_netdev(netdev); + if (!lld_dev) + return -ENXIO; + + *phy_port_id = hinic5_physical_port_id(lld_dev->hwdev); + + return 0; +} + +void *hinic5_netdev_priv_get(const struct net_device *dev) +{ + struct hinic5_nic_dev *nic_dev = NULL; + + if (!dev) + return NULL; + + nic_dev = netdev_priv(dev); + + return nic_dev->extend; +} + +int hinic5_netdev_priv_set(const struct net_device *dev, void *priv) +{ + struct hinic5_nic_dev *nic_dev = NULL; + + if (!dev || !priv) + return -EINVAL; + + nic_dev = netdev_priv(dev); + nic_dev->extend = priv; + + return 0; +} + +struct hinic5_uld_info g_nic_uld_info = { + .probe = nic_probe, + .remove = nic_remove, + .suspend = NULL, + .resume = NULL, + .event = nic_event, + .ioctl = nic_ioctl, +}; + +struct hinic5_uld_info *get_nic_uld_info(void) +{ + return &g_nic_uld_info; +} + +#define HINIC5_NIC_DRV_DESC "Intelligent Network Interface Card Driver" + +static __init int hinic5_nic_lld_init(void) +{ + int err; + + pr_info("%s - version %s\n", 
HINIC5_NIC_DRV_DESC, + HINIC5_NIC_DRV_VERSION); + + err = hinic5_lld_init(); + if (err) { + pr_err("SDK failed.\n"); + return err; + } + + err = hinic5_module_pre_init(); + if (err != 0) { + pr_err("hinic5_module_pre_init failed\n"); + goto hinic5_module_pre_init_fail; + } + + err = hinic5_register_uld(SERVICE_T_NIC, &g_nic_uld_info); + if (err != 0) { + pr_err("Register hinic5 uld failed\n"); + goto hinic5_register_uld_fail; + } + + err = hinic5_bond_init(); + if (err != 0) { + pr_err("Init bond failed\n"); + goto hinic5_bond_init_fail; + } + + err = hinic5_module_post_init(); + if (err != 0) { + pr_err("hinic5_module_post_init failed\n"); + goto hinic5_module_post_init_fail; + } + + return 0; + +hinic5_module_post_init_fail: + hinic5_bond_deinit(); +hinic5_bond_init_fail: + hinic5_unregister_uld(SERVICE_T_NIC); +hinic5_register_uld_fail: + hinic5_module_post_exit(); +hinic5_module_pre_init_fail: + hinic5_lld_exit(); + + return err; +} + +static __exit void hinic5_nic_lld_exit(void) +{ + hinic5_module_pre_exit(); + hinic5_bond_deinit(); + hinic5_unregister_uld(SERVICE_T_NIC); + hinic5_module_post_exit(); + hinic5_lld_exit(); +} + +#ifndef _LLT_TEST_ +module_init(hinic5_nic_lld_init); +module_exit(hinic5_nic_lld_exit); +#endif + +MODULE_AUTHOR("Huawei Technologies CO., Ltd"); +MODULE_DESCRIPTION(HINIC5_NIC_DRV_DESC); +MODULE_VERSION(HINIC5_NIC_DRV_VERSION); +MODULE_LICENSE("GPL"); diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/ctrl/hinic5_main.h b/hinic5/src/dpu_platform_library/host/service/nic/linux/ctrl/hinic5_main.h new file mode 100644 index 00000000..68aff6d0 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/ctrl/hinic5_main.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved. 
*/ + +#ifndef HINIC5_MAIN_H +#define HINIC5_MAIN_H + +#define HINIC5_NIC_DEV_WQ_NAME "hinic5_nic_dev_wq" + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_LINK) + +#define QID_MASKED(q_id, nic_dev) ((q_id) & ((nic_dev)->num_qps - 1)) +#define WATCHDOG_TIMEOUT 5 + +#define HINIC5_SQ_DEPTH 1024 +#define HINIC5_RQ_DEPTH 1024 + +#define HW_DEFAULT_COS_IS_VALID(cos) ((cos) & BIT(3)) +#define HW_DEFAULT_COS_VALID_BIT 0x7 + +enum hinic5_rx_buff_len { + RX_BUFF_VALID_2KB = 2, + RX_BUFF_VALID_4KB = 4, + RX_BUFF_VALID_8KB = 8, +}; + +#define CONVERT_UNIT 1024 + +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool.c b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool.c new file mode 100644 index 00000000..dfce1796 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool.c @@ -0,0 +1,757 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/vmalloc.h> + +#include "drv_nic_api.h" +#include "ossl_knl.h" +#include "hinic5_hw.h" +#include "hinic5_crm.h" +#include "hinic5_nic_dev.h" +#include "hinic5_tx.h" +#include "hinic5_rx.h" +#include "hinic5_rss.h" +#include "hinic5_ethtool_coalesce.h" +#include "hinic5_ethtool_lb_test.h" +#include "hinic5_ethtool_priv_flags.h" +#include "hinic5_ethtool.h" + +static void hinic5_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + u8 mgmt_ver[HINIC5_MGMT_VERSION_MAX_LEN] = {0}; + int err; + + strscpy(info->driver, HINIC5_NIC_DRV_NAME, sizeof(info->driver)); + strscpy(info->version, HINIC5_NIC_DRV_VERSION, 
sizeof(info->version)); + strscpy(info->bus_info, dev_name(nic_dev->lld_dev->dev), sizeof(info->bus_info)); + + err = hinic5_get_mgmt_version(nic_dev->hwdev, mgmt_ver, + HINIC5_MGMT_VERSION_MAX_LEN, + HINIC5_CHANNEL_NIC); + if (err != 0) { + nicif_err(nic_dev, drv, netdev, "Failed to get fw version\n"); + return; + } + + err = snprintf(info->fw_version, sizeof(info->fw_version), "%s", mgmt_ver); + if (err < 0) + nicif_err(nic_dev, drv, netdev, "Failed to snprintf_s fw version\n"); +} + +static u32 hinic5_get_msglevel(struct net_device *netdev) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + + return nic_dev->msg_enable; +} + +static void hinic5_set_msglevel(struct net_device *netdev, u32 data) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + + nic_dev->msg_enable = data; + + nicif_info(nic_dev, drv, netdev, "Set message level: 0x%x\n", data); +} + +static int hinic5_nway_reset(struct net_device *netdev) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + struct mag_port_info port_info = {0}; + int err; + + while (test_and_set_bit(HINIC5_AUTONEG_RESET, &nic_dev->flags)) + msleep(100); /* sleep 100 ms, waiting for another autoneg restart progress done */ + + err = hinic5_get_port_info(nic_dev->hwdev, &port_info, HINIC5_CHANNEL_NIC); + if (err != 0) { + nicif_err(nic_dev, drv, netdev, "Get port info failed\n"); + err = -EFAULT; + goto reset_err; + } + + if (port_info.autoneg_state != PORT_CFG_AN_ON) { + nicif_err(nic_dev, drv, netdev, "Autonegotiation is not on, don't support to restart it\n"); + err = -EOPNOTSUPP; + goto reset_err; + } + + err = hinic5_set_autoneg(nic_dev->hwdev, false); + if (err != 0) { + nicif_err(nic_dev, drv, netdev, "Set autonegotiation off failed\n"); + err = -EFAULT; + goto reset_err; + } + + msleep(200); /* sleep 200 ms, waiting for status polling finished */ + + err = hinic5_set_autoneg(nic_dev->hwdev, true); + if (err != 0) { + nicif_err(nic_dev, drv, netdev, "Set autonegotiation on failed\n"); + err = 
-EFAULT; + goto reset_err; + } + + msleep(200); /* sleep 200 ms, waiting for status polling finished */ + nicif_info(nic_dev, drv, netdev, "Restart autonegotiation successfully\n"); + +reset_err: + clear_bit(HINIC5_AUTONEG_RESET, &nic_dev->flags); + return err; +} + +#ifdef HAVE_ETHTOOL_EXTENDED_RINGPARAMS +static void hinic5_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +#else +static void hinic5_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +#endif +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + + ring->rx_max_pending = HINIC5_MAX_RX_QUEUE_DEPTH; + ring->tx_max_pending = HINIC5_MAX_TX_QUEUE_DEPTH; + ring->rx_pending = nic_dev->rxqs[0].q_depth; + ring->tx_pending = nic_dev->txqs[0].q_depth; +} + +static void hinic5_update_qp_depth(struct hinic5_nic_dev *nic_dev, + u32 sq_depth, u32 rq_depth) +{ + u16 i; + + nic_dev->q_params.sq_depth = sq_depth; + nic_dev->q_params.rq_depth = rq_depth; + for (i = 0; i < nic_dev->max_qps; i++) { + nic_dev->txqs[i].q_depth = sq_depth; + nic_dev->txqs[i].q_mask = sq_depth - 1; + nic_dev->rxqs[i].q_depth = rq_depth; + nic_dev->rxqs[i].q_mask = rq_depth - 1; + } +} + +static int check_ringparam_valid(struct net_device *netdev, + const struct ethtool_ringparam *ring) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + + if (ring->rx_jumbo_pending != 0 || ring->rx_mini_pending != 0) { + nicif_err(nic_dev, drv, netdev, + "Unsupported rx_jumbo_pending/rx_mini_pending\n"); + return -EINVAL; + } + + if (ring->tx_pending > HINIC5_MAX_TX_QUEUE_DEPTH || + ring->tx_pending < HINIC5_MIN_QUEUE_DEPTH || + ring->rx_pending > HINIC5_MAX_RX_QUEUE_DEPTH || + ring->rx_pending < HINIC5_MIN_QUEUE_DEPTH) { + nicif_err(nic_dev, drv, netdev, + "Queue depth out of rang tx[%d-%d] rx[%d-%d]\n", + HINIC5_MIN_QUEUE_DEPTH, HINIC5_MAX_TX_QUEUE_DEPTH, + HINIC5_MIN_QUEUE_DEPTH, HINIC5_MAX_RX_QUEUE_DEPTH); + 
return -EINVAL; + } + + return 0; +} + +__weak int hinic5_set_ringparam_pre_hook(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + return 0; +} + +#ifdef HAVE_ETHTOOL_EXTENDED_RINGPARAMS +static int hinic5_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +#else +static int hinic5_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +#endif +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic5_dyna_txrxq_params q_params = {0}; + u32 new_sq_depth, new_rq_depth; + int err; + + if (nic_dev->flow_bifur_group_num > HINIC5_GROUP_NUMBER_MIN) { + nicif_err(nic_dev, drv, netdev, "Cannot be modified after queue groups are configured.\n"); + return -EOPNOTSUPP; + } + + err = hinic5_set_ringparam_pre_hook(netdev, ring); + if (err != 0) { + nicif_err(nic_dev, drv, netdev, "Skip ringparam config\n"); + return err; + } + + err = check_ringparam_valid(netdev, ring); + if (err != 0) + return err; + + new_sq_depth = (u32)(1U << (u16)ilog2(ring->tx_pending)); + new_rq_depth = (u32)(1U << (u16)ilog2(ring->rx_pending)); + if (new_sq_depth == nic_dev->q_params.sq_depth && + new_rq_depth == nic_dev->q_params.rq_depth) + return 0; /* nothing to do */ + + nicif_info(nic_dev, drv, netdev, + "Change Tx/Rx ring depth from %u/%u to %u/%u\n", + nic_dev->q_params.sq_depth, nic_dev->q_params.rq_depth, + new_sq_depth, new_rq_depth); + + if (!netif_running(netdev)) { + hinic5_update_qp_depth(nic_dev, new_sq_depth, new_rq_depth); + } else { + q_params = nic_dev->q_params; + q_params.sq_depth = new_sq_depth; + q_params.rq_depth = new_rq_depth; + q_params.txqs_res = NULL; + q_params.rxqs_res = NULL; + q_params.irq_cfg = NULL; + +#ifdef HAVE_XDP_SUPPORT + err = hinic5_set_xdp_num(nic_dev, &q_params); + if (err != 0) + return err; +#endif + + nicif_info(nic_dev, drv, netdev, "Restarting channel\n"); + err = 
hinic5_change_channel_settings(nic_dev, &q_params, + NULL, NULL); + if (err != 0) { + nicif_err(nic_dev, drv, netdev, "Failed to change channel settings\n"); + return -EFAULT; + } + } + + return 0; +} + +#ifdef HAVE_ETHTOOL_COALESCE_EXTACK +static int hinic5_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +#else +static int hinic5_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal) +#endif +{ + return get_coalesce(netdev, coal, COALESCE_ALL_QUEUE); +} + +#ifdef HAVE_ETHTOOL_COALESCE_EXTACK +static int hinic5_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +#else +static int hinic5_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal) +#endif +{ + return set_coalesce(netdev, coal, COALESCE_ALL_QUEUE); +} + +#if defined(ETHTOOL_PERQUEUE) && defined(ETHTOOL_GCOALESCE) +static int hinic5_get_per_queue_coalesce(struct net_device *netdev, u32 queue, + struct ethtool_coalesce *coal) +{ + return get_coalesce(netdev, coal, (u16)queue); +} + +static int hinic5_set_per_queue_coalesce(struct net_device *netdev, u32 queue, + struct ethtool_coalesce *coal) +{ + return set_coalesce(netdev, coal, (u16)queue); +} +#endif + +#ifdef HAVE_ETHTOOL_SET_PHYS_ID +static int hinic5_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + int err; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + err = hinic5_set_led_status(nic_dev->hwdev, + MAG_CMD_LED_TYPE_ALARM, + MAG_CMD_LED_MODE_FORCE_BLINK_2HZ); + if (err != 0) + nicif_err(nic_dev, drv, netdev, + "Set LED blinking in 2HZ failed\n"); + else + nicif_info(nic_dev, drv, netdev, + "Set LED blinking in 2HZ success\n"); + break; + + case ETHTOOL_ID_INACTIVE: + err = hinic5_set_led_status(nic_dev->hwdev, + 
MAG_CMD_LED_TYPE_ALARM, + MAG_CMD_LED_MODE_DEFAULT); + if (err != 0) + nicif_err(nic_dev, drv, netdev, + "Reset LED to original status failed\n"); + else + nicif_info(nic_dev, drv, netdev, + "Reset LED to original status success\n"); + break; + + default: + return -EOPNOTSUPP; + } + + return err; +} +#else +static int hinic5_phys_id(struct net_device *netdev, u32 data) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + + nicif_err(nic_dev, drv, netdev, "Not support to set phys id\n"); + + return -EOPNOTSUPP; +} +#endif + +static void hinic5_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + struct nic_pause_config nic_pause = {0}; + int err; + + err = hinic5_get_pause_info(nic_dev->hwdev, &nic_pause); + if (err != 0) { + nicif_err(nic_dev, drv, netdev, + "Failed to get pauseparam from hw\n"); + } else { + /* For compatibility with 23v200: + * the auto_neg it reports is the port speed auto-negotiation state (may be on), + * while the value expected here is the pause frame auto-negotiation state (off), + * so the driver always reports it as off + */ + pause->autoneg = AUTONEG_DISABLE; + pause->rx_pause = nic_pause.rx_pause; + pause->tx_pause = nic_pause.tx_pause; + } +} + +static int hinic5_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + struct nic_pause_config nic_pause = {0}; + struct mag_port_info port_info = {0}; + int err; + + if (pause->autoneg != AUTONEG_DISABLE) { + nicif_warn(nic_dev, drv, netdev, + "The current version does not support enabling pause frame auto-negotiation.\n"); + } + + err = hinic5_get_port_info(nic_dev->hwdev, &port_info, HINIC5_CHANNEL_NIC); + if (err != 0) { + nicif_err(nic_dev, drv, netdev, "Failed to get auto-negotiation state\n"); + return -EFAULT; + } + + /* For compatibility with old versions, configure by the port speed auto-negotiation state */ + nic_pause.auto_neg = port_info.autoneg_state; + nic_pause.rx_pause = (u8)pause->rx_pause; + nic_pause.tx_pause = (u8)pause->tx_pause; + + err = hinic5_set_pause_info(nic_dev->hwdev, nic_pause); + if (err 
!= 0) { + nicif_err(nic_dev, drv, netdev, "Failed to set pauseparam\n"); + return err; + } + + nicif_info(nic_dev, drv, netdev, "Set pause options, autoneg: %s, tx: %s, rx: %s\n", + "off", (pause->tx_pause != 0) ? "on" : "off", + (pause->rx_pause != 0) ? "on" : "off"); + + return 0; +} + +#ifdef ETHTOOL_GMODULEEEPROM +static int hinic5_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + u8 sfp_type = 0; + u8 sfp_type_ext = 0; + int err; + + err = hinic5_get_sfp_cmis_type(nic_dev->hwdev, &sfp_type, &sfp_type_ext); + if (err != 0) + return err; + + switch (sfp_type) { + case MODULE_TYPE_SFP: + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; + case MODULE_TYPE_QSFP: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; + break; + case MODULE_TYPE_QSFP_PLUS: + if (sfp_type_ext >= 0x3) { + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; + } + break; + case MODULE_TYPE_QSFP28: + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; + break; + case MODULE_TYPE_SFF8024_ID_QSFP_PLUS_CMIS: + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; + break; + case MODULE_TYPE_SFF8024_ID_DSFP: + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; + break; + default: + nicif_warn(nic_dev, drv, netdev, + "Optical module unknown: 0x%x\n", sfp_type); + return -EINVAL; + } + + return 0; +} + +static int hinic5_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + u8 sfp_data[STD_SFP_INFO_MAX_SIZE]; + int err; + + if (ee->len == 0 || + ((ee->len + ee->offset) > 
STD_SFP_INFO_MAX_SIZE) || ee->len > PAGE_SIZE) + return -EINVAL; + + memset(data, 0, ee->len); + + err = hinic5_get_sfp_eeprom(nic_dev->hwdev, (u8 *)sfp_data, ee->len, ee->offset); + if (err != 0) + return err; + + memcpy(data, sfp_data, ee->len); + + return err == 0 ? 0 : -ENOMEM; +} +#endif /* ETHTOOL_GMODULEEEPROM */ + +#ifdef HAVE_ETHTOOL_GET_MODULE_EEPROM_BY_PAGE +static int hinic5_get_module_eeprom_by_page(struct net_device *dev, + const struct ethtool_module_eeprom *page_data, + struct netlink_ext_ack *extack) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(dev); + u8 page_id; + u8 i2c_address; + u32 offset; + u32 len; + int ret; + + if (!page_data || !page_data->data) + return -EINVAL; + + page_id = page_data->page; + offset = page_data->offset; + len = page_data->length; + i2c_address = page_data->i2c_address; + + if (i2c_address == SFF8079_I2C_ADDRESS_HIGH) + page_id = HINIC5_ETHTOOL_PAGE_A2H; + memset(page_data->data, 0, len); + + ret = hinic5_eeprom_page_check(page_id, offset, len); + if (ret != 0) + return ret; + + ret = hinic5_get_cmis_eeprom_by_page(nic_dev->hwdev, page_id, offset, page_data->data, len); + if (ret != 0) + return ret; + return len; +} +#endif /* HAVE_ETHTOOL_GET_MODULE_EEPROM_BY_PAGE */ + +#ifdef HAVE_DEVLINK_PARAM_SET_EXTACK +static int hinic5_get_ts_info(struct net_device *netdev, struct kernel_ethtool_ts_info *info) +#else +static int hinic5_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) +#endif +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + + /* only report HW timestamping if PTP is enabled */ + if (test_bit(HINIC5_PTP_CLOCK, &nic_dev->flags) == 0) { + info->so_timestamping = 0; + info->phc_index = -1; + return 0; + } + + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (nic_dev->ptp_ctrl.ptp_clock) + info->phc_index = 
ptp_clock_index(nic_dev->ptp_ctrl.ptp_clock); + else + info->phc_index = -1; + + info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); + + return 0; +} + +static const struct ethtool_ops hinic5_ethtool_ops = { +#ifdef SUPPORTED_COALESCE_PARAMS + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_PKT_RATE_RX_USECS | + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USECS_LOW_HIGH | + ETHTOOL_COALESCE_MAX_FRAMES_LOW_HIGH, +#endif +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef XENSERVER_HAVE_NEW_ETHTOOL_OPS + .get_link_ksettings = hinic5_get_link_ksettings, + .set_link_ksettings = hinic5_set_link_ksettings, +#endif +#endif +#ifndef HAVE_NEW_ETHTOOL_LINK_SETTINGS_ONLY + .get_settings = hinic5_get_settings, + .set_settings = hinic5_set_settings, +#endif + + .get_drvinfo = hinic5_get_drvinfo, + .get_msglevel = hinic5_get_msglevel, + .set_msglevel = hinic5_set_msglevel, + .nway_reset = hinic5_nway_reset, + .get_link = ethtool_op_get_link, + .get_ringparam = hinic5_get_ringparam, + .set_ringparam = hinic5_set_ringparam, + .get_pauseparam = hinic5_get_pauseparam, + .set_pauseparam = hinic5_set_pauseparam, + .get_sset_count = hinic5_get_sset_count, + .get_ethtool_stats = hinic5_get_ethtool_stats, + .get_strings = hinic5_get_strings, + + .self_test = hinic5_diag_test, + +#ifndef HAVE_ETHTOOL_OPS_EXT +#ifdef HAVE_ETHTOOL_SET_PHYS_ID + .set_phys_id = hinic5_set_phys_id, +#else + .phys_id = hinic5_phys_id, +#endif +#endif + + .get_coalesce = hinic5_get_coalesce, + .set_coalesce = hinic5_set_coalesce, +#if defined(ETHTOOL_PERQUEUE) && defined(ETHTOOL_GCOALESCE) + .get_per_queue_coalesce = hinic5_get_per_queue_coalesce, + .set_per_queue_coalesce = hinic5_set_per_queue_coalesce, +#endif + +#if defined(ETHTOOL_GFECPARAM) && defined(ETHTOOL_SFECPARAM) + .get_fecparam = hinic5_get_fecparam, + .set_fecparam = hinic5_set_fecparam, +#endif + + .get_rxnfc = hinic5_get_rxnfc, + 
.set_rxnfc = hinic5_set_rxnfc, + .get_priv_flags = hinic5_get_priv_flags, + .set_priv_flags = hinic5_set_priv_flags, + .get_ts_info = hinic5_get_ts_info, + +#ifndef HAVE_ETHTOOL_OPS_EXT + .get_channels = hinic5_get_channels, + .set_channels = hinic5_set_channels, + +#ifdef ETHTOOL_GMODULEEEPROM + .get_module_info = hinic5_get_module_info, + .get_module_eeprom = hinic5_get_module_eeprom, +#endif +#ifdef HAVE_ETHTOOL_GET_MODULE_EEPROM_BY_PAGE + .get_module_eeprom_by_page = hinic5_get_module_eeprom_by_page, +#endif + +#ifdef HAVE_ETHTOOL_GET_RXFH_INDIR_SIZE + .get_rxfh_indir_size = hinic5_get_rxfh_indir_size, +#endif + +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) + .get_rxfh_key_size = hinic5_get_rxfh_key_size, + .get_rxfh = hinic5_get_rxfh, + .set_rxfh = hinic5_set_rxfh, +#else + .get_rxfh_indir = hinic5_get_rxfh_indir, + .set_rxfh_indir = hinic5_set_rxfh_indir, +#endif + +#endif /* HAVE_ETHTOOL_OPS_EXT */ +}; + +#ifdef HAVE_ETHTOOL_OPS_EXT +static const struct ethtool_ops_ext hinic5_ethtool_ops_ext = { + .size = sizeof(struct ethtool_ops_ext), + .set_phys_id = hinic5_set_phys_id, + .get_channels = hinic5_get_channels, + .set_channels = hinic5_set_channels, +#ifdef ETHTOOL_GMODULEEEPROM + .get_module_info = hinic5_get_module_info, + .get_module_eeprom = hinic5_get_module_eeprom, +#endif +#ifdef HAVE_ETHTOOL_GET_MODULE_EEPROM_BY_PAGE + .get_module_eeprom_by_page = hinic5_get_module_eeprom_by_page, +#endif + +#ifdef HAVE_ETHTOOL_GET_RXFH_INDIR_SIZE + .get_rxfh_indir_size = hinic5_get_rxfh_indir_size, +#endif + +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) + .get_rxfh_key_size = hinic5_get_rxfh_key_size, + .get_rxfh = hinic5_get_rxfh, + .set_rxfh = hinic5_set_rxfh, +#else + .get_rxfh_indir = hinic5_get_rxfh_indir, + .set_rxfh_indir = hinic5_set_rxfh_indir, +#endif + +}; +#endif /* HAVE_ETHTOOL_OPS_EXT */ + +static const struct ethtool_ops hinic5vf_ethtool_ops = { +#ifdef SUPPORTED_COALESCE_PARAMS + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + 
ETHTOOL_COALESCE_PKT_RATE_RX_USECS | + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USECS_LOW_HIGH | + ETHTOOL_COALESCE_MAX_FRAMES_LOW_HIGH, +#endif +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef XENSERVER_HAVE_NEW_ETHTOOL_OPS + .get_link_ksettings = hinic5_get_link_ksettings, +#endif +#else + .get_settings = hinic5_get_settings, +#endif + .get_drvinfo = hinic5_get_drvinfo, + .get_msglevel = hinic5_get_msglevel, + .set_msglevel = hinic5_set_msglevel, + .get_link = ethtool_op_get_link, + .get_ringparam = hinic5_get_ringparam, + + .set_ringparam = hinic5_set_ringparam, + .get_sset_count = hinic5_get_sset_count, + .get_ethtool_stats = hinic5_get_ethtool_stats, + .get_strings = hinic5_get_strings, + + .get_coalesce = hinic5_get_coalesce, + .set_coalesce = hinic5_set_coalesce, +#if defined(ETHTOOL_PERQUEUE) && defined(ETHTOOL_GCOALESCE) + .get_per_queue_coalesce = hinic5_get_per_queue_coalesce, + .set_per_queue_coalesce = hinic5_set_per_queue_coalesce, +#endif + +#if defined(ETHTOOL_GFECPARAM) && defined(ETHTOOL_SFECPARAM) + .get_fecparam = hinic5_get_fecparam, + .set_fecparam = hinic5_set_fecparam, +#endif + + .get_rxnfc = hinic5_get_rxnfc, + .set_rxnfc = hinic5_set_rxnfc, + .get_priv_flags = hinic5_get_priv_flags, + .set_priv_flags = hinic5_set_priv_flags, + +#ifndef HAVE_ETHTOOL_OPS_EXT + .get_channels = hinic5_get_channels, + .set_channels = hinic5_set_channels, + +#ifdef HAVE_ETHTOOL_GET_RXFH_INDIR_SIZE + .get_rxfh_indir_size = hinic5_get_rxfh_indir_size, +#endif + +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) + .get_rxfh_key_size = hinic5_get_rxfh_key_size, + .get_rxfh = hinic5_get_rxfh, + .set_rxfh = hinic5_set_rxfh, +#else + .get_rxfh_indir = hinic5_get_rxfh_indir, + .set_rxfh_indir = hinic5_set_rxfh_indir, +#endif + +#endif /* HAVE_ETHTOOL_OPS_EXT */ +}; + +#ifdef HAVE_ETHTOOL_OPS_EXT +static const struct ethtool_ops_ext hinic5vf_ethtool_ops_ext = { + .size = sizeof(struct ethtool_ops_ext), + .get_channels = hinic5_get_channels, + .set_channels = 
hinic5_set_channels, + +#ifdef HAVE_ETHTOOL_GET_RXFH_INDIR_SIZE + .get_rxfh_indir_size = hinic5_get_rxfh_indir_size, +#endif + +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) + .get_rxfh_key_size = hinic5_get_rxfh_key_size, + .get_rxfh = hinic5_get_rxfh, + .set_rxfh = hinic5_set_rxfh, +#else + .get_rxfh_indir = hinic5_get_rxfh_indir, + .set_rxfh_indir = hinic5_set_rxfh_indir, +#endif + +}; +#endif /* HAVE_ETHTOOL_OPS_EXT */ + +void hinic5_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &hinic5_ethtool_ops); +#ifdef HAVE_ETHTOOL_OPS_EXT + set_ethtool_ops_ext(netdev, &hinic5_ethtool_ops_ext); +#endif /* HAVE_ETHTOOL_OPS_EXT */ +} + +void hinic5vf_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &hinic5vf_ethtool_ops); +#ifdef HAVE_ETHTOOL_OPS_EXT + set_ethtool_ops_ext(netdev, &hinic5vf_ethtool_ops_ext); +#endif /* HAVE_ETHTOOL_OPS_EXT */ +} diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool.h b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool.h new file mode 100644 index 00000000..592f8a15 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved. 
*/ + +#ifndef HINIC5_ETHTOOL_H +#define HINIC5_ETHTOOL_H + +#include <linux/ethtool.h> +#include <linux/netdevice.h> + +#define COALESCE_ALL_QUEUE 0xFFFF +#define COALESCE_PENDING_LIMIT_UNIT 8 +#define COALESCE_TIMER_CFG_UNIT 5 +#define COALESCE_MAX_PENDING_LIMIT (255 * COALESCE_PENDING_LIMIT_UNIT) +#define COALESCE_MAX_TIMER_CFG (255 * COALESCE_TIMER_CFG_UNIT) +#define HINIC5_WAIT_PKTS_TO_RX_BUFFER 200 +#define HINIC5_WAIT_CLEAR_LP_TEST 100 + +#define SFF8079_I2C_ADDRESS_HIGH 0x51 +#define HINIC5_ETHTOOL_PAGE_A2H 0x2 + +#ifndef SET_ETHTOOL_OPS +#define SET_ETHTOOL_OPS(netdev, ops) \ + ((netdev)->ethtool_ops = (ops)) +#endif + +#ifdef NEED_ETHTOOL_COALESCE_USECS_LOW_HIGH +#define ETHTOOL_COALESCE_USECS_LOW_HIGH \ + (ETHTOOL_COALESCE_RX_USECS_LOW | ETHTOOL_COALESCE_TX_USECS_LOW | \ + ETHTOOL_COALESCE_RX_USECS_HIGH | ETHTOOL_COALESCE_TX_USECS_HIGH) +#endif + +#ifdef NEED_ETHTOOL_COALESCE_MAX_FRAMES_LOW_HIGH +#define ETHTOOL_COALESCE_MAX_FRAMES_LOW_HIGH \ + (ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW | \ + ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW | \ + ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH | \ + ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH) +#endif + +#ifdef NEED_ETHTOOL_COALESCE_PKT_RATE_RX_USECS +#define ETHTOOL_COALESCE_PKT_RATE_RX_USECS \ + (ETHTOOL_COALESCE_USE_ADAPTIVE_RX | \ + ETHTOOL_COALESCE_RX_USECS_LOW | ETHTOOL_COALESCE_RX_USECS_HIGH | \ + ETHTOOL_COALESCE_PKT_RATE_LOW | ETHTOOL_COALESCE_PKT_RATE_HIGH | \ + ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL) +#endif + +/* Include sub-module headers */ +#include "hinic5_ethtool_coalesce.h" +#include "hinic5_ethtool_lb_test.h" +#include "hinic5_ethtool_priv_flags.h" + +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_coalesce.c b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_coalesce.c new file mode 100644 index 00000000..966b25e3 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_coalesce.c @@ -0,0 +1,417 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> + +#include "drv_nic_api.h" +#include "ossl_knl.h" +#include "hinic5_hw.h" +#include "hinic5_crm.h" +#include "hinic5_nic_dev.h" +#include "hinic5_tx.h" +#include "hinic5_rx.h" +#include "hinic5_ethtool.h" +#include "hinic5_ethtool_coalesce.h" + +#define CHECK_COALESCE_ALIGN(coal, item, unit) \ +do { \ + if ((coal)->item % (unit) != 0) \ + nicif_warn(nic_dev, drv, netdev, \ + "%s in %d units, change to %u\n", \ + #item, (unit), ((coal)->item - \ + (coal)->item % (unit))); \ +} while (0) + +#define CHECK_COALESCE_CHANGED(coal, item, unit, ori_val, obj_str) \ +do { \ + if (((coal)->item / (unit)) != (ori_val)) \ + nicif_info(nic_dev, drv, netdev, \ + "Change %s from %d to %u %s\n", \ + #item, (ori_val) * (unit), \ + ((coal)->item - (coal)->item % (unit)), \ + (obj_str)); \ +} while (0) + +#define CHECK_PKT_RATE_CHANGED(coal, item, ori_val, obj_str) \ +do { \ + if ((coal)->item != (ori_val)) \ + nicif_info(nic_dev, drv, netdev, \ + "Change %s from %llu to %u %s\n", \ + #item, (ori_val), (coal)->item, (obj_str)); \ +} while (0) + +int get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, u16 queue) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic5_qp_coalesce_info *interrupt_info = NULL; + + if (queue == COALESCE_ALL_QUEUE) { + /* get tx/rx irq0 as default parameters */ + interrupt_info = &nic_dev->intr_coalesce[0]; + } else { + if (queue >= nic_dev->q_params.num_qps) { + nicif_err(nic_dev, drv, netdev, + "Invalid queue_id: %u\n", queue); + return -EINVAL; + } + interrupt_info = &nic_dev->intr_coalesce[queue]; + } + + /* coalescs_timer is in unit of 5us */ + 
coal->rx_coalesce_usecs = interrupt_info->rx_coalesce_timer_cfg * + COALESCE_TIMER_CFG_UNIT; + /* coalesce frames is in unit of 8 */ + coal->rx_max_coalesced_frames = interrupt_info->rx_pending_limt * + COALESCE_PENDING_LIMIT_UNIT; + + /* tx/rx use the same interrupt */ + /* coalesce timer is in unit of 5us */ + coal->tx_coalesce_usecs = interrupt_info->tx_coalesce_timer_cfg * + COALESCE_TIMER_CFG_UNIT; + /* coalesce frames is in unit of 8 */ + coal->tx_max_coalesced_frames = interrupt_info->tx_pending_limt * + COALESCE_PENDING_LIMIT_UNIT; + coal->use_adaptive_rx_coalesce = nic_dev->adaptive_rx_coal; + + coal->pkt_rate_high = (u32)interrupt_info->pkt_rate_high; + coal->rx_coalesce_usecs_high = interrupt_info->rx_usecs_high * + COALESCE_TIMER_CFG_UNIT; + coal->rx_max_coalesced_frames_high = + interrupt_info->rx_pending_limt_high * + COALESCE_PENDING_LIMIT_UNIT; + + coal->pkt_rate_low = (u32)interrupt_info->pkt_rate_low; + coal->rx_coalesce_usecs_low = interrupt_info->rx_usecs_low * + COALESCE_TIMER_CFG_UNIT; + coal->rx_max_coalesced_frames_low = + interrupt_info->rx_pending_limt_low * + COALESCE_PENDING_LIMIT_UNIT; + + return 0; +} + +int set_queue_coalesce(struct hinic5_nic_dev *nic_dev, u16 q_id, + const struct hinic5_qp_coalesce_info *coal) +{ + struct hinic5_qp_coalesce_info *intr_coal = NULL; + struct net_device *netdev = nic_dev->netdev; + int err; + + intr_coal = &nic_dev->intr_coalesce[q_id]; + *intr_coal = *coal; + + /* netdev not running or qp not in use, + * don't need to set coalesce to hw + */ + if ((test_bit(HINIC5_INTF_UP, &nic_dev->flags) == 0) || + q_id >= nic_dev->q_params.num_qps || nic_dev->adaptive_rx_coal != 0) + return 0; + + nic_dev->rxqs[q_id].last_coalesc_timer_cfg = intr_coal->rx_coalesce_timer_cfg; + nic_dev->rxqs[q_id].last_pending_limt = intr_coal->rx_pending_limt; + err = hinic5_set_sq_rq_coalesce_cfg(nic_dev->hwdev, q_id, HINIC5_SQ_RQ_COALESCE, intr_coal); + if (err != 0) + nicif_warn(nic_dev, drv, netdev, + "Failed to set queue%u 
coalesce", q_id); + + return err; +} + +int is_coalesce_exceed_limit(struct net_device *netdev, + const struct ethtool_coalesce *coal) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + + if (coal->rx_coalesce_usecs > COALESCE_MAX_TIMER_CFG || + coal->tx_coalesce_usecs > COALESCE_MAX_TIMER_CFG) { + nicif_err(nic_dev, drv, netdev, + "rx_coalesce_usecs out of range[%d-%d]\n", 0, + COALESCE_MAX_TIMER_CFG); + return -EOPNOTSUPP; + } + + if (coal->rx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT || + coal->tx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT) { + nicif_err(nic_dev, drv, netdev, + "rx_max_coalesced_frames out of range[%d-%d]\n", 0, + COALESCE_MAX_PENDING_LIMIT); + return -EOPNOTSUPP; + } + + if (coal->rx_coalesce_usecs_low > COALESCE_MAX_TIMER_CFG) { + nicif_err(nic_dev, drv, netdev, + "rx_coalesce_usecs_low out of range[%d-%d]\n", 0, + COALESCE_MAX_TIMER_CFG); + return -EOPNOTSUPP; + } + + if (coal->rx_max_coalesced_frames_low > COALESCE_MAX_PENDING_LIMIT) { + nicif_err(nic_dev, drv, netdev, + "rx_max_coalesced_frames_low out of range[%d-%d]\n", + 0, COALESCE_MAX_PENDING_LIMIT); + return -EOPNOTSUPP; + } + + if (coal->rx_coalesce_usecs_high > COALESCE_MAX_TIMER_CFG) { + nicif_err(nic_dev, drv, netdev, + "rx_coalesce_usecs_high out of range[%d-%d]\n", 0, + COALESCE_MAX_TIMER_CFG); + return -EOPNOTSUPP; + } + + if (coal->rx_max_coalesced_frames_high > COALESCE_MAX_PENDING_LIMIT) { + nicif_err(nic_dev, drv, netdev, + "rx_max_coalesced_frames_high out of range[%d-%d]\n", + 0, COALESCE_MAX_PENDING_LIMIT); + return -EOPNOTSUPP; + } + + return 0; +} + +void tmp_coal_init(struct ethtool_coalesce *tmp_coal, const struct ethtool_coalesce *coal) +{ + tmp_coal->cmd = coal->cmd; + tmp_coal->rx_coalesce_usecs = coal->rx_coalesce_usecs; + tmp_coal->rx_max_coalesced_frames = coal->rx_max_coalesced_frames; + tmp_coal->tx_coalesce_usecs = coal->tx_coalesce_usecs; + tmp_coal->tx_max_coalesced_frames = coal->tx_max_coalesced_frames; + 
tmp_coal->use_adaptive_rx_coalesce = coal->use_adaptive_rx_coalesce; + + tmp_coal->pkt_rate_low = coal->pkt_rate_low; + tmp_coal->rx_coalesce_usecs_low = coal->rx_coalesce_usecs_low; + tmp_coal->rx_max_coalesced_frames_low = coal->rx_max_coalesced_frames_low; + + tmp_coal->pkt_rate_high = coal->pkt_rate_high; + tmp_coal->rx_coalesce_usecs_high = coal->rx_coalesce_usecs_high; + tmp_coal->rx_max_coalesced_frames_high = coal->rx_max_coalesced_frames_high; +} + +int is_coalesce_legal(struct net_device *netdev, + const struct ethtool_coalesce *coal) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + struct ethtool_coalesce tmp_coal = {0}; + + if (!HINIC5_SUPPORT_SQ_RQ_CI_COALESCE(nic_dev->hwdev)) { + if (coal->rx_coalesce_usecs != coal->tx_coalesce_usecs) { + nicif_err(nic_dev, drv, netdev, + "tx-usecs must be equal to rx-usecs\n"); + return -EINVAL; + } + + if (coal->rx_max_coalesced_frames != coal->tx_max_coalesced_frames) { + nicif_err(nic_dev, drv, netdev, + "tx-frames must be equal to rx-frames\n"); + return -EINVAL; + } + } + + tmp_coal_init(&tmp_coal, coal); + + if (memcmp(coal, &tmp_coal, sizeof(struct ethtool_coalesce)) != 0) { + nicif_err(nic_dev, drv, netdev, + "Only support to change rx/tx-usecs and rx/tx-frames\n"); + return -EOPNOTSUPP; + } + + if (is_coalesce_exceed_limit(netdev, coal) != 0) + return -EOPNOTSUPP; + + if (coal->rx_coalesce_usecs_low / COALESCE_TIMER_CFG_UNIT >= + coal->rx_coalesce_usecs_high / COALESCE_TIMER_CFG_UNIT) { + nicif_err(nic_dev, drv, netdev, + "coalesce_usecs_high(%u) must more than coalesce_usecs_low(%u), after dividing %d usecs unit\n", + coal->rx_coalesce_usecs_high, + coal->rx_coalesce_usecs_low, + COALESCE_TIMER_CFG_UNIT); + return -EOPNOTSUPP; + } + + if (coal->rx_max_coalesced_frames_low / COALESCE_PENDING_LIMIT_UNIT >= + coal->rx_max_coalesced_frames_high / COALESCE_PENDING_LIMIT_UNIT) { + nicif_err(nic_dev, drv, netdev, + "coalesced_frames_high(%u) must more than coalesced_frames_low(%u),after dividing %d 
frames unit\n", + coal->rx_max_coalesced_frames_high, + coal->rx_max_coalesced_frames_low, + COALESCE_PENDING_LIMIT_UNIT); + return -EOPNOTSUPP; + } + + if (coal->pkt_rate_low >= coal->pkt_rate_high) { + nicif_err(nic_dev, drv, netdev, + "pkt_rate_high(%u) must more than pkt_rate_low(%u)\n", + coal->pkt_rate_high, + coal->pkt_rate_low); + return -EOPNOTSUPP; + } + + return 0; +} + +int set_hw_coal_param(struct hinic5_nic_dev *nic_dev, + struct hinic5_qp_coalesce_info *intr_coal, u16 queue) +{ + u16 i; + + if (queue == COALESCE_ALL_QUEUE) { + for (i = 0; i < nic_dev->max_qps; i++) + set_queue_coalesce(nic_dev, i, intr_coal); + } else { + if (queue >= nic_dev->q_params.num_qps) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Invalid queue_id: %u\n", queue); + return -EINVAL; + } + set_queue_coalesce(nic_dev, queue, intr_coal); + } + + return 0; +} + +void check_coalesce_align(struct net_device *netdev, + const struct ethtool_coalesce *coal) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + + CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs, COALESCE_TIMER_CFG_UNIT); + CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames, + COALESCE_PENDING_LIMIT_UNIT); + CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs_high, + COALESCE_TIMER_CFG_UNIT); + CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames_high, + COALESCE_PENDING_LIMIT_UNIT); + CHECK_COALESCE_ALIGN(coal, rx_coalesce_usecs_low, + COALESCE_TIMER_CFG_UNIT); + CHECK_COALESCE_ALIGN(coal, rx_max_coalesced_frames_low, + COALESCE_PENDING_LIMIT_UNIT); +} + +int check_coalesce_change(struct net_device *netdev, + u16 queue, const struct ethtool_coalesce *coal) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic5_qp_coalesce_info *ori_intr_coal = NULL; + char obj_str[32] = {0}; + int ret; + + if (queue == COALESCE_ALL_QUEUE) { + ori_intr_coal = &nic_dev->intr_coalesce[0]; + ret = snprintf(obj_str, sizeof(obj_str), "for netdev"); + if (ret < 0) { + nicif_err(nic_dev, drv, netdev, "Copy intr coalesce 
failed.\n"); + return -EINVAL; + } + } else { + ori_intr_coal = &nic_dev->intr_coalesce[queue]; + ret = snprintf(obj_str, sizeof(obj_str), "for queue %u", queue); + if (ret < 0) { + nicif_err(nic_dev, drv, netdev, "Copy intr coalesce failed.\n"); + return -EINVAL; + } + } + CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs, COALESCE_TIMER_CFG_UNIT, + ori_intr_coal->coalesce_timer_cfg, obj_str); + CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames, + COALESCE_PENDING_LIMIT_UNIT, + ori_intr_coal->pending_limt, obj_str); + CHECK_PKT_RATE_CHANGED(coal, pkt_rate_high, + ori_intr_coal->pkt_rate_high, obj_str); + CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs_high, + COALESCE_TIMER_CFG_UNIT, + ori_intr_coal->rx_usecs_high, obj_str); + CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames_high, + COALESCE_PENDING_LIMIT_UNIT, + ori_intr_coal->rx_pending_limt_high, obj_str); + CHECK_PKT_RATE_CHANGED(coal, pkt_rate_low, + ori_intr_coal->pkt_rate_low, obj_str); + CHECK_COALESCE_CHANGED(coal, rx_coalesce_usecs_low, + COALESCE_TIMER_CFG_UNIT, + ori_intr_coal->rx_usecs_low, obj_str); + CHECK_COALESCE_CHANGED(coal, rx_max_coalesced_frames_low, + COALESCE_PENDING_LIMIT_UNIT, + ori_intr_coal->rx_pending_limt_low, obj_str); + + return 0; +} + +void init_intr_coal_params(struct hinic5_qp_coalesce_info *intr_coal, + struct ethtool_coalesce *coal) +{ + intr_coal->tx_coalesce_timer_cfg = + (u8)(coal->tx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT); + intr_coal->tx_pending_limt = (u8)(coal->tx_max_coalesced_frames / + COALESCE_PENDING_LIMIT_UNIT); + + intr_coal->rx_coalesce_timer_cfg = + (u8)(coal->rx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT); + intr_coal->rx_pending_limt = (u8)(coal->rx_max_coalesced_frames / + COALESCE_PENDING_LIMIT_UNIT); + + intr_coal->pkt_rate_high = coal->pkt_rate_high; + intr_coal->rx_usecs_high = + (u8)(coal->rx_coalesce_usecs_high / COALESCE_TIMER_CFG_UNIT); + intr_coal->rx_pending_limt_high = + (u8)(coal->rx_max_coalesced_frames_high / + COALESCE_PENDING_LIMIT_UNIT); 
+ + intr_coal->pkt_rate_low = coal->pkt_rate_low; + intr_coal->rx_usecs_low = + (u8)(coal->rx_coalesce_usecs_low / COALESCE_TIMER_CFG_UNIT); + intr_coal->rx_pending_limt_low = + (u8)(coal->rx_max_coalesced_frames_low / + COALESCE_PENDING_LIMIT_UNIT); +} + +int set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, u16 queue) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic5_qp_coalesce_info intr_coal; + u32 last_adaptive_rx; + int err = 0; + + err = is_coalesce_legal(netdev, coal); + if (err != 0) + return err; + + check_coalesce_align(netdev, coal); + + err = check_coalesce_change(netdev, queue, coal); + if (err != 0) + return err; + + memset(&intr_coal, 0, sizeof(intr_coal)); + init_intr_coal_params(&intr_coal, coal); + + last_adaptive_rx = nic_dev->adaptive_rx_coal; + nic_dev->adaptive_rx_coal = coal->use_adaptive_rx_coalesce; + + /* coalesce timer or pending set to zero will disable coalesce */ + if (nic_dev->adaptive_rx_coal == 0 && + (intr_coal.coalesce_timer_cfg == 0 || intr_coal.pending_limt == 0)) + nicif_warn(nic_dev, drv, netdev, "Coalesce will be disabled\n"); + + /* ensure coalesce parameters will not be changed in auto + * moderation work + */ + if (HINIC5_CHANNEL_RES_VALID(nic_dev)) { + if (nic_dev->adaptive_rx_coal == 0) + cancel_delayed_work_sync(&nic_dev->moderation_task); + else if (last_adaptive_rx == 0) + queue_delayed_work(nic_dev->workq, &nic_dev->moderation_task, + HINIC5_MODERATONE_DELAY); + } + + return set_hw_coal_param(nic_dev, &intr_coal, queue); +} diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_coalesce.h b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_coalesce.h new file mode 100644 index 00000000..d81bccce --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_coalesce.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 
2021. All rights reserved. */ + +#ifndef HINIC5_ETHTOOL_COALESCE_H +#define HINIC5_ETHTOOL_COALESCE_H + +#include <linux/types.h> +#include <linux/netdevice.h> +#include <linux/ethtool.h> + +struct hinic5_nic_dev; +struct hinic5_qp_coalesce_info; + +/* Coalesce configuration functions */ +int get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal, u16 queue); +int set_queue_coalesce(struct hinic5_nic_dev *nic_dev, u16 q_id, + const struct hinic5_qp_coalesce_info *coal); +int is_coalesce_exceed_limit(struct net_device *netdev, + const struct ethtool_coalesce *coal); +void tmp_coal_init(struct ethtool_coalesce *tmp_coal, const struct ethtool_coalesce *coal); +int is_coalesce_legal(struct net_device *netdev, const struct ethtool_coalesce *coal); +int set_hw_coal_param(struct hinic5_nic_dev *nic_dev, + struct hinic5_qp_coalesce_info *intr_coal, u16 queue); +void check_coalesce_align(struct net_device *netdev, const struct ethtool_coalesce *coal); +int check_coalesce_change(struct net_device *netdev, u16 queue, + const struct ethtool_coalesce *coal); +void init_intr_coal_params(struct hinic5_qp_coalesce_info *intr_coal, + struct ethtool_coalesce *coal); +int set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal, u16 queue); + +#endif /* HINIC5_ETHTOOL_COALESCE_H */ diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_lb_test.c b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_lb_test.c new file mode 100644 index 00000000..4bc22abc --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_lb_test.c @@ -0,0 +1,241 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include 
<linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/vmalloc.h> + +#include "drv_nic_api.h" +#include "ossl_knl.h" +#include "hinic5_hw.h" +#include "hinic5_crm.h" +#include "hinic5_nic_dev.h" +#include "hinic5_tx.h" +#include "hinic5_rx.h" +#include "hinic5_ethtool.h" +#include "hinic5_ethtool_lb_test.h" + +void hinic5_run_lp_init_data(struct ethhdr *eth_hdr, struct sk_buff *skb_tmp, + const struct hinic5_nic_dev *nic_dev) +{ + u32 i; + u8 *test_data = NULL; + + eth_hdr = __skb_put(skb_tmp, ETH_HLEN); + eth_hdr->h_proto = htons(ETH_P_ARP); + ether_addr_copy(eth_hdr->h_dest, nic_dev->netdev->dev_addr); + eth_zero_addr(eth_hdr->h_source); + skb_reset_mac_header(skb_tmp); + + test_data = __skb_put(skb_tmp, LP_PKT_LEN - ETH_HLEN); + for (i = 0; i < LP_PKT_LEN - ETH_HLEN; i++) + test_data[i] = i & 0xFF; + + skb_tmp->queue_mapping = 0; + skb_tmp->dev = nic_dev->netdev; + skb_tmp->protocol = htons(ETH_P_ARP); +} + +int hinic5_run_lp_test(struct hinic5_nic_dev *nic_dev, u32 test_time) +{ + u8 *lb_test_rx_buf = nic_dev->lb_test_rx_buf; + struct net_device *netdev = nic_dev->netdev; + u32 cnt = test_time * TEST_TIME_MULTIPLE; + struct sk_buff *skb_tmp = NULL; + struct ethhdr *eth_hdr = NULL; + struct sk_buff *skb = NULL; + u32 i; + u8 j; + + skb_tmp = alloc_skb(LP_PKT_LEN, GFP_ATOMIC); + if (!skb_tmp) + return -ENOMEM; + + hinic5_run_lp_init_data(eth_hdr, skb_tmp, nic_dev); + + for (i = 0; i < cnt; i++) { + nic_dev->lb_test_rx_idx = 0; + memset(lb_test_rx_buf, 0, LP_PKT_CNT * LP_PKT_LEN); + + for (j = 0; j < LP_PKT_CNT; j++) { + skb = pskb_copy(skb_tmp, GFP_ATOMIC); + if (!skb) { + dev_kfree_skb_any(skb_tmp); + nicif_err(nic_dev, drv, netdev, + "Copy skb failed for loopback test\n"); + return -ENOMEM; + } + + /* mark index for every pkt */ + skb->data[LP_PKT_LEN - 1] = j; + + if (hinic5_lb_xmit_frame(skb, netdev) != NETDEV_TX_OK) { + dev_kfree_skb_any(skb); + dev_kfree_skb_any(skb_tmp); + nicif_err(nic_dev, drv, netdev, + "Xmit pkt failed for loopback 
test\n"); + return -EBUSY; + } + } + + /* wait till all pkts received to RX buffer */ + msleep(HINIC5_WAIT_PKTS_TO_RX_BUFFER); + + for (j = 0; j < LP_PKT_CNT; j++) { + if ((memcmp((lb_test_rx_buf + (u16)(j * LP_PKT_LEN)), + skb_tmp->data, (LP_PKT_LEN - 1)) != 0) || + (*(lb_test_rx_buf + (u16)((j * LP_PKT_LEN) + (LP_PKT_LEN - 1))) != j)) { + dev_kfree_skb_any(skb_tmp); + nicif_err(nic_dev, drv, netdev, + "Compare pkt failed in loopback test(index=0x%02x, data[%d]=0x%02x)\n", + (j + (i * LP_PKT_CNT)), (LP_PKT_LEN - 1), + *((lb_test_rx_buf + ((u64)j * LP_PKT_LEN)) + + (LP_PKT_LEN - 1))); + return -EIO; + } + } + } + + dev_kfree_skb_any(skb_tmp); + nicif_info(nic_dev, drv, netdev, "Loopback test succeed.\n"); + return 0; +} + +int do_lp_test(struct hinic5_nic_dev *nic_dev, u32 *flags, u32 test_time, + enum diag_test_index *test_index) +{ + struct net_device *netdev = nic_dev->netdev; + u8 *lb_test_rx_buf = NULL; + u16 cur_veb_offload = 0; + int err = 0; + u16 glb_func_id; + + if (HINIC5_SUPPORT_FEATURE(nic_dev->hwdev, VEB_OFFLOAD)) { + err = hinic5_get_veb_offload(nic_dev->hwdev, &cur_veb_offload); + if (err != 0) + goto err_out; + + if (cur_veb_offload != 0) { + err = hinic5_set_veb_offload(nic_dev->hwdev, 0); + if (err != 0) + goto err_out; + } + } + + if ((*flags & ETH_TEST_FL_EXTERNAL_LB) == 0) { + *test_index = INTERNAL_LP_TEST; + if (hinic5_set_loopback_mode(nic_dev->hwdev, + HINIC5_INTERNAL_LP_MODE, true)) { + nicif_err(nic_dev, drv, netdev, + "Failed to set port loopback mode before loopback test\n"); + err = -EFAULT; + goto restore_veb_offload; + } + + glb_func_id = hinic5_global_func_id(nic_dev->hwdev); + + err = hinic5_set_vport_enable(nic_dev->hwdev, glb_func_id, false, + HINIC5_CHANNEL_NIC); + if (err != 0) { + nicif_err(nic_dev, drv, netdev, "Failed to disable vport\n"); + goto restore_veb_offload; + } + + msleep(nic_dev->timeout.wait_flush_qp_res_timeout); + + err = hinic5_set_vport_enable(nic_dev->hwdev, glb_func_id, true, + HINIC5_CHANNEL_NIC); + if 
(err != 0) { + nicif_err(nic_dev, drv, netdev, "Failed to enable vport\n"); + goto restore_veb_offload; + } + } else { + *test_index = EXTERNAL_LP_TEST; + } + + lb_test_rx_buf = vmalloc(LP_PKT_CNT * LP_PKT_LEN); + if (!lb_test_rx_buf) { + err = -ENOMEM; + } else { + nic_dev->lb_test_rx_buf = lb_test_rx_buf; + nic_dev->lb_pkt_len = LP_PKT_LEN; + set_bit(HINIC5_LP_TEST, &nic_dev->flags); + + if (hinic5_run_lp_test(nic_dev, test_time) != 0) + err = -EFAULT; + + clear_bit(HINIC5_LP_TEST, &nic_dev->flags); + msleep(HINIC5_WAIT_CLEAR_LP_TEST); + vfree(lb_test_rx_buf); + nic_dev->lb_test_rx_buf = NULL; + } + + if ((*flags & ETH_TEST_FL_EXTERNAL_LB) == 0) { + if (hinic5_set_loopback_mode(nic_dev->hwdev, + HINIC5_INTERNAL_LP_MODE, false)) { + nicif_err(nic_dev, drv, netdev, + "Failed to cancel port loopback mode after loopback test\n"); + err = -EFAULT; + } + } else { + *flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; + } + +restore_veb_offload: + if (cur_veb_offload != 0) + err = hinic5_set_veb_offload(nic_dev->hwdev, cur_veb_offload); +err_out: + return err; +} + +void hinic5_lp_test(struct net_device *netdev, struct ethtool_test *eth_test, + u64 *data, u32 test_time) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + enum diag_test_index test_index = 0; + u8 link_status = 0; + int err; + u32 test_time_real = test_time; + + /* don't support loopback test when netdev is closed. 
*/ + if (test_bit(HINIC5_INTF_UP, &nic_dev->flags) == 0) { + nicif_err(nic_dev, drv, netdev, + "Do not support loopback test when netdev is closed\n"); + eth_test->flags |= ETH_TEST_FL_FAILED; + data[PORT_DOWN_ERR_IDX] = 1; + return; + } + if (test_time_real == 0) + test_time_real = LP_DEFAULT_TIME; + + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + err = do_lp_test(nic_dev, ð_test->flags, test_time_real, &test_index); + if (err != 0) { + eth_test->flags |= ETH_TEST_FL_FAILED; + data[test_index] = 1; + } + + netif_tx_wake_all_queues(netdev); + + err = hinic5_get_link_state(nic_dev->hwdev, &link_status); + if (err == 0 && link_status != 0) + netif_carrier_on(netdev); +} + +void hinic5_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + memset(data, 0, DIAG_TEST_MAX * sizeof(u64)); + + hinic5_lp_test(netdev, eth_test, data, 0); +} diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_lb_test.h b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_lb_test.h new file mode 100644 index 00000000..a42405f6 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_lb_test.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved. 
*/ + +#ifndef HINIC5_ETHTOOL_LB_TEST_H +#define HINIC5_ETHTOOL_LB_TEST_H + +#include <linux/types.h> +#include <linux/netdevice.h> +#include <linux/ethtool.h> + +#define PORT_DOWN_ERR_IDX 0 +#define LP_DEFAULT_TIME 5 /* seconds */ +#define TEST_TIME_MULTIPLE 5 +#define HINIC5_INTERNAL_LP_MODE 5 + +enum diag_test_index { + INTERNAL_LP_TEST = 0, + EXTERNAL_LP_TEST = 1, + DIAG_TEST_MAX = 2, +}; + +struct hinic5_nic_dev; + +/* Loopback test functions */ +void hinic5_run_lp_init_data(struct ethhdr *eth_hdr, struct sk_buff *skb_tmp, + const struct hinic5_nic_dev *nic_dev); +int hinic5_run_lp_test(struct hinic5_nic_dev *nic_dev, u32 test_time); +int do_lp_test(struct hinic5_nic_dev *nic_dev, u32 *flags, u32 test_time, + enum diag_test_index *test_index); +void hinic5_lp_test(struct net_device *netdev, struct ethtool_test *eth_test, + u64 *data, u32 test_time); +void hinic5_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, + u64 *data); + +#endif /* HINIC5_ETHTOOL_LB_TEST_H */ diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_link_stats.c b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_link_stats.c new file mode 100644 index 00000000..24abcf8b --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_link_stats.c @@ -0,0 +1,637 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/if_vlan.h> +#include <linux/ethtool.h> + +#include "ossl_knl.h" +#include "hinic5_hw.h" +#include "hinic5_crm.h" +#include "hinic5_mt.h" +#include "hinic5_nic_cfg.h" +#include "hinic5_nic_dev.h" +#include "hinic5_tx.h" +#include 
"hinic5_rx.h" +#include "hinic5_ethtool_link_stats.h" + +#define HINIC_ETHTOOL_FEC_INFO_LEN 6 +#define HINIC_SUPPORTED_FEC_CMD 0 +#define HINIC_ADVERTISED_FEC_CMD 1 + +struct hinic5_ethtool_fec { + u8 hinic_fec_offset; + u8 ethtool_bit_offset; +}; + +static struct hinic5_ethtool_fec hinic5_ethtool_fec_info[HINIC_ETHTOOL_FEC_INFO_LEN] = { + {PORT_FEC_NOT_SET, 0xFF}, /* The ethtool does not have the + * corresponding enumeration variable + */ + {PORT_FEC_RSFEC, 0x32}, /* ETHTOOL_LINK_MODE_FEC_RS_BIT */ + {PORT_FEC_BASEFEC, 0x33}, /* ETHTOOL_LINK_MODE_FEC_BASER_BIT */ + {PORT_FEC_NOFEC, 0x31}, /* ETHTOOL_LINK_MODE_FEC_NONE_BIT */ + {PORT_FEC_LLRSFEC, 0x4A}, /* ETHTOOL_LINK_MODE_FEC_LLRS_BIT: + * Available only in later versions + */ + {PORT_FEC_AUTO, 0XFF} /* The ethtool does not have the + * corresponding enumeration variable + */ +}; + +static const u32 hinic5_mag_link_mode_ge[] = { + ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + ETHTOOL_LINK_MODE_1000baseX_Full_BIT, +}; + +static const u32 hinic5_mag_link_mode_10ge_base_r[] = { + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, +}; + +static const u32 hinic5_mag_link_mode_25ge_base_r[] = { + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, +}; + +static const u32 hinic5_mag_link_mode_40ge_base_r4[] = { + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, +}; + +static const u32 hinic5_mag_link_mode_50ge_base_r[] = { + ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, +}; + +static const u32 
hinic5_mag_link_mode_50ge_base_r2[] = { + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, +}; + +static const u32 hinic5_mag_link_mode_100ge_base_r[] = { + ETHTOOL_LINK_MODE_100000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR_Full_BIT, +}; + +static const u32 hinic5_mag_link_mode_100ge_base_r2[] = { + ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, +}; + +static const u32 hinic5_mag_link_mode_100ge_base_r4[] = { + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, +}; + +static const u32 hinic5_mag_link_mode_200ge_base_r2[] = { + ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT, + ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT, +}; + +static const u32 hinic5_mag_link_mode_200ge_base_r4[] = { + ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, +}; + +static const u32 hinic5_mag_link_mode_400ge_base_r4[] = { + ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT, +}; + +static const u32 hinic5_mag_link_mode_800ge_base_r8[] = { + ETHTOOL_LINK_MODE_800000baseKR8_Full_BIT, + ETHTOOL_LINK_MODE_800000baseSR8_Full_BIT, + ETHTOOL_LINK_MODE_800000baseCR8_Full_BIT, +}; + +static const struct hw2ethtool_link_mode + hw2ethtool_link_mode_table[LINK_MODE_MAX_NUMBERS] = { + [LINK_MODE_GE] = { + .link_mode_bit_arr = hinic5_mag_link_mode_ge, + .arr_size = ARRAY_LEN(hinic5_mag_link_mode_ge), + .speed = SPEED_1000, + }, + [LINK_MODE_10GE_BASE_R] = { + .link_mode_bit_arr = hinic5_mag_link_mode_10ge_base_r, + .arr_size = ARRAY_LEN(hinic5_mag_link_mode_10ge_base_r), + .speed = 
SPEED_10000, + }, + [LINK_MODE_25GE_BASE_R] = { + .link_mode_bit_arr = hinic5_mag_link_mode_25ge_base_r, + .arr_size = ARRAY_LEN(hinic5_mag_link_mode_25ge_base_r), + .speed = SPEED_25000, + }, + [LINK_MODE_40GE_BASE_R4] = { + .link_mode_bit_arr = hinic5_mag_link_mode_40ge_base_r4, + .arr_size = ARRAY_LEN(hinic5_mag_link_mode_40ge_base_r4), + .speed = SPEED_40000, + }, + [LINK_MODE_50GE_BASE_R] = { + .link_mode_bit_arr = hinic5_mag_link_mode_50ge_base_r, + .arr_size = ARRAY_LEN(hinic5_mag_link_mode_50ge_base_r), + .speed = SPEED_50000, + }, + [LINK_MODE_50GE_BASE_R2] = { + .link_mode_bit_arr = hinic5_mag_link_mode_50ge_base_r2, + .arr_size = ARRAY_LEN(hinic5_mag_link_mode_50ge_base_r2), + .speed = SPEED_50000, + }, + [LINK_MODE_100GE_BASE_R] = { + .link_mode_bit_arr = hinic5_mag_link_mode_100ge_base_r, + .arr_size = ARRAY_LEN(hinic5_mag_link_mode_100ge_base_r), + .speed = SPEED_100000, + }, + [LINK_MODE_100GE_BASE_R2] = { + .link_mode_bit_arr = hinic5_mag_link_mode_100ge_base_r2, + .arr_size = ARRAY_LEN(hinic5_mag_link_mode_100ge_base_r2), + .speed = SPEED_100000, + }, + [LINK_MODE_100GE_BASE_R4] = { + .link_mode_bit_arr = hinic5_mag_link_mode_100ge_base_r4, + .arr_size = ARRAY_LEN(hinic5_mag_link_mode_100ge_base_r4), + .speed = SPEED_100000, + }, + [LINK_MODE_200GE_BASE_R2] = { + .link_mode_bit_arr = hinic5_mag_link_mode_200ge_base_r2, + .arr_size = ARRAY_LEN(hinic5_mag_link_mode_200ge_base_r2), + .speed = SPEED_200000, + }, + [LINK_MODE_200GE_BASE_R4] = { + .link_mode_bit_arr = hinic5_mag_link_mode_200ge_base_r4, + .arr_size = ARRAY_LEN(hinic5_mag_link_mode_200ge_base_r4), + .speed = SPEED_200000, + }, + [LINK_MODE_400GE_BASE_R4] = { + .link_mode_bit_arr = hinic5_mag_link_mode_400ge_base_r4, + .arr_size = ARRAY_LEN(hinic5_mag_link_mode_400ge_base_r4), + .speed = SPEED_400000, + }, + [LINK_MODE_800GE_BASE_R8] = { + .link_mode_bit_arr = hinic5_mag_link_mode_800ge_base_r8, + .arr_size = ARRAY_LEN(hinic5_mag_link_mode_800ge_base_r8), + .speed = SPEED_800000, + }, +}; 
+ +static void ethtool_add_supported_speed_link_mode(struct cmd_link_settings *link_settings, + u32 mode) +{ + u32 i; + + for (i = 0; i < hw2ethtool_link_mode_table[mode].arr_size; i++) { + if (hw2ethtool_link_mode_table[mode].link_mode_bit_arr[i] >= + __ETHTOOL_LINK_MODE_MASK_NBITS) + continue; + set_bit(hw2ethtool_link_mode_table[mode].link_mode_bit_arr[i], + link_settings->supported); + } +} + +static void ethtool_add_advertised_speed_link_mode(struct cmd_link_settings *link_settings, + u32 mode) +{ + u32 i; + + for (i = 0; i < hw2ethtool_link_mode_table[mode].arr_size; i++) { + if (hw2ethtool_link_mode_table[mode].link_mode_bit_arr[i] >= + __ETHTOOL_LINK_MODE_MASK_NBITS) + continue; + set_bit(hw2ethtool_link_mode_table[mode].link_mode_bit_arr[i], + link_settings->advertising); + } +} + +/* Related to enum mag_cmd_port_speed */ +static u32 hw_to_ethtool_speed[] = { + (u32)SPEED_UNKNOWN, SPEED_10, SPEED_100, SPEED_1000, SPEED_10000, + SPEED_25000, SPEED_40000, SPEED_50000, SPEED_100000, SPEED_200000, + SPEED_400000, SPEED_800000, +}; + +static int hinic5_ethtool_to_hw_speed_level(u32 speed) +{ + int i; + + for (i = 0; i < ARRAY_LEN(hw_to_ethtool_speed); i++) { + if (hw_to_ethtool_speed[i] == speed) + break; + } + + return i; +} + +static void hinic5_add_ethtool_link_mode(struct cmd_link_settings *link_settings, + u32 hw_link_mode, u32 name) +{ + u32 link_mode; + + for (link_mode = 0; link_mode < LINK_MODE_MAX_NUMBERS; link_mode++) { + if ((hw_link_mode & BIT(link_mode)) != 0) { + if (name == GET_SUPPORTED_MODE) + ethtool_add_supported_speed_link_mode(link_settings, link_mode); + else + ethtool_add_advertised_speed_link_mode(link_settings, link_mode); + } + } +} + +static int hinic5_link_speed_set(struct hinic5_nic_dev *nic_dev, + struct cmd_link_settings *link_settings, + struct mag_port_info *port_info) +{ + u8 link_state = 0; + int err; + + if (port_info->supported_mode != LINK_MODE_UNKNOWN) + hinic5_add_ethtool_link_mode(link_settings, + 
port_info->supported_mode, + GET_SUPPORTED_MODE); + if (port_info->advertised_mode != LINK_MODE_UNKNOWN) + hinic5_add_ethtool_link_mode(link_settings, + port_info->advertised_mode, + GET_ADVERTISED_MODE); + + err = hinic5_get_link_state(nic_dev->hwdev, &link_state); + if (err == 0 && link_state != 0) { + link_settings->speed = + port_info->speed < ARRAY_LEN(hw_to_ethtool_speed) ? + hw_to_ethtool_speed[port_info->speed] : + (u32)SPEED_UNKNOWN; + + link_settings->duplex = port_info->duplex; + } else { + link_settings->speed = (u32)SPEED_UNKNOWN; + link_settings->duplex = DUPLEX_UNKNOWN; + } + + return 0; +} + +static void hinic5_link_port_type(struct cmd_link_settings *link_settings, + u8 port_type) +{ + switch (port_type) { + case MAG_CMD_WIRE_TYPE_ELECTRIC: + ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, TP); + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, TP); + link_settings->port = PORT_TP; + break; + + case MAG_CMD_WIRE_TYPE_AOC: + case MAG_CMD_WIRE_TYPE_MM: + case MAG_CMD_WIRE_TYPE_SM: + ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE); + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE); + link_settings->port = PORT_FIBRE; + break; + + case MAG_CMD_WIRE_TYPE_COPPER: + ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, FIBRE); + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, FIBRE); + link_settings->port = PORT_DA; + break; + + case MAG_CMD_WIRE_TYPE_BACKPLANE: + ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Backplane); + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Backplane); + link_settings->port = PORT_NONE; + break; + + default: + link_settings->port = PORT_OTHER; + break; + } +} + +static int get_link_pause_settings(struct hinic5_nic_dev *nic_dev, + struct cmd_link_settings *link_settings) +{ + struct nic_pause_config nic_pause = {0}; + int err; + + err = hinic5_get_pause_info(nic_dev->hwdev, &nic_pause); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to get pauseparam from hw\n"); + return err; + } + + 
ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Pause); + if (nic_pause.rx_pause != 0 && nic_pause.tx_pause != 0) { + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Pause); + } else if (nic_pause.tx_pause != 0) { + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Asym_Pause); + } else if (nic_pause.rx_pause != 0) { + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Pause); + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Asym_Pause); + } + + return 0; +} + +static bool is_bit_offset_defined(u8 bit_offset) +{ + if (bit_offset < __ETHTOOL_LINK_MODE_MASK_NBITS) + return true; + return false; +} + +static void ethtool_add_supported_advertised_fec(struct cmd_link_settings *link_settings, + u32 fec, u8 cmd) +{ + u8 i; + + for (i = 0; i < HINIC_ETHTOOL_FEC_INFO_LEN; i++) { + if ((fec & BIT(hinic5_ethtool_fec_info[i].hinic_fec_offset)) == 0) + continue; + if (is_bit_offset_defined(hinic5_ethtool_fec_info[i].ethtool_bit_offset) && + cmd == HINIC_ADVERTISED_FEC_CMD) { + set_bit(hinic5_ethtool_fec_info[i].ethtool_bit_offset, + link_settings->advertising); + return; /* There can be only one advertised fec mode. 
*/ + } + if (is_bit_offset_defined(hinic5_ethtool_fec_info[i].ethtool_bit_offset) && + cmd == HINIC_SUPPORTED_FEC_CMD) + set_bit(hinic5_ethtool_fec_info[i].ethtool_bit_offset, + link_settings->supported); + } +} + +static void hinic5_link_fec_type(struct cmd_link_settings *link_settings, + u32 fec, u32 supported_fec) +{ + ethtool_add_supported_advertised_fec(link_settings, supported_fec, HINIC_SUPPORTED_FEC_CMD); + ethtool_add_supported_advertised_fec(link_settings, fec, HINIC_ADVERTISED_FEC_CMD); +} + +static int get_link_settings(struct net_device *netdev, + struct cmd_link_settings *link_settings) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + struct mag_port_info port_info = {0}; + int err; + + err = hinic5_get_port_info(nic_dev->hwdev, &port_info, + HINIC5_CHANNEL_NIC); + if (err != 0) { + nicif_err(nic_dev, drv, netdev, "Failed to get port info\n"); + return err; + } + + err = hinic5_link_speed_set(nic_dev, link_settings, &port_info); + if (err != 0) + return err; + + hinic5_link_port_type(link_settings, port_info.port_type); + + /* port_info.fec is bit offset, value is BIT(port_info.fec); + * but port_info.supported_fec_mode is bit value + */ + hinic5_link_fec_type(link_settings, BIT(port_info.fec), port_info.supported_fec_mode); + + link_settings->autoneg = port_info.autoneg_state == PORT_CFG_AN_ON ? 
+ AUTONEG_ENABLE : AUTONEG_DISABLE; + if (port_info.autoneg_cap != 0) + ETHTOOL_ADD_SUPPORTED_LINK_MODE(link_settings, Autoneg); + if (port_info.autoneg_state == PORT_CFG_AN_ON) + ETHTOOL_ADD_ADVERTISED_LINK_MODE(link_settings, Autoneg); + + if (!HINIC5_FUNC_IS_VF(nic_dev->hwdev)) + err = get_link_pause_settings(nic_dev, link_settings); + + return err; +} + +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef XENSERVER_HAVE_NEW_ETHTOOL_OPS +int hinic5_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *link_settings) +{ + struct cmd_link_settings settings = { { 0 } }; + struct ethtool_link_settings *base = &link_settings->base; + int err; + + ethtool_link_ksettings_zero_link_mode(link_settings, supported); + ethtool_link_ksettings_zero_link_mode(link_settings, advertising); + + err = get_link_settings(netdev, &settings); + if (err != 0) + return err; + + bitmap_copy(link_settings->link_modes.supported, settings.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_copy(link_settings->link_modes.advertising, settings.advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS); + + base->autoneg = settings.autoneg; + base->speed = settings.speed; + base->duplex = settings.duplex; + base->port = settings.port; + + return 0; +} +#endif +#endif + +static bool hinic5_is_support_speed(u32 supported_link, u32 speed) +{ + u32 link_mode; + + for (link_mode = 0; link_mode < LINK_MODE_MAX_NUMBERS; link_mode++) { + if ((supported_link & BIT(link_mode)) == 0) + continue; + + if (hw2ethtool_link_mode_table[link_mode].speed == speed) + return true; + } + + return false; +} + +static int hinic5_is_speed_legal(struct hinic5_nic_dev *nic_dev, + struct mag_port_info *port_info, u32 speed) +{ + struct net_device *netdev = nic_dev->netdev; + int speed_level = 0; + + if (port_info->supported_mode == LINK_MODE_UNKNOWN || + port_info->advertised_mode == LINK_MODE_UNKNOWN) { + nicif_err(nic_dev, drv, netdev, "Unknown supported link modes\n"); + return -EAGAIN; + } + + speed_level = 
hinic5_ethtool_to_hw_speed_level(speed); + if (speed_level >= PORT_SPEED_UNKNOWN || + !hinic5_is_support_speed(port_info->supported_mode, speed)) { + nicif_err(nic_dev, drv, netdev, + "Not supported speed: %u\n", speed); + return -EINVAL; + } + + return 0; +} + +static int get_link_settings_type(struct hinic5_nic_dev *nic_dev, + u8 autoneg, u32 speed, u32 *set_settings) +{ + struct mag_port_info port_info = {0}; + int err; + + err = hinic5_get_port_info(nic_dev->hwdev, &port_info, + HINIC5_CHANNEL_NIC); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to get current settings\n"); + return -EAGAIN; + } + + /* Always set autonegotiation */ + if (port_info.autoneg_cap != 0) + *set_settings |= HILINK_LINK_SET_AUTONEG; + + if (autoneg == AUTONEG_ENABLE) { + if (port_info.autoneg_cap == 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Not support autoneg\n"); + return -EOPNOTSUPP; + } + } else if (speed != (u32)SPEED_UNKNOWN) { + /* Set speed only when autoneg is disabled */ + err = hinic5_is_speed_legal(nic_dev, &port_info, speed); + if (err != 0) + return err; + + *set_settings |= HILINK_LINK_SET_SPEED; + } else { + nicif_err(nic_dev, drv, nic_dev->netdev, "Need to set speed when autoneg is off\n"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int hinic5_set_settings_to_hw(struct hinic5_nic_dev *nic_dev, + u32 set_settings, u8 autoneg, u32 speed) +{ + struct net_device *netdev = nic_dev->netdev; + struct hinic5_link_ksettings settings = {0}; + int speed_level = 0; + char set_link_str[128] = {0}; + char link_info[128] = {0}; + int err = 0; + + err = snprintf(link_info, sizeof(link_info), "%s", + (bool)(set_settings & HILINK_LINK_SET_AUTONEG) ? + ((bool)autoneg ? 
"autong enable " : "autong disable ") : ""); + if (err < 0) + return -EINVAL; + + if ((set_settings & HILINK_LINK_SET_SPEED) != 0) { + speed_level = hinic5_ethtool_to_hw_speed_level(speed); + err = snprintf(set_link_str, sizeof(set_link_str), + "%s speed %u ", link_info, speed); + if (err < 0) + return -EINVAL; + } + + settings.valid_bitmap = set_settings; + settings.autoneg = (bool)autoneg ? PORT_CFG_AN_ON : PORT_CFG_AN_OFF; + settings.speed = (u8)speed_level; + + err = hinic5_set_link_settings(nic_dev->hwdev, &settings); + if (err != 0) + nicif_err(nic_dev, drv, netdev, "Set %s failed\n", + set_link_str); + else + nicif_info(nic_dev, drv, netdev, "Set %s success\n", + set_link_str); + + return err; +} + +static int set_link_settings(struct net_device *netdev, u8 autoneg, u32 speed) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + u32 set_settings = 0; + int err = 0; + + err = get_link_settings_type(nic_dev, autoneg, speed, &set_settings); + if (err != 0) + return err; + + if (set_settings != 0) + err = hinic5_set_settings_to_hw(nic_dev, set_settings, + autoneg, speed); + else + nicif_info(nic_dev, drv, netdev, "Nothing changed, exiting without setting anything\n"); + + return err; +} + +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef XENSERVER_HAVE_NEW_ETHTOOL_OPS +int hinic5_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *link_settings) +{ + /* Only support to set autoneg and speed */ + return set_link_settings(netdev, link_settings->base.autoneg, + link_settings->base.speed); +} +#endif +#endif + +#ifndef HAVE_NEW_ETHTOOL_LINK_SETTINGS_ONLY +int hinic5_get_settings(struct net_device *netdev, struct ethtool_cmd *ep) +{ + struct cmd_link_settings settings = { { 0 } }; + int err; + + err = get_link_settings(netdev, &settings); + if (err != 0) + return err; + + ep->supported = settings.supported[0] & ((u32)~0); + ep->advertising = settings.advertising[0] & ((u32)~0); + + ep->autoneg = settings.autoneg; + 
ethtool_cmd_speed_set(ep, settings.speed); + ep->duplex = settings.duplex; + ep->port = settings.port; + ep->transceiver = XCVR_INTERNAL; + + return 0; +} + +int hinic5_set_settings(struct net_device *netdev, + struct ethtool_cmd *link_settings) +{ + /* Only support to set autoneg and speed */ + return set_link_settings(netdev, link_settings->autoneg, + ethtool_cmd_speed(link_settings)); +} +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_link_stats.h b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_link_stats.h new file mode 100644 index 00000000..f063532d --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_link_stats.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved. */ + +#ifndef HINIC5_ETHTOOL_LINK_STATS_H +#define HINIC5_ETHTOOL_LINK_STATS_H + +#include <linux/types.h> +#include <linux/ethtool.h> + +struct hw2ethtool_link_mode { + const u32 *link_mode_bit_arr; + u32 arr_size; + u32 speed; +}; + +#define GET_SUPPORTED_MODE 0 +#define GET_ADVERTISED_MODE 1 + +struct cmd_link_settings { + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); + + u32 speed; + u8 duplex; + u8 port; + u8 autoneg; +}; + +#define ETHTOOL_ADD_SUPPORTED_LINK_MODE(ecmd, mode) \ + set_bit(ETHTOOL_LINK_MODE_##mode##_BIT, (ecmd)->supported) +#define ETHTOOL_ADD_ADVERTISED_LINK_MODE(ecmd, mode) \ + set_bit(ETHTOOL_LINK_MODE_##mode##_BIT, (ecmd)->advertising) + +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_port_stats.c b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_port_stats.c new file mode 100644 index 00000000..6eea3e76 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_port_stats.c @@ -0,0 +1,763 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/kernel.h> +#include <linux/if_vlan.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/ethtool.h> + +#include "ossl_knl.h" +#include "nic_pub_cmd.h" +#include "hinic5_hw.h" +#include "hinic5_crm.h" +#include "hinic5_nic_cfg.h" +#include "hinic5_mag_cfg.h" +#include "hinic5_nic_dev.h" +#include "hinic5_tx.h" +#include "hinic5_rx.h" +#include "hinic5_xdp.h" +#include "hinic5_ethtool_port_stats.h" + +#define HINIC5_NETDEV_STAT(_stat_item) { \ + .name = #_stat_item, \ + .size = FIELD_SIZEOF(struct rtnl_link_stats64, _stat_item), \ + .offset = offsetof(struct rtnl_link_stats64, _stat_item) \ +} + +static struct hinic5_stats hinic5_netdev_stats[] = { + HINIC5_NETDEV_STAT(rx_packets), + HINIC5_NETDEV_STAT(tx_packets), + HINIC5_NETDEV_STAT(rx_bytes), + HINIC5_NETDEV_STAT(tx_bytes), + HINIC5_NETDEV_STAT(rx_errors), + HINIC5_NETDEV_STAT(tx_errors), + HINIC5_NETDEV_STAT(rx_dropped), + HINIC5_NETDEV_STAT(tx_dropped), + HINIC5_NETDEV_STAT(multicast), + HINIC5_NETDEV_STAT(collisions), + HINIC5_NETDEV_STAT(rx_length_errors), + HINIC5_NETDEV_STAT(rx_over_errors), + HINIC5_NETDEV_STAT(rx_crc_errors), + HINIC5_NETDEV_STAT(rx_frame_errors), + HINIC5_NETDEV_STAT(rx_fifo_errors), + HINIC5_NETDEV_STAT(rx_missed_errors), + HINIC5_NETDEV_STAT(tx_aborted_errors), + HINIC5_NETDEV_STAT(tx_carrier_errors), + HINIC5_NETDEV_STAT(tx_fifo_errors), + HINIC5_NETDEV_STAT(tx_heartbeat_errors), +}; + +static struct hinic5_stats hinic5_port_link_stat[] = { + HINIC5_PORT_LINK_STAT(link_down_events_phy), +}; + +static struct hinic5_stats hinic5_nic_dev_stats[] = { + HINIC5_NIC_STAT(netdev_tx_timeout), +}; + +static struct hinic5_stats hinic5_nic_dev_stats_extern[] = { + 
HINIC5_NIC_STAT(tx_carrier_off_drop), + HINIC5_NIC_STAT(tx_invalid_qid), + HINIC5_NIC_STAT(rsvd1), + HINIC5_NIC_STAT(rsvd2), +}; + +static struct hinic5_stats hinic5_rx_queue_stats[] = { + HINIC5_RXQ_STAT(packets), + HINIC5_RXQ_STAT(bytes), + HINIC5_RXQ_STAT(errors), + HINIC5_RXQ_STAT(csum_errors), + HINIC5_RXQ_STAT(other_errors), + HINIC5_RXQ_STAT(dropped), +#ifdef HAVE_XDP_SUPPORT + HINIC5_RXQ_STAT(xdp_dropped), + HINIC5_RXQ_STAT(xdp_redirected), +#endif + HINIC5_RXQ_STAT(rx_buf_empty), +}; + +static struct hinic5_stats hinic5_rx_queue_stats_extern[] = { + HINIC5_RXQ_STAT(alloc_skb_err), + HINIC5_RXQ_STAT(alloc_rx_buf_err), +#ifdef HAVE_XDP_SUPPORT + HINIC5_RXQ_STAT(xdp_large_pkt), +#endif + HINIC5_RXQ_STAT(restore_drop_sge), + HINIC5_RXQ_STAT(pkt_mc), +}; + +static struct hinic5_stats hinic5_tx_queue_stats[] = { + HINIC5_TXQ_STAT(packets), + HINIC5_TXQ_STAT(bytes), + HINIC5_TXQ_STAT(busy), + HINIC5_TXQ_STAT(wake), + HINIC5_TXQ_STAT(dropped), + HINIC5_TXQ_STAT(unfinished), +}; + +#ifdef HAVE_XDP_SUPPORT +static struct hinic5_stats hinic5_xdp_tx_queue_stats[] = { + HINIC5_XDPTXQ_STAT(xdp_dropped), + HINIC5_XDPTXQ_STAT(xdp_xmits), +}; +#endif + +static struct hinic5_stats hinic5_tx_queue_stats_extern[] = { + HINIC5_TXQ_STAT(skb_pad_err), + HINIC5_TXQ_STAT(frag_len_overflow), + HINIC5_TXQ_STAT(offload_cow_skb_err), + HINIC5_TXQ_STAT(map_frag_err), + HINIC5_TXQ_STAT(unknown_tunnel_pkt), + HINIC5_TXQ_STAT(frag_size_err), + HINIC5_TXQ_STAT(rsvd1), + HINIC5_TXQ_STAT(rsvd2), +}; + +static struct hinic5_stats hinic5_function_stats[] = { + HINIC5_FUNC_STAT(tx_unicast_pkts_vport), + HINIC5_FUNC_STAT(tx_unicast_bytes_vport), + HINIC5_FUNC_STAT(tx_multicast_pkts_vport), + HINIC5_FUNC_STAT(tx_multicast_bytes_vport), + HINIC5_FUNC_STAT(tx_broadcast_pkts_vport), + HINIC5_FUNC_STAT(tx_broadcast_bytes_vport), + + HINIC5_FUNC_STAT(rx_unicast_pkts_vport), + HINIC5_FUNC_STAT(rx_unicast_bytes_vport), + HINIC5_FUNC_STAT(rx_multicast_pkts_vport), + 
HINIC5_FUNC_STAT(rx_multicast_bytes_vport), + HINIC5_FUNC_STAT(rx_broadcast_pkts_vport), + HINIC5_FUNC_STAT(rx_broadcast_bytes_vport), + + HINIC5_FUNC_STAT(tx_discard_vport), + HINIC5_FUNC_STAT(rx_discard_vport), + HINIC5_FUNC_STAT(tx_err_vport), + HINIC5_FUNC_STAT(rx_err_vport), +}; + +static struct hinic5_stats hinic5_port_stats[] = { + HINIC5_PORT_STAT(mac_tx_fragment_pkt_num), + HINIC5_PORT_STAT(mac_tx_undersize_pkt_num), + HINIC5_PORT_STAT(mac_tx_undermin_pkt_num), + HINIC5_PORT_STAT(mac_tx_64_oct_pkt_num), + HINIC5_PORT_STAT(mac_tx_65_127_oct_pkt_num), + HINIC5_PORT_STAT(mac_tx_128_255_oct_pkt_num), + HINIC5_PORT_STAT(mac_tx_256_511_oct_pkt_num), + HINIC5_PORT_STAT(mac_tx_512_1023_oct_pkt_num), + HINIC5_PORT_STAT(mac_tx_1024_1518_oct_pkt_num), + HINIC5_PORT_STAT(mac_tx_1519_2047_oct_pkt_num), + HINIC5_PORT_STAT(mac_tx_2048_4095_oct_pkt_num), + HINIC5_PORT_STAT(mac_tx_4096_8191_oct_pkt_num), + HINIC5_PORT_STAT(mac_tx_8192_9216_oct_pkt_num), + HINIC5_PORT_STAT(mac_tx_9217_12287_oct_pkt_num), + HINIC5_PORT_STAT(mac_tx_12288_16383_oct_pkt_num), + HINIC5_PORT_STAT(mac_tx_1519_max_bad_pkt_num), + HINIC5_PORT_STAT(mac_tx_1519_max_good_pkt_num), + HINIC5_PORT_STAT(mac_tx_oversize_pkt_num), + HINIC5_PORT_STAT(mac_tx_jabber_pkt_num), + HINIC5_PORT_STAT(mac_tx_bad_pkt_num), + HINIC5_PORT_STAT(mac_tx_bad_oct_num), + HINIC5_PORT_STAT(mac_tx_good_pkt_num), + HINIC5_PORT_STAT(mac_tx_good_oct_num), + HINIC5_PORT_STAT(mac_tx_total_pkt_num), + HINIC5_PORT_STAT(mac_tx_total_oct_num), + HINIC5_PORT_STAT(mac_tx_uni_pkt_num), + HINIC5_PORT_STAT(mac_tx_multi_pkt_num), + HINIC5_PORT_STAT(mac_tx_broad_pkt_num), + HINIC5_PORT_STAT(mac_tx_pause_num), + HINIC5_PORT_STAT(mac_tx_pfc_pkt_num), + HINIC5_PORT_STAT(mac_tx_pfc_pri0_pkt_num), + HINIC5_PORT_STAT(mac_tx_pfc_pri1_pkt_num), + HINIC5_PORT_STAT(mac_tx_pfc_pri2_pkt_num), + HINIC5_PORT_STAT(mac_tx_pfc_pri3_pkt_num), + HINIC5_PORT_STAT(mac_tx_pfc_pri4_pkt_num), + HINIC5_PORT_STAT(mac_tx_pfc_pri5_pkt_num), + 
HINIC5_PORT_STAT(mac_tx_pfc_pri6_pkt_num), + HINIC5_PORT_STAT(mac_tx_pfc_pri7_pkt_num), + HINIC5_PORT_STAT(mac_tx_control_pkt_num), + HINIC5_PORT_STAT(mac_tx_err_all_pkt_num), + HINIC5_PORT_STAT(mac_tx_from_app_good_pkt_num), + HINIC5_PORT_STAT(mac_tx_from_app_bad_pkt_num), + + HINIC5_PORT_STAT(mac_rx_fragment_pkt_num), + HINIC5_PORT_STAT(mac_rx_undersize_pkt_num), + HINIC5_PORT_STAT(mac_rx_undermin_pkt_num), + HINIC5_PORT_STAT(mac_rx_64_oct_pkt_num), + HINIC5_PORT_STAT(mac_rx_65_127_oct_pkt_num), + HINIC5_PORT_STAT(mac_rx_128_255_oct_pkt_num), + HINIC5_PORT_STAT(mac_rx_256_511_oct_pkt_num), + HINIC5_PORT_STAT(mac_rx_512_1023_oct_pkt_num), + HINIC5_PORT_STAT(mac_rx_1024_1518_oct_pkt_num), + HINIC5_PORT_STAT(mac_rx_1519_2047_oct_pkt_num), + HINIC5_PORT_STAT(mac_rx_2048_4095_oct_pkt_num), + HINIC5_PORT_STAT(mac_rx_4096_8191_oct_pkt_num), + HINIC5_PORT_STAT(mac_rx_8192_9216_oct_pkt_num), + HINIC5_PORT_STAT(mac_rx_9217_12287_oct_pkt_num), + HINIC5_PORT_STAT(mac_rx_12288_16383_oct_pkt_num), + HINIC5_PORT_STAT(mac_rx_1519_max_bad_pkt_num), + HINIC5_PORT_STAT(mac_rx_1519_max_good_pkt_num), + HINIC5_PORT_STAT(mac_rx_oversize_pkt_num), + HINIC5_PORT_STAT(mac_rx_jabber_pkt_num), + HINIC5_PORT_STAT(mac_rx_bad_pkt_num), + HINIC5_PORT_STAT(mac_rx_bad_oct_num), + HINIC5_PORT_STAT(mac_rx_good_pkt_num), + HINIC5_PORT_STAT(mac_rx_good_oct_num), + HINIC5_PORT_STAT(mac_rx_total_pkt_num), + HINIC5_PORT_STAT(mac_rx_total_oct_num), + HINIC5_PORT_STAT(mac_rx_uni_pkt_num), + HINIC5_PORT_STAT(mac_rx_multi_pkt_num), + HINIC5_PORT_STAT(mac_rx_broad_pkt_num), + HINIC5_PORT_STAT(mac_rx_pause_num), + HINIC5_PORT_STAT(mac_rx_pfc_pkt_num), + HINIC5_PORT_STAT(mac_rx_pfc_pri0_pkt_num), + HINIC5_PORT_STAT(mac_rx_pfc_pri1_pkt_num), + HINIC5_PORT_STAT(mac_rx_pfc_pri2_pkt_num), + HINIC5_PORT_STAT(mac_rx_pfc_pri3_pkt_num), + HINIC5_PORT_STAT(mac_rx_pfc_pri4_pkt_num), + HINIC5_PORT_STAT(mac_rx_pfc_pri5_pkt_num), + HINIC5_PORT_STAT(mac_rx_pfc_pri6_pkt_num), + HINIC5_PORT_STAT(mac_rx_pfc_pri7_pkt_num), + 
HINIC5_PORT_STAT(mac_rx_control_pkt_num), + HINIC5_PORT_STAT(mac_rx_sym_err_pkt_num), + HINIC5_PORT_STAT(mac_rx_fcs_err_pkt_num), + HINIC5_PORT_STAT(mac_rx_send_app_good_pkt_num), + HINIC5_PORT_STAT(mac_rx_send_app_bad_pkt_num), + HINIC5_PORT_STAT(mac_rx_unfilter_pkt_num), +}; + +static char g_hinic_priv_flags_strings[][ETH_GSTRING_LEN] = { + "Symmetric-RSS", + "Force-Link-up", + "Rxq_Recovery", +}; + +u32 hinic5_get_io_stats_size(const struct hinic5_nic_dev *nic_dev) +{ + u32 count; + + count = (u32)(ARRAY_LEN(hinic5_nic_dev_stats) + + ARRAY_LEN(hinic5_nic_dev_stats_extern) + + (ARRAY_LEN(hinic5_tx_queue_stats) + + ARRAY_LEN(hinic5_tx_queue_stats_extern) + + ARRAY_LEN(hinic5_rx_queue_stats) + + ARRAY_LEN(hinic5_rx_queue_stats_extern)) * nic_dev->max_qps); + + return count; +} + +#define GET_VALUE_OF_PTR(size, ptr) ( \ + (size) == sizeof(u64) ? *(u64 *)(ptr) : \ + (size) == sizeof(u32) ? *(u32 *)(ptr) : \ + (size) == sizeof(u16) ? *(u16 *)(ptr) : *(u8 *)(ptr) \ +) + +#define DEV_STATS_PACK(items, item_idx, array, stats_ptr) do { \ + int j; \ + for (j = 0; j < ARRAY_LEN(array); j++) { \ + memcpy((items)[item_idx].name, (array)[j].name, \ + HINIC5_SHOW_ITEM_LEN); \ + (items)[item_idx].hexadecimal = 0; \ + (items)[item_idx].value = \ + GET_VALUE_OF_PTR((array)[j].size, \ + (char *)(stats_ptr) + (array)[j].offset); \ + (item_idx)++; \ + } \ +} while (0) + +int hinic5_rx_queue_stat_pack(struct hinic5_show_item *item, + struct hinic5_stats *stat, + const struct hinic5_rxq_stats *rxq_stats, u16 qid) +{ + int ret; + + ret = snprintf(item->name, HINIC5_SHOW_ITEM_LEN, stat->name, qid); + if (ret < 0) + return -EINVAL; + + item->hexadecimal = 0; + item->value = GET_VALUE_OF_PTR(stat->size, (char *)(rxq_stats) + stat->offset); + + return 0; +} + +int hinic5_tx_queue_stat_pack(struct hinic5_show_item *item, + struct hinic5_stats *stat, + const struct hinic5_txq_stats *txq_stats, u16 qid) +{ + int ret; + + ret = snprintf(item->name, HINIC5_SHOW_ITEM_LEN, stat->name, qid); + if 
(ret < 0) + return -EINVAL; + + item->hexadecimal = 0; + item->value = GET_VALUE_OF_PTR(stat->size, (char *)(txq_stats) + stat->offset); + + return 0; +} + +int hinic5_get_io_stats(const struct hinic5_nic_dev *nic_dev, void *stats) +{ + struct hinic5_show_item *items = stats; + int item_idx = 0; + u16 qid; + int idx; + int ret; + + DEV_STATS_PACK(items, item_idx, hinic5_nic_dev_stats, &nic_dev->stats); + DEV_STATS_PACK(items, item_idx, hinic5_nic_dev_stats_extern, &nic_dev->stats); + + for (qid = 0; qid < nic_dev->max_qps; qid++) { + for (idx = 0; idx < ARRAY_LEN(hinic5_tx_queue_stats); idx++) { + ret = hinic5_tx_queue_stat_pack(&items[item_idx++], + &hinic5_tx_queue_stats[idx], + &nic_dev->txqs[qid].txq_stats, qid); + if (ret != 0) + return -EINVAL; + } + + for (idx = 0; idx < ARRAY_LEN(hinic5_tx_queue_stats_extern); idx++) { + ret = hinic5_tx_queue_stat_pack(&items[item_idx++], + &hinic5_tx_queue_stats_extern[idx], + &nic_dev->txqs[qid].txq_stats, qid); + if (ret != 0) + return -EINVAL; + } + } + + for (qid = 0; qid < nic_dev->max_qps; qid++) { + for (idx = 0; idx < ARRAY_LEN(hinic5_rx_queue_stats); idx++) { + ret = hinic5_rx_queue_stat_pack(&items[item_idx++], + &hinic5_rx_queue_stats[idx], + &nic_dev->rxqs[qid].rxq_stats, qid); + if (ret != 0) + return -EINVAL; + } + + for (idx = 0; idx < ARRAY_LEN(hinic5_rx_queue_stats_extern); idx++) { + ret = hinic5_rx_queue_stat_pack(&items[item_idx++], + &hinic5_rx_queue_stats_extern[idx], + &nic_dev->rxqs[qid].rxq_stats, qid); + if (ret != 0) + return -EINVAL; + } + } + + return 0; +} + +static char g_hinic5_test_strings[][ETH_GSTRING_LEN] = { + "Internal lb test (on/offline)", + "External lb test (external_lb)", +}; + +int hinic5_get_sset_count(struct net_device *netdev, int sset) +{ + int count = 0, q_num = 0, xdp_num = 0; + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + + switch (sset) { + case ETH_SS_TEST: + return ARRAY_LEN(g_hinic5_test_strings); + case ETH_SS_STATS: + q_num = nic_dev->q_params.num_qps; + 
xdp_num = nic_dev->q_params.xdp_qps; + count = ARRAY_LEN(hinic5_netdev_stats) + + ARRAY_LEN(hinic5_nic_dev_stats) + + ARRAY_LEN(hinic5_port_link_stat) + + ARRAY_LEN(hinic5_function_stats) + + ARRAY_LEN(hinic5_tx_queue_stats) * q_num + + ARRAY_LEN(hinic5_rx_queue_stats) * q_num; + +#ifdef HAVE_XDP_SUPPORT + count += ARRAY_LEN(hinic5_xdp_tx_queue_stats) * xdp_num; +#endif + + if (!HINIC5_FUNC_IS_VF(nic_dev->hwdev)) + count += ARRAY_LEN(hinic5_port_stats); + return count; + + case ETH_SS_PRIV_FLAGS: + return ARRAY_LEN(g_hinic_priv_flags_strings); + default: + return -EOPNOTSUPP; + } +} + +static void get_drv_queue_stats(struct hinic5_nic_dev *nic_dev, u64 *data) +{ + struct hinic5_txq_stats txq_stats; +#ifdef HAVE_XDP_SUPPORT + struct hinic5_xdptxq_stats xdptxq_stats; +#endif + struct hinic5_rxq_stats rxq_stats; + u16 i = 0, j = 0, qid = 0; + char *p = NULL; + + /* 1. 统计内核 TX 队列 (num_qps 个),显示常规统计字段 */ + for (qid = 0; qid < nic_dev->q_params.num_qps; qid++) { + if (!nic_dev->txqs) + break; + + hinic5_txq_get_stats(&nic_dev->txqs[qid], &txq_stats); + for (j = 0; j < ARRAY_LEN(hinic5_tx_queue_stats); j++, i++) { + p = (char *)(&txq_stats) + + hinic5_tx_queue_stats[j].offset; + data[i] = (hinic5_tx_queue_stats[j].size == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + } + + /* 3. 统计 RX 队列 (num_qps 个) (仅内核队列) */ + for (qid = 0; qid < nic_dev->q_params.num_qps; qid++) { + if (!nic_dev->rxqs) + break; + + hinic5_rxq_get_stats(&nic_dev->rxqs[qid], &rxq_stats); + for (j = 0; j < ARRAY_LEN(hinic5_rx_queue_stats); j++, i++) { + p = (char *)(&rxq_stats) + + hinic5_rx_queue_stats[j].offset; + data[i] = (hinic5_rx_queue_stats[j].size == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + } + + /* 2. 
统计 XDP TX 队列 (xdp_qps 个),只显示 XDP 相关统计字段 */ +#ifdef HAVE_XDP_SUPPORT + for (qid = nic_dev->q_params.num_qps; + qid < nic_dev->q_params.num_qps + nic_dev->q_params.xdp_qps; qid++) { + if (!nic_dev->txqs) + break; + + hinic5_xdptxq_get_stats(&nic_dev->txqs[qid], &xdptxq_stats); + /* 只显示 xdp_dropped 和 xdp_xmits */ + for (j = 0; j < ARRAY_LEN(hinic5_xdp_tx_queue_stats); j++, i++) { + p = (char *)(&xdptxq_stats) + + hinic5_xdp_tx_queue_stats[j].offset; + data[i] = (hinic5_xdp_tx_queue_stats[j].size == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + } +#endif +} + +static u16 get_ethtool_port_stats(struct hinic5_nic_dev *nic_dev, u64 *data) +{ + struct mag_cmd_port_stats *port_stats = NULL; + char *p = NULL; + u16 i = 0, j = 0; + int err; + + port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL); + if (!port_stats) { + memset(&data[i], 0, + ARRAY_LEN(hinic5_port_stats) * sizeof(*data)); + i += ARRAY_LEN(hinic5_port_stats); + return i; + } + + err = hinic5_get_phy_port_stats(nic_dev->hwdev, port_stats); + if (err != 0) + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to get port stats from fw\n"); + + for (j = 0; j < ARRAY_LEN(hinic5_port_stats); j++, i++) { + p = (char *)(port_stats) + hinic5_port_stats[j].offset; + data[i] = (hinic5_port_stats[j].size == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + + kfree(port_stats); + + return i; +} + +void hinic5_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); +#ifdef HAVE_NDO_GET_STATS64 + struct rtnl_link_stats64 temp; + const struct rtnl_link_stats64 *net_stats = NULL; +#else + const struct net_device_stats *net_stats = NULL; +#endif + struct hinic5_nic_stats *nic_stats = NULL; + + struct hinic5_vport_stats vport_stats = {0}; + u16 i = 0, j = 0; + char *p = NULL; + int err; + struct hinic5_port_link_stats link_count = {0}; + +#ifdef HAVE_NDO_GET_STATS64 + net_stats = dev_get_stats(netdev, &temp); +#else + net_stats = dev_get_stats(netdev); +#endif + for (j = 0; j < ARRAY_LEN(hinic5_netdev_stats); j++, i++) { + p = (char *)(net_stats) + hinic5_netdev_stats[j].offset; + data[i] = GET_VALUE_OF_PTR(hinic5_netdev_stats[j].size, p); + } + + nic_stats = &nic_dev->stats; + for (j = 0; j < ARRAY_LEN(hinic5_nic_dev_stats); j++, i++) { + p = (char *)(nic_stats) + hinic5_nic_dev_stats[j].offset; + data[i] = GET_VALUE_OF_PTR(hinic5_nic_dev_stats[j].size, p); + } + + err = hinic5_get_link_down_cnt(nic_dev->hwdev, (int *)&link_count.link_down_events_phy); + if (err != 0) + nicif_err(nic_dev, drv, netdev, + "Failed to get link down counter from fw\n"); + + for (j = 0; j < ARRAY_LEN(hinic5_port_link_stat); j++) { + p = (char *)(&link_count) + hinic5_port_link_stat[j].offset; + data[i++] = GET_VALUE_OF_PTR(hinic5_port_link_stat[j].size, p); + } + + err = hinic5_get_vport_stats(nic_dev->hwdev, hinic5_global_func_id(nic_dev->hwdev), + &vport_stats); + if (err != 0) + nicif_err(nic_dev, drv, netdev, + "Failed to get function stats from fw\n"); + + for (j = 0; j < ARRAY_LEN(hinic5_function_stats); j++, i++) { + p = (char *)(&vport_stats) + hinic5_function_stats[j].offset; + data[i] = GET_VALUE_OF_PTR(hinic5_function_stats[j].size, p); + } + + if (!HINIC5_FUNC_IS_VF(nic_dev->hwdev)) + i += 
get_ethtool_port_stats(nic_dev, data + i); + + get_drv_queue_stats(nic_dev, data + i); +} + +static u16 get_drv_dev_strings(struct hinic5_nic_dev *nic_dev, char *p) +{ + u16 i, cnt = 0; + + for (i = 0; i < ARRAY_LEN(hinic5_netdev_stats); i++) { + memcpy(p, hinic5_netdev_stats[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + cnt++; + } + + for (i = 0; i < ARRAY_LEN(hinic5_nic_dev_stats); i++) { + memcpy(p, hinic5_nic_dev_stats[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + cnt++; + } + + for (i = 0; i < ARRAY_LEN(hinic5_port_link_stat); i++) { + memcpy(p, hinic5_port_link_stat[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + cnt++; + } + + return cnt; +} + +static u16 get_hw_stats_strings(struct hinic5_nic_dev *nic_dev, char *p) +{ + u16 i, cnt = 0; + + for (i = 0; i < ARRAY_LEN(hinic5_function_stats); i++) { + memcpy(p, hinic5_function_stats[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + cnt++; + } + + if (!HINIC5_FUNC_IS_VF(nic_dev->hwdev)) { + for (i = 0; i < ARRAY_LEN(hinic5_port_stats); i++) { + memcpy(p, hinic5_port_stats[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + cnt++; + } + } + + return cnt; +} + +static u16 get_qp_stats_strings(const struct hinic5_nic_dev *nic_dev, char *p) +{ + u16 i = 0, j = 0, cnt = 0; + int err; + + /* 1. 
内核 TX 队列统计名称 (num_qps 个) */ + for (i = 0; i < nic_dev->q_params.num_qps; i++) { + for (j = 0; j < ARRAY_LEN(hinic5_tx_queue_stats); j++) { + err = sprintf(p, hinic5_tx_queue_stats[j].name, i); + if (err < 0) + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to sprintf tx queue stats name, idx_qps: %u, idx_stats: %u\n", + i, j); + p += ETH_GSTRING_LEN; + cnt++; + } + } + + /* 2 RX 队列统计名称 (仅内核队列) */ + for (i = 0; i < nic_dev->q_params.num_qps; i++) { + for (j = 0; j < ARRAY_LEN(hinic5_rx_queue_stats); j++) { + err = sprintf(p, hinic5_rx_queue_stats[j].name, i); + if (err < 0) + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to sprintf rx queue stats name, idx_qps: %u, idx_stats: %u\n", + i, j); + p += ETH_GSTRING_LEN; + cnt++; + } + } + + /* 3 XDP TX 队列统计名称 (xdp_qps 个) */ +#ifdef HAVE_XDP_SUPPORT + for (i = 0; i < nic_dev->q_params.xdp_qps; i++) { + for (j = 0; j < ARRAY_LEN(hinic5_xdp_tx_queue_stats); j++) { + err = sprintf(p, hinic5_xdp_tx_queue_stats[j].name, i); + if (err < 0) + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to sprintf xdp tx queue stats name, idx_qps: %u, idx_stats: %u\n", + i, j); + p += ETH_GSTRING_LEN; + cnt++; + } + } +#endif + + return cnt; +} + +void hinic5_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + char *p = (char *)data; + u16 offset = 0; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *g_hinic5_test_strings, sizeof(g_hinic5_test_strings)); + return; + case ETH_SS_STATS: + offset = get_drv_dev_strings(nic_dev, p); + offset += get_hw_stats_strings(nic_dev, + p + (u32)(offset * ETH_GSTRING_LEN)); + get_qp_stats_strings(nic_dev, p + (u32)(offset * ETH_GSTRING_LEN)); + + return; + case ETH_SS_PRIV_FLAGS: + memcpy(data, g_hinic_priv_flags_strings, sizeof(g_hinic_priv_flags_strings)); + return; + default: + nicif_err(nic_dev, drv, netdev, + "Invalid string set %u.", stringset); + return; + } +} + +#if defined(ETHTOOL_GFECPARAM) && 
defined(ETHTOOL_SFECPARAM) +struct fecparam_value_map { + u8 hinic5_fec_offset; + u8 hinic5_fec_value; + u8 ethtool_fec_value; +}; + +static void fecparam_convert(u32 opcode, u8 in_fec_param, u8 *out_fec_param) +{ + u8 i; + u8 fec_value_table_lenth; + struct fecparam_value_map fec_value_table[] = { + {PORT_FEC_NOT_SET, BIT(PORT_FEC_NOT_SET), ETHTOOL_FEC_NONE}, + {PORT_FEC_RSFEC, BIT(PORT_FEC_RSFEC), ETHTOOL_FEC_RS}, + {PORT_FEC_BASEFEC, BIT(PORT_FEC_BASEFEC), ETHTOOL_FEC_BASER}, + {PORT_FEC_NOFEC, BIT(PORT_FEC_NOFEC), ETHTOOL_FEC_OFF}, +#ifdef ETHTOOL_FEC_LLRS + {PORT_FEC_LLRSFEC, BIT(PORT_FEC_LLRSFEC), ETHTOOL_FEC_LLRS}, +#endif + {PORT_FEC_AUTO, BIT(PORT_FEC_AUTO), ETHTOOL_FEC_AUTO} + }; + + *out_fec_param = 0; + fec_value_table_lenth = (u8)(sizeof(fec_value_table) / sizeof(struct fecparam_value_map)); + + if (opcode == MAG_CMD_OPCODE_SET) { + for (i = 0; i < fec_value_table_lenth; i++) { + if ((in_fec_param & fec_value_table[i].ethtool_fec_value) != 0) + /* The MPU uses the offset to determine the FEC mode. 
*/ + *out_fec_param = fec_value_table[i].hinic5_fec_offset; + } + } + + if (opcode == MAG_CMD_OPCODE_GET) { + for (i = 0; i < fec_value_table_lenth; i++) { + if ((in_fec_param & fec_value_table[i].hinic5_fec_value) != 0) + *out_fec_param |= fec_value_table[i].ethtool_fec_value; + } + } +} + +/* When the ethtool is used to set the FEC mode */ +static bool check_fecparam_is_valid(u8 fec_param) +{ + if (fec_param == ETHTOOL_FEC_RS || + fec_param == ETHTOOL_FEC_BASER || +#ifdef ETHTOOL_FEC_LLRS + fec_param == ETHTOOL_FEC_LLRS || +#endif + fec_param == ETHTOOL_FEC_OFF) + return true; + return false; +} + +int hinic5_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + u8 advertised_fec = 0; + u8 supported_fec = 0; + int err; + + err = hinic5_get_fec(nic_dev->hwdev, &advertised_fec, &supported_fec); + if (err != 0) { + nicif_err(nic_dev, drv, netdev, "Get fec param failed\n"); + return err; + } + + fecparam_convert(MAG_CMD_OPCODE_GET, BIT(advertised_fec), + (u8 *)(&fecparam->active_fec)); + fecparam_convert(MAG_CMD_OPCODE_GET, supported_fec, (u8 *)(&fecparam->fec)); + + nicif_info(nic_dev, drv, netdev, "Get fec param success\n"); + return 0; +} + +int hinic5_set_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + int err; + u8 advertised_fec = 0; + + if (check_fecparam_is_valid((u8)(fecparam->fec)) == false) { + nicif_err(nic_dev, drv, netdev, "fec param is invalid, failed to set fec param\n"); + return -EINVAL; + } + + fecparam_convert(MAG_CMD_OPCODE_SET, (u8)(fecparam->fec), &advertised_fec); + + err = hinic5_set_fec(nic_dev->hwdev, advertised_fec); + if (err != 0) { + nicif_err(nic_dev, drv, netdev, "Set fec param failed\n"); + return err; + } + + nicif_info(nic_dev, drv, netdev, "Set fec param success\n"); + return 0; +} +#endif + diff --git 
a/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_port_stats.h b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_port_stats.h new file mode 100644 index 00000000..703969c4 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_port_stats.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved. */ + +#ifndef HINIC5_ETHTOOL_PORT_STATS_H +#define HINIC5_ETHTOOL_PORT_STATS_H + +#include <linux/kernel.h> +#include <linux/ethtool.h> +#include "ossl_knl_linux.h" +#include "hinic5_nic_dev.h" +#include "hinic5_rx.h" +#include "hinic5_tx.h" +#include "nic_cfg_comm.h" +#include "mag_mpu_cmd_defs.h" + +#define FPGA_PORT_COUNTER 0 +#define EVB_PORT_COUNTER 1 + +struct hinic5_stats { + char name[ETH_GSTRING_LEN]; + u32 size; + int offset; +}; + +struct hinic5_port_link_stats { + u64 link_down_events_phy; +}; + +#define HINIC5_NIC_STAT(_stat_item) { \ + .name = #_stat_item, \ + .size = FIELD_SIZEOF(struct hinic5_nic_stats, _stat_item), \ + .offset = offsetof(struct hinic5_nic_stats, _stat_item) \ +} + +#define HINIC5_RXQ_STAT(_stat_item) { \ + .name = "rxq%d_"#_stat_item, \ + .size = FIELD_SIZEOF(struct hinic5_rxq_stats, _stat_item), \ + .offset = offsetof(struct hinic5_rxq_stats, _stat_item) \ +} + +#define HINIC5_TXQ_STAT(_stat_item) { \ + .name = "txq%d_"#_stat_item, \ + .size = FIELD_SIZEOF(struct hinic5_txq_stats, _stat_item), \ + .offset = offsetof(struct hinic5_txq_stats, _stat_item) \ +} + +#ifdef HAVE_XDP_SUPPORT +#define HINIC5_XDPTXQ_STAT(_stat_item) { \ + .name = "txq%d_"#_stat_item, \ + .size = FIELD_SIZEOF(struct hinic5_xdptxq_stats, _stat_item), \ + .offset = offsetof(struct hinic5_xdptxq_stats, _stat_item) \ +} +#endif + +#define HINIC5_FUNC_STAT(_stat_item) { \ + .name = #_stat_item, \ + .size = FIELD_SIZEOF(struct hinic5_vport_stats, _stat_item), \ + .offset = offsetof(struct 
hinic5_vport_stats, _stat_item) \ +} + +#define HINIC5_PORT_STAT(_stat_item) { \ + .name = #_stat_item, \ + .size = FIELD_SIZEOF(struct mag_cmd_port_stats, _stat_item), \ + .offset = offsetof(struct mag_cmd_port_stats, _stat_item) \ +} + +#define HINIC5_PORT_LINK_STAT(_stat_item) { \ + .name = #_stat_item, \ + .size = FIELD_SIZEOF(struct hinic5_port_link_stats, _stat_item), \ + .offset = offsetof(struct hinic5_port_link_stats, _stat_item) \ +} + +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_priv_flags.c b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_priv_flags.c new file mode 100644 index 00000000..d2900479 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_priv_flags.c @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> + +#include "drv_nic_api.h" +#include "ossl_knl.h" +#include "hinic5_hw.h" +#include "hinic5_crm.h" +#include "hinic5_nic_dev.h" +#include "hinic5_tx.h" +#include "hinic5_rx.h" +#include "hinic5_ethtool.h" +#include "hinic5_ethtool_priv_flags.h" + +u32 hinic5_get_priv_flags(struct net_device *netdev) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + u32 priv_flags = 0; + + if (test_bit(HINIC5_SAME_RXTX, &nic_dev->flags)) + priv_flags |= HINIC5_PRIV_FLAGS_SYMM_RSS; + + if (test_bit(HINIC5_FORCE_LINK_UP, &nic_dev->flags)) + priv_flags |= HINIC5_PRIV_FLAGS_LINK_UP; + + if (test_bit(HINIC5_RXQ_RECOVERY, &nic_dev->flags)) + priv_flags |= HINIC5_PRIV_FLAGS_RXQ_RECOVERY; + + return priv_flags; +} + +int hinic5_set_rxq_recovery_flag(struct net_device *netdev, u32 priv_flags) +{ + struct 
hinic5_nic_dev *nic_dev = netdev_priv(netdev); + + if ((priv_flags & HINIC5_PRIV_FLAGS_RXQ_RECOVERY) != 0) { + if (HINIC5_SUPPORT_RXQ_RECOVERY(nic_dev->hwdev) == 0) { + nicif_info(nic_dev, drv, netdev, + "Unsupport open rxq recovery\n"); + return -EOPNOTSUPP; + } + + if (test_and_set_bit(HINIC5_RXQ_RECOVERY, &nic_dev->flags) != 0) + return 0; + queue_delayed_work(nic_dev->workq, &nic_dev->rxq_check_work, HZ); + nicif_info(nic_dev, drv, netdev, "open rxq recovery\n"); + } else { + if (test_and_clear_bit(HINIC5_RXQ_RECOVERY, &nic_dev->flags) == 0) + return 0; + cancel_delayed_work_sync(&nic_dev->rxq_check_work); + nicif_info(nic_dev, drv, netdev, "close rxq recovery\n"); + } + + return 0; +} + +static int hinic5_set_symm_rss_flag(struct net_device *netdev, u32 priv_flags) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + + if ((priv_flags & HINIC5_PRIV_FLAGS_SYMM_RSS) != 0) { + if (test_bit(HINIC5_DCB_ENABLE, &nic_dev->flags) != 0) { + nicif_err(nic_dev, drv, netdev, + "Failed to open Symmetric RSS while DCB is enabled\n"); + return -EOPNOTSUPP; + } + + if (test_bit(HINIC5_RSS_ENABLE, &nic_dev->flags) == 0) { + nicif_err(nic_dev, drv, netdev, + "Failed to open Symmetric RSS while RSS is disabled\n"); + return -EOPNOTSUPP; + } + + set_bit(HINIC5_SAME_RXTX, &nic_dev->flags); + } else { + clear_bit(HINIC5_SAME_RXTX, &nic_dev->flags); + } + + return 0; +} + +static int hinic5_set_force_link_flag(struct net_device *netdev, u32 priv_flags) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + u8 link_status = 0; + int err; + + if ((priv_flags & HINIC5_PRIV_FLAGS_LINK_UP) != 0) { + if (test_and_set_bit(HINIC5_FORCE_LINK_UP, &nic_dev->flags) != 0) + return 0; + + if (!HINIC5_CHANNEL_RES_VALID(nic_dev)) + return 0; + + if (netif_carrier_ok(netdev)) + return 0; + + nic_dev->link_status = true; + netif_carrier_on(netdev); + nicif_info(nic_dev, link, netdev, "Set link up\n"); + + if (!HINIC5_FUNC_IS_VF(nic_dev->hwdev)) + 
hinic5_notify_all_vfs_link_changed(nic_dev->hwdev, nic_dev->link_status); + } else { + if (test_and_clear_bit(HINIC5_FORCE_LINK_UP, &nic_dev->flags) == 0) + return 0; + + if (!HINIC5_CHANNEL_RES_VALID(nic_dev)) + return 0; + + err = hinic5_get_link_state(nic_dev->hwdev, &link_status); + if (err != 0) { + nicif_err(nic_dev, link, netdev, + "Get link state err: %d\n", err); + return err; + } + + nic_dev->link_status = link_status; + + if (link_status != 0) { + if (netif_carrier_ok(netdev)) + return 0; + + netif_carrier_on(netdev); + nicif_info(nic_dev, link, netdev, "Link state is up\n"); + } else { + if (!netif_carrier_ok(netdev)) + return 0; + + netif_carrier_off(netdev); + nicif_info(nic_dev, link, netdev, "Link state is down\n"); + } + + if (!HINIC5_FUNC_IS_VF(nic_dev->hwdev)) + hinic5_notify_all_vfs_link_changed(nic_dev->hwdev, nic_dev->link_status); + } + + return 0; +} + +int hinic5_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + int err; + + err = hinic5_set_symm_rss_flag(netdev, priv_flags); + if (err != 0) + return err; + + err = hinic5_set_rxq_recovery_flag(netdev, priv_flags); + if (err != 0) + return err; + + return hinic5_set_force_link_flag(netdev, priv_flags); +} diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_priv_flags.h b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_priv_flags.h new file mode 100644 index 00000000..74987f7a --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ethtool_priv_flags.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved. 
*/ + +#ifndef HINIC5_ETHTOOL_PRIV_FLAGS_H +#define HINIC5_ETHTOOL_PRIV_FLAGS_H + +#include <linux/types.h> +#include <linux/netdevice.h> + +#define HINIC5_PRIV_FLAGS_SYMM_RSS BIT(0) +#define HINIC5_PRIV_FLAGS_LINK_UP BIT(1) +#define HINIC5_PRIV_FLAGS_RXQ_RECOVERY BIT(2) + +/* Private flags functions - exported for external use */ +int hinic5_set_rxq_recovery_flag(struct net_device *netdev, u32 priv_flags); + +/* Private flags functions - internal use */ +u32 hinic5_get_priv_flags(struct net_device *netdev); +int hinic5_set_priv_flags(struct net_device *netdev, u32 priv_flags); + +#endif /* HINIC5_ETHTOOL_PRIV_FLAGS_H */ diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ntuple.c b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ntuple.c new file mode 100644 index 00000000..5cbf74f7 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ntuple.c @@ -0,0 +1,1226 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/kernel.h> +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/device.h> +#include <linux/ethtool.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/in.h> + +#include "ossl_knl.h" +#include "hinic5_crm.h" +#include "hinic5_nic_cfg.h" +#include "hinic5_nic_dev.h" +#include "hinic5_ntuple.h" + +static bool flow_bifurcations; +module_param(flow_bifurcations, bool, 0444); +MODULE_PARM_DESC(flow_bifurcations, "flow_bifurcations, 0: UNSUPPORTED, 1: SUPPORTED (default=0)"); + +static void tcam_translate_key_y(u8 *key_y, const u8 *src_input, const u8 *mask, u8 len) +{ + u8 idx; + + for (idx = 0; idx < len; idx++) + key_y[idx] = src_input[idx] & mask[idx]; +} + +static void tcam_translate_key_x(u8 *key_x, const u8 *key_y, const u8 *mask, u8 len) +{ + u8 idx; + + 
for (idx = 0; idx < len; idx++) + key_x[idx] = key_y[idx] ^ mask[idx]; +} + +static void tcam_key_calculate(struct tag_tcam_key *tcam_key, + struct nic_tcam_cfg_rule *fdir_tcam_rule) +{ + tcam_translate_key_y(fdir_tcam_rule->key.y, + (u8 *)(&tcam_key->key_info), + (u8 *)(&tcam_key->key_mask), TCAM_FLOW_KEY_SIZE); + tcam_translate_key_x(fdir_tcam_rule->key.x, fdir_tcam_rule->key.y, + (u8 *)(&tcam_key->key_mask), TCAM_FLOW_KEY_SIZE); +} + +#define TCAM_IPV4_TYPE 0 +#define TCAM_IPV6_TYPE 1 + +static int hinic5_base_ipv4_parse(struct hinic5_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *fs, + struct tag_tcam_key *tcam_key) +{ + struct ethtool_tcpip4_spec *mask = &fs->m_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec *val = &fs->h_u.tcp_ip4_spec; + u32 temp; + + switch (mask->ip4src) { + case U32_MAX: + temp = ntohl(val->ip4src); + tcam_key->key_info.sipv4_h = high_16_bits(temp); + tcam_key->key_info.sipv4_l = low_16_bits(temp); + + tcam_key->key_mask.sipv4_h = U16_MAX; + tcam_key->key_mask.sipv4_l = U16_MAX; + break; + case 0: + break; + + default: + nicif_err(nic_dev, drv, nic_dev->netdev, "invalid src_ip mask\n"); + return -EINVAL; + } + + switch (mask->ip4dst) { + case U32_MAX: + temp = ntohl(val->ip4dst); + tcam_key->key_info.dipv4_h = high_16_bits(temp); + tcam_key->key_info.dipv4_l = low_16_bits(temp); + + tcam_key->key_mask.dipv4_h = U16_MAX; + tcam_key->key_mask.dipv4_l = U16_MAX; + break; + case 0: + break; + + default: + nicif_err(nic_dev, drv, nic_dev->netdev, "invalid src_ip mask\n"); + return -EINVAL; + } + + tcam_key->key_info.ip_type = TCAM_IPV4_TYPE; + tcam_key->key_mask.ip_type = TCAM_IP_TYPE_MASK; + + tcam_key->key_info.function_id = hinic5_global_func_id(nic_dev->hwdev); + tcam_key->key_mask.function_id = TCAM_FUNC_ID_MASK; + + return 0; +} + +static int hinic5_base_ipv4_htn_parse(struct hinic5_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *fs, + struct tag_tcam_key *tcam_key) +{ + struct ethtool_tcpip4_spec *mask = &fs->m_u.tcp_ip4_spec; + struct 
ethtool_tcpip4_spec *val = &fs->h_u.tcp_ip4_spec; + u32 temp; + + switch (mask->ip4src) { + case U32_MAX: + temp = ntohl(val->ip4src); + tcam_key->key_info_htn.sipv4_h = high_16_bits(temp); + tcam_key->key_info_htn.sipv4_l = low_16_bits(temp); + + tcam_key->key_mask_htn.sipv4_h = U16_MAX; + tcam_key->key_mask_htn.sipv4_l = U16_MAX; + break; + + case 0: + break; + + default: + nicif_err(nic_dev, drv, nic_dev->netdev, "invalid ipv4_htn src_ip mask\n"); + return -EINVAL; + } + + switch (mask->ip4dst) { + case U32_MAX: + temp = ntohl(val->ip4dst); + tcam_key->key_info_htn.dipv4_h = high_16_bits(temp); + tcam_key->key_info_htn.dipv4_l = low_16_bits(temp); + + tcam_key->key_mask_htn.dipv4_h = U16_MAX; + tcam_key->key_mask_htn.dipv4_l = U16_MAX; + break; + case 0: + break; + + default: + nicif_err(nic_dev, drv, nic_dev->netdev, "invalid ipv4_htn src_ip mask\n"); + return -EINVAL; + } + + tcam_key->key_mask_htn.ip_type = UINT2_MAX; + tcam_key->key_info_htn.ip_type = TCAM_IPV4_TYPE; + + return 0; +} + +static int hinic5_fdir_tcam_ipv4_l4_htn_init(struct hinic5_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *fs, + struct tag_tcam_key *tcam_key) +{ + struct ethtool_tcpip4_spec *mask = &fs->m_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec *val = &fs->h_u.tcp_ip4_spec; + int err; + + err = hinic5_base_ipv4_htn_parse(nic_dev, fs, tcam_key); + if (err != 0) + return err; + + if (fs->flow_type == TCP_V4_FLOW) + tcam_key->key_info_htn.ip_proto = IPPROTO_TCP; + else + tcam_key->key_info_htn.ip_proto = IPPROTO_UDP; + tcam_key->key_mask_htn.ip_proto = U8_MAX; + + tcam_key->key_mask_htn.dport = mask->pdst; + tcam_key->key_info_htn.dport = ntohs(val->pdst); + + tcam_key->key_mask_htn.sport = mask->psrc; + tcam_key->key_info_htn.sport = ntohs(val->psrc); + + return 0; +} + +static int hinic5_fdir_tcam_ipv4_htn_init(struct hinic5_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *fs, + struct tag_tcam_key *tcam_key) +{ + struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec; + struct 
ethtool_usrip4_spec *l3_val = &fs->h_u.usr_ip4_spec; + int err; + + err = hinic5_base_ipv4_htn_parse(nic_dev, fs, tcam_key); + if (err != 0) + return err; + + tcam_key->key_info_htn.ip_proto = l3_val->proto; + tcam_key->key_mask_htn.ip_proto = l3_mask->proto; + + return 0; +} + +static int hinic5_fdir_tcam_ipv4_l4_init(struct hinic5_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *fs, + struct tag_tcam_key *tcam_key) +{ + struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec *l4_val = &fs->h_u.tcp_ip4_spec; + int err; + + err = hinic5_base_ipv4_parse(nic_dev, fs, tcam_key); + if (err != 0) + return err; + + tcam_key->key_info.dport = ntohs(l4_val->pdst); + tcam_key->key_mask.dport = l4_mask->pdst; + + tcam_key->key_info.sport = ntohs(l4_val->psrc); + tcam_key->key_mask.sport = l4_mask->psrc; + + if (fs->flow_type == TCP_V4_FLOW) + tcam_key->key_info.ip_proto = IPPROTO_TCP; + else + tcam_key->key_info.ip_proto = IPPROTO_UDP; + tcam_key->key_mask.ip_proto = U8_MAX; + + return 0; +} + +static int hinic5_fdir_tcam_ipv4_init(struct hinic5_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *fs, + struct tag_tcam_key *tcam_key) +{ + struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec; + struct ethtool_usrip4_spec *l3_val = &fs->h_u.usr_ip4_spec; + int err; + + err = hinic5_base_ipv4_parse(nic_dev, fs, tcam_key); + if (err != 0) + return err; + + tcam_key->key_info.ip_proto = l3_val->proto; + tcam_key->key_mask.ip_proto = l3_mask->proto; + + return 0; +} + +#ifndef UNSUPPORT_NTUPLE_IPV6 +enum ipv6_parse_res { + IPV6_MASK_INVALID, + IPV6_MASK_ALL_MASK, + IPV6_MASK_ALL_ZERO, +}; + +enum ipv6_index { + IPV6_IDX0, + IPV6_IDX1, + IPV6_IDX2, + IPV6_IDX3, +}; + +static int ipv6_mask_parse(const u32 *ipv6_mask) +{ + if (ipv6_mask[IPV6_IDX0] == 0 && ipv6_mask[IPV6_IDX1] == 0 && + ipv6_mask[IPV6_IDX2] == 0 && ipv6_mask[IPV6_IDX3] == 0) + return IPV6_MASK_ALL_ZERO; + + if (ipv6_mask[IPV6_IDX0] == U32_MAX && + ipv6_mask[IPV6_IDX1] == U32_MAX && 
+ ipv6_mask[IPV6_IDX2] == U32_MAX && ipv6_mask[IPV6_IDX3] == U32_MAX) + return IPV6_MASK_ALL_MASK; + + return IPV6_MASK_INVALID; +} + +static void hinic5_ipv6_tcam_key_pares_src(struct tag_tcam_key *tcam_key, + struct ethtool_tcpip6_spec *val) +{ + u32 temp; + + temp = ntohl(val->ip6src[IPV6_IDX0]); + tcam_key->key_info_ipv6.sipv6_key0 = high_16_bits(temp); + tcam_key->key_info_ipv6.sipv6_key1 = low_16_bits(temp); + temp = ntohl(val->ip6src[IPV6_IDX1]); + tcam_key->key_info_ipv6.sipv6_key2 = high_16_bits(temp); + tcam_key->key_info_ipv6.sipv6_key3 = low_16_bits(temp); + temp = ntohl(val->ip6src[IPV6_IDX2]); + tcam_key->key_info_ipv6.sipv6_key4 = high_16_bits(temp); + tcam_key->key_info_ipv6.sipv6_key5 = low_16_bits(temp); + temp = ntohl(val->ip6src[IPV6_IDX3]); + tcam_key->key_info_ipv6.sipv6_key6 = high_16_bits(temp); + tcam_key->key_info_ipv6.sipv6_key7 = low_16_bits(temp); + + tcam_key->key_mask_ipv6.sipv6_key0 = U16_MAX; + tcam_key->key_mask_ipv6.sipv6_key1 = U16_MAX; + tcam_key->key_mask_ipv6.sipv6_key2 = U16_MAX; + tcam_key->key_mask_ipv6.sipv6_key3 = U16_MAX; + tcam_key->key_mask_ipv6.sipv6_key4 = U16_MAX; + tcam_key->key_mask_ipv6.sipv6_key5 = U16_MAX; + tcam_key->key_mask_ipv6.sipv6_key6 = U16_MAX; + tcam_key->key_mask_ipv6.sipv6_key7 = U16_MAX; +} + +static void hinic5_ipv6_tcam_key_pares_dst(struct tag_tcam_key *tcam_key, + struct ethtool_tcpip6_spec *val) +{ + u32 temp; + + temp = ntohl(val->ip6dst[IPV6_IDX0]); + tcam_key->key_info_ipv6.dipv6_key0 = high_16_bits(temp); + tcam_key->key_info_ipv6.dipv6_key1 = low_16_bits(temp); + temp = ntohl(val->ip6dst[IPV6_IDX1]); + tcam_key->key_info_ipv6.dipv6_key2 = high_16_bits(temp); + tcam_key->key_info_ipv6.dipv6_key3 = low_16_bits(temp); + temp = ntohl(val->ip6dst[IPV6_IDX2]); + tcam_key->key_info_ipv6.dipv6_key4 = high_16_bits(temp); + tcam_key->key_info_ipv6.dipv6_key5 = low_16_bits(temp); + temp = ntohl(val->ip6dst[IPV6_IDX3]); + tcam_key->key_info_ipv6.dipv6_key6 = high_16_bits(temp); + 
tcam_key->key_info_ipv6.dipv6_key7 = low_16_bits(temp); + + tcam_key->key_mask_ipv6.dipv6_key0 = U16_MAX; + tcam_key->key_mask_ipv6.dipv6_key1 = U16_MAX; + tcam_key->key_mask_ipv6.dipv6_key2 = U16_MAX; + tcam_key->key_mask_ipv6.dipv6_key3 = U16_MAX; + tcam_key->key_mask_ipv6.dipv6_key4 = U16_MAX; + tcam_key->key_mask_ipv6.dipv6_key5 = U16_MAX; + tcam_key->key_mask_ipv6.dipv6_key6 = U16_MAX; + tcam_key->key_mask_ipv6.dipv6_key7 = U16_MAX; +} + +static void hinic5_ipv6_tcam_key_htn_pares_src(struct tag_tcam_key *tcam_key, + struct ethtool_tcpip6_spec *val) +{ + u32 temp; + + temp = ntohl(val->ip6src[IPV6_IDX0]); + tcam_key->key_info_ipv6_htn.sipv6_key0 = high_16_bits(temp); + tcam_key->key_info_ipv6_htn.sipv6_key1 = low_16_bits(temp); + temp = ntohl(val->ip6src[IPV6_IDX1]); + tcam_key->key_info_ipv6_htn.sipv6_key2 = high_16_bits(temp); + tcam_key->key_info_ipv6_htn.sipv6_key3 = low_16_bits(temp); + temp = ntohl(val->ip6src[IPV6_IDX2]); + tcam_key->key_info_ipv6_htn.sipv6_key4 = high_16_bits(temp); + tcam_key->key_info_ipv6_htn.sipv6_key5 = low_16_bits(temp); + temp = ntohl(val->ip6src[IPV6_IDX3]); + tcam_key->key_info_ipv6_htn.sipv6_key6 = high_16_bits(temp); + tcam_key->key_info_ipv6_htn.sipv6_key7 = low_16_bits(temp); + + tcam_key->key_mask_ipv6_htn.sipv6_key0 = U16_MAX; + tcam_key->key_mask_ipv6_htn.sipv6_key1 = U16_MAX; + tcam_key->key_mask_ipv6_htn.sipv6_key2 = U16_MAX; + tcam_key->key_mask_ipv6_htn.sipv6_key3 = U16_MAX; + tcam_key->key_mask_ipv6_htn.sipv6_key4 = U16_MAX; + tcam_key->key_mask_ipv6_htn.sipv6_key5 = U16_MAX; + tcam_key->key_mask_ipv6_htn.sipv6_key6 = U16_MAX; + tcam_key->key_mask_ipv6_htn.sipv6_key7 = U16_MAX; +} + +static void hinic5_ipv6_tcam_key_htn_pares_dst(struct tag_tcam_key *tcam_key, + struct ethtool_tcpip6_spec *val) +{ + u32 temp; + + temp = ntohl(val->ip6dst[IPV6_IDX0]); + tcam_key->key_info_ipv6_htn.dipv6_key0 = high_16_bits(temp); + tcam_key->key_info_ipv6_htn.dipv6_key1 = low_16_bits(temp); + temp = ntohl(val->ip6dst[IPV6_IDX1]); + 
tcam_key->key_info_ipv6_htn.dipv6_key2 = high_16_bits(temp); + tcam_key->key_info_ipv6_htn.dipv6_key3 = low_16_bits(temp); + temp = ntohl(val->ip6dst[IPV6_IDX2]); + tcam_key->key_info_ipv6_htn.dipv6_key4 = high_16_bits(temp); + tcam_key->key_info_ipv6_htn.dipv6_key5 = low_16_bits(temp); + temp = ntohl(val->ip6dst[IPV6_IDX3]); + tcam_key->key_info_ipv6_htn.dipv6_key6 = high_16_bits(temp); + tcam_key->key_info_ipv6_htn.dipv6_key7 = low_16_bits(temp); + + tcam_key->key_mask_ipv6_htn.dipv6_key0 = U16_MAX; + tcam_key->key_mask_ipv6_htn.dipv6_key1 = U16_MAX; + tcam_key->key_mask_ipv6_htn.dipv6_key2 = U16_MAX; + tcam_key->key_mask_ipv6_htn.dipv6_key3 = U16_MAX; + tcam_key->key_mask_ipv6_htn.dipv6_key4 = U16_MAX; + tcam_key->key_mask_ipv6_htn.dipv6_key5 = U16_MAX; + tcam_key->key_mask_ipv6_htn.dipv6_key6 = U16_MAX; + tcam_key->key_mask_ipv6_htn.dipv6_key7 = U16_MAX; +} + +static int hinic5_base_ipv6_htn_parse(struct hinic5_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *fs, + struct tag_tcam_key *tcam_key) +{ + struct ethtool_tcpip6_spec *mask = &fs->m_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *val = &fs->h_u.tcp_ip6_spec; + int parse_res; + + parse_res = ipv6_mask_parse((u32 *)mask->ip6src); + if (parse_res == IPV6_MASK_ALL_MASK) { + hinic5_ipv6_tcam_key_htn_pares_src(tcam_key, val); + } else if (parse_res == IPV6_MASK_INVALID) { + nicif_err(nic_dev, drv, nic_dev->netdev, "invalid src_ipv6 mask\n"); + return -EINVAL; + } + + parse_res = ipv6_mask_parse((u32 *)mask->ip6dst); + if (parse_res == IPV6_MASK_ALL_MASK) { + hinic5_ipv6_tcam_key_htn_pares_dst(tcam_key, val); + } else if (parse_res == IPV6_MASK_INVALID) { + nicif_err(nic_dev, drv, nic_dev->netdev, "invalid dst_ipv6 mask\n"); + return -EINVAL; + } + + tcam_key->key_info_ipv6_htn.ip_type = TCAM_IPV6_TYPE; + tcam_key->key_mask_ipv6_htn.ip_type = UINT2_MAX; + + return 0; +} + +static int hinic5_fdir_tcam_ipv6_l4_htn_init(struct hinic5_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *fs, + struct tag_tcam_key 
*tcam_key) +{ + struct ethtool_tcpip6_spec *mask = &fs->m_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *val = &fs->h_u.tcp_ip6_spec; + int err; + + err = hinic5_base_ipv6_htn_parse(nic_dev, fs, tcam_key); + if (err != 0) + return err; + + if (fs->flow_type == TCP_V6_FLOW) + tcam_key->key_info_htn.ip_proto = IPPROTO_TCP; + else + tcam_key->key_info_htn.ip_proto = IPPROTO_UDP; + tcam_key->key_mask_htn.ip_proto = U8_MAX; + + tcam_key->key_mask_htn.dport = mask->pdst; + tcam_key->key_info_htn.dport = ntohs(val->pdst); + + tcam_key->key_mask_htn.sport = mask->psrc; + tcam_key->key_info_htn.sport = ntohs(val->psrc); + + return 0; +} + +static int hinic5_fdir_tcam_ipv6_htn_init(struct hinic5_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *fs, + struct tag_tcam_key *tcam_key) +{ + struct ethtool_usrip6_spec *mask = &fs->m_u.usr_ip6_spec; + struct ethtool_usrip6_spec *val = &fs->h_u.usr_ip6_spec; + int err; + + err = hinic5_base_ipv6_htn_parse(nic_dev, fs, tcam_key); + if (err != 0) + return err; + + /* htn key layout keeps ip_proto in key_info_htn/key_mask_htn (see + * hinic5_fdir_tcam_ipv6_l4_htn_init); the non-htn ipv6 members were + * written here by mistake. + */ + tcam_key->key_info_htn.ip_proto = val->l4_proto; + tcam_key->key_mask_htn.ip_proto = mask->l4_proto; + + return 0; +} + +static int hinic5_base_ipv6_parse(struct hinic5_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *fs, + struct tag_tcam_key *tcam_key) +{ + struct ethtool_tcpip6_spec *mask = &fs->m_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *val = &fs->h_u.tcp_ip6_spec; + int parse_res; + + parse_res = ipv6_mask_parse((u32 *)mask->ip6src); + if (parse_res == IPV6_MASK_ALL_MASK) { + hinic5_ipv6_tcam_key_pares_src(tcam_key, val); + } else if (parse_res == IPV6_MASK_INVALID) { + nicif_err(nic_dev, drv, nic_dev->netdev, "invalid src_ipv6 mask\n"); + return -EINVAL; + } + + parse_res = ipv6_mask_parse((u32 *)mask->ip6dst); + if (parse_res == IPV6_MASK_ALL_MASK) { + hinic5_ipv6_tcam_key_pares_dst(tcam_key, val); + } else if (parse_res == IPV6_MASK_INVALID) { + nicif_err(nic_dev, drv, nic_dev->netdev, "invalid dst_ipv6 mask\n"); + return -EINVAL; + } + + 
tcam_key->key_info_ipv6.ip_type = TCAM_IPV6_TYPE; + tcam_key->key_mask_ipv6.ip_type = TCAM_IP_TYPE_MASK; + + tcam_key->key_info_ipv6.function_id = + hinic5_global_func_id(nic_dev->hwdev); + tcam_key->key_mask_ipv6.function_id = TCAM_FUNC_ID_MASK; + + return 0; +} + +static int hinic5_fdir_tcam_ipv6_l4_init(struct hinic5_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *fs, + struct tag_tcam_key *tcam_key) +{ + struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *l4_val = &fs->h_u.tcp_ip6_spec; + int err; + + err = hinic5_base_ipv6_parse(nic_dev, fs, tcam_key); + if (err != 0) + return err; + + tcam_key->key_info_ipv6.dport = ntohs(l4_val->pdst); + tcam_key->key_mask_ipv6.dport = l4_mask->pdst; + + tcam_key->key_info_ipv6.sport = ntohs(l4_val->psrc); + tcam_key->key_mask_ipv6.sport = l4_mask->psrc; + + if (fs->flow_type == TCP_V6_FLOW) + tcam_key->key_info_ipv6.ip_proto = NEXTHDR_TCP; + else + tcam_key->key_info_ipv6.ip_proto = NEXTHDR_UDP; + tcam_key->key_mask_ipv6.ip_proto = U8_MAX; + + return 0; +} + +static int hinic5_fdir_tcam_ipv6_init(struct hinic5_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *fs, + struct tag_tcam_key *tcam_key) +{ + struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec; + struct ethtool_usrip6_spec *l3_val = &fs->h_u.usr_ip6_spec; + int err; + + err = hinic5_base_ipv6_parse(nic_dev, fs, tcam_key); + if (err != 0) + return err; + + tcam_key->key_info_ipv6.ip_proto = l3_val->l4_proto; + tcam_key->key_mask_ipv6.ip_proto = l3_mask->l4_proto; + + return 0; +} +#endif + +static int hinic5_fdir_tcam_info_init(struct hinic5_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *fs, + struct tag_tcam_key *tcam_key, + struct nic_tcam_cfg_rule *fdir_tcam_rule) +{ + int err; + + switch (fs->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + err = hinic5_fdir_tcam_ipv4_l4_init(nic_dev, fs, tcam_key); + if (err != 0) + return err; + break; + case IP_USER_FLOW: + err = hinic5_fdir_tcam_ipv4_init(nic_dev, fs, 
tcam_key); + if (err != 0) + return err; + break; +#ifndef UNSUPPORT_NTUPLE_IPV6 + case TCP_V6_FLOW: + case UDP_V6_FLOW: + err = hinic5_fdir_tcam_ipv6_l4_init(nic_dev, fs, tcam_key); + if (err != 0) + return err; + break; + case IPV6_USER_FLOW: + err = hinic5_fdir_tcam_ipv6_init(nic_dev, fs, tcam_key); + if (err != 0) + return err; + break; +#endif + default: + return -EOPNOTSUPP; + } + + tcam_key->key_info.tunnel_type = 0; + tcam_key->key_mask.tunnel_type = TCAM_TUNNEL_TYPE_MASK; + + fdir_tcam_rule->data.fdir_info.qid = (u32)fs->ring_cookie; + tcam_key_calculate(tcam_key, fdir_tcam_rule); + + return 0; +} + +static int hinic5_fdir_tcam_info_htn_init(struct hinic5_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *fs, + struct tag_tcam_key *tcam_key, + struct nic_tcam_cfg_rule *fdir_tcam_rule) +{ + int err; + + if (flow_bifurcations) + fdir_tcam_rule->data.fdir_info.qid_htn.flag = flow_bifurcations; + + tcam_key->key_mask_htn.function_id_h = UINT5_MAX; + tcam_key->key_mask_htn.function_id_l = UINT5_MAX; + tcam_key->key_info_htn.function_id_l = hinic5_global_func_id(nic_dev->hwdev) & UINT5_MAX; + tcam_key->key_info_htn.function_id_h = + (hinic5_global_func_id(nic_dev->hwdev) >> UINT5_WIDTH) & UINT5_MAX; + + tcam_key->key_info_htn.tunnel_type = 0; + tcam_key->key_mask_htn.tunnel_type = UINT3_MAX; + + switch (fs->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + err = hinic5_fdir_tcam_ipv4_l4_htn_init(nic_dev, fs, tcam_key); + if (err != 0) + return err; + break; + case IP_USER_FLOW: + err = hinic5_fdir_tcam_ipv4_htn_init(nic_dev, fs, tcam_key); + if (err != 0) + return err; + break; + +#ifndef UNSUPPORT_NTUPLE_IPV6 + case TCP_V6_FLOW: + case UDP_V6_FLOW: + err = hinic5_fdir_tcam_ipv6_l4_htn_init(nic_dev, fs, tcam_key); + if (err != 0) + return err; + break; + case IPV6_USER_FLOW: + err = hinic5_fdir_tcam_ipv6_htn_init(nic_dev, fs, tcam_key); + if (err != 0) + return err; + break; +#endif + default: + return -EOPNOTSUPP; + } + + 
fdir_tcam_rule->data.fdir_info.qid_htn.qid = (u32)fs->ring_cookie; + tcam_key_calculate(tcam_key, fdir_tcam_rule); + + return 0; +} + +void hinic5_flush_rx_flow_rule(struct hinic5_nic_dev *nic_dev) +{ + struct hinic5_tcam_info *tcam_info = &nic_dev->tcam; + struct hinic5_ethtool_rx_flow_rule *eth_rule = NULL; + struct hinic5_ethtool_rx_flow_rule *eth_rule_tmp = NULL; + struct hinic5_tcam_filter *tcam_iter = NULL; + struct hinic5_tcam_filter *tcam_iter_tmp = NULL; + struct hinic5_tcam_dynamic_block *block = NULL; + struct hinic5_tcam_dynamic_block *block_tmp = NULL; + struct list_head *dynamic_list = + &tcam_info->tcam_dynamic_info.tcam_dynamic_list; + + if (list_empty(&tcam_info->tcam_list) == 0) { + list_for_each_entry_safe(tcam_iter, tcam_iter_tmp, + &tcam_info->tcam_list, + tcam_filter_list) { + list_del(&tcam_iter->tcam_filter_list); + kfree(tcam_iter); + } + } + if (list_empty(dynamic_list) == 0) { + list_for_each_entry_safe(block, block_tmp, dynamic_list, + block_list) { + list_del(&block->block_list); + kfree(block); + } + } + + if (list_empty(&nic_dev->rx_flow_rule.rules) == 0) { + list_for_each_entry_safe(eth_rule, eth_rule_tmp, + &nic_dev->rx_flow_rule.rules, list) { + list_del(ð_rule->list); + kfree(eth_rule); + } + } + + if (HINIC5_SUPPORT_FDIR(nic_dev->hwdev)) { + hinic5_flush_tcam_rule(nic_dev->hwdev); + hinic5_set_fdir_tcam_rule_filter(nic_dev->hwdev, false); + } +} + +static struct hinic5_tcam_dynamic_block * +hinic5_alloc_dynamic_block_resource(struct hinic5_nic_dev *nic_dev, + struct hinic5_tcam_info *tcam_info, + u16 dynamic_block_id) +{ + struct hinic5_tcam_dynamic_block *dynamic_block_ptr = NULL; + + dynamic_block_ptr = kzalloc(sizeof(*dynamic_block_ptr), GFP_KERNEL); + if (!dynamic_block_ptr) { + nicif_err(nic_dev, drv, nic_dev->netdev, "fdir filter dynamic alloc block index %u memory failed\n", + dynamic_block_id); + return NULL; + } + + dynamic_block_ptr->dynamic_block_id = dynamic_block_id; + list_add_tail(&dynamic_block_ptr->block_list, + 
&tcam_info->tcam_dynamic_info.tcam_dynamic_list); + + tcam_info->tcam_dynamic_info.dynamic_block_cnt++; + + return dynamic_block_ptr; +} + +static void hinic5_free_dynamic_block_resource(struct hinic5_tcam_info *tcam_info, + struct hinic5_tcam_dynamic_block *block_ptr) +{ + if (!block_ptr) + return; + + list_del(&block_ptr->block_list); + kfree(block_ptr); + + tcam_info->tcam_dynamic_info.dynamic_block_cnt--; +} + +static struct hinic5_tcam_dynamic_block * +hinic5_dynamic_lookup_tcam_filter(struct hinic5_nic_dev *nic_dev, + struct nic_tcam_cfg_rule *fdir_tcam_rule, + const struct hinic5_tcam_info *tcam_info, + struct hinic5_tcam_filter *tcam_filter, + u16 *tcam_index) +{ + struct hinic5_tcam_dynamic_block *tmp = NULL; + u16 index; + + list_for_each_entry(tmp, + &tcam_info->tcam_dynamic_info.tcam_dynamic_list, + block_list) + if (!tmp || tmp->dynamic_index_cnt < HINIC5_TCAM_DYNAMIC_BLOCK_SIZE) + break; + + if (!tmp || tmp->dynamic_index_cnt >= HINIC5_TCAM_DYNAMIC_BLOCK_SIZE) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fdir filter dynamic lookup for index failed\n"); + return NULL; + } + + for (index = 0; index < HINIC5_TCAM_DYNAMIC_BLOCK_SIZE; index++) + if (tmp->dynamic_index_used[index] == 0) + break; + + if (index == HINIC5_TCAM_DYNAMIC_BLOCK_SIZE) { + nicif_err(nic_dev, drv, nic_dev->netdev, "tcam block 0x%x supports filter rules is full\n", + tmp->dynamic_block_id); + return NULL; + } + + tcam_filter->dynamic_block_id = tmp->dynamic_block_id; + tcam_filter->index = index; + *tcam_index = index; + + fdir_tcam_rule->index = index + + HINIC5_PKT_TCAM_DYNAMIC_INDEX_START(tmp->dynamic_block_id); + + return tmp; +} + +static int hinic5_tcam_filter_alloc_block(struct hinic5_tcam_info *tcam_info, + struct hinic5_nic_dev *nic_dev, + u16 *tcam_block_index, int *block_alloc_flag, + const struct hinic5_tcam_dynamic_block *dynamic_block_ptr) +{ + int err; + u16 block_cnt = tcam_info->tcam_dynamic_info.dynamic_block_cnt; + + if (tcam_info->tcam_rule_nums >= block_cnt * 
HINIC5_TCAM_DYNAMIC_BLOCK_SIZE) { + if (block_cnt >= (HINIC5_MAX_TCAM_FILTERS / HINIC5_TCAM_DYNAMIC_BLOCK_SIZE)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Dynamic tcam block is full, alloc failed\n"); + return -EFAULT; + } + + err = hinic5_alloc_tcam_block(nic_dev->hwdev, tcam_block_index); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fdir filter dynamic tcam alloc block failed\n"); + return -EFAULT; + } + + *block_alloc_flag = 1; + + dynamic_block_ptr = + hinic5_alloc_dynamic_block_resource(nic_dev, tcam_info, *tcam_block_index); + if (!dynamic_block_ptr) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Fdir filter dynamic alloc block memory failed\n"); + hinic5_free_tcam_block(nic_dev->hwdev, tcam_block_index); + return -EFAULT; + } + } + return 0; +} + +static int hinic5_add_tcam_filter(struct hinic5_nic_dev *nic_dev, + struct hinic5_tcam_filter *tcam_filter, + struct nic_tcam_cfg_rule *fdir_tcam_rule) +{ + struct hinic5_tcam_info *tcam_info = &nic_dev->tcam; + struct hinic5_tcam_dynamic_block *dynamic_block_ptr = NULL; + struct hinic5_tcam_dynamic_block *tmp = NULL; + u16 tcam_block_index = 0; + int block_alloc_flag = 0; + u16 index = 0; + int err; + + err = hinic5_tcam_filter_alloc_block(tcam_info, nic_dev, &tcam_block_index, + &block_alloc_flag, dynamic_block_ptr); + if (err != 0) + return err; + + tmp = hinic5_dynamic_lookup_tcam_filter(nic_dev, fdir_tcam_rule, + tcam_info, tcam_filter, &index); + if (!tmp) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Dynamic lookup tcam filter failed\n"); + goto lookup_tcam_index_failed; + } + + err = hinic5_add_tcam_rule(nic_dev->hwdev, fdir_tcam_rule); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fdir_tcam_rule add failed\n"); + goto add_tcam_rules_failed; + } + + nicif_info(nic_dev, drv, nic_dev->netdev, + "Add fdir tcam rule, function_id: 0x%x, tcam_block_id: %hu, local_index: %hu, global_index: %u, queue: %u, tcam_rule_nums: %d succeed\n", + hinic5_global_func_id(nic_dev->hwdev), 
+ tcam_filter->dynamic_block_id, index, fdir_tcam_rule->index, + fdir_tcam_rule->data.fdir_info.qid, tcam_info->tcam_rule_nums + 1); + + if (tcam_info->tcam_rule_nums == 0) { + err = hinic5_set_fdir_tcam_rule_filter(nic_dev->hwdev, true); + if (err != 0) + goto enable_failed; + } + + list_add_tail(&tcam_filter->tcam_filter_list, &tcam_info->tcam_list); + + tmp->dynamic_index_used[index] = 1; + tmp->dynamic_index_cnt++; + + tcam_info->tcam_rule_nums++; + + return 0; + +enable_failed: + hinic5_del_tcam_rule(nic_dev->hwdev, fdir_tcam_rule->index); + +add_tcam_rules_failed: +lookup_tcam_index_failed: + if (block_alloc_flag == 1) { + hinic5_free_dynamic_block_resource(tcam_info, dynamic_block_ptr); + hinic5_free_tcam_block(nic_dev->hwdev, &tcam_block_index); + } + + return -EFAULT; +} + +static int hinic5_del_tcam_filter(struct hinic5_nic_dev *nic_dev, + struct hinic5_tcam_filter *tcam_filter) +{ + struct hinic5_tcam_info *tcam_info = &nic_dev->tcam; + u16 dynamic_block_id = tcam_filter->dynamic_block_id; + struct hinic5_tcam_dynamic_block *tmp = NULL; + u32 index = 0; + int err; + + list_for_each_entry(tmp, + &tcam_info->tcam_dynamic_info.tcam_dynamic_list, + block_list) { + if (tmp->dynamic_block_id == dynamic_block_id) + break; + } + if (!tmp || tmp->dynamic_block_id != dynamic_block_id) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Fdir filter del dynamic lookup for block failed\n"); + return -EFAULT; + } + + index = HINIC5_PKT_TCAM_DYNAMIC_INDEX_START(tmp->dynamic_block_id) + + tcam_filter->index; + + err = hinic5_del_tcam_rule(nic_dev->hwdev, index); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "fdir_tcam_rule del failed\n"); + return -EFAULT; + } + + nicif_info(nic_dev, drv, nic_dev->netdev, + "Del tcam_dynamic_rule function_id: 0x%x, tcam_block_id: %hu, local_index: %hu, global_index: %u, local_rules_nums: %d, global_rule_nums: %d succeed\n", + hinic5_global_func_id(nic_dev->hwdev), dynamic_block_id, + tcam_filter->index, index, 
tmp->dynamic_index_cnt - 1, + tcam_info->tcam_rule_nums - 1); + + tmp->dynamic_index_used[tcam_filter->index] = 0; + tmp->dynamic_index_cnt--; + tcam_info->tcam_rule_nums--; + if (tmp->dynamic_index_cnt == 0) { + hinic5_free_tcam_block(nic_dev->hwdev, &dynamic_block_id); + hinic5_free_dynamic_block_resource(tcam_info, tmp); + } + + if (tcam_info->tcam_rule_nums == 0) + hinic5_set_fdir_tcam_rule_filter(nic_dev->hwdev, false); + + list_del(&tcam_filter->tcam_filter_list); + + return 0; +} + +static inline struct hinic5_tcam_filter * +hinic5_tcam_filter_lookup(const struct list_head *filter_list, + struct tag_tcam_key *key) +{ + struct hinic5_tcam_filter *iter = NULL; + + list_for_each_entry(iter, filter_list, tcam_filter_list) { + if (memcmp(key, &iter->tcam_key, + sizeof(struct tag_tcam_key)) == 0) { + return iter; + } + } + + return NULL; +} + +static void del_ethtool_rule(struct hinic5_nic_dev *nic_dev, + struct hinic5_ethtool_rx_flow_rule *eth_rule) +{ + list_del(ð_rule->list); + nic_dev->rx_flow_rule.tot_num_rules--; + + kfree(eth_rule); +} + +static int hinic5_remove_one_rule(struct hinic5_nic_dev *nic_dev, + struct hinic5_ethtool_rx_flow_rule *eth_rule) +{ + struct hinic5_tcam_info *tcam_info = &nic_dev->tcam; + struct hinic5_tcam_filter *tcam_filter = NULL; + struct nic_tcam_cfg_rule fdir_tcam_rule; + struct tag_tcam_key tcam_key; + int err; + + memset(&fdir_tcam_rule, 0, sizeof(fdir_tcam_rule)); + memset(&tcam_key, 0, sizeof(tcam_key)); + + if (hinic5_support_htn(nic_dev->hwdev)) { + err = hinic5_fdir_tcam_info_htn_init(nic_dev, ð_rule->flow_spec, &tcam_key, + &fdir_tcam_rule); + } else { + err = hinic5_fdir_tcam_info_init(nic_dev, ð_rule->flow_spec, &tcam_key, + &fdir_tcam_rule); + } + + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Init fdir info failed\n"); + return err; + } + + tcam_filter = hinic5_tcam_filter_lookup(&tcam_info->tcam_list, + &tcam_key); + if (!tcam_filter) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Filter does not 
exists\n"); + return -EEXIST; + } + + err = hinic5_del_tcam_filter(nic_dev, tcam_filter); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Delete tcam filter failed\n"); + goto free_tcam_filter; + } + + del_ethtool_rule(nic_dev, eth_rule); + +free_tcam_filter: + kfree(tcam_filter); + tcam_filter = NULL; + return err; +} + +static void add_rule_to_list(struct hinic5_nic_dev *nic_dev, + struct hinic5_ethtool_rx_flow_rule *rule) +{ + struct hinic5_ethtool_rx_flow_rule *iter = NULL; + struct list_head *head = &nic_dev->rx_flow_rule.rules; + + list_for_each_entry(iter, &nic_dev->rx_flow_rule.rules, list) { + if (iter->flow_spec.location > rule->flow_spec.location) + break; + head = &iter->list; + } + nic_dev->rx_flow_rule.tot_num_rules++; + list_add(&rule->list, head); +} + +static int hinic5_add_one_rule(struct hinic5_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *fs) +{ + struct nic_tcam_cfg_rule fdir_tcam_rule; + struct tag_tcam_key tcam_key; + struct hinic5_ethtool_rx_flow_rule *eth_rule = NULL; + struct hinic5_tcam_filter *tcam_filter = NULL; + struct hinic5_tcam_info *tcam_info = &nic_dev->tcam; + int err; + + memset(&fdir_tcam_rule, 0, sizeof(fdir_tcam_rule)); + memset(&tcam_key, 0, sizeof(tcam_key)); + + if (hinic5_support_htn(nic_dev->hwdev)) { + err = hinic5_fdir_tcam_info_htn_init(nic_dev, fs, &tcam_key, + &fdir_tcam_rule); + } else { + err = hinic5_fdir_tcam_info_init(nic_dev, fs, &tcam_key, + &fdir_tcam_rule); + } + + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Init fdir info failed\n"); + return err; + } + + tcam_filter = hinic5_tcam_filter_lookup(&tcam_info->tcam_list, + &tcam_key); + if (tcam_filter) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Filter exists\n"); + return -EEXIST; + } + + tcam_filter = kzalloc(sizeof(*tcam_filter), GFP_KERNEL); + if (!tcam_filter) + return -ENOMEM; + memcpy(&tcam_filter->tcam_key, + &tcam_key, sizeof(struct tag_tcam_key)); + tcam_filter->queue = (u16)fdir_tcam_rule.data.fdir_info.qid; + + 
err = hinic5_add_tcam_filter(nic_dev, tcam_filter, &fdir_tcam_rule); + if (err != 0) + goto add_tcam_filter_fail; + + /* driver save new rule filter */ + eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL); + if (!eth_rule) { + err = -ENOMEM; + goto alloc_eth_rule_fail; + } + + eth_rule->flow_spec = *fs; + add_rule_to_list(nic_dev, eth_rule); + + return 0; + +alloc_eth_rule_fail: + hinic5_del_tcam_filter(nic_dev, tcam_filter); +add_tcam_filter_fail: + kfree(tcam_filter); + tcam_filter = NULL; + return err; +} + +static struct hinic5_ethtool_rx_flow_rule * +find_ethtool_rule(const struct hinic5_nic_dev *nic_dev, u32 location) +{ + struct hinic5_ethtool_rx_flow_rule *iter = NULL; + + list_for_each_entry(iter, &nic_dev->rx_flow_rule.rules, list) { + if (iter->flow_spec.location == location) + return iter; + } + return NULL; +} + +static int validate_flow(struct hinic5_nic_dev *nic_dev, + const struct ethtool_rx_flow_spec *fs) +{ + if (fs->location >= MAX_NUM_OF_ETHTOOL_NTUPLE_RULES) { + nicif_err(nic_dev, drv, nic_dev->netdev, "loc exceed limit[0,%lu]\n", + MAX_NUM_OF_ETHTOOL_NTUPLE_RULES); + return -EINVAL; + } + + if (fs->ring_cookie >= nic_dev->q_params.num_qps) { + nicif_err(nic_dev, drv, nic_dev->netdev, "action is larger than queue number %u\n", + nic_dev->q_params.num_qps); + return -EINVAL; + } + + switch (fs->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case IP_USER_FLOW: +#ifndef UNSUPPORT_NTUPLE_IPV6 + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case IPV6_USER_FLOW: +#endif + break; + default: + nicif_err(nic_dev, drv, nic_dev->netdev, "flow type is not supported\n"); + return -EOPNOTSUPP; + } + + return 0; +} + +int hinic5_ethtool_flow_replace(struct hinic5_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *fs) +{ + struct hinic5_ethtool_rx_flow_rule *eth_rule = NULL; + struct ethtool_rx_flow_spec flow_spec_temp; + int loc_exit_flag = 0; + int err; + + if (!HINIC5_SUPPORT_FDIR(nic_dev->hwdev)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupported 
ntuple function\n"); + return -EOPNOTSUPP; + } + + err = validate_flow(nic_dev, fs); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "flow is not valid %d\n", err); + return err; + } + + eth_rule = find_ethtool_rule(nic_dev, fs->location); + /* when location is same, delete old location rule. */ + if (eth_rule) { + memcpy(&flow_spec_temp, ð_rule->flow_spec, + sizeof(struct ethtool_rx_flow_spec)); + err = hinic5_remove_one_rule(nic_dev, eth_rule); + if (err != 0) + return err; + + loc_exit_flag = 1; + } + + /* add new rule filter */ + err = hinic5_add_one_rule(nic_dev, fs); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Add new rule filter failed\n"); + if (loc_exit_flag != 0) + hinic5_add_one_rule(nic_dev, &flow_spec_temp); + + return -ENOENT; + } + + return 0; +} + +int hinic5_ethtool_flow_remove(struct hinic5_nic_dev *nic_dev, u32 location) +{ + struct hinic5_ethtool_rx_flow_rule *eth_rule = NULL; + int err; + + if (!HINIC5_SUPPORT_FDIR(nic_dev->hwdev)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupported ntuple function\n"); + return -EOPNOTSUPP; + } + + if (location >= MAX_NUM_OF_ETHTOOL_NTUPLE_RULES) + return -ENOSPC; + + eth_rule = find_ethtool_rule(nic_dev, location); + if (!eth_rule) + return -ENOENT; + + err = hinic5_remove_one_rule(nic_dev, eth_rule); + + return err; +} + +int hinic5_ethtool_get_flow(const struct hinic5_nic_dev *nic_dev, + struct ethtool_rxnfc *info, u32 location) +{ + struct hinic5_ethtool_rx_flow_rule *eth_rule = NULL; + + if (!HINIC5_SUPPORT_FDIR(nic_dev->hwdev)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupported ntuple function\n"); + return -EOPNOTSUPP; + } + + if (location >= MAX_NUM_OF_ETHTOOL_NTUPLE_RULES) + return -EINVAL; + + list_for_each_entry(eth_rule, &nic_dev->rx_flow_rule.rules, list) { + if (eth_rule->flow_spec.location == location) { + info->fs = eth_rule->flow_spec; + return 0; + } + } + + return -ENOENT; +} + +int hinic5_ethtool_get_all_flows(const struct hinic5_nic_dev 
*nic_dev, + struct ethtool_rxnfc *info, u32 *rule_locs) +{ + u32 idx = 0; + struct hinic5_ethtool_rx_flow_rule *eth_rule = NULL; + + if (!HINIC5_SUPPORT_FDIR(nic_dev->hwdev)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupported ntuple function\n"); + return -EOPNOTSUPP; + } + + info->data = MAX_NUM_OF_ETHTOOL_NTUPLE_RULES; + list_for_each_entry(eth_rule, &nic_dev->rx_flow_rule.rules, list) + rule_locs[idx++] = eth_rule->flow_spec.location; + + return info->rule_cnt == idx ? 0 : -ENOENT; +} + +bool hinic5_validate_channel_setting_in_ntuple(const struct hinic5_nic_dev *nic_dev, u32 q_num) +{ + struct hinic5_ethtool_rx_flow_rule *iter = NULL; + + list_for_each_entry(iter, &nic_dev->rx_flow_rule.rules, list) { + if (iter->flow_spec.ring_cookie >= q_num) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "User defined filter %u assigns flow to queue %llu. Queue number %u is invalid\n", + iter->flow_spec.location, iter->flow_spec.ring_cookie, q_num); + return false; + } + } + + return true; +} diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ntuple.h b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ntuple.h new file mode 100644 index 00000000..5ba189c5 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_ntuple.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_NTUPLE_H +#define HINIC5_NTUPLE_H + +#include <linux/types.h> +#include <linux/ethtool.h> + +#define MAX_NUM_OF_ETHTOOL_NTUPLE_RULES BIT(10) +struct hinic5_ethtool_rx_flow_rule { + struct list_head list; + struct ethtool_rx_flow_spec flow_spec; +}; + +#define UINT1_MAX 0x1 +#define UINT2_MAX 0x3 +#define UINT3_MAX 0x7 +#define UINT4_MAX 0xf +#define UINT5_WIDTH 0x5 +#define UINT5_MAX 0x1f +#define UINT15_MAX 0x7fff + +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_rss.c 
b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_rss.c new file mode 100644 index 00000000..f44f4089 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_rss.c @@ -0,0 +1,1000 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/device.h> +#include <linux/ethtool.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/dcbnl.h> + +#include "ossl_knl.h" +#include "hinic5_crm.h" +#include "hinic5_vram_common.h" +#include "hinic5_nic_cfg.h" +#include "hinic5_nic_dev.h" +#include "hinic5_hw.h" +#include "hinic5_rss.h" + +static u16 num_qps; +module_param(num_qps, ushort, 0444); +MODULE_PARM_DESC(num_qps, "Number of Queue Pairs, 0-65535 (default=0)"); + +#define MOD_PARA_VALIDATE_NUM_QPS(nic_dev, num_qps, out_qps) do { \ + if ((num_qps) > (nic_dev)->max_qps) \ + nic_warn((nic_dev)->lld_dev->dev, \ + "Module Parameter %s value %u is out of range, " \ + "Maximum value for the device: %u, using %u\n", \ + #num_qps, num_qps, (nic_dev)->max_qps, \ + (nic_dev)->max_qps); \ + if ((num_qps) > (nic_dev)->max_qps) \ + (out_qps) = (nic_dev)->max_qps; \ + else if ((num_qps) > 0) \ + (out_qps) = (num_qps); \ +} while (0) + +/* In rx, iq means cos */ +static u8 hinic5_get_iqmap_by_tc(const u8 *prio_tc, u8 num_iq, u8 tc) +{ + u8 i, map = 0; + + for (i = 0; i < num_iq; i++) { + if (prio_tc[i] == tc) + map |= (u8)(1U << ((num_iq - 1) - i)); + } + + return map; +} + +static u8 hinic5_get_tcid_by_rq(const u32 *indir_tbl, u8 num_tcs, u16 rq_id) +{ + u16 tc_group_size; + int i; + u8 temp_num_tcs = num_tcs; + + if (num_tcs == 0) + temp_num_tcs = 1; + + tc_group_size = NIC_RSS_INDIR_SIZE / temp_num_tcs; + for (i = 0; i < 
NIC_RSS_INDIR_SIZE; i++) { + if (indir_tbl[i] == rq_id) + return (u8)(i / tc_group_size); + } + + return 0xFF; /* Invalid TC */ +} + +static int hinic5_get_rq2iq_map(struct hinic5_nic_dev *nic_dev, + u16 num_rq, u8 num_tcs, u8 *prio_tc, u8 cos_num, + u32 *indir_tbl, u8 *map, u32 map_size) +{ + u16 qid; + u8 tc_id; + u8 temp_num_tcs = num_tcs; + + if (num_tcs == 0) + temp_num_tcs = 1; + + if (num_rq > map_size) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Rq number(%u) exceed max map qid(%u)\n", + num_rq, map_size); + return -EINVAL; + } + + if (cos_num < HINIC_NUM_IQ_PER_FUNC) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Cos number(%u) less then map qid(%d)\n", + cos_num, HINIC_NUM_IQ_PER_FUNC); + return -EINVAL; + } + + for (qid = 0; qid < num_rq; qid++) { + tc_id = hinic5_get_tcid_by_rq(indir_tbl, temp_num_tcs, qid); + map[qid] = hinic5_get_iqmap_by_tc(prio_tc, + HINIC_NUM_IQ_PER_FUNC, tc_id); + } + + return 0; +} + +static inline void set_default_cos(u8 *default_cos, + const struct hinic5_nic_dev *nic_dev, + u8 valid_cos_map) +{ + if ((BIT(nic_dev->hw_dcb_cfg.default_cos) & valid_cos_map) != 0) { + *default_cos = nic_dev->hw_dcb_cfg.default_cos; + } else { + if (nic_dev->hw_default_cos_valid != 0) + *default_cos = nic_dev->hw_default_cos; + else + *default_cos = (u8)fls(nic_dev->func_dft_cos_bitmap) - 1; + } +} + +static void hinic5_fillout_indir_tbl(struct hinic5_nic_dev *nic_dev, u8 group_num, u32 *indir) +{ + u8 valid_cos_map = hinic5_get_dev_valid_cos_map(nic_dev); + u16 k, group_size, start_qid = 0, qp_num = 0; + u8 j, cur_cos = 0, group = 0; + u32 i = 0; + + if (!nic_dev) + return; + + if (nic_dev->flow_bifur_group_num > HINIC5_GROUP_NUMBER_MIN) { + group_size = NIC_RSS_INDIR_SIZE / nic_dev->flow_bifur_group_num; + for (i = 0; i < group_size; i++) + indir[i] = i % nic_dev->q_params.num_qps; + return; + } + + if (group_num == 0) { + for (i = 0; i < NIC_RSS_INDIR_SIZE; i++) + indir[i] = i % nic_dev->q_params.num_qps; + } else { + group_size = 
/* Configure RSS (and its cos grouping) on the hardware and derive the
 * rq -> iq map for interrupt steering.
 *
 * Group selection:
 *  - flow bifurcation active: all cos map to group 0, group count taken
 *    from flow_bifur_group_num;
 *  - DCB enabled: one group per valid user cos (rounded up to a power of
 *    two); cos entries are filled in reverse (highest priority first),
 *    invalid cos fall into the last group;
 *  - otherwise: group_num = 0 (flat RSS, no cos grouping).
 *
 * Returns 0 on success or a negative errno from the HW config / map build.
 */
int hinic5_rss_init(struct hinic5_nic_dev *nic_dev, u8 *rq2iq_map, u32 map_size, u8 dcb_en)
{
	struct net_device *netdev = nic_dev->netdev;
	u8 i, group_num, cos_bitmap, group = 0;
	u8 cos_group[NIC_DCB_UP_MAX] = {0};
	int err;

	if (nic_dev->flow_bifur_group_num > HINIC5_GROUP_NUMBER_MIN) {
		group_num = nic_dev->flow_bifur_group_num;
		for (i = 0; i < NIC_DCB_UP_MAX; i++) {
			/* when enable flow bifur, all cos are mapped to group 0 */
			cos_group[i] = 0;
		}
	} else if (dcb_en != 0) {
		group_num = roundup_pow_of_two(hinic5_get_dev_user_cos_num(nic_dev));
		cos_bitmap = hinic5_get_dev_valid_cos_map(nic_dev);

		/* fill cos_group in reverse order: bit i of the valid map
		 * configures entry (NIC_DCB_UP_MAX - i - 1)
		 */
		for (i = 0; i < NIC_DCB_UP_MAX; i++) {
			if ((BIT(i) & cos_bitmap) != 0)
				cos_group[NIC_DCB_UP_MAX - i - 1] = group++;
			else
				cos_group[NIC_DCB_UP_MAX - i - 1] = group_num - 1;
		}
	} else {
		group_num = 0;
	}

	/* push key/indir/type/engine and enable RSS in hardware */
	err = hinic5_set_hw_rss_parameters(netdev, 1, group_num, cos_group, dcb_en);
	if (err != 0)
		return err;

	/* derive the per-rq iq bitmap from the freshly filled indir table */
	err = hinic5_get_rq2iq_map(nic_dev, nic_dev->q_params.num_qps, group_num, cos_group,
				   NIC_DCB_UP_MAX, nic_dev->rss_indir, rq2iq_map, map_size);
	if (err != 0)
		nicif_err(nic_dev, drv, netdev, "Failed to get rq map\n");
	return err;
}
HINIC5_RSS_HASH_ENGINE_TYPE_XOR; + nic_dev->rss_type.tcp_ipv6_ext = 1; + nic_dev->rss_type.ipv6_ext = 1; + nic_dev->rss_type.tcp_ipv6 = 1; + nic_dev->rss_type.ipv6 = 1; + nic_dev->rss_type.tcp_ipv4 = 1; + nic_dev->rss_type.ipv4 = 1; + nic_dev->rss_type.udp_ipv6 = 1; + nic_dev->rss_type.udp_ipv4 = 1; +} + +void hinic5_clear_rss_config(struct hinic5_nic_dev *nic_dev) +{ + kfree(nic_dev->rss_hkey); + nic_dev->rss_hkey = NULL; + + kfree(nic_dev->rss_indir); + nic_dev->rss_indir = NULL; +} + +static void decide_num_qps(struct hinic5_nic_dev *nic_dev) +{ + u16 tmp_num_qps = nic_dev->max_qps; + u16 num_cpus = 0; + int i, node; + int is_in_kexec = hinic5_vram_get_kexec_flag(); + + if (is_in_kexec != 0) { + nic_dev->q_params.num_qps = nic_dev->nic_hinic5_vram->hinic5_vram_num_qps; + nicif_info(nic_dev, drv, nic_dev->netdev, + "Os hotreplace use hinic5_vram to init num qps 1:%u 2:%u\n", + nic_dev->q_params.num_qps, + nic_dev->nic_hinic5_vram->hinic5_vram_num_qps); + return; + } + + if (nic_dev->nic_cap.default_num_queues != 0 && + nic_dev->nic_cap.default_num_queues < nic_dev->max_qps) + tmp_num_qps = nic_dev->nic_cap.default_num_queues; + + MOD_PARA_VALIDATE_NUM_QPS(nic_dev, num_qps, tmp_num_qps); + + for (i = 0; i < (int)num_online_cpus(); i++) { + node = (int)cpu_to_node(i); + if (node == dev_to_node(nic_dev->lld_dev->dev)) + num_cpus++; + } + + pr_info("num_cpus is: %u, and num_online_cpus is: %u", num_cpus, num_online_cpus()); + if (num_cpus == 0) + num_cpus = (u16)num_online_cpus(); + + nic_dev->q_params.num_qps = (u16)min_t(u16, tmp_num_qps, num_cpus); + nic_dev->nic_hinic5_vram->hinic5_vram_num_qps = nic_dev->q_params.num_qps; +} + +static void copy_value_to_rss_hkey(struct hinic5_nic_dev *nic_dev, + const u8 *hkey) +{ + u32 i; + u32 *rss_hkey = (u32 *)nic_dev->rss_hkey; + + memcpy(nic_dev->rss_hkey, hkey, NIC_RSS_KEY_SIZE); + + /* make a copy of the key, and convert it to Big Endian */ + for (i = 0; i < NIC_RSS_KEY_SIZE / sizeof(u32); i++) + nic_dev->rss_hkey_be[i] 
= cpu_to_be32(rss_hkey[i]); +} + +static int alloc_rss_resource(struct hinic5_nic_dev *nic_dev) +{ + u8 default_rss_key[NIC_RSS_KEY_SIZE] = { + 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, + 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, + 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, + 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, + 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa}; + + /* We request double spaces for the hash key, + * the second one holds the key of Big Edian + * format. + */ + nic_dev->rss_hkey = + kzalloc(NIC_RSS_KEY_SIZE * + HINIC5_RSS_KEY_RSV_NUM, GFP_KERNEL); + if (nic_dev->rss_hkey == 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to alloc memory for rss_hkey\n"); + return -ENOMEM; + } + + /* The second space is for big edian hash key */ + nic_dev->rss_hkey_be = (u32 *)(nic_dev->rss_hkey + + NIC_RSS_KEY_SIZE); + copy_value_to_rss_hkey(nic_dev, (u8 *)default_rss_key); + + nic_dev->rss_indir = kzalloc(sizeof(u32) * NIC_RSS_INDIR_SIZE, GFP_KERNEL); + if (nic_dev->rss_indir == 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to alloc memory for rss_indir\n"); + kfree(nic_dev->rss_hkey); + nic_dev->rss_hkey = NULL; + return -ENOMEM; + } + + return 0; +} + +void hinic5_try_to_enable_rss(struct hinic5_nic_dev *nic_dev) +{ + u8 cos_map[NIC_DCB_UP_MAX] = {0}; + int err = 0; + + if (!nic_dev) + return; + + nic_dev->max_qps = hinic5_func_max_nic_qnum(nic_dev->hwdev); + if (nic_dev->max_qps <= 1 || !HINIC5_SUPPORT_RSS(nic_dev->hwdev)) { + pr_err("nic_dev->max_qps is: %u", nic_dev->max_qps); + goto set_q_params; + } + + err = alloc_rss_resource(nic_dev); + if (err != 0) { + nic_dev->max_qps = 1; + pr_err("alloc_rss_resource failed"); + goto set_q_params; + } + + set_bit(HINIC5_RSS_ENABLE, &nic_dev->flags); + + decide_num_qps(nic_dev); + + hinic5_init_rss_parameters(nic_dev->netdev); + /* Attempt to deploy the RSS configuration; if deployment fails, + * revert to a single queue mode and disable RSS + * to ensure the 
/* Push the full RSS configuration to hardware in order:
 * hash key, indirection table (rebuilt unless userspace pinned it via
 * ethtool), hash type + engine, then the enable/cos/queue-count config.
 *
 * @rss_en:  1 to enable RSS in hardware, 0 to disable
 * @cos_num: number of cos groups (0 for flat RSS)
 * @cos_map: per-priority group table, NIC_DCB_UP_MAX entries
 * @dcb_en:  whether DCB is active (affects indir table layout)
 *
 * Returns 0 on success or the first failing step's negative errno.
 */
int hinic5_set_hw_rss_parameters(struct net_device *netdev, u8 rss_en,
				 u8 cos_num, u8 *cos_map, u8 dcb_en)
{
	struct hinic5_nic_dev *nic_dev = netdev_priv(netdev);
	int err;

	/* RSS key */
	err = hinic5_rss_set_hash_key(nic_dev->hwdev, nic_dev->rss_hkey);
	if (err != 0)
		return err;

	/* only regenerate the indir table when userspace has not
	 * explicitly configured one through ethtool
	 */
	if (!netif_is_rxfh_configured(netdev))
		hinic5_fillout_indir_tbl(nic_dev, cos_num, nic_dev->rss_indir);

	/* indir table + hash type + hash engine */
	err = hinic5_config_rss_hw_resource(nic_dev, nic_dev->rss_indir);
	if (err != 0)
		return err;

	err = hinic5_rss_cfg(nic_dev->hwdev, rss_en, cos_num, cos_map,
			     nic_dev->q_params.num_qps);
	if (err != 0)
		return err;

	return 0;
}
break; + case TCP_V6_FLOW: + rss_type->tcp_ipv6 = rss_l4_en; + break; + case UDP_V4_FLOW: + rss_type->udp_ipv4 = rss_l4_en; + break; + case UDP_V6_FLOW: + rss_type->udp_ipv6 = rss_l4_en; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int update_rss_hash_opts(struct hinic5_nic_dev *nic_dev, + struct ethtool_rxnfc *cmd, + struct nic_rss_type *rss_type) +{ + int err; + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V4_FLOW: + case UDP_V6_FLOW: + err = set_l4_rss_hash_ops(cmd, rss_type); + if (err != 0) + return err; + + break; + case IPV4_FLOW: + rss_type->ipv4 = 1; + break; + case IPV6_FLOW: + rss_type->ipv6 = 1; + break; + default: + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unsupported flow type\n"); + return -EINVAL; + } + + return 0; +} + +static int hinic5_set_rss_hash_opts(struct hinic5_nic_dev *nic_dev, struct ethtool_rxnfc *cmd) +{ + struct nic_rss_type *rss_type = &nic_dev->rss_type; + int err; + + if (test_bit(HINIC5_RSS_ENABLE, &nic_dev->flags) == 0) { + cmd->data = 0; + nicif_err(nic_dev, drv, nic_dev->netdev, + "RSS is disable, not support to set flow-hash\n"); + return -EOPNOTSUPP; + } + + /* RSS does not support anything other than hashing + * to queues on src and dst IPs and ports + */ + if ((cmd->data & ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | + RXH_L4_B_2_3)) != 0) + return -EINVAL; + + /* We need at least the IP SRC and DEST fields for hashing */ + if (((cmd->data & RXH_IP_SRC) == 0) || ((cmd->data & RXH_IP_DST) == 0)) + return -EINVAL; + + err = hinic5_get_rss_type(nic_dev->hwdev, rss_type); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to get rss type\n"); + return -EFAULT; + } + + err = update_rss_hash_opts(nic_dev, cmd, rss_type); + if (err != 0) + return err; + + err = hinic5_set_rss_type(nic_dev->hwdev, *rss_type); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to set rss type\n"); + return -EFAULT; + } + + nicif_info(nic_dev, drv, 
nic_dev->netdev, "Set rss hash options success\n"); + + return 0; +} + +static void convert_rss_type(u8 rss_opt, struct ethtool_rxnfc *cmd) +{ + if (rss_opt != 0) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; +} + +static int hinic5_convert_rss_type(struct hinic5_nic_dev *nic_dev, + struct nic_rss_type *rss_type, + struct ethtool_rxnfc *cmd) +{ + cmd->data = RXH_IP_SRC | RXH_IP_DST; + switch (cmd->flow_type) { + case TCP_V4_FLOW: + convert_rss_type(rss_type->tcp_ipv4, cmd); + break; + case TCP_V6_FLOW: + convert_rss_type(rss_type->tcp_ipv6, cmd); + break; + case UDP_V4_FLOW: + convert_rss_type(rss_type->udp_ipv4, cmd); + break; + case UDP_V6_FLOW: + convert_rss_type(rss_type->udp_ipv6, cmd); + break; + case IPV4_FLOW: + case IPV6_FLOW: + break; + default: + nicif_err(nic_dev, drv, nic_dev->netdev, "Unsupported flow type\n"); + cmd->data = 0; + return -EINVAL; + } + + return 0; +} + +static int hinic5_get_rss_hash_opts(struct hinic5_nic_dev *nic_dev, struct ethtool_rxnfc *cmd) +{ + struct nic_rss_type rss_type = {0}; + int err; + + cmd->data = 0; + + if (test_bit(HINIC5_RSS_ENABLE, &nic_dev->flags) == 0) + return 0; + + err = hinic5_get_rss_type(nic_dev->hwdev, &rss_type); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to get rss type\n"); + return err; + } + + return hinic5_convert_rss_type(nic_dev, &rss_type, cmd); +} + +#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULELOCS +int hinic5_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd, void *rule_locs) +#else +int hinic5_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd, u32 *rule_locs) +#endif +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + int err = 0; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = nic_dev->q_params.num_qps; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = (u32)nic_dev->rx_flow_rule.tot_num_rules; + break; + case ETHTOOL_GRXCLSRULE: + err = hinic5_ethtool_get_flow(nic_dev, cmd, cmd->fs.location); + break; + case 
ETHTOOL_GRXCLSRLALL: + err = hinic5_ethtool_get_all_flows(nic_dev, cmd, rule_locs); + break; + case ETHTOOL_GRXFH: + err = hinic5_get_rss_hash_opts(nic_dev, cmd); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +int hinic5_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + int err = 0; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + err = hinic5_set_rss_hash_opts(nic_dev, cmd); + break; + case ETHTOOL_SRXCLSRLINS: + err = hinic5_ethtool_flow_replace(nic_dev, &cmd->fs); + break; + case ETHTOOL_SRXCLSRLDEL: + err = hinic5_ethtool_flow_remove(nic_dev, cmd->fs.location); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static u16 hinic5_max_channels(struct hinic5_nic_dev *nic_dev) +{ + u16 max_knl_qps_num = nic_dev->max_qps - nic_dev->usr_qps_num; + u8 tcs = (u8)netdev_get_num_tc(nic_dev->netdev); + + return (tcs != 0) ? max_knl_qps_num / tcs : max_knl_qps_num; +} + +static u16 hinic5_curr_channels(struct hinic5_nic_dev *nic_dev) +{ + if (netif_running(nic_dev->netdev)) { + return (nic_dev->q_params.num_qps != 0) ? 
+ nic_dev->q_params.num_qps : 1; + } else { + u16 hinic5_max_ch = hinic5_max_channels(nic_dev); + + return (u16)min_t(u16, hinic5_max_ch, + nic_dev->q_params.num_qps); + } +} + +void hinic5_get_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + + channels->max_rx = 0; + channels->max_tx = 0; + channels->max_other = 0; + /* report maximum channels */ + channels->max_combined = hinic5_max_channels(nic_dev); + channels->rx_count = 0; + channels->tx_count = 0; + channels->other_count = 0; + /* report flow director queues as maximum channels */ + channels->combined_count = hinic5_curr_channels(nic_dev); +} + +static int hinic5_validate_channel_parameter(struct net_device *netdev, + const struct ethtool_channels *channels) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + u16 max_channel = hinic5_max_channels(nic_dev); + unsigned int count = channels->combined_count; + + if (count == 0) { + nicif_err(nic_dev, drv, netdev, + "Unsupported combined_count=0\n"); + return -EINVAL; + } + + if ((channels->tx_count + channels->rx_count + channels->other_count) != 0) { + nicif_err(nic_dev, drv, netdev, + "Setting rx/tx/other count not supported\n"); + return -EINVAL; + } + + if (count > max_channel) { + nicif_err(nic_dev, drv, netdev, + "Combined count %u exceed limit %u\n", count, + max_channel); + return -EINVAL; + } + + return 0; +} + +int hinic5_set_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic5_dyna_txrxq_params q_params = {0}; + unsigned int count = channels->combined_count; + int err; + u8 user_cos_num = hinic5_get_dev_user_cos_num(nic_dev); + + err = hinic5_validate_channel_parameter(netdev, channels); + if (err != 0) + return -EINVAL; + + if (test_bit(HINIC5_RSS_ENABLE, &nic_dev->flags) == 0) { + nicif_err(nic_dev, drv, netdev, + "This function don't support RSS, only support 1 
#ifdef HAVE_ETHTOOL_GET_RXFH_INDIR_SIZE
/* ethtool callback: size of the RSS indirection table visible to
 * userspace. With flow bifurcation enabled the table is divided evenly
 * among the groups, so only one group's share is exposed.
 */
u32 hinic5_get_rxfh_indir_size(struct net_device *netdev)
{
	struct hinic5_nic_dev *nic_dev = netdev_priv(netdev);
	u32 groups = nic_dev->flow_bifur_group_num;

	if (groups <= HINIC5_GROUP_NUMBER_MIN)
		return NIC_RSS_INDIR_SIZE;

	return NIC_RSS_INDIR_SIZE / groups;
}
#endif
NIC_RSS_INDIR_SIZE + : NIC_RSS_INDIR_SIZE / nic_dev->flow_bifur_group_num; + + memcpy(dest_indir, src_indir, sizeof(u32) * kernel_indir_len); +} + +static int set_rss_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + int err; + + if (indir) { + cfg_indir(nic_dev, nic_dev->rss_indir, indir); + err = hinic5_rss_set_indir_tbl(nic_dev->hwdev, nic_dev->rss_indir); + if (err != 0) + return -EFAULT; + err = hinic5_rss_set_indir_tbl(nic_dev->hwdev, nic_dev->rss_indir); + if (err != 0) { + nicif_err(nic_dev, drv, netdev, + "Failed to set rss indir table\n"); + return -EFAULT; + } + nicif_info(nic_dev, drv, netdev, "Change rss indir success\n"); + } + + if (key) { + err = hinic5_rss_set_hash_key(nic_dev->hwdev, key); + if (err != 0) { + nicif_err(nic_dev, drv, netdev, "Failed to set rss key\n"); + return -EFAULT; + } + + copy_value_to_rss_hkey(nic_dev, key); + nicif_info(nic_dev, drv, netdev, "Change rss key success\n"); + } + + return 0; +} + +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) +u32 hinic5_get_rxfh_key_size(struct net_device *netdev) +{ + return NIC_RSS_KEY_SIZE; +} + +#if defined HAVE_ETHTOOL_RXFH_PARAM +int hinic5_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh_param) +#elif defined HAVE_RXFH_HASHFUNC +int hinic5_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +#else +int hinic5_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +#endif +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + + if (test_bit(HINIC5_RSS_ENABLE, &nic_dev->flags) == 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Rss is disable\n"); + return -EOPNOTSUPP; + } + +#ifdef HAVE_ETHTOOL_RXFH_PARAM + u32 *indir = rxfh_param->indir; + u8 *key = rxfh_param->key; +#endif + +#ifdef HAVE_RXFH_HASHFUNC + if (hfunc) + *hfunc = (nic_dev->rss_hash_engine != 0) ? 
+ ETH_RSS_HASH_TOP : ETH_RSS_HASH_XOR; +#endif + + if (indir) + cfg_indir(nic_dev, indir, nic_dev->rss_indir); + + if (key) + memcpy(key, nic_dev->rss_hkey, NIC_RSS_KEY_SIZE); + + return 0; +} + +#if defined HAVE_ETHTOOL_RXFH_PARAM +int hinic5_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh_param, + struct netlink_ext_ack *extack) +#elif defined HAVE_RXFH_HASHFUNC +int hinic5_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, + const u8 hfunc) +#else +#ifdef HAVE_RXFH_NONCONST +int hinic5_set_rxfh(struct net_device *netdev, u32 *indir, u8 *key) +#else +int hinic5_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key) +#endif +#endif /* HAVE_RXFH_HASHFUNC */ +{ +#ifdef HAVE_ETHTOOL_RXFH_PARAM + u32 *indir = rxfh_param->indir; + u8 *key = rxfh_param->key; +#endif + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + int err = 0; + + if (test_bit(HINIC5_RSS_ENABLE, &nic_dev->flags) == 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Not support to set rss parameters when rss is disable\n"); + return -EOPNOTSUPP; + } + + if ((test_bit(HINIC5_DCB_ENABLE, &nic_dev->flags) != 0) && indir) { + nicif_err(nic_dev, drv, netdev, + "Not support to set indir when DCB is enabled\n"); + return -EOPNOTSUPP; + } + +#ifdef HAVE_RXFH_HASHFUNC + if (hfunc != ETH_RSS_HASH_NO_CHANGE) { + if (hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR) { + nicif_err(nic_dev, drv, netdev, + "Not support to set hfunc type except TOP and XOR\n"); + return -EOPNOTSUPP; + } + + nic_dev->rss_hash_engine = (hfunc == ETH_RSS_HASH_XOR) ? + HINIC5_RSS_HASH_ENGINE_TYPE_XOR : + HINIC5_RSS_HASH_ENGINE_TYPE_TOEP; + err = hinic5_rss_set_hash_engine(nic_dev->hwdev, + nic_dev->rss_hash_engine); + if (err != 0) + return -EFAULT; + + nicif_info(nic_dev, drv, netdev, + "Change hfunc to RSS_HASH_%s success\n", + (hfunc == ETH_RSS_HASH_XOR) ? 
"XOR" : "TOP"); + } +#endif + err = set_rss_rxfh(netdev, indir, key); + + return err; +} + +#else /* !(defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH)) */ + +#ifdef HAVE_ETHTOOL_RXFH_INDIR_STRUCT_RXFH_INDIR +int hinic5_get_rxfh_indir(struct net_device *netdev, + struct ethtool_rxfh_indir *indir1) +#else +int hinic5_get_rxfh_indir(struct net_device *netdev, u32 *indir) +#endif +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); +#ifdef HAVE_ETHTOOL_RXFH_INDIR_STRUCT_RXFH_INDIR + u32 *indir = NULL; + + /* In a low version kernel(eg:suse 11.2), call the interface twice. + * First call to get the size value, + * and second call to get the rxfh indir according to the size value. + */ + if (indir1->size == 0) { + indir1->size = NIC_RSS_INDIR_SIZE; + return 0; + } + + if (indir1->size < NIC_RSS_INDIR_SIZE) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to get rss indir, rss size(%d) is more than system rss size(%u).\n", + NIC_RSS_INDIR_SIZE, indir1->size); + return -EINVAL; + } + + indir = indir1->ring_index; +#endif + if (test_bit(HINIC5_RSS_ENABLE, &nic_dev->flags) == 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Rss is disable\n"); + return -EOPNOTSUPP; + } + + if (indir) + cfg_indir(nic_dev, indir, nic_dev->rss_indir); + + return 0; +} + +#ifdef HAVE_ETHTOOL_RXFH_INDIR_STRUCT_RXFH_INDIR +int hinic5_set_rxfh_indir(struct net_device *netdev, + const struct ethtool_rxfh_indir *indir1) +#else +int hinic5_set_rxfh_indir(struct net_device *netdev, const u32 *indir) +#endif +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); +#ifdef HAVE_ETHTOOL_RXFH_INDIR_STRUCT_RXFH_INDIR + const u32 *indir = NULL; + + if (indir1->size != NIC_RSS_INDIR_SIZE) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to set rss indir, rss size(%d) is more than system rss size(%u).\n", + NIC_RSS_INDIR_SIZE, indir1->size); + return -EINVAL; + } + + indir = indir1->ring_index; +#endif + + if (test_bit(HINIC5_RSS_ENABLE, &nic_dev->flags) == 0) { + nicif_err(nic_dev, drv, 
nic_dev->netdev, + "Not support to set rss indir when rss is disable\n"); + return -EOPNOTSUPP; + } + + if ((test_bit(HINIC5_DCB_ENABLE, &nic_dev->flags) != 0) && indir) { + nicif_err(nic_dev, drv, netdev, + "Not support to set indir when DCB is enabled\n"); + return -EOPNOTSUPP; + } + + return set_rss_rxfh(netdev, indir, NULL); +} + +#endif /* defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) */ + diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_rss.h b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_rss.h new file mode 100644 index 00000000..3bf9375f --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_rss.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_RSS_H +#define HINIC5_RSS_H + +#include "hinic5_nic_dev.h" + +#define HINIC_NUM_IQ_PER_FUNC 8 + +int hinic5_rss_init(struct hinic5_nic_dev *nic_dev, u8 *rq2iq_map, + u32 map_size, u8 dcb_en); + +void hinic5_rss_deinit(struct hinic5_nic_dev *nic_dev); + +int hinic5_set_hw_rss_parameters(struct net_device *netdev, u8 rss_en, + u8 cos_num, u8 *cos_map, u8 dcb_en); + +void hinic5_init_rss_parameters(struct net_device *netdev); + +void hinic5_try_to_enable_rss(struct hinic5_nic_dev *nic_dev); + +void hinic5_clear_rss_config(struct hinic5_nic_dev *nic_dev); + +void hinic5_flush_rx_flow_rule(struct hinic5_nic_dev *nic_dev); +int hinic5_ethtool_get_flow(const struct hinic5_nic_dev *nic_dev, + struct ethtool_rxnfc *info, u32 location); + +int hinic5_ethtool_get_all_flows(const struct hinic5_nic_dev *nic_dev, + struct ethtool_rxnfc *info, u32 *rule_locs); + +int hinic5_ethtool_flow_remove(struct hinic5_nic_dev *nic_dev, u32 location); + +int hinic5_ethtool_flow_replace(struct hinic5_nic_dev *nic_dev, + struct ethtool_rx_flow_spec *fs); + +bool hinic5_validate_channel_setting_in_ntuple(const struct hinic5_nic_dev *nic_dev, u32 q_num); + +/* for ethtool 
*/ +#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULELOCS +int hinic5_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd, void *rule_locs); +#else +int hinic5_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd, u32 *rule_locs); +#endif + +int hinic5_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd); + +void hinic5_get_channels(struct net_device *netdev, + struct ethtool_channels *channels); + +int hinic5_set_channels(struct net_device *netdev, + struct ethtool_channels *channels); + +#ifdef HAVE_ETHTOOL_GET_RXFH_INDIR_SIZE +u32 hinic5_get_rxfh_indir_size(struct net_device *netdev); +#endif /* HAVE_ETHTOOL_GET_RXFH_INDIR_SIZE */ + +#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) +u32 hinic5_get_rxfh_key_size(struct net_device *netdev); + +#ifdef HAVE_ETHTOOL_RXFH_PARAM +int hinic5_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh_param); +#elif defined HAVE_RXFH_HASHFUNC +int hinic5_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc); +#else /* HAVE_RXFH_HASHFUNC */ +int hinic5_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key); +#endif /* HAVE_RXFH_HASHFUNC */ + +#ifdef HAVE_ETHTOOL_RXFH_PARAM +int hinic5_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh_param, + struct netlink_ext_ack *extack); +#elif defined HAVE_RXFH_HASHFUNC +int hinic5_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, + const u8 hfunc); +#else +#ifdef HAVE_RXFH_NONCONST +int hinic5_set_rxfh(struct net_device *netdev, u32 *indir, u8 *key); +#else +int hinic5_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key); +#endif /* HAVE_RXFH_NONCONST */ +#endif /* HAVE_RXFH_HASHFUNC */ + +#else /* !(defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH)) */ + +#ifdef HAVE_ETHTOOL_RXFH_INDIR_STRUCT_RXFH_INDIR +int hinic5_get_rxfh_indir(struct net_device *netdev, + struct ethtool_rxfh_indir *indir1); +#else +int hinic5_get_rxfh_indir(struct net_device *netdev, u32 *indir); +#endif + 
+#ifdef HAVE_ETHTOOL_RXFH_INDIR_STRUCT_RXFH_INDIR +int hinic5_set_rxfh_indir(struct net_device *netdev, + const struct ethtool_rxfh_indir *indir1); +#else +int hinic5_set_rxfh_indir(struct net_device *netdev, const u32 *indir); +#endif /* HAVE_ETHTOOL_RXFH_INDIR_STRUCT_RXFH_INDIR */ + +#endif /* (defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH)) */ + +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_tc.c b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_tc.c new file mode 100644 index 00000000..df93d6ca --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_tc.c @@ -0,0 +1,1163 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include "ossl_knl.h" +#if (KERNEL_VERSION(5, 1, 1) <= LINUX_VERSION_CODE) +#include <net/flow_offload.h> +#include <net/ip_tunnels.h> +#include <linux/etherdevice.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/vmalloc.h> + +#include "nic_cfg_comm.h" +#include "hinic5_nic_cfg.h" +#include "hinic5_nic_dev.h" +#include "nic_tc_rule_defs.h" +#include "hinic5_tc.h" + +#define TUNNEL_OPT_OFF 0 +#define TUNNEL_OPT_ON 1 + +#define ENC_IPV4_TYPE 0 +#define ENC_IPV6_TYPE 1 +#define OFFSET_2BYTE 2 + +#define PFE_TUNNEL_OPT_SHIFT 22 +#define PFE_TUNNEL_OPT_MASK 0x1 +#define PFE_IPV6_SIP_SHIFT 16 +#define PFE_IPV6_SIP_MASK 0x3F +#define PFE_IPV6_SIP_DIP_SHIFT 2 +#define PFE_IPV6_SIP_DIP_MASK 0x3F +#define TYPE_OF_LACP 0x8809 +#define TC_LACP_KEY_FLAG 0x3D +#define PFE_ACTION_TO_PORT 0x20 + +#define PFE_GROUP_VLD_SHIFT 8 +#define PFE_GROUP_ID_MASK 0xFF +#define PFE_GROUP_CNT_MAX 64 + +#define GET_MASK_VAL(val, shift, mask) (((val) >> (shift)) & (mask)) + +enum parse_type { + KEY_TYPE, + MASK_TYPE +}; + +static const struct rhashtable_params tc_flow_ht_params = { + .head_offset = offsetof(struct hinic5_tc_flow_node, node), + 
/* Tear down one TC flow rule: remove it from firmware, then from the
 * local hashtable, release its TCAM slot, and free the node.
 *
 * The firmware delete result is deliberately ignored (cast to void) —
 * presumably so local cleanup proceeds even if the device is gone;
 * TODO(review): confirm that is intentional.
 * Always returns 0.
 */
static int hinic5_tc_del_flow_handler(struct hinic5_nic_dev *nic_dev,
				      struct hinic5_tc_flow_node *flow_node)
{
	struct hinic5_tc_info *tc_info = (struct hinic5_tc_info *)nic_dev->tc_info;

	/* send del cmd to mpu */
	(void)hinic5_del_tc_flow_rule(nic_dev->hwdev, flow_node->rule_id);

	/* del flow from hashtable */
	rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node,
			       tc_info->flow_ht_params);

	/* tcam_bitmap is protected by tc_lock; rule_id slot becomes reusable */
	mutex_lock(&tc_info->tc_lock);
	clear_bit(flow_node->rule_id, tc_info->tcam_bitmap);
	mutex_unlock(&tc_info->tc_lock);

	hinic5_info(nic_dev, drv, "flow with cookie:%lx is deleted\n", flow_node->cookie);
	kfree(flow_node);

	return 0;
}
hinic5_tc_match_basic(struct flow_rule *rule, struct hinic5_tc_flow *flow)
+{
+	/* Copy EtherType (and, for IP, the L4 protocol) from the TC match. */
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+		struct flow_match_basic match;
+
+		flow_rule_match_basic(rule, &match);
+		if (match.key->n_proto != 0 && match.mask->n_proto != 0) {
+			flow->l2_key.ether_type = match.key->n_proto;
+			flow->l2_mask.ether_type = match.mask->n_proto;
+			flow->key_flags |= BIT(HINIC5_TC_KEY_ETH_TYPE);
+
+			if (match.key->n_proto == htons(ETH_P_IP) ||
+			    match.key->n_proto == htons(ETH_P_IPV6)) {
+				flow->l4_key.ip_proto = match.key->ip_proto;
+				flow->l4_mask.ip_proto = match.mask->ip_proto;
+				flow->key_flags |= BIT(HINIC5_TC_KEY_PROTOCOL);
+			}
+		}
+	}
+}
+
+/* Copy src/dst MAC matches; an all-zero address or mask means "not matched". */
+static void hinic5_tc_match_eth_addrs(struct flow_rule *rule, struct hinic5_tc_flow *flow)
+{
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+		struct flow_match_eth_addrs match;
+
+		flow_rule_match_eth_addrs(rule, &match);
+		if (!is_zero_ether_addr(match.key->dst) && !is_zero_ether_addr(match.mask->dst)) {
+			ether_addr_copy(flow->l2_key.dmac, match.key->dst);
+			ether_addr_copy(flow->l2_mask.dmac, match.mask->dst);
+			flow->key_flags |= BIT(HINIC5_TC_KEY_DST_MAC);
+		}
+
+		if (!is_zero_ether_addr(match.key->src) && !is_zero_ether_addr(match.mask->src)) {
+			ether_addr_copy(flow->l2_key.smac, match.key->src);
+			ether_addr_copy(flow->l2_mask.smac, match.mask->src);
+			flow->key_flags |= BIT(HINIC5_TC_KEY_SRC_MAC);
+		}
+	}
+}
+
+/* Copy outer VLAN and optional inner (customer) VLAN tag matches. */
+static void hinic5_tc_match_vlan(struct flow_rule *rule, struct hinic5_tc_flow *flow)
+{
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+		struct flow_match_vlan match;
+
+		flow_rule_match_vlan(rule, &match);
+		flow->l2_key.vlan_tag = cpu_to_be16(match.key->vlan_id |
+				(match.key->vlan_priority << VLAN_PRIO_SHIFT));
+		flow->l2_mask.vlan_tag = cpu_to_be16(match.mask->vlan_id |
+				(match.mask->vlan_priority << VLAN_PRIO_SHIFT));
+		flow->key_flags |= BIT(HINIC5_TC_KEY_VLAN_TAG);
+
+		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
+			flow_rule_match_cvlan(rule, &match);
+			flow->l2_key.cvlan_tag = cpu_to_be16(match.key->vlan_id |
+					(match.key->vlan_priority << VLAN_PRIO_SHIFT));
+			flow->l2_mask.cvlan_tag = cpu_to_be16(match.mask->vlan_id |
+					(match.mask->vlan_priority << VLAN_PRIO_SHIFT));
+			flow->key_flags |= BIT(HINIC5_TC_KEY_CVLAN);
+		}
+	} else {
+		flow->l2_key.vlan_tag = 0;
+		flow->l2_mask.vlan_tag = 0; // also match non-VLAN IPv4 packets
+	}
+}
+
+/* Copy IPv4 or IPv6 address matches according to the control key's addr_type. */
+static void hinic5_tc_match_ip_addrs(struct flow_rule *rule, struct hinic5_tc_flow *flow)
+{
+	struct flow_match_control ctrl_match;
+
+	flow_rule_match_control(rule, &ctrl_match);
+
+	if (ctrl_match.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
+	    flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+		struct flow_match_ipv4_addrs match;
+
+		flow_rule_match_ipv4_addrs(rule, &match);
+		flow->l3_key.ipv4.daddr.s_addr = match.key->dst;
+		flow->l3_mask.ipv4.daddr.s_addr = match.mask->dst;
+		flow->l3_key.ipv4.saddr.s_addr = match.key->src;
+		flow->l3_mask.ipv4.saddr.s_addr = match.mask->src;
+
+		flow->key_flags |= BIT(HINIC5_TC_KEY_IPV4);
+	} else if (ctrl_match.key->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
+		   flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+		struct flow_match_ipv6_addrs match;
+
+		flow_rule_match_ipv6_addrs(rule, &match);
+		flow->l3_key.ipv6.daddr = match.key->dst;
+		flow->l3_mask.ipv6.daddr = match.mask->dst;
+		flow->l3_key.ipv6.saddr = match.key->src;
+		flow->l3_mask.ipv6.saddr = match.mask->src;
+
+		flow->key_flags |= BIT(HINIC5_TC_KEY_IPV6);
+	}
+}
+
+/*
+ * Copy outer (encapsulation) IPv4/IPv6 address matches and record the
+ * address family seen in tc_info->enc_ip_type.
+ */
+static void hinic5_tc_match_enc_ip_addrs(struct flow_rule *rule, struct hinic5_tc_flow *flow,
+					 struct hinic5_tc_info *tc_info)
+{
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
+		struct flow_match_ipv4_addrs match;
+
+		flow_rule_match_enc_ipv4_addrs(rule, &match);
+		flow->l3_key.enc_ipv4.daddr.s_addr = match.key->dst;
+		flow->l3_mask.enc_ipv4.daddr.s_addr = match.mask->dst;
+		flow->l3_key.enc_ipv4.saddr.s_addr = match.key->src;
+
flow->l3_mask.enc_ipv4.saddr.s_addr = match.mask->src;
+		flow->key_flags |= BIT(HINIC5_TC_KEY_ENC_IP);
+		tc_info->enc_ip_type = ENC_IPV4_TYPE;
+	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
+		struct flow_match_ipv6_addrs match;
+
+		flow_rule_match_enc_ipv6_addrs(rule, &match);
+		flow->l3_key.enc_ipv6.daddr = match.key->dst;
+		flow->l3_mask.enc_ipv6.daddr = match.mask->dst;
+		flow->l3_key.enc_ipv6.saddr = match.key->src;
+		flow->l3_mask.enc_ipv6.saddr = match.mask->src;
+		flow->key_flags |= BIT(HINIC5_TC_KEY_ENC_IP);
+		tc_info->enc_ip_type = ENC_IPV6_TYPE;
+	}
+}
+
+/* Copy L4 source/destination port matches. */
+static void hinic5_tc_match_ports(struct flow_rule *rule, struct hinic5_tc_flow *flow)
+{
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+		struct flow_match_ports match;
+
+		flow_rule_match_ports(rule, &match);
+		flow->l4_key.ports.dport = match.key->dst;
+		flow->l4_mask.ports.dport = match.mask->dst;
+		flow->l4_key.ports.sport = match.key->src;
+		flow->l4_mask.ports.sport = match.mask->src;
+
+		flow->key_flags |= BIT(HINIC5_TC_KEY_PORTS);
+	}
+}
+
+/* Copy the tunnel key id (VNI) match, converted to host byte order. */
+static void hinic5_tc_match_vni(struct flow_rule *rule, struct hinic5_tc_flow *flow)
+{
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+		struct flow_match_enc_keyid match;
+
+		flow_rule_match_enc_keyid(rule, &match);
+		flow->l2_key.vni = be32_to_cpu(match.key->keyid);
+		flow->l2_mask.vni = be32_to_cpu(match.mask->keyid);
+		flow->key_flags |= BIT(HINIC5_TC_KEY_VNI);
+	}
+}
+
+/* Per-profile key builder: serializes the attr[] field pointers into the
+ * profile-specific TCAM rule layout at mem.
+ */
+typedef void (*hinic5_tc_key_func)(const u8 *attr[], const struct hinic5_tc_info *tc_info, u8 *mem);
+
+/* Point each attr[] slot at the corresponding *key* field of the flow. */
+static void hinic5_tc_parse_key_field(struct hinic5_tc_flow *flow, const u8 *attr[])
+{
+	attr[HINIC5_TC_FIELD_ETH_TYPE] = (const u8 *)&flow->l2_key.ether_type;
+	attr[HINIC5_TC_FIELD_VNI] = (const u8 *)&flow->l2_key.vni;
+	attr[HINIC5_TC_FIELD_VLAN_TAG] = (const u8 *)&flow->l2_key.vlan_tag;
+	attr[HINIC5_TC_FIELD_CVLAN_TAG] = (const u8 *)&flow->l2_key.cvlan_tag;
+	attr[HINIC5_TC_FIELD_SRC_MAC] = (const u8 *)&flow->l2_key.smac[0];
+	attr[HINIC5_TC_FIELD_DST_MAC] = (const u8 *)&flow->l2_key.dmac[0];
+	attr[HINIC5_TC_FIELD_SRC_IP] = (const u8 *)&flow->l3_key.ipv4.saddr;
+	attr[HINIC5_TC_FIELD_DST_IP] = (const u8 *)&flow->l3_key.ipv4.daddr;
+	attr[HINIC5_TC_FIELD_ENC_DST_IP] = (const u8 *)&flow->l3_key.enc_ipv4.daddr;
+	attr[HINIC5_TC_FIELD_SRC_IPV6] = (const u8 *)&flow->l3_key.ipv6.saddr;
+	attr[HINIC5_TC_FIELD_DST_IPV6] = (const u8 *)&flow->l3_key.ipv6.daddr;
+	attr[HINIC5_TC_FIELD_ENC_DST_IPV6] = (const u8 *)&flow->l3_key.enc_ipv6.daddr;
+	attr[HINIC5_TC_FIELD_SRC_PORT] = (const u8 *)&flow->l4_key.ports.sport;
+	attr[HINIC5_TC_FIELD_DST_PORT] = (const u8 *)&flow->l4_key.ports.dport;
+	attr[HINIC5_TC_FIELD_PROTOCOL] = (const u8 *)&flow->l4_key.ip_proto;
+}
+
+/* Point each attr[] slot at the corresponding *mask* field of the flow. */
+static void hinic5_tc_parse_mask_field(struct hinic5_tc_flow *flow, const u8 *attr[])
+{
+	attr[HINIC5_TC_FIELD_ETH_TYPE] = (const u8 *)&flow->l2_mask.ether_type;
+	attr[HINIC5_TC_FIELD_VNI] = (const u8 *)&flow->l2_mask.vni;
+	attr[HINIC5_TC_FIELD_VLAN_TAG] = (const u8 *)&flow->l2_mask.vlan_tag;
+	attr[HINIC5_TC_FIELD_CVLAN_TAG] = (const u8 *)&flow->l2_mask.cvlan_tag;
+	attr[HINIC5_TC_FIELD_SRC_MAC] = (const u8 *)&flow->l2_mask.smac[0];
+	attr[HINIC5_TC_FIELD_DST_MAC] = (const u8 *)&flow->l2_mask.dmac[0];
+	attr[HINIC5_TC_FIELD_SRC_IP] = (const u8 *)&flow->l3_mask.ipv4.saddr;
+	attr[HINIC5_TC_FIELD_DST_IP] = (const u8 *)&flow->l3_mask.ipv4.daddr;
+	attr[HINIC5_TC_FIELD_ENC_DST_IP] = (const u8 *)&flow->l3_mask.enc_ipv4.daddr;
+	attr[HINIC5_TC_FIELD_SRC_IPV6] = (const u8 *)&flow->l3_mask.ipv6.saddr;
+	attr[HINIC5_TC_FIELD_DST_IPV6] = (const u8 *)&flow->l3_mask.ipv6.daddr;
+	attr[HINIC5_TC_FIELD_ENC_DST_IPV6] = (const u8 *)&flow->l3_mask.enc_ipv6.daddr;
+	attr[HINIC5_TC_FIELD_SRC_PORT] = (const u8 *)&flow->l4_mask.ports.sport;
+	attr[HINIC5_TC_FIELD_DST_PORT] = (const u8 *)&flow->l4_mask.ports.dport;
+	attr[HINIC5_TC_FIELD_PROTOCOL] = (const u8 *)&flow->l4_mask.ip_proto;
+}
+
+static u16 hinic5_tc_get_key_flags(u16
profile_id, u16 tunnel_opt) +{ + u16 key_flags[HINIC5_TC_PROFILE_MAX] = {0}; + + key_flags[HINIC5_TC_PROFILE_TUN_ETH] = BIT(HINIC5_TC_KEY_VNI) | + BIT(HINIC5_TC_KEY_SRC_MAC) | + BIT(HINIC5_TC_KEY_DST_MAC) | + BIT(HINIC5_TC_KEY_ETH_TYPE); + + key_flags[HINIC5_TC_PROFILE_TUN_ETH_VLAN] = BIT(HINIC5_TC_KEY_VNI) | + BIT(HINIC5_TC_KEY_SRC_MAC) | + BIT(HINIC5_TC_KEY_DST_MAC) | + BIT(HINIC5_TC_KEY_ETH_TYPE) | + BIT(HINIC5_TC_KEY_VLAN_TAG); + + key_flags[HINIC5_TC_PROFILE_TUN_ETH_QINQ] = BIT(HINIC5_TC_KEY_VNI) | + BIT(HINIC5_TC_KEY_SRC_MAC) | + BIT(HINIC5_TC_KEY_DST_MAC) | + BIT(HINIC5_TC_KEY_ETH_TYPE) | + BIT(HINIC5_TC_KEY_VLAN_TAG) | + BIT(HINIC5_TC_KEY_CVLAN); + + key_flags[HINIC5_TC_PROFILE_ETH] = BIT(HINIC5_TC_KEY_SRC_MAC) | BIT(HINIC5_TC_KEY_DST_MAC) | + BIT(HINIC5_TC_KEY_ETH_TYPE); + + key_flags[HINIC5_TC_PROFILE_ETH_VLAN] = BIT(HINIC5_TC_KEY_SRC_MAC) | + BIT(HINIC5_TC_KEY_DST_MAC) | + BIT(HINIC5_TC_KEY_ETH_TYPE) | + BIT(HINIC5_TC_KEY_VLAN_TAG); + + key_flags[HINIC5_TC_PROFILE_ETH_QINQ] = BIT(HINIC5_TC_KEY_SRC_MAC) | + BIT(HINIC5_TC_KEY_DST_MAC) | + BIT(HINIC5_TC_KEY_ETH_TYPE) | + BIT(HINIC5_TC_KEY_VLAN_TAG) | + BIT(HINIC5_TC_KEY_CVLAN); + + key_flags[HINIC5_TC_PROFILE_TUN_ETH_IP4] = BIT(HINIC5_TC_KEY_VNI) | + BIT(HINIC5_TC_KEY_DST_MAC) | + BIT(HINIC5_TC_KEY_ETH_TYPE) | + BIT(HINIC5_TC_KEY_IPV4) | + BIT(HINIC5_TC_KEY_PROTOCOL); + + key_flags[HINIC5_TC_PROFILE_TUN_ETH_IP4_TCPORUDP] = BIT(HINIC5_TC_KEY_VNI) | + BIT(HINIC5_TC_KEY_DST_MAC) | + BIT(HINIC5_TC_KEY_ETH_TYPE) | + BIT(HINIC5_TC_KEY_IPV4) | + BIT(HINIC5_TC_KEY_PROTOCOL) | + BIT(HINIC5_TC_KEY_PORTS); + + key_flags[HINIC5_TC_PROFILE_ETH_IP4] = BIT(HINIC5_TC_KEY_DST_MAC) | + BIT(HINIC5_TC_KEY_IPV4) | + BIT(HINIC5_TC_KEY_PROTOCOL); + + key_flags[HINIC5_TC_PROFILE_ETH_IP4_TCPORUDP] = BIT(HINIC5_TC_KEY_DST_MAC) | + BIT(HINIC5_TC_KEY_IPV4) | + BIT(HINIC5_TC_KEY_PROTOCOL) | + BIT(HINIC5_TC_KEY_PORTS); + + if (tunnel_opt == TUNNEL_OPT_OFF) { + key_flags[HINIC5_TC_PROFILE_TUN_ETH_IP6] = BIT(HINIC5_TC_KEY_VNI) | + 
BIT(HINIC5_TC_KEY_IPV6) | + BIT(HINIC5_TC_KEY_PROTOCOL); + key_flags[HINIC5_TC_PROFILE_TUN_ETH_IP6_TCPORUDP] = BIT(HINIC5_TC_KEY_VNI) | + BIT(HINIC5_TC_KEY_IPV6) | + BIT(HINIC5_TC_KEY_PROTOCOL) | + BIT(HINIC5_TC_KEY_PORTS); + } else { + key_flags[HINIC5_TC_PROFILE_TUN_ETH_IP6] = BIT(HINIC5_TC_KEY_VNI) | + BIT(HINIC5_TC_KEY_SRC_MAC) | + BIT(HINIC5_TC_KEY_DST_MAC) | + BIT(HINIC5_TC_KEY_ETH_TYPE) | + BIT(HINIC5_TC_KEY_IPV6) | + BIT(HINIC5_TC_KEY_PROTOCOL); + key_flags[HINIC5_TC_PROFILE_TUN_ETH_IP6_TCPORUDP] = BIT(HINIC5_TC_KEY_VNI) | + BIT(HINIC5_TC_KEY_SRC_MAC) | + BIT(HINIC5_TC_KEY_DST_MAC) | + BIT(HINIC5_TC_KEY_ETH_TYPE) | + BIT(HINIC5_TC_KEY_IPV6) | + BIT(HINIC5_TC_KEY_PROTOCOL) | + BIT(HINIC5_TC_KEY_PORTS); + } + + key_flags[HINIC5_TC_PROFILE_ETH_IP6] = BIT(HINIC5_TC_KEY_DST_MAC) | + BIT(HINIC5_TC_KEY_IPV6) | + BIT(HINIC5_TC_KEY_PROTOCOL); + + key_flags[HINIC5_TC_PROFILE_ETH_IP6_TCPORUDP] = BIT(HINIC5_TC_KEY_DST_MAC) | + BIT(HINIC5_TC_KEY_IPV6) | + BIT(HINIC5_TC_KEY_PROTOCOL) | + BIT(HINIC5_TC_KEY_PORTS); + + key_flags[HINIC5_TC_PROFILE_OUTER_IP_INNER_IP] = BIT(HINIC5_TC_KEY_ENC_IP) | + BIT(HINIC5_TC_KEY_VNI) | BIT(HINIC5_TC_KEY_IPV4); + + key_flags[HINIC5_TC_PROFILE_OUTER_IP_INNER_IP_TCPORUDP] = BIT(HINIC5_TC_KEY_ENC_IP) | + BIT(HINIC5_TC_KEY_VNI) | + BIT(HINIC5_TC_KEY_IPV4) | BIT(HINIC5_TC_KEY_PORTS); + + return key_flags[profile_id]; +} + +static void hinic5_tc_get_key_tun_eth(const u8 *attr[], + const struct hinic5_tc_info *tc_info, + u8 *mem) +{ + struct hinic5_tc_rule_tun_eth *rule_st = (struct hinic5_tc_rule_tun_eth *)mem; + + WRITE_VNI(rule_st, attr[HINIC5_TC_FIELD_VNI]); + WRITE_MAC(rule_st, smac, attr[HINIC5_TC_FIELD_SRC_MAC]); + WRITE_MAC(rule_st, dmac, attr[HINIC5_TC_FIELD_DST_MAC]); + WRITE_FIELD_U16(rule_st, ether_type, attr[HINIC5_TC_FIELD_ETH_TYPE]); +} + +static void hinic5_tc_get_key_tun_eth_vlan(const u8 *attr[], + const struct hinic5_tc_info *tc_info, + u8 *mem) +{ + struct hinic5_tc_rule_tun_eth_vlan *rule_st = (struct 
hinic5_tc_rule_tun_eth_vlan *)mem; + + WRITE_VNI(rule_st, attr[HINIC5_TC_FIELD_VNI]); + WRITE_MAC(rule_st, smac, attr[HINIC5_TC_FIELD_SRC_MAC]); + WRITE_MAC(rule_st, dmac, attr[HINIC5_TC_FIELD_DST_MAC]); + WRITE_FIELD_U16(rule_st, ether_type, attr[HINIC5_TC_FIELD_ETH_TYPE]); + WRITE_FIELD_SPLIT_U16(rule_st, vlan_tag, attr[HINIC5_TC_FIELD_VLAN_TAG]); +} + +static void hinic5_tc_get_key_tun_eth_qinq(const u8 *attr[], + const struct hinic5_tc_info *tc_info, + u8 *mem) +{ + struct hinic5_tc_rule_tun_eth_qinq *rule_st = (struct hinic5_tc_rule_tun_eth_qinq *)mem; + + WRITE_VNI(rule_st, attr[HINIC5_TC_FIELD_VNI]); + WRITE_MAC(rule_st, smac, attr[HINIC5_TC_FIELD_SRC_MAC]); + WRITE_MAC(rule_st, dmac, attr[HINIC5_TC_FIELD_DST_MAC]); + WRITE_FIELD_U16(rule_st, ether_type, attr[HINIC5_TC_FIELD_ETH_TYPE]); + WRITE_FIELD_SPLIT_U16(rule_st, vlan_tag, attr[HINIC5_TC_FIELD_VLAN_TAG]); + WRITE_FIELD_U16(rule_st, cvlan_tag, attr[HINIC5_TC_FIELD_CVLAN_TAG]); +} + +static void hinic5_tc_get_key_eth(const u8 *attr[], const struct hinic5_tc_info *tc_info, u8 *mem) +{ + struct hinic5_tc_rule_eth *rule_st = (struct hinic5_tc_rule_eth *)mem; + + WRITE_MAC(rule_st, smac, attr[HINIC5_TC_FIELD_SRC_MAC]); + WRITE_MAC(rule_st, dmac, attr[HINIC5_TC_FIELD_DST_MAC]); + WRITE_FIELD_U16(rule_st, ether_type, attr[HINIC5_TC_FIELD_ETH_TYPE]); +} + +static void hinic5_tc_get_key_eth_vlan(const u8 *attr[], + const struct hinic5_tc_info *tc_info, + u8 *mem) +{ + struct hinic5_tc_rule_eth_vlan *rule_st = (struct hinic5_tc_rule_eth_vlan *)mem; + + WRITE_MAC(rule_st, smac, attr[HINIC5_TC_FIELD_SRC_MAC]); + WRITE_MAC(rule_st, dmac, attr[HINIC5_TC_FIELD_DST_MAC]); + WRITE_FIELD_U16(rule_st, ether_type, attr[HINIC5_TC_FIELD_ETH_TYPE]); + WRITE_FIELD_U16(rule_st, vlan_tag, attr[HINIC5_TC_FIELD_VLAN_TAG]); +} + +static void hinic5_tc_get_key_eth_qinq(const u8 *attr[], + const struct hinic5_tc_info *tc_info, + u8 *mem) +{ + struct hinic5_tc_rule_eth_qinq *rule_st = (struct hinic5_tc_rule_eth_qinq *)mem; + + 
WRITE_MAC(rule_st, smac, attr[HINIC5_TC_FIELD_SRC_MAC]); + WRITE_MAC(rule_st, dmac, attr[HINIC5_TC_FIELD_DST_MAC]); + WRITE_FIELD_U16(rule_st, ether_type, attr[HINIC5_TC_FIELD_ETH_TYPE]); + WRITE_FIELD_U16(rule_st, vlan_tag, attr[HINIC5_TC_FIELD_VLAN_TAG]); + WRITE_FIELD_U16(rule_st, cvlan_tag, attr[HINIC5_TC_FIELD_CVLAN_TAG]); +} + +static void hinic5_tc_get_key_tun_eth_ip4(const u8 *attr[], + const struct hinic5_tc_info *tc_info, + u8 *mem) +{ + struct hinic5_tc_rule_tun_eth_ip4 *rule_st = (struct hinic5_tc_rule_tun_eth_ip4 *)mem; + + WRITE_VNI(rule_st, attr[HINIC5_TC_FIELD_VNI]); + WRITE_MAC(rule_st, dmac, attr[HINIC5_TC_FIELD_DST_MAC]); + WRITE_FIELD_SPLIT_U16(rule_st, ether_type, attr[HINIC5_TC_FIELD_ETH_TYPE]); + WRITE_IP4(rule_st, sip, attr[HINIC5_TC_FIELD_SRC_IP]); + WRITE_IP4(rule_st, dip, attr[HINIC5_TC_FIELD_DST_IP]); + WRITE_FIELD_U8(rule_st, proto, attr[HINIC5_TC_FIELD_PROTOCOL]); +} + +static void hinic5_tc_get_key_tun_eth_ip4_tcporudp(const u8 *attr[], + const struct hinic5_tc_info *tc_info, + u8 *mem) +{ + struct hinic5_tc_rule_tun_eth_ip4_tcporudp *rule_st = + (struct hinic5_tc_rule_tun_eth_ip4_tcporudp *)mem; + + WRITE_VNI(rule_st, attr[HINIC5_TC_FIELD_VNI]); + WRITE_MAC(rule_st, dmac, attr[HINIC5_TC_FIELD_DST_MAC]); + WRITE_FIELD_SPLIT_U16(rule_st, ether_type, attr[HINIC5_TC_FIELD_ETH_TYPE]); + WRITE_IP4(rule_st, sip, attr[HINIC5_TC_FIELD_SRC_IP]); + WRITE_IP4(rule_st, dip, attr[HINIC5_TC_FIELD_DST_IP]); + WRITE_FIELD_U8(rule_st, proto, attr[HINIC5_TC_FIELD_PROTOCOL]); + WRITE_FIELD_U16(rule_st, sport, attr[HINIC5_TC_FIELD_SRC_PORT]); + WRITE_FIELD_U16(rule_st, dport, attr[HINIC5_TC_FIELD_DST_PORT]); +} + +static void hinic5_tc_get_key_eth_ip4(const u8 *attr[], + const struct hinic5_tc_info *tc_info, + u8 *mem) +{ + struct hinic5_tc_rule_eth_ip4 *rule_st = (struct hinic5_tc_rule_eth_ip4 *)mem; + + WRITE_FIELD_U16(rule_st, vlan_tag, attr[HINIC5_TC_FIELD_VLAN_TAG]); + WRITE_MAC(rule_st, dmac, attr[HINIC5_TC_FIELD_DST_MAC]); + WRITE_IP4(rule_st, 
sip, attr[HINIC5_TC_FIELD_SRC_IP]); + WRITE_IP4(rule_st, dip, attr[HINIC5_TC_FIELD_DST_IP]); + WRITE_FIELD_U8(rule_st, proto, attr[HINIC5_TC_FIELD_PROTOCOL]); +} + +static void hinic5_tc_get_key_eth_ip4_tcporudp(const u8 *attr[], + const struct hinic5_tc_info *tc_info, + u8 *mem) +{ + struct hinic5_tc_rule_eth_ip4_tcporudp *rule_st = + (struct hinic5_tc_rule_eth_ip4_tcporudp *)mem; + + WRITE_FIELD_U16(rule_st, vlan_tag, attr[HINIC5_TC_FIELD_VLAN_TAG]); + WRITE_MAC(rule_st, dmac, attr[HINIC5_TC_FIELD_DST_MAC]); + WRITE_IP4(rule_st, sip, attr[HINIC5_TC_FIELD_SRC_IP]); + WRITE_IP4(rule_st, dip, attr[HINIC5_TC_FIELD_DST_IP]); + WRITE_FIELD_U8(rule_st, proto, attr[HINIC5_TC_FIELD_PROTOCOL]); + WRITE_FIELD_SPLIT_U16(rule_st, sport, attr[HINIC5_TC_FIELD_SRC_PORT]); + WRITE_FIELD_U16(rule_st, dport, attr[HINIC5_TC_FIELD_DST_PORT]); +} + +static void hinic5_tc_get_ip6_trunc(const u8 *ip6, u8 *trunc_mem, u16 ip6_bit_len, u16 shift) +{ + /* keep ip6 bits[shift+ip6_bit_len:shift] */ + u16 i, trunc_byte_idx, trunc_bit_idx; + u16 start_bit = IP6_ADDR_128BITS - ip6_bit_len - shift; + u8 bit_val; + + for (i = start_bit; i < start_bit + ip6_bit_len; i++) { + trunc_byte_idx = (i - start_bit) / BYTE8_SIZE; + trunc_bit_idx = BYTE8_SIZE - 1 - (i - start_bit) % BYTE8_SIZE; + bit_val = (ip6[i / BYTE8_SIZE] >> (BYTE8_SIZE - 1 - i % BYTE8_SIZE)) & 1; + trunc_mem[trunc_byte_idx] |= bit_val << trunc_bit_idx; + } +} + +static void hinic5_tc_set_ip6_trunc(const u8 *attr[], const struct hinic5_tc_info *tc_info, + struct hinic5_tc_ip6_trunc *ip6_trunc) +{ + switch (tc_info->profile_id) { + case HINIC5_TC_PROFILE_TUN_ETH_IP6: + case HINIC5_TC_PROFILE_TUN_ETH_IP6_TCPORUDP: + hinic5_tc_get_ip6_trunc(attr[HINIC5_TC_FIELD_SRC_IPV6], ip6_trunc->sip6, + IP6_ADDR_TRUNC_72BITS, tc_info->ipv6_shift_value2); + attr[HINIC5_TC_FIELD_SRC_IPV6] = ip6_trunc->sip6; + hinic5_tc_get_ip6_trunc(attr[HINIC5_TC_FIELD_DST_IPV6], ip6_trunc->dip6, + IP6_ADDR_TRUNC_72BITS, tc_info->ipv6_shift_value2); + 
attr[HINIC5_TC_FIELD_DST_IPV6] = ip6_trunc->dip6; + break; + case HINIC5_TC_PROFILE_ETH_IP6_TCPORUDP: + hinic5_tc_get_ip6_trunc(attr[HINIC5_TC_FIELD_SRC_IPV6], ip6_trunc->sip6, + IP6_ADDR_TRUNC_96BITS, tc_info->ipv6_shift_value); + attr[HINIC5_TC_FIELD_SRC_IPV6] = ip6_trunc->sip6; + break; + default: + break; + } +} + +static void hinic5_tc_get_key_tun_eth_ip6(const u8 *attr[], + const struct hinic5_tc_info *tc_info, + u8 *mem) +{ + struct hinic5_tc_rule_tun_eth_ip6_off *rule_st_off = + (struct hinic5_tc_rule_tun_eth_ip6_off *)mem; + struct hinic5_tc_rule_tun_eth_ip6_on *rule_st_on = + (struct hinic5_tc_rule_tun_eth_ip6_on *)mem; + struct hinic5_tc_ip6_trunc ip6_trunc = {0}; + + if (tc_info->tunnel_opt == TUNNEL_OPT_OFF) { + WRITE_VNI(rule_st_off, attr[HINIC5_TC_FIELD_VNI]); + WRITE_IP6_128BITS_OPT_OFF(rule_st_off, sip6, attr[HINIC5_TC_FIELD_SRC_IPV6]); + WRITE_IP6_128BITS_OPT_OFF(rule_st_off, dip6, attr[HINIC5_TC_FIELD_DST_IPV6]); + WRITE_FIELD_U8(rule_st_off, proto, attr[HINIC5_TC_FIELD_PROTOCOL]); + } else { + hinic5_tc_set_ip6_trunc(attr, tc_info, &ip6_trunc); + WRITE_VNI(rule_st_on, attr[HINIC5_TC_FIELD_VNI]); + WRITE_MAC(rule_st_on, smac, attr[HINIC5_TC_FIELD_SRC_MAC]); + WRITE_MAC(rule_st_on, dmac, attr[HINIC5_TC_FIELD_DST_MAC]); + WRITE_FIELD_U16(rule_st_on, ether_type, attr[HINIC5_TC_FIELD_ETH_TYPE]); + WRITE_SIP6_72BITS_OPT_ON(rule_st_on, attr[HINIC5_TC_FIELD_SRC_IPV6]); + WRITE_DIP6_72BITS_OPT_ON(rule_st_on, attr[HINIC5_TC_FIELD_DST_IPV6]); + WRITE_FIELD_U8(rule_st_on, proto, attr[HINIC5_TC_FIELD_PROTOCOL]); + } +} + +static void hinic5_tc_get_key_tun_eth_ip6_tcporudp(const u8 *attr[], + const struct hinic5_tc_info *tc_info, + u8 *mem) +{ + struct hinic5_tc_rule_tun_eth_ip6_tcporudp_off *rule_st_off = + (struct hinic5_tc_rule_tun_eth_ip6_tcporudp_off *)mem; + struct hinic5_tc_rule_tun_eth_ip6_tcporudp_on *rule_st_on = + (struct hinic5_tc_rule_tun_eth_ip6_tcporudp_on *)mem; + struct hinic5_tc_ip6_trunc ip6_trunc = {0}; + + if (tc_info->tunnel_opt == 
TUNNEL_OPT_OFF) { + WRITE_VNI(rule_st_off, attr[HINIC5_TC_FIELD_VNI]); + WRITE_IP6_128BITS_OPT_OFF(rule_st_off, sip6, attr[HINIC5_TC_FIELD_SRC_IPV6]); + WRITE_IP6_128BITS_OPT_OFF(rule_st_off, dip6, attr[HINIC5_TC_FIELD_DST_IPV6]); + WRITE_FIELD_U8(rule_st_off, proto, attr[HINIC5_TC_FIELD_PROTOCOL]); + WRITE_FIELD_U16(rule_st_off, sport, attr[HINIC5_TC_FIELD_SRC_PORT]); + WRITE_FIELD_U16(rule_st_off, dport, attr[HINIC5_TC_FIELD_DST_PORT]); + } else { + hinic5_tc_set_ip6_trunc(attr, tc_info, &ip6_trunc); + WRITE_VNI(rule_st_on, attr[HINIC5_TC_FIELD_VNI]); + WRITE_MAC(rule_st_on, smac, attr[HINIC5_TC_FIELD_SRC_MAC]); + WRITE_MAC(rule_st_on, dmac, attr[HINIC5_TC_FIELD_DST_MAC]); + WRITE_FIELD_U16(rule_st_on, ether_type, attr[HINIC5_TC_FIELD_ETH_TYPE]); + WRITE_SIP6_72BITS_OPT_ON(rule_st_on, attr[HINIC5_TC_FIELD_SRC_IPV6]); + WRITE_DIP6_72BITS_OPT_ON(rule_st_on, attr[HINIC5_TC_FIELD_DST_IPV6]); + WRITE_FIELD_U8(rule_st_on, proto, attr[HINIC5_TC_FIELD_PROTOCOL]); + WRITE_FIELD_U16(rule_st_on, sport, attr[HINIC5_TC_FIELD_SRC_PORT]); + WRITE_FIELD_U16(rule_st_on, dport, attr[HINIC5_TC_FIELD_DST_PORT]); + } +} + +static void hinic5_tc_get_key_eth_ip6(const u8 *attr[], + const struct hinic5_tc_info *tc_info, + u8 *mem) +{ + struct hinic5_tc_rule_eth_ip6 *rule_st = (struct hinic5_tc_rule_eth_ip6 *)mem; + + WRITE_MAC(rule_st, dmac, attr[HINIC5_TC_FIELD_DST_MAC]); + WRITE_IP6_128BITS(rule_st, sip6, attr[HINIC5_TC_FIELD_SRC_IPV6]); + WRITE_IP6_128BITS(rule_st, dip6, attr[HINIC5_TC_FIELD_DST_IPV6]); + WRITE_FIELD_U8(rule_st, proto, attr[HINIC5_TC_FIELD_PROTOCOL]); +} + +static void hinic5_tc_get_key_eth_ip6_tcporudp(const u8 *attr[], + const struct hinic5_tc_info *tc_info, + u8 *mem) +{ + struct hinic5_tc_rule_eth_ip6_tcporudp *rule_st = + (struct hinic5_tc_rule_eth_ip6_tcporudp *)mem; + struct hinic5_tc_ip6_trunc ip6_trunc = {0}; + + hinic5_tc_set_ip6_trunc(attr, tc_info, &ip6_trunc); + WRITE_MAC(rule_st, dmac, attr[HINIC5_TC_FIELD_DST_MAC]); + WRITE_IP6_96BITS(rule_st, sip6, 
attr[HINIC5_TC_FIELD_SRC_IPV6]); + WRITE_IP6_128BITS(rule_st, dip6, attr[HINIC5_TC_FIELD_DST_IPV6]); + WRITE_FIELD_U8(rule_st, proto, attr[HINIC5_TC_FIELD_PROTOCOL]); + WRITE_FIELD_U16(rule_st, sport, attr[HINIC5_TC_FIELD_SRC_PORT]); + WRITE_FIELD_SPLIT_U16(rule_st, dport, attr[HINIC5_TC_FIELD_DST_PORT]); +} + +static void hinic5_tc_get_key_outer_ip_inner_ip(const u8 *attr[], + const struct hinic5_tc_info *tc_info, + u8 *mem) +{ + struct hinic5_tc_rule_outer_ip_inner_ip *rule_st = + (struct hinic5_tc_rule_outer_ip_inner_ip *)mem; + + if (tc_info->enc_ip_type == ENC_IPV4_TYPE) { + WRITE_FIELD_U16(rule_st, outer_dip_6, attr[HINIC5_TC_FIELD_ENC_DST_IP]); + WRITE_FIELD_U16(rule_st, outer_dip_7, + attr[HINIC5_TC_FIELD_ENC_DST_IP] + OFFSET_2BYTE); + } else { + WRITE_IP6_128BITS(rule_st, outer_dip, attr[HINIC5_TC_FIELD_ENC_DST_IPV6]); + } + + WRITE_VNI(rule_st, attr[HINIC5_TC_FIELD_VNI]); + WRITE_IP4(rule_st, sip, attr[HINIC5_TC_FIELD_SRC_IP]); + WRITE_IP4(rule_st, dip, attr[HINIC5_TC_FIELD_DST_IP]); +} + +static void hinic5_tc_get_key_outer_ip_inner_ip_tcporudp(const u8 *attr[], + const struct hinic5_tc_info *tc_info, + u8 *mem) +{ + struct hinic5_tc_rule_outer_ip_inner_ip_tcporudp *rule_st = + (struct hinic5_tc_rule_outer_ip_inner_ip_tcporudp *)mem; + + if (tc_info->enc_ip_type == ENC_IPV4_TYPE) { + WRITE_FIELD_U16(rule_st, outer_dip_6, attr[HINIC5_TC_FIELD_ENC_DST_IP]); + WRITE_FIELD_U16(rule_st, outer_dip_7, + attr[HINIC5_TC_FIELD_ENC_DST_IP] + OFFSET_2BYTE); + } else { + WRITE_IP6_128BITS(rule_st, outer_dip, attr[HINIC5_TC_FIELD_ENC_DST_IPV6]); + } + + WRITE_VNI(rule_st, attr[HINIC5_TC_FIELD_VNI]); + WRITE_IP4(rule_st, sip, attr[HINIC5_TC_FIELD_SRC_IP]); + WRITE_IP4(rule_st, dip, attr[HINIC5_TC_FIELD_DST_IP]); + WRITE_FIELD_U16(rule_st, sport, attr[HINIC5_TC_FIELD_SRC_PORT]); + WRITE_FIELD_SPLIT_U16(rule_st, dport, attr[HINIC5_TC_FIELD_DST_PORT]); +} + +static hinic5_tc_key_func g_hinic5_tc_key_funcs[HINIC5_TC_PROFILE_MAX] = { + hinic5_tc_get_key_tun_eth, + 
hinic5_tc_get_key_tun_eth_vlan,
+	hinic5_tc_get_key_tun_eth_qinq,
+	hinic5_tc_get_key_eth,
+	hinic5_tc_get_key_eth_vlan,
+	hinic5_tc_get_key_eth_qinq,
+	hinic5_tc_get_key_tun_eth_ip4,
+	hinic5_tc_get_key_tun_eth_ip4_tcporudp,
+	hinic5_tc_get_key_eth_ip4,
+	hinic5_tc_get_key_eth_ip4_tcporudp,
+	hinic5_tc_get_key_tun_eth_ip6,
+	hinic5_tc_get_key_tun_eth_ip6_tcporudp,
+	hinic5_tc_get_key_eth_ip6,
+	hinic5_tc_get_key_eth_ip6_tcporudp,
+	hinic5_tc_get_key_outer_ip_inner_ip,
+	hinic5_tc_get_key_outer_ip_inner_ip_tcporudp,
+};
+
+/* Look up the per-profile key builder; the caller must validate profile_id. */
+static hinic5_tc_key_func hinic5_tc_get_key_func(u16 profile_id)
+{
+	return g_hinic5_tc_key_funcs[profile_id];
+}
+
+/*
+ * Serialize either the key or the mask half of a flow into the TCAM
+ * layout of the currently configured profile.
+ * @mem:  destination buffer (key or mask TCAM memory)
+ * @type: KEY_TYPE writes flow->*_key fields, MASK_TYPE writes flow->*_mask
+ * Returns 0 on success, -EINVAL if the profile id is out of range or the
+ * flow is missing a field that the profile requires.
+ */
+static int hinic5_tc_parse_key(struct hinic5_tc_flow *flow,
+			       u8 *mem,
+			       struct hinic5_nic_dev *nic_dev,
+			       enum parse_type type)
+{
+	struct hinic5_tc_info *tc_info = (struct hinic5_tc_info *)nic_dev->tc_info;
+	const u8 *attr[HINIC5_TC_FIELD_TYPE_MAX] = {NULL};
+	u16 profile_id, tunnel_opt;
+	hinic5_tc_key_func key_func = NULL;
+	u16 flags;
+
+	/* take a consistent snapshot of the shared profile settings */
+	mutex_lock(&tc_info->tc_lock);
+	profile_id = tc_info->profile_id;
+	tunnel_opt = tc_info->tunnel_opt;
+	mutex_unlock(&tc_info->tc_lock);
+
+	/* verify profile_id */
+	if (profile_id >= HINIC5_TC_PROFILE_MAX) {
+		hinic5_err(nic_dev, drv, "profile_id exceed limit: profile_id = %u\n", profile_id);
+		return -EINVAL;
+	}
+
+	hinic5_info(nic_dev, drv, "Get profile id : %u\n", profile_id);
+
+	/* every field the profile requires must be present in the flow */
+	flags = hinic5_tc_get_key_flags(profile_id, tunnel_opt);
+	if ((flow->key_flags & flags) != flags) {
+		hinic5_err(nic_dev, drv, "flow key flags not match, flow_flags(%u) key_flags(%u)\n",
+			   flow->key_flags, flags);
+		return -EINVAL;
+	}
+
+	if (type == KEY_TYPE)
+		hinic5_tc_parse_key_field(flow, attr);
+	else
+		hinic5_tc_parse_mask_field(flow, attr);
+
+	key_func = hinic5_tc_get_key_func(profile_id);
+	key_func(attr, tc_info, mem);
+
+	return 0;
+}
+
+/* Bit widths of the action fields accepted by the hardware. */
+#define ACT_VXLAN_TBL_INDEX_BITS 8
+#define ACT_FLOW_MARK_BITS 24
+#define ACT_VLAN_SEL_BITS 3
+#define ACT_COUNT_ID_BITS 9
+#define ACT_CHECK_VLD(field_val,
field_bits) ((field_val) < (1ULL << (field_bits))) + +static int hinic5_tc_parse_action_tunnel(struct hinic5_tc_action_info *action, + const struct flow_action_entry *act) +{ + const struct ip_tunnel_info *tun_info = act->tunnel; + const struct ip_tunnel_key *tun_key = &tun_info->key; + u64 index = be64_to_cpu(tun_key->tun_id); + + if (ip_tunnel_info_af(tun_info) != AF_INET) + return -EOPNOTSUPP; /* only IPv4 tunnel-encap is supported */ + + if (!ACT_CHECK_VLD(index, ACT_VXLAN_TBL_INDEX_BITS)) + return -EINVAL; + + action->vxlan_tbl_index = (u8)index; + action->action_flag |= BIT(HINIC5_TC_ACTION_VXLAN_ENCAP); + + return 0; +} + +static void hinic5_tc_parse_action_output(struct hinic5_tc_action_info *action, + const struct flow_action_entry *act) +{ + // action queue + action output 临时打桩方案 + /* output, chain_index[15:0] + * queue index, chain_index[23:16] + *queue flag, chain_index[31:24] + */ + action->output = act->chain_index & U16_MAX; + action->action_flag |= BIT(HINIC5_TC_ACTION_FLOW_OUTPUT); + if ((act->chain_index >> SHIFT_24BITS) > 0) { + action->flow_queue = (u8)(act->chain_index >> SHIFT_16BITS); + action->action_flag |= BIT(HINIC5_TC_ACTION_FLOW_QUEUE); + } +} + +static int hinic5_tc_parse_action(struct flow_action *flow_action, struct hinic5_nic_dev *nic_dev, + struct hinic5_tc_action_info *action) +{ + struct flow_action_entry *act = NULL; + unsigned int i; + + if (!flow_action_has_entries(flow_action)) { + hinic5_err(nic_dev, drv, "no actions\n"); + return -EINVAL; + } + + flow_action_for_each(i, act, flow_action) { + switch (act->id) { + case FLOW_ACTION_DROP: + action->action_flag |= BIT(HINIC5_TC_ACTION_FLOW_DROP); + break; + case FLOW_ACTION_ACCEPT: // action upcall功能打桩 + action->action_flag |= BIT(HINIC5_TC_ACTION_FLOW_UPCALL); + break; + case FLOW_ACTION_MARK: + action->flow_mark = act->mark; + if (!ACT_CHECK_VLD(action->flow_mark, ACT_FLOW_MARK_BITS)) + return -EINVAL; + action->action_flag |= BIT(HINIC5_TC_ACTION_FLOW_MARK); + break; + case 
FLOW_ACTION_GOTO: + hinic5_tc_parse_action_output(action, act); + break; +#ifdef HAVE_FLOW_ACTION_PRIORITY + case FLOW_ACTION_PRIORITY: // action count功能打桩 + action->count_id = (u16)act->priority; + if (!ACT_CHECK_VLD(action->count_id, ACT_COUNT_ID_BITS)) + return -EINVAL; + action->action_flag |= BIT(HINIC5_TC_ACTION_FLOW_COUNT); + break; +#endif + case FLOW_ACTION_VLAN_PUSH: + action->vlan_tag = act->vlan.vid; + action->vlan_sel = act->vlan.prio; + if (!ACT_CHECK_VLD(action->vlan_sel, ACT_VLAN_SEL_BITS)) + return -EINVAL; + action->action_flag |= BIT(HINIC5_TC_ACTION_FLOW_VLAN_PUSH); + break; + case FLOW_ACTION_VLAN_POP: + action->action_flag |= BIT(HINIC5_TC_ACTION_FLOW_VLAN_POP); + break; + case FLOW_ACTION_TUNNEL_ENCAP: + if (hinic5_tc_parse_action_tunnel(action, act) != 0) + return -EOPNOTSUPP; + break; + case FLOW_ACTION_TUNNEL_DECAP: + action->action_flag |= BIT(HINIC5_TC_ACTION_VXLAN_DECAP); + break; + default: + hinic5_err(nic_dev, drv, "act(%u) not support offload\n", act->id); + break; + } + } + return 0; +} + +int hinic5_tc_set_profile_id(struct hinic5_nic_dev *nic_dev, u16 profile_id) +{ + struct hinic5_tc_info *tc_info = (struct hinic5_tc_info *)nic_dev->tc_info; + + if (profile_id >= HINIC5_TC_PROFILE_MAX) { + hinic5_err(nic_dev, drv, "profile_id exceed limit\n"); + return -EINVAL; + } + + mutex_lock(&tc_info->tc_lock); + tc_info->profile_id = profile_id; + mutex_unlock(&tc_info->tc_lock); + return 0; +} + +static int hinic5_tc_set_flow_info(struct hinic5_nic_dev *nic_dev, + struct hinic5_tc_flow *flow, + struct hinic5_tc_cfg_info *info) +{ + int ret; + + /* parse key */ + ret = hinic5_tc_parse_key(flow, info->key_tcam_mem, nic_dev, KEY_TYPE); + if (ret != 0) { + hinic5_err(nic_dev, drv, "parse key failed\n"); + return -EINVAL; + } + + /* parse mask, unassigned data set all f */ + memset(info->mask_tcam_mem, 0xFF, TC_ACL_KEY_BYTE); + ret = hinic5_tc_parse_key(flow, info->mask_tcam_mem, nic_dev, MASK_TYPE); + if (ret != 0) { + hinic5_err(nic_dev, 
drv, "parse mask failed\n");
+		return -EINVAL;
+	}
+
+	info->action = flow->actions;
+
+	return 0;
+}
+
+/*
+ * Validate the dissector keys, parse the TC actions and copy all match
+ * fields from the classifier rule into @flow.
+ * Returns 0 on success, -EOPNOTSUPP if CONTROL/BASIC keys are missing,
+ * -EINVAL on action parse failure.
+ */
+static int hinic5_tc_parse_flow(struct flow_cls_offload *tc_flow_cmd,
+				struct hinic5_nic_dev *nic_dev,
+				struct hinic5_tc_flow *flow,
+				struct hinic5_tc_info *tc_info)
+{
+	struct flow_rule *rule = flow_cls_offload_flow_rule(tc_flow_cmd);
+	struct flow_dissector *dissector = rule->match.dissector;
+	int ret;
+
+	/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
+	if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
+	    (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
+		hinic5_err(nic_dev, drv, "cannot form TC key: used_keys = 0x%x\n",
+			   (unsigned int)dissector->used_keys);
+		return -EOPNOTSUPP;
+	}
+
+	/* parse action */
+	ret = hinic5_tc_parse_action(&rule->action, nic_dev, &flow->actions);
+	if (ret != 0) {
+		hinic5_err(nic_dev, drv, "parse action failed\n");
+		return -EINVAL;
+	}
+
+	/* save rule's info to flow */
+	hinic5_tc_match_basic(rule, flow);
+	hinic5_tc_match_eth_addrs(rule, flow);
+	hinic5_tc_match_vlan(rule, flow);
+	hinic5_tc_match_ip_addrs(rule, flow);
+	hinic5_tc_match_enc_ip_addrs(rule, flow, tc_info);
+	hinic5_tc_match_ports(rule, flow);
+	hinic5_tc_match_vni(rule, flow);
+
+	return 0;
+}
+
+/*
+ * FLOW_CLS_REPLACE handler: parse the classifier rule, program it into
+ * the device via the MPU and record it in the flow hashtable.
+ * Returns 0 on success or a negative errno.
+ */
+static int hinic5_add_cls_flower(struct flow_cls_offload *cls_flower,
+				 struct hinic5_nic_dev *nic_dev)
+{
+	struct hinic5_tc_info *tc_info = (struct hinic5_tc_info *)nic_dev->tc_info;
+	struct hinic5_tc_flow_node *new_node = NULL, *old_node = NULL;
+	struct hinic5_tc_cfg_info info = {0};
+	struct hinic5_tc_flow *flow = NULL;
+	int ret;
+
+	/* refresh tunnel/IPv6-shift settings from the PFE registers */
+	ret = hinic5_tc_info_init_from_reg(nic_dev);
+	if (ret != 0) {
+		hinic5_err(nic_dev, drv, "pfe get cfg from reg failed\n");
+		return -EOPNOTSUPP;
+	}
+
+	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
+	if (!new_node)
+		return -ENOMEM;
+
+	new_node->cookie = cls_flower->cookie;
+	flow = &new_node->flow;
+
+	/* If a flow exists with the same cookie, delete it */
+	old_node = rhashtable_lookup_fast(&tc_info->flow_table, &cls_flower->cookie,
+					  tc_info->flow_ht_params);
+	if (old_node)
+		hinic5_tc_del_flow_handler(nic_dev, old_node);
+
+	ret = hinic5_tc_parse_flow(cls_flower, nic_dev, flow, tc_info);
+	if (ret != 0)
+		goto fail;
+
+	/* trans flow to tc_cfg_info */
+	ret = hinic5_tc_set_flow_info(nic_dev, flow, &info);
+	if (ret != 0)
+		goto fail;
+
+	info.group_id = cls_flower->common.chain_index & PFE_GROUP_ID_MASK;
+	if (info.group_id < PFE_GROUP_CNT_MAX) {
+		info.group_vld = (cls_flower->common.chain_index >> PFE_GROUP_VLD_SHIFT) & 0x1;
+	} else {
+		hinic5_err(nic_dev, drv, "invalid group id:0x%x\n", info.group_id);
+		/* fix: ret was still 0 here, so the failure path returned
+		 * "success" to the TC core while the rule was never added
+		 */
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	/* send rule to mpu */
+	ret = hinic5_add_tc_flow_rule(nic_dev->hwdev, &info, false);
+	if (ret != 0)
+		goto fail;
+	new_node->rule_id = info.index;
+
+	/* save rule in hashmap; roll back the HW rule if insertion fails */
+	ret = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node,
+				     tc_info->flow_ht_params);
+	if (ret != 0) {
+		hinic5_del_tc_flow_rule(nic_dev->hwdev, info.index);
+		goto fail;
+	}
+
+	mutex_lock(&tc_info->tc_lock);
+	set_bit(info.index, tc_info->tcam_bitmap);
+	mutex_unlock(&tc_info->tc_lock);
+
+	hinic5_info(nic_dev, drv, "Add tc rule cookie=0x%lx, index = %u\n",
+		    cls_flower->cookie, info.index);
+
+	return 0;
+fail:
+	/* NOTE(review): if a duplicate-cookie flow was deleted above, a
+	 * failure here leaves neither old nor new rule installed — confirm
+	 * this is acceptable.
+	 */
+	kfree(new_node);
+	hinic5_err(nic_dev, drv, "cookie=0x%lx error=%d\n",
+		   cls_flower->cookie, ret);
+	return ret;
+}
+
+/*
+ * FLOW_CLS_DESTROY handler: look up the flow by cookie and remove it.
+ * Returns 0 when removed (or when TC state is absent), -EINVAL when the
+ * cookie is unknown.
+ */
+static int hinic5_del_cls_flower(const struct flow_cls_offload *cls_flower,
+				 struct hinic5_nic_dev *nic_dev)
+{
+	struct hinic5_tc_info *tc_info = (struct hinic5_tc_info *)nic_dev->tc_info;
+	struct hinic5_tc_flow_node *flow_node = NULL;
+
+	if (!tc_info)
+		return 0;
+
+	flow_node = rhashtable_lookup_fast(&tc_info->flow_table, &cls_flower->cookie,
+					   tc_info->flow_ht_params);
+	if (!flow_node) {
+		hinic5_err(nic_dev, drv, "flow with cookie=0x%lx not exist\n",
+			   cls_flower->cookie);
+		return -EINVAL;
+	}
+
+	return hinic5_tc_del_flow_handler(nic_dev, flow_node);
+}
+
+int
hinic5_setup_cls_flower(struct flow_cls_offload *cls_flower, + struct hinic5_nic_dev *nic_dev) +{ + if (!HINIC5_SUPPORT_FEATURE(nic_dev->hwdev, TC_FLOWER_OFFLOAD)) + return -EOPNOTSUPP; + + switch (cls_flower->command) { + case FLOW_CLS_REPLACE: + return hinic5_add_cls_flower(cls_flower, nic_dev); + case FLOW_CLS_DESTROY: + return hinic5_del_cls_flower(cls_flower, nic_dev); + case FLOW_CLS_STATS: + return -EOPNOTSUPP; + default: + return -EOPNOTSUPP; + } +} + +static int hinic5_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) +{ + struct hinic5_nic_dev *nic_dev = cb_priv; + struct flow_cls_offload *cls_flower = (struct flow_cls_offload *)type_data; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return hinic5_setup_cls_flower(cls_flower, nic_dev); + default: + return -EOPNOTSUPP; + } +} + +static LIST_HEAD(hinic5_block_cb_list); + +int hinic5_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + + switch (type) { + case TC_SETUP_BLOCK: + return flow_block_cb_setup_simple(type_data, &hinic5_block_cb_list, + hinic5_setup_tc_block_cb, nic_dev, nic_dev, true); + default: + return -EOPNOTSUPP; + } +} + +/* PFE设置lacp协商报文的默认规则 */ +static int hinic5_set_default_rule_of_pfe_lcam(struct hinic5_nic_dev *nic_dev) +{ + struct hinic5_tc_info *tc_info = (struct hinic5_tc_info *)nic_dev->tc_info; + struct hinic5_tc_cfg_info info = {0}; + struct hinic5_tc_flow flow = {0}; + u8 *mac = (u8 *)nic_dev->netdev->dev_addr; + u8 lacp_dmac[ETH_ALEN] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x02}; + + if (hinic5_func_type(nic_dev->hwdev) == TYPE_VF) + return 0; + + tc_info->profile_id = HINIC5_TC_PROFILE_ETH_QINQ; + flow.key_flags = TC_LACP_KEY_FLAG; + flow.actions.action_flag = PFE_ACTION_TO_PORT; + memset(flow.l2_mask.dmac, 0xff, ETH_ALEN); + memset(flow.l2_mask.smac, 0xff, ETH_ALEN); + memset(&flow.l2_mask.ether_type, 0xff, sizeof(flow.l2_mask.ether_type)); + memcpy(flow.l2_key.dmac, 
lacp_dmac, ETH_ALEN); + flow.l2_key.ether_type = cpu_to_be16(TYPE_OF_LACP); + memcpy(flow.l2_key.smac, mac, ETH_ALEN); + + /* trans flow to tc_cfg_info */ + (void)hinic5_tc_set_flow_info(nic_dev, &flow, &info); + + /* send rule to mpu */ + (void)hinic5_add_tc_flow_rule(nic_dev->hwdev, &info, true); + + mutex_lock(&tc_info->tc_lock); + set_bit(info.index, tc_info->tcam_bitmap); + mutex_unlock(&tc_info->tc_lock); + + tc_info->profile_id = HINIC5_TC_PROFILE_TUN_ETH; + + return 0; +} + +int hinic5_init_tc(struct hinic5_nic_dev *nic_dev) +{ + struct hinic5_tc_info *tc_info = NULL; + int ret; + + /* kzalloc: tunnel_opt/ipv6_shift_value/ipv6_shift_value2/enc_ip_type are
 * read by rule building but never explicitly initialized below */ + nic_dev->tc_info = kzalloc(sizeof(struct hinic5_tc_info), GFP_KERNEL); + if (!nic_dev->tc_info) + return -ENOMEM; + tc_info = (struct hinic5_tc_info *)nic_dev->tc_info; + + tc_info->flow_ht_params = tc_flow_ht_params; + ret = rhashtable_init(&tc_info->flow_table, &tc_info->flow_ht_params); + if (ret != 0) + goto tc_info_init_err; + + tc_info->profile_id = 0; + mutex_init(&tc_info->tc_lock); + memset(tc_info->tcam_bitmap, 0, sizeof(tc_info->tcam_bitmap)); + + (void)hinic5_set_default_rule_of_pfe_lcam(nic_dev); + + return 0; + +tc_info_init_err: + kfree(tc_info); + nic_dev->tc_info = NULL; + return ret; +} + +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_tc.h b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_tc.h new file mode 100644 index 00000000..573d85dc --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/ethtool/hinic5_tc.h @@ -0,0 +1,258 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved. 
*/ + +#ifndef HINIC5_TC_H +#define HINIC5_TC_H + +#include <linux/rhashtable.h> + +#include "ossl_knl.h" +#include "hinic5_nic_dev.h" +#include "nic_mpu_tc_cmd_defs.h" +#include "nic_tc_rule_defs.h" + +#ifdef static +#undef static +#define LLT_STATIC_DEF_SAVED +#endif + +enum hinic5_tc_key_flag { + HINIC5_TC_KEY_ETH_TYPE = 0, + HINIC5_TC_KEY_VNI, + HINIC5_TC_KEY_VLAN_TAG, + HINIC5_TC_KEY_CVLAN, + HINIC5_TC_KEY_SRC_MAC, + HINIC5_TC_KEY_DST_MAC, + HINIC5_TC_KEY_IPV4, + HINIC5_TC_KEY_IPV6, + HINIC5_TC_KEY_ENC_IP, + HINIC5_TC_KEY_PORTS, + HINIC5_TC_KEY_PROTOCOL, + HINIC5_TC_KEY_TYPE_MAX +}; + +enum hinic5_tc_field_type { + HINIC5_TC_FIELD_ETH_TYPE, + HINIC5_TC_FIELD_VNI, + HINIC5_TC_FIELD_VLAN_TAG, + HINIC5_TC_FIELD_CVLAN_TAG, + HINIC5_TC_FIELD_SRC_MAC, + HINIC5_TC_FIELD_DST_MAC, + HINIC5_TC_FIELD_SRC_IP, + HINIC5_TC_FIELD_DST_IP, + HINIC5_TC_FIELD_SRC_IPV6, + HINIC5_TC_FIELD_DST_IPV6, + HINIC5_TC_FIELD_ENC_DST_IP, + HINIC5_TC_FIELD_ENC_DST_IPV6, + HINIC5_TC_FIELD_SRC_PORT, + HINIC5_TC_FIELD_DST_PORT, + HINIC5_TC_FIELD_PROTOCOL, + HINIC5_TC_FIELD_TYPE_MAX +}; + +struct hinic5_tc_l2_key { + u8 dmac[ETH_ALEN]; + u8 smac[ETH_ALEN]; + __be16 vlan_tag; + __be16 cvlan_tag; + __be16 ether_type; + u32 vni; +}; + +struct hinic5_tc_l3_key { + union { + struct { + struct in_addr daddr; + struct in_addr saddr; + } ipv4; + struct { + struct in6_addr daddr; + struct in6_addr saddr; + } ipv6; + }; + union { + struct { + struct in_addr daddr; + struct in_addr saddr; + } enc_ipv4; + struct { + struct in6_addr daddr; + struct in6_addr saddr; + } enc_ipv6; + }; +}; + +struct hinic5_tc_l4_key { + u8 ip_proto; + struct { + __be16 sport; + __be16 dport; + } ports; +}; + +struct hinic5_tc_flow { + u16 key_flags; + struct hinic5_tc_l2_key l2_key; + struct hinic5_tc_l2_key l2_mask; + struct hinic5_tc_l3_key l3_key; + struct hinic5_tc_l3_key l3_mask; + struct hinic5_tc_l4_key l4_key; + struct hinic5_tc_l4_key l4_mask; + struct hinic5_tc_action_info actions; +}; + +#define SHIFT_8BITS 8 +#define 
SHIFT_16BITS 16 +#define SHIFT_24BITS 24 + +#define FIELD_BYTE_0 0 +#define FIELD_BYTE_1 1 +#define FIELD_BYTE_2 2 +#define FIELD_BYTE_3 3 +#define FIELD_BYTE_4 4 +#define FIELD_BYTE_5 5 +#define FIELD_BYTE_6 6 +#define FIELD_BYTE_7 7 +#define FIELD_BYTE_8 8 +#define FIELD_BYTE_9 9 +#define FIELD_BYTE_10 10 +#define FIELD_BYTE_11 11 +#define FIELD_BYTE_12 12 +#define FIELD_BYTE_13 13 +#define FIELD_BYTE_14 14 +#define FIELD_BYTE_15 15 + +#define FIELD_U16(data, hi, lo) \ + (((data)[hi] << SHIFT_8BITS) | (data)[lo]) + +#define WRITE_MAC(rule_st, field, data) do { \ + (rule_st)->field##_0 = (data)[FIELD_BYTE_0]; \ + (rule_st)->field##_1 = (data)[FIELD_BYTE_1]; \ + (rule_st)->field##_2 = (data)[FIELD_BYTE_2]; \ + (rule_st)->field##_3 = (data)[FIELD_BYTE_3]; \ + (rule_st)->field##_4 = (data)[FIELD_BYTE_4]; \ + (rule_st)->field##_5 = (data)[FIELD_BYTE_5]; \ +} while (0) + +#define WRITE_VNI(rule_st, data) do { \ + (rule_st)->vni_h = FIELD_U16(data, FIELD_BYTE_2, FIELD_BYTE_1); \ + (rule_st)->vni_l = (data)[FIELD_BYTE_0]; \ +} while (0) + +#define WRITE_FIELD_U8(rule_st, field, data) \ + ((rule_st)->field = (data)[FIELD_BYTE_0]) + +#define WRITE_FIELD_U16(rule_st, field, data) \ + ((rule_st)->field = FIELD_U16(data, FIELD_BYTE_0, FIELD_BYTE_1)) + +#define WRITE_FIELD_SPLIT_U16(rule_st, field, data) do { \ + (rule_st)->field##_l = (data)[FIELD_BYTE_1]; \ + (rule_st)->field##_h = (data)[FIELD_BYTE_0]; \ +} while (0) + +#define WRITE_IP4(rule_st, field, data) do { \ + (rule_st)->field##_0 = (data)[FIELD_BYTE_0]; \ + (rule_st)->field##_1 = (data)[FIELD_BYTE_1]; \ + (rule_st)->field##_2 = (data)[FIELD_BYTE_2]; \ + (rule_st)->field##_3 = (data)[FIELD_BYTE_3]; \ +} while (0) + +#define WRITE_IP6_128BITS(rule_st, field, data) do { \ + (rule_st)->field##_0 = FIELD_U16(data, FIELD_BYTE_0, FIELD_BYTE_1); \ + (rule_st)->field##_1 = FIELD_U16(data, FIELD_BYTE_2, FIELD_BYTE_3); \ + (rule_st)->field##_2 = FIELD_U16(data, FIELD_BYTE_4, FIELD_BYTE_5); \ + (rule_st)->field##_3 = 
FIELD_U16(data, FIELD_BYTE_6, FIELD_BYTE_7); \ + (rule_st)->field##_4 = FIELD_U16(data, FIELD_BYTE_8, FIELD_BYTE_9); \ + (rule_st)->field##_5 = FIELD_U16(data, FIELD_BYTE_10, FIELD_BYTE_11); \ + (rule_st)->field##_6 = FIELD_U16(data, FIELD_BYTE_12, FIELD_BYTE_13); \ + (rule_st)->field##_7 = FIELD_U16(data, FIELD_BYTE_14, FIELD_BYTE_15); \ +} while (0) + +#define WRITE_IP6_96BITS(rule_st, field, data) do { \ + (rule_st)->field##_0 = FIELD_U16(data, FIELD_BYTE_0, FIELD_BYTE_1); \ + (rule_st)->field##_1 = FIELD_U16(data, FIELD_BYTE_2, FIELD_BYTE_3); \ + (rule_st)->field##_2 = FIELD_U16(data, FIELD_BYTE_4, FIELD_BYTE_5); \ + (rule_st)->field##_3 = FIELD_U16(data, FIELD_BYTE_6, FIELD_BYTE_7); \ + (rule_st)->field##_4 = FIELD_U16(data, FIELD_BYTE_8, FIELD_BYTE_9); \ + (rule_st)->field##_5 = FIELD_U16(data, FIELD_BYTE_10, FIELD_BYTE_11); \ +} while (0) + +#define WRITE_IP6_128BITS_OPT_OFF(rule_st, field, data) do { \ + (rule_st)->field##_0 = FIELD_U16(data, FIELD_BYTE_0, FIELD_BYTE_1); \ + (rule_st)->field##_1_h = (data)[FIELD_BYTE_2]; \ + (rule_st)->field##_1_l = (data)[FIELD_BYTE_3]; \ + (rule_st)->field##_2 = FIELD_U16(data, FIELD_BYTE_4, FIELD_BYTE_5); \ + (rule_st)->field##_3_h = (data)[FIELD_BYTE_6]; \ + (rule_st)->field##_3_l = (data)[FIELD_BYTE_7]; \ + (rule_st)->field##_4 = FIELD_U16(data, FIELD_BYTE_8, FIELD_BYTE_9); \ + (rule_st)->field##_5_h = (data)[FIELD_BYTE_10]; \ + (rule_st)->field##_5_l = (data)[FIELD_BYTE_11]; \ + (rule_st)->field##_6 = FIELD_U16(data, FIELD_BYTE_12, FIELD_BYTE_13); \ + (rule_st)->field##_7_h = (data)[FIELD_BYTE_14]; \ + (rule_st)->field##_7_l = (data)[FIELD_BYTE_15]; \ +} while (0) + +#define WRITE_SIP6_72BITS_OPT_ON(rule_st, data) do { \ + (rule_st)->sip6_0_h = (data)[FIELD_BYTE_0]; \ + (rule_st)->sip6_0_l = (data)[FIELD_BYTE_1]; \ + (rule_st)->sip6_1 = FIELD_U16(data, FIELD_BYTE_2, FIELD_BYTE_3); \ + (rule_st)->sip6_2_h = (data)[FIELD_BYTE_4]; \ + (rule_st)->sip6_2_l = (data)[FIELD_BYTE_5]; \ + (rule_st)->sip6_3 = FIELD_U16(data, 
FIELD_BYTE_6, FIELD_BYTE_7); \ + (rule_st)->sip6_4_h = (data)[FIELD_BYTE_8]; \ +} while (0) + +#define WRITE_DIP6_72BITS_OPT_ON(rule_st, data) do { \ + (rule_st)->dip6_0 = FIELD_U16(data, FIELD_BYTE_0, FIELD_BYTE_1); \ + (rule_st)->dip6_1 = FIELD_U16(data, FIELD_BYTE_2, FIELD_BYTE_3); \ + (rule_st)->dip6_2 = FIELD_U16(data, FIELD_BYTE_4, FIELD_BYTE_5); \ + (rule_st)->dip6_3 = FIELD_U16(data, FIELD_BYTE_6, FIELD_BYTE_7); \ + (rule_st)->dip6_4_h = (data)[FIELD_BYTE_8]; \ +} while (0) + +#define BYTE8_SIZE 8 +#define IP6_ADDR_TRUNC_72BITS 72 +#define IP6_ADDR_TRUNC_96BITS 96 +#define IP6_ADDR_128BITS 128 + +struct hinic5_tc_ip6_trunc { + u8 sip6[IP6_ADDR_TRUNC_96BITS / BYTE8_SIZE]; + u8 dip6[IP6_ADDR_TRUNC_72BITS / BYTE8_SIZE]; +}; + +struct hinic5_tc_flow_node { + unsigned long cookie; /* hash key: provided by TC */ + struct hinic5_tc_flow flow; /* hash value: saved flow */ + u16 rule_id; /* index of pfe key: assigned in firmware */ + + struct rhash_head node; +}; + +#define HINIC5_TC_TCAM_BITMAP_LEN 64 + +/* PFE tc info */ +struct hinic5_tc_info { + u16 profile_id; + /* PFE组key使用的模板3-1:1'b0:使用模板3-1-1,1'b1:使用模板3-1-2 */ + u16 tunnel_opt; + /* PFE组key模板3-2 IPV6 sip截断使用时的偏移值N, + * 截取[N+len:N],最大值为32 + */ + u16 ipv6_shift_value; + /* PFE组key模板3-1 IPV6 sip和dip截断使用时的偏移值N, + * 截取[N+len:N],最大值为56 + */ + u16 ipv6_shift_value2; + u16 enc_ip_type; /* 隧道报文外层ip类型: 0-ipv4, 1-ipv6 */ + ulong tcam_bitmap[HINIC5_TC_TCAM_BITMAP_LEN]; + struct rhashtable flow_table; + struct rhashtable_params flow_ht_params; + struct mutex tc_lock; /* 保护此结构体的互斥锁,防止并发访问 */ +}; + +int hinic5_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data); +int hinic5_init_tc(struct hinic5_nic_dev *nic_dev); +void hinic5_deinit_tc(struct hinic5_nic_dev *nic_dev); +int hinic5_tc_set_profile_id(struct hinic5_nic_dev *nic_dev, u16 profile_id); + +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/ioctl/hinic5_dbg.c 
b/hinic5/src/dpu_platform_library/host/service/nic/linux/ioctl/hinic5_dbg.c new file mode 100644 index 00000000..74ba58f0 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/ioctl/hinic5_dbg.c @@ -0,0 +1,1276 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/semaphore.h> +#include <linux/rhashtable.h> +#include <linux/netdevice.h> + +#include "nic_pub_cmd.h" +#include "hinic5_crm.h" +#include "hinic5_hw.h" +#include "hinic5_nic_dev.h" +#include "hinic5_nic_dbg.h" +#include "hinic5_nic_sq.h" +#include "hinic5_nic_rq.h" +#include "hinic5_rx.h" +#include "hinic5_tx.h" +#include "hinic5_dcb.h" +#include "hinic5_bond.h" +#include "nic_cfg_comm.h" +#include "bond_pub_cmd.h" +#include "hinic5_macsec_api.h" +#include "hinic5_tc.h" +#include "drv_nic_api.h" +#include "hinic5_dbg.h" + +static int get_nic_drv_version(void *buf_out, const u32 *out_size) +{ + struct drv_version_info *ver_info = buf_out; + int err; + + if (!buf_out) { + pr_err("Buf_out is NULL.\n"); + return -EINVAL; + } + + if (*out_size != sizeof(*ver_info)) { + pr_err("Unexpect out buf size from user :%u, expect: %lu\n", + *out_size, sizeof(*ver_info)); + return -EINVAL; + } + + err = snprintf(ver_info->ver, sizeof(ver_info->ver), "%s %s", + HINIC5_NIC_DRV_VERSION, __TIME_STR__); + if (err < 0) + return -EINVAL; + + return 0; +} + +static int get_tx_info(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, const u32 *out_size) +{ + struct nic_sq_info *sq_info = buf_out; + u16 q_id; + int err; + + if (!HINIC5_CHANNEL_RES_VALID(nic_dev)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Netdev is down, can't get tx info\n"); + return -EFAULT; + } + + if (!buf_in || !buf_out) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Buf_in or buf_out is NULL.\n"); + return -EINVAL; + } + + if (!out_size || 
in_size != sizeof(u32)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect in buf size from user :%u, expect: %lu\n", + in_size, sizeof(u32)); + return -EINVAL; + } + + q_id = (u16)(*((u32 *)buf_in)); + + err = hinic5_dbg_get_sq_info(nic_dev->hwdev, q_id, buf_out, *out_size); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Get sq info failed, ret is %d.\n", err); + return err; + } + + sq_info->priority = nic_dev->txqs[q_id].cos; + + return 0; +} + +static int get_q_num(struct hinic5_nic_dev *nic_dev, + const void *buf_in, u32 in_size, + void *buf_out, const u32 *out_size) +{ + if (!HINIC5_CHANNEL_RES_VALID(nic_dev)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Netdev is down, can't get queue number\n"); + return -EFAULT; + } + + if (!buf_out || !out_size) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Param buf_out or out_size is NULL.\n"); + return -EINVAL; + } + + if (*out_size != sizeof(u16)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user: %u, expect: %lu\n", + *out_size, sizeof(u16)); + return -EINVAL; + } + + *((u16 *)buf_out) = nic_dev->q_params.num_qps; + + return 0; +} + +static int get_tx_wqe_info(struct hinic5_nic_dev *nic_dev, + const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + const struct wqe_info *info = buf_in; + u16 wqebb_cnt = 1; + + if (!HINIC5_CHANNEL_RES_VALID(nic_dev)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Netdev is down, can't get tx wqe info\n"); + return -EFAULT; + } + + if (!buf_in || !buf_out) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Buf_in or buf_out is NULL.\n"); + return -EINVAL; + } + + if (!out_size || in_size != sizeof(struct wqe_info)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect buf size from user, in_size: %u, expect: %lu\n", + in_size, sizeof(struct wqe_info)); + return -EINVAL; + } + + return hinic5_dbg_get_wqe_info(nic_dev->hwdev, (u16)info->q_id, + (u16)info->wqe_id, wqebb_cnt, + buf_out, (u16 *)(u8 *)out_size, HINIC5_SQ); 
+} + +static int get_rx_info(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, const u32 *out_size) +{ + struct nic_rq_info *rq_info = buf_out; + u16 q_id; + int err; + + if (!HINIC5_CHANNEL_RES_VALID(nic_dev)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Netdev is down, can't get rx info\n"); + return -EFAULT; + } + + if (!buf_in || !buf_out) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Buf_in or buf_out is NULL.\n"); + return -EINVAL; + } + + if (!out_size || in_size != sizeof(u32)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect buf size from user, in_size: %u, expect: %lu\n", + in_size, sizeof(u32)); + return -EINVAL; + } + + q_id = (u16)(*((u32 *)buf_in)); + + err = hinic5_dbg_get_rq_info(nic_dev->hwdev, q_id, buf_out, *out_size); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Get rq info failed, ret is %d.\n", err); + return err; + } + + rq_info->delta = (u16)nic_dev->rxqs[q_id].delta; + rq_info->ci = (u16)(nic_dev->rxqs[q_id].cons_idx & nic_dev->rxqs[q_id].q_mask); + rq_info->sw_pi = nic_dev->rxqs[q_id].next_to_update; + rq_info->msix_vector = nic_dev->rxqs[q_id].irq_id; + + rq_info->coalesc_timer_cfg = nic_dev->rxqs[q_id].last_coalesc_timer_cfg; + rq_info->pending_limt = nic_dev->rxqs[q_id].last_pending_limt; + + return 0; +} + +static int get_rx_wqe_info(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + const struct wqe_info *info = buf_in; + u16 wqebb_cnt = 1; + + if (!HINIC5_CHANNEL_RES_VALID(nic_dev)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Netdev is down, can't get rx wqe info\n"); + return -EFAULT; + } + + if (!buf_in || !buf_out) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Buf_in or buf_out is NULL.\n"); + return -EINVAL; + } + + if (!out_size || in_size != sizeof(struct wqe_info)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect buf size from user, in_size: %u, expect: %lu\n", + in_size, sizeof(struct wqe_info)); + 
return -EINVAL; + } + + return hinic5_dbg_get_wqe_info(nic_dev->hwdev, (u16)info->q_id, + (u16)info->wqe_id, wqebb_cnt, + buf_out, (u16 *)(u8 *)out_size, HINIC5_RQ); +} + +static int get_rx_cqe_info(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, const u32 *out_size) +{ + const struct wqe_info *info = buf_in; + u16 q_id = 0; + u16 idx = 0; + + if (!HINIC5_CHANNEL_RES_VALID(nic_dev)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Netdev is down, can't get rx cqe info\n"); + return -EFAULT; + } + + if (!buf_in || !buf_out || !out_size) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Buf_in, buf_out or out_size is NULL.\n"); + return -EINVAL; + } + + if (in_size != sizeof(struct wqe_info)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect buf size from user, in_size: %u, expect: %lu\n", + in_size, sizeof(struct wqe_info)); + return -EINVAL; + } + + if (*out_size != sizeof(struct hinic5_cqe_info)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user :%u, expect: %lu\n", + *out_size, sizeof(struct hinic5_cqe_info)); + return -EINVAL; + } + q_id = (u16)info->q_id; + idx = (u16)info->wqe_id; + + if (q_id >= nic_dev->q_params.num_qps) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Invalid q_id[%u] >= %u.\n", q_id, nic_dev->q_params.num_qps); + return -EFAULT; + } + if (idx >= nic_dev->rxqs[q_id].q_depth) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Invalid wqe idx[%u] >= %u.\n", idx, nic_dev->rxqs[q_id].q_depth); + return -EFAULT; + } + + memcpy(buf_out, nic_dev->rxqs[q_id].rx_info[idx].cqe_info, sizeof(struct hinic5_cqe_info)); + + return 0; +} + +static void clean_nicdev_stats(struct hinic5_nic_dev *nic_dev) +{ + u64_stats_update_begin(&nic_dev->stats.syncp); + nic_dev->stats.netdev_tx_timeout = 0; + nic_dev->stats.tx_carrier_off_drop = 0; + nic_dev->stats.tx_invalid_qid = 0; + nic_dev->stats.rsvd1 = 0; + nic_dev->stats.rsvd2 = 0; + u64_stats_update_end(&nic_dev->stats.syncp); +} + +static int 
clear_func_static(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + int i; + + *out_size = 0; +#ifndef HAVE_NETDEV_STATS_IN_NETDEV + memset(&nic_dev->net_stats, 0, sizeof(nic_dev->net_stats)); +#endif + clean_nicdev_stats(nic_dev); + for (i = 0; i < nic_dev->max_qps; i++) { + hinic5_rxq_clean_stats(&nic_dev->rxqs[i].rxq_stats); + hinic5_txq_clean_stats(&nic_dev->txqs[i].txq_stats); + } + + return 0; +} + +static int get_loopback_mode(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, const u32 *out_size) +{ + struct hinic5_nic_loop_mode *mode = buf_out; + + if (!out_size || !mode) + return -EINVAL; + + if (*out_size != sizeof(*mode)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user: %u, expect: %lu\n", + *out_size, sizeof(*mode)); + return -EINVAL; + } + + return hinic5_get_loopback_mode(nic_dev->hwdev, (u8 *)&mode->loop_mode, + (u8 *)&mode->loop_ctrl); +} + +static int set_loopback_mode(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, const u32 *out_size) +{ + const struct hinic5_nic_loop_mode *mode = buf_in; + int err; + + if (test_bit(HINIC5_INTF_UP, &nic_dev->flags) == 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Netdev is down, can't set loopback mode\n"); + return -EFAULT; + } + + if (!mode || !out_size || in_size != sizeof(*mode)) + return -EINVAL; + + if (*out_size != sizeof(*mode)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user: %u, expect: %lu\n", + *out_size, sizeof(*mode)); + return -EINVAL; + } + + err = hinic5_set_loopback_mode(nic_dev->hwdev, (u8)mode->loop_mode, + (u8)mode->loop_ctrl); + if (err == 0) + nicif_info(nic_dev, drv, nic_dev->netdev, "Set loopback mode %u en %u succeed\n", + mode->loop_mode, mode->loop_ctrl); + + return err; +} + +enum hinic5_nic_link_mode { + HINIC5_LINK_MODE_AUTO = 0, + HINIC5_LINK_MODE_UP, + HINIC5_LINK_MODE_DOWN, + 
HINIC5_LINK_MODE_MAX, +}; + +static int set_link_mode_param_valid(struct hinic5_nic_dev *nic_dev, + const void *buf_in, u32 in_size, + const u32 *out_size) +{ + if (test_bit(HINIC5_INTF_UP, &nic_dev->flags) == 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Netdev is down, can't set link mode\n"); + return -EFAULT; + } + + if (!buf_in || !out_size || + in_size != sizeof(enum hinic5_nic_link_mode)) + return -EINVAL; + + if (*out_size != sizeof(enum hinic5_nic_link_mode)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user: %u, expect: %lu\n", + *out_size, sizeof(enum hinic5_nic_link_mode)); + return -EINVAL; + } + + return 0; +} + +static int set_link_mode(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + const enum hinic5_nic_link_mode *link = buf_in; + u8 link_status; + + if (set_link_mode_param_valid(nic_dev, buf_in, in_size, out_size) != 0) + return -EFAULT; + + switch (*link) { + case HINIC5_LINK_MODE_AUTO: + if (hinic5_get_link_state(nic_dev->hwdev, &link_status) != 0) + link_status = false; + hinic5_link_status_change(nic_dev, (bool)link_status); + nicif_info(nic_dev, drv, nic_dev->netdev, + "Set link mode: auto succeed, now is link %s\n", + ((link_status != 0) ? 
"up" : "down")); + break; + case HINIC5_LINK_MODE_UP: + hinic5_link_status_change(nic_dev, true); + nicif_info(nic_dev, drv, nic_dev->netdev, + "Set link mode: up succeed\n"); + break; + case HINIC5_LINK_MODE_DOWN: + hinic5_link_status_change(nic_dev, false); + nicif_info(nic_dev, drv, nic_dev->netdev, + "Set link mode: down succeed\n"); + break; + default: + nicif_err(nic_dev, drv, nic_dev->netdev, + "Invalid link mode %d to set\n", *link); + return -EINVAL; + } + + return 0; +} + +static int set_pf_bw_limit(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, const u32 *out_size) +{ + u32 pf_bw_limit; + int err; + + if (HINIC5_FUNC_IS_VF(nic_dev->hwdev)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "To set VF bandwidth rate, please use ip link cmd\n"); + return -EINVAL; + } + + if (!buf_in || !buf_out || in_size != sizeof(u32) || + !out_size || *out_size != sizeof(u8)) + return -EINVAL; + + pf_bw_limit = *((u32 *)buf_in); + + err = hinic5_set_pf_bw_limit(nic_dev->hwdev, pf_bw_limit); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to set pf bandwidth limit to %u%%\n", + pf_bw_limit); + if (err < 0) + return err; + } + + *((u8 *)buf_out) = (u8)err; + + return 0; +} + +static int get_pf_bw_limit(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, const u32 *out_size) +{ + int err; + + if (HINIC5_FUNC_IS_VF(nic_dev->hwdev)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "To get VF bandwidth rate, please use ip link cmd\n"); + return -EINVAL; + } + + if (!buf_out || !out_size) + return -EINVAL; + + if (*out_size != sizeof(u32)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user: %u, expect: %lu\n", + *out_size, sizeof(u32)); + return -EFAULT; + } + + err = hinic5_get_pf_bw_limit(nic_dev->hwdev, (u32 *)buf_out); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to get pf bandwidth limit err: %d\n", err); + return err; + } + + return 0; +} + 
+static int get_sset_count(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, const u32 *out_size) +{ + u32 count; + + if (!buf_in || in_size != sizeof(u32) || !out_size || + *out_size != sizeof(u32) || !buf_out) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid parameters, in_size: %u\n", + in_size); + return -EINVAL; + } + + switch (*((u32 *)buf_in)) { + case HINIC5_SHOW_SSET_IO_STATS: + count = hinic5_get_io_stats_size(nic_dev); + break; + default: + count = 0; + break; + } + + *((u32 *)buf_out) = count; + + return 0; +} + +static int get_sset_stats(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, const u32 *out_size) +{ + struct hinic5_show_item *items = buf_out; + u32 sset, count, size; + int err; + + if (!buf_in || in_size != sizeof(u32) || !out_size || !buf_out) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Invalid parameters, in_size: %u\n", + in_size); + return -EINVAL; + } + + size = sizeof(u32); + err = get_sset_count(nic_dev, buf_in, in_size, &count, &size); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Get sset count failed, ret=%d\n", + err); + return -EINVAL; + } + if (count * sizeof(*items) != *out_size) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user :%u, expect: %lu\n", + *out_size, count * sizeof(*items)); + return -EINVAL; + } + + sset = *((u32 *)buf_in); + + switch (sset) { + case HINIC5_SHOW_SSET_IO_STATS: + err = hinic5_get_io_stats(nic_dev, items); + if (err < 0) + return -EINVAL; + break; + + default: + nicif_err(nic_dev, drv, nic_dev->netdev, "Unknown %u to get stats\n", + sset); + err = -EINVAL; + break; + } + + return err; +} + +static int update_pcp_dscp_cfg(struct hinic5_nic_dev *nic_dev, + struct hinic5_dcb_config *wanted_dcb_cfg, + const struct hinic5_mt_qos_dev_cfg *qos_in) +{ + int i; + u8 cos_num = 0, valid_cos_bitmap = 0; + + if ((qos_in->cfg_bitmap & CMD_QOS_DEV_PCP2COS) != 0) { + for (i = 0; i < NIC_DCB_UP_MAX; 
i++) { + if ((nic_dev->func_dft_cos_bitmap & BIT(qos_in->pcp2cos[i])) == 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Invalid cos=%u, func cos valid map is %u", + qos_in->pcp2cos[i], nic_dev->func_dft_cos_bitmap); + return -EINVAL; + } + + if ((BIT(qos_in->pcp2cos[i]) & valid_cos_bitmap) == 0) { + valid_cos_bitmap |= (u8)BIT(qos_in->pcp2cos[i]); + cos_num++; + } + } + + memcpy(wanted_dcb_cfg->pcp2cos, + qos_in->pcp2cos, sizeof(qos_in->pcp2cos)); + wanted_dcb_cfg->pcp_user_cos_num = cos_num; + wanted_dcb_cfg->pcp_valid_cos_map = valid_cos_bitmap; + } + + if ((qos_in->cfg_bitmap & CMD_QOS_DEV_DSCP2COS) != 0) { + cos_num = 0; + valid_cos_bitmap = 0; + for (i = 0; i < NIC_DCB_IP_PRI_MAX; i++) { + u8 cos = qos_in->dscp2cos[i] == DBG_DFLT_DSCP_VAL ? + nic_dev->hw_dcb_cfg.dscp2cos[i] : qos_in->dscp2cos[i]; + + if (cos >= NIC_DCB_UP_MAX || + ((nic_dev->func_dft_cos_bitmap & BIT(cos)) == 0)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Invalid cos=%u, func cos valid map is %u", + cos, nic_dev->func_dft_cos_bitmap); + return -EINVAL; + } + + if ((BIT(cos) & valid_cos_bitmap) == 0) { + valid_cos_bitmap |= (u8)BIT(cos); + cos_num++; + } + } + + for (i = 0; i < NIC_DCB_IP_PRI_MAX; i++) + wanted_dcb_cfg->dscp2cos[i] = qos_in->dscp2cos[i] == DBG_DFLT_DSCP_VAL ? 
+ nic_dev->hw_dcb_cfg.dscp2cos[i] : qos_in->dscp2cos[i]; + wanted_dcb_cfg->dscp_user_cos_num = cos_num; + wanted_dcb_cfg->dscp_valid_cos_map = valid_cos_bitmap; + } + + return 0; +} + +static int update_wanted_qos_cfg(struct hinic5_nic_dev *nic_dev, + struct hinic5_dcb_config *wanted_dcb_cfg, + const struct hinic5_mt_qos_dev_cfg *qos_in) +{ + int ret; + u8 cos_num, valid_cos_bitmap; + + if ((qos_in->cfg_bitmap & CMD_QOS_DEV_TRUST) != 0) { + if (qos_in->trust > DCB_DSCP) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Invalid trust=%u\n", qos_in->trust); + return -EINVAL; + } + + wanted_dcb_cfg->trust = qos_in->trust; + } + + if ((qos_in->cfg_bitmap & CMD_QOS_DEV_DFT_COS) != 0) { + if ((BIT(qos_in->dft_cos) & nic_dev->func_dft_cos_bitmap) == 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Invalid dft_cos=%u\n", qos_in->dft_cos); + return -EINVAL; + } + + wanted_dcb_cfg->default_cos = qos_in->dft_cos; + } + + ret = update_pcp_dscp_cfg(nic_dev, wanted_dcb_cfg, qos_in); + if (ret != 0) + return ret; + + if (wanted_dcb_cfg->trust == DCB_PCP) { + cos_num = wanted_dcb_cfg->pcp_user_cos_num; + valid_cos_bitmap = wanted_dcb_cfg->pcp_valid_cos_map; + } else { + cos_num = wanted_dcb_cfg->dscp_user_cos_num; + valid_cos_bitmap = wanted_dcb_cfg->dscp_valid_cos_map; + } + + if (test_bit(HINIC5_DCB_ENABLE, &nic_dev->flags) != 0) { + if (cos_num > nic_dev->q_params.num_qps) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "DCB is on, cos num should not more than channel num:%u\n", + nic_dev->q_params.num_qps); + return -EOPNOTSUPP; + } + } + + if ((BIT(wanted_dcb_cfg->default_cos) & valid_cos_bitmap) == 0) { + nicif_info(nic_dev, drv, nic_dev->netdev, "Current default_cos=%u, change to %d\n", + wanted_dcb_cfg->default_cos, (u8)fls(valid_cos_bitmap) - 1); + wanted_dcb_cfg->default_cos = (u8)fls(valid_cos_bitmap) - 1; + } + + return 0; +} + +static int dcb_mt_qos_map(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, const u32 *out_size) +{ + const 
struct hinic5_mt_qos_dev_cfg *qos_in = buf_in; + struct hinic5_mt_qos_dev_cfg *qos_out = buf_out; + struct hinic5_dcb_config wanted_dcb_cfg = {0}; + u8 i; + int err; + + if (!buf_out || !out_size || !buf_in) + return -EINVAL; + + if (*out_size != sizeof(*qos_out) || in_size != sizeof(*qos_in)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect buf size from user, in_size: %u, out_size: %u, expect: %lu\n", + in_size, *out_size, sizeof(*qos_in)); + return -EINVAL; + } + + memcpy(qos_out, qos_in, sizeof(*qos_in)); + qos_out->head.status = 0; + if ((qos_in->op_code & MT_DCB_OPCODE_WR) != 0) { + memcpy(&wanted_dcb_cfg, &nic_dev->hw_dcb_cfg, + sizeof(struct hinic5_dcb_config)); + err = update_wanted_qos_cfg(nic_dev, &wanted_dcb_cfg, qos_in); + if (err != 0) { + qos_out->head.status = MT_EINVAL; + return 0; + } + + err = hinic5_dcbcfg_set_up_bitmap(nic_dev, &wanted_dcb_cfg); + if (err != 0) + qos_out->head.status = MT_EIO; + } else { + qos_out->dft_cos = nic_dev->hw_dcb_cfg.default_cos; + qos_out->trust = nic_dev->hw_dcb_cfg.trust; + for (i = 0; i < NIC_DCB_UP_MAX; i++) + qos_out->pcp2cos[i] = nic_dev->hw_dcb_cfg.pcp2cos[i]; + for (i = 0; i < NIC_DCB_IP_PRI_MAX; i++) + qos_out->dscp2cos[i] = nic_dev->hw_dcb_cfg.dscp2cos[i]; + } + + return 0; +} + +static int dcb_mt_dcb_state(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, const u32 *out_size) +{ + const struct hinic5_mt_dcb_state *dcb_in = buf_in; + struct hinic5_mt_dcb_state *dcb_out = buf_out; + int err; + u8 user_cos_num; + u8 netif_run = 0; + + if (!buf_in || !buf_out || !out_size) + return -EINVAL; + + if (*out_size != sizeof(*dcb_out) || in_size != sizeof(*dcb_in)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "buf size err, in_size: %u, out_size: %u, expect: %lu\n", + in_size, *out_size, sizeof(*dcb_in)); + return -EINVAL; + } + + user_cos_num = hinic5_get_dev_user_cos_num(nic_dev); + memcpy(dcb_out, dcb_in, sizeof(*dcb_in)); + dcb_out->head.status = 0; + if ((dcb_in->op_code 
& MT_DCB_OPCODE_WR) != 0) { + if (test_bit(HINIC5_DCB_ENABLE, &nic_dev->flags) == dcb_in->state) + return 0; + + if (dcb_in->state != 0 && (netif_is_rxfh_configured(nic_dev->netdev))) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Cannot enable dcb when user has configured rss indirect table.\n"); + return -EOPNOTSUPP; + } + + if (dcb_in->state != 0 && user_cos_num > nic_dev->q_params.num_qps) { + nicif_err(nic_dev, drv, nic_dev->netdev, "cos num %u is big than qps num %u\n", + user_cos_num, nic_dev->q_params.num_qps); + return -EOPNOTSUPP; + } + + if (netif_running(nic_dev->netdev)) { + netif_run = 1; + hinic5_vport_down(nic_dev); + } + + err = hinic5_setup_cos(nic_dev->netdev, + (dcb_in->state != 0) ? user_cos_num : 0, netif_run); + if (err != 0) + goto setup_cos_fail; + + if (netif_run != 0) { + err = hinic5_vport_up(nic_dev); + if (err != 0) + goto vport_up_fail; + } + } else { + dcb_out->state = !!test_bit(HINIC5_DCB_ENABLE, &nic_dev->flags); + } + + return 0; + +vport_up_fail: + hinic5_setup_cos(nic_dev->netdev, (dcb_in->state != 0) ? 
0 : user_cos_num, netif_run); + +setup_cos_fail: + if (netif_run != 0) + hinic5_vport_up(nic_dev); + + return err; +} + +static int dcb_mt_hw_qos_get(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, const u32 *out_size) +{ + const struct hinic5_mt_qos_cos_cfg *cos_cfg_in = buf_in; + struct hinic5_mt_qos_cos_cfg *cos_cfg_out = buf_out; + + if (!buf_in || !buf_out || !out_size) + return -EINVAL; + + if (*out_size != sizeof(*cos_cfg_out) || in_size != sizeof(*cos_cfg_in)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect buf size from user, in_size: %u, out_size: %u, expect: %lu\n", + in_size, *out_size, sizeof(*cos_cfg_in)); + return -EINVAL; + } + + memcpy(cos_cfg_out, cos_cfg_in, sizeof(*cos_cfg_in)); + cos_cfg_out->head.status = 0; + + cos_cfg_out->port_id = hinic5_physical_port_id(nic_dev->hwdev); + cos_cfg_out->func_cos_bitmap = (u8)nic_dev->func_dft_cos_bitmap; + cos_cfg_out->port_cos_bitmap = (u8)nic_dev->port_dft_cos_bitmap; + cos_cfg_out->func_max_cos_num = nic_dev->cos_config_num_max; + + return 0; +} + +static int get_inter_num(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, const u32 *out_size) +{ + u16 intr_num; + + intr_num = hinic5_intr_num(nic_dev->hwdev); + + if (!buf_out || !out_size) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Buf_out or out_size is NULL.\n"); + return -EINVAL; + } + + if (*out_size != sizeof(u16)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user :%u, expect: %lu\n", + *out_size, sizeof(u16)); + return -EFAULT; + } + *(u16 *)buf_out = intr_num; + + return 0; +} + +static int get_netdev_name(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, const u32 *out_size) +{ + if (!buf_out || !out_size) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Buf_out or out_size is NULL.\n"); + return -EINVAL; + } + + if (*out_size != IFNAMSIZ) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size 
from user :%u, expect: %u\n", + *out_size, IFNAMSIZ); + return -EFAULT; + } + + strscpy(buf_out, nic_dev->netdev->name, IFNAMSIZ); + + return 0; +} + +static int get_netdev_tx_timeout(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, const u32 *out_size) +{ + struct net_device *net_dev = nic_dev->netdev; + int *tx_timeout = buf_out; + + if (!buf_out || !out_size) + return -EINVAL; + + if (*out_size != sizeof(int)) { + nicif_err(nic_dev, drv, net_dev, "Unexpect buf size from user, out_size: %u, expect: %lu\n", + *out_size, sizeof(int)); + return -EINVAL; + } + + *tx_timeout = net_dev->watchdog_timeo; + + return 0; +} + +static int set_netdev_tx_timeout(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct net_device *net_dev = nic_dev->netdev; + const int *tx_timeout = buf_in; + + if (!buf_in) + return -EINVAL; + + if (in_size != sizeof(int)) { + nicif_err(nic_dev, drv, net_dev, "Unexpect buf size from user, in_size: %u, expect: %lu\n", + in_size, sizeof(int)); + return -EINVAL; + } + + net_dev->watchdog_timeo = *tx_timeout * HZ; + nicif_info(nic_dev, drv, net_dev, "Set tx timeout check period to %ds\n", *tx_timeout); + + return 0; +} + +static int get_xsfp_present(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, const u32 *out_size) +{ + struct mag_cmd_get_xsfp_present *sfp_abs = buf_out; + + if (!buf_in || !buf_out || !out_size) + return -EINVAL; + + if (*out_size != sizeof(*sfp_abs) || in_size != sizeof(*sfp_abs)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect buf size from user, in_size: %u, out_size: %u, expect: %lu\n", + in_size, *out_size, sizeof(*sfp_abs)); + return -EINVAL; + } + + sfp_abs->head.status = 0; + sfp_abs->abs_status = hinic5_if_sfp_absent(nic_dev->hwdev); + + return 0; +} + +static int get_xsfp_info(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, const u32 *out_size) +{ + struct 
mag_cmd_get_xsfp_info *sfp_info = buf_out; + int err; + + if (!buf_in || !buf_out || !out_size) + return -EINVAL; + + if (*out_size != sizeof(*sfp_info) || in_size != sizeof(*sfp_info)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect buf size from user, in_size: %u, out_size: %u, expect: %lu\n", + in_size, *out_size, sizeof(*sfp_info)); + return -EINVAL; + } + + err = hinic5_get_sfp_info(nic_dev->hwdev, sfp_info); + if (err != 0) { + sfp_info->head.status = MT_EIO; + return 0; + } + + return 0; +} + +static int get_xsfp_tlv_info(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, const u32 *out_size) +{ + struct drv_tag_mag_cmd_get_xsfp_tlv_rsp *sfp_tlv_info = buf_out; + const struct tag_mag_cmd_get_xsfp_tlv_req *sfp_tlv_info_req = buf_in; + int err; + + if (!buf_in || !buf_out || !out_size) + return -EINVAL; + + if (*out_size != sizeof(*sfp_tlv_info) || in_size != sizeof(*sfp_tlv_info_req)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect buf size from user, in_size: %u, out_size: %u, expect: %lu\n", + in_size, *out_size, sizeof(*sfp_tlv_info)); + return -EINVAL; + } + + err = hinic5_get_sfp_tlv_info(nic_dev->hwdev, sfp_tlv_info, sfp_tlv_info_req); + if (err != 0) { + sfp_tlv_info->head.status = MT_EIO; + return 0; + } + + return 0; +} + +static int get_profile_id(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, const u32 *out_size) +{ + struct hinic5_tc_info *tc_info = (struct hinic5_tc_info *)nic_dev->tc_info; + + if (!HINIC5_SUPPORT_FEATURE(nic_dev->hwdev, TC_FLOWER_OFFLOAD)) { + hinic5_err(nic_dev, drv, "dev is not enable PFE\n"); + return -EINVAL; + } + + if (!out_size || !buf_out) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Param buf_out or out_size is NULL.\n"); + return -EINVAL; + } + + if (*out_size != sizeof(u16)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect out buf size from user: %u, expect: %lu\n", + *out_size, sizeof(u16)); + return -EINVAL; + } + + *((u16 
*)buf_out) = tc_info->profile_id; + + return 0; +} + +static int set_profile_id(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, const u32 *out_size) +{ + struct hinic5_tc_info *tc_info = (struct hinic5_tc_info *)nic_dev->tc_info; + const u16 *profile_id = buf_in; + + if (!HINIC5_SUPPORT_FEATURE(nic_dev->hwdev, TC_FLOWER_OFFLOAD)) { + hinic5_err(nic_dev, drv, "dev is not enable PFE\n"); + return -EINVAL; + } + + if (!buf_in) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Param buf_in is NULL.\n"); + return -EINVAL; + } + + if (in_size != sizeof(u16)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect in_size from user: %u, expect: %lu\n", + in_size, sizeof(u16)); + return -EINVAL; + } + + if (*profile_id >= HINIC5_TC_PROFILE_MAX) { + hinic5_err(nic_dev, drv, "profile_id exceed limit\n"); + return -EINVAL; + } + + tc_info->profile_id = *profile_id; + + return 0; +} + +static int hinic5_move_tcam_table(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, const u32 *out_size) +{ + struct hinic5_tc_move_info *acl_move_info = (struct hinic5_tc_move_info *)buf_in; + struct hinic5_tc_info *tc_info = (struct hinic5_tc_info *)nic_dev->tc_info; + struct hinic5_tc_flow_node *flow_node = NULL; + struct rhashtable_iter iter; + int ret; + u32 old_index, new_index, len; + + if (!HINIC5_SUPPORT_FEATURE(nic_dev->hwdev, TC_FLOWER_OFFLOAD)) { + hinic5_err(nic_dev, drv, "dev is not enable PFE\n"); + return -EINVAL; + } + + if (!buf_in) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Param buf_in is NULL.\n"); + return -EINVAL; + } + + old_index = acl_move_info->old_index; + new_index = acl_move_info->new_index; + len = acl_move_info->len; + + if (in_size != sizeof(struct hinic5_tc_move_info)) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Unexpect in_size from user: %u, expect: %lu\n", + in_size, sizeof(struct hinic5_tc_move_info)); + return -EINVAL; + } + + ret = hinic5_move_tc_tcam_table(nic_dev->hwdev, 
acl_move_info); + if (ret != 0) { + hinic5_err(nic_dev, drv, "move tcam table failed\n"); + return ret; + } + + rhashtable_walk_enter(&tc_info->flow_table, &iter); + rhashtable_walk_start(&iter); + while ((flow_node = (struct hinic5_tc_flow_node *)rhashtable_walk_next(&iter)) != NULL && + !IS_ERR(flow_node)) { + if (flow_node->rule_id >= old_index && flow_node->rule_id < old_index + len) + flow_node->rule_id += (u16)(new_index - old_index); + } + rhashtable_walk_stop(&iter); + rhashtable_walk_exit(&iter); + + return ret; +} + +static int g_bond_event_err; + +void hinic_bond_dfx_active_event(const char *bond_name, struct bond_attr *attr, int err) +{ + if (err != 0) + g_bond_event_err = 1; +} + +int hinic_bond_dfx_ops(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, const u32 *out_size) +{ + u16 bond_id; + int err = 0; + struct bond_dfx_ops_info *info = (struct bond_dfx_ops_info *)buf_in; + struct bond_srv_func srv_func = { + .before_active = NULL, + .after_active = hinic_bond_dfx_active_event, + .before_modify = NULL, + .after_modify = NULL, + .before_deactive = NULL, + .after_deactive = NULL, + .can_attach = NULL, + }; + + if (!buf_in) { + nicif_err(nic_dev, drv, nic_dev->netdev, "buf_in is NULL.\n"); + return -EINVAL; + } + + if (in_size != sizeof(struct bond_dfx_ops_info)) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Unexpect in buf size from user :%u, expect: %lu\n", + in_size, sizeof(struct bond_dfx_ops_info)); + return -EINVAL; + } + + if (info->ops == BOND_DFX_OP_ADD) { + (void)hinic5_bond_register_service_func(info->user, &srv_func); + err = hinic5_bond_attach(info->bond_name, info->user, &bond_id); + if (g_bond_event_err != 0) { + hinic5_bond_detach(bond_id, info->user); + g_bond_event_err = 0; + } + } else if (info->ops == BOND_DFX_OP_DEL) { + err = hinic5_bond_get_id_by_name(info->bond_name, &bond_id); + if (err == 0) { + hinic5_bond_detach(bond_id, info->user); + hinic5_bond_unregister_service_func(info->user); + } + } + 
+ info->head.status = (u8)err; + + return err; +} + +static const struct nic_drv_module_handle nic_driv_module_cmd_handle[] = { + /* 获取sq info */ + {TX_INFO, (nic_driv_module)get_tx_info}, + /* 获取队列数目 */ + {Q_NUM, (nic_driv_module)get_q_num}, + /* 获取tx wqe信息 */ + {TX_WQE_INFO, get_tx_wqe_info}, + /* 获取rx info */ + {RX_INFO, (nic_driv_module)get_rx_info}, + /* 获取rx wqe信息 */ + {RX_WQE_INFO, get_rx_wqe_info}, + /* 获取rx cqe信息 */ + {RX_CQE_INFO, (nic_driv_module)get_rx_cqe_info}, + /* 获取中断数目 */ + {GET_INTER_NUM, (nic_driv_module)get_inter_num}, + /* 清除func的统计信息 */ + {CLEAR_FUNC_STASTIC, clear_func_static}, + /* 获取当前的环回模式 */ + {GET_LOOPBACK_MODE, (nic_driv_module)get_loopback_mode}, + /* 设置环回模式 */ + {SET_LOOPBACK_MODE, (nic_driv_module)set_loopback_mode}, + /* 设置link模式 */ + {SET_LINK_MODE, set_link_mode}, + /* 设置pf带宽上限 */ + {SET_PF_BW_LIMIT, (nic_driv_module)set_pf_bw_limit}, + /* 获取pf带宽上限 */ + {GET_PF_BW_LIMIT, (nic_driv_module)get_pf_bw_limit}, + /* 获取当前IO统计项的个数 */ + {GET_SSET_COUNT, (nic_driv_module)get_sset_count}, + /* 获取当前IO统计状态 */ + {GET_SSET_ITEMS, (nic_driv_module)get_sset_stats}, + /* 管理DCB状态 */ + {DCB_STATE, (nic_driv_module)dcb_mt_dcb_state}, + /* 管理qos的映射关系 */ + {QOS_DEV, (nic_driv_module)dcb_mt_qos_map}, + /* 获取硬件qos配置 */ + {GET_QOS_COS, (nic_driv_module)dcb_mt_hw_qos_get}, + /* 获取网络设备名称 */ + {GET_ULD_DEV_NAME, (nic_driv_module)get_netdev_name}, + /* 获取tx timeout的时间 */ + {GET_TX_TIMEOUT, (nic_driv_module)get_netdev_tx_timeout}, + /* 配置tx timeout的时间 */ + {SET_TX_TIMEOUT, set_netdev_tx_timeout}, + /* 获取光模块的在位信息 */ + {GET_XSFP_PRESENT, (nic_driv_module)get_xsfp_present}, + /* 获取光模块的信息 */ + {GET_XSFP_INFO, (nic_driv_module)get_xsfp_info}, + /* 以tlv格式获取光模块的信息 */ + {GET_XSFP_INFO_COMP_CMIS, (nic_driv_module)get_xsfp_tlv_info}, + /* 获取profile id */ + {CMD_GET_PROFILE_ID, (nic_driv_module)get_profile_id}, + /* 配置profile id */ + {CMD_SET_PROFILE_ID, (nic_driv_module)set_profile_id}, + /* 移动tcam表 */ + {CMD_MOVE_TCAM_TABLE, (nic_driv_module)hinic5_move_tcam_table}, + 
/* bond 绑定/解绑 dfx */ + {BOND_DFX_OPS, (nic_driv_module)hinic_bond_dfx_ops}, + /* 查询驱动侧MACsec表项 */ + {MACSEC_TOOL_OP_LIST, (nic_driv_module)macsec_cmd_list}, + /* 查询芯片侧MACsec表项 */ + {MACSEC_TOOL_OP_DUMP, (nic_driv_module)macsec_cmd_list}, + /* 获取芯片侧 SC MIB 信息或者 PORT MIB 信息 */ + {MACSEC_TOOL_OP_MIB, (nic_driv_module)macsec_cmd_mib}, + /* 新增 SC 或 SA 配置 */ + {MACSEC_TOOL_OP_ADD, (nic_driv_module)macsec_cmd_add}, + /* 删除 SC 或 SA 配置 */ + {MACSEC_TOOL_OP_DEL, (nic_driv_module)macsec_cmd_del}, + /* 修改 SC 配置 */ + {MACSEC_TOOL_OP_SET, (nic_driv_module)macsec_cmd_set}, + /* 清除某个设备管理的 MACsec 配置 */ + {MACSEC_TOOL_OP_FLUSH, (nic_driv_module)macsec_cmd_flush} +}; + +__weak int hinic5_tool_cmd_extend_handle(struct net_device *netdev, u32 cmd, + struct hinic5_nt_msg *nt_msg, bool *support) +{ + *support = false; + + return 0; +} + +static int send_to_nic_driver(struct hinic5_nic_dev *nic_dev, u32 cmd, struct hinic5_nt_msg *nt_msg) +{ + int index, num_cmds = (int)(sizeof(nic_driv_module_cmd_handle) / + sizeof(nic_driv_module_cmd_handle[0])); + enum driver_cmd_type cmd_type = (enum driver_cmd_type)cmd; + bool support = false; + int err = 0; + + if (cmd_type == DCB_STATE || cmd_type == QOS_DEV) + rtnl_lock(); + + mutex_lock(&nic_dev->nic_mutex); + for (index = 0; index < num_cmds; index++) { + if (cmd_type == + nic_driv_module_cmd_handle[index].driv_cmd_name) { + err = nic_driv_module_cmd_handle[index].driv_func + (nic_dev, nt_msg->buf_in, + nt_msg->in_size, nt_msg->buf_out, &nt_msg->out_size); + goto cmd_out; + } + } + + err = hinic5_tool_cmd_extend_handle(nic_dev->netdev, cmd_type, nt_msg, &support); + if (!support) { + pr_err("Can't find callback for %d\n", cmd_type); + err = -EINVAL; + } + +cmd_out: + mutex_unlock(&nic_dev->nic_mutex); + + if (cmd_type == DCB_STATE || cmd_type == QOS_DEV) + rtnl_unlock(); + + return err; +} + +int nic_ioctl(void *uld_dev, u32 cmd, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + int err; + struct hinic5_nt_msg nt_msg = {0}; + 
+ if (cmd == GET_DRV_VERSION) + return get_nic_drv_version(buf_out, out_size); + else if (!uld_dev) + return -EINVAL; + + nt_msg.buf_in = (void *)buf_in; + nt_msg.in_size = in_size; + nt_msg.buf_out = buf_out; + nt_msg.out_size = *out_size; + + err = send_to_nic_driver(uld_dev, cmd, &nt_msg); + + return err; +} diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/ioctl/hinic5_dbg.h b/hinic5/src/dpu_platform_library/host/service/nic/linux/ioctl/hinic5_dbg.h new file mode 100644 index 00000000..71221036 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/ioctl/hinic5_dbg.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_DBG_H +#define HINIC5_DBG_H + +#include "hinic5_nic_dev.h" +#include "hinic5_mt.h" + +#define HINIC5_CAR_INFO_INDEX (0x4) + +typedef int (*nic_driv_module)(struct hinic5_nic_dev *nic_dev, + const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size); + +struct nic_drv_module_handle { + u32 driv_cmd_name; + nic_driv_module driv_func; +}; + +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/ioctl/hinic5_dcb.c b/hinic5/src/dpu_platform_library/host/service/nic/linux/ioctl/hinic5_dcb.c new file mode 100644 index 00000000..6d0145b4 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/ioctl/hinic5_dcb.c @@ -0,0 +1,410 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/etherdevice.h> +#include <linux/netdevice.h> + +#include "hinic5_crm.h" +#include "hinic5_lld.h" +#include "hinic5_nic_cfg.h" +#include "hinic5_srv_nic.h" +#include "hinic5_nic_dev.h" +#include "hinic5_dcb.h" + +#define MAX_BW_PERCENT 100 + +u8 
hinic5_get_dev_user_cos_num(struct hinic5_nic_dev *nic_dev) +{ + if (nic_dev->hw_dcb_cfg.trust == 0) + return nic_dev->hw_dcb_cfg.pcp_user_cos_num; + if (nic_dev->hw_dcb_cfg.trust == 1) + return nic_dev->hw_dcb_cfg.dscp_user_cos_num; + return 0; +} + +u8 hinic5_get_dev_valid_cos_map(struct hinic5_nic_dev *nic_dev) +{ + if (!nic_dev) + return 0; + if (nic_dev->hw_dcb_cfg.trust == 0) + return nic_dev->hw_dcb_cfg.pcp_valid_cos_map; + if (nic_dev->hw_dcb_cfg.trust == 1) + return nic_dev->hw_dcb_cfg.dscp_valid_cos_map; + return 0; +} + +void hinic5_update_qp_cos_cfg(struct hinic5_nic_dev *nic_dev) +{ + struct hinic5_dcb_config *dcb_cfg = &nic_dev->hw_dcb_cfg; + u8 valid_cos_map = hinic5_get_dev_valid_cos_map(nic_dev); + u8 num_cos = hinic5_get_dev_user_cos_num(nic_dev); + u8 cos_qp_num, cos_qp_offset = 0; + u8 i, remainder, num_qp_per_cos; + + if (num_cos == 0 || nic_dev->q_params.num_qps == 0) + return; + + num_qp_per_cos = (u8)(nic_dev->q_params.num_qps / num_cos); + remainder = nic_dev->q_params.num_qps % num_cos; + + memset(dcb_cfg->cos_qp_offset, 0, sizeof(dcb_cfg->cos_qp_offset)); + memset(dcb_cfg->cos_qp_num, 0, sizeof(dcb_cfg->cos_qp_num)); + + for (i = 0; i < PCP_MAX_UP; i++) { + if ((BIT(i) & valid_cos_map) != 0) { + cos_qp_num = num_qp_per_cos + ((remainder > 0) ? 
(remainder--, 1) : 0); + + dcb_cfg->cos_qp_offset[i] = cos_qp_offset; + dcb_cfg->cos_qp_num[i] = cos_qp_num; + hinic5_info(nic_dev, drv, "cos %u, cos_qp_offset=%u cos_qp_num=%u\n", + i, cos_qp_offset, cos_qp_num); + + cos_qp_offset += cos_qp_num; + valid_cos_map -= (u8)BIT(i); + } + } +} + +void hinic5_update_tx_db_cos(struct hinic5_nic_dev *nic_dev, u8 dcb_en) +{ + u8 i; + u16 start_qid, q_num; + + hinic5_set_txq_cos(nic_dev, 0, nic_dev->q_params.num_qps, + nic_dev->hw_dcb_cfg.default_cos); + if (dcb_en == 0) + return; + + for (i = 0; i < NIC_DCB_COS_MAX; i++) { + q_num = (u16)nic_dev->hw_dcb_cfg.cos_qp_num[i]; + if (q_num != 0) { + start_qid = (u16)nic_dev->hw_dcb_cfg.cos_qp_offset[i]; + + hinic5_set_txq_cos(nic_dev, start_qid, q_num, i); + hinic5_info(nic_dev, drv, "update tx db cos, start_qid %u, q_num=%u cos=%u\n", + start_qid, q_num, i); + } + } +} + +static int hinic5_set_tx_cos_state(struct hinic5_nic_dev *nic_dev, u8 dcb_en) +{ + struct hinic5_dcb_config *dcb_cfg = &nic_dev->hw_dcb_cfg; + struct hinic5_dcb_state dcb_state = {0}; + u8 i; + int err; + + dcb_state.dcb_on = dcb_en; + dcb_state.default_cos = dcb_cfg->default_cos; + dcb_state.trust = dcb_cfg->trust; + + if (dcb_en != 0) { + for (i = 0; i < NIC_DCB_COS_MAX; i++) + dcb_state.pcp2cos[i] = dcb_cfg->pcp2cos[i]; + for (i = 0; i < NIC_DCB_IP_PRI_MAX; i++) + dcb_state.dscp2cos[i] = dcb_cfg->dscp2cos[i]; + } else { + memset(dcb_state.pcp2cos, dcb_cfg->default_cos, sizeof(dcb_state.pcp2cos)); + memset(dcb_state.dscp2cos, dcb_cfg->default_cos, sizeof(dcb_state.dscp2cos)); + } + err = hinic5_set_dcb_state(nic_dev->hwdev, &dcb_state); + if (err != 0) { + hinic5_err(nic_dev, drv, "Failed to set dcb state\n"); + return err; + } + + err = hinic5_dcb_state_op(nic_dev->hwdev, HISDK5_DCB_STATE_SET, + (struct hisdk5_dcb_state *)(void *)&dcb_state); + if (err != 0) { + hinic5_err(nic_dev, drv, "Failed to sync nic dcb state to the sdk.\n"); + return err; + } + return 0; +} + +int hinic5_configure_dcb_hw(struct 
hinic5_nic_dev *nic_dev, u8 dcb_en) +{ + int err; + + err = hinic5_sync_dcb_state(nic_dev->hwdev, 1, dcb_en); + if (err != 0) { + hinic5_err(nic_dev, drv, "Set dcb state failed\n"); + return err; + } + + hinic5_update_qp_cos_cfg(nic_dev); + hinic5_update_tx_db_cos(nic_dev, dcb_en); + + err = hinic5_set_tx_cos_state(nic_dev, dcb_en); + if (err != 0) { + hinic5_err(nic_dev, drv, "Set tx cos state failed\n"); + goto set_tx_cos_fail; + } + + err = hinic5_rx_configure(nic_dev->netdev, dcb_en); + if (err != 0) { + hinic5_err(nic_dev, drv, "rx configure failed\n"); + goto rx_configure_fail; + } + + if (dcb_en != 0) { + set_bit(HINIC5_DCB_ENABLE, &nic_dev->flags); + set_bit(HINIC5_DCB_ENABLE, &nic_dev->nic_hinic5_vram->flags); + } else { + clear_bit(HINIC5_DCB_ENABLE, &nic_dev->flags); + clear_bit(HINIC5_DCB_ENABLE, &nic_dev->nic_hinic5_vram->flags); + } + + return 0; +rx_configure_fail: + hinic5_set_tx_cos_state(nic_dev, (dcb_en != 0) ? 0 : 1); + +set_tx_cos_fail: + hinic5_update_tx_db_cos(nic_dev, (dcb_en != 0) ? 0 : 1); + hinic5_sync_dcb_state(nic_dev->hwdev, 1, (dcb_en != 0) ? 0 : 1); + + return err; +} + +int hinic5_setup_cos(struct net_device *netdev, u8 cos, u8 netif_run) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + int err; + + if (cos != 0 && (test_bit(HINIC5_SAME_RXTX, &nic_dev->flags) != 0)) { + nicif_err(nic_dev, drv, netdev, "Failed to enable DCB while Symmetric RSS is enabled\n"); + return -EOPNOTSUPP; + } + + if (cos > nic_dev->cos_config_num_max) { + nicif_err(nic_dev, drv, netdev, "Invalid num_tc: %u, max cos: %u\n", + cos, nic_dev->cos_config_num_max); + return -EINVAL; + } + + err = hinic5_configure_dcb_hw(nic_dev, (cos != 0) ? 
1 : 0); + if (err != 0) + return err; + + return 0; +} + +static u8 get_cos_num(u8 hw_valid_cos_bitmap) +{ + u8 support_cos = 0; + u8 i; + + for (i = 0; i < NIC_DCB_COS_MAX; i++) + if ((hw_valid_cos_bitmap & BIT(i)) != 0) + support_cos++; + + return support_cos; +} + +static void hinic5_dcb_save(struct hinic5_hinic5_vram *nic_hinic5_vram, + struct hinic5_dcb_config *hw_cfg) +{ + nic_hinic5_vram->default_cos = hw_cfg->default_cos; + nic_hinic5_vram->trust = hw_cfg->trust; +} + +static void hinic5_dcb_restore(struct hinic5_dcb_config *hw_cfg, + struct hinic5_hinic5_vram *nic_hinic5_vram) +{ + hw_cfg->default_cos = nic_hinic5_vram->default_cos; + hw_cfg->trust = nic_hinic5_vram->trust; +} + +static void hinic5_sync_dcb_cfg(struct hinic5_nic_dev *nic_dev, + const struct hinic5_dcb_config *dcb_cfg) +{ + struct hinic5_dcb_config *hw_cfg = &nic_dev->hw_dcb_cfg; + + memcpy(hw_cfg, dcb_cfg, sizeof(struct hinic5_dcb_config)); + hinic5_dcb_save(nic_dev->nic_hinic5_vram, hw_cfg); +} + +static int init_default_dcb_cfg(struct hinic5_nic_dev *nic_dev, + struct hinic5_dcb_config *dcb_cfg) +{ + u8 i, hw_dft_cos_map, port_cos_bitmap, dscp_ind; + int err; + int is_in_kexec; + + err = hinic5_cos_valid_bitmap(nic_dev->hwdev, &hw_dft_cos_map, &port_cos_bitmap); + if (err != 0) { + hinic5_err(nic_dev, drv, "None cos supported\n"); + return -EFAULT; + } + + is_in_kexec = hinic5_vram_get_kexec_flag(); + + nic_dev->func_dft_cos_bitmap = hw_dft_cos_map; + nic_dev->port_dft_cos_bitmap = port_cos_bitmap; + + nic_dev->cos_config_num_max = get_cos_num(hw_dft_cos_map); + + if (is_in_kexec == 0) { + dcb_cfg->trust = DCB_PCP; + if (nic_dev->hw_default_cos_valid != 0) + dcb_cfg->default_cos = nic_dev->hw_default_cos; + else + dcb_cfg->default_cos = (u8)fls(nic_dev->func_dft_cos_bitmap) - 1; + hinic5_dcb_save(nic_dev->nic_hinic5_vram, dcb_cfg); + } else { + hinic5_dcb_restore(dcb_cfg, nic_dev->nic_hinic5_vram); + } + + dcb_cfg->pcp_user_cos_num = nic_dev->cos_config_num_max; + 
dcb_cfg->dscp_user_cos_num = nic_dev->cos_config_num_max; + dcb_cfg->pcp_valid_cos_map = hw_dft_cos_map; + dcb_cfg->dscp_valid_cos_map = hw_dft_cos_map; + + for (i = 0; i < NIC_DCB_COS_MAX; i++) { + dcb_cfg->pcp2cos[i] = ((hw_dft_cos_map & BIT(i)) != 0) ? i : dcb_cfg->default_cos; + for (dscp_ind = 0; dscp_ind < NIC_DCB_COS_MAX; dscp_ind++) + dcb_cfg->dscp2cos[i * NIC_DCB_DSCP_NUM + dscp_ind] = dcb_cfg->pcp2cos[i]; + } + + return 0; +} + +void hinic5_dcb_reset_hw_config(struct hinic5_nic_dev *nic_dev) +{ + struct hinic5_dcb_config dft_cfg = {0}; + + init_default_dcb_cfg(nic_dev, &dft_cfg); + hinic5_sync_dcb_cfg(nic_dev, &dft_cfg); + + hinic5_info(nic_dev, drv, "Reset DCB configuration done\n"); +} + +int hinic5_configure_dcb(struct net_device *netdev) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + int err; + + err = hinic5_sync_dcb_state(nic_dev->hwdev, 1, + test_bit(HINIC5_DCB_ENABLE, &nic_dev->flags) ? 1 : 0); + if (err != 0) { + hinic5_err(nic_dev, drv, "Set dcb state failed\n"); + return err; + } + + if (test_bit(HINIC5_DCB_ENABLE, &nic_dev->flags) == 0) + hinic5_dcb_reset_hw_config(nic_dev); + + return 0; +} + +int hinic5_dcb_init(struct hinic5_nic_dev *nic_dev) +{ + struct hinic5_dcb_config *dcb_cfg = &nic_dev->hw_dcb_cfg; + int err; + u8 dcb_en = test_bit(HINIC5_DCB_ENABLE, &nic_dev->flags) ? 
1 : 0; + + err = init_default_dcb_cfg(nic_dev, dcb_cfg); + if (err != 0) { + hinic5_err(nic_dev, drv, "Initialize dcb configuration failed\n"); + return err; + } + + hinic5_info(nic_dev, drv, "Support num cos %u, default cos %u\n", + nic_dev->cos_config_num_max, dcb_cfg->default_cos); + + err = hinic5_set_tx_cos_state(nic_dev, dcb_en); + if (err != 0) { + hinic5_err(nic_dev, drv, "Set tx cos state failed\n"); + return err; + } + + return 0; +} + +static int change_qos_cfg(struct hinic5_nic_dev *nic_dev, const struct hinic5_dcb_config *dcb_cfg) +{ + struct net_device *netdev = nic_dev->netdev; + int err = 0; + + if (test_and_set_bit(HINIC5_DCB_UP_COS_SETTING, &nic_dev->flags)) { + nicif_warn(nic_dev, drv, netdev, + "Cos_up map setting in inprocess, please try again later\n"); + return -EFAULT; + } + + hinic5_sync_dcb_cfg(nic_dev, dcb_cfg); + + hinic5_update_qp_cos_cfg(nic_dev); + + clear_bit(HINIC5_DCB_UP_COS_SETTING, &nic_dev->flags); + + return err; +} + +int hinic5_dcbcfg_set_up_bitmap(struct hinic5_nic_dev *nic_dev, + struct hinic5_dcb_config *wanted_dcb_cfg) +{ + int err, rollback_err; + u8 netif_run = 0; + struct hinic5_dcb_config old_dcb_cfg; + u8 user_cos_num = hinic5_get_dev_user_cos_num(nic_dev); + + memcpy(&old_dcb_cfg, + &nic_dev->hw_dcb_cfg, sizeof(struct hinic5_dcb_config)); + + if (memcmp(wanted_dcb_cfg, &old_dcb_cfg, sizeof(struct hinic5_dcb_config)) == 0) { + nicif_info(nic_dev, drv, nic_dev->netdev, + "Same valid up bitmap, don't need to change anything\n"); + return 0; + } + + if (netif_running(nic_dev->netdev)) { + netif_run = 1; + hinic5_vport_down(nic_dev); + } + + err = change_qos_cfg(nic_dev, wanted_dcb_cfg); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Set cos_up map to hw failed\n"); + goto change_qos_cfg_fail; + } + + if (test_bit(HINIC5_DCB_ENABLE, &nic_dev->flags)) { + err = hinic5_setup_cos(nic_dev->netdev, user_cos_num, netif_run); + if (err != 0) + goto set_err; + } + + if (netif_run != 0) { + err = 
hinic5_vport_up(nic_dev); + if (err != 0) + goto vport_up_fail; + } + + return 0; + +vport_up_fail: + if (test_bit(HINIC5_DCB_ENABLE, &nic_dev->flags)) + hinic5_setup_cos(nic_dev->netdev, + (user_cos_num != 0) ? 0 : user_cos_num, netif_run); + +set_err: + rollback_err = change_qos_cfg(nic_dev, &old_dcb_cfg); + if (rollback_err != 0) + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to rollback qos configure\n"); + +change_qos_cfg_fail: + if (netif_run != 0) + hinic5_vport_up(nic_dev); + + return err; +} diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/ioctl/hinic5_dcb.h b/hinic5/src/dpu_platform_library/host/service/nic/linux/ioctl/hinic5_dcb.h new file mode 100644 index 00000000..6555f4a8 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/ioctl/hinic5_dcb.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC5_DCB_H +#define HINIC5_DCB_H + +#include "ossl_knl.h" + +struct hinic5_cos_cfg { + u8 up; + u8 bw_pct; + u8 tc_id; + u8 prio_sp; /* 0 - DWRR, 1 - SP */ +}; + +struct hinic5_tc_cfg { + u8 bw_pct; + u8 prio_sp; /* 0 - DWRR, 1 - SP */ + u16 rsvd; +}; + +enum HINIC5_DCB_TRUST { + DCB_PCP, + DCB_DSCP, +}; + +#define PCP_MAX_UP 8 +#define DSCP_MAC_UP 64 +#define DBG_DFLT_DSCP_VAL 0xFF + +struct hinic5_dcb_config { + u8 trust; /* pcp, dscp */ + u8 default_cos; + u8 pcp_user_cos_num; + u8 pcp_valid_cos_map; + u8 dscp_user_cos_num; + u8 dscp_valid_cos_map; + u8 pcp2cos[PCP_MAX_UP]; + u8 dscp2cos[DSCP_MAC_UP]; + + u8 cos_qp_offset[NIC_DCB_COS_MAX]; + u8 cos_qp_num[NIC_DCB_COS_MAX]; +}; + +struct hinic5_nic_dev; + +u8 hinic5_get_dev_user_cos_num(struct hinic5_nic_dev *nic_dev); +u8 hinic5_get_dev_valid_cos_map(struct hinic5_nic_dev *nic_dev); +int hinic5_dcb_init(struct hinic5_nic_dev *nic_dev); +void hinic5_dcb_reset_hw_config(struct hinic5_nic_dev *nic_dev); +int hinic5_configure_dcb(struct net_device *netdev); +int hinic5_setup_cos(struct 
net_device *netdev, u8 cos, u8 netif_run); +void hinic5_dcbcfg_set_pfc_state(struct hinic5_nic_dev *nic_dev, u8 pfc_state); +u8 hinic5_dcbcfg_get_pfc_state(struct hinic5_nic_dev *nic_dev); +void hinic5_dcbcfg_set_pfc_pri_en(struct hinic5_nic_dev *nic_dev, + u8 pfc_en_bitmap); +u8 hinic5_dcbcfg_get_pfc_pri_en(struct hinic5_nic_dev *nic_dev); +int hinic5_dcbcfg_set_ets_up_tc_map(struct hinic5_nic_dev *nic_dev, + const u8 *up_tc_map); +void hinic5_dcbcfg_get_ets_up_tc_map(struct hinic5_nic_dev *nic_dev, + u8 *up_tc_map); +int hinic5_dcbcfg_set_ets_tc_bw(struct hinic5_nic_dev *nic_dev, + const u8 *tc_bw); +void hinic5_dcbcfg_get_ets_tc_bw(struct hinic5_nic_dev *nic_dev, u8 *tc_bw); +void hinic5_dcbcfg_set_ets_tc_prio_type(struct hinic5_nic_dev *nic_dev, + u8 tc_prio_bitmap); +void hinic5_dcbcfg_get_ets_tc_prio_type(struct hinic5_nic_dev *nic_dev, + u8 *tc_prio_bitmap); +int hinic5_dcbcfg_set_up_bitmap(struct hinic5_nic_dev *nic_dev, + struct hinic5_dcb_config *wanted_dcb_cfg); +void hinic5_update_tx_db_cos(struct hinic5_nic_dev *nic_dev, u8 dcb_en); + +void hinic5_update_qp_cos_cfg(struct hinic5_nic_dev *nic_dev); +void hinic5_vport_down(struct hinic5_nic_dev *nic_dev); +int hinic5_vport_up(struct hinic5_nic_dev *nic_dev); +int hinic5_configure_dcb_hw(struct hinic5_nic_dev *nic_dev, u8 dcb_en); +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_api.h b/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_api.h new file mode 100644 index 00000000..6845aa62 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_api.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. 
+ * Description: MACsec api header file + * Create: 2025/11/04 + */ + +#ifndef HINIC5_MACSEC_API_H +#define HINIC5_MACSEC_API_H + +#include <linux/types.h> +#include "hinic5_nic_dev.h" + +/* MACsec初始化 */ +int macsec_init_offload(struct hinic5_nic_dev *nic_dev); +void macsec_cleanup_offload(struct hinic5_nic_dev *nic_dev); + +/* MACsec ioctl接口 */ +int macsec_cmd_list(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size); +int macsec_cmd_mib(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size); +int macsec_cmd_add(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size); +int macsec_cmd_del(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size); +int macsec_cmd_set(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size); +int macsec_cmd_flush(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size); + +#endif // HINIC5_MACSEC_API_H diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_common.h b/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_common.h new file mode 100644 index 00000000..379a0c38 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_common.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. 
+ * Description: common define + * Create: 2024/03/06 + */ + +#ifndef HINIC5_MACSEC_COMMON_H +#define HINIC5_MACSEC_COMMON_H + +#include <linux/types.h> + +#include "macsec_mpu_cmd.h" +#include "macsec_mpu_cmd_defs.h" +#include "macsec_pub_cmd.h" +#include "hinic5_nic_dev.h" +#include "hinic5_macsec_dev.h" + +#define PORT_MSK_IN_SCI 0x00FF // 广义 port,实际现阶段表示 sc_index +#define MACSEC_SC_STATUS_VALID(status) \ + ((((status) == SC_STATUS_MAX) || ((status) == SC_STATUS_NONE)) ? 0 : 1) +#define MACSEC_SA_STATUS_VALID(status) \ + ((((status) >= SA_STATUS_EXPIRED) || ((status) == SA_STATUS_NONE)) ? 0 : 1) +#define HIMACSEC_CONFIDENTIALITY_OFFSET_0 0 +#define HIMACSEC_CONFIDENTIALITY_OFFSET_30 1 +#define HIMACSEC_CONFIDENTIALITY_OFFSET_50 2 + +typedef enum { + HIMACSEC_SC_MODE_TWO_SA, + HIMACSEC_SC_MODE_FOUR_SA, + HIMACSEC_SC_MODE_MAX +} himacsec_sc_mode_e; + +/* 用于判断MACsec全局开关是否打开,若打开会影响NIC业务 */ +#define MACSEC_GLOBAL_SWITCH_IS_DISABLE 0 +#define MACSEC_GLOBAL_SWITCH_IS_ENABLE 1 + +/* MACsec offload adapt */ +void himacsec_offload_init(struct hinic5_nic_dev *nic_dev); +void himacsec_offload_deinit(struct hinic5_nic_dev *nic_dev); + +/* service function */ +int himacsec_create_sa(struct hinic5_nic_dev *nic_dev, macsec_sa_info_s *sa, + crypt_direction_e direct); +int himacsec_destroy_sa(struct hinic5_nic_dev *nic_dev, u64 sci, u8 assoc_num, + crypt_direction_e direct); +int himacsec_create_sc(struct hinic5_nic_dev *nic_dev, + macsec_sc_info_s *sc_info, crypt_direction_e direct); +int himacsec_destroy_sc(struct hinic5_nic_dev *nic_dev, u64 sci, crypt_direction_e direct); +int himacsec_set_sc(struct hinic5_nic_dev *nic_dev, macsec_sc_info_s *sc_info, + crypt_direction_e direct); +struct himacsec_sc *himacsec_get_valid_dev_sc(struct hinic5_nic_dev *nic_dev, + u64 sci, crypt_direction_e direct); +struct himacsec_sa *himacsec_get_valid_dev_sa(struct hinic5_nic_dev *nic_dev, + u64 sci, u8 an, crypt_direction_e direct); +struct himacsec_sc *get_g_macsec_port_res(u32 mode, u32 
port_id); +struct himacsec_sc *himacsec_get_dev_sc(struct hinic5_nic_dev *nic_dev, crypt_direction_e direct); + +/* mailbox msg function */ +int himacsec_cmd_exec_get_spec(void *hwdev, struct himacsec_spec *spec); +int himacsec_cmd_exec_macsec_enable(struct hinic5_lld_dev *lld_dev, + macsec_mbox_service_op_cmd_e op_code, u8 *macsec_flag); +int himacsec_cmd_exec_sc_op(struct hinic5_lld_dev *lld_dev, + macsec_sc_info_s *sc_info, macsec_mbox_sc_op_cmd_e opcode); +int himacsec_cmd_exec_sa_op(struct hinic5_lld_dev *lld_dev, + macsec_sa_info_s *sa_info, macsec_mbox_sa_op_cmd_e opcode); +int himacsec_cmd_exec_mib_port(struct hinic5_lld_dev *lld_dev, + struct himacsec_cmd_mib_out *cmd_out); +int himacsec_cmd_exec_mib_sc(struct hinic5_lld_dev *lld_dev, + struct himacsec_cmd_mib_out *out_buf, u64 sci); +int himacsec_cmd_exec_get_feature_nego(struct hinic5_lld_dev *lld_dev, + u64 *feature_bitmap, u32 feature_size); +int himacsec_cmd_exec_flush(struct hinic5_lld_dev *lld_dev, tag_macsec_flush_cmd_s *flush_info); + +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_dev.h b/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_dev.h new file mode 100644 index 00000000..29510905 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_dev.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. 
+ * Description: 存放实体 + * Create: 2024/03/01 + */ + +#ifndef HINIC5_MACSEC_DEV_H +#define HINIC5_MACSEC_DEV_H + +#include "macsec_mpu_cmd_defs.h" +#include "macsec_pub_cmd.h" +#include "ossl_knl.h" + +typedef struct himacsec_spec { + u8 macsec_support; /* 硬件是否支持MACsec */ + u8 max_port; /* Maximum number of ports on the chip */ + u8 max_port_sc; /* Maximum number of SCs per port */ + u8 max_sa; /* Maximum number of SA stored per SC */ +} himacsec_spec_s; + +typedef struct himacsec_stats { + u64 sa_update_times; + u64 sa_expire_times; +} himacsec_stats_s; + +typedef struct himacsec_mib_stats { + u32 max_port_size; + u32 max_port_sc_size; + macsec_port_mib_info_s *port_mibs; + macsec_sc_mib_info_s *sc_mibs; +} himacsec_mib_stats_s; + +typedef struct macsec_resource { + struct himacsec_spec spec; + u64 himacsec_feature[MACSEC_MAX_FEATURE_QWORD]; + u32 num_of_device; + himacsec_stats_s *stats; // 驱动内部计数 + struct notifier_block nb; + u8 function_port; + /* macsec子设备 的ifindex, + * 卸载的macsec设备的ifindex会存到此数组里 + */ + u32 offload_child_dev_idx[MACSEC_SC_NUM]; + u32 offload_dev_num; // offload_child_dev_idx数组中有效的元素个数 +} macsec_resource_s; + +struct macsec_port_res { + struct himacsec_sc enc_sc; // 数组头指针 + struct himacsec_sc dec_sc; +}; + +struct macsec_extra_param { + u64 threshold; + u8 algo_type; + u8 offset; + u8 bitmap; + bool threshold_set; + bool offset_set; + u8 rsvd; +}; + +enum param_bit { + PARAM_BIT_ENC_SC_ENCODING_SA, + PARAM_BIT_ENC_SC_PROTECTION_MODE, + PARAM_BIT_ENC_SC_PROTECT_FRAME_MODE, + PARAM_BIT_DEC_SC_VALID_FRAM_MODE, + PARAM_BIT_MAX, +}; + +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_dfx.c b/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_dfx.c new file mode 100644 index 00000000..4bf9f1ef --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_dfx.c @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei 
Technologies Co., Ltd. 2024-2024. All rights reserved.
+ * Description: macsec dfx (diagnostics) related code
+ * Create: 2024/05/23
+ */
+#include <linux/printk.h>
+#include <linux/types.h>
+#include <linux/semaphore.h>
+
+#include "hinic5_mt.h"
+#include "hinic5_crm.h"
+#include "hinic5_nic_dev.h"
+
+#include "macsec_mpu_cmd_defs.h"
+#include "macsec_pub_cmd.h"
+#include "hinic5_macsec_common.h"
+#include "hinic5_macsec_dfx.h"
+
+/* Map the chip register key-length encoding to the CRYPT_KEY_LENGTH_* enum;
+ * unknown register values are passed through unchanged.
+ */
+void himacsec_dfx_convert_key_length(u32 *key_length, u32 chip_key_len_val)
+{
+	if (chip_key_len_val == HIMACSEC_REG_KEY_LENGTH_128)
+		*key_length = CRYPT_KEY_LENGTH_128;
+	else if (chip_key_len_val == HIMACSEC_REG_KEY_LENGTH_256)
+		*key_length = CRYPT_KEY_LENGTH_256;
+	else
+		*key_length = chip_key_len_val;
+}
+
+/* Log the configuration of one encryption (TX) SA.
+ * FIX: key_len was computed but never used; the log printed the raw chip
+ * register value while its legend "[0: 128, 1: 256]" describes the converted
+ * enum value, so log key_len instead.
+ */
+void himacsec_dfx_show_enc_sa(struct hinic5_nic_dev *nic_dev, macsec_sa_info_s *enc_sa)
+{
+	u32 key_len;
+
+	himacsec_dfx_convert_key_length(&key_len, enc_sa->current_key_length);
+	macsec_info(nic_dev->lld_dev->dev,
+		    "%s: encryption sa: sci=%llx, an=%d, algorithm=%d[0: AES, 1:SM4], offset=0x%x, key length=%d[0: 128, 1: 256], xpn enable=%d, ssci=0x%x",
+		    nic_dev->netdev->name, enc_sa->sci, enc_sa->an,
+		    enc_sa->current_crypto_algo, enc_sa->confidentiality_offset,
+		    key_len, enc_sa->extended_pn_enable,
+		    enc_sa->ssci);
+	// encryption info
+	macsec_info(nic_dev->lld_dev->dev, "%s: encryption sa: sci=%llx, an=%d, pn threshold=0x%llx, next pn=0x%llx, enable transmit=%d",
+		    nic_dev->netdev->name, enc_sa->sci, enc_sa->an,
+		    enc_sa->pn_th, enc_sa->next_pn, enc_sa->enable_transmit);
+}
+
+/* Log the configuration of one decryption (RX) SA. */
+void himacsec_dfx_show_dec_sa(struct hinic5_nic_dev *nic_dev, macsec_sa_info_s *dec_sa)
+{
+	u32 key_len;
+
+	himacsec_dfx_convert_key_length(&key_len, dec_sa->current_key_length);
+	macsec_info(nic_dev->lld_dev->dev,
+		    "%s: decryption sa: sci=%llx, an=%d, algorithm=%d[0: AES, 1:SM4], offset=0x%x, key length=%d[0: 128, 1: 256], xpn enable=%d, ssci=0x%x",
+		    nic_dev->netdev->name, dec_sa->sci, dec_sa->an,
+		    dec_sa->current_crypto_algo,
dec_sa->confidentiality_offset, + dec_sa->current_key_length, dec_sa->extended_pn_enable, + dec_sa->ssci); + // decrytion info + macsec_info(nic_dev->lld_dev->dev, "%s: decryption sa: sci=%llx, an=%d, replay=%d, windows=0x%x, enable receive=%d, lowest pn=0x%llx", + nic_dev->netdev->name, dec_sa->sci, dec_sa->an, + dec_sa->replay_protect, dec_sa->replay_window, + dec_sa->enable_receive, dec_sa->lowest_pn); +} + +void himacsec_dfx_show_sa(struct hinic5_nic_dev *nic_dev, + macsec_sa_info_s *sa_info, crypt_direction_e direct) +{ + if (direct == MACSEC_OUTBOUND) + himacsec_dfx_show_enc_sa(nic_dev, sa_info); + else + himacsec_dfx_show_dec_sa(nic_dev, sa_info); +} + +void himacsec_dfx_show_enc_sc(struct hinic5_nic_dev *nic_dev, macsec_sc_info_s *enc_sc) +{ + macsec_info(nic_dev->lld_dev->dev, + "%s: encryption sc: sci=%llx, protect_frames=%d, protection_mode=%d, include_sci=%d, use_scb=%d, use_es=%d, encodingsa=%d", + nic_dev->netdev->name, enc_sc->sci, + enc_sc->protect_frames, enc_sc->protection_mode, + enc_sc->include_sci_enable, enc_sc->use_scb_enable, + enc_sc->use_es_enable, enc_sc->encoding_sa); +} + +void himacsec_dfx_show_dec_sc(struct hinic5_nic_dev *nic_dev, macsec_sc_info_s *dec_sc) +{ + macsec_info(nic_dev->lld_dev->dev, "%s: decryption sc: sci=%llx, validate_frames=%d", + nic_dev->netdev->name, dec_sc->sci, dec_sc->validate_frames); +} + +void himacsec_dfx_show_sc(struct hinic5_nic_dev *nic_dev, + macsec_sc_info_s *sc_info, crypt_direction_e direct) +{ + if (direct == MACSEC_OUTBOUND) + himacsec_dfx_show_enc_sc(nic_dev, sc_info); + else + himacsec_dfx_show_dec_sc(nic_dev, sc_info); +} diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_dfx.h b/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_dfx.h new file mode 100644 index 00000000..a02e5d76 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_dfx.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 
*/ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. + * Description: 存放实体 + * Create: 2024/03/01 + */ + +#ifndef HINIC5_MACSEC_DFX_H +#define HINIC5_MACSEC_DFX_H + +#include <linux/netdevice.h> + +#include "hinic5_nic_dev.h" + +#include "macsec_mpu_cmd_defs.h" +#include "macsec_pub_cmd.h" + +#ifdef GLOBAL_VERSION_STR +#define HIMACSEC_DRV_VER GLOBAL_VERSION_STR +#else +#define HIMACSEC_DRV_VER "" +#endif + +#define macsec_err(dev, format, ...) dev_err(dev, "[MACsec]" format, ##__VA_ARGS__) +#define macsec_warning(dev, format, ...) dev_warn(dev, "[MACsec]" format, ##__VA_ARGS__) +#define macsec_notice(dev, format, ...) dev_notice(dev, "[MACsec]" format, ##__VA_ARGS__) +#define macsec_info(dev, format, ...) dev_info(dev, "[MACsec]" format, ##__VA_ARGS__) + +void himacsec_dfx_convert_key_length(u32 *key_length, u32 chip_key_len_val); +void himacsec_dfx_show_sa(struct hinic5_nic_dev *nic_dev, + macsec_sa_info_s *sa_info, crypt_direction_e direct); +void himacsec_dfx_show_sc(struct hinic5_nic_dev *nic_dev, + macsec_sc_info_s *sc_info, crypt_direction_e direct); + +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_main.c b/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_main.c new file mode 100644 index 00000000..be2c9c54 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_main.c @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ * Description: macsec main + * Create: 2025/11/05 + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [MACsec]" fmt + +#include <net/rtnetlink.h> +#include <linux/module.h> +#include <linux/netdev_features.h> +#include <linux/netlink.h> +#include <linux/netdevice.h> +#include <linux/rculist.h> + +#include "ossl_knl.h" +#include "hinic5_hw.h" +#include "hinic5_srv_nic.h" + +#include "hinic5_macsec_dfx.h" +#include "hinic5_macsec_common.h" +#include "hinic5_macsec_dev.h" + +// TODO 该参数适配支持4sa模式, 配置文件ready后删除 +static u8 macsec_sc_mode = HIMACSEC_SC_MODE_FOUR_SA; +module_param(macsec_sc_mode, byte, 0444); +MODULE_PARM_DESC(macsec_sc_mode, "MACsec SC mode, 0: 2SA, 1: 4SA (default=1)"); + +struct macsec_port_res g_macsec_port_res[MACSEC_PORT_NUM] = {0}; + +#define HIMACSEC_DRV_DESC "Huawei(R) Intelligent Network Interface Card, MACsec driver" +#define HIMACSEC_AUTHOR "Huawei Technologies CO., Ltd" + +struct himacsec_sc *get_g_macsec_port_res(u32 mode, u32 port_id) +{ + if (port_id >= MACSEC_PORT_NUM) { + pr_err("MACsec param port_id(0x%x) invalid", port_id); + return NULL; + } + if (mode == MACSEC_OUTBOUND) + return &g_macsec_port_res[port_id].enc_sc; + else + return &g_macsec_port_res[port_id].dec_sc; +} + +int himacsec_init_dev_spec(struct macsec_resource *macsec_res, + struct hinic5_lld_dev *lld_dev, struct himacsec_spec *spec) +{ + macsec_res->spec.macsec_support = spec->macsec_support; + macsec_res->spec.max_port = spec->max_port; + + if (macsec_sc_mode >= HIMACSEC_SC_MODE_MAX) { + macsec_err(lld_dev->dev, "MACsec param macsec_sc_mode(0x%x) invalid", + macsec_sc_mode); + return -EINVAL; + } + + macsec_res->spec.max_sa = (macsec_sc_mode + 1) << 0x1; // macsec_sc_mod=0: 2SA模式 + // macsec_sc_mode=1: 4SA模式 + macsec_res->spec.max_port_sc = spec->max_port_sc; // 端口SC数量:2SA模式-8个SC + // 4SA模式-4个SC + + return 0; +} + +int himacsec_init_resource(struct hinic5_nic_dev *nic_dev, struct hinic5_lld_dev *lld_dev, + struct himacsec_spec *spec, u64 feature_bitmap) +{ + struct 
macsec_resource *macsec_res = NULL; + int ret; + u16 func_id = hinic5_global_func_id(lld_dev->hwdev); + u8 port = hinic5_physical_port_id(lld_dev->hwdev); + + macsec_res = kzalloc(sizeof(struct macsec_resource), GFP_KERNEL); + if (!macsec_res) + return -ENOMEM; + + /* 1、初始化 spec */ + ret = himacsec_init_dev_spec(macsec_res, lld_dev, spec); + if (ret != 0) { + kfree(macsec_res); + return ret; + } + + /* 3、私有数据回挂 */ + macsec_res->function_port = port; + macsec_res->offload_dev_num = 0; + macsec_res->himacsec_feature[0] = feature_bitmap; + nic_dev->macsec_res = macsec_res; // 回挂到nic_dev + + macsec_info(lld_dev->dev, "Func 0x%x macsec capability: sc_mode(0:2sa, 1:4sa): %u, func_port: %u, feature: 0x%llx", + func_id, macsec_sc_mode, port, feature_bitmap); + + return ret; +} + +void himacsec_release_resource(struct hinic5_nic_dev *nic_dev) +{ + struct macsec_resource *macsec_res = NULL; + + if (!nic_dev) + return; + + macsec_res = nic_dev->macsec_res; + if (!macsec_res) + return; + + // 2、清理 MACsec 资源 + kfree(macsec_res); + nic_dev->macsec_res = NULL; +} + +int macsec_init_offload(struct hinic5_nic_dev *nic_dev) +{ + u64 feature_bitmap = 0; + int ret; + struct himacsec_spec spec = {0}; + struct hinic5_lld_dev *lld_dev = NULL; + u8 macsec_flag = MACSEC_GLOBAL_SWITCH_IS_DISABLE; + + if (!nic_dev || !nic_dev->lld_dev || !nic_dev->netdev) { + pr_err("lld device is NULL, MACsec init failed"); + return -ENODEV; + } + + lld_dev = nic_dev->lld_dev; + + /* 1、确认MACsec使能情况 */ + if (!hinic5_support_macsec(lld_dev->hwdev)) { + macsec_info(lld_dev->dev, "HW don't support macsec"); + return 0; + } + + /* 2、特性协商 */ + if (himacsec_cmd_exec_get_feature_nego(lld_dev, &feature_bitmap, 1) != 0) { + macsec_err(lld_dev->dev, "Feature negotiation failed"); + return -EBUSY; + } + + /* 3、全局配置获取 */ + if (himacsec_cmd_exec_get_spec(lld_dev->hwdev, &spec) != 0) { + macsec_err(lld_dev->dev, "Get chip spec failed"); + return -EBUSY; + } + + /* 4、nic_dev中初始化MACsec私有数据 */ + ret = 
himacsec_init_resource(nic_dev, lld_dev, &spec, feature_bitmap); + if (ret != 0) { + macsec_err(lld_dev->dev, "Init MACsec private data failed, ret:%d", ret); + return ret; + } + + /* 5、内核协议栈 macsec 功能卸载初始化 */ + himacsec_offload_init(nic_dev); + + /* 6、使能 macsec,需要进行初始化操作(只需要一次) */ + ret = himacsec_cmd_exec_macsec_enable(lld_dev, MACSEC_CMD_SERVICE_OP_MACSEC_ENABLE, + &macsec_flag); + if (ret != 0) { + macsec_err(lld_dev->dev, "Enable macsec failed, ret:%d", ret); + goto enable_macsec_fail; + } + + macsec_info(lld_dev->dev, "Macsec resource init successfully"); + return ret; + +enable_macsec_fail: + himacsec_offload_deinit(nic_dev); + himacsec_release_resource(nic_dev); + if (macsec_flag == MACSEC_GLOBAL_SWITCH_IS_DISABLE) + return 0; + return ret; +} + +void macsec_cleanup_offload(struct hinic5_nic_dev *nic_dev) +{ + struct macsec_resource *macsec_res = NULL; + struct hinic5_lld_dev *lld_dev = NULL; + int ret; + + if (!nic_dev || !nic_dev->lld_dev || !nic_dev->netdev) { + pr_err("lld device is NULL, MACsec resource release failed"); + return; + } + + lld_dev = nic_dev->lld_dev; + macsec_res = nic_dev->macsec_res; + + if (!macsec_res || macsec_res->spec.macsec_support == 0) + return; + + ret = himacsec_cmd_exec_macsec_enable(lld_dev, MACSEC_CMD_SERVICE_OP_MACSEC_DISABLE, NULL); + if (ret != 0) + macsec_err(lld_dev->dev, "Disable macsec failed, ret:%d", ret); + + himacsec_offload_deinit(nic_dev); + + himacsec_release_resource(nic_dev); + macsec_info(lld_dev->dev, "Macsec resource release successfully"); +} diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_mgmt.c b/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_mgmt.c new file mode 100644 index 00000000..877a9726 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_mgmt.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. 
+ * Description: macsec 配置下发 + * Create: 2024/03/07 + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [MACsec]" fmt + +#include <linux/types.h> + +#include "macsec_mpu_cmd.h" +#include "macsec_mpu_cmd_defs.h" +#include "hinic5_hw.h" +#include "comm_defs.h" +#include "hinic5_macsec_dev.h" +#include "hinic5_macsec_dfx.h" +#include "hinic5_macsec_api.h" +#include "hinic5_macsec_common.h" + +int himacsec_cmd_exec_get_feature_nego(struct hinic5_lld_dev *lld_dev, + u64 *feature_bitmap, u32 feature_size) +{ + int ret; + u16 out_size = sizeof(macsec_feature_nego_cmd_s); + macsec_feature_nego_cmd_s feature_nego = {0}; + + if (!feature_bitmap) { + macsec_err(lld_dev->dev, "MACsec get feature nego invalid param, feature bitmap is NULL"); + return -EINVAL; + } + + if (feature_size > MACSEC_MAX_FEATURE_QWORD) { + macsec_err(lld_dev->dev, "MACsec get feature nego invalid param, feature_size(0x%x) is greater than 0x%x", + feature_size, MACSEC_MAX_FEATURE_QWORD); + return -EINVAL; + } + + feature_nego.op_code = MACSEC_FEATURE_NEGO_OPCODE_GET; + ret = hinic5_msg_to_mgmt_sync(lld_dev->hwdev, HINIC5_MOD_MACSEC, + MACSEC_CMD_FEATURE_NEGO_OP, &feature_nego, + sizeof(macsec_feature_nego_cmd_s), + &feature_nego, &out_size, 0, + HINIC5_CHANNEL_MACSEC); + if (ret != 0 || feature_nego.head.status != 0 || + out_size != (u32)sizeof(feature_nego)) { + macsec_err(lld_dev->dev, "MACsec get feature nego status(0x%x) incorrect, out size(0x%x) not equals 0x%x", + feature_nego.head.status, out_size, (u32)sizeof(feature_nego)); + return -EINVAL; + } + + memcpy(feature_bitmap, feature_nego.s_feature, feature_size * sizeof(u64)); + + return 0; +} + +/* TODO(B998):获得芯片规格,port数、最大支持SC数/port, + * AT&产品化考虑是否从配置文件获取,当前FT固化 + */ +int himacsec_cmd_exec_get_spec(void *hwdev, struct himacsec_spec *spec) +{ + spec->macsec_support = 1; + spec->max_port = 0x4; + spec->max_port_sc = 1; + spec->max_sa = 0x4; + return 0; +} + +int himacsec_cmd_exec_macsec_enable(struct hinic5_lld_dev *lld_dev, + 
macsec_mbox_service_op_cmd_e op_code, u8 *macsec_flag) +{ + macsec_cmd_service_operation_s macsec_cfg = {0}; + u16 out_size = sizeof(macsec_cmd_service_operation_s); + int ret; + + /* MACsec全局开关仅在PPF设备配置时配置 */ + if (hinic5_func_type(lld_dev->hwdev) != TYPE_PPF) { + if (macsec_flag) + *macsec_flag = MACSEC_GLOBAL_SWITCH_IS_ENABLE; + return 0; + } + + macsec_cfg.op_code = op_code; + + ret = hinic5_msg_to_mgmt_sync(lld_dev->hwdev, HINIC5_MOD_MACSEC, + MACSEC_CMD_SERVICE_OP, &macsec_cfg, + sizeof(macsec_cmd_service_operation_s), + &macsec_cfg, &out_size, 0, + HINIC5_CHANNEL_MACSEC); + if (ret != 0 || out_size != sizeof(macsec_cmd_service_operation_s) || + macsec_cfg.head.status != 0) { + macsec_err(lld_dev->dev, "Failed to exec service init cmd, err=0x%x, status=0x%x, out size:0x%x, enable:0x%x", + ret, macsec_cfg.head.status, out_size, macsec_cfg.op_code); + return -EINVAL; + } + if (macsec_flag) + *macsec_flag = MACSEC_GLOBAL_SWITCH_IS_ENABLE; + + return 0; +} + +int himacsec_cmd_exec_sc_op(struct hinic5_lld_dev *lld_dev, macsec_sc_info_s *sc_info, + macsec_mbox_sc_op_cmd_e opcode) +{ + u16 out_size = sizeof(macsec_cmd_sc_operation_s); + macsec_cmd_sc_operation_s macsec_cfg = {0}; + int ret; + + memcpy(&macsec_cfg.sc_info, sc_info, sizeof(macsec_sc_info_s)); + macsec_cfg.op_code = opcode; + + // HINIC5_CHANNEL_MACSEC 作用于场景: + // 需要拦截所有使用该通道的特性发往 MPU 的请求 + ret = hinic5_msg_to_mgmt_sync(lld_dev->hwdev, HINIC5_MOD_MACSEC, + MACSEC_CMD_SC_OP, &macsec_cfg, + sizeof(macsec_cmd_sc_operation_s), + &macsec_cfg, &out_size, 0, + HINIC5_CHANNEL_MACSEC); + if (ret != 0 || out_size != sizeof(macsec_cmd_sc_operation_s) || + macsec_cfg.head.status != 0) { + macsec_err(lld_dev->dev, "Failed to exec sc cmd, err=0x%x, status=0x%x, out size:0x%x", + ret, macsec_cfg.head.status, out_size); + return -EINVAL; + } + + // buf 结果拷贝 + if (opcode == MACSEC_CMD_ENC_SC_GET_INFO || opcode == MACSEC_CMD_DEC_SC_GET_INFO) + memcpy(sc_info, &macsec_cfg.sc_info, sizeof(macsec_sc_info_s)); + return 0; 
+} + +int himacsec_cmd_exec_sa_op(struct hinic5_lld_dev *lld_dev, macsec_sa_info_s *sa_info, + macsec_mbox_sa_op_cmd_e opcode) +{ + /* out_size 在 driver->mpu 流程中表示用于接收 mailbox 返回消息的 buf 大小 + * out_size 在 mpu->driver 流程中表示 mailbox 返回消息实际的大小, + * 由 sdk 拷贝到 out_buf 中 + */ + u16 out_size = sizeof(macsec_cmd_sa_operation_s); + macsec_cmd_sa_operation_s macsec_cfg = {0}; + int ret; + + memcpy(&macsec_cfg.sa_info, sa_info, sizeof(macsec_sa_info_s)); + macsec_cfg.op_code = opcode; + + ret = hinic5_msg_to_mgmt_sync(lld_dev->hwdev, HINIC5_MOD_MACSEC, + MACSEC_CMD_SA_OP, &macsec_cfg, + sizeof(macsec_cmd_sa_operation_s), + &macsec_cfg, &out_size, 0, + HINIC5_CHANNEL_MACSEC); + memset(macsec_cfg.sa_info.sak, 0, HIMACSEC_MAX_SAK_KEY_LEN); + if (ret != 0 || out_size != sizeof(macsec_cmd_sa_operation_s) || + macsec_cfg.head.status != 0) { + macsec_err(lld_dev->dev, "Failed to exec sa cmd, err=0x%x, status=0x%x, out size:0x%x", + ret, macsec_cfg.head.status, out_size); + return -EINVAL; + } + + // buf 结果拷贝 + if (opcode == MACSEC_CMD_ENC_SA_GET_INFO || opcode == MACSEC_CMD_DEC_SA_GET_INFO) + memcpy(sa_info, &macsec_cfg.sa_info, sizeof(macsec_sa_info_s)); + return 0; +} + +int himacsec_cmd_exec_mib_port(struct hinic5_lld_dev *lld_dev, struct himacsec_cmd_mib_out *cmd_out) +{ + macsec_cmd_port_mib_operation_s macsec_cfg = {0}; + u16 out_size = sizeof(macsec_cmd_port_mib_operation_s); + int ret; + + ret = hinic5_msg_to_mgmt_sync(lld_dev->hwdev, HINIC5_MOD_MACSEC, + MACSEC_CMD_GET_PORT_MIB, &macsec_cfg, + sizeof(macsec_cmd_port_mib_operation_s), + &macsec_cfg, &out_size, 0, + HINIC5_CHANNEL_MACSEC); + if (ret != 0 || out_size != sizeof(macsec_cmd_port_mib_operation_s) || + macsec_cfg.head.status != 0) { + macsec_err(lld_dev->dev, "Failed to exec port mib cmd, err=0x%x, status=0x%x, out size:0x%x", + ret, macsec_cfg.head.status, out_size); + return -EINVAL; + } + + // 复制结果到 out_buf + memcpy(cmd_out->mib_buf, &macsec_cfg.port_mib, sizeof(macsec_cfg.port_mib)); + + return 0; +} + +int 
himacsec_cmd_exec_mib_sc(struct hinic5_lld_dev *lld_dev, + struct himacsec_cmd_mib_out *out_buf, u64 sci) +{ + macsec_cmd_sc_mib_operation_s macsec_cfg = {0}; + u16 out_size = sizeof(macsec_cmd_sc_mib_operation_s); + int ret; + + macsec_cfg.sci = sci; + ret = hinic5_msg_to_mgmt_sync(lld_dev->hwdev, HINIC5_MOD_MACSEC, + MACSEC_CMD_GET_SC_MIB, &macsec_cfg, + sizeof(macsec_cmd_sc_mib_operation_s), + &macsec_cfg, &out_size, 0, + HINIC5_CHANNEL_MACSEC); + if (ret != 0 || out_size != sizeof(macsec_cmd_sc_mib_operation_s) || + macsec_cfg.head.status != 0) { + macsec_err(lld_dev->dev, "Failed to exec sc mib cmd, err=0x%x, status=0x%x, out size:0x%x", + ret, macsec_cfg.head.status, out_size); + return -EINVAL; + } + + out_buf->num = 1; + memcpy(out_buf->mib_buf, &macsec_cfg.sc_mib, sizeof(macsec_sc_mib_info_s)); + + return 0; +} + +int himacsec_cmd_exec_flush(struct hinic5_lld_dev *lld_dev, tag_macsec_flush_cmd_s *flush_info) +{ + u16 out_size = sizeof(tag_macsec_flush_cmd_s); + int ret; + + ret = hinic5_msg_to_mgmt_sync(lld_dev->hwdev, HINIC5_MOD_MACSEC, + MACSEC_CMD_FLUSH_OP, flush_info, + sizeof(tag_macsec_flush_cmd_s), flush_info, + &out_size, 0, HINIC5_CHANNEL_MACSEC); + if (ret != 0 || out_size != sizeof(tag_macsec_flush_cmd_s) || + flush_info->head.status != 0) { + macsec_err(lld_dev->dev, "Failed to exec flush cmd, err=0x%x, status=0x%x, out size:0x%x", + ret, flush_info->head.status, out_size); + return -EINVAL; + } + + return 0; +} diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_nictool.c b/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_nictool.c new file mode 100644 index 00000000..cf387644 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_nictool.c @@ -0,0 +1,575 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. 
+ * Description: macsec nictool intaface + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [MACsec]" fmt + +#include <linux/semaphore.h> + +#include "hinic5_lld.h" +#include "hinic5_mt.h" + +#include "macsec_pub_cmd.h" +#include "nic_pub_cmd.h" +#include "hinic5_macsec_common.h" +#include "hinic5_macsec_dfx.h" + +static int himacsec_cmd_check_param_buf(const void *buf, u32 buf_size, u32 exp_buf_size) +{ + if (!buf || buf_size != exp_buf_size) { + pr_err("Buffer in or out can not be NULL when exec macsec cmd, buf size=%d, exp size=%d", + buf_size, exp_buf_size); + return -EINVAL; + } + return 0; +} + +int himacsec_fill_enc_sa(struct hinic5_nic_dev *nic_dev, struct himacsec_sc *enc_sc, + struct himacsec_sa *tar_sa, u8 *sa_cnt, u32 cmd_type) +{ + struct himacsec_sa enc_sa = {0}; + u8 sa_index = 0; + u8 an = 0; // There is no an in the sa table, it needs to be rewritten back + int index; + int ret = 0; + + for (index = 0; index < nic_dev->macsec_res->spec.max_sa; index++) { + enc_sa = enc_sc->sa[index]; + an = enc_sa.info.an; + if (MACSEC_SA_STATUS_VALID(enc_sa.status.status.sa) == 0) + continue; + if (cmd_type == MACSEC_TOOL_OP_DUMP) { + ret = himacsec_cmd_exec_sa_op(nic_dev->lld_dev, + &enc_sa.info, + MACSEC_CMD_ENC_SA_GET_INFO); + if (ret != 0) { + macsec_err(nic_dev->lld_dev->dev, "%s: Exec cmd dump encryption sa config failed, ret=%d", + nic_dev->netdev->name, ret); + return ret; + } + enc_sa.info.an = an; + } + tar_sa[sa_index++] = enc_sa; + } + *sa_cnt = sa_index; + return ret; +} + +int himacsec_fill_dec_sa(struct hinic5_nic_dev *nic_dev, struct himacsec_sc *dec_sc, + struct himacsec_sa *tar_sa, u8 *sa_cnt, u32 cmd_type) +{ + struct himacsec_sa dec_sa = {0}; + u8 an = 0; // There is no an in the sa table, it needs to be rewritten back + u8 sa_index = 0; + int index; + int ret = 0; + + for (index = 0; index < nic_dev->macsec_res->spec.max_sa; index++) { + dec_sa = dec_sc->sa[index]; + an = dec_sa.info.an; + if (MACSEC_SA_STATUS_VALID(dec_sa.status.status.sa) == 0) + 
continue; + if (cmd_type == MACSEC_TOOL_OP_DUMP) { + ret = himacsec_cmd_exec_sa_op(nic_dev->lld_dev, + &dec_sa.info, + MACSEC_CMD_DEC_SA_GET_INFO); + if (ret != 0) { + macsec_err(nic_dev->lld_dev->dev, "%s: Exec cmd dump decryption sa config failed, ret=%d", + nic_dev->netdev->name, ret); + return ret; + } + dec_sa.info.an = an; + } + tar_sa[sa_index++] = dec_sa; + } + *sa_cnt = sa_index; + return ret; +} + +int macsec_get_enc_sc_info(struct hinic5_nic_dev *nic_dev, struct himacsec_sc *temp_sc, + struct himacsec_cmd_list_sc_buf *cur_sc_buf, u32 cmd_type) +{ + int ret = 0; + + if (cmd_type == MACSEC_TOOL_OP_DUMP) { + ret = himacsec_cmd_exec_sc_op(nic_dev->lld_dev, &temp_sc->info, + MACSEC_CMD_ENC_SC_GET_INFO); + if (ret != 0) { + macsec_err(nic_dev->lld_dev->dev, "Dump cmd failed ret=%d, fetch tx sc info fail.", + ret); + return ret; + } + } + ret = himacsec_fill_enc_sa(nic_dev, temp_sc, cur_sc_buf->sc.sa, + &cur_sc_buf->sa_cnt, cmd_type); + if (ret != 0) { + macsec_err(nic_dev->lld_dev->dev, "Dump cmd failed ret=%d, fetch tx sa info fail.", + ret); + return ret; + } + cur_sc_buf->sc.info = temp_sc->info; + + return 0; +} + +int macsec_get_dec_sc_info(struct hinic5_nic_dev *nic_dev, struct himacsec_sc *temp_sc, + struct himacsec_cmd_list_sc_buf *cur_sc_buf, u32 cmd_type) +{ + int ret = 0; + + if (cmd_type == MACSEC_TOOL_OP_DUMP) { + ret = himacsec_cmd_exec_sc_op(nic_dev->lld_dev, &temp_sc->info, + MACSEC_CMD_DEC_SC_GET_INFO); + if (ret != 0) { + macsec_err(nic_dev->lld_dev->dev, "Dump cmd failed ret=%d, fetch rx sc info fail.", + ret); + return ret; + } + } + ret = himacsec_fill_dec_sa(nic_dev, temp_sc, cur_sc_buf->sc.sa, + &cur_sc_buf->sa_cnt, cmd_type); + if (ret != 0) { + macsec_err(nic_dev->lld_dev->dev, "Dump cmd failed ret=%d, fetch rx sa info fail.", + ret); + return ret; + } + cur_sc_buf->sc.info = temp_sc->info; + + return 0; +} + +int himacsec_fill_config(struct hinic5_nic_dev *nic_dev, struct himacsec_cmd_list_out *cmd_out, + u32 cmd_type) +{ + struct 
himacsec_cmd_list_sc_buf *enc_sc_buf =
+		(struct himacsec_cmd_list_sc_buf *)cmd_out->enc_sc_buf;
+	struct himacsec_cmd_list_sc_buf *dec_sc_buf =
+		(struct himacsec_cmd_list_sc_buf *)cmd_out->dec_sc_buf;
+	struct himacsec_cmd_list_sc_buf *cur_sc_buf = NULL;
+	struct himacsec_sc *temp_sc = NULL;
+	u32 enc_sc_cnt = 0;
+	u32 dec_sc_cnt = 0;
+	u32 index;
+	u32 port_id = nic_dev->macsec_res->function_port;
+	int ret = 0;
+
+	for (index = 0; index < nic_dev->macsec_res->spec.max_port_sc; index++) {
+		/* Fetch the encryption-direction (TX) MACsec config.
+		 * FIX: the guard must verify temp_sc is non-NULL before
+		 * dereferencing it; the original "!temp_sc && temp_sc->..."
+		 * dereferenced a NULL pointer when the lookup failed and
+		 * skipped every valid SC (the decryption branch below already
+		 * uses the correct form).
+		 */
+		temp_sc = get_g_macsec_port_res(MACSEC_OUTBOUND, port_id);
+		if (temp_sc && (MACSEC_SC_STATUS_VALID(temp_sc->status.status.sc) != 0)) {
+			/* Address of the next output slot for SC parameters */
+			cur_sc_buf = enc_sc_buf + enc_sc_cnt;
+			ret = macsec_get_enc_sc_info(nic_dev, temp_sc, cur_sc_buf, cmd_type);
+			if (ret != 0) {
+				macsec_err(nic_dev->lld_dev->dev, "Dump cmd failed ret=%d, get tx macsec info fail.",
+					   ret);
+				return ret;
+			}
+			enc_sc_cnt++;
+		}
+
+		/* Fetch the decryption-direction (RX) MACsec config */
+		temp_sc = get_g_macsec_port_res(MACSEC_INBOUND, port_id);
+		if (temp_sc && (MACSEC_SC_STATUS_VALID(temp_sc->status.status.sc) != 0)) {
+			/* Address of the next output slot for SC parameters */
+			cur_sc_buf = dec_sc_buf + dec_sc_cnt;
+			ret = macsec_get_dec_sc_info(nic_dev, temp_sc, cur_sc_buf, cmd_type);
+			if (ret != 0) {
+				macsec_err(nic_dev->lld_dev->dev, "Dump cmd failed ret=%d, get rx macsec info fail.",
+					   ret);
+				return ret;
+			}
+			dec_sc_cnt++;
+		}
+	}
+
+	cmd_out->enc_sc_cnt = enc_sc_cnt;
+	cmd_out->dec_sc_cnt = dec_sc_cnt;
+	return 0;
+}
+
+int himacsec_fill_mib_sc(struct hinic5_nic_dev *nic_dev, struct himacsec_cmd_mib_out *cmd_out,
+			 u64 sci)
+{
+	struct himacsec_sc *enc_sc = NULL;
+	struct himacsec_sc *dec_sc = NULL;
+	int ret;
+
+	// 1.
参数校验 + enc_sc = himacsec_get_valid_dev_sc(nic_dev, sci, MACSEC_OUTBOUND); + dec_sc = himacsec_get_valid_dev_sc(nic_dev, sci, MACSEC_INBOUND); + if (!enc_sc && !dec_sc) { // SCI 不存在 + macsec_err(nic_dev->lld_dev->dev, "%s: Exec cmd querying sc mib failed, sci=%llx not found", + nic_dev->netdev->name, sci); + return -EINVAL; + } + + // 2. 发送查询请求 + ret = himacsec_cmd_exec_mib_sc(nic_dev->lld_dev, cmd_out, sci); + if (ret != 0) { + macsec_err(nic_dev->lld_dev->dev, "%s: Exec cmd querying sc mib failed, sci=%llx, ret=%d", + nic_dev->netdev->name, sci, ret); + return ret; + } + + return ret; +} + +int himacsec_fill_mib_port(struct hinic5_nic_dev *nic_dev, struct himacsec_cmd_mib_out *cmd_out) +{ + int ret; + + ret = himacsec_cmd_exec_mib_port(nic_dev->lld_dev, cmd_out); + if (ret != 0) + macsec_err(nic_dev->lld_dev->dev, "%s: Exec cmd query port mib failed, ret=%d", + nic_dev->netdev->name, ret); + + return ret; +} + +/* hinicadmdfx5 macsec -o list -i enp133s0f1 + * Query all macsec configuration information for the current port in memory + */ +int macsec_cmd_list(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + int ret = -EINVAL; + struct himacsec_cmd_in *cmd_in = (struct himacsec_cmd_in *)buf_in; + struct himacsec_cmd_list_out *cmd_out = (struct himacsec_cmd_list_out *)buf_out; + + // 1. 参数校验 + if (!nic_dev->macsec_res) { + macsec_err(nic_dev->lld_dev->dev, "Macsec resource is NULL"); + return -EINVAL; + } + + if ((himacsec_cmd_check_param_buf(buf_in, in_size, sizeof(struct himacsec_cmd_hdr)) != 0) || + (himacsec_cmd_check_param_buf(buf_out, *out_size, + sizeof(struct himacsec_cmd_list_out)) != 0)) { + return -EINVAL; + } + + // 2. 
参数赋值 + ret = himacsec_fill_config(nic_dev, cmd_out, cmd_in->hdr.cmd_type); + if (ret != 0) + macsec_err(nic_dev->lld_dev->dev, + "Cmd list/dump macsec config exec failed, ret=%d", ret); + + return ret; +} + +/* hinicadmdfx5 macsec -o mib -i enp133s0f1 -t [sc -s <sci> | port] + * Query all macsec mib information + */ +int macsec_cmd_mib(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + int ret = -EINVAL; + struct himacsec_cmd_in *cmd_in = (struct himacsec_cmd_in *)buf_in; + struct himacsec_cmd_mib_in *mib_in = NULL; + struct himacsec_cmd_mib_out *cmd_out = (struct himacsec_cmd_mib_out *)buf_out; + u32 exp_in_size = sizeof(struct himacsec_cmd_hdr) + sizeof(struct himacsec_cmd_mib_in); + + // 1. 参数校验 + if (!nic_dev->macsec_res) { + macsec_err(nic_dev->lld_dev->dev, "Macsec resource is NULL"); + return -EINVAL; + } + + if ((himacsec_cmd_check_param_buf(buf_in, in_size, exp_in_size) != 0) || + (himacsec_cmd_check_param_buf(buf_out, *out_size, + sizeof(struct himacsec_cmd_mib_out)) != 0)) { + return -EINVAL; + } + + // 2. 
解析入参 + mib_in = (struct himacsec_cmd_mib_in *)cmd_in->buf; + if (mib_in->mib_type == HIMACSEC_TOOL_MIB_TYPE_PORT) { + ret = himacsec_fill_mib_port(nic_dev, cmd_out); + } else if (mib_in->mib_type == HIMACSEC_TOOL_MIB_TYPE_SC) { + ret = himacsec_fill_mib_sc(nic_dev, cmd_out, mib_in->sci); + } else { + macsec_err(nic_dev->lld_dev->dev, "Unknown mib type %d", mib_in->mib_type); + return -EINVAL; + } + + if (ret != 0) + macsec_err(nic_dev->lld_dev->dev, "Cmd querying macsec mib failed, ret=%d", ret); + return ret; +} + +int macsec_cmd_flush(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + int ret, index; + struct himacsec_cmd_in *cmd_in = (struct himacsec_cmd_in *)buf_in; + tag_macsec_flush_cmd_s flush_cmd = {0}; + struct macsec_resource *macsec_res = nic_dev->macsec_res; + struct himacsec_sc *enc_sc = NULL; + struct himacsec_sc *dec_sc = NULL; + + if (!macsec_res) { + macsec_err(nic_dev->lld_dev->dev, "%s: MACsec resource is NULL", + nic_dev->netdev->name); + return -EINVAL; + } + + // 1. 
参数校验 + if (himacsec_cmd_check_param_buf(buf_in, in_size, sizeof(struct himacsec_cmd_hdr)) != 0) + return -EINVAL; + + macsec_info(nic_dev->lld_dev->dev, "MACsec flush process, obj_type=0x%x", + cmd_in->hdr.obj_type); + + flush_cmd.op_code = MACSEC_CMD_FLUSH_SC_OP; + ret = himacsec_cmd_exec_flush(nic_dev->lld_dev, &flush_cmd); + if (ret != 0) + macsec_err(nic_dev->lld_dev->dev, "MACsec flush failed, ret=0x%x", ret); + + enc_sc = himacsec_get_dev_sc(nic_dev, MACSEC_OUTBOUND); + dec_sc = himacsec_get_dev_sc(nic_dev, MACSEC_INBOUND); + /* 清除驱动内数据 */ + for (index = 0; index < macsec_res->spec.max_port_sc; index++) { + memset(enc_sc, 0, sizeof(struct himacsec_sc)); + memset(dec_sc, 0, sizeof(struct himacsec_sc)); + } + return ret; +} + +int himacsec_nictool_add_sc(struct hinic5_nic_dev *nic_dev, const struct himacsec_cmd_in *cmd_in, + u32 in_size, crypt_direction_e direct) +{ + int ret = 0; + macsec_sc_info_s *sc = (macsec_sc_info_s *)cmd_in->buf; + u32 exp_in_size = sizeof(struct himacsec_cmd_hdr) + sizeof(macsec_sc_info_s); + + if (in_size != exp_in_size) { + macsec_err(nic_dev->lld_dev->dev, "Add encryption cmd buffer invalid, in size=0x%x, exp_size=0x%x", + in_size, exp_in_size); + return -EINVAL; + } + + ret = himacsec_create_sc(nic_dev, sc, direct); + if (ret != 0) + macsec_err(nic_dev->lld_dev->dev, "Add encryption cmd fail, ret=%d", ret); + + return ret; +} + +int himacsec_nictool_add_sa(struct hinic5_nic_dev *nic_dev, const struct himacsec_cmd_in *cmd_in, + u32 in_size, crypt_direction_e direct) +{ + macsec_sa_info_s *sa = (macsec_sa_info_s *)cmd_in->buf; + u32 exp_in_size = sizeof(struct himacsec_cmd_hdr) + sizeof(macsec_sa_info_s); + struct macsec_resource *macsec_res = nic_dev->macsec_res; + + if (in_size != exp_in_size) { + macsec_err(nic_dev->lld_dev->dev, "Add sa failed, size error, in size=0x%x, exp_size=0x%x", + in_size, exp_in_size); + return -EINVAL; + } + + // SM4 算法需要校验特性协商结果是否支持 + if (sa->current_crypto_algo == HIMACSEC_CRYPTO_ALGO_SM4 && + 
(macsec_res->himacsec_feature[0] & (u64)MACSEC_F_SUPPORT_SM4) == 0) { + macsec_err(nic_dev->lld_dev->dev, "Add sa failed, unsupported algorithm(0x%x)", + sa->current_crypto_algo); + return -EINVAL; + } + + // 需要显示配置RX侧的next_pn表 + if (sa->next_pn < sa->replay_window) { + macsec_err(nic_dev->lld_dev->dev, "Add sa failed, replay window(0x%x) is over next pn(0x%llx)", + sa->replay_window, sa->next_pn); + return -EINVAL; + } + sa->lowest_pn = sa->next_pn - sa->replay_window; + + return himacsec_create_sa(nic_dev, sa, direct); +} + +int macsec_cmd_add(struct hinic5_nic_dev *nic_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct himacsec_cmd_in *cmd_in = (struct himacsec_cmd_in *)buf_in; + himacsec_tool_obj_e obj = HIMACSEC_TOOL_OBJ_MAX; + int ret; + + // 1. 参数校验 + if (!nic_dev->macsec_res) { + macsec_err(nic_dev->lld_dev->dev, "Macsec resource is NULL"); + return -EINVAL; + } + + if (!buf_in) { + macsec_err(nic_dev->lld_dev->dev, "Buffer in can not be NULL when exec macsec add cmd"); + return -EINVAL; + } + + obj = cmd_in->hdr.obj_type; // 需要添加的类型 + switch (obj) { + case HIMACSEC_TOOL_OBJ_ENC_SC: + ret = himacsec_nictool_add_sc(nic_dev, cmd_in, in_size, MACSEC_OUTBOUND); + break; + case HIMACSEC_TOOL_OBJ_DEC_SC: + ret = himacsec_nictool_add_sc(nic_dev, cmd_in, in_size, MACSEC_INBOUND); + break; + case HIMACSEC_TOOL_OBJ_ENC_SA: + ret = himacsec_nictool_add_sa(nic_dev, cmd_in, in_size, MACSEC_OUTBOUND); + break; + case HIMACSEC_TOOL_OBJ_DEC_SA: + ret = himacsec_nictool_add_sa(nic_dev, cmd_in, in_size, MACSEC_INBOUND); + break; + default: + macsec_err(nic_dev->lld_dev->dev, "Unknown macsec object type:%d ", obj); + ret = -EINVAL; + break; + } + return ret; +} + +int macsec_cmd_del(struct hinic5_nic_dev *nic_dev, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size) +{ + struct himacsec_cmd_in *cmd_in = (struct himacsec_cmd_in *)buf_in; + u32 exp_in_size = sizeof(struct himacsec_cmd_del_in) + sizeof(struct himacsec_cmd_hdr); + 
himacsec_tool_obj_e obj = HIMACSEC_TOOL_OBJ_MAX; + struct himacsec_cmd_del_in *param = NULL; + int ret = 0; + + if (!buf_in || exp_in_size != in_size) { + macsec_err(nic_dev->lld_dev->dev, "Buffer in invalid when exec macsec del cmd, in_size=%d, exp_size=%d", + in_size, exp_in_size); + return -EINVAL; + } + + param = (struct himacsec_cmd_del_in *)cmd_in->buf; + obj = cmd_in->hdr.obj_type; + + switch (obj) { + case HIMACSEC_TOOL_OBJ_ENC_SC: + ret = himacsec_destroy_sc(nic_dev, param->sci, MACSEC_OUTBOUND); + break; + case HIMACSEC_TOOL_OBJ_DEC_SC: + ret = himacsec_destroy_sc(nic_dev, param->sci, MACSEC_INBOUND); + break; + case HIMACSEC_TOOL_OBJ_ENC_SA: + ret = himacsec_destroy_sa(nic_dev, param->sci, param->an, MACSEC_OUTBOUND); + break; + case HIMACSEC_TOOL_OBJ_DEC_SA: + ret = himacsec_destroy_sa(nic_dev, param->sci, param->an, MACSEC_INBOUND); + break; + default: + macsec_err(nic_dev->lld_dev->dev, "Unknown macsec object type:%d ", obj); + ret = -EINVAL; + break; + } + return ret; +} + +int himacsec_cmd_set_enc_sc(struct hinic5_nic_dev *nic_dev, struct himacsec_cmd_in *cmd_in, + u32 in_size) +{ + struct himacsec_sc *priv_sc_ptr = NULL; + struct himacsec_cmd_set_sc_in *param = NULL; + u32 exp_in_size = sizeof(struct himacsec_cmd_set_sc_in) + sizeof(struct himacsec_cmd_hdr); + macsec_sc_info_s enc_sc = {0}; + struct macsec_resource *macsec_res = nic_dev->macsec_res; + + if (!macsec_res) { + macsec_err(nic_dev->lld_dev->dev, "%s: MACsec resource is NULL", + nic_dev->netdev->name); + return -EINVAL; + } + + param = (struct himacsec_cmd_set_sc_in *)cmd_in->buf; + if (!param || exp_in_size != in_size) { + macsec_err(nic_dev->lld_dev->dev, "Buffer in invalid when exec macsec set cmd, in_size=%d, exp_size=%d", + in_size, exp_in_size); + return -EINVAL; + } + + priv_sc_ptr = himacsec_get_valid_dev_sc(nic_dev, param->sci, MACSEC_OUTBOUND); + if (!priv_sc_ptr) { + macsec_err(nic_dev->lld_dev->dev, "%s: Set encryption sc failed, sc not found", + nic_dev->netdev->name); + 
return -EINVAL; + } + + memcpy(&enc_sc, &priv_sc_ptr->info, sizeof(macsec_sc_info_s)); + // 使用sc副本数据,更新完成后自动更新驱动内部数据 + if ((param->set_flag_bitmap & HIMACSEC_SET_SC_ENCODING_SA_BIT_VAL) != 0) { + // sa 合法性校验 + if (!himacsec_get_valid_dev_sa(nic_dev, param->sci, + param->sc.encoding_sa, MACSEC_OUTBOUND)) { + macsec_err(nic_dev->lld_dev->dev, "%s: Update encodingsa failed, sa '%d' invalid", + nic_dev->netdev->name, param->sc.encoding_sa); + return -EINVAL; + } + // an 转换为 sa_index + enc_sc.encoding_sa = param->sc.encoding_sa % macsec_res->spec.max_sa; + } + + if ((param->set_flag_bitmap & HIMACSEC_SET_SC_PROTECT_FRAMES_BIT_VAL) != 0) + enc_sc.protect_frames = param->sc.protect_frames; + + if ((param->set_flag_bitmap & HIMACSEC_SET_SC_PROTECTION_MODE_BIT_VAL) != 0) + enc_sc.protection_mode = param->sc.protection_mode; + + return himacsec_set_sc(nic_dev, &enc_sc, MACSEC_OUTBOUND); +} + +int himacsec_cmd_set_dec_sc(struct hinic5_nic_dev *nic_dev, struct himacsec_cmd_in *cmd_in, + u32 in_size) +{ + struct himacsec_sc *priv_sc_ptr = NULL; + struct himacsec_cmd_set_sc_in *param = NULL; + u32 exp_in_size = sizeof(struct himacsec_cmd_set_sc_in) + sizeof(struct himacsec_cmd_hdr); + macsec_sc_info_s dec_sc = {0}; + + param = (struct himacsec_cmd_set_sc_in *)cmd_in->buf; + if (!param || exp_in_size != in_size) { + macsec_err(nic_dev->lld_dev->dev, "Buffer in invalid when exec macsec set cmd, in_size=%d, exp_size=%d", + in_size, exp_in_size); + return -EINVAL; + } + + priv_sc_ptr = himacsec_get_valid_dev_sc(nic_dev, param->sci, MACSEC_INBOUND); + if (!priv_sc_ptr) { + macsec_err(nic_dev->lld_dev->dev, "%s: Set decryption sc failed, sc not found", + nic_dev->netdev->name); + return -EINVAL; + } + memcpy(&dec_sc, &priv_sc_ptr->info, sizeof(macsec_sc_info_s)); + + if ((param->set_flag_bitmap & HIMACSEC_SET_SC_VALIDATE_FRAMES_BIT_VAL) != 0) + dec_sc.validate_frames = param->sc.validate_frames; + return himacsec_set_sc(nic_dev, &dec_sc, MACSEC_INBOUND); +} + +int 
macsec_cmd_set(struct hinic5_nic_dev *nic_dev, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size) +{ + struct himacsec_cmd_in *cmd_in = (struct himacsec_cmd_in *)buf_in; + himacsec_tool_obj_e obj = HIMACSEC_TOOL_OBJ_MAX; + int ret; + + if (!buf_in) { + macsec_err(nic_dev->lld_dev->dev, "Buffer in is NULL when exec macsec set cmd"); + return -EINVAL; + } + + obj = cmd_in->hdr.obj_type; + if (obj == HIMACSEC_TOOL_OBJ_ENC_SC) { + ret = himacsec_cmd_set_enc_sc(nic_dev, cmd_in, in_size); + } else if (obj == HIMACSEC_TOOL_OBJ_DEC_SC) { + ret = himacsec_cmd_set_dec_sc(nic_dev, cmd_in, in_size); + } else { + macsec_err(nic_dev->lld_dev->dev, "Unknown macsec object type:%d ", obj); + ret = -EINVAL; + } + + return ret; +} diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_protocol.c b/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_protocol.c new file mode 100644 index 00000000..efc76db6 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_protocol.c @@ -0,0 +1,926 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. + * Description: 对接 Linux 标准内核 macsec 适配 + * Create: 2024/03/01 + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [MACsec]" fmt + +#include <linux/netdevice.h> +#include "ossl_knl.h" + +#include "hinic5_srv_nic.h" +#include "hinic5_nic_dev.h" + +#include "hinic5_macsec_common.h" +#include "hinic5_macsec_dfx.h" +#include "hinic5_macsec_dev.h" + +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(6, 6, 0)) && \ + IS_ENABLED(CONFIG_MACSEC) && \ + defined(HAVE_NETDEVICE_MACSEC_OPS)) + +#include <net/macsec.h> + +#define htonll(x) (htonl(1) == 1 ? (x) : \ + ((((uint64_t)htonl((x) & 0xFFFFFFFF)) << 32) | \ + htonl((x) >> 32))) +#define ntohll(x) (ntohl(1) == 1 ? 
(x) : \ + ((((uint64_t)ntohl((x) & 0xFFFFFFFF)) << 32) | \ + ntohl((x) >> 32))) +#define VALIDATE_RET_OK 0x5a5aa5a5 +#define HIMACSEC_DEV_LINK_KIND "macsec" + +struct hinic5_nic_dev *macsec_get_nic_dev_by_netdev(struct net_device *netdev) +{ + struct hinic5_nic_dev *nic_dev = NULL; + + if (!netdev) { + pr_err("Get nic dev fail, netdev is NULL"); + return NULL; + } + + nic_dev = netdev_priv(netdev); + if (!nic_dev) { + pr_err("Get nic dev fail, nic_dev is NULL"); + return NULL; + } + return nic_dev; +} + +int macsec_base_validate(struct hinic5_nic_dev *nic_dev, struct macsec_context *ctx) +{ + if (!nic_dev || !nic_dev->macsec_res) { + pr_err("NIC device is NULL"); + return -ENODEV; + } +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) + if (ctx->prepare != 0) + return 0; +#endif + return VALIDATE_RET_OK; +} + +int macsec_sa_update_validate(struct hinic5_nic_dev *nic_dev, struct macsec_context *ctx, + struct himacsec_sa **priv_sa, crypt_direction_e direct, u64 sci) +{ + int ret = macsec_base_validate(nic_dev, ctx); + u8 assoc_num = ctx->sa.assoc_num; + + if (ret != VALIDATE_RET_OK) + return ret; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 5, 8) + if (ctx->sa.update_pn) { + macsec_err(nic_dev->lld_dev->dev, "Update sa failed, not support pn update"); + return -EINVAL; + } +#endif + + *priv_sa = himacsec_get_valid_dev_sa(nic_dev, sci, assoc_num, direct); + if (!*priv_sa) { + macsec_err(nic_dev->lld_dev->dev, "Update sa failed, sa does not exist, sci=%llx, an=0x%x", + sci, assoc_num); + return -EINVAL; + } + + return VALIDATE_RET_OK; +} + +int himacsec_sa_active_update(struct hinic5_nic_dev *nic_dev, + struct himacsec_sa *macsec_sa, u8 active) +{ + int ret; + u8 temp_active_status; + + // 1、命令前后,sa active 相同,不作处理 + if (macsec_sa->info.enable_receive == active) + return 0; + + // 2、改变了 sa enable 配置 + temp_active_status = macsec_sa->info.enable_receive; + macsec_sa->info.enable_receive = active; + ret = himacsec_cmd_exec_sa_op(nic_dev->lld_dev, &macsec_sa->info, 
MACSEC_CMD_DEC_SA_UPDATE);
+	if (ret != 0) {
+		macsec_sa->info.enable_receive = temp_active_status;
+		macsec_err(nic_dev->lld_dev->dev, "Update sa failed, ret=%d", ret);
+	}
+
+	return ret;
+}
+
+/* In-place byte reversal of 'array'; used to flip key/salt byte order.
+ * Fix: bail out for size < 2 - with size == 0 the unsigned "size - 1"
+ * wrapped to 0xFFFFFFFF and the loop read/wrote far out of bounds.
+ */
+void reverse_u8_array(u8 *array, u32 size)
+{
+	u32 left;
+	u32 right;
+
+	if (!array || size < 2)
+		return;
+
+	left = 0;
+	right = size - 1;
+	while (left < right) {
+		u8 temp = array[left];
+
+		array[left] = array[right];
+		array[right] = temp;
+		left++;
+		right--;
+	}
+}
+
+int macsec_secy_features_validate(struct macsec_context *ctx)
+{
+	const struct net_device *netdev = ctx->netdev;
+	const struct macsec_secy *secy = ctx->secy;
+
+	if (secy->icv_len != HIMACSEC_ICV_LEN) {
+		pr_err("%s: MACsec offload is supported only when icv_len is %d",
+		       netdev->name, HIMACSEC_ICV_LEN);
+		return -EINVAL;
+	}
+
+	if (secy->key_len != HIMACSEC_KEY_LENGTH_128 &&
+	    secy->key_len != HIMACSEC_KEY_LENGTH_256) {
+		pr_err("%s: MACsec offload is supported only when key_len is 128bit or 256bit",
+		       netdev->name);
+		return -EINVAL;
+	}
+
+	if (secy->validate_frames >= __MACSEC_VALIDATE_END) {
+		pr_err("%s: MACsec offload is supported only when validate value was legal",
+		       netdev->name);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* direct:true  macsec_key_length convert to reg_key_length;
+ *        false reg_key_length convert to macsec_key_length
+ */
+void macsec_adapt_convert_key_length(u8 *reg_key_length, u16 *macsec_key_length, bool direct)
+{
+	if (direct) {
+		// macsec_key_length convert to reg_key_length
+		if (*macsec_key_length == HIMACSEC_KEY_LENGTH_128) {
+			*reg_key_length = HIMACSEC_REG_KEY_LENGTH_128;
+		} else if (*macsec_key_length == HIMACSEC_KEY_LENGTH_256) {
+			*reg_key_length = HIMACSEC_REG_KEY_LENGTH_256;
+		} else {
+			/* macsec_secy_features_validate has checked that
+			 * macsec_key_length is 128 or 256.
+			 * Fix: log the offending input; the old message printed
+			 * *reg_key_length, which is the still-unset output
+			 * parameter in this branch.
+			 */
+			pr_err("Parse sa key length failed, macsec key length=%d", *macsec_key_length);
+		}
+	} else {
+		// reg_key_length convert to macsec_key_length
+		if (*reg_key_length == HIMACSEC_REG_KEY_LENGTH_128) {
+			*macsec_key_length = HIMACSEC_KEY_LENGTH_128;
+		
} else if (*reg_key_length == HIMACSEC_REG_KEY_LENGTH_256) { + *macsec_key_length = HIMACSEC_KEY_LENGTH_256; + } else { + // Read the exception value from the register, + // request does not return failure, fill in the exception value + *macsec_key_length = 0; + pr_err("Parse sa key length failed, reg key length=%d", *reg_key_length); + } + } +} + +void macsec_adapt_convert_validate_type(u8 *reg_validate_val, + enum macsec_validation_type *macsec_validation_val, + bool direct) +{ + if (direct) { + // macsec val to reg val + if (*macsec_validation_val < __MACSEC_VALIDATE_END) { + *reg_validate_val = (u8)(*macsec_validation_val) + 1; + return; + } + pr_err("Parse validate type failed, macsec validate=%d", *macsec_validation_val); + } else { + // reg val to macsec val + if (*reg_validate_val < (u8)VALIDATE_MODE_MAX) { + *macsec_validation_val = *reg_validate_val - 1; + return; + } + pr_err("Parse validate type failed, reg validate=%d", *reg_validate_val); + } +} + +void macsec_adapt_key_handle(struct macsec_context *ctx, struct himacsec_sa *sa, u32 key_len) +{ + memcpy(sa->info.sak, ctx->sa.key, HIMACSEC_MAX_SAK_KEY_LEN); + memset(ctx->sa.key, 0, HIMACSEC_MAX_SAK_KEY_LEN); + reverse_u8_array((u8 *)sa->info.sak, ctx->secy->key_len); + + // set 128bit key in high 128bit + if (key_len == HIMACSEC_KEY_LENGTH_128) { + memcpy(((u8 *)sa->info.sak + HIMACSEC_KEY_LENGTH_128), + sa->info.sak, HIMACSEC_KEY_LENGTH_128); + memset(sa->info.sak, 0, HIMACSEC_KEY_LENGTH_128); + } +} + +/* 算法只有 AES, length/xpn 使能在 secy */ +int macsec_adapt_add_tx_sa(struct macsec_context *ctx) +{ + struct net_device *netdev = ctx->netdev; + struct himacsec_sa enc_sa = {0}; + struct hinic5_nic_dev *nic_dev = macsec_get_nic_dev_by_netdev(netdev); + int ret; + + if (macsec_secy_features_validate(ctx) != 0) + return -EINVAL; + + // Adapt kernel 2 stage commit offload +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) + if (ctx->prepare != 0) + return 0; +#endif + + if (!nic_dev) { + pr_err("Add protocol 
encryption sa failed, nic device is NULL"); + return -ENODEV; + } + + // Sak param set + macsec_adapt_key_handle(ctx, &enc_sa, ctx->secy->key_len); + + // Key length set + macsec_adapt_convert_key_length(&enc_sa.info.current_key_length, &ctx->secy->key_len, true); + + enc_sa.info.sci = ntohll(ctx->secy->sci); + enc_sa.info.an = ctx->sa.assoc_num; + enc_sa.info.enable_transmit = ctx->sa.tx_sa->active; + enc_sa.info.pn_th = HIMACSEC_DEFAULT_PN_THRESHOLD; + enc_sa.info.next_pn = ctx->sa.tx_sa->next_pn; + + // The algorithm only has AES + enc_sa.info.current_crypto_algo = HIMACSEC_CRYPTO_ALGO_AES; + enc_sa.info.extended_pn_enable = (u8)ctx->secy->xpn; + + enc_sa.info.ssci = ntohl(ctx->sa.tx_sa->ssci); + + // Salt value parameter processing + memcpy(enc_sa.info.salt, ctx->sa.tx_sa->key.salt.bytes, MACSEC_SALT_LEN); + reverse_u8_array((u8 *)enc_sa.info.salt, MACSEC_SALT_LEN); + + ret = himacsec_create_sa(nic_dev, &enc_sa.info, MACSEC_OUTBOUND); + if (ret != 0) + macsec_err(nic_dev->lld_dev->dev, "%s: Add protocol encryption sa failed, ret=%d", + netdev->name, ret); + return ret; +} + +int macsec_adapt_del_tx_sa(struct macsec_context *ctx) +{ + struct net_device *netdev = ctx->netdev; + u64 sci = 0; + u8 assoc_num = 0; + int ret; + struct hinic5_nic_dev *nic_dev = macsec_get_nic_dev_by_netdev(netdev); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) + if (ctx->prepare != 0) + return 0; +#endif + + if (!nic_dev) { + pr_err("Del protocol encryption sa failed, nic device is NULL"); + return -ENODEV; + } + + sci = ntohll(ctx->secy->sci); + assoc_num = ctx->sa.assoc_num; + ret = himacsec_destroy_sa(nic_dev, sci, assoc_num, MACSEC_OUTBOUND); + if (ret != 0) + macsec_err(nic_dev->lld_dev->dev, "%s: Delete protocol encryption sa failed, ret=%d", + netdev->name, ret); + return ret; +} + +int macsec_adapt_add_rx_sa(struct macsec_context *ctx) +{ + struct net_device *netdev = ctx->netdev; + struct himacsec_sa dec_sa = {0}; + struct hinic5_nic_dev *nic_dev = 
macsec_get_nic_dev_by_netdev(netdev); + int ret; + + if (macsec_secy_features_validate(ctx) != 0) + return -EINVAL; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) + if (ctx->prepare != 0) + return 0; +#endif + + if (!nic_dev) { + pr_err("Add protocol decryption sa failed, macsec device is NULL"); + return -ENODEV; + } + + // Sak param set + macsec_adapt_key_handle(ctx, &dec_sa, ctx->secy->key_len); + + // Set key length + macsec_adapt_convert_key_length(&dec_sa.info.current_key_length, &ctx->secy->key_len, true); + + dec_sa.info.sci = ntohll(ctx->sa.rx_sa->sc->sci); + dec_sa.info.an = ctx->sa.assoc_num; + dec_sa.info.next_pn = ctx->sa.rx_sa->next_pn; + dec_sa.info.replay_protect = ctx->secy->replay_protect; + dec_sa.info.replay_window = ctx->secy->replay_window; + dec_sa.info.enable_receive = ctx->sa.rx_sa->active; + + // Only support AES + dec_sa.info.current_crypto_algo = HIMACSEC_CRYPTO_ALGO_AES; + dec_sa.info.extended_pn_enable = (u8)ctx->secy->xpn; + + // Salt value parameter processing + dec_sa.info.ssci = ntohl(ctx->sa.rx_sa->ssci); + memcpy(dec_sa.info.salt, ctx->sa.rx_sa->key.salt.bytes, MACSEC_SALT_LEN); + reverse_u8_array((u8 *)dec_sa.info.salt, MACSEC_SALT_LEN); + + /* TODO set lowest_pn */ + + ret = himacsec_create_sa(nic_dev, &dec_sa.info, MACSEC_INBOUND); + if (ret != 0) + macsec_err(nic_dev->lld_dev->dev, "%s: Add protocol decryption sa failed, ret=%d", + netdev->name, ret); + return ret; +} + +int macsec_adapt_del_rx_sa(struct macsec_context *ctx) +{ + struct net_device *netdev = ctx->netdev; + struct hinic5_nic_dev *nic_dev = macsec_get_nic_dev_by_netdev(netdev); + u64 sci; + int ret; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) + if (ctx->prepare != 0) + return 0; +#endif + + if (!nic_dev) { + pr_err("Del protocol decryption sa failed, nic device is NULL"); + return -ENODEV; + } + + sci = ntohll(ctx->sa.rx_sa->sc->sci); + ret = himacsec_destroy_sa(nic_dev, sci, ctx->sa.assoc_num, MACSEC_INBOUND); + if (ret != 0) + 
macsec_err(nic_dev->lld_dev->dev, "%s Delete protocol decryption sa failed, ret=%d", + netdev->name, ret); + return ret; +} + +int macsec_adapt_add_rx_sc(struct macsec_context *ctx) +{ + struct net_device *netdev = ctx->netdev; + struct himacsec_sc dec_sc = {0}; + struct hinic5_nic_dev *nic_dev = macsec_get_nic_dev_by_netdev(netdev); + int ret; + + if (macsec_secy_features_validate(ctx) != 0) + return -EINVAL; + + // adapt kernel 2 stage commit offload +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) + if (ctx->prepare != 0) + return 0; +#endif + + if (!nic_dev) { + pr_err("Add protocol decryption sc failed, nic device is NULL"); + return -ENODEV; + } + + dec_sc.info.sci = ntohll(ctx->rx_sc->sci); + macsec_adapt_convert_validate_type(&dec_sc.info.validate_frames, + &ctx->secy->validate_frames, true); + + ret = himacsec_create_sc(nic_dev, &dec_sc.info, MACSEC_INBOUND); + if (ret != 0) + macsec_err(nic_dev->lld_dev->dev, "%s: Add protocol decryption sc failed, ret=%d", + netdev->name, ret); + return ret; +} + +static int macsec_update_rxsa_by_rxsc(const struct macsec_rx_sc *ctx_rx_sc, + struct hinic5_nic_dev *nic_dev, u64 sci) +{ + const struct macsec_rx_sa *rx_sa = NULL; + struct himacsec_sa *dec_sa = NULL; + int ret, i; + + for (i = 0; i < MACSEC_NUM_AN; i++) { + rx_sa = ctx_rx_sc->sa[i]; + if (!rx_sa) + continue; + + dec_sa = himacsec_get_valid_dev_sa(nic_dev, sci, i, MACSEC_INBOUND); + if (!dec_sa) { + macsec_err(nic_dev->lld_dev->dev, "Update decryption sc failed, an=%d", i); + return -EINVAL; + } + + if (ctx_rx_sc->active) { + /* rx_sc active, the state of SA is determined + * by the SA state in the standard kernel + */ + ret = himacsec_sa_active_update(nic_dev, dec_sa, rx_sa->active); + } else { + /* rx_sc not active, all SA off */ + ret = himacsec_sa_active_update(nic_dev, dec_sa, (u8)ctx_rx_sc->active); + } + + if (ret != 0) + return ret; + } + return 0; +} + +int macsec_adapt_update_rx_sc(struct macsec_context *ctx) +{ + const struct macsec_rx_sc 
*ctx_rx_sc = ctx->rx_sc; + struct net_device *netdev = ctx->netdev; + struct hinic5_nic_dev *nic_dev = macsec_get_nic_dev_by_netdev(netdev); + struct himacsec_sc *dec_sc = NULL; + u64 sci; + int ret; + + if (!ctx_rx_sc) { + pr_info("%s, nothing changed", __func__); + return 0; + } + + ret = macsec_base_validate(nic_dev, ctx); + if (ret != VALIDATE_RET_OK) { + pr_err("%s, macsec_base_validate fail", __func__); + return ret; + } + + sci = ntohll(ctx->rx_sc->sci); + dec_sc = himacsec_get_valid_dev_sc(nic_dev, sci, MACSEC_INBOUND); + if (!dec_sc) { + macsec_err(nic_dev->lld_dev->dev, "Update decryption sc failed, not found sc, sci=%llx", + sci); + return -EINVAL; + } + + ret = macsec_update_rxsa_by_rxsc(ctx_rx_sc, nic_dev, sci); + if (ret != 0) { + macsec_err(nic_dev->lld_dev->dev, "Update decryption sc failed, ret=%d", ret); + return ret; + } + return 0; +} + +int macsec_adapt_del_rx_sc(struct macsec_context *ctx) +{ + struct net_device *netdev = ctx->netdev; + struct hinic5_nic_dev *nic_dev = macsec_get_nic_dev_by_netdev(netdev); + u64 sci = 0; + int ret; + + // adapt kernel 2 stage commit offload +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) + if (ctx->prepare != 0) + return 0; +#endif + + if (!nic_dev) { + pr_err("Del protocol decryption sc failed, nic device is NULL"); + return -ENODEV; + } + + sci = ntohll(ctx->rx_sc->sci); + ret = himacsec_destroy_sc(nic_dev, sci, MACSEC_INBOUND); + if (ret != 0) + macsec_err(nic_dev->lld_dev->dev, "%s: Delete protocol decryption sc failed, ret=%d", + netdev->name, ret); + return ret; +} + +int macsec_adapt_add_secy(struct macsec_context *ctx) +{ + struct net_device *netdev = ctx->netdev; + struct himacsec_sc tx_sc = {0}; + struct hinic5_nic_dev *nic_dev = macsec_get_nic_dev_by_netdev(netdev); + struct macsec_resource *macsec_res = NULL; + int ret; + + if (macsec_secy_features_validate(ctx) != 0) + return -EINVAL; + + // adapt kernel 2 stage commit offload +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) + if (ctx->prepare 
!= 0)
+		return 0;
+#endif
+
+	if (!nic_dev) {
+		pr_err("Add protocol secy failed, nic device is NULL");
+		return -ENODEV;
+	}
+
+	macsec_res = nic_dev->macsec_res;
+	if (!macsec_res) {
+		macsec_err(nic_dev->lld_dev->dev, "Add protocol secy failed, macsec resource is NULL");
+		return -ENODEV;
+	}
+	/* Record the ifindex of the offloaded child (macsec/VF) device.
+	 * NOTE(review): offload_child_dev_idx is indexed without a bounds
+	 * check on offload_dev_num - confirm the array is sized for the
+	 * maximum number of offload devices.
+	 */
+	if (ctx->secy->netdev)
+		macsec_res->offload_child_dev_idx[macsec_res->offload_dev_num++] =
+			ctx->secy->netdev->ifindex;
+
+	tx_sc.info.sci = ntohll(ctx->secy->sci);
+	tx_sc.info.use_es_enable = ctx->secy->tx_sc.end_station;
+	tx_sc.info.use_scb_enable = ctx->secy->tx_sc.scb;
+	tx_sc.info.include_sci_enable = ctx->secy->tx_sc.send_sci;
+	/* Whether frames are protected (integrity-only still counts) */
+	tx_sc.info.protect_frames = ctx->secy->protect_frames;
+	/* 0: integrity only
+	 * 1: confidentiality; offset protection cannot be configured
+	 */
+	tx_sc.info.protection_mode = ctx->secy->tx_sc.encrypt;
+
+	/* Hand over to the service layer */
+	ret = himacsec_create_sc(nic_dev, &tx_sc.info, MACSEC_OUTBOUND);
+	if (ret != 0)
+		macsec_err(nic_dev->lld_dev->dev, "%s: Add protocol encryption sc failed, ret=%d",
+			   netdev->name, ret);
+	return ret;
+}
+
+/* Destroy the driver-side rx sc for every rx_sc attached to the secy.
+ * Returns 0 when all succeeded, otherwise the last failing error code.
+ */
+int macsec_adapt_clean_up_rx_sc(struct net_device *netdev, struct hinic5_nic_dev *nic_dev,
+				struct macsec_context *ctx)
+{
+	struct macsec_rx_sc *rx_sc = NULL;
+	u64 rx_sci;
+	int ret = 0;
+	int err;
+
+	if (!ctx->secy || !ctx->secy->rx_sc) {
+		pr_info("%s: Delete protocol decryption sc skip, decryption sc not exist",
+			netdev->name);
+		return 0;
+	}
+
+	rx_sc = ctx->secy->rx_sc;
+	while (rx_sc) {
+		/* Fix: use the sci of the sc being visited; the old code read
+		 * ctx->secy->rx_sc->sci and so tried to delete the first sc's
+		 * sci on every iteration, leaking the remaining scs.
+		 */
+		rx_sci = ntohll(rx_sc->sci);
+		/* Fix: keep the last error instead of OR-ing return codes,
+		 * which mangles negative errno values into garbage.
+		 */
+		err = himacsec_destroy_sc(nic_dev, rx_sci, MACSEC_INBOUND);
+		if (err != 0) {
+			ret = err;
+			macsec_err(nic_dev->lld_dev->dev, "%s: Delete decryption sc failed, sci=%llx, ret=%d",
+				   netdev->name, rx_sci, err);
+		}
+		rx_sc = rx_sc->next;
+	}
+	return ret;
+}
+
+int macsec_adapt_del_secy(struct macsec_context *ctx)
+{
+	struct net_device *netdev = ctx->netdev;
+	struct hinic5_nic_dev *nic_dev = macsec_get_nic_dev_by_netdev(netdev);
+	u64 tx_sci = ntohll(ctx->secy->sci);
+	int 
ret; + + // adapt kernel 2 stage commit offload +#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 1, 0) + if (ctx->prepare != 0) + return 0; +#endif + + if (!nic_dev) { + pr_err("Delete protocol secy failed, macsec device is NULL"); + return -ENODEV; + } + + // delete macsec tx config + ret = himacsec_destroy_sc(nic_dev, tx_sci, MACSEC_OUTBOUND); + if (ret != 0) { + macsec_err(nic_dev->lld_dev->dev, "%s: Delete protocol encryption sc failed, sci=%llx, ret=%d", + netdev->name, tx_sci, ret); + return ret; + } + + // delete macsec rx config + ret = macsec_adapt_clean_up_rx_sc(netdev, nic_dev, ctx); + if (ret != 0) + macsec_err(nic_dev->lld_dev->dev, "%s: Delete protocol decryption sc failed, ret=%d", + netdev->name, ret); + + return ret; +} + +int macsec_adapt_update_tx_sa(struct macsec_context *ctx) +{ + const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa; + struct net_device *netdev = ctx->netdev; + struct hinic5_nic_dev *nic_dev = macsec_get_nic_dev_by_netdev(netdev); + struct himacsec_sa *macsec_sa = NULL; + u64 tx_sci = ntohll(ctx->secy->sci); + int ret; + u8 active; + + if (!nic_dev) { + pr_err("Update encryption sa failed, nic device is NULL"); + return -ENODEV; + } + + ret = macsec_sa_update_validate(nic_dev, ctx, &macsec_sa, MACSEC_OUTBOUND, tx_sci); + if (ret != VALIDATE_RET_OK) + return ret; + + // 1、命令前后,sa active 相同,不作处理 + if (macsec_sa->info.enable_transmit == (u8)ctx_tx_sa->active) + return 0; + + // 2、改变了 sa enable 配置 + active = macsec_sa->info.enable_transmit; + macsec_sa->info.enable_transmit = (u8)ctx_tx_sa->active; + ret = himacsec_cmd_exec_sa_op(nic_dev->lld_dev, &macsec_sa->info, MACSEC_CMD_ENC_SA_UPDATE); + if (ret != 0) { + macsec_sa->info.enable_transmit = active; + macsec_err(nic_dev->lld_dev->dev, "Update encryption sa failed, ret=%d", ret); + } + + return ret; +} + +int macsec_adapt_update_rx_sa(struct macsec_context *ctx) +{ + const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa; + struct net_device *netdev = ctx->netdev; + struct 
hinic5_nic_dev *nic_dev = macsec_get_nic_dev_by_netdev(netdev); + struct himacsec_sa *macsec_sa = NULL; + u64 rx_sci = ntohll(ctx_rx_sa->sc->sci); + int ret; + + ret = macsec_sa_update_validate(nic_dev, ctx, &macsec_sa, MACSEC_INBOUND, rx_sci); + if (ret != VALIDATE_RET_OK) + return ret; + + return himacsec_sa_active_update(nic_dev, macsec_sa, (u8)ctx_rx_sa->active); +} + +static int macsec_update_txsa_by_txsc(const struct macsec_tx_sc *ctx_tx_sc, + struct hinic5_nic_dev *nic_dev, u64 sci) +{ + struct macsec_tx_sa *tx_sa = NULL; + struct himacsec_sa *enc_sa = NULL; + int ret, i; + + // 遍历更新ctx_tx_sa + for (i = 0; i < MACSEC_NUM_AN; i++) { + tx_sa = ctx_tx_sc->sa[i]; + if (!tx_sa) + continue; + + enc_sa = himacsec_get_valid_dev_sa(nic_dev, sci, i, MACSEC_OUTBOUND); + if (!enc_sa) { + macsec_err(nic_dev->lld_dev->dev, "%s, get sa failed, an= %d", __func__, i); + return -EINVAL; + } + + if (ctx_tx_sc->active) + ret = himacsec_sa_active_update(nic_dev, enc_sa, tx_sa->active); + else + ret = himacsec_sa_active_update(nic_dev, enc_sa, (u8)ctx_tx_sc->active); + if (ret != 0) + return ret; + } + return 0; +} + +static int macsec_adapt_update_tx_sc(struct macsec_context *ctx) +{ + const struct macsec_tx_sc *ctx_tx_sc = &ctx->secy->tx_sc; + struct net_device *netdev = ctx->netdev; + struct hinic5_nic_dev *nic_dev = macsec_get_nic_dev_by_netdev(netdev); + struct himacsec_sc *enc_sc = NULL; + int ret; + u64 sci; + + if (!ctx_tx_sc) { + pr_info("%s, nothing changed", __func__); + return 0; + } + + ret = macsec_base_validate(nic_dev, ctx); + if (ret != VALIDATE_RET_OK) { + pr_err("%s, macsec_base_validate fail, ret:%d", __func__, ret); + return ret; + } + + sci = ntohll(ctx->secy->sci); + enc_sc = himacsec_get_valid_dev_sc(nic_dev, sci, MACSEC_OUTBOUND); + if (!enc_sc) { + macsec_err(nic_dev->lld_dev->dev, "%s, update encryption sc failed, sc not found, sci=0x%llx", + __func__, sci); + return -EINVAL; + } + + if (ctx_tx_sc->encrypt) { + macsec_info(nic_dev->lld_dev->dev, "%s, 
encrypt is true, set protection_mode CONFIDENTIALITY", + __func__); + enc_sc->info.protection_mode = PROTECTION_MODE_CONFIDENTIALITY; + } + + // 更新encoding_sa + if (enc_sc->info.encoding_sa != ctx_tx_sc->encoding_sa) { + enc_sc->info.encoding_sa = ctx_tx_sc->encoding_sa; + ret = himacsec_set_sc(nic_dev, &enc_sc->info, MACSEC_OUTBOUND); + if (ret != 0) { + macsec_err(nic_dev->lld_dev->dev, "%s, himacsec_set_sc(sci=0x%llx) failed, ret: %d", + __func__, sci, ret); + return ret; + } + } + + ret = macsec_update_txsa_by_txsc(ctx_tx_sc, nic_dev, sci); + if (ret != 0) { + macsec_err(nic_dev->lld_dev->dev, "%s, update encryption sc failed, ret= %d", + __func__, ret); + return ret; + } + + return 0; +} + +int macsec_adapt_update_secy(struct macsec_context *ctx) +{ + /* Secy 涉及到所有 sc、sa 的配置,流程复杂,暂时不允许做修改 */ + int ret; + + if (!macsec_secy_features_validate(ctx)) + return -EINVAL; + + ret = macsec_adapt_update_tx_sc(ctx); + if (ret != 0) { + pr_err("%s update tx_sc failed, ret=%d", __func__, ret); + return ret; + } + + ret = macsec_adapt_update_rx_sc(ctx); + if (ret != 0) { + pr_err("%s update rx_sc failed, ret=%d", __func__, ret); + return ret; + } + + return 0; +} + +static const struct macsec_ops macsec_offload_ops = { + /* Secy */ + .mdo_add_secy = macsec_adapt_add_secy, + .mdo_upd_secy = macsec_adapt_update_secy, + .mdo_del_secy = macsec_adapt_del_secy, + + /* Security channels */ + .mdo_add_rxsc = macsec_adapt_add_rx_sc, + .mdo_upd_rxsc = macsec_adapt_update_rx_sc, + .mdo_del_rxsc = macsec_adapt_del_rx_sc, + + /* Security associations */ + .mdo_add_rxsa = macsec_adapt_add_rx_sa, + .mdo_upd_rxsa = macsec_adapt_update_rx_sa, + .mdo_del_rxsa = macsec_adapt_del_rx_sa, + .mdo_add_txsa = macsec_adapt_add_tx_sa, + .mdo_upd_txsa = macsec_adapt_update_tx_sa, + .mdo_del_txsa = macsec_adapt_del_tx_sa, +}; + +void himacsec_offload_init(struct hinic5_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + struct macsec_resource *macsec_res = nic_dev->macsec_res; + + if 
(macsec_res->spec.max_sa == 2) { /* 2 SA模式 */ + macsec_info(nic_dev->lld_dev->dev, "%s: Sc mode is 0, do not support offload protocol macsec", + netdev->name); + return; + } + + netdev->macsec_ops = &macsec_offload_ops; + netdev->features |= NETIF_F_HW_MACSEC; +} + +bool himacsec_check_offload(u32 ifindex, u32 *offload) +{ + u32 i; + + for (i = 0; i < MACSEC_SC_NUM; i++) + if (offload[i] == ifindex) + return true; + return false; +} + +int himacsec_get_offload_idx(u32 ifindex, u32 *offload, u32 *index) +{ + u32 i; + + for (i = 0; i < MACSEC_SC_NUM; i++) { + if (offload[i] == ifindex) { + *index = i; + return 0; + } + } + return -1; +} + +/* 因为在himacsec_dev里以数组的形式保存了vf的ifindex, + * 所以需要在删除的时候从当前待删除的vf索引开始进行前移 + */ +int himacsec_dev_del_offload(u32 ifindex, struct macsec_resource *macsec_res) +{ + u32 index = 0; + int ret = 0; + + if (macsec_res->offload_dev_num == 0) + return 0; + + ret = himacsec_get_offload_idx(ifindex, macsec_res->offload_child_dev_idx, &index); + if (ret < 0) + return ret; + + for (; index < macsec_res->offload_dev_num - 1; index++) + macsec_res->offload_child_dev_idx[index] = + macsec_res->offload_child_dev_idx[index + 1]; + // offload的vf数量减1 + macsec_res->offload_dev_num--; + return ret; +} + +void himacsec_remove_macsec_offload(struct net_device *dev, struct macsec_resource *macsec_res, + struct hinic5_lld_dev *lld_dev) +{ + int ret = 0; + + struct net_device *lower_dev; + struct list_head *iter; + // 遍历所有下层设备 + rcu_read_lock(); + netdev_for_each_upper_dev_rcu(dev, lower_dev, iter) { + // 检查是否是macsec设备 + if (lower_dev->rtnl_link_ops && + lower_dev->rtnl_link_ops->kind && + strcmp(lower_dev->rtnl_link_ops->kind, HIMACSEC_DEV_LINK_KIND) == 0) { + macsec_info(lld_dev->dev, "Unregistering macsec offload for dev: %s, lower_dev %s, pf_ifindex: %d, vf_ifindex: %d ", + dev->name, lower_dev->name, dev->ifindex, lower_dev->ifindex); + + // 卸载链路,删除vf设备 + if (himacsec_check_offload((u32)lower_dev->ifindex, + macsec_res->offload_child_dev_idx)) { + 
macsec_info(lld_dev->dev, "dev %s is offload , delete link", + lower_dev->name); + rtnl_lock(); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0)) /* LINUX_VERSION_CODE < 6.6.0 */ + rtnl_delete_link(lower_dev); +#else /* LINUX_VERSION_CODE >= 6.6.0 */ + rtnl_delete_link(lower_dev, 0, NULL); +#endif + rtnl_unlock(); + // 删除offload_child_dev_idx中记录的vf_ifindex + ret = himacsec_dev_del_offload(lower_dev->ifindex, macsec_res); + if (ret < 0) { + macsec_err(lld_dev->dev, "delete child dev index fail: if_index is not in offload list, if_index: %d", + lower_dev->ifindex); + break; + } + } else { + // 非卸载链路,直接返回 + //(协议栈创建的vf链路,不需要删除) + macsec_info(lld_dev->dev, "dev %s is not offload , skip delete link", + lower_dev->name); + } + } else { + macsec_info(lld_dev->dev, "%s is not a macsec device\n", lower_dev->name); + } + } + + rcu_read_unlock(); +} + +void himacsec_offload_deinit(struct hinic5_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + struct macsec_resource *macsec_res = nic_dev->macsec_res; + + if (!netdev->macsec_ops) + return; + + netdev->macsec_ops = NULL; + netdev->features &= ~NETIF_F_HW_MACSEC; + + himacsec_remove_macsec_offload(netdev, macsec_res, nic_dev->lld_dev); +} + +#else + +void himacsec_offload_init(struct hinic5_nic_dev *nic_dev) +{ +} + +void himacsec_offload_deinit(struct hinic5_nic_dev *nic_dev) +{ +} + +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_service.c b/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_service.c new file mode 100644 index 00000000..0ef45465 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/macsec/hinic5_macsec_service.c @@ -0,0 +1,401 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. 
+ * Description: macsec 业务代码 + * Create: 2024/03/01 + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": [MACsec]" fmt + +#include "ossl_knl.h" + +#include "hinic5_srv_nic.h" +#include "hinic5_nic_dev.h" + +#include "macsec_mpu_cmd.h" +#include "hinic5_macsec_dfx.h" +#include "hinic5_macsec_common.h" +#include "hinic5_macsec_dev.h" + +int himacsec_get_sci_port(u8 *port, u8 *cos, u64 sci, struct macsec_resource *macsec_res) +{ + himacsec_spec_s *spec = &macsec_res->spec; + u8 port_cos = (u8)sci; + + *port = port_cos / (spec->max_port_sc); + *cos = port_cos % (spec->max_port_sc); + return 0; +} + +/* 获取sci对应的 sc: + * 1.解析sci,获得sc_index. + * 2.根据数组下标返回对应地址. + */ +struct himacsec_sc *himacsec_get_dev_sc(struct hinic5_nic_dev *nic_dev, crypt_direction_e direct) +{ + u32 port_id = 0; + + if (nic_dev->macsec_res) + port_id = nic_dev->macsec_res->function_port; + else + macsec_err(nic_dev->lld_dev->dev, "%s: MACsec resource is NULL", + nic_dev->netdev->name); + + return get_g_macsec_port_res(direct, port_id); +} + +/* 获取sci对应的sc,并校验该sc是否与参数sci内容一致: + * 1.获取sci对应sc_index的sc内容. + * 2.检查该sc是否有效. + */ +struct himacsec_sc *himacsec_get_valid_dev_sc(struct hinic5_nic_dev *nic_dev, + u64 sci, crypt_direction_e direct) +{ + struct himacsec_sc *knl_sc = NULL; + u64 priv_sci = 0; // The sci of target sc index'sc + u32 sc_status = SC_STATUS_MAX; + + // 1. get sc + knl_sc = himacsec_get_dev_sc(nic_dev, direct); + if (!knl_sc) + return NULL; + + sc_status = knl_sc->status.status.sc; + priv_sci = knl_sc->info.sci; + + // 2. 
check sc + if ((MACSEC_SC_STATUS_VALID(sc_status) == 0) || sci != priv_sci) { + macsec_info(nic_dev->lld_dev->dev, "%s: Can not find kernel device sc, direct=0x%x, target sci=%llx, sc status=%d", + nic_dev->netdev->name, direct, priv_sci, sc_status); + return NULL; + } + + return knl_sc; +} + +struct himacsec_sa *himacsec_get_dev_sa(struct hinic5_nic_dev *nic_dev, u64 sci, + u8 an, crypt_direction_e direct) +{ + struct himacsec_sc *sc = NULL; + u32 sa_index = 0; + struct macsec_resource *macsec_res = nic_dev->macsec_res; + + if (!macsec_res) { + macsec_err(nic_dev->lld_dev->dev, "%s: MACsec resource is NULL", + nic_dev->netdev->name); + return NULL; + } + + sc = himacsec_get_valid_dev_sc(nic_dev, sci, direct); + if (!sc) { + macsec_err(nic_dev->lld_dev->dev, "%s: Get kernel device sa failed, sc not found, direct=0x%x, sci=%llx, an=%d", + nic_dev->netdev->name, direct, sci, an); + return NULL; + } + sa_index = an % macsec_res->spec.max_sa; + return &sc->sa[sa_index]; +} + +struct himacsec_sa *himacsec_get_valid_dev_sa(struct hinic5_nic_dev *nic_dev, + u64 sci, u8 an, crypt_direction_e direct) +{ + struct himacsec_sa *sa = NULL; + + sa = himacsec_get_dev_sa(nic_dev, sci, an, direct); + if (!sa) + return NULL; + + if ((MACSEC_SA_STATUS_VALID(sa->status.status.sa) == 0) || an != sa->info.an) { + macsec_info(nic_dev->lld_dev->dev, "%s: Get kernel device sa failed, direct=0x%x, sci=%llx, target an=%d, already exist an=%d", + nic_dev->netdev->name, direct, sci, an, sa->info.an); + return NULL; + } + + return sa; +} + +int himacsec_del_sa(struct hinic5_nic_dev *nic_dev, u64 sci, u8 assoc_num, crypt_direction_e direct) +{ + macsec_sa_info_s sa_info = {0}; + struct net_device *netdev = nic_dev->netdev; + struct himacsec_sa *priv_sa_ptr = NULL; + macsec_mbox_sa_op_cmd_e sa_op = (direct == MACSEC_OUTBOUND) + ? 
MACSEC_CMD_ENC_SA_DELETE + : MACSEC_CMD_DEC_SA_DELETE; + int ret; + + priv_sa_ptr = himacsec_get_valid_dev_sa(nic_dev, sci, assoc_num, direct); + if (!priv_sa_ptr) { + macsec_err(nic_dev->lld_dev->dev, "%s: Delete sa failed, sa not found, direct=0x%x", + netdev->name, direct); + return -ENOENT; + } + + sa_info.sci = sci; + sa_info.an = assoc_num; + ret = himacsec_cmd_exec_sa_op(nic_dev->lld_dev, &sa_info, sa_op); + if (ret != 0) { + macsec_err(nic_dev->lld_dev->dev, "%s: Exec cmd sc delete failed, direct=0x%x, ret=%d", + netdev->name, direct, ret); + return ret; + } + + memset(priv_sa_ptr, 0, sizeof(struct himacsec_sa)); + macsec_info(nic_dev->lld_dev->dev, "%s: Delete sa success, direct=0x%x, sci=%llx, an=%d", + netdev->name, direct, sci, assoc_num); + return ret; +} + +int himacsec_add_sa(struct hinic5_nic_dev *nic_dev, macsec_sa_info_s *sa_info, + crypt_direction_e direct) +{ + struct net_device *netdev = nic_dev->netdev; + struct himacsec_sa *priv_sa_ptr = NULL; + macsec_mbox_sa_op_cmd_e sa_op = (direct == MACSEC_OUTBOUND) + ? 
MACSEC_CMD_ENC_SA_CREATE + : MACSEC_CMD_DEC_SA_CREATE; + int ret; + + priv_sa_ptr = himacsec_get_dev_sa(nic_dev, sa_info->sci, sa_info->an, direct); + if (!priv_sa_ptr) { + macsec_err(nic_dev->lld_dev->dev, "%s: Add sa failed, sc not found", netdev->name); + return -EINVAL; + } + + ret = himacsec_cmd_exec_sa_op(nic_dev->lld_dev, sa_info, sa_op); + if (ret != 0) { + macsec_err(nic_dev->lld_dev->dev, "%s: Exec cmd sa create failed, direct=0x%x, ret=%d", + netdev->name, direct, ret); + return ret; + } + + priv_sa_ptr->status.status.sa = SA_STATUS_CREATED; + memcpy(&priv_sa_ptr->info, sa_info, sizeof(macsec_sa_info_s)); + macsec_info(nic_dev->lld_dev->dev, "%s: Add sa success, direct=0x%x, sci=%llx, an=%d", + netdev->name, direct, sa_info->sci, sa_info->an); + himacsec_dfx_show_sa(nic_dev, sa_info, direct); + return ret; +} + +int himacsec_create_sc(struct hinic5_nic_dev *nic_dev, macsec_sc_info_s *sc_info, + crypt_direction_e direct) +{ + struct himacsec_sc *priv_sc_ptr = NULL; + struct net_device *netdev = nic_dev->netdev; + macsec_mbox_sc_op_cmd_e sc_op = (direct == MACSEC_OUTBOUND) + ? MACSEC_CMD_ENC_SC_CREATE + : MACSEC_CMD_DEC_SC_CREATE; + int ret; + + // 1. get target sc_index's data + priv_sc_ptr = himacsec_get_dev_sc(nic_dev, direct); + if (!priv_sc_ptr) { + macsec_err(nic_dev->lld_dev->dev, "Add sc failed, priv_sc_ptr is NULL"); + return -EINVAL; + } + + // 2. check target sc_idnex's has valid sc + if (MACSEC_SC_STATUS_VALID(priv_sc_ptr->status.status.sc) != 0) { + macsec_err(nic_dev->lld_dev->dev, "%s: Add sc failed, sc already exists, direct=0x%x", + netdev->name, direct); + return -EEXIST; + } + + // 3. 
create sc + ret = himacsec_cmd_exec_sc_op(nic_dev->lld_dev, sc_info, sc_op); + if (ret != 0) { + macsec_err(nic_dev->lld_dev->dev, "%s: Exec cmd sc create failed, direct=0x%x, ret=%d", + netdev->name, direct, ret); + return ret; + } + + (void)memcpy(&priv_sc_ptr->info, sc_info, sizeof(macsec_sc_info_s)); + priv_sc_ptr->status.status.sc = SC_STATUS_CREATED; + macsec_info(nic_dev->lld_dev->dev, "%s: Add encryption sc success, sci=%llx", + netdev->name, sc_info->sci); + himacsec_dfx_show_sc(nic_dev, &priv_sc_ptr->info, direct); + return ret; +} + +int himacsec_destroy_sc(struct hinic5_nic_dev *nic_dev, u64 sci, crypt_direction_e direct) +{ + struct net_device *netdev = nic_dev->netdev; + struct himacsec_sc *priv_sc_ptr = NULL; + macsec_sc_info_s sc_info = {0}; + macsec_mbox_sc_op_cmd_e sc_op = (direct == MACSEC_OUTBOUND) + ? MACSEC_CMD_ENC_SC_DELETE + : MACSEC_CMD_DEC_SC_DELETE; + int ret; + + // 1. get and check SCI corresponding to scindex + priv_sc_ptr = himacsec_get_valid_dev_sc(nic_dev, sci, direct); + if (!priv_sc_ptr) { + macsec_err(nic_dev->lld_dev->dev, "%s: Exec cmd sc delete failed, sc not found, direct=0x%x", + netdev->name, direct); + return -EINVAL; + } + + sc_info.sci = sci; + ret = himacsec_cmd_exec_sc_op(nic_dev->lld_dev, &sc_info, sc_op); + if (ret != 0) { + macsec_err(nic_dev->lld_dev->dev, "%s: Exec cmd sc delete failed, direct=0x%x, ret=%d", + netdev->name, direct, ret); + return ret; + } + + memset(priv_sc_ptr, 0, sizeof(struct himacsec_sc)); + macsec_info(nic_dev->lld_dev->dev, "%s: Delete sc success, direct=0x%x, sci=%llx", + netdev->name, direct, sci); + return 0; +} + +int himacsec_set_sc(struct hinic5_nic_dev *nic_dev, macsec_sc_info_s *sc_info, + crypt_direction_e direct) +{ + struct himacsec_sc *priv_sc_ptr = NULL; + struct net_device *netdev = nic_dev->netdev; + macsec_mbox_sc_op_cmd_e sc_op = (direct == MACSEC_OUTBOUND) + ? 
MACSEC_CMD_ENC_SC_UPDATE + : MACSEC_CMD_DEC_SC_UPDATE; + int ret; + + // 取出驱动内数据准备回写 + priv_sc_ptr = himacsec_get_valid_dev_sc(nic_dev, sc_info->sci, direct); + if (!priv_sc_ptr) { + macsec_err(nic_dev->lld_dev->dev, "%s: Set sc failed, sc not found, direct=0x%x", + netdev->name, direct); + return -EINVAL; + } + + // 执行命令 + ret = himacsec_cmd_exec_sc_op(nic_dev->lld_dev, sc_info, sc_op); + if (ret != 0) { + macsec_err(nic_dev->lld_dev->dev, "%s: Exec cmd sc set failed, direct=0x%x, ret=%d", + netdev->name, direct, ret); + return ret; + } + + // 回写驱动内数据 + memcpy(&priv_sc_ptr->info, sc_info, sizeof(macsec_sc_info_s)); + macsec_info(nic_dev->lld_dev->dev, "%s: Set sc success, direct=0x%x, sci=%llx", + netdev->name, direct, sc_info->sci); + himacsec_dfx_show_sc(nic_dev, &priv_sc_ptr->info, direct); + return ret; +} + +int himacsec_update_sa_an(struct hinic5_nic_dev *nic_dev, struct himacsec_sc *sc, + u32 sa_index, u8 an, crypt_direction_e direct) +{ + struct himacsec_sc temp_sc = {0}; + int ret; + + memcpy(&temp_sc, sc, sizeof(struct himacsec_sc)); + temp_sc.info.sa_an[sa_index] = an; + + ret = himacsec_set_sc(nic_dev, &temp_sc.info, direct); + if (ret != 0) { + macsec_err(nic_dev->lld_dev->dev, "%s: Update sa_an failed, sci=0x%016llx, an=%d", + nic_dev->netdev->name, sc->info.sci, an); + } + return ret; +} + +int himacsec_update_sc_in_sa_add(struct hinic5_nic_dev *nic_dev, u64 sci, u8 an, + crypt_direction_e direct) +{ + struct himacsec_sc *sc = NULL; + u32 sa_index; + int ret; + struct macsec_resource *macsec_res = nic_dev->macsec_res; + + if (!macsec_res) { + macsec_err(nic_dev->lld_dev->dev, "%s: MACsec resource is NULL", + nic_dev->netdev->name); + return -EINVAL; + } + + // sa_index 需要保证数组不越界 + sa_index = an % macsec_res->spec.max_sa; + if (sa_index >= HIMACSEC_MAX_SA_IN_SC) { + macsec_err(nic_dev->lld_dev->dev, "%s: Update sc info in sa operation failed, sa_index overflow, an=%d, max_sa=%d", + nic_dev->netdev->name, an, macsec_res->spec.max_sa); + return 
-EINVAL; + } + + sc = himacsec_get_valid_dev_sc(nic_dev, sci, direct); + if (!sc) { + macsec_err(nic_dev->lld_dev->dev, "%s: Update sc info in sa operation failed, sci=%llx not found", + nic_dev->netdev->name, sci); + return -EINVAL; + } + + // 更新 sa_an + ret = himacsec_update_sa_an(nic_dev, sc, sa_index, an, direct); + if (ret != 0) + return ret; + sc->info.sa_an[sa_index] = an; + return 0; +} + +int himacsec_create_sa(struct hinic5_nic_dev *nic_dev, macsec_sa_info_s *sa, + crypt_direction_e direct) +{ + int ret; + + ret = himacsec_add_sa(nic_dev, sa, direct); + if (ret != 0) { + macsec_err(nic_dev->lld_dev->dev, "Create sa failed, ret=0x%x, direct=0x%x", + ret, direct); + return ret; + } + + ret = himacsec_update_sc_in_sa_add(nic_dev, sa->sci, sa->an, direct); + if (ret != 0) { + if (himacsec_del_sa(nic_dev, sa->sci, sa->an, direct) != 0) { + macsec_err(nic_dev->lld_dev->dev, "%s: Fallback to delete sa data failed, direct=0x%x", + nic_dev->netdev->name, direct); + } + } + return ret; +} + +int himacsec_destroy_sa(struct hinic5_nic_dev *nic_dev, u64 sci, u8 assoc_num, + crypt_direction_e direct) +{ + struct himacsec_sc *sc = NULL; + u32 sa_index = 0; + int ret; + struct macsec_resource *macsec_res = nic_dev->macsec_res; + + if (!macsec_res) { + macsec_err(nic_dev->lld_dev->dev, "%s: MACsec resource is NULL", + nic_dev->netdev->name); + return -EINVAL; + } + + ret = himacsec_del_sa(nic_dev, sci, assoc_num, direct); + if (ret != 0) + return ret; + + // 清理 sa_an + sc = himacsec_get_valid_dev_sc(nic_dev, sci, direct); + if (!sc) { + macsec_err(nic_dev->lld_dev->dev, "%s: Del sa, update sc failed, direct=0x%x, sci=%llx not found", + nic_dev->netdev->name, direct, sci); + return -EINVAL; + } + + // sa_index 需要保证数组不越界 + sa_index = assoc_num % macsec_res->spec.max_sa; + if (sa_index >= HIMACSEC_MAX_SA_IN_SC) { + macsec_err(nic_dev->lld_dev->dev, "%s: sa_index overflow, direct=0x%x, an=%d, max_sa=%d", + nic_dev->netdev->name, direct, assoc_num, macsec_res->spec.max_sa); + 
return -EINVAL; + } + + ret = himacsec_update_sa_an(nic_dev, sc, sa_index, 0, direct); + if (ret != 0) + return ret; + + sc->info.sa_an[sa_index] = 0; + return 0; +} diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/netdev/hinic5_filter.c b/hinic5/src/dpu_platform_library/host/service/nic/linux/netdev/hinic5_filter.c new file mode 100644 index 00000000..e476ae78 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/netdev/hinic5_filter.c @@ -0,0 +1,473 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/debugfs.h> +#include <linux/module.h> +#include <linux/moduleparam.h> + +#include "ossl_knl.h" +#include "hinic5_hw.h" +#include "hinic5_crm.h" +#include "hinic5_nic_dev.h" +#include "hinic5_srv_nic.h" +#include "hinic5_filter.h" + +static int hinic5_uc_sync(struct net_device *netdev, u8 *addr) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + + return hinic5_set_mac(nic_dev->hwdev, addr, 0, + hinic5_global_func_id(nic_dev->hwdev), + HINIC5_CHANNEL_NIC); +} + +static int hinic5_uc_unsync(struct net_device *netdev, u8 *addr) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + + /* The addr is in use */ + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; + + return hinic5_del_mac(nic_dev->hwdev, addr, 0, + hinic5_global_func_id(nic_dev->hwdev), + HINIC5_CHANNEL_NIC); +} + +void hinic5_clean_mac_list_filter(struct hinic5_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + struct hinic5_mac_filter *ftmp = NULL; + struct hinic5_mac_filter *f = NULL; + + list_for_each_entry_safe(f, ftmp, &nic_dev->uc_filter_list, list) { + if (f->state == HINIC5_MAC_HW_SYNCED) + hinic5_uc_unsync(netdev, f->addr); + 
list_del(&f->list); + kfree(f); + } + + list_for_each_entry_safe(f, ftmp, &nic_dev->mc_filter_list, list) { + if (f->state == HINIC5_MAC_HW_SYNCED) + hinic5_uc_unsync(netdev, f->addr); + list_del(&f->list); + kfree(f); + } +} + +static struct hinic5_mac_filter *hinic5_find_mac(const struct list_head *filter_list, + u8 *addr) +{ + struct hinic5_mac_filter *f = NULL; + + list_for_each_entry(f, filter_list, list) { + if (ether_addr_equal(addr, f->addr)) + return f; + } + return NULL; +} + +static void hinic5_add_filter(struct hinic5_nic_dev *nic_dev, + struct list_head *mac_filter_list, + u8 *addr) +{ + struct hinic5_mac_filter *f = NULL; + + // check if addr is broadcast address + if (is_broadcast_ether_addr(addr)) + return; + + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + return; + + ether_addr_copy(f->addr, addr); + + INIT_LIST_HEAD(&f->list); + list_add_tail(&f->list, mac_filter_list); + + f->state = HINIC5_MAC_WAIT_HW_SYNC; + set_bit(HINIC5_MAC_FILTER_CHANGED, &nic_dev->flags); +} + +static void hinic5_del_filter(struct hinic5_nic_dev *nic_dev, + struct hinic5_mac_filter *f) +{ + set_bit(HINIC5_MAC_FILTER_CHANGED, &nic_dev->flags); + + if (f->state == HINIC5_MAC_WAIT_HW_SYNC) { + /* have not added to hw, delete it directly */ + list_del(&f->list); + kfree(f); + return; + } + + f->state = HINIC5_MAC_WAIT_HW_UNSYNC; +} + +static struct hinic5_mac_filter *hinic5_mac_filter_entry_clone(const struct hinic5_mac_filter *src) +{ + struct hinic5_mac_filter *f = NULL; + + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + return NULL; + + *f = *src; + INIT_LIST_HEAD(&f->list); + + return f; +} + +static void hinic5_undo_del_filter_entries(struct list_head *filter_list, + const struct list_head *from) +{ + struct hinic5_mac_filter *ftmp = NULL; + struct hinic5_mac_filter *f = NULL; + + list_for_each_entry_safe(f, ftmp, from, list) { + if (hinic5_find_mac(filter_list, f->addr)) + continue; + + if (f->state == HINIC5_MAC_HW_UNSYNCED) + f->state = HINIC5_MAC_WAIT_HW_UNSYNC; 
+ + list_move_tail(&f->list, filter_list); + } +} + +static void hinic5_undo_add_filter_entries(struct list_head *filter_list, + const struct list_head *from) +{ + struct hinic5_mac_filter *ftmp = NULL; + struct hinic5_mac_filter *tmp = NULL; + struct hinic5_mac_filter *f = NULL; + + list_for_each_entry_safe(f, ftmp, from, list) { + tmp = hinic5_find_mac(filter_list, f->addr); + if (tmp && tmp->state == HINIC5_MAC_HW_SYNCED) + tmp->state = HINIC5_MAC_WAIT_HW_SYNC; + } +} + +static void hinic5_cleanup_filter_list(const struct list_head *head) +{ + struct hinic5_mac_filter *ftmp = NULL; + struct hinic5_mac_filter *f = NULL; + + list_for_each_entry_safe(f, ftmp, head, list) { + list_del(&f->list); + kfree(f); + } +} + +static int hinic5_mac_filter_sync_hw(struct hinic5_nic_dev *nic_dev, + struct list_head *del_list, + struct list_head *add_list) +{ + struct net_device *netdev = nic_dev->netdev; + struct hinic5_mac_filter *ftmp = NULL; + struct hinic5_mac_filter *f = NULL; + int err = 0, add_count = 0; + + if (list_empty(del_list) == 0) { + list_for_each_entry_safe(f, ftmp, del_list, list) { + err = hinic5_uc_unsync(netdev, f->addr); + if (err != 0) { /* ignore errors when delete mac */ + nic_err(nic_dev->lld_dev->dev, "Failed to delete mac\n"); + } + + list_del(&f->list); + kfree(f); + } + } + + if (list_empty(add_list) == 0) { + list_for_each_entry_safe(f, ftmp, add_list, list) { + err = hinic5_uc_sync(netdev, f->addr); + if (err != 0) { + nic_err(nic_dev->lld_dev->dev, "Failed to add mac\n"); + return err; + } + + add_count++; + list_del(&f->list); + kfree(f); + } + } + + return add_count; +} + +static int hinic5_mac_filter_sync(struct hinic5_nic_dev *nic_dev, + struct list_head *mac_filter_list, bool uc) +{ + struct list_head tmp_del_list, tmp_add_list; + struct hinic5_mac_filter *fclone = NULL; + struct hinic5_mac_filter *ftmp = NULL; + struct hinic5_mac_filter *f = NULL; + int err = 0, add_count = 0; + + INIT_LIST_HEAD(&tmp_del_list); + 
INIT_LIST_HEAD(&tmp_add_list); + + list_for_each_entry_safe(f, ftmp, mac_filter_list, list) { + if (f->state != HINIC5_MAC_WAIT_HW_UNSYNC) + continue; + + f->state = HINIC5_MAC_HW_UNSYNCED; + list_move_tail(&f->list, &tmp_del_list); + } + + list_for_each_entry_safe(f, ftmp, mac_filter_list, list) { + if (f->state != HINIC5_MAC_WAIT_HW_SYNC) + continue; + + fclone = hinic5_mac_filter_entry_clone(f); + if (!fclone) { + err = -ENOMEM; + break; + } + + f->state = HINIC5_MAC_HW_SYNCED; + list_add_tail(&fclone->list, &tmp_add_list); + } + + if (err != 0) { + hinic5_undo_del_filter_entries(mac_filter_list, &tmp_del_list); + hinic5_undo_add_filter_entries(mac_filter_list, &tmp_add_list); + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to clone mac_filter_entry\n"); + + hinic5_cleanup_filter_list(&tmp_del_list); + hinic5_cleanup_filter_list(&tmp_add_list); + return -ENOMEM; + } + + add_count = hinic5_mac_filter_sync_hw(nic_dev, &tmp_del_list, &tmp_add_list); + if (list_empty(&tmp_add_list) != 0) + return add_count; + + /* there are errors when add mac to hw, delete all mac in hw */ + hinic5_undo_add_filter_entries(mac_filter_list, &tmp_add_list); + /* VF don't support to enter promisc mode, + * so we can't delete any other uc mac + */ + if (!HINIC5_FUNC_IS_VF(nic_dev->hwdev) || !uc) { + list_for_each_entry_safe(f, ftmp, mac_filter_list, list) { + if (f->state != HINIC5_MAC_HW_SYNCED) + continue; + + fclone = hinic5_mac_filter_entry_clone(f); + if (!fclone) + break; + + f->state = HINIC5_MAC_WAIT_HW_SYNC; + list_add_tail(&fclone->list, &tmp_del_list); + } + } + + hinic5_cleanup_filter_list(&tmp_add_list); + hinic5_mac_filter_sync_hw(nic_dev, &tmp_del_list, &tmp_add_list); + + /* need to enter promisc/allmulti mode */ + return -ENOMEM; +} + +static void hinic5_mac_filter_sync_all(struct hinic5_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + int add_count; + + if (test_bit(HINIC5_MAC_FILTER_CHANGED, &nic_dev->flags) != 0) { + 
clear_bit(HINIC5_MAC_FILTER_CHANGED, &nic_dev->flags); + add_count = hinic5_mac_filter_sync(nic_dev, + &nic_dev->uc_filter_list, + true); + if (add_count < 0 && HINIC5_SUPPORT_PROMISC(nic_dev->hwdev)) { + set_bit(HINIC5_PROMISC_FORCE_ON, + &nic_dev->rx_mod_state); + nicif_info(nic_dev, drv, netdev, "Promisc mode forced on\n"); + } else if (add_count != 0) { + clear_bit(HINIC5_PROMISC_FORCE_ON, + &nic_dev->rx_mod_state); + } + + add_count = hinic5_mac_filter_sync(nic_dev, + &nic_dev->mc_filter_list, + false); + if (add_count < 0 && HINIC5_SUPPORT_ALLMULTI(nic_dev->hwdev)) { + set_bit(HINIC5_ALLMULTI_FORCE_ON, + &nic_dev->rx_mod_state); + nicif_info(nic_dev, drv, netdev, "All multicast mode forced on\n"); + } else if (add_count != 0) { + clear_bit(HINIC5_ALLMULTI_FORCE_ON, + &nic_dev->rx_mod_state); + } + } +} + +static void hinic5_update_mac_filter(struct hinic5_nic_dev *nic_dev, + const struct netdev_hw_addr_list *src_list, + struct list_head *filter_list) +{ + struct hinic5_mac_filter *filter = NULL; + struct hinic5_mac_filter *ftmp = NULL; + struct hinic5_mac_filter *f = NULL; + struct netdev_hw_addr *ha = NULL; + + /* add addr if not already in the filter list */ + netif_addr_lock_bh(nic_dev->netdev); + netdev_hw_addr_list_for_each(ha, src_list) { + filter = hinic5_find_mac(filter_list, ha->addr); + if (!filter) + hinic5_add_filter(nic_dev, filter_list, ha->addr); + else if (filter->state == HINIC5_MAC_WAIT_HW_UNSYNC) + filter->state = HINIC5_MAC_HW_SYNCED; + } + netif_addr_unlock_bh(nic_dev->netdev); + + /* delete addr if not in netdev list */ + list_for_each_entry_safe(f, ftmp, filter_list, list) { + bool found = false; + + netif_addr_lock_bh(nic_dev->netdev); + netdev_hw_addr_list_for_each(ha, src_list) + if (ether_addr_equal(ha->addr, f->addr)) { + found = true; + break; + } + netif_addr_unlock_bh(nic_dev->netdev); + + if (found) + continue; + + hinic5_del_filter(nic_dev, f); + } +} + +#ifndef NETDEV_HW_ADDR_T_MULTICAST +static void 
hinic5_update_mc_filter(struct hinic5_nic_dev *nic_dev, + struct list_head *filter_list) +{ + struct hinic5_mac_filter *filter = NULL; + struct hinic5_mac_filter *ftmp = NULL; + struct hinic5_mac_filter *f = NULL; + struct dev_mc_list *ha = NULL; + + /* add addr if not already in the filter list */ + netif_addr_lock_bh(nic_dev->netdev); + netdev_for_each_mc_addr(ha, nic_dev->netdev) { + filter = hinic5_find_mac(filter_list, ha->da_addr); + if (!filter) + hinic5_add_filter(nic_dev, filter_list, ha->da_addr); + else if (filter->state == HINIC5_MAC_WAIT_HW_UNSYNC) + filter->state = HINIC5_MAC_HW_SYNCED; + } + netif_addr_unlock_bh(nic_dev->netdev); + /* delete addr if not in netdev list */ + list_for_each_entry_safe(f, ftmp, filter_list, list) { + bool found = false; + + netif_addr_lock_bh(nic_dev->netdev); + netdev_for_each_mc_addr(ha, nic_dev->netdev) + if (ether_addr_equal(ha->da_addr, f->addr)) { + found = true; + break; + } + netif_addr_unlock_bh(nic_dev->netdev); + + if (found) + continue; + + hinic5_del_filter(nic_dev, f); + } +} +#endif + +static void update_mac_filter(struct hinic5_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + + if (test_and_clear_bit(HINIC5_UPDATE_MAC_FILTER, &nic_dev->flags) != 0) { + hinic5_update_mac_filter(nic_dev, &netdev->uc, + &nic_dev->uc_filter_list); + +#ifdef NETDEV_HW_ADDR_T_MULTICAST + hinic5_update_mac_filter(nic_dev, &netdev->mc, + &nic_dev->mc_filter_list); +#else + hinic5_update_mc_filter(nic_dev, + &nic_dev->mc_filter_list); +#endif + } +} + +static void sync_rx_mode_to_hw(struct hinic5_nic_dev *nic_dev, int promisc_en, + int allmulti_en) +{ + struct net_device *netdev = nic_dev->netdev; + u32 rx_mod = HINIC5_DEFAULT_RX_MODE; + int err; + + rx_mod |= ((promisc_en != 0) ? NIC_RX_MODE_PROMISC : 0); + rx_mod |= ((allmulti_en != 0) ? 
NIC_RX_MODE_MC_ALL : 0); + + if (promisc_en != test_bit(HINIC5_HW_PROMISC_ON, + &nic_dev->rx_mod_state)) + nicif_info(nic_dev, drv, netdev, + "%s promisc mode\n", + (promisc_en != 0) ? "Enter" : "Left"); + if (allmulti_en != + test_bit(HINIC5_HW_ALLMULTI_ON, &nic_dev->rx_mod_state)) + nicif_info(nic_dev, drv, netdev, + "%s all_multi mode\n", + (allmulti_en != 0) ? "Enter" : "Left"); + + err = hinic5_set_rx_mode(nic_dev->hwdev, rx_mod); + if (err != 0) { + nicif_err(nic_dev, drv, netdev, "Failed to set rx_mode\n"); + return; + } + + (promisc_en != 0) ? set_bit(HINIC5_HW_PROMISC_ON, &nic_dev->rx_mod_state) : + clear_bit(HINIC5_HW_PROMISC_ON, &nic_dev->rx_mod_state); + + (allmulti_en != 0) ? set_bit(HINIC5_HW_ALLMULTI_ON, &nic_dev->rx_mod_state) : + clear_bit(HINIC5_HW_ALLMULTI_ON, &nic_dev->rx_mod_state); +} + +void hinic5_set_rx_mode_work(struct work_struct *work) +{ + struct hinic5_nic_dev *nic_dev = + container_of(work, struct hinic5_nic_dev, rx_mode_work); + struct net_device *netdev = nic_dev->netdev; + int promisc_en = 0, allmulti_en = 0; + + update_mac_filter(nic_dev); + + hinic5_mac_filter_sync_all(nic_dev); + + if (HINIC5_SUPPORT_PROMISC(nic_dev->hwdev)) + promisc_en = ((netdev->flags & IFF_PROMISC) != 0) || + test_bit(HINIC5_PROMISC_FORCE_ON, + &nic_dev->rx_mod_state); + + if (HINIC5_SUPPORT_ALLMULTI(nic_dev->hwdev)) + allmulti_en = ((netdev->flags & IFF_ALLMULTI) != 0) || + test_bit(HINIC5_ALLMULTI_FORCE_ON, + &nic_dev->rx_mod_state); + + if (promisc_en != + test_bit(HINIC5_HW_PROMISC_ON, &nic_dev->rx_mod_state) || + allmulti_en != + test_bit(HINIC5_HW_ALLMULTI_ON, &nic_dev->rx_mod_state)) + sync_rx_mode_to_hw(nic_dev, promisc_en, allmulti_en); +} + diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/netdev/hinic5_filter.h b/hinic5/src/dpu_platform_library/host/service/nic/linux/netdev/hinic5_filter.h new file mode 100644 index 00000000..2d6f3f1a --- /dev/null +++ 
b/hinic5/src/dpu_platform_library/host/service/nic/linux/netdev/hinic5_filter.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved. */ + +#ifndef HINIC5_FILTER_H +#define HINIC5_FILTER_H + +#define HINIC5_DEFAULT_RX_MODE (NIC_RX_MODE_UC | NIC_RX_MODE_MC | \ + NIC_RX_MODE_BC) + +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/netdev/hinic5_netdev_ops.c b/hinic5/src/dpu_platform_library/host/service/nic/linux/netdev/hinic5_netdev_ops.c new file mode 100644 index 00000000..fdf23173 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/netdev/hinic5_netdev_ops.c @@ -0,0 +1,2416 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include <net/dsfield.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/etherdevice.h> +#include <linux/netlink.h> +#include <linux/debugfs.h> +#include <linux/net_tstamp.h> +#include <linux/ip.h> + +#include "ossl_knl.h" +#include "drv_nic_api.h" +#ifdef HAVE_XDP_SUPPORT +#include <linux/bpf.h> +#endif +#include "hinic5_hw.h" +#include "hinic5_crm.h" +#include "hinic5_vram_common.h" +#include "nic_cfg_comm.h" +#include "hinic5_hinic5_vram.h" +#include "hinic5_nic_io.h" +#include "hinic5_nic_dev.h" +#include "hinic5_srv_nic.h" +#include "hinic5_tx.h" +#include "hinic5_rx.h" +#include "hinic5_xdp.h" +#include "hinic5_dcb.h" +#include "hinic5_irq.h" +#include "hinic5_ptp.h" +#include "hinic5_tc.h" +#include "hinic5_mag_cfg.h" +#include "hinic5_netdev_ops.h" + +#if defined(HAVE_NDO_UDP_TUNNEL_ADD) || defined(HAVE_UDP_TUNNEL_NIC_INFO) +#include <net/udp_tunnel.h> +#endif /* HAVE_NDO_UDP_TUNNEL_ADD || HAVE_UDP_TUNNEL_NIC_INFO */ + +static void hinic5_nic_set_rx_mode(struct net_device *netdev) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + + 
if (netdev_uc_count(netdev) != nic_dev->netdev_uc_cnt || + netdev_mc_count(netdev) != nic_dev->netdev_mc_cnt) { + set_bit(HINIC5_UPDATE_MAC_FILTER, &nic_dev->flags); + nic_dev->netdev_uc_cnt = netdev_uc_count(netdev); + nic_dev->netdev_mc_cnt = netdev_mc_count(netdev); + } + + queue_work(nic_dev->workq, &nic_dev->rx_mode_work); +} + +static int hinic5_alloc_txrxq_resources(struct hinic5_nic_dev *nic_dev, + struct hinic5_dyna_txrxq_params *q_params) +{ + u32 size; + int err; + u16 total_num_qps = q_params->num_qps + q_params->xdp_qps; + + size = sizeof(*q_params->txqs_res) * (total_num_qps); + q_params->txqs_res = kzalloc(size, GFP_KERNEL); + if (!q_params->txqs_res) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc txqs resources array\n"); + return -ENOMEM; + } + + size = sizeof(*q_params->rxqs_res) * (total_num_qps); + q_params->rxqs_res = kzalloc(size, GFP_KERNEL); + if (!q_params->rxqs_res) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc rxqs resource array\n"); + err = -ENOMEM; + goto alloc_rxqs_res_arr_err; + } + + size = sizeof(*q_params->irq_cfg) * (total_num_qps); + q_params->irq_cfg = kzalloc(size, GFP_KERNEL); + if (!q_params->irq_cfg) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc irq resource array\n"); + err = -ENOMEM; + goto alloc_irq_cfg_err; + } + + err = hinic5_alloc_txqs_res(nic_dev, total_num_qps, + q_params->sq_depth, q_params->txqs_res); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc txqs resource\n"); + goto alloc_txqs_res_err; + } + + err = hinic5_alloc_rxqs_res(nic_dev, total_num_qps, + q_params->rq_depth, q_params->rxqs_res); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc rxqs resource\n"); + goto alloc_rxqs_res_err; + } + + return 0; + +alloc_rxqs_res_err: + hinic5_free_txqs_res(nic_dev, q_params->num_qps, q_params->sq_depth, + q_params->txqs_res); + +alloc_txqs_res_err: + kfree(q_params->irq_cfg); + q_params->irq_cfg = NULL; + 
+alloc_irq_cfg_err: + kfree(q_params->rxqs_res); + q_params->rxqs_res = NULL; + +alloc_rxqs_res_arr_err: + kfree(q_params->txqs_res); + q_params->txqs_res = NULL; + + return err; +} + +static void hinic5_free_txrxq_resources(struct hinic5_nic_dev *nic_dev, + struct hinic5_dyna_txrxq_params *q_params) +{ + u16 total_num_qps = q_params->num_qps + q_params->xdp_qps; + + hinic5_free_rxqs_res(nic_dev, total_num_qps, q_params->rq_depth, + q_params->rxqs_res); + hinic5_free_txqs_res(nic_dev, total_num_qps, q_params->sq_depth, + q_params->txqs_res); + + kfree(q_params->irq_cfg); + q_params->irq_cfg = NULL; + + kfree(q_params->rxqs_res); + q_params->rxqs_res = NULL; + + kfree(q_params->txqs_res); + q_params->txqs_res = NULL; +} + +static void hinic5_remove_configure_txrxqs(struct hinic5_nic_dev *nic_dev) +{ + hinic5_remove_configure_rxqs(nic_dev); +} + +static int hinic5_configure_txrxqs(struct hinic5_nic_dev *nic_dev, + struct hinic5_dyna_txrxq_params *q_params) +{ + int err; + u16 total_num_qps = q_params->num_qps + q_params->xdp_qps; + + err = hinic5_configure_txqs(nic_dev, total_num_qps, + q_params->sq_depth, q_params->txqs_res); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to configure txqs\n"); + return err; + } + + err = hinic5_configure_rxqs(nic_dev, total_num_qps, + q_params->rq_depth, q_params->rxqs_res); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to configure rxqs\n"); + return err; + } + + return 0; +} + +static void config_dcb_qps_map(struct hinic5_nic_dev *nic_dev) +{ + u8 num_cos = hinic5_get_dev_user_cos_num(nic_dev); + struct net_device *netdev = nic_dev->netdev; + + if (test_bit(HINIC5_DCB_ENABLE, &nic_dev->flags) == 0) { + hinic5_update_tx_db_cos(nic_dev, 0); + return; + } + + hinic5_update_qp_cos_cfg(nic_dev); + /* For now, we don't support to change num_cos */ + if (num_cos > nic_dev->cos_config_num_max || + nic_dev->q_params.num_qps < num_cos) { + nicif_err(nic_dev, drv, netdev, "Invalid num_cos: %u 
or num_qps: %u, disable DCB\n", + num_cos, nic_dev->q_params.num_qps); + nic_dev->q_params.num_cos = 0; + clear_bit(HINIC5_DCB_ENABLE, &nic_dev->flags); + clear_bit(HINIC5_DCB_ENABLE, &nic_dev->nic_hinic5_vram->flags); + /* if we can't enable rss or get enough num_qps, + * need to sync default configure to hw + */ + hinic5_configure_dcb(netdev); + } + + hinic5_update_tx_db_cos(nic_dev, 1); +} + +static int hinic5_configure(struct hinic5_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + int err; + int is_in_kexec = hinic5_vram_get_kexec_flag(); + + if (is_in_kexec == 0) { + err = hinic5_set_port_mtu(nic_dev->hwdev, (u16)netdev->mtu); + if (err != 0) { + nicif_err(nic_dev, drv, netdev, "Failed to set mtu\n"); + return err; + } + } + + config_dcb_qps_map(nic_dev); + + /* rx rss init */ + err = hinic5_rx_configure(netdev, test_bit(HINIC5_DCB_ENABLE, &nic_dev->flags) ? 1 : 0); + if (err != 0) { + nicif_err(nic_dev, drv, netdev, "Failed to configure rx\n"); + return err; + } + + return 0; +} + +static void hinic5_remove_configure(struct hinic5_nic_dev *nic_dev) +{ + hinic5_rx_remove_configure(nic_dev->netdev); +} + +/* try to modify the number of irq to the target number, + * and return the actual number of irq. 
+ */ +static u16 hinic5_qp_irq_change(struct hinic5_nic_dev *nic_dev, + u16 dst_num_qp_irq) +{ + struct irq_info *qps_irq_info = nic_dev->qps_irq_info; + u16 resp_irq_num, irq_num_gap, i; + u16 idx; + int err; + + if (dst_num_qp_irq > nic_dev->num_qp_irq) { + irq_num_gap = dst_num_qp_irq - nic_dev->num_qp_irq; + err = hinic5_alloc_irqs(nic_dev->hwdev, SERVICE_T_NIC, + irq_num_gap, + &qps_irq_info[nic_dev->num_qp_irq], + &resp_irq_num); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc irqs\n"); + return nic_dev->num_qp_irq; + } + + nic_dev->num_qp_irq += resp_irq_num; + } else if (dst_num_qp_irq < nic_dev->num_qp_irq) { + irq_num_gap = nic_dev->num_qp_irq - dst_num_qp_irq; + for (i = 0; i < irq_num_gap; i++) { + idx = (nic_dev->num_qp_irq - i) - 1; + hinic5_free_irq(nic_dev->hwdev, SERVICE_T_NIC, + qps_irq_info[idx].irq_id); + qps_irq_info[idx].irq_id = 0; + qps_irq_info[idx].msix_entry_idx = 0; + } + nic_dev->num_qp_irq = dst_num_qp_irq; + } + + return nic_dev->num_qp_irq; +} + +static void config_dcb_num_qps(struct hinic5_nic_dev *nic_dev, + const struct hinic5_dyna_txrxq_params *q_params, + u16 max_qps) +{ + u8 num_cos = q_params->num_cos; + + if (num_cos == 0 || num_cos > nic_dev->cos_config_num_max || num_cos > max_qps) + return; /* will disable DCB in config_dcb_qps_map() */ + + hinic5_update_qp_cos_cfg(nic_dev); +} + +int hinic5_set_usr_qps_num(struct net_device *netdev, u16 usr_qps_num) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + + if (usr_qps_num + nic_dev->q_params.num_qps > nic_dev->max_qps) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "The usr qps num is too big, usr qps num: %u, knl qps num: %u\n", + usr_qps_num, nic_dev->q_params.num_qps); + return -EINVAL; + } + nic_dev->usr_qps_num = usr_qps_num; + return 0; +} + +static void hinic5_knl_qps_num_check(struct hinic5_nic_dev *nic_dev) +{ + /* nic_dev->usr_qps_num由产品侧配置 */ + if (nic_dev->usr_qps_num == 0) + return; + if (nic_dev->usr_qps_num + 
nic_dev->q_params.num_qps > nic_dev->max_qps) { + nic_dev->q_params.num_qps = nic_dev->max_qps - nic_dev->usr_qps_num; + nicif_warn(nic_dev, drv, nic_dev->netdev, + "Can not get enough knl qps, adjust kernel qps num to %u\n", + nic_dev->q_params.num_qps); + } +} + +static void hinic5_config_num_qps(struct hinic5_nic_dev *nic_dev, + struct hinic5_dyna_txrxq_params *q_params) +{ + u16 alloc_num_irq, cur_num_irq; + u16 dst_num_irq; + + /* 根据 usr qps 校验 knl qps */ + (void)hinic5_knl_qps_num_check(nic_dev); + + if (test_bit(HINIC5_RSS_ENABLE, &nic_dev->flags) == 0) + q_params->num_qps = 1; + config_dcb_num_qps(nic_dev, q_params, q_params->num_qps); + + if (nic_dev->num_qp_irq >= q_params->num_qps + q_params->xdp_qps) + goto out; + + cur_num_irq = nic_dev->num_qp_irq; + + alloc_num_irq = hinic5_qp_irq_change(nic_dev, q_params->num_qps + q_params->xdp_qps); + if (alloc_num_irq < q_params->num_qps + q_params->xdp_qps) { + if (q_params->xdp_qps != 0) { + /* 使能XDP,XDP和内核平分队列数 */ + q_params->num_qps = alloc_num_irq / XDP_QPS_NUM_EXPANSION; + q_params->xdp_qps = alloc_num_irq / XDP_QPS_NUM_EXPANSION; + } else { + q_params->num_qps = alloc_num_irq; + } + config_dcb_num_qps(nic_dev, q_params, q_params->num_qps); + nicif_warn(nic_dev, drv, nic_dev->netdev, + "Can not get enough irqs, adjust num_qps to %u\n", q_params->num_qps); + + /* The current irq may be in use, we must keep it */ + dst_num_irq = (u16)max_t(u16, cur_num_irq, q_params->num_qps + q_params->xdp_qps); + hinic5_qp_irq_change(nic_dev, dst_num_irq); + } + +out: + nicif_info(nic_dev, drv, nic_dev->netdev, "Finally num_qps: %u\n", + q_params->num_qps); +} + +/* determine num_qps from rss_tmpl_id/irq_num/dcb_en */ +static int hinic5_setup_num_qps(struct hinic5_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + u32 irq_size; + + nic_dev->num_qp_irq = 0; + + irq_size = sizeof(*nic_dev->qps_irq_info) * nic_dev->max_qps; + if (irq_size == 0) { + nicif_err(nic_dev, drv, netdev, "Cannot allocate zero size
entries\n"); + return -EINVAL; + } + nic_dev->qps_irq_info = kzalloc(irq_size, GFP_KERNEL); + if (!nic_dev->qps_irq_info) + return -ENOMEM; + + hinic5_config_num_qps(nic_dev, &nic_dev->q_params); + + return 0; +} + +static void hinic5_destroy_num_qps(struct hinic5_nic_dev *nic_dev) +{ + u16 i; + + for (i = 0; i < nic_dev->num_qp_irq; i++) + hinic5_free_irq(nic_dev->hwdev, SERVICE_T_NIC, + nic_dev->qps_irq_info[i].irq_id); + + kfree(nic_dev->qps_irq_info); +} + +int hinic5_set_flow_bifurcation_group_num(struct net_device *netdev, u8 group_num) +{ + int err; + u8 enable_queue_pooling; + struct hinic5_nic_dev *nic_dev = NULL; + + if (!netdev) + return -ENODEV; + + nic_dev = netdev_priv(netdev); + if (group_num < HINIC5_GROUP_NUMBER_MIN || group_num > HINIC5_GROUP_NUMBER_MAX) { + nicif_err(nic_dev, drv, netdev, "The group number: %d is out of [%d, %d].\n", + group_num, HINIC5_GROUP_NUMBER_MIN, HINIC5_GROUP_NUMBER_MAX); + return -EINVAL; + } + nic_dev->flow_bifur_group_num = (u8)roundup_pow_of_two(group_num); + + if (nic_dev->flow_bifur_group_num == 0) { + nicif_err(nic_dev, drv, netdev, "The value of flow_bifur_group_num: %d is 0\n", + nic_dev->flow_bifur_group_num); + return -EINVAL; + } + enable_queue_pooling = nic_dev->flow_bifur_group_num > HINIC5_GROUP_NUMBER_MIN ? 1 : 0; + hinic5_set_queue_pooling(nic_dev->hwdev, enable_queue_pooling); + + if (nic_dev->q_params.num_qps > (nic_dev->max_qps / nic_dev->flow_bifur_group_num)) { + nicif_err(nic_dev, drv, netdev, + "The value of qp_nums: %d in use is greater than (max_qps / group_num): %d\n", + nic_dev->q_params.num_qps, + (nic_dev->max_qps / nic_dev->flow_bifur_group_num)); + return -EINVAL; + } + + nic_dev->usr_qps_num = nic_dev->max_qps - nic_dev->max_qps / nic_dev->flow_bifur_group_num; + + err = hinic5_rx_configure(netdev, test_bit(HINIC5_DCB_ENABLE, &nic_dev->flags) ? 
1 : 0); + if (err != 0) { + nicif_err(nic_dev, drv, netdev, "Failed to configure rx when switch flow bifur group num.\n"); + return err; + } + return 0; +} +EXPORT_SYMBOL(hinic5_set_flow_bifurcation_group_num); + +int hinic5_cfg_flow_bifurcation_paras(struct net_device *netdev, u8 op_code, + u8 group_id, u32 *indir, u16 indir_length) +{ + int err; + u16 expect_indir_length; + u16 indir_start; + struct hinic5_nic_dev *nic_dev = NULL; + + if (!netdev) + return -ENODEV; + + nic_dev = netdev_priv(netdev); + if (group_id < HINIC5_GROUP_NUMBER_MIN || group_id > nic_dev->flow_bifur_group_num) { + nicif_err(nic_dev, drv, netdev, + "The group id: %u is invalid, current group num: %u\n", + group_id, nic_dev->flow_bifur_group_num); + return -EINVAL; + } + + if (!indir) { + nicif_err(nic_dev, drv, netdev, "The indir is NULL\n"); + return -EINVAL; + } + + expect_indir_length = NIC_RSS_INDIR_SIZE / nic_dev->flow_bifur_group_num; + if (indir_length != expect_indir_length) { + nicif_err(nic_dev, drv, netdev, + "The indir_length: %u is invalid, expect_indir_length: %u\n", + indir_length, expect_indir_length); + return -EINVAL; + } + + indir_start = group_id * indir_length; + if (op_code == 0) { + memcpy(indir, nic_dev->rss_indir + indir_start, sizeof(u32) * indir_length); + return 0; + } + + memcpy(nic_dev->rss_indir + indir_start, indir, sizeof(u32) * indir_length); + err = hinic5_rss_set_indir_tbl(nic_dev->hwdev, nic_dev->rss_indir); + if (err != 0) { + nicif_err(nic_dev, drv, netdev, "Failed to set rss indir table when cfg flow bifur.\n"); + return -EFAULT; + } + return 0; +} +EXPORT_SYMBOL(hinic5_cfg_flow_bifurcation_paras); + +int hinic5_force_port_disable(struct hinic5_nic_dev *nic_dev) +{ + int err; + + down(&nic_dev->port_state_sem); + + err = hinic5_set_port_enable(nic_dev->hwdev, false, HINIC5_CHANNEL_NIC); + if (err == 0) + nic_dev->force_port_disable = true; + + up(&nic_dev->port_state_sem); + + return err; +} + +int hinic5_force_set_port_state(struct hinic5_nic_dev 
*nic_dev, bool enable) +{ + int err = 0; + + down(&nic_dev->port_state_sem); + + nic_dev->force_port_disable = false; + err = hinic5_set_port_enable(nic_dev->hwdev, enable, + HINIC5_CHANNEL_NIC); + + up(&nic_dev->port_state_sem); + + return err; +} + +int hinic5_maybe_set_port_state(struct hinic5_nic_dev *nic_dev, bool enable) +{ + int err; + + down(&nic_dev->port_state_sem); + + /* Do nothing when force disable + * Port will disable when call force port disable + * and should not enable port when in force mode + */ + if (nic_dev->force_port_disable) { + up(&nic_dev->port_state_sem); + return 0; + } + + err = hinic5_set_port_enable(nic_dev->hwdev, enable, + HINIC5_CHANNEL_NIC); + + up(&nic_dev->port_state_sem); + + return err; +} + +static void hinic5_print_link_message(struct hinic5_nic_dev *nic_dev, + u8 link_status) +{ + if (nic_dev->link_status == link_status) + return; + + nic_dev->link_status = link_status; + + nicif_info(nic_dev, link, nic_dev->netdev, "Link is %s\n", + ((link_status != 0) ? 
"up" : "down")); +} + +static int hinic5_alloc_channel_resources(struct hinic5_nic_dev *nic_dev, + struct hinic5_dyna_qp_params *qp_params, + struct hinic5_dyna_txrxq_params *trxq_params) +{ + int err; + + qp_params->num_qps = trxq_params->num_qps; + qp_params->xdp_qps = trxq_params->xdp_qps; + qp_params->sq_depth = trxq_params->sq_depth; + qp_params->rq_depth = trxq_params->rq_depth; + + err = hinic5_alloc_qps(nic_dev->hwdev, nic_dev->qps_irq_info, + qp_params); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc qps\n"); + return err; + } + + err = hinic5_alloc_txrxq_resources(nic_dev, trxq_params); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to alloc txrxq resources\n"); + hinic5_free_qps(nic_dev->hwdev, qp_params); + return err; + } + + return 0; +} + +static void hinic5_free_channel_resources(struct hinic5_nic_dev *nic_dev, + struct hinic5_dyna_qp_params *qp_params, + struct hinic5_dyna_txrxq_params *trxq_params) +{ + mutex_lock(&nic_dev->nic_mutex); + hinic5_free_txrxq_resources(nic_dev, trxq_params); + hinic5_free_qps(nic_dev->hwdev, qp_params); + mutex_unlock(&nic_dev->nic_mutex); +} + +static int hinic5_open_channel(struct hinic5_nic_dev *nic_dev, + struct hinic5_dyna_qp_params *qp_params, + struct hinic5_dyna_txrxq_params *trxq_params) +{ + int err; + + err = hinic5_init_qps(nic_dev->hwdev, qp_params); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to init qps\n"); + return err; + } + + err = hinic5_configure_txrxqs(nic_dev, trxq_params); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to configure txrxqs\n"); + goto cfg_txrxqs_err; + } + + err = hinic5_qps_irq_init(nic_dev); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to init txrxq irq\n"); + goto init_qp_irq_err; + } + + err = hinic5_configure(nic_dev); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to init txrxq irq\n"); + goto configure_err; + } + + return 0; + 
+configure_err: + hinic5_qps_irq_deinit(nic_dev); + +init_qp_irq_err: + hinic5_remove_configure_txrxqs(nic_dev); + +cfg_txrxqs_err: + hinic5_deinit_qps(nic_dev->hwdev, qp_params); + + return err; +} + +static void hinic5_close_channel(struct hinic5_nic_dev *nic_dev, + struct hinic5_dyna_qp_params *qp_params) +{ + hinic5_remove_configure(nic_dev); + hinic5_qps_irq_deinit(nic_dev); + hinic5_remove_configure_txrxqs(nic_dev); + hinic5_deinit_qps(nic_dev->hwdev, qp_params); +} + +int hinic5_vport_up(struct hinic5_nic_dev *nic_dev) +{ + struct net_device *netdev = nic_dev->netdev; + u8 link_status = 0; + u16 glb_func_id; + int err; + + err = hinic5_cache_out_qps_res(nic_dev->hwdev); + if (err != 0) + return err; + + glb_func_id = hinic5_global_func_id(nic_dev->hwdev); + err = hinic5_set_vport_enable(nic_dev->hwdev, glb_func_id, true, + HINIC5_CHANNEL_NIC); + if (err != 0) { + nicif_err(nic_dev, drv, netdev, "Failed to enable vport\n"); + goto vport_enable_err; + } + + err = hinic5_maybe_set_port_state(nic_dev, true); + if (err != 0) { + nicif_err(nic_dev, drv, netdev, "Failed to enable port\n"); + goto port_enable_err; + } + + netif_set_real_num_tx_queues(netdev, nic_dev->q_params.num_qps); + netif_set_real_num_rx_queues(netdev, nic_dev->q_params.num_qps); + netif_tx_wake_all_queues(netdev); + + if (test_bit(HINIC5_FORCE_LINK_UP, &nic_dev->flags) != 0) { + link_status = true; + netif_carrier_on(netdev); + } else { + err = hinic5_get_link_state(nic_dev->hwdev, &link_status); + if (err == 0 && link_status != 0) + netif_carrier_on(netdev); + } + + queue_delayed_work(nic_dev->workq, &nic_dev->moderation_task, + HINIC5_MODERATONE_DELAY); + if (test_bit(HINIC5_RXQ_RECOVERY, &nic_dev->flags) != 0) + queue_delayed_work(nic_dev->workq, &nic_dev->rxq_check_work, HZ); + + hinic5_print_link_message(nic_dev, link_status); + + if (!HINIC5_FUNC_IS_VF(nic_dev->hwdev)) + hinic5_notify_all_vfs_link_changed(nic_dev->hwdev, link_status); + + return 0; + +port_enable_err: + 
hinic5_set_vport_enable(nic_dev->hwdev, glb_func_id, false, + HINIC5_CHANNEL_NIC); + +vport_enable_err: + hinic5_flush_qps_res(nic_dev->hwdev); + /* After set vport disable 100ms, no packets will be send to host */ + msleep(100); + + return err; +} + +void hinic5_vport_down(struct hinic5_nic_dev *nic_dev) +{ + u16 glb_func_id; + + netif_carrier_off(nic_dev->netdev); + netif_tx_disable(nic_dev->netdev); + + cancel_delayed_work_sync(&nic_dev->rxq_check_work); + + cancel_delayed_work_sync(&nic_dev->moderation_task); + + if (hinic5_get_chip_present_flag(nic_dev->hwdev) != 0) { + if (!HINIC5_FUNC_IS_VF(nic_dev->hwdev)) + hinic5_notify_all_vfs_link_changed(nic_dev->hwdev, 0); + + if (nic_dev->state != 0) + nicif_info(nic_dev, drv, nic_dev->netdev, "Skip changing mag status!\n"); + else + hinic5_maybe_set_port_state(nic_dev, false); + + glb_func_id = hinic5_global_func_id(nic_dev->hwdev); + hinic5_set_vport_enable(nic_dev->hwdev, glb_func_id, false, + HINIC5_CHANNEL_NIC); + + hinic5_flush_txqs(nic_dev->netdev); + /* After set vport disable 100ms, + * no packets will be send to host + * FPGA set 2000ms + */ + msleep(nic_dev->timeout.wait_flush_qp_res_timeout); + + if (nic_dev->usr_qps_num > 0) + hinic5_flush_qps_res_by_nums(nic_dev->hwdev, + nic_dev->max_qps - nic_dev->usr_qps_num); + else + hinic5_flush_qps_res(nic_dev->hwdev); + } +} + +int hinic5_change_channel_settings(struct hinic5_nic_dev *nic_dev, + struct hinic5_dyna_txrxq_params *trxq_params, + hinic5_reopen_handler reopen_handler, + const void *priv_data) +{ + struct hinic5_dyna_qp_params new_qp_params = {0}; + struct hinic5_dyna_qp_params cur_qp_params = {0}; + int err; + u16 num_qps; + + hinic5_config_num_qps(nic_dev, trxq_params); + + err = hinic5_alloc_channel_resources(nic_dev, &new_qp_params, + trxq_params); + if (err != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to alloc channel resources\n"); + return err; + } + + if (test_and_set_bit(HINIC5_CHANGE_RES_INVALID, &nic_dev->flags) == 0) { + 
hinic5_vport_down(nic_dev); + hinic5_close_channel(nic_dev, &cur_qp_params); + hinic5_free_channel_resources(nic_dev, &cur_qp_params, + &nic_dev->q_params); + } + num_qps = trxq_params->num_qps + trxq_params->xdp_qps; + if (nic_dev->num_qp_irq > num_qps) + hinic5_qp_irq_change(nic_dev, num_qps); + nic_dev->q_params = *trxq_params; + + err = hinic5_open_channel(nic_dev, &new_qp_params, trxq_params); + if (err != 0) + goto open_channel_err; + + err = hinic5_vport_up(nic_dev); + if (err != 0) + goto vport_up_err; + + clear_bit(HINIC5_CHANGE_RES_INVALID, &nic_dev->flags); + nicif_info(nic_dev, drv, nic_dev->netdev, "Change channel settings success\n"); + + return 0; + +vport_up_err: + hinic5_close_channel(nic_dev, &new_qp_params); + +open_channel_err: + hinic5_free_channel_resources(nic_dev, &new_qp_params, trxq_params); + + return err; +} + +int hinic5_open(struct net_device *netdev) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic5_dyna_qp_params qp_params = {0}; + int err; + + err = hinic5_wait_for_devices_flush(NULL, 0, NULL); + if (err != 0) + return err; + + if (test_bit(HINIC5_INTF_UP, &nic_dev->flags) != 0) { + nicif_info(nic_dev, drv, netdev, "Netdev already open, do nothing\n"); + return 0; + } + + err = hinic5_init_nicio_res(nic_dev->hwdev, nic_dev->usr_qps_num); + if (err != 0) { + nicif_err(nic_dev, drv, netdev, "Failed to init nicio resources\n"); + return err; + } + + err = hinic5_setup_num_qps(nic_dev); + if (err != 0) { + nicif_err(nic_dev, drv, netdev, "Failed to setup num_qps\n"); + goto setup_qps_err; + } + + err = hinic5_alloc_channel_resources(nic_dev, &qp_params, + &nic_dev->q_params); + if (err != 0) + goto alloc_channel_res_err; + + err = hinic5_open_channel(nic_dev, &qp_params, &nic_dev->q_params); + if (err != 0) + goto open_channel_err; + + err = hinic5_vport_up(nic_dev); + if (err != 0) + goto vport_up_err; + + set_bit(HINIC5_INTF_UP, &nic_dev->flags); + nicif_info(nic_dev, drv, nic_dev->netdev, "Netdev is up\n"); + 
+ return 0; + +vport_up_err: + hinic5_close_channel(nic_dev, &qp_params); + +open_channel_err: + hinic5_free_channel_resources(nic_dev, &qp_params, &nic_dev->q_params); + +alloc_channel_res_err: + hinic5_destroy_num_qps(nic_dev); + +setup_qps_err: + hinic5_deinit_nicio_res(nic_dev->hwdev); + + return err; +} + +#ifdef HAVE_XDP_SUPPORT +int hinic5_set_xdp_num(struct hinic5_nic_dev *nic_dev, struct hinic5_dyna_txrxq_params *trxq_params) +{ + if (hinic5_is_xdp_enable(nic_dev)) { + trxq_params->xdp_qps = trxq_params->num_qps; + /* 使能 XDP 的情况, + * 需要检查内核队列数和XDP队列数和小于最大队列数 + */ + if (trxq_params->num_qps + trxq_params->xdp_qps > nic_dev->max_qps) { + nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to change num qps\n"); + return -EINVAL; + } + } else { + trxq_params->xdp_qps = 0; + } + + return 0; +} + +int hinic5_safe_switch_channels(struct hinic5_nic_dev *nic_dev) +{ + int err; + struct hinic5_dyna_txrxq_params q_params = {0}; + + q_params = nic_dev->q_params; + q_params.sq_depth = nic_dev->q_params.sq_depth; + q_params.rq_depth = nic_dev->q_params.rq_depth; + q_params.txqs_res = NULL; + q_params.rxqs_res = NULL; + q_params.irq_cfg = NULL; + + err = hinic5_set_xdp_num(nic_dev, &q_params); + if (err != 0) + return err; + return hinic5_change_channel_settings(nic_dev, &q_params, NULL, NULL); +} +#endif + +int hinic5_close(struct net_device *netdev) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic5_dyna_qp_params qp_params = {0}; + + if (test_and_clear_bit(HINIC5_INTF_UP, &nic_dev->flags) == 0) { + nicif_info(nic_dev, drv, netdev, "Netdev already close, do nothing\n"); + return 0; + } + + if (test_and_clear_bit(HINIC5_CHANGE_RES_INVALID, &nic_dev->flags) != 0) + goto out; + + hinic5_vport_down(nic_dev); + hinic5_close_channel(nic_dev, &qp_params); + hinic5_free_channel_resources(nic_dev, &qp_params, &nic_dev->q_params); + +out: + hinic5_deinit_nicio_res(nic_dev->hwdev); + hinic5_destroy_num_qps(nic_dev); + + nicif_info(nic_dev, drv, 
nic_dev->netdev, "Netdev is down\n"); + + return 0; +} + +/* + * Currently, hinic5_close is reused to flush nic devices during sdinanoos hot-replace. + */ +int hinic5_flush_nic_dev(void *priv_data) +{ + struct hinic5_lld_dev *lld_dev = priv_data; + struct net_device *net_dev = NULL; + struct hinic5_nic_dev *nic_dev = NULL; + int is_in_kexec; + int err; + + net_dev = hinic5_get_netdev_by_lld(lld_dev); + if (!net_dev) + return -ENODEV; + + is_in_kexec = hinic5_vram_get_kexec_flag(); + + nic_dev = netdev_priv(net_dev); + + nic_dev->state = (u32)is_in_kexec; + err = hinic5_close(net_dev); + nic_dev->state = 0; + + return err; +} + +#define IPV6_ADDR_LEN 4 +#define PKT_INFO_LEN 9 +#define BITS_PER_TUPLE 32 +static u32 calc_xor_rss(u8 *rss_tuple, u32 len) +{ + u32 hash_value; + u32 i; + + hash_value = rss_tuple[0]; + for (i = 1; i < len; i++) + hash_value = hash_value ^ rss_tuple[i]; + + return hash_value; +} + +static u32 calc_toep_rss(const u32 *rss_tuple, u32 len, const u32 *rss_key) +{ + u32 rss = 0; + u32 i, j; + + for (i = 1; i <= len; i++) { + for (j = 0; j < BITS_PER_TUPLE; j++) + if ((rss_tuple[i - 1] & ((u32)1 << + (u32)((BITS_PER_TUPLE - 1) - j))) != 0) + rss ^= (rss_key[i - 1] << j) | + (u32)((u64)rss_key[i] >> + (BITS_PER_TUPLE - j)); + } + + return rss; +} + +#define RSS_VAL(val, type) \ + (((type) == HINIC5_RSS_HASH_ENGINE_TYPE_TOEP) ? ntohl(val) : (val)) + +static u8 parse_ipv6_info(struct sk_buff *skb, u32 *rss_tuple, + u8 hash_engine, u32 *len, unsigned char *l3_hdr) +{ + unsigned char *l4_hdr = (skb->encapsulation != 0) ?
+ skb_inner_transport_header(skb) : skb_transport_header(skb); + struct ipv6hdr *ipv6hdr = (struct ipv6hdr *)l3_hdr; + u32 *saddr = (u32 *)(u8 *)&ipv6hdr->saddr; + u32 *daddr = (u32 *)(u8 *)&ipv6hdr->daddr; + u8 i; + + for (i = 0; i < IPV6_ADDR_LEN; i++) { + rss_tuple[i] = RSS_VAL(daddr[i], hash_engine); + /* The offset of the sport relative to the dport is 4 */ + rss_tuple[(u32)(i + IPV6_ADDR_LEN)] = + RSS_VAL(saddr[i], hash_engine); + } + *len = IPV6_ADDR_LEN + IPV6_ADDR_LEN; + + /* IPv6 packets with extension headers(include IPv6 fragment packets) + * re hashed according to L3(s_ip&d_ip), align with ucode RSS. + */ + if ((uintptr_t)l3_hdr + sizeof(*ipv6hdr) == (uintptr_t)l4_hdr) + return ipv6hdr->nexthdr; + return 0; +} + +static u32 calc_rss_prepare(struct sk_buff *skb, u32 *rss_tuple, struct hinic5_nic_dev *nic_dev) +{ + u32 len = 0; + u8 l4_proto = 0; + unsigned char *l3_hdr = NULL; + unsigned char *l4_hdr = NULL; + struct iphdr *iphdr = NULL; + u8 tunnel_flag = skb->encapsulation; + struct nic_rss_type rss_type = nic_dev->rss_type; + u8 hash_engine = nic_dev->rss_hash_engine; + + l3_hdr = (tunnel_flag != 0) ? skb_inner_network_header(skb) : skb_network_header(skb); + iphdr = (struct iphdr *)l3_hdr; + + if (iphdr->version == IPV4_VERSION) { + rss_tuple[len++] = RSS_VAL(iphdr->daddr, hash_engine); + rss_tuple[len++] = RSS_VAL(iphdr->saddr, hash_engine); + + /* IP fragmented packets are hashed according to L3(s_ip&d_ip), + * align with ucode RSS. + */ + l4_proto = ip_is_fragment(iphdr) ? 
0 : iphdr->protocol; + } else if (iphdr->version == IPV6_VERSION) { + l4_proto = parse_ipv6_info(skb, (u32 *)rss_tuple, hash_engine, &len, l3_hdr); + } else { + return len; + } + + if ((iphdr->version == IPV4_VERSION && + ((l4_proto == IPPROTO_UDP && rss_type.udp_ipv4 != 0) || + (l4_proto == IPPROTO_TCP && rss_type.tcp_ipv4 != 0))) || + (iphdr->version == IPV6_VERSION && + ((l4_proto == IPPROTO_UDP && rss_type.udp_ipv6 != 0) || + (l4_proto == IPPROTO_TCP && rss_type.tcp_ipv6 != 0)))) { + l4_hdr = (tunnel_flag != 0) ? + skb_inner_transport_header(skb) : skb_transport_header(skb); + /* High 16 bits are dport, low 16 bits are sport. */ + rss_tuple[len++] = ((u32)ntohs(*((u16 *)l4_hdr + 1U)) << 16) | + ntohs(*(u16 *)l4_hdr); + } /* rss_type.ipv4 and rss_type.ipv6 default on. */ + return len; +} + +static u16 select_queue_by_hash_func(struct net_device *dev, struct sk_buff *skb, + unsigned int num_tx_queues) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(dev); + struct iphdr *iphdr = NULL; + struct ipv6hdr *ipv6hdr = NULL; + u32 rss_tuple[PKT_INFO_LEN] = {0}; + u32 len = 0; + u32 hash = 0; + u32 *saddr = NULL; + u32 *daddr = NULL; + u8 hash_engine = nic_dev->rss_hash_engine; + + if (skb_rx_queue_recorded(skb)) { + hash = skb_get_rx_queue(skb); + if (unlikely(hash >= num_tx_queues)) + hash %= num_tx_queues; + + return (u16)hash; + } + + iphdr = ip_hdr(skb); + + /* If the tunnel packet has outer IP fragmentation or an IPv6 extension header, + * it should be hashed directly at the L3. 
+ */ + if (skb->encapsulation != 0) { + if (iphdr->version == IPV4_VERSION) { + if (ip_is_fragment(iphdr)) { + rss_tuple[len++] = RSS_VAL(iphdr->daddr, hash_engine); + rss_tuple[len++] = RSS_VAL(iphdr->saddr, hash_engine); + goto hash_by_l3; + } + } else if (iphdr->version == IPV6_VERSION) { + ipv6hdr = ipv6_hdr(skb); + saddr = (u32 *)(u8 *)&ipv6hdr->saddr; + daddr = (u32 *)(u8 *)&ipv6hdr->daddr; + if (skb_network_header(skb) + sizeof(*ipv6hdr) != + skb_transport_header(skb)) { + for (len = 0; len < IPV6_ADDR_LEN; len++) { + rss_tuple[len] = RSS_VAL(daddr[len], hash_engine); + /* The offset of the sport relative to the dport is 4 */ + rss_tuple[(u32)(len + IPV6_ADDR_LEN)] = + RSS_VAL(saddr[len], hash_engine); + } + len += IPV6_ADDR_LEN; + goto hash_by_l3; + } + } else { + return HINIC5_INVALID_QUEUE; + } + } + + /* Calculate the RSS tuple and length for + * the inner layer of tunnel packets or non-tunnel packets. + */ + len = calc_rss_prepare(skb, (u32 *)rss_tuple, nic_dev); + if (len == 0) + return HINIC5_INVALID_QUEUE; + +hash_by_l3: + if (hash_engine == HINIC5_RSS_HASH_ENGINE_TYPE_TOEP) + hash = calc_toep_rss((u32 *)rss_tuple, len, + nic_dev->rss_hkey_be); + else + hash = calc_xor_rss((u8 *)rss_tuple, len * (u32)sizeof(u32)); + + return (u16)nic_dev->rss_indir[hash & 0xFF]; +} + +#define GET_DSCP_PRI_OFFSET 2 +static u8 hinic5_get_dscp_up(struct hinic5_nic_dev *nic_dev, struct sk_buff *skb) +{ + int dscp_cp; + + if (skb->protocol == htons(ETH_P_IP)) + dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> GET_DSCP_PRI_OFFSET; + else if (skb->protocol == htons(ETH_P_IPV6)) + dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> GET_DSCP_PRI_OFFSET; + else + return nic_dev->hw_dcb_cfg.default_cos; + return nic_dev->hw_dcb_cfg.dscp2cos[dscp_cp]; +} + +#if defined(HAVE_NDO_SELECT_QUEUE_SB_DEV_ONLY) +static u16 hinic5_select_queue(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev) +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK) +#if 
defined(HAVE_NDO_SELECT_QUEUE_SB_DEV) +static u16 hinic5_select_queue(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback) +#else +static u16 hinic5_select_queue(struct net_device *netdev, struct sk_buff *skb, + __always_unused void *accel, + select_queue_fallback_t fallback) +#endif + +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL) +static u16 hinic5_select_queue(struct net_device *netdev, struct sk_buff *skb, + __always_unused void *accel) + +#else +static u16 hinic5_select_queue(struct net_device *netdev, struct sk_buff *skb) +#endif /* end of HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK */ +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + u16 txq; + u8 cos, qp_num; + + if (test_bit(HINIC5_SAME_RXTX, &nic_dev->flags) != 0) { + txq = select_queue_by_hash_func(netdev, skb, netdev->real_num_tx_queues); + if (txq != HINIC5_INVALID_QUEUE) + return txq; + } + + txq = +#if defined(HAVE_NDO_SELECT_QUEUE_SB_DEV_ONLY) + netdev_pick_tx(netdev, skb, NULL); +#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK) +#ifdef HAVE_NDO_SELECT_QUEUE_SB_DEV + fallback(netdev, skb, sb_dev); +#else + fallback(netdev, skb); +#endif +#else + skb_tx_hash(netdev, skb); +#endif + + if (test_bit(HINIC5_DCB_ENABLE, &nic_dev->flags) != 0) { + if (nic_dev->hw_dcb_cfg.trust == DCB_PCP) { + if (skb->vlan_tci != 0) + cos = nic_dev->hw_dcb_cfg.pcp2cos[skb->vlan_tci >> VLAN_PRIO_SHIFT]; + else + cos = nic_dev->hw_dcb_cfg.default_cos; + } else { + cos = hinic5_get_dscp_up(nic_dev, skb); + } + + qp_num = (nic_dev->hw_dcb_cfg.cos_qp_num[cos] != 0) ? 
+ txq % nic_dev->hw_dcb_cfg.cos_qp_num[cos] : 0; + txq = nic_dev->hw_dcb_cfg.cos_qp_offset[cos] + qp_num; + } + + return txq; +} + +static void hinic5_get_tx_stats64(struct hinic5_nic_dev *nic_dev, + u64 *bytes, u64 *packets, u64 *dropped, unsigned int *start) +{ + struct hinic5_txq_stats *txq_stats = NULL; + struct hinic5_txq *txq = NULL; + int i; + + for (i = 0; i < nic_dev->max_qps; i++) { + if (!nic_dev->txqs) + break; + + txq = &nic_dev->txqs[i]; + txq_stats = &txq->txq_stats; + do { + *start = u64_stats_fetch_begin(&txq_stats->syncp); + *bytes += txq_stats->bytes; + *packets += txq_stats->packets; + *dropped += txq_stats->dropped; + } while (u64_stats_fetch_retry(&txq_stats->syncp, *start)); + } +} + +#ifdef HAVE_NDO_GET_STATS64 +#ifdef HAVE_VOID_NDO_GET_STATS64 +static void hinic5_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#else +static struct rtnl_link_stats64 *hinic5_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +#endif + +#else /* !HAVE_NDO_GET_STATS64 */ +static struct net_device_stats *hinic5_get_stats(struct net_device *netdev) +#endif +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); +#ifndef HAVE_NDO_GET_STATS64 +#ifdef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats *stats = &netdev->stats; +#else + struct net_device_stats *stats = &nic_dev->net_stats; +#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ +#endif /* HAVE_NDO_GET_STATS64 */ + struct hinic5_rxq_stats *rxq_stats = NULL; + struct hinic5_rxq *rxq = NULL; + u64 bytes, packets, dropped, errors, multi_cast; + unsigned int start; + int i; + struct hinic5_vport_stats vport_stats = {0}; + int err; + + bytes = 0; + packets = 0; + dropped = 0; + + hinic5_get_tx_stats64(nic_dev, &bytes, &packets, &dropped, &start); + /* 与1823V100保持一致,仅PF使用,降低通道压力; + * 协议栈bond场景暂时不支持芯片丢包统计读取 + */ + if (!HINIC5_FUNC_IS_VF(nic_dev->hwdev) && !netif_is_bond_slave(netdev) && + !netif_is_bond_master(netdev)) { + err = hinic5_get_vport_stats(nic_dev->hwdev, 
+ hinic5_global_func_id(nic_dev->hwdev), + &vport_stats); + if (err != 0) + nicif_err(nic_dev, drv, netdev, "Failed to get function stats from fw.\n"); + } + + stats->tx_packets = packets; + stats->tx_bytes = bytes; + stats->tx_dropped = dropped; + + bytes = 0; + packets = 0; + errors = 0; + dropped = 0; + multi_cast = 0; + for (i = 0; i < nic_dev->max_qps; i++) { + if (!nic_dev->rxqs) + break; + + rxq = &nic_dev->rxqs[i]; + rxq_stats = &rxq->rxq_stats; + do { + start = u64_stats_fetch_begin(&rxq_stats->syncp); + bytes += rxq_stats->bytes; + packets += rxq_stats->packets; + errors += rxq_stats->csum_errors + + rxq_stats->other_errors; + dropped += rxq_stats->dropped; + multi_cast += rxq_stats->pkt_mc; + } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); + } + stats->rx_packets = packets; + stats->rx_bytes = bytes; + stats->rx_errors = errors; + stats->rx_dropped = dropped + vport_stats.rx_discard_vport; + stats->multicast = multi_cast; + +#ifndef HAVE_VOID_NDO_GET_STATS64 + return stats; +#endif +} + +#ifdef HAVE_NDO_TX_TIMEOUT_TXQ +static void hinic5_tx_timeout(struct net_device *netdev, unsigned int txqueue) +#else +static void hinic5_tx_timeout(struct net_device *netdev) +#endif +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic5_io_queue *sq = NULL; + struct hinic5_txq *txq = NULL; + bool hw_err = false; + u32 sw_pi, hw_ci; + u8 q_id; + + HINIC5_NIC_STATS_INC(nic_dev, netdev_tx_timeout); + nicif_err(nic_dev, drv, netdev, "Tx timeout\n"); + + for (q_id = 0; q_id < nic_dev->q_params.num_qps + nic_dev->q_params.xdp_qps; q_id++) { + if (!netif_xmit_stopped(netdev_get_tx_queue(netdev, q_id))) + continue; + txq = &nic_dev->txqs[q_id]; + sq = nic_dev->txqs[q_id].sq; + sw_pi = hinic5_get_sq_local_pi(sq); + hw_ci = hinic5_get_sq_hw_ci(sq); + nicif_info(nic_dev, drv, netdev, + "txq%u: sw_pi: %u, hw_ci: %u, sw_ci: %u, napi->state: 0x%lx.\n", + q_id, sw_pi, hw_ci, hinic5_get_sq_local_ci(sq), + nic_dev->q_params.irq_cfg[q_id].napi.state); + + 
if (sw_pi != hw_ci) { + hw_err = true; + TXQ_STATS_INC(txq, unfinished); + } + } + + if (hw_err) + set_bit(EVENT_WORK_TX_TIMEOUT, &nic_dev->event_flag); +} + +__weak int hinic5_change_mtu_pre_hook(struct net_device *netdev, int new_mtu) +{ + return 0; +} + +static int hinic5_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + u32 mtu = (u32)new_mtu; + int err = 0; + int is_in_kexec = hinic5_vram_get_kexec_flag(); +#if defined(HAVE_XDP_SUPPORT) && (defined(HAVE_NDO_BPF) || defined(HAVE_NDO_XDP)) + u32 xdp_max_mtu; +#endif + + err = hinic5_change_mtu_pre_hook(netdev, new_mtu); + if (err != 0) { + nicif_err(nic_dev, drv, netdev, "Skip mtu config\n"); + return err; + } + + if (is_in_kexec != 0) { + nicif_info(nic_dev, drv, netdev, "Hotreplace skip change mtu\n"); + return err; + } + +#if defined(HAVE_XDP_SUPPORT) && (defined(HAVE_NDO_BPF) || defined(HAVE_NDO_XDP)) + if (hinic5_is_xdp_enable(nic_dev)) { + xdp_max_mtu = (u32)hinic5_xdp_max_mtu(nic_dev); + if (mtu > xdp_max_mtu) { + nicif_err(nic_dev, drv, netdev, + "Max MTU for xdp usage is %u\n", xdp_max_mtu); + return -EINVAL; + } + } +#endif + + err = hinic5_set_port_mtu(nic_dev->hwdev, (u16)mtu); + if (err != 0) { + nicif_err(nic_dev, drv, netdev, "Failed to change port mtu to %d\n", + new_mtu); + } else { + nicif_info(nic_dev, drv, nic_dev->netdev, "Change mtu from %u to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = mtu; + nic_dev->nic_hinic5_vram->hinic5_vram_mtu = mtu; + } + + return err; +} + +__weak int hinic5_set_mac_addr_pre_hook(struct net_device *netdev, void *addr) +{ + return 0; +} + +static int hinic5_set_mac_addr(struct net_device *netdev, void *addr) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + struct sockaddr *saddr = addr; + int err; + + err = hinic5_set_mac_addr_pre_hook(netdev, addr); + if (err != 0) { + nicif_err(nic_dev, drv, netdev, "Skip mac config\n"); + return err; + } + + if (!is_valid_ether_addr((const u8 
*)saddr->sa_data)) + return -EADDRNOTAVAIL; + + if (ether_addr_equal(netdev->dev_addr, (const u8 *)saddr->sa_data)) { + nicif_info(nic_dev, drv, netdev, + "Already using mac address %pM\n", + saddr->sa_data); + return 0; + } + + err = hinic5_update_mac(nic_dev->hwdev, nic_dev->netdev->dev_addr, (u8 *)saddr->sa_data, 0, + hinic5_global_func_id(nic_dev->hwdev)); + if (err != 0) + return err; + + hinic5_eth_hw_addr_set(netdev, saddr->sa_data); + + nicif_info(nic_dev, drv, netdev, "Set new mac address %pM\n", + saddr->sa_data); + + return 0; +} + +#if defined(HAVE_NDO_UDP_TUNNEL_ADD) || defined(HAVE_UDP_TUNNEL_NIC_INFO) +static int hinic5_udp_tunnel_port_config(struct net_device *netdev, + struct udp_tunnel_info *ti, u8 action) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + u16 func_id = hinic5_global_func_id(nic_dev->hwdev); + u16 dst_port; + int ret = 0; + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + dst_port = ntohs(ti->port); + ret = hinic5_vxlan_port_config(nic_dev->hwdev, func_id, + dst_port, action, 0); + if (ret != 0 && ret != -EOPNOTSUPP) { + nicif_warn(nic_dev, drv, netdev, "Setting vxlan port %u to device not supported\n", + dst_port); + break; + } + /* Success (or tolerated -EOPNOTSUPP): must not fall through to + * default, which would overwrite ret with -EINVAL. + */ + break; + default: + ret = -EINVAL; + } + return ret; +} +#endif /* HAVE_NDO_UDP_TUNNEL_ADD || HAVE_UDP_TUNNEL_NIC_INFO */ + +#ifdef HAVE_NDO_UDP_TUNNEL_ADD +static void hinic5_udp_tunnel_add(struct net_device *netdev, struct udp_tunnel_info *ti) +{ + if (ti->sa_family != AF_INET && ti->sa_family != AF_INET6) + return; + hinic5_udp_tunnel_port_config(netdev, ti, HINIC5_CMD_OP_ADD); +} + +static void hinic5_udp_tunnel_del(struct net_device *netdev, struct udp_tunnel_info *ti) +{ + if (ti->sa_family != AF_INET && ti->sa_family != AF_INET6) + return; + + hinic5_udp_tunnel_port_config(netdev, ti, HINIC5_CMD_OP_DEL); +} +#endif /* HAVE_NDO_UDP_TUNNEL_ADD */ + +#ifdef HAVE_UDP_TUNNEL_NIC_INFO +int hinic5_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, + unsigned int entry, struct
udp_tunnel_info *ti) +{ + return hinic5_udp_tunnel_port_config(netdev, ti, HINIC5_CMD_OP_ADD); +} + +int hinic5_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, + unsigned int entry, struct udp_tunnel_info *ti) +{ + return hinic5_udp_tunnel_port_config(netdev, ti, HINIC5_CMD_OP_DEL); +} +#endif /* HAVE_UDP_TUNNEL_NIC_INFO */ + +#if (KERNEL_VERSION(3, 3, 0) > LINUX_VERSION_CODE) +static void +#else +static int +#endif +hinic5_vlan_rx_add_vid(struct net_device *netdev, + #if (KERNEL_VERSION(3, 10, 0) <= LINUX_VERSION_CODE) + __always_unused __be16 proto, + #endif + u16 vid) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + unsigned long *vlan_bitmap = nic_dev->vlan_bitmap; + u16 func_id; + u32 col, line; + int err = 0; + + /* VLAN 0 donot be added, which is the same as VLAN 0 deleted. */ + if (vid == 0) + goto end; + + col = VID_COL(nic_dev, vid); + line = VID_LINE(nic_dev, vid); + + func_id = hinic5_global_func_id(nic_dev->hwdev); + + err = hinic5_add_vlan(nic_dev->hwdev, vid, func_id); + if (err != 0) { + nicif_err(nic_dev, drv, netdev, "Failed to add vlan %u\n", vid); + goto end; + } + + set_bit(col, &vlan_bitmap[line]); + + nicif_info(nic_dev, drv, netdev, "Add vlan %u\n", vid); + +end: +#if (KERNEL_VERSION(3, 3, 0) <= LINUX_VERSION_CODE) + return err; +#else + return; +#endif +} + +#if (KERNEL_VERSION(3, 3, 0) > LINUX_VERSION_CODE) +static void +#else +static int +#endif +hinic5_vlan_rx_kill_vid(struct net_device *netdev, + #if (KERNEL_VERSION(3, 10, 0) <= LINUX_VERSION_CODE) + __always_unused __be16 proto, + #endif + u16 vid) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + unsigned long *vlan_bitmap = nic_dev->vlan_bitmap; + u16 func_id; + int col, line; + int err = 0; + + col = VID_COL(nic_dev, vid); + line = (int)(VID_LINE(nic_dev, vid)); + + /* In the broadcast scenario, ucode finds the corresponding function + * based on VLAN 0 of vlan table. If we delete VLAN 0, the VLAN function + * is affected. 
+ */ + if (vid == 0) + goto end; + + func_id = hinic5_global_func_id(nic_dev->hwdev); + err = hinic5_del_vlan(nic_dev->hwdev, vid, func_id); + if (err != 0) { + nicif_err(nic_dev, drv, netdev, "Failed to delete vlan\n"); + goto end; + } + + clear_bit(col, &vlan_bitmap[line]); + + nicif_info(nic_dev, drv, netdev, "Remove vlan %u\n", vid); + +end: +#if (KERNEL_VERSION(3, 3, 0) <= LINUX_VERSION_CODE) + return err; +#else + return; +#endif +} + +#ifdef NEED_VLAN_RESTORE +static int hinic5_vlan_restore(struct net_device *netdev) +{ + int err = 0; +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) + struct net_device *vlandev = NULL; + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + unsigned long *vlan_bitmap = nic_dev->vlan_bitmap; + u32 col, line; + u16 i; + + if (!netdev->netdev_ops->ndo_vlan_rx_add_vid) + return -EFAULT; + rcu_read_lock(); + for (i = 0; i < VLAN_N_VID; i++) { +#ifdef HAVE_VLAN_FIND_DEV_DEEP_RCU + vlandev = + __vlan_find_dev_deep_rcu(netdev, htons(ETH_P_8021Q), i); +#else + vlandev = __vlan_find_dev_deep(netdev, htons(ETH_P_8021Q), i); +#endif + col = VID_COL(nic_dev, i); + line = VID_LINE(nic_dev, i); + if (!vlandev && (vlan_bitmap[line] & (1UL << col)) != 0) { +#if (KERNEL_VERSION(3, 10, 0) <= LINUX_VERSION_CODE) + err = netdev->netdev_ops->ndo_vlan_rx_kill_vid(netdev, + htons(ETH_P_8021Q), i); + if (err != 0) { + hinic5_err(nic_dev, drv, "delete vlan %u failed, err code %d\n", + i, err); + break; + } +#else + netdev->netdev_ops->ndo_vlan_rx_kill_vid(netdev, i); +#endif + } else if (vlandev && (vlan_bitmap[line] & (1UL << col)) == 0) { +#if (KERNEL_VERSION(3, 10, 0) <= LINUX_VERSION_CODE) + err = netdev->netdev_ops->ndo_vlan_rx_add_vid(netdev, + htons(ETH_P_8021Q), i); + if (err != 0) { + hinic5_err(nic_dev, drv, "restore vlan %u failed, err code %d\n", + i, err); + break; + } +#else + netdev->netdev_ops->ndo_vlan_rx_add_vid(netdev, i); +#endif + } + } + rcu_read_unlock(); +#endif + + return err; +} +#endif + +#define 
SET_FEATURES_OP_STR(op) (((op) != 0) ? "Enable" : "Disable") + +static int set_feature_rx_csum(struct hinic5_nic_dev *nic_dev, + netdev_features_t wanted_features, + netdev_features_t features, + netdev_features_t *failed_features) +{ + netdev_features_t changed = wanted_features ^ features; + + if ((changed & NETIF_F_RXCSUM) != 0) + hinic5_info(nic_dev, drv, "%s rx csum success\n", + SET_FEATURES_OP_STR(wanted_features & + NETIF_F_RXCSUM)); + + return 0; +} + +static int set_feature_tso(struct hinic5_nic_dev *nic_dev, + netdev_features_t wanted_features, + netdev_features_t features, + netdev_features_t *failed_features) +{ + netdev_features_t changed = wanted_features ^ features; + + if ((changed & NETIF_F_TSO) != 0) + hinic5_info(nic_dev, drv, "%s tso success\n", + SET_FEATURES_OP_STR(wanted_features & NETIF_F_TSO)); + + return 0; +} + +#ifdef NETIF_F_UFO +static int set_feature_ufo(struct hinic5_nic_dev *nic_dev, + netdev_features_t wanted_features, + netdev_features_t features, + netdev_features_t *failed_features) +{ + netdev_features_t changed = wanted_features ^ features; + + if (changed & NETIF_F_UFO) + hinic5_info(nic_dev, drv, "%s ufo success\n", + SET_FEATURES_OP_STR(wanted_features & NETIF_F_UFO)); + + return 0; +} +#endif + +static int set_feature_lro(struct hinic5_nic_dev *nic_dev, + netdev_features_t wanted_features, + netdev_features_t features, + netdev_features_t *failed_features) +{ + netdev_features_t changed = wanted_features ^ features; + bool en = (wanted_features & NETIF_F_LRO) != 0; + int err; + + if ((changed & NETIF_F_LRO) == 0) + return 0; + +#if defined(HAVE_XDP_SUPPORT) && (defined(HAVE_NDO_BPF) || defined(HAVE_NDO_XDP)) + if (en && hinic5_is_xdp_enable(nic_dev)) { + hinic5_err(nic_dev, drv, "Can not enable LRO when xdp is enable\n"); + *failed_features |= NETIF_F_LRO; + return -EINVAL; + } +#endif + + err = hinic5_set_rx_lro_state(nic_dev->hwdev, en, + HINIC5_LRO_DEFAULT_TIME_LIMIT, + HINIC5_LRO_DEFAULT_COAL_PKT_SIZE); + if (err != 
0) { + hinic5_err(nic_dev, drv, "%s lro failed\n", + SET_FEATURES_OP_STR(en)); + *failed_features |= NETIF_F_LRO; + } else { + hinic5_info(nic_dev, drv, "%s lro success\n", + SET_FEATURES_OP_STR(en)); + } + + return err; +} + +static int set_feature_rx_cvlan(struct hinic5_nic_dev *nic_dev, + netdev_features_t wanted_features, + netdev_features_t features, + netdev_features_t *failed_features) +{ + netdev_features_t changed = wanted_features ^ features; +#ifdef NETIF_F_HW_VLAN_CTAG_RX + netdev_features_t vlan_feature = NETIF_F_HW_VLAN_CTAG_RX; +#else + netdev_features_t vlan_feature = NETIF_F_HW_VLAN_RX; +#endif + bool en = (wanted_features & vlan_feature) != 0; + int err; + + if ((changed & vlan_feature) == 0) + return 0; + + err = hinic5_set_rx_vlan_offload(nic_dev->hwdev, en); + if (err != 0) { + hinic5_err(nic_dev, drv, "%s rxvlan failed\n", + SET_FEATURES_OP_STR(en)); + *failed_features |= vlan_feature; + } else { + hinic5_info(nic_dev, drv, "%s rxvlan success\n", + SET_FEATURES_OP_STR(en)); + } + + return err; +} + +static int set_feature_vlan_filter(struct hinic5_nic_dev *nic_dev, + netdev_features_t wanted_features, + netdev_features_t features, + netdev_features_t *failed_features) +{ + netdev_features_t changed = wanted_features ^ features; +#if defined(NETIF_F_HW_VLAN_CTAG_FILTER) + netdev_features_t vlan_filter_feature = NETIF_F_HW_VLAN_CTAG_FILTER; +#elif defined(NETIF_F_HW_VLAN_FILTER) + netdev_features_t vlan_filter_feature = NETIF_F_HW_VLAN_FILTER; +#endif + bool en = (wanted_features & vlan_filter_feature) != 0; + int err = 0; + + if ((changed & vlan_filter_feature) == 0) + return 0; + +#ifdef NEED_VLAN_RESTORE + if (en != 0) { + err = hinic5_vlan_restore(nic_dev->netdev); + if (err != 0) { + hinic5_err(nic_dev, drv, "vlan restore failed\n"); + *failed_features |= vlan_filter_feature; + return err; + } + } +#endif + + err = hinic5_set_vlan_fliter(nic_dev->hwdev, en); + if (err != 0) { + hinic5_err(nic_dev, drv, "%s rx vlan filter failed\n", + 
SET_FEATURES_OP_STR(en)); + *failed_features |= vlan_filter_feature; + } else { + hinic5_info(nic_dev, drv, "%s rx vlan filter success\n", + SET_FEATURES_OP_STR(en)); + } + + return err; +} + +static int set_features(struct hinic5_nic_dev *nic_dev, + netdev_features_t pre_features, + netdev_features_t features) +{ + netdev_features_t failed_features = 0; + u32 err = 0; + + err |= (u32)set_feature_rx_csum(nic_dev, features, pre_features, + &failed_features); + err |= (u32)set_feature_tso(nic_dev, features, pre_features, + &failed_features); + err |= (u32)set_feature_lro(nic_dev, features, pre_features, + &failed_features); +#ifdef NETIF_F_UFO + err |= (u32)set_feature_ufo(nic_dev, features, pre_features, + &failed_features); +#endif + err |= (u32)set_feature_rx_cvlan(nic_dev, features, pre_features, + &failed_features); + err |= (u32)set_feature_vlan_filter(nic_dev, features, pre_features, + &failed_features); + if (err != 0) { + nic_dev->netdev->features = features ^ failed_features; + return -EIO; + } + + return 0; +} + +#ifdef HAVE_NDO_SET_U32_FEATURES +static int hinic5_set_features(struct net_device *netdev, u32 features) +#else +static int hinic5_set_features(struct net_device *netdev, + netdev_features_t features) +#endif +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + + return set_features(nic_dev, nic_dev->netdev->features, + features); +} + +int hinic5_set_hw_features(struct hinic5_nic_dev *nic_dev) +{ + /* enable all hw features in netdev->features */ + return set_features(nic_dev, ~nic_dev->netdev->features, + nic_dev->netdev->features); +} + +#ifdef HAVE_NDO_SET_U32_FEATURES +static u32 hinic5_fix_features(struct net_device *netdev, u32 features) +#else +static netdev_features_t hinic5_fix_features(struct net_device *netdev, + netdev_features_t features) +#endif +{ + netdev_features_t features_tmp = features; + + /* If Rx checksum is disabled, then LRO should also be disabled */ + if ((features_tmp & NETIF_F_RXCSUM) == 0) + features_tmp &= 
~NETIF_F_LRO; + + return features_tmp; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void hinic5_netpoll(struct net_device *netdev) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + u16 i; + + for (i = 0; i < nic_dev->q_params.num_qps; i++) + napi_schedule(&nic_dev->q_params.irq_cfg[i].napi); +} +#endif /* CONFIG_NET_POLL_CONTROLLER */ + +static int hinic5_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + struct hinic5_nic_dev *adapter = netdev_priv(netdev); + int err; + + if (is_multicast_ether_addr(mac) || vf >= hinic5_get_vf_num(adapter->lld_dev)) + return -EINVAL; + + err = hinic5_set_vf_mac(adapter->hwdev, OS_VF_ID_TO_HW(vf), mac); + if (err != 0) + return err; + + if (!is_zero_ether_addr(mac)) + nic_info(adapter->lld_dev->dev, "Setting MAC %pM on VF %d\n", + mac, vf); + else + nic_info(adapter->lld_dev->dev, "Deleting MAC on VF %d\n", vf); + + nic_info(adapter->lld_dev->dev, "Please reload the VF driver to make this change effective."); + + return 0; +} + +#ifdef IFLA_VF_MAX +static int set_hw_vf_vlan(void *hwdev, u16 cur_vlanprio, int vf, + u16 vlan, u8 qos) +{ + int err = 0; + u16 old_vlan = cur_vlanprio & VLAN_VID_MASK; + + if (vlan != 0 || qos != 0) { + if (cur_vlanprio != 0) { + err = hinic5_kill_vf_vlan(hwdev, OS_VF_ID_TO_HW(vf)); + if (err != 0) + return err; + } + err = hinic5_add_vf_vlan(hwdev, OS_VF_ID_TO_HW(vf), vlan, qos); + } else { + err = hinic5_kill_vf_vlan(hwdev, OS_VF_ID_TO_HW(vf)); + } + + if (err == 0) + err = hinic5_update_mac_vlan(hwdev, old_vlan, vlan, OS_VF_ID_TO_HW(vf)); + + return err; +} + +#define HINIC5_MAX_VLAN_ID 4094 +#define HINIC5_MAX_QOS_NUM 7 + +#ifdef IFLA_VF_VLAN_INFO_MAX +static int hinic5_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, + u8 qos, __be16 vlan_proto) +#else +static int hinic5_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, + u8 qos) +#endif +{ + struct hinic5_nic_dev *adapter = netdev_priv(netdev); + + u16 vlanprio, cur_vlanprio; + + if (vf >= 
hinic5_get_vf_num(adapter->lld_dev) || + vlan > HINIC5_MAX_VLAN_ID || qos > HINIC5_MAX_QOS_NUM) + return -EINVAL; +#ifdef IFLA_VF_VLAN_INFO_MAX + if (vlan_proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; +#endif + vlanprio = vlan | (qos << HINIC5_VLAN_PRIORITY_SHIFT); + cur_vlanprio = hinic5_vf_info_vlanprio(adapter->hwdev, + OS_VF_ID_TO_HW(vf)); + /* duplicate request, so just return success */ + if (vlanprio == cur_vlanprio) + return 0; + + return set_hw_vf_vlan(adapter->hwdev, cur_vlanprio, vf, vlan, qos); +} +#endif + +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE +static int hinic5_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, + bool setting) +{ + struct hinic5_nic_dev *adapter = netdev_priv(netdev); + int err = 0; + bool cur_spoofchk = false; + + if (vf >= hinic5_get_vf_num(adapter->lld_dev)) + return -EINVAL; + + cur_spoofchk = hinic5_vf_info_spoofchk(adapter->hwdev, + OS_VF_ID_TO_HW(vf)); + /* same request, so just return success */ + if ((setting && cur_spoofchk) || (!setting && !cur_spoofchk)) + return 0; + + err = hinic5_set_vf_spoofchk(adapter->hwdev, + (u16)OS_VF_ID_TO_HW(vf), setting); + if (err == 0) + nicif_info(adapter, drv, netdev, "Set VF %d spoofchk %s\n", + vf, setting ? "on" : "off"); + + return err; +} +#endif + +#ifdef HAVE_NDO_SET_VF_TRUST +static int hinic5_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) +{ + struct hinic5_nic_dev *adapter = netdev_priv(netdev); + int err; + bool cur_trust; + + if (vf >= hinic5_get_vf_num(adapter->lld_dev)) + return -EINVAL; + + cur_trust = hinic5_get_vf_trust(adapter->hwdev, + OS_VF_ID_TO_HW(vf)); + /* same request, so just return success */ + if ((setting && cur_trust) || (!setting && !cur_trust)) + return 0; + + err = hinic5_set_vf_trust(adapter->hwdev, + (u16)OS_VF_ID_TO_HW(vf), setting); + if (err == 0) + nicif_info(adapter, drv, netdev, "Set VF %d trusted %s successfully\n", + vf, setting ? 
"on" : "off"); + else + nicif_err(adapter, drv, netdev, "Failed set VF %d trusted %s\n", + vf, setting ? "on" : "off"); + + return err; +} +#endif + +static int hinic5_ndo_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivi) +{ + struct hinic5_nic_dev *adapter = netdev_priv(netdev); + + if (vf >= hinic5_get_vf_num(adapter->lld_dev)) + return -EINVAL; + + hinic5_get_vf_config(adapter->hwdev, (u16)OS_VF_ID_TO_HW(vf), ivi); + + return 0; +} + +/** + * hinic5_ndo_set_vf_link_state + * @netdev: network interface device structure + * @vf_id: VF identifier + * @link: required link state + * + * Set the link state of a specified VF, regardless of physical link state + **/ +int hinic5_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) +{ + static const char * const vf_link[] = {"auto", "enable", "disable"}; + struct hinic5_nic_dev *adapter = netdev_priv(netdev); + int err; + + /* validate the request */ + if (vf_id >= hinic5_get_vf_num(adapter->lld_dev)) { + nicif_err(adapter, drv, netdev, + "Invalid VF Identifier %d\n", vf_id); + return -EINVAL; + } + + err = hinic5_set_vf_link_state(adapter->hwdev, + (u16)OS_VF_ID_TO_HW(vf_id), link); + if (err == 0) + nicif_info(adapter, drv, netdev, "Set VF %d link state: %s\n", + vf_id, vf_link[link]); + + return err; +} + +static int is_set_vf_bw_param_valid(const struct hinic5_nic_dev *adapter, + int vf, int min_tx_rate, int max_tx_rate) +{ + int enable_vf_num = hinic5_get_vf_num(adapter->lld_dev); + + if (!HINIC5_SUPPORT_RATE_LIMIT(adapter->hwdev)) { + nicif_err(adapter, drv, adapter->netdev, "Current function doesn't support to set vf rate limit\n"); + return -EOPNOTSUPP; + } + + /* verify VF is active */ + if (vf >= enable_vf_num) { + nicif_err(adapter, drv, adapter->netdev, + "VF number must be less than %d\n", enable_vf_num); + return -EINVAL; + } + + if (max_tx_rate < min_tx_rate) { + nicif_err(adapter, drv, adapter->netdev, "Invalid rate, max rate %d must greater than min rate %d\n", 
+ max_tx_rate, min_tx_rate); + return -EINVAL; + } + + return 0; +} + +#define HINIC5_TX_RATE_TABLE_FULL 12 + +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE +static int hinic5_ndo_set_vf_bw(struct net_device *netdev, + int vf, int min_tx_rate, int max_tx_rate) +#else +static int hinic5_ndo_set_vf_bw(struct net_device *netdev, int vf, + int max_tx_rate) +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ +{ + struct hinic5_nic_dev *adapter = netdev_priv(netdev); + struct mag_port_info port_info = {0}; +#ifndef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + int min_tx_rate = 0; +#endif + u8 link_status = 0; + u32 speeds[] = {0, SPEED_10, SPEED_100, SPEED_1000, SPEED_10000, + SPEED_25000, SPEED_40000, SPEED_50000, SPEED_100000, SPEED_200000, + SPEED_400000, SPEED_400000}; + int err = 0; + + err = is_set_vf_bw_param_valid(adapter, vf, min_tx_rate, max_tx_rate); + if (err != 0) + return err; + + err = hinic5_get_link_state(adapter->hwdev, &link_status); + if (err != 0) { + nicif_err(adapter, drv, netdev, "Get link status failed when set vf tx rate\n"); + return -EIO; + } + + if (link_status == 0) { + nicif_err(adapter, drv, netdev, "Link status must be up when set vf tx rate\n"); + return -EINVAL; + } + + err = hinic5_get_port_info(adapter->hwdev, &port_info, + HINIC5_CHANNEL_NIC); + if (err != 0 || port_info.speed >= PORT_SPEED_UNKNOWN) + return -EIO; + + /* rate limit cannot be less than 0 and greater than link speed */ + if (max_tx_rate < 0 || max_tx_rate > (int)(speeds[port_info.speed])) { + nicif_err(adapter, drv, netdev, "Set vf max tx rate must be in [0 - %u]\n", + speeds[port_info.speed]); + return -EINVAL; + } + + err = hinic5_set_vf_tx_rate(adapter->hwdev, (u16)OS_VF_ID_TO_HW(vf), + (u32)max_tx_rate, (u32)min_tx_rate); + if (err != 0) { + nicif_err(adapter, drv, netdev, + "Unable to set VF %d max rate %d min rate %d%s\n", + vf, max_tx_rate, min_tx_rate, + err == HINIC5_TX_RATE_TABLE_FULL ? 
+ ", tx rate profile is full" : ""); + return -EIO; + } + +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + nicif_info(adapter, drv, netdev, + "Set VF %d max tx rate %d min tx rate %d successfully\n", + vf, max_tx_rate, min_tx_rate); +#else + nicif_info(adapter, drv, netdev, "Set VF %d tx rate %d successfully\n", vf, max_tx_rate); +#endif + + return 0; +} + +#if defined(HAVE_XDP_SUPPORT) && (defined(HAVE_NDO_BPF) || defined(HAVE_NDO_XDP)) +bool hinic5_is_xdp_enable(struct hinic5_nic_dev *nic_dev) +{ + return !!nic_dev->xdp_prog; +} + +int hinic5_xdp_max_mtu(struct hinic5_nic_dev *nic_dev) +{ + /* To Check MTU, support use integreted cqe, XDP_PACKET_HEADROOM and skb_shared_info */ + return nic_dev->rx_buff_len - (ETH_HLEN + ETH_FCS_LEN + + VLAN_HLEN + VLAN_HLEN) - HINIC5_COMPACT_CQE_16B - + XDP_PACKET_HEADROOM - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); +} + +#if (defined(HAVE_NDO_BPF) || defined(HAVE_NDO_XDP)) +static int hinic5_xdp_setup(struct hinic5_nic_dev *nic_dev, + struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct bpf_prog *old_prog = NULL; + int max_mtu = hinic5_xdp_max_mtu(nic_dev); + int q_id, err = 0; + + if ((XDP_QPS_NUM_EXPANSION * nic_dev->q_params.num_qps) > nic_dev->max_qps) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to setup xdp program, max qps %u is less than double of current qps %u, please reduce current num qps\n", + nic_dev->max_qps, nic_dev->q_params.num_qps); + NL_SET_ERR_MSG_MOD(extack, + "Failed to setup xdp program, max qps is less than double of current qps"); + return -EINVAL; + } + + if (nic_dev->netdev->mtu > (u32)max_mtu) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to setup xdp program, the current MTU %u is larger than max allowed MTU %d\n", + nic_dev->netdev->mtu, max_mtu); + NL_SET_ERR_MSG_MOD(extack, + "MTU too large for loading xdp program"); + return -EINVAL; + } + + if ((nic_dev->netdev->features & NETIF_F_LRO) != 0) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to setup xdp 
program while LRO is on\n"); + NL_SET_ERR_MSG_MOD(extack, + "Failed to setup xdp program while LRO is on"); + return -EINVAL; + } + + old_prog = xchg(&nic_dev->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + if (!nic_dev->remove_flag && netif_running(nic_dev->netdev)) { + err = hinic5_safe_switch_channels(nic_dev); + if (err) + return err; + } + + for (q_id = 0; q_id < nic_dev->max_qps; q_id++) + xchg(&nic_dev->rxqs[q_id].xdp_prog, nic_dev->xdp_prog); + + return 0; +} + +#if defined(HAVE_NDO_BPF) +static int hinic5_xdp(struct net_device *netdev, struct netdev_bpf *xdp) +#elif defined(HAVE_NDO_XDP) +static int hinic5_xdp(struct net_device *netdev, struct netdev_xdp *xdp) +#endif +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + + switch (xdp->command) { + case XDP_SETUP_PROG: + return hinic5_xdp_setup(nic_dev, xdp->prog, xdp->extack); +#ifdef HAVE_XDP_QUERY_PROG + case XDP_QUERY_PROG: + xdp->prog_id = nic_dev->xdp_prog ? + nic_dev->xdp_prog->aux->id : 0; + return 0; +#endif + default: + return -EINVAL; + } +} +#endif +#endif + +static int hinic5_netdev_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + + switch (cmd) { + case SIOCGHWTSTAMP: + return hinic5_ptp_get_ts_config(nic_dev, ifr); + case SIOCSHWTSTAMP: + return hinic5_ptp_set_ts_config(nic_dev, ifr); + default: + return -EOPNOTSUPP; + } +} + +static const struct net_device_ops hinic5_netdev_ops = { + .ndo_open = hinic5_open, + .ndo_stop = hinic5_close, + .ndo_start_xmit = hinic5_xmit_frame, +#if (KERNEL_VERSION(5, 1, 1) <= LINUX_VERSION_CODE) + .ndo_setup_tc = hinic5_setup_tc, +#endif +#ifdef HAVE_NDO_GET_STATS64 + .ndo_get_stats64 = hinic5_get_stats64, +#else + .ndo_get_stats = hinic5_get_stats, +#endif /* HAVE_NDO_GET_STATS64 */ + +#ifdef HAVE_NDO_ETH_IOCTL + .ndo_eth_ioctl = hinic5_netdev_ioctl, +#else + .ndo_do_ioctl = hinic5_netdev_ioctl, +#endif + .ndo_tx_timeout = hinic5_tx_timeout, + .ndo_select_queue = 
hinic5_select_queue, +#ifdef HAVE_NET_DEV_OPS_EXT_NDO_CHANGE_MTU + .extended.ndo_change_mtu = hinic5_change_mtu, +#else + .ndo_change_mtu = hinic5_change_mtu, +#endif + .ndo_set_mac_address = hinic5_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + .ndo_vlan_rx_add_vid = hinic5_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = hinic5_vlan_rx_kill_vid, +#endif + +#ifdef HAVE_NET_DEVICE_OPS_EXTENDED + /* RHEL7 requires this to be defined to enable extended ops. RHEL7 + * uses the function get_ndo_ext to retrieve offsets for extended + * fields from with the net_device_ops struct and ndo_size is checked + * to determine whether or not the offset is valid. + */ + .ndo_size = sizeof(const struct net_device_ops), +#endif + +#ifdef IFLA_VF_MAX + .ndo_set_vf_mac = hinic5_ndo_set_vf_mac, +#ifdef HAVE_NET_DEV_OPS_EXT_NDO_SET_VF_VLAN + .extended.ndo_set_vf_vlan = hinic5_ndo_set_vf_vlan, +#else + .ndo_set_vf_vlan = hinic5_ndo_set_vf_vlan, +#endif +#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + .ndo_set_vf_rate = hinic5_ndo_set_vf_bw, +#else + .ndo_set_vf_tx_rate = hinic5_ndo_set_vf_bw, +#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ +#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + .ndo_set_vf_spoofchk = hinic5_ndo_set_vf_spoofchk, +#endif + +#ifdef HAVE_NDO_SET_VF_TRUST +#ifdef HAVE_NET_DEVICE_OPS_EXTENDED + .extended.ndo_set_vf_trust = hinic5_ndo_set_vf_trust, +#else + .ndo_set_vf_trust = hinic5_ndo_set_vf_trust, +#endif /* HAVE_NET_DEVICE_OPS_EXTENDED */ +#endif /* HAVE_NDO_SET_VF_TRUST */ + + .ndo_get_vf_config = hinic5_ndo_get_vf_config, +#endif + +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = hinic5_netpoll, +#endif /* CONFIG_NET_POLL_CONTROLLER */ + + .ndo_set_rx_mode = hinic5_nic_set_rx_mode, + +#ifdef HAVE_XDP_SUPPORT + .ndo_xdp_xmit = hinic5_xdp_xmit_frames, +#if defined(HAVE_NDO_BPF) + .ndo_bpf = hinic5_xdp, +#elif defined(HAVE_NDO_XDP) + .ndo_xdp = hinic5_xdp, +#endif +#endif +#ifdef 
HAVE_NDO_UDP_TUNNEL_ADD + .ndo_udp_tunnel_add = hinic5_udp_tunnel_add, + .ndo_udp_tunnel_del = hinic5_udp_tunnel_del, +#endif /* HAVE_NDO_UDP_TUNNEL_ADD */ +#ifdef HAVE_NET_DEVICE_OPS_EXT +}; + +/* RHEL6 keeps these operations in a separate structure */ +static const struct net_device_ops_ext hinic5_netdev_ops_ext = { + .size = sizeof(struct net_device_ops_ext), +#endif /* HAVE_NET_DEVICE_OPS_EXT */ + +#ifdef HAVE_NDO_SET_VF_LINK_STATE + .ndo_set_vf_link_state = hinic5_ndo_set_vf_link_state, +#endif + +#ifdef HAVE_NDO_SET_FEATURES + .ndo_fix_features = hinic5_fix_features, + .ndo_set_features = hinic5_set_features, +#endif /* HAVE_NDO_SET_FEATURES */ +}; + +static const struct net_device_ops hinic5vf_netdev_ops = { + .ndo_open = hinic5_open, + .ndo_stop = hinic5_close, + .ndo_start_xmit = hinic5_xmit_frame, + +#ifdef HAVE_NDO_GET_STATS64 + .ndo_get_stats64 = hinic5_get_stats64, +#else + .ndo_get_stats = hinic5_get_stats, +#endif /* HAVE_NDO_GET_STATS64 */ + + .ndo_tx_timeout = hinic5_tx_timeout, + .ndo_select_queue = hinic5_select_queue, + +#ifdef HAVE_NET_DEVICE_OPS_EXTENDED + /* RHEL7 requires this to be defined to enable extended ops. RHEL7 + * uses the function get_ndo_ext to retrieve offsets for extended + * fields from with the net_device_ops struct and ndo_size is checked + * to determine whether or not the offset is valid. 
+ */ + .ndo_size = sizeof(const struct net_device_ops), +#endif + +#ifdef HAVE_NET_DEV_OPS_EXT_NDO_CHANGE_MTU + .extended.ndo_change_mtu = hinic5_change_mtu, +#else + .ndo_change_mtu = hinic5_change_mtu, +#endif + .ndo_set_mac_address = hinic5_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + +#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + .ndo_vlan_rx_add_vid = hinic5_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = hinic5_vlan_rx_kill_vid, +#endif + +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = hinic5_netpoll, +#endif /* CONFIG_NET_POLL_CONTROLLER */ + + .ndo_set_rx_mode = hinic5_nic_set_rx_mode, + +#ifdef HAVE_XDP_SUPPORT + .ndo_xdp_xmit = hinic5_xdp_xmit_frames, +#if defined(HAVE_NDO_BPF) + .ndo_bpf = hinic5_xdp, +#elif defined(HAVE_NDO_XDP) + .ndo_xdp = hinic5_xdp, +#endif +#endif +#ifdef HAVE_NET_DEVICE_OPS_EXT +}; + +/* RHEL6 keeps these operations in a separate structure */ +static const struct net_device_ops_ext hinic5vf_netdev_ops_ext = { + .size = sizeof(struct net_device_ops_ext), +#endif /* HAVE_NET_DEVICE_OPS_EXT */ + +#ifdef HAVE_NDO_SET_FEATURES + .ndo_fix_features = hinic5_fix_features, + .ndo_set_features = hinic5_set_features, +#endif /* HAVE_NDO_SET_FEATURES */ +}; + +void hinic5_set_netdev_ops(struct hinic5_nic_dev *nic_dev) +{ + if (!HINIC5_FUNC_IS_VF(nic_dev->hwdev)) { + nic_dev->netdev->netdev_ops = &hinic5_netdev_ops; +#ifdef HAVE_NET_DEVICE_OPS_EXT + set_netdev_ops_ext(nic_dev->netdev, &hinic5_netdev_ops_ext); +#endif /* HAVE_NET_DEVICE_OPS_EXT */ + } else { + nic_dev->netdev->netdev_ops = &hinic5vf_netdev_ops; +#ifdef HAVE_NET_DEVICE_OPS_EXT + set_netdev_ops_ext(nic_dev->netdev, &hinic5vf_netdev_ops_ext); +#endif /* HAVE_NET_DEVICE_OPS_EXT */ + } +} + +bool hinic5_is_netdev_ops_match(const struct net_device *netdev) +{ + return netdev->netdev_ops == &hinic5_netdev_ops || + netdev->netdev_ops == &hinic5vf_netdev_ops; +} diff --git 
a/hinic5/src/dpu_platform_library/host/service/nic/linux/netdev/hinic5_netdev_ops.h b/hinic5/src/dpu_platform_library/host/service/nic/linux/netdev/hinic5_netdev_ops.h new file mode 100644 index 00000000..847c92c5 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/netdev/hinic5_netdev_ops.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved. */ + +#ifndef HINIC5_NETDEV_OPS_H +#define HINIC5_NETDEV_OPS_H + +#define HINIC5_DEFAULT_RX_CSUM_OFFLOAD 0xFFF +#define HINIC5_INVALID_QUEUE 0xFFFF + +/* to enable xdp, we suppose we can double current qps */ +#define XDP_QPS_NUM_EXPANSION 2 + +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/netdev/hinic5_nic_dev.h b/hinic5/src/dpu_platform_library/host/service/nic/linux/netdev/hinic5_nic_dev.h new file mode 100644 index 00000000..f783159a --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/netdev/hinic5_nic_dev.h @@ -0,0 +1,472 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved. 
*/ + +#ifndef HINIC5_NIC_DEV_H +#define HINIC5_NIC_DEV_H + +#include <linux/netdevice.h> +#include <linux/semaphore.h> +#include <linux/types.h> +#include <linux/bitops.h> +#include <linux/ptp_clock_kernel.h> +#include <linux/net_tstamp.h> +#include <linux/rhashtable.h> + +#include "ossl_knl.h" +#include "hinic5_lld.h" +#include "hinic5_nic_io.h" +#include "hinic5_nic_cfg.h" +#include "hinic5_tx.h" +#include "hinic5_rx.h" +#include "hinic5_dcb.h" +#include "hinic5_profile.h" +#include "hinic5_macsec_dev.h" +#include "hinic5_vram_common.h" + +#define HINIC5_NIC_DRV_NAME "hinic5" +#define HINIC5_NIC_DRV_VERSION GLOBAL_VERSION_STR + +#define HINIC5_FUNC_IS_VF(hwdev) (hinic5_func_type(hwdev) == TYPE_VF) + +#define HINIC5_AVG_PKT_SMALL 256U +#define HINIC5_MODERATONE_DELAY HZ + +#define LP_PKT_CNT 64 +#define LP_PKT_LEN 60 + +enum hinic5_flags { + HINIC5_INTF_UP, + HINIC5_MAC_FILTER_CHANGED, + HINIC5_LP_TEST, + HINIC5_RSS_ENABLE, + HINIC5_DCB_ENABLE, + HINIC5_SAME_RXTX, + HINIC5_INTR_ADAPT, + HINIC5_UPDATE_MAC_FILTER, + HINIC5_CHANGE_RES_INVALID, + HINIC5_FORCE_LINK_UP, + HINIC5_BONDING_MASTER, + HINIC5_AUTONEG_RESET, + HINIC5_RXQ_RECOVERY, + HINIC5_BONDING_BLOCK, + HINIC5_PTP_CLOCK, + HINIC5_DCB_UP_COS_SETTING, +}; + +#define HINIC5_CHANNEL_RES_VALID(nic_dev) \ + ((test_bit(HINIC5_INTF_UP, &(nic_dev)->flags) != 0) && \ + (test_bit(HINIC5_CHANGE_RES_INVALID, &(nic_dev)->flags) == 0)) + +#define RX_BUFF_NUM_PER_PAGE 2 + +#define VLAN_BITMAP_BYTE_SIZE(nic_dev) (sizeof(*(nic_dev)->vlan_bitmap)) +#define VLAN_BITMAP_BITS_SIZE(nic_dev) (VLAN_BITMAP_BYTE_SIZE(nic_dev) * 8) +#define VLAN_NUM_BITMAPS(nic_dev) (VLAN_N_VID / \ + VLAN_BITMAP_BITS_SIZE(nic_dev)) +#define VLAN_BITMAP_SIZE(nic_dev) (VLAN_N_VID / \ + VLAN_BITMAP_BYTE_SIZE(nic_dev)) +#define VID_LINE(nic_dev, vid) ((vid) / VLAN_BITMAP_BITS_SIZE(nic_dev)) +#define VID_COL(nic_dev, vid) ((vid) & (VLAN_BITMAP_BITS_SIZE(nic_dev) - 1)) + +#define NIC_DRV_DEFAULT_FEATURE NIC_F_ALL_MASK + +enum hinic5_event_work_flags { + 
EVENT_WORK_TX_TIMEOUT, +}; + +enum hinic5_rx_mode_state { + HINIC5_HW_PROMISC_ON, + HINIC5_HW_ALLMULTI_ON, + HINIC5_PROMISC_FORCE_ON, + HINIC5_ALLMULTI_FORCE_ON, +}; + +enum mac_filter_state { + HINIC5_MAC_WAIT_HW_SYNC, + HINIC5_MAC_HW_SYNCED, + HINIC5_MAC_WAIT_HW_UNSYNC, + HINIC5_MAC_HW_UNSYNCED, +}; + +struct hinic5_mac_filter { + struct list_head list; + u8 addr[ETH_ALEN]; + unsigned long state; +}; + +struct hinic5_irq { + struct net_device *netdev; + /* IRQ corresponding index number */ + u16 msix_entry_idx; + u16 rsvd1; + u32 irq_id; /* The IRQ number from OS */ + + char irq_name[IFNAMSIZ + 16]; + struct napi_struct napi; + cpumask_t affinity_mask; + struct hinic5_txq *txq; + struct hinic5_rxq *rxq; +}; + +struct hinic5_dyna_txrxq_params { + u16 num_qps; + u8 num_cos; + u8 rsvd1; + u16 xdp_qps; + u16 rsvd2; + u32 sq_depth; + u32 rq_depth; + + struct hinic5_dyna_txq_res *txqs_res; + struct hinic5_dyna_rxq_res *rxqs_res; + struct hinic5_irq *irq_cfg; +}; + +#define HINIC5_NIC_STATS_INC(nic_dev, field) \ +do { \ + u64_stats_update_begin(&(nic_dev)->stats.syncp); \ + (nic_dev)->stats.field++; \ + u64_stats_update_end(&(nic_dev)->stats.syncp); \ +} while (0) + +struct hinic5_nic_stats { + u64 netdev_tx_timeout; + + /* Subdivision statistics show in private tool */ + u64 tx_carrier_off_drop; + u64 tx_invalid_qid; + u64 rsvd1; + u64 rsvd2; +#ifdef HAVE_NDO_GET_STATS64 + struct u64_stats_sync syncp; +#else + struct u64_stats_sync_empty syncp; +#endif +}; + +#define HINIC5_TCAM_DYNAMIC_BLOCK_SIZE 16 +#define HINIC5_MAX_TCAM_FILTERS 1024 + +#define HINIC5_PKT_TCAM_DYNAMIC_INDEX_START(block_index) \ + (HINIC5_TCAM_DYNAMIC_BLOCK_SIZE * (block_index)) + +struct hinic5_rx_flow_rule { + struct list_head rules; + int tot_num_rules; +}; + +struct hinic5_tcam_dynamic_block { + struct list_head block_list; + u16 dynamic_block_id; + u16 dynamic_index_cnt; + u8 dynamic_index_used[HINIC5_TCAM_DYNAMIC_BLOCK_SIZE]; +}; + +struct hinic5_tcam_dynamic_block_info { + struct list_head 
tcam_dynamic_list; + u16 dynamic_block_cnt; +}; + +struct hinic5_tcam_filter { + struct list_head tcam_filter_list; + u16 dynamic_block_id; + u16 index; + struct tag_tcam_key tcam_key; + u16 queue; +}; + +/* function level struct info */ +struct hinic5_tcam_info { + u16 tcam_rule_nums; + struct list_head tcam_list; + struct hinic5_tcam_dynamic_block_info tcam_dynamic_info; +}; + +struct hinic5_hinic5_vram { + u32 hinic5_vram_mtu; + u16 hinic5_vram_num_qps; + unsigned long flags; + + /* dcb */ + u8 trust; /* pcp, dscp */ + u8 default_cos; +}; + +struct hinic5_ptp_ctrl { + unsigned long flags; /* PTP_TX_BUSY flag, ses enum hinic5_ptp_flags */ + void *hwdev; + u32 inc_val; /* rtc inc val per cycle */ + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_info; + spinlock_t ptp_clock_lock; /* lock for access ptp hw reg */ + struct sk_buff *tx_saved_skb; + struct hwtstamp_config config; + unsigned long tx_start; /* PTP tx send jiffies */ + int tx_enable; + int rx_enable; +}; + +struct hinic5_timeout { + u32 wait_flush_qp_res_timeout; +}; + +typedef u8 (*hinic5_nic_cqe_cb)(void *llddev, void *data); + +struct hinic5_tx_rx_ops { + void (*tx_set_wqe_offload)(struct hinic5_offload_info *offload_info, + struct hinic5_sq_wqe_combo *wqe_combo); + void (*rx_get_cqe_info)(struct hinic5_rq_cqe *rx_cqe, + struct hinic5_cqe_info *cqe_info, u8 cqe_mode, bool enable_pfe); + bool (*rx_cqe_done)(struct hinic5_rxq *rxq, struct hinic5_rq_cqe **rx_cqe); + hinic5_nic_cqe_cb cqe_cb[SERVICE_T_MAX]; + unsigned long cqe_cb_state[SERVICE_T_MAX]; + unsigned long cqe_cb_running[SERVICE_T_MAX]; +}; + +struct hinic5_nic_dev { + struct net_device *netdev; + struct hinic5_lld_dev *lld_dev; + void *hwdev; + void *extend; /* 产品自定义数据结构 */ + + /* Currently, 1 indicates is_in_kexec. 
*/ + u32 state; + + int poll_weight; + unsigned long *vlan_bitmap; + + u16 max_qps; + u16 usr_qps_num; + + u8 flow_bifur_group_num; + + u32 msg_enable; + unsigned long flags; + + u32 lro_replenish_thld; + u32 dma_rx_buff_size; + u16 rx_buff_len; + u32 page_order; + bool page_pool_enabled; + + /* Rss related varibles */ + u8 rss_hash_engine; + struct nic_rss_type rss_type; + u8 *rss_hkey; + /* hkey in big endian */ + u32 *rss_hkey_be; + u32 *rss_indir; + + u8 cos_config_num_max; + u8 func_dft_cos_bitmap; + u16 port_dft_cos_bitmap; /* used to tool validity check */ + + struct hinic5_dcb_config hw_dcb_cfg; + + struct hinic5_hinic5_vram *nic_hinic5_vram; + char nic_hinic5_vram_name[HINIC5_VRAM_NAME_MAX_LEN]; + + int disable_port_cnt; + + struct hinic5_qp_coalesce_info *intr_coalesce; + unsigned long last_moder_jiffies; + u32 adaptive_rx_coal; + u8 intr_coal_set_flag; + +#ifndef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats net_stats; +#endif + + struct hinic5_nic_stats stats; + + /* lock for nic resource */ + struct mutex nic_mutex; + bool force_port_disable; + struct semaphore port_state_sem; + u8 link_status; + + struct nic_service_cap nic_cap; + + struct hinic5_txq *txqs; + struct hinic5_rxq *rxqs; + struct hinic5_dyna_txrxq_params q_params; + u8 cqe_mode; /* rx_cqe */ + bool support_htn; + + u16 num_qp_irq; + struct irq_info *qps_irq_info; + + struct workqueue_struct *workq; + + struct work_struct rx_mode_work; + struct delayed_work moderation_task; + + struct list_head uc_filter_list; + struct list_head mc_filter_list; + unsigned long rx_mod_state; + int netdev_uc_cnt; + int netdev_mc_cnt; + + int lb_test_rx_idx; + int lb_pkt_len; + u8 *lb_test_rx_buf; + + struct hinic5_tcam_info tcam; + struct hinic5_rx_flow_rule rx_flow_rule; + +#ifdef HAVE_XDP_SUPPORT + struct bpf_prog *xdp_prog; + bool remove_flag; +#endif + + struct delayed_work periodic_work; + /* reference to enum hinic5_event_work_flags */ + unsigned long event_flag; + + struct hinic5_nic_prof_attr 
*prof_attr; + struct hinic5_prof_adapter *prof_adap; + u64 rsvd8[7]; + u8 cos_mask_mode; + u8 hw_default_cos_valid; + u8 hw_default_cos; + u8 tx_wqe_compact_task; + u32 rxq_get_err_times; + struct delayed_work rxq_check_work; + struct hinic5_ptp_ctrl ptp_ctrl; + + struct hinic5_tx_rx_ops tx_rx_ops; + + void *tc_info; + + struct hinic5_timeout timeout; + + struct macsec_resource *macsec_res; // MACsec模块使用资源 + struct work_struct arp_dual_work; + struct sk_buff_head arp_queue; +}; + +#define nicif_err(priv, type, dev, fmt, args...) \ + netif_level(err, priv, type, dev, "[NIC]" fmt, ##args) +#define nicif_warn(priv, type, dev, fmt, args...) \ + netif_level(warn, priv, type, dev, "[NIC]" fmt, ##args) +#define nicif_notice(priv, type, dev, fmt, args...) \ + netif_level(notice, priv, type, dev, "[NIC]" fmt, ##args) +#define nicif_info(priv, type, dev, fmt, args...) \ + netif_level(info, priv, type, dev, "[NIC]" fmt, ##args) +#define nicif_dbg(priv, type, dev, fmt, args...) \ + netif_level(dbg, priv, type, dev, "[NIC]" fmt, ##args) + +#define hinic_msg(level, nic_dev, msglvl, format, arg...) \ +do { \ + if ((nic_dev)->netdev && (nic_dev)->netdev->reg_state \ + == NETREG_REGISTERED) \ + nicif_##level((nic_dev), msglvl, (nic_dev)->netdev, \ + format, ## arg); \ + else \ + nic_##level((nic_dev)->lld_dev->dev, \ + format, ## arg); \ +} while (0) + +#define hinic5_info(nic_dev, msglvl, format, arg...) \ + hinic_msg(info, nic_dev, msglvl, format, ## arg) + +#define hinic5_warn(nic_dev, msglvl, format, arg...) \ + hinic_msg(warn, nic_dev, msglvl, format, ## arg) + +#define hinic5_err(nic_dev, msglvl, format, arg...) 
\ + hinic_msg(err, nic_dev, msglvl, format, ## arg) + +struct hinic5_uld_info *get_nic_uld_info(void); + +u32 hinic5_get_io_stats_size(const struct hinic5_nic_dev *nic_dev); + +int hinic5_get_io_stats(const struct hinic5_nic_dev *nic_dev, void *stats); + +int hinic5_open(struct net_device *netdev); + +int hinic5_close(struct net_device *netdev); + +int hinic5_flush_nic_dev(void *priv_data); + +void hinic5_set_ethtool_ops(struct net_device *netdev); + +void hinic5vf_set_ethtool_ops(struct net_device *netdev); + +int nic_ioctl(void *uld_dev, u32 cmd, const void *buf_in, + u32 in_size, void *buf_out, u32 *out_size); + +void hinic5_update_num_qps(struct net_device *netdev); + +void hinic5_set_netdev_ops(struct hinic5_nic_dev *nic_dev); + +bool hinic5_is_netdev_ops_match(const struct net_device *netdev); + +int hinic5_set_hw_features(struct hinic5_nic_dev *nic_dev); + +void hinic5_set_rx_mode_work(struct work_struct *work); + +void hinic5_clean_mac_list_filter(struct hinic5_nic_dev *nic_dev); + +void hinic5_get_strings(struct net_device *netdev, u32 stringset, u8 *data); + +void hinic5_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data); + +int hinic5_get_sset_count(struct net_device *netdev, int sset); + +int hinic5_force_port_disable(struct hinic5_nic_dev *nic_dev); + +int hinic5_force_set_port_state(struct hinic5_nic_dev *nic_dev, bool enable); + +int hinic5_maybe_set_port_state(struct hinic5_nic_dev *nic_dev, bool enable); + +#ifdef ETHTOOL_GLINKSETTINGS +#ifndef XENSERVER_HAVE_NEW_ETHTOOL_OPS +int hinic5_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *link_settings); +int hinic5_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings + *link_settings); +#endif +#endif + +#ifndef HAVE_NEW_ETHTOOL_LINK_SETTINGS_ONLY +int hinic5_get_settings(struct net_device *netdev, struct ethtool_cmd *ep); +int hinic5_set_settings(struct net_device *netdev, + struct ethtool_cmd 
*link_settings); +#endif + +void hinic5_auto_moderation_work(struct work_struct *work); + +typedef void (*hinic5_reopen_handler)(struct hinic5_nic_dev *nic_dev, + const void *priv_data); +int hinic5_change_channel_settings(struct hinic5_nic_dev *nic_dev, + struct hinic5_dyna_txrxq_params *trxq_params, + hinic5_reopen_handler reopen_handler, + const void *priv_data); + +void hinic5_link_status_change(struct hinic5_nic_dev *nic_dev, bool status); + +#ifdef HAVE_XDP_SUPPORT +bool hinic5_is_xdp_enable(struct hinic5_nic_dev *nic_dev); +int hinic5_xdp_max_mtu(struct hinic5_nic_dev *nic_dev); +int hinic5_safe_switch_channels(struct hinic5_nic_dev *nic_dev); +int hinic5_set_xdp_num(struct hinic5_nic_dev *nic_dev, + struct hinic5_dyna_txrxq_params *trxq_params); +#endif + +#if defined(ETHTOOL_GFECPARAM) && defined(ETHTOOL_SFECPARAM) +int hinic5_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam); +int hinic5_set_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam); +#endif + +#ifdef HAVE_UDP_TUNNEL_NIC_INFO +/* 设置vxlan dport */ +int hinic5_udp_tunnel_set_port(struct net_device *netdev, unsigned int table, + unsigned int entry, struct udp_tunnel_info *ti); +int hinic5_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table, + unsigned int entry, struct udp_tunnel_info *ti); +#endif /* HAVE_UDP_TUNNEL_NIC_INFO */ + +#endif + diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/nicio/hinic5_rx.c b/hinic5/src/dpu_platform_library/host/service/nic/linux/nicio/hinic5_rx.c new file mode 100644 index 00000000..cd4e7178 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/nicio/hinic5_rx.c @@ -0,0 +1,1588 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/skbuff.h> +#include <linux/dma-mapping.h> +#include 
<linux/interrupt.h> +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/device.h> +#include <linux/u64_stats_sync.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/sctp.h> +#include <linux/pkt_sched.h> +#include <linux/ipv6.h> +#include <linux/module.h> +#include <linux/compiler.h> + +#include "ossl_knl.h" +#include "hinic5_crm.h" +#include "hinic5_common.h" +#include "hinic5_nic_sq.h" +#include "hinic5_nic_rq.h" +#include "hinic5_nic_io.h" +#include "hinic5_srv_nic.h" +#include "hinic5_nic_dev.h" +#include "hinic5_rss.h" +#include "hinic5_ptp.h" +#include "hinic5_xdp.h" +#include "hinic5_rx.h" + +#ifdef HAVE_XDP_SUPPORT +#include <net/xdp.h> +#include <linux/bpf.h> +#include <linux/bpf_trace.h> +#endif + +#ifndef HAVE_PP_FLAG_PAGE_FRAG +#define PP_FLAG_PAGE_FRAG BIT(2) /* for page frag feature */ +#endif + +static bool rx_alloc_mapped_page(struct hinic5_nic_dev *nic_dev, + struct hinic5_rx_info *rx_info) +{ + struct page *page = rx_info->page; + dma_addr_t dma = rx_info->buf_dma_addr; + u32 page_offset = 0; + + if (likely(dma != 0)) + return true; + + /* alloc new page for storage */ +#ifdef HAVE_PAGE_POOL_SUPPORT + if (rx_info->page_pool) { + page = page_pool_alloc_frag(rx_info->page_pool, &page_offset, + nic_dev->rx_buff_len, + GFP_ATOMIC | __GFP_COMP); + if (unlikely(!page)) + return false; + dma = page_pool_get_dma_addr(page); + goto set_rx_info; + } +#endif + page = alloc_pages_node(NUMA_NO_NODE, GFP_ATOMIC | __GFP_COLD | + __GFP_COMP, nic_dev->page_order); + if (unlikely(!page)) + return false; + + /* map page for use */ + dma = dma_map_page(nic_dev->lld_dev->dev, page, 0, nic_dev->dma_rx_buff_size, + DMA_FROM_DEVICE); + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + */ + if (unlikely(dma_mapping_error(nic_dev->lld_dev->dev, dma) != 0)) { + __free_pages(page, nic_dev->page_order); + return false; + } + goto set_rx_info; + +set_rx_info: + 
rx_info->page = page; + rx_info->buf_dma_addr = dma; + rx_info->page_offset = page_offset; + + return true; +} + +static u32 hinic5_rx_fill_wqe(struct hinic5_rxq *rxq) +{ + struct net_device *netdev = rxq->netdev; + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + int rq_wqe_len = rxq->rq->wq.wqebb_size; + struct hinic5_rq_wqe *rq_wqe = NULL; + struct hinic5_rx_info *rx_info = NULL; + u32 i; + + for (i = 0; i < rxq->q_depth; i++) { + rx_info = &rxq->rx_info[i]; + rq_wqe = hinic5_rq_wqe_addr(rxq->rq, (u16)i); + + if (rxq->rq->wqe_type == HINIC5_EXTEND_RQ_WQE) { + /* unit of cqe length is 16B */ + hinic5_set_sge(&rq_wqe->extend_wqe.cqe_sect.sge, + rx_info->cqe_dma, + (HINIC5_CQE_LEN >> HINIC5_CQE_SIZE_SHIFT)); + /* use fixed len */ + rq_wqe->extend_wqe.buf_desc.sge.len = + nic_dev->rx_buff_len; + } else if (rxq->rq->wqe_type == HINIC5_NORMAL_RQ_WQE) { + rq_wqe->normal_wqe.cqe_hi_addr = + upper_32_bits(rx_info->cqe_dma); + rq_wqe->normal_wqe.cqe_lo_addr = + lower_32_bits(rx_info->cqe_dma); + } + + hinic5_hw_be32_len(rq_wqe, rq_wqe_len); + rx_info->rq_wqe = rq_wqe; + } + + return i; +} + +static u32 hinic5_rx_fill_buffers(struct hinic5_rxq *rxq) +{ + struct net_device *netdev = rxq->netdev; + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic5_rq_wqe *rq_wqe = NULL; + struct hinic5_rx_info *rx_info = NULL; + dma_addr_t dma_addr; + u32 i, free_wqebbs = rxq->delta - 1; + + for (i = 0; i < free_wqebbs; i++) { + rx_info = &rxq->rx_info[rxq->next_to_update]; + + if (unlikely(!rx_alloc_mapped_page(nic_dev, rx_info))) { + RXQ_STATS_INC(rxq, alloc_rx_buf_err); + break; + } + +#ifdef HAVE_XDP_SUPPORT + dma_addr = (rxq->xdp_headroom_flag == 0) ? 
+ rx_info->buf_dma_addr + rx_info->page_offset : + rx_info->buf_dma_addr + rx_info->page_offset + XDP_PACKET_HEADROOM; +#else + dma_addr = rx_info->buf_dma_addr + rx_info->page_offset; +#endif + + rq_wqe = rx_info->rq_wqe; + + /* Regardless of the WQE type, the address is located in the first 64 bits */ + rq_wqe->compact_wqe.buf_hi_addr = + hinic5_hw_be32(upper_32_bits(dma_addr)); + rq_wqe->compact_wqe.buf_lo_addr = + hinic5_hw_be32(lower_32_bits(dma_addr)); + + rxq->next_to_update = (u16)((rxq->next_to_update + 1) & rxq->q_mask); + } + + if (likely(i != 0)) { + hinic5_write_db(rxq->rq, + (rxq->q_id & 0x3), + RQ_CFLAG_DP, + (u16)((u32)rxq->next_to_update << rxq->rq->wqe_type)); + rxq->delta -= i; + rxq->next_to_alloc = rxq->next_to_update; + } else if (free_wqebbs == rxq->q_depth - 1) { + RXQ_STATS_INC(rxq, rx_buf_empty); + } + + return i; +} + +static u32 hinic5_rx_alloc_buffers(struct hinic5_nic_dev *nic_dev, u32 rq_depth, + struct hinic5_rx_info *rx_info_arr) +{ + u32 free_wqebbs = rq_depth - 1; + u32 idx; + + for (idx = 0; idx < free_wqebbs; idx++) { + if (!rx_alloc_mapped_page(nic_dev, &rx_info_arr[idx])) + break; + } + + return idx; +} + +static void hinic5_rx_free_buffers(struct hinic5_nic_dev *nic_dev, u32 q_depth, + struct hinic5_rx_info *rx_info_arr) +{ + struct hinic5_rx_info *rx_info = NULL; + u32 i; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < q_depth; i++) { + rx_info = &rx_info_arr[i]; + +#ifdef HAVE_PAGE_POOL_SUPPORT + if (rx_info->page_pool && rx_info->page) { + page_pool_put_full_page(rx_info->page_pool, + rx_info->page, false); + goto clean_info; + } +#endif + if (rx_info->buf_dma_addr != 0) { + dma_unmap_page(nic_dev->lld_dev->dev, + rx_info->buf_dma_addr, + nic_dev->dma_rx_buff_size, + DMA_FROM_DEVICE); + __free_pages(rx_info->page, nic_dev->page_order); + goto clean_info; + } +clean_info: + rx_info->buf_dma_addr = 0; + rx_info->page = NULL; + } +} + +void hinic5_reuse_rx_page(struct hinic5_rxq *rxq, + struct hinic5_rx_info 
*old_rx_info) +{ + struct hinic5_rx_info *new_rx_info = NULL; + u16 nta = rxq->next_to_alloc; + + new_rx_info = &rxq->rx_info[nta]; + + /* update, and store next to alloc */ + nta++; + rxq->next_to_alloc = (nta < rxq->q_depth) ? nta : 0; + + new_rx_info->page = old_rx_info->page; + new_rx_info->page_offset = old_rx_info->page_offset; + new_rx_info->buf_dma_addr = old_rx_info->buf_dma_addr; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rxq->dev, new_rx_info->buf_dma_addr, + new_rx_info->page_offset, + rxq->buf_len, + DMA_FROM_DEVICE); +} + +static bool hinic5_add_rx_frag(struct hinic5_rxq *rxq, + struct hinic5_rx_info *rx_info, + struct sk_buff *skb, u32 size, u8 packet_offset) +{ + struct page *page = NULL; + u8 *va = NULL; + + page = rx_info->page; + va = (u8 *)page_address(page) + rx_info->page_offset; + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + dma_sync_single_range_for_cpu(rxq->dev, + rx_info->buf_dma_addr, + rx_info->page_offset, + rxq->buf_len, + DMA_FROM_DEVICE); + + if (size <= HINIC5_RX_HDR_SIZE && !skb_is_nonlinear(skb)) { + __skb_put_data(skb, va + packet_offset, size); + +#ifdef HAVE_PAGE_POOL_SUPPORT + if (rx_info->page_pool) { + page_pool_put_full_page(rx_info->page_pool, page, false); + return false; + } +#endif + + /* page is not reserved, we can reuse buffer as-is */ + if (likely(page_to_nid(page) == numa_node_id())) + return true; + + /* this page cannot be reused so discard it */ + put_page(page); + goto discard_page; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + (int)rx_info->page_offset + packet_offset, (int)size, rxq->buf_len); + +#ifdef HAVE_PAGE_POOL_SUPPORT + if (rx_info->page_pool) { + skb_mark_for_recycle(skb); + return false; + } +#endif + + /* avoid re-using remote pages */ + if (unlikely(page_to_nid(page) != numa_node_id())) + goto discard_page; + + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + 
goto discard_page; + + /* flip page offset to other buffer */ + rx_info->page_offset ^= rxq->buf_len; + get_page(page); + + return true; + +discard_page: + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rxq->dev, rx_info->buf_dma_addr, + rxq->dma_rx_buff_size, DMA_FROM_DEVICE); + return false; +} + +static void packaging_skb(struct hinic5_rxq *rxq, struct sk_buff *head_skb, + u8 sge_num, u32 pkt_len, u8 packet_offset) +{ + struct hinic5_rx_info *rx_info = NULL; + struct sk_buff *skb = NULL; + u8 frag_num = 0; + u32 size; + u32 sw_ci; + u32 temp_pkt_len = pkt_len; + u8 temp_sge_num = sge_num; + u8 temp_offset = packet_offset; + + sw_ci = rxq->cons_idx & rxq->q_mask; + skb = head_skb; + while (temp_sge_num != 0) { + rx_info = &rxq->rx_info[sw_ci]; + sw_ci = (sw_ci + 1) & rxq->q_mask; + if (unlikely(temp_pkt_len > rxq->buf_len - temp_offset)) { + size = rxq->buf_len - temp_offset; + temp_pkt_len -= (rxq->buf_len - temp_offset); + } else { + size = temp_pkt_len; + } + + if (unlikely(frag_num == MAX_SKB_FRAGS)) { + frag_num = 0; + if (skb == head_skb) + skb = skb_shinfo(skb)->frag_list; + else + skb = skb->next; + } + + if (unlikely(skb != head_skb)) { + head_skb->len += size; + head_skb->data_len += size; + head_skb->truesize += rxq->buf_len; + } + + if (likely(hinic5_add_rx_frag(rxq, rx_info, skb, size, temp_offset))) + hinic5_reuse_rx_page(rxq, rx_info); + + /* clear contents of buffer_info */ + rx_info->buf_dma_addr = 0; + rx_info->page = NULL; + temp_sge_num--; + temp_offset = 0; /* only the first (compact) sge use the offset */ + frag_num++; + } +} + +struct sk_buff *hinic5_fetch_rx_buffer(struct hinic5_rxq *rxq, + const struct hinic5_cqe_info *cqe_info) +{ + struct sk_buff *head_skb = NULL; + struct sk_buff *cur_skb = NULL; + struct sk_buff *skb = NULL; + struct net_device *netdev = rxq->netdev; + u32 pkt_len = cqe_info->pkt_len; + u8 packet_offset = cqe_info->packet_offset; + u8 sge_num, skb_num; + u16 wqebb_cnt = 0; + + head_skb = 
netdev_alloc_skb_ip_align(netdev, HINIC5_RX_HDR_SIZE); + if (unlikely(!head_skb)) + return NULL; + + sge_num = HINIC5_GET_SGE_NUM(pkt_len + packet_offset, rxq); + if (likely(sge_num <= MAX_SKB_FRAGS)) + skb_num = 1; + else + skb_num = (sge_num / MAX_SKB_FRAGS) + + (((sge_num % MAX_SKB_FRAGS) != 0) ? 1 : 0); + + while (unlikely(skb_num > 1)) { + cur_skb = netdev_alloc_skb_ip_align(netdev, HINIC5_RX_HDR_SIZE); + if (unlikely(!cur_skb)) + goto alloc_skb_fail; + + if (!skb) { + skb_shinfo(head_skb)->frag_list = cur_skb; + skb = cur_skb; + } else { + skb->next = cur_skb; + skb = cur_skb; + } + + skb_num--; + } + + prefetchw(head_skb->data); + wqebb_cnt = sge_num; + packaging_skb(rxq, head_skb, sge_num, pkt_len, packet_offset); + rxq->cons_idx += wqebb_cnt; + rxq->delta += wqebb_cnt; + + return head_skb; + +alloc_skb_fail: + dev_kfree_skb_any(head_skb); + + return NULL; +} + +void hinic5_rxq_get_stats(struct hinic5_rxq *rxq, + struct hinic5_rxq_stats *stats) +{ + struct hinic5_rxq_stats *rxq_stats = &rxq->rxq_stats; + unsigned int start; + + u64_stats_update_begin(&stats->syncp); + do { + start = u64_stats_fetch_begin(&rxq_stats->syncp); + stats->bytes = rxq_stats->bytes; + stats->packets = rxq_stats->packets; + stats->errors = rxq_stats->csum_errors + + rxq_stats->other_errors; + stats->csum_errors = rxq_stats->csum_errors; + stats->other_errors = rxq_stats->other_errors; + stats->dropped = rxq_stats->dropped; +#ifdef HAVE_XDP_SUPPORT + stats->xdp_dropped = rxq_stats->xdp_dropped; + stats->xdp_redirected = rxq_stats->xdp_redirected; + stats->xdp_large_pkt = rxq_stats->xdp_large_pkt; +#endif + stats->rx_buf_empty = rxq_stats->rx_buf_empty; + } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); + u64_stats_update_end(&stats->syncp); +} + +void hinic5_rxq_clean_stats(struct hinic5_rxq_stats *rxq_stats) +{ + u64_stats_update_begin(&rxq_stats->syncp); + rxq_stats->bytes = 0; + rxq_stats->packets = 0; + rxq_stats->errors = 0; + rxq_stats->csum_errors = 0; + 
rxq_stats->other_errors = 0; + rxq_stats->dropped = 0; + rxq_stats->rx_buf_empty = 0; + + rxq_stats->alloc_skb_err = 0; + rxq_stats->alloc_rx_buf_err = 0; + rxq_stats->restore_drop_sge = 0; + rxq_stats->pkt_mc = 0; +#ifdef HAVE_XDP_SUPPORT + rxq_stats->xdp_dropped = 0; + rxq_stats->xdp_redirected = 0; + rxq_stats->xdp_large_pkt = 0; +#endif + u64_stats_update_end(&rxq_stats->syncp); +} + +static void rxq_stats_init(struct hinic5_rxq *rxq) +{ + struct hinic5_rxq_stats *rxq_stats = &rxq->rxq_stats; + + u64_stats_init(&rxq_stats->syncp); + hinic5_rxq_clean_stats(rxq_stats); +} + +#ifndef HAVE_ETH_GET_HEADLEN_FUNC +static unsigned int hinic5_eth_get_headlen(unsigned char *data, unsigned int max_len) +{ +#define IP_FRAG_OFFSET 0x1FFF +#define FCOE_HLEN 38 +#define ETH_P_8021_AD 0x88A8 +#define ETH_P_8021_Q 0x8100 +#define TCP_HEAD_OFFSET 12 + union { + unsigned char *data; + struct ethhdr *eth; + struct vlan_ethhdr *vlan; + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + } hdr; + u16 protocol; + u8 nexthdr = 0; + u8 hlen; + + if (unlikely(max_len < ETH_HLEN)) + return max_len; + + hdr.data = data; + protocol = hdr.eth->h_proto; + + /* L2 header */ + if (protocol == htons(ETH_P_8021_AD) || + protocol == htons(ETH_P_8021_Q)) { + if (unlikely(max_len < ETH_HLEN + VLAN_HLEN)) + return max_len; + + /* L3 protocol */ + protocol = hdr.vlan->h_vlan_encapsulated_proto; + hdr.data += sizeof(struct vlan_ethhdr); + } else { + hdr.data += ETH_HLEN; + } + + /* L3 header */ + switch (protocol) { + case htons(ETH_P_IP): + if ((int)(hdr.data - data) > + (int)(max_len - sizeof(struct iphdr))) + return max_len; + + /* L3 header length = (1st byte & 0x0F) << 2 */ + hlen = (hdr.data[0] & 0x0F) << 2; + + if (hlen < sizeof(struct iphdr)) + return (unsigned int)(hdr.data - data); + + if ((hdr.ipv4->frag_off & htons(IP_FRAG_OFFSET)) == 0) + nexthdr = hdr.ipv4->protocol; + + hdr.data += hlen; + break; + + case htons(ETH_P_IPV6): + if ((int)(hdr.data - data) > + (int)(max_len - sizeof(struct 
ipv6hdr))) + return max_len; + /* L4 protocol */ + nexthdr = hdr.ipv6->nexthdr; + hdr.data += sizeof(struct ipv6hdr); + break; + + case htons(ETH_P_FCOE): + hdr.data += FCOE_HLEN; + break; + + default: + return (unsigned int)(hdr.data - data); + } + + /* L4 header */ + switch (nexthdr) { + case IPPROTO_TCP: + if ((int)(hdr.data - data) > + (int)(max_len - sizeof(struct tcphdr))) + return max_len; + + /* L4 header length = (13st byte & 0xF0) >> 2 */ + if (((hdr.data[TCP_HEAD_OFFSET] & 0xF0) >> + HINIC5_HEADER_DATA_UNIT) > sizeof(struct tcphdr)) + hdr.data += ((hdr.data[TCP_HEAD_OFFSET] & 0xF0) >> + HINIC5_HEADER_DATA_UNIT); + else + hdr.data += sizeof(struct tcphdr); + break; + case IPPROTO_UDP: + case IPPROTO_UDPLITE: + hdr.data += sizeof(struct udphdr); + break; + + case IPPROTO_SCTP: + hdr.data += sizeof(struct sctphdr); + break; + default: + break; + } + + if ((hdr.data - data) > max_len) + return max_len; + else + return (unsigned int)(hdr.data - data); +} +#endif + +static void hinic5_pull_tail(struct sk_buff *skb) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va = NULL; + unsigned int pull_len; + + /* it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + +#ifdef HAVE_ETH_GET_HEADLEN_FUNC + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. 
+ */
+#ifdef ETH_GET_HEADLEN_NEED_DEV
+	pull_len = eth_get_headlen(skb->dev, va, HINIC5_RX_HDR_SIZE);
+#elif defined(HAVE_ETH_GET_HEADLEN_NET_DEVICE_ARG)
+	pull_len = eth_get_headlen(skb->dev, va, HINIC5_RX_HDR_SIZE);
+#else
+	pull_len = eth_get_headlen(va, HINIC5_RX_HDR_SIZE);
+#endif
+
+#else
+	pull_len = hinic5_eth_get_headlen(va, HINIC5_RX_HDR_SIZE);
+#endif
+
+	/* align pull length to size of long to optimize memcpy performance */
+	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+	/* update all of the pointers */
+	skb_frag_size_sub(frag, (int)pull_len);
+	skb_frag_off_add(frag, (int)pull_len);
+
+	skb->data_len -= pull_len;
+	skb->tail += pull_len;
+}
+
+/* Set skb->ip_summed from the checksum result the HW reported in the CQE.
+ * Only TCP/UDP/SCTP over recognized IP in non-tunnel or VXLAN format may be
+ * marked CHECKSUM_UNNECESSARY; everything else falls back to CHECKSUM_NONE.
+ */
+static void hinic5_rx_csum(struct hinic5_rxq *rxq, const struct hinic5_cqe_info *cqe_info,
+			   struct sk_buff *skb)
+{
+	struct net_device *netdev = rxq->netdev;
+
+	if (unlikely(cqe_info->csum_err == HINIC5_RX_CSUM_IPSU_OTHER_ERR))
+		rxq->rxq_stats.other_errors++;
+
+	if ((netdev->features & NETIF_F_RXCSUM) == 0)
+		return;
+
+	if (unlikely(cqe_info->csum_err != 0)) {
+		/* pkt type is recognized by HW, and csum is wrong */
+		if ((cqe_info->csum_err & (HINIC5_RX_CSUM_HW_CHECK_NONE |
+		     HINIC5_RX_CSUM_IPSU_OTHER_ERR)) == 0)
+			rxq->rxq_stats.csum_errors++;
+		skb->ip_summed = CHECKSUM_NONE;
+		return;
+	}
+
+	if (cqe_info->ip_type == HINIC5_RX_INVALID_IP_TYPE ||
+	    !(cqe_info->pkt_fmt == HINIC5_RX_PKT_FORMAT_NON_TUNNEL ||
+	      cqe_info->pkt_fmt == HINIC5_RX_PKT_FORMAT_VXLAN)) {
+		skb->ip_summed = CHECKSUM_NONE;
+		return;
+	}
+
+	switch (cqe_info->pkt_type) {
+	case HINIC5_RX_TCP_PKT:
+	case HINIC5_RX_UDP_PKT:
+	case HINIC5_RX_SCTP_PKT:
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		break;
+	default:
+		skb->ip_summed = CHECKSUM_NONE;
+		break;
+	}
+}
+
+#ifdef HAVE_SKBUFF_CSUM_LEVEL
+/* For a VXLAN packet whose checksum was verified, raise csum_level so GRO
+ * knows the outer header was already checked.
+ */
+static void hinic5_rx_gro(struct hinic5_rxq *rxq, u8 pkt_fmt,
+			  struct sk_buff *skb)
+{
+	struct net_device *netdev = rxq->netdev;
+	bool l2_tunnel = false;
+
+	if ((netdev->features & NETIF_F_GRO) == 0)
+		return;
+
+	l2_tunnel = (pkt_fmt == HINIC5_RX_PKT_FORMAT_VXLAN) ? 1 : 0;
+	if (l2_tunnel && skb->ip_summed == CHECKSUM_UNNECESSARY)
+		/* If we checked the outer header let the stack know */
+		skb->csum_level = 1;
+}
+#endif /* HAVE_SKBUFF_CSUM_LEVEL */
+
+/* Copy a received loopback-test packet (linear part plus all page frags)
+ * into the per-slot capture buffer lb_test_rx_buf for later comparison.
+ */
+static void hinic5_copy_lp_data(struct hinic5_nic_dev *nic_dev,
+				struct sk_buff *skb)
+{
+	struct net_device *netdev = nic_dev->netdev;
+	u8 *lb_buf = nic_dev->lb_test_rx_buf;
+	void *frag_data = NULL;
+	int lb_len = nic_dev->lb_pkt_len;
+	int pkt_offset, frag_len, i;
+
+	if (nic_dev->lb_test_rx_idx == LP_PKT_CNT) {
+		nic_dev->lb_test_rx_idx = 0;
+		nicif_warn(nic_dev, rx_err, netdev, "Loopback test warning, receive too many test pkts\n");
+	}
+
+	if (skb->len != (u32)(nic_dev->lb_pkt_len)) {
+		nicif_warn(nic_dev, rx_err, netdev, "Wrong packet length\n");
+		nic_dev->lb_test_rx_idx++;
+		return;
+	}
+
+	pkt_offset = nic_dev->lb_test_rx_idx * lb_len;
+	frag_len = (int)skb_headlen(skb);
+	memcpy(lb_buf + pkt_offset, skb->data, (size_t)(u32)frag_len);
+	pkt_offset += frag_len;
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		frag_data = skb_frag_address(&skb_shinfo(skb)->frags[i]);
+		frag_len = (int)skb_frag_size(&skb_shinfo(skb)->frags[i]);
+		memcpy(lb_buf + pkt_offset, frag_data, (size_t)(u32)frag_len);
+		pkt_offset += frag_len;
+	}
+	nic_dev->lb_test_rx_idx++;
+}
+
+/* Fill GSO metadata on an LRO-coalesced skb so the stack can resegment it
+ * into num_lro segments of the original size.
+ */
+static inline void hinic5_lro_set_gso_params(struct sk_buff *skb, u16 num_lro)
+{
+	struct ethhdr *eth = (struct ethhdr *)(skb->data);
+	__be16 proto;
+
+	proto = __vlan_get_protocol(skb, eth->h_proto, NULL);
+
+	skb_shinfo(skb)->gso_size = (u16)DIV_ROUND_UP((skb->len - skb_headlen(skb)), num_lro);
+	skb_shinfo(skb)->gso_type = (proto == htons(ETH_P_IP)) ?
SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+	skb_shinfo(skb)->gso_segs = num_lro;
+}
+
+/* Deliver one completed RX descriptor to the stack: build the skb (or hand
+ * the packet to XDP), apply checksum/GRO/VLAN/LRO offload metadata, then
+ * pass it to NAPI GRO. Returns 0, HINIC5_XDP_PROCESSED, or -ENOMEM.
+ */
+static int recv_one_pkt(struct hinic5_rxq *rxq, struct hinic5_cqe_info *cqe_info)
+{
+	struct sk_buff *skb = NULL;
+	struct net_device *netdev = rxq->netdev;
+	struct hinic5_nic_dev *nic_dev = netdev_priv(rxq->netdev);
+
+#ifdef HAVE_XDP_SUPPORT
+	if (hinic5_xdp_process_packet(rxq, cqe_info, &skb)) {
+		/* The XDP program has processed the packet
+		 * and does not need to be sent to the protocol stack
+		 */
+		return HINIC5_XDP_PROCESSED;
+	}
+#else
+	skb = hinic5_fetch_rx_buffer(rxq, cqe_info);
+#endif
+	if (unlikely(!skb)) {
+		RXQ_STATS_INC(rxq, alloc_skb_err);
+		return -ENOMEM;
+	}
+
+	/* place header in linear portion of buffer */
+	if (skb_is_nonlinear(skb))
+		hinic5_pull_tail(skb);
+
+	if (cqe_info->ts_flag != 0)
+		hinic5_ptp_rx_hwtstamp(nic_dev, skb);
+
+	hinic5_rx_csum(rxq, cqe_info, skb);
+
+#ifdef HAVE_SKBUFF_CSUM_LEVEL
+	hinic5_rx_gro(rxq, cqe_info->pkt_fmt, skb);
+#endif
+
+#if defined(NETIF_F_HW_VLAN_CTAG_RX)
+	if (((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) != 0) && cqe_info->vlan_offload != 0) {
+#else
+	if ((netdev->features & NETIF_F_HW_VLAN_RX) && cqe_info->vlan_offload) {
+#endif
+		/* if the packet is a vlan pkt, the vid may be 0 */
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), cqe_info->vlan_tag);
+	}
+
+	if (unlikely(test_bit(HINIC5_LP_TEST, &nic_dev->flags) != 0))
+		hinic5_copy_lp_data(nic_dev, skb);
+
+	if (cqe_info->lro_num > 1)
+		hinic5_lro_set_gso_params(skb, cqe_info->lro_num);
+
+	skb_record_rx_queue(skb, rxq->q_id);
+	skb->protocol = eth_type_trans(skb, netdev);
+
+	if (skb_has_frag_list(skb)) {
+#ifdef HAVE_NAPI_GRO_FLUSH_OLD
+		napi_gro_flush(&rxq->irq_cfg->napi, false);
+#else
+		napi_gro_flush(&rxq->irq_cfg->napi);
+#endif
+	}
+	napi_gro_receive(&rxq->irq_cfg->napi, skb);
+
+	return 0;
+}
+
+/* Per-segment header length used to account LRO-merged headers in rx_bytes */
+#define LRO_PKT_HDR_LEN_IPV4 66
+#define LRO_PKT_HDR_LEN_IPV6 86
+#define LRO_PKT_HDR_LEN(ip_type) \
+	((ip_type) == HINIC5_RX_IPV6_PKT ? LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4)
+
+/* Decode a normal (non-compact) RX CQE into the driver's hinic5_cqe_info.
+ * cqe_mode/enable_pfe are unused here; presumably kept so the prototype
+ * matches hinic5_rx_get_compact_cqe_info for the rx_get_cqe_info ops hook.
+ */
+void hinic5_rx_get_cqe_info(struct hinic5_rq_cqe *cqe,
+			    struct hinic5_cqe_info *info, u8 cqe_mode, bool enable_pfe)
+{
+	u32 dw0 = hinic5_hw_cpu32(cqe->status);
+	u32 dw1 = hinic5_hw_cpu32(cqe->vlan_len);
+	u32 dw2 = hinic5_hw_cpu32(cqe->offload_type);
+
+	info->lro_num = RQ_CQE_STATUS_GET(dw0, NUM_LRO);
+	info->csum_err = RQ_CQE_STATUS_GET(dw0, CSUM_ERR);
+
+	info->pkt_len = RQ_CQE_SGE_GET(dw1, LEN);
+	info->vlan_tag = RQ_CQE_SGE_GET(dw1, VLAN);
+
+	info->pkt_type = RQ_CQE_OFFOLAD_TYPE_GET(dw2, PKT_TYPE);
+	info->ip_type = RQ_CQE_OFFOLAD_TYPE_GET(dw2, IP_TYPE);
+	info->pkt_fmt = RQ_CQE_OFFOLAD_TYPE_GET(dw2, TUNNEL_PKT_FORMAT);
+	info->vlan_offload = RQ_CQE_OFFOLAD_TYPE_GET(dw2, VLAN_EN);
+	info->rss_type = RQ_CQE_OFFOLAD_TYPE_GET(dw2, RSS_TYPE);
+	info->pkt_mc = RQ_CQE_OFFOLAD_TYPE_GET(dw2, PKT_UMBCAST);
+	info->rss_hash_value = hinic5_hw_cpu32(cqe->hash_val);
+}
+
+/* Decode a compact RX CQE (8- or 16-byte layout) into hinic5_cqe_info */
+void hinic5_rx_get_compact_cqe_info(struct hinic5_rq_cqe *cqe,
+				    struct hinic5_cqe_info *info, u8 cqe_mode, bool enable_pfe)
+{
+	u32 dw0, dw1, dw2, dw3;
+
+	if (cqe_mode != HINIC5_RQ_CQE_INTEGRATE) {
+		dw0 = hinic5_hw_cpu32(cqe->status);
+		dw1 = hinic5_hw_cpu32(cqe->vlan_len);
+		dw2 = hinic5_hw_cpu32(cqe->offload_type);
+		dw3 = hinic5_hw_cpu32(cqe->hash_val);
+	} else {
+		/*
+		 * When rx wqe is compact, cqe is integrated with packet by big endian,
+		 * explicit endian conversion is needed.
+		 */
+		dw0 = be32_to_cpu(cqe->status);
+		dw1 = be32_to_cpu(cqe->vlan_len);
+		dw2 = be32_to_cpu(cqe->offload_type);
+		dw3 = be32_to_cpu(cqe->hash_val);
+	}
+
+	info->cqe_type = RQ_COMPACT_CQE_STATUS_GET(dw0, CQE_TYPE);
+	info->csum_err = RQ_COMPACT_CQE_STATUS_GET(dw0, CSUM_ERR);
+	info->vlan_offload = RQ_COMPACT_CQE_STATUS_GET(dw0, VLAN_EN);
+	info->pkt_fmt = RQ_COMPACT_CQE_STATUS_GET(dw0, PKT_FORMAT);
+	info->ip_type = RQ_COMPACT_CQE_STATUS_GET(dw0, IP_TYPE);
+	info->cqe_len = RQ_COMPACT_CQE_STATUS_GET(dw0, CQE_LEN);
+	info->pkt_type = RQ_COMPACT_CQE_STATUS_GET(dw0, PKT_TYPE);
+	info->pkt_len = RQ_COMPACT_CQE_STATUS_GET(dw0, PKT_LEN);
+	info->ts_flag = RQ_COMPACT_CQE_STATUS_GET(dw0, TS_FLAG);
+	info->pkt_mc = RQ_COMPACT_CQE_STATUS_GET(dw0, PKT_MC);
+	info->rss_hash_value = dw1;
+
+	/* map compact-CQE error codes onto the legacy csum_err bit values */
+	switch (info->csum_err) {
+	case NIC_RX_COMPACT_CSUM_OTHER_ERROR:
+		info->csum_err = HINIC5_RX_CSUM_IPSU_OTHER_ERR;
+		break;
+	case NIC_RX_COMPACT_HW_BYPASS_ERROR:
+		info->csum_err = HINIC5_RX_CSUM_HW_CHECK_NONE;
+		break;
+	default:
+		break;
+	}
+
+	/* LRO/VLAN/PFE fields only exist in the 16-byte compact CQE layout */
+	if (info->cqe_len == RQ_COMPACT_CQE_16BYTE) {
+		info->lro_num = RQ_COMPACT_CQE_OFFLOAD_GET(dw2, NUM_LRO);
+		info->vlan_tag = RQ_COMPACT_CQE_OFFLOAD_GET(dw2, VLAN);
+		if (enable_pfe) {
+			info->pfe_pkt_src = RQ_COMPACT_CQE_OFFLOAD_GET(dw2, PFE_PKT_SRC);
+			info->pfe_port_id = RQ_COMPACT_CQE_OFFLOAD_GET(dw2, PFE_PORT_ID);
+			info->flow_mark_vld = RQ_COMPACT_CQE_OFFLOAD_GET(dw2, FLOW_MARK_VLD);
+			info->src_func_id =
+				(u16)((RQ_COMPACT_CQE_OFFLOAD_GET(dw2, SRC_FUNC_ID_HIGH)
+				<< RQ_COMPACT_CQE_OFFLOAD_SRC_FUNC_ID_SHIFT) |
+				RQ_COMPACT_CQE_OFFLOAD_GET(dw3, SRC_FUNC_ID_LOW));
+			info->flow_mark = RQ_COMPACT_CQE_OFFLOAD_GET(dw3, FLOW_MARK);
+		}
+	} else {
+		info->lro_num = 0;
+		info->vlan_tag = 0;
+	}
+
+	if (cqe_mode == HINIC5_RQ_CQE_INTEGRATE) {
+		info->packet_offset = info->cqe_len == RQ_COMPACT_CQE_16BYTE ?
+			HINIC5_COMPACT_CQE_16B : HINIC5_COMPACT_CQE_8B;
+	} else {
+		info->packet_offset = 0;
+	}
+}
+
+/* Integrated-CQE mode: a descriptor is done when HW CI has moved past SW CI.
+ * The CQE lives at the start of the packet buffer, so sync it for CPU access
+ * and return a pointer into the page (after XDP headroom if enabled).
+ */
+bool hinic5_rx_integrated_cqe_done(struct hinic5_rxq *rxq, struct hinic5_rq_cqe **rx_cqe)
+{
+	u16 sw_ci;
+	u16 hw_ci;
+
+	sw_ci = (u16)(rxq->cons_idx & rxq->q_mask);
+	hw_ci = hinic5_get_rq_hw_ci(rxq->rq);
+	if (hw_ci == sw_ci)
+		return false;
+	/* make sure we read cqe info in dma */
+	dma_sync_single_range_for_cpu(rxq->dev, rxq->rx_info[sw_ci].buf_dma_addr,
+				      rxq->rx_info[sw_ci].page_offset,
+				      rxq->buf_len, DMA_FROM_DEVICE);
+#ifdef HAVE_XDP_SUPPORT
+	if (rxq->xdp_headroom_flag == 0)
+		*rx_cqe = (struct hinic5_rq_cqe *)
+			  ((u8 *)page_address(rxq->rx_info[sw_ci].page) +
+			  rxq->rx_info[sw_ci].page_offset);
+	else
+		*rx_cqe = (struct hinic5_rq_cqe *)
+			  ((u8 *)page_address(rxq->rx_info[sw_ci].page) +
+			  rxq->rx_info[sw_ci].page_offset + XDP_PACKET_HEADROOM);
+#else
+	*rx_cqe = (struct hinic5_rq_cqe *)
+		  ((u8 *)page_address(rxq->rx_info[sw_ci].page) +
+		  rxq->rx_info[sw_ci].page_offset);
+#endif
+
+	return true;
+}
+
+/* Separate-CQE mode: completion is signalled by the done bit in the
+ * dedicated CQE ring entry for the current consumer index.
+ */
+bool hinic5_rx_separate_cqe_done(struct hinic5_rxq *rxq, struct hinic5_rq_cqe **rx_cqe)
+{
+	u32 sw_ci, status = 0;
+
+	sw_ci = rxq->cons_idx & rxq->q_mask;
+	*rx_cqe = rxq->rx_info[sw_ci].cqe;
+
+	status = hinic5_hw_cpu32((*rx_cqe)->status);
+	if (HINIC5_GET_RX_DONE(status) == 0)
+		return false;
+
+	return true;
+}
+
+/* Convert a raw CQE to CPU byte order before handing it to a service
+ * callback registered via hinic5_register_cqe_cb().
+ */
+void hinic5_rx_cqe_sendup_convert(struct hinic5_rq_cqe *rx_cqe,
+				  struct hinic5_rq_cqe *rx_cqe_sendup, u8 cqe_mode)
+{
+	if (cqe_mode == HINIC5_RQ_CQE_INTEGRATE) {
+		/*
+		 * When rx wqe is compact, cqe is integrated with packet by big endian,
+		 * explicit endian conversion is needed.
+		 */
+		rx_cqe_sendup->status = be32_to_cpu(rx_cqe->status);
+		rx_cqe_sendup->vlan_len = be32_to_cpu(rx_cqe->vlan_len);
+		rx_cqe_sendup->offload_type = be32_to_cpu(rx_cqe->offload_type);
+		rx_cqe_sendup->hash_val = be32_to_cpu(rx_cqe->hash_val);
+	} else {
+		rx_cqe_sendup->status = hinic5_hw_cpu32(rx_cqe->status);
+		rx_cqe_sendup->vlan_len = hinic5_hw_cpu32(rx_cqe->vlan_len);
+		rx_cqe_sendup->offload_type = hinic5_hw_cpu32(rx_cqe->offload_type);
+		rx_cqe_sendup->hash_val = hinic5_hw_cpu32(rx_cqe->hash_val);
+	}
+}
+
+/* Offer the CQE to every registered service callback; a non-zero return from
+ * a callback stops iteration and makes the caller drop the packet. The
+ * running-bit protects the callback against concurrent unregistration.
+ */
+static int rx_cqe_check(struct hinic5_nic_dev *nic_dev, struct hinic5_rq_cqe *rx_cqe)
+{
+	struct hinic5_rq_cqe rx_cqe_sendup = {0};
+	int i, ret = 0;
+
+	for (i = 0; i < SERVICE_T_MAX; i++) {
+		if (nic_dev->tx_rx_ops.cqe_cb[i] &&
+		    test_bit(i, &nic_dev->tx_rx_ops.cqe_cb_state[i])) {
+			hinic5_rx_cqe_sendup_convert(rx_cqe, &rx_cqe_sendup, nic_dev->cqe_mode);
+			set_bit(i, &nic_dev->tx_rx_ops.cqe_cb_running[i]);
+			ret = nic_dev->tx_rx_ops.cqe_cb[i](nic_dev->lld_dev, &rx_cqe_sendup);
+			if (ret != 0) {
+				clear_bit(i, &nic_dev->tx_rx_ops.cqe_cb_running[i]);
+				break;
+			}
+			clear_bit(i, &nic_dev->tx_rx_ops.cqe_cb_running[i]);
+		}
+	}
+	return ret;
+}
+
+/* Advance the consumer index past a packet that is being dropped, releasing
+ * the SGEs it occupied back to the free count.
+ */
+static void rx_free_warning_wqe(struct hinic5_rxq *rxq, struct hinic5_cqe_info *cqe_info)
+{
+	u8 sge_num, packet_offset;
+	u32 pkt_len;
+
+	packet_offset = cqe_info->packet_offset;
+	pkt_len = cqe_info->pkt_len;
+	sge_num = HINIC5_GET_SGE_NUM(pkt_len + packet_offset, rxq);
+
+	rxq->cons_idx += sge_num;
+	rxq->delta += sge_num;
+}
+
+/* NAPI poll routine: consume up to @budget completed RX descriptors,
+ * refill buffers when enough have been freed, and update queue stats.
+ * Returns the number of packets handed to the stack.
+ */
+int hinic5_rx_poll(struct hinic5_rxq *rxq, int budget)
+{
+	struct hinic5_nic_dev *nic_dev = netdev_priv(rxq->netdev);
+	u32 dropped = 0;
+	u64 pkt_mc = 0;
+	struct hinic5_rq_cqe *rx_cqe = NULL;
+	struct hinic5_cqe_info *cqe_info = NULL;
+	u64 rx_bytes = 0;
+	int pkts = 0;
+	u16 num_wqe = 0;
+	u16 sw_ci = 0;
+	bool enable_pfe = HINIC5_SUPPORT_FEATURE(nic_dev->hwdev, TC_FLOWER_OFFLOAD);
+	int ret = 0;
+
+	while (likely(pkts < budget)) {
+		if (!nic_dev->tx_rx_ops.rx_cqe_done(rxq, &rx_cqe)
+			break;
+		/* make sure we read rx_done before packet length */
+		rmb();
+
+		sw_ci = (u16)(rxq->cons_idx & rxq->q_mask);
+		cqe_info = rxq->rx_info[sw_ci].cqe_info;
+		nic_dev->tx_rx_ops.rx_get_cqe_info(rx_cqe, cqe_info, nic_dev->cqe_mode, enable_pfe);
+
+		if (nic_dev->support_htn && (rx_cqe_check(nic_dev, rx_cqe) != 0)) {
+			rx_free_warning_wqe(rxq, cqe_info);
+			dropped++;
+			rx_cqe->status = 0;
+			continue;
+		}
+
+		ret = recv_one_pkt(rxq, cqe_info);
+		if (ret < 0)
+			break;
+
+		/* In separate-CQE mode the driver must clear the done bit itself */
+		if (nic_dev->cqe_mode == HINIC5_RQ_CQE_SEPARATE)
+			rx_cqe->status = 0;
+
+	#ifdef HAVE_XDP_SUPPORT
+		if (ret == HINIC5_XDP_PROCESSED)
+			continue;
+	#endif
+
+		rx_bytes += cqe_info->pkt_len;
+		pkts++;
+
+		if (cqe_info->pkt_mc == MULTICAST)
+			pkt_mc++;
+
+		if (cqe_info->lro_num > 1) {
+			rx_bytes += ((cqe_info->lro_num - 1) * LRO_PKT_HDR_LEN(cqe_info->ip_type));
+			num_wqe += HINIC5_GET_SGE_NUM(cqe_info->pkt_len, rxq);
+		}
+
+		if (num_wqe >= nic_dev->lro_replenish_thld)
+			break;
+	}
+
+	if (rxq->delta >= HINIC5_RX_BUFFER_WRITE)
+		hinic5_rx_fill_buffers(rxq);
+
+	u64_stats_update_begin(&rxq->rxq_stats.syncp);
+	rxq->rxq_stats.packets += (u64)(u32)pkts;
+	rxq->rxq_stats.bytes += rx_bytes;
+	rxq->rxq_stats.dropped += (u64)dropped;
+	rxq->rxq_stats.pkt_mc += pkt_mc;
+	u64_stats_update_end(&rxq->rxq_stats.syncp);
+#ifdef HAVE_XDP_SUPPORT
+	hinic5_xdp_flush_if_needed(nic_dev);
+#endif
+	return pkts;
+}
+
+#ifdef HAVE_PAGE_POOL_SUPPORT
+/* Create one page pool for a queue and point every rx_info entry at it.
+ * NOTE(review): page_pool_create() returns ERR_PTR() on failure, not NULL;
+ * the NULL checks in the callers may miss that case — confirm and use IS_ERR.
+ */
+static struct page_pool *hinic5_create_page_pool(struct hinic5_nic_dev *nic_dev,
+						 u32 rq_depth,
+						 struct hinic5_rx_info *rx_info_arr)
+{
+	struct page_pool_params pp_params = {
+		.flags = PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_SYNC_DEV,
+		.order = nic_dev->page_order,
+		.pool_size = rq_depth * nic_dev->rx_buff_len /
+			     (PAGE_SIZE << nic_dev->page_order),
+		.nid = dev_to_node(nic_dev->lld_dev->dev),
+		.dev = nic_dev->lld_dev->dev,
+		.dma_dir = DMA_FROM_DEVICE,
+		.offset = 0,
+		.max_len = PAGE_SIZE << nic_dev->page_order,
+	};
+	struct page_pool *page_pool = NULL;
+	u32 i;
+
+	page_pool = nic_dev->page_pool_enabled ?
+		    page_pool_create(&pp_params) : NULL;
+	for (i = 0; i < rq_depth; i++)
+		rx_info_arr[i].page_pool = page_pool;
+	return page_pool;
+}
+#endif
+
+/* Allocate per-queue RX resources (rx_info array, CQE ring in separate-CQE
+ * mode, decoded-CQE cache, optional page pool, and initial buffers) for
+ * num_rq queues. On failure everything allocated so far is released and
+ * -ENOMEM is returned.
+ */
+int hinic5_alloc_rxqs_res(struct hinic5_nic_dev *nic_dev, u16 num_rq,
+			  u32 rq_depth, struct hinic5_dyna_rxq_res *rxqs_res)
+{
+	struct hinic5_dyna_rxq_res *rqres = NULL;
+	u16 idx;
+	u32 pkts;
+	u64 size;
+	u64 cqe_mem_size = sizeof(struct hinic5_rq_cqe) * rq_depth;
+	u64 cqe_info_mem_size = sizeof(struct hinic5_cqe_info) * rq_depth;
+
+	for (idx = 0; idx < num_rq; idx++) {
+		rqres = &rxqs_res[idx];
+		size = sizeof(*rqres->rx_info) * rq_depth;
+		rqres->rx_info = kzalloc(size, GFP_KERNEL);
+
+		if (!rqres->rx_info)
+			goto err_alloc_rx_info;
+		if (nic_dev->cqe_mode == HINIC5_RQ_CQE_SEPARATE) {
+			rqres->cqe_start_vaddr = dma_zalloc_coherent(nic_dev->lld_dev->dev,
+								     cqe_mem_size,
+								     &rqres->cqe_start_paddr,
+								     GFP_KERNEL);
+			if (!rqres->cqe_start_vaddr) {
+				nicif_err(nic_dev, drv, nic_dev->netdev,
+					  "Failed to alloc rxq%d cqe\n", idx);
+				goto err_alloc_cqe;
+			}
+		}
+
+		rqres->cqe_info = kzalloc(cqe_info_mem_size, GFP_KERNEL);
+		if (!rqres->cqe_info)
+			goto err_alloc_cqe_info;
+
+#ifdef HAVE_PAGE_POOL_SUPPORT
+		if (nic_dev->page_pool_enabled) {
+			rqres->page_pool = hinic5_create_page_pool(nic_dev, rq_depth,
+								   rqres->rx_info);
+			if (!rqres->page_pool) {
+				nicif_err(nic_dev, drv, nic_dev->netdev,
+					  "Failed to create rxq%d page pool\n", idx);
+				goto err_create_page_pool;
+			}
+		}
+#endif
+
+		pkts = hinic5_rx_alloc_buffers(nic_dev, rq_depth, rqres->rx_info);
+		if (pkts == 0) {
+			nicif_err(nic_dev, drv, nic_dev->netdev,
+				  "Failed to alloc rxq%d rx buffers\n", idx);
+			goto err_alloc_buffers;
+		}
+		rqres->next_to_alloc = (u16)pkts;
+	}
+
+	return 0;
+
+	/* unwind the partially initialized queue, then free all earlier queues */
+err_alloc_buffers:
+#ifdef HAVE_PAGE_POOL_SUPPORT
+	page_pool_destroy(rqres->page_pool);
+err_create_page_pool:
+#endif
+	kfree(rqres->cqe_info);
+err_alloc_cqe_info:
+	if (nic_dev->cqe_mode == HINIC5_RQ_CQE_SEPARATE) {
+		dma_free_coherent(nic_dev->lld_dev->dev, cqe_mem_size, rqres->cqe_start_vaddr,
+				  rqres->cqe_start_paddr);
+	}
+err_alloc_cqe:
+	kfree(rqres->rx_info);
+err_alloc_rx_info:
+	hinic5_free_rxqs_res(nic_dev, idx, rq_depth, rxqs_res);
+	return -ENOMEM;
+}
+
+/* Release everything hinic5_alloc_rxqs_res() allocated for num_rq queues */
+void hinic5_free_rxqs_res(struct hinic5_nic_dev *nic_dev, u16 num_rq,
+			  u32 rq_depth, struct hinic5_dyna_rxq_res *rxqs_res)
+{
+	struct hinic5_dyna_rxq_res *rqres = NULL;
+	struct hinic5_rxq *rxq = NULL;
+	u64 cqe_mem_size = sizeof(struct hinic5_rq_cqe) * rq_depth;
+	int idx;
+
+	for (idx = 0; idx < num_rq; idx++) {
+		rxq = &nic_dev->rxqs[idx];
+		rqres = &rxqs_res[idx];
+		hinic5_rx_free_buffers(nic_dev, rq_depth, rqres->rx_info);
+#ifdef HAVE_PAGE_POOL_SUPPORT
+		if (rqres->page_pool)
+			page_pool_destroy(rqres->page_pool);
+#endif
+		kfree(rqres->cqe_info);
+		if (nic_dev->cqe_mode == HINIC5_RQ_CQE_SEPARATE) {
+			dma_free_coherent(nic_dev->lld_dev->dev, cqe_mem_size,
+					  rqres->cqe_start_vaddr, rqres->cqe_start_paddr);
+		}
+		kfree(rqres->rx_info);
+	}
+}
+
+/* Reset the software bookkeeping of a queue before (re)configuration */
+static inline void configure_rxq_init_default(struct hinic5_rxq *rxq)
+{
+	rxq->next_to_update = 0;
+	rxq->cons_idx = 0;
+	rxq->last_sw_ci = 0;
+	rxq->last_hw_ci = 0;
+	rxq->rx_check_err_cnt = 0;
+	rxq->rxq_print_times = 0;
+	rxq->last_packets = 0;
+	rxq->restore_buf_num = 0;
+}
+
+/* Detach XDP state registered by hinic5_configure_rxqs() from every queue */
+void hinic5_remove_configure_rxqs(struct hinic5_nic_dev *nic_dev)
+{
+#ifdef HAVE_XDP_SUPPORT
+	int idx = 0;
+	struct hinic5_rxq *rxq = NULL;
+
+	for (idx = 0; idx < nic_dev->q_params.num_qps + nic_dev->q_params.xdp_qps; idx++) {
+		rxq = &nic_dev->rxqs[idx];
+		rxq->xdp_prog = NULL;
+		xdp_rxq_info_unreg(&rxq->xdp_rxq);
+	}
+#endif
+}
+
+/* Bind the previously allocated per-queue resources to the rxq structures,
+ * fill WQEs and buffers, and register XDP info for each queue.
+ */
+int hinic5_configure_rxqs(struct hinic5_nic_dev *nic_dev, u16 num_rq,
+			  u32 rq_depth, struct hinic5_dyna_rxq_res *rxqs_res)
+{
+	struct hinic5_dyna_rxq_res *rqres = NULL;
+	struct irq_info *msix_entry = NULL;
+	struct hinic5_rxq *rxq = NULL;
+	struct hinic5_rq_cqe *cqe_va = NULL;
+	dma_addr_t cqe_pa;
+	u16 q_id;
+	u32 idx, pkts;
+
int err = 0;
+
+	nic_dev->rxq_get_err_times = 0;
+	for (q_id = 0; q_id < num_rq; q_id++) {
+		rxq = &nic_dev->rxqs[q_id];
+		rqres = &rxqs_res[q_id];
+		msix_entry = &nic_dev->qps_irq_info[q_id];
+
+		configure_rxq_init_default(rxq);
+		rxq->irq_id = msix_entry->irq_id;
+		rxq->msix_entry_idx = msix_entry->msix_entry_idx;
+		rxq->next_to_alloc = rqres->next_to_alloc;
+		rxq->q_depth = rq_depth;
+		rxq->delta = rxq->q_depth;
+		rxq->q_mask = rxq->q_depth - 1;
+		rxq->last_sw_pi = rxq->q_depth - 1;
+		rxq->rx_info = rqres->rx_info;
+#ifdef HAVE_XDP_SUPPORT
+		rxq->xdp_headroom_flag = nic_dev->xdp_prog ? 1 : 0;
+#endif
+
+		/* fill cqe */
+		if (nic_dev->cqe_mode == HINIC5_RQ_CQE_SEPARATE) {
+			cqe_va = (struct hinic5_rq_cqe *)rqres->cqe_start_vaddr;
+			cqe_pa = rqres->cqe_start_paddr;
+			for (idx = 0; idx < rq_depth; idx++) {
+				rxq->rx_info[idx].cqe = cqe_va;
+				rxq->rx_info[idx].cqe_dma = cqe_pa;
+				cqe_va++;
+				cqe_pa += sizeof(*rxq->rx_info->cqe);
+			}
+		}
+
+		for (idx = 0; idx < rq_depth; idx++)
+			rxq->rx_info[idx].cqe_info = &rqres->cqe_info[idx];
+
+		rxq->rq = hinic5_get_nic_queue(nic_dev->hwdev, rxq->q_id, HINIC5_RQ);
+		if (!rxq->rq) {
+			nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to get rq\n");
+			return -EINVAL;
+		}
+
+		pkts = hinic5_rx_fill_wqe(rxq);
+		if (pkts != rxq->q_depth) {
+			nicif_err(nic_dev, drv, nic_dev->netdev, "Failed to fill rx wqe\n");
+			return -EFAULT;
+		}
+
+		pkts = hinic5_rx_fill_buffers(rxq);
+		if (pkts == 0) {
+			nicif_err(nic_dev, drv, nic_dev->netdev,
+				  "Failed to fill Rx buffer\n");
+			return -ENOMEM;
+		}
+#ifdef HAVE_XDP_SUPPORT
+		rxq->xdp_prog = nic_dev->xdp_prog;
+#ifdef HAVE_XDP_RXQ_INFO_REG_NAPI_ID
+		err = xdp_rxq_info_reg(&rxq->xdp_rxq, nic_dev->netdev, q_id, q_id);
+#else
+		err = xdp_rxq_info_reg(&rxq->xdp_rxq, nic_dev->netdev, q_id);
+#endif
+#endif
+		if (err != 0)
+			return err;
+	}
+
+	return 0;
+}
+
+/* Free the rxq array allocated by hinic5_alloc_rxqs() */
+void hinic5_free_rxqs(struct net_device *netdev)
+{
+	struct hinic5_nic_dev *nic_dev = netdev_priv(netdev);
+
+	kfree(nic_dev->rxqs);
+	nic_dev->rxqs = NULL;
+}
+
+/* Allocate and statically initialize one hinic5_rxq per hardware queue */
+int hinic5_alloc_rxqs(struct net_device *netdev)
+{
+	struct hinic5_nic_dev *nic_dev = netdev_priv(netdev);
+	struct device *dev = nic_dev->lld_dev->dev;
+	struct hinic5_rxq *rxq = NULL;
+	u16 num_rxqs = nic_dev->max_qps;
+	u16 q_id;
+	u64 rxq_size;
+
+	rxq_size = num_rxqs * sizeof(*nic_dev->rxqs);
+	if (rxq_size == 0) {
+		nic_err(dev, "Cannot allocate zero size rxqs\n");
+		return -EINVAL;
+	}
+
+	nic_dev->rxqs = kzalloc(rxq_size, GFP_KERNEL);
+	if (!nic_dev->rxqs)
+		return -ENOMEM;
+
+	for (q_id = 0; q_id < num_rxqs; q_id++) {
+		rxq = &nic_dev->rxqs[q_id];
+		rxq->netdev = netdev;
+		rxq->dev = dev;
+		rxq->q_id = q_id;
+		rxq->buf_len = nic_dev->rx_buff_len;
+		rxq->rx_buff_shift = (u32)ilog2(nic_dev->rx_buff_len);
+		rxq->dma_rx_buff_size = nic_dev->dma_rx_buff_size;
+		rxq->q_depth = nic_dev->q_params.rq_depth;
+		rxq->q_mask = nic_dev->q_params.rq_depth - 1;
+
+		rxq_stats_init(rxq);
+	}
+
+	return 0;
+}
+
+/* Apply RX-side configuration: currently only RSS initialization when the
+ * RSS flag is set.
+ */
+int hinic5_rx_configure(struct net_device *netdev, u8 dcb_en)
+{
+	struct hinic5_nic_dev *nic_dev = netdev_priv(netdev);
+	u8 rq2iq_map[HINIC5_MAX_NUM_RQ];
+	int err;
+
+	/* Set all rq mapping to all iq in default */
+
+	memset(rq2iq_map, 0xFF, sizeof(rq2iq_map));
+
+	if (test_bit(HINIC5_RSS_ENABLE, &nic_dev->flags) != 0) {
+		err = hinic5_rss_init(nic_dev, rq2iq_map, sizeof(rq2iq_map), dcb_en);
+		if (err != 0) {
+			nicif_err(nic_dev, drv, netdev, "Failed to init rss\n");
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
+/* Undo hinic5_rx_configure(): tear down RSS if it was enabled */
+void hinic5_rx_remove_configure(struct net_device *netdev)
+{
+	struct hinic5_nic_dev *nic_dev = netdev_priv(netdev);
+
+	if (test_bit(HINIC5_RSS_ENABLE, &nic_dev->flags) != 0)
+		hinic5_rss_deinit(nic_dev);
+}
+
+/* Attempt to recover a stalled RX queue: repost buffers for all free WQEBBs,
+ * resynchronize the software indices with the hardware CI, drop any stale
+ * done CQEs, and ring the doorbell. Called from the rxq check worker.
+ */
+int rxq_restore(struct hinic5_nic_dev *nic_dev, u16 q_id, u16 hw_ci)
+{
+	struct hinic5_rxq *rxq = &nic_dev->rxqs[q_id];
+	struct hinic5_rq_wqe *rq_wqe = NULL;
+	struct hinic5_rx_info *rx_info = NULL;
+	dma_addr_t dma_addr;
+	u32 free_wqebbs = rxq->delta - rxq->restore_buf_num;
+	u32 buff_pi;
+	u32 i;
+	int err;
+
+	if
(rxq->delta < rxq->restore_buf_num)
+		return -EINVAL;
+
+	if (rxq->restore_buf_num == 0) /* start restore process */
+		rxq->restore_pi = rxq->next_to_update;
+
+	buff_pi = rxq->restore_pi;
+
+	/* sanity check: sw indices must still be self-consistent */
+	if ((((rxq->cons_idx & rxq->q_mask) + rxq->q_depth -
+	      rxq->next_to_update) % rxq->q_depth) != rxq->delta)
+		return -EINVAL;
+
+	for (i = 0; i < free_wqebbs; i++) {
+		rx_info = &rxq->rx_info[buff_pi];
+
+		if (unlikely(!rx_alloc_mapped_page(nic_dev, rx_info))) {
+			RXQ_STATS_INC(rxq, alloc_rx_buf_err);
+			rxq->restore_pi = (u16)((rxq->restore_pi + i) & rxq->q_mask);
+			return -ENOMEM;
+		}
+
+		dma_addr = rx_info->buf_dma_addr + rx_info->page_offset;
+
+		rq_wqe = rx_info->rq_wqe;
+
+		if (rxq->rq->wqe_type == HINIC5_EXTEND_RQ_WQE) {
+			rq_wqe->extend_wqe.buf_desc.sge.hi_addr =
+				hinic5_hw_be32(upper_32_bits(dma_addr));
+			rq_wqe->extend_wqe.buf_desc.sge.lo_addr =
+				hinic5_hw_be32(lower_32_bits(dma_addr));
+		} else {
+			rq_wqe->normal_wqe.buf_hi_addr =
+				hinic5_hw_be32(upper_32_bits(dma_addr));
+			rq_wqe->normal_wqe.buf_lo_addr =
+				hinic5_hw_be32(lower_32_bits(dma_addr));
+		}
+		buff_pi = (u16)((buff_pi + 1) & rxq->q_mask);
+		rxq->restore_buf_num++;
+	}
+
+	nic_info(nic_dev->lld_dev->dev, "rxq %u restore_buf_num:%u\n", q_id, rxq->restore_buf_num);
+
+	/* release the buffer just before the HW consumer index */
+	rx_info = &rxq->rx_info[(hw_ci + rxq->q_depth - 1) & rxq->q_mask];
+	if (rx_info->buf_dma_addr != 0) {
+		dma_unmap_page(nic_dev->lld_dev->dev, rx_info->buf_dma_addr,
+			       nic_dev->dma_rx_buff_size, DMA_FROM_DEVICE);
+		rx_info->buf_dma_addr = 0;
+	}
+
+	if (rx_info->page) {
+		__free_pages(rx_info->page, nic_dev->page_order);
+		rx_info->page = NULL;
+	}
+
+	rxq->delta = 1;
+	rxq->next_to_update = (u16)((hw_ci + rxq->q_depth - 1) & rxq->q_mask);
+	rxq->cons_idx = (u16)((rxq->next_to_update + 1) & rxq->q_mask);
+	rxq->restore_buf_num = 0;
+	rxq->next_to_alloc = rxq->next_to_update;
+
+	/* drop any CQEs that still have the done bit set from before the stall */
+	for (i = 0; i < rxq->q_depth; i++) {
+		if (HINIC5_GET_RX_DONE(hinic5_hw_cpu32(rxq->rx_info[i].cqe->status)) == 0)
+			continue;
+
+		RXQ_STATS_INC(rxq, restore_drop_sge);
+		rxq->rx_info[i].cqe->status = 0;
+	}
+
+	err = hinic5_cache_out_qps_res(nic_dev->hwdev);
+	if (err != 0) {
+		clear_bit(HINIC5_RXQ_RECOVERY, &nic_dev->flags);
+		return err;
+	}
+
+	hinic5_write_db(rxq->rq, rxq->q_id & (NIC_DCB_COS_MAX - 1),
+			RQ_CFLAG_DP, (u16)((u32)rxq->next_to_update << rxq->rq->wqe_type));
+
+	return 0;
+}
+
+/* Heuristic health check: returns true if the queue shows any sign of
+ * forward progress (or pending work) since the last sample, false when it
+ * looks stalled and a restore should be attempted.
+ */
+bool rxq_is_normal(struct hinic5_rxq *rxq, struct rxq_check_info rxq_info)
+{
+	u32 status;
+
+	if (rxq->rxq_stats.packets != rxq->last_packets || rxq_info.hw_pi != rxq_info.hw_ci ||
+	    rxq_info.hw_ci != rxq->last_hw_ci || rxq->next_to_update != rxq->last_sw_pi)
+		return true;
+
+	/* hw rx no wqe and driver rx no packet recv */
+	status = rxq->rx_info[rxq->cons_idx & rxq->q_mask].cqe->status;
+	if (HINIC5_GET_RX_DONE(hinic5_hw_cpu32(status)) != 0)
+		return true;
+
+	if ((rxq->cons_idx & rxq->q_mask) != rxq->last_sw_ci ||
+	    rxq->rxq_stats.packets != rxq->last_packets ||
+	    rxq->next_to_update != rxq_info.hw_pi)
+		return true;
+
+	return false;
+}
+
+#define RXQ_CHECK_ERR_TIMES 2
+#define RXQ_PRINT_MAX_TIMES 3
+#define RXQ_GET_ERR_MAX_TIMES 3
+/* Periodic worker: sample HW queue state, detect stalled RX queues and try
+ * to restore them; re-queues itself every second while recovery is enabled.
+ */
+void hinic5_rxq_check_work_handler(struct work_struct *work)
+{
+	struct delayed_work *delay = to_delayed_work(work);
+	struct hinic5_nic_dev *nic_dev = container_of(delay, struct hinic5_nic_dev, rxq_check_work);
+	struct rxq_check_info *rxq_info = NULL;
+	struct hinic5_rxq *rxq = NULL;
+	u64 size;
+	u16 qid;
+	int err;
+
+	if (test_bit(HINIC5_INTF_UP, &nic_dev->flags) == 0)
+		return;
+
+	if (test_bit(HINIC5_RXQ_RECOVERY, &nic_dev->flags) != 0)
+		queue_delayed_work(nic_dev->workq, &nic_dev->rxq_check_work, HZ);
+
+#ifdef HAVE_PAGE_POOL_SUPPORT
+	if (nic_dev->page_pool_enabled)
+		return;
+#endif
+
+	size = sizeof(*rxq_info) * nic_dev->q_params.num_qps;
+	if (size == 0)
+		return;
+
+	rxq_info = kzalloc(size, GFP_KERNEL);
+	if (!rxq_info)
+		return;
+
+	err = hinic5_get_rxq_hw_info(nic_dev->hwdev, rxq_info, nic_dev->q_params.num_qps,
+				     nic_dev->rxqs[0].rq->wqe_type);
+	if (err != 0) {
+
nic_dev->rxq_get_err_times++;
+		if (nic_dev->rxq_get_err_times >= RXQ_GET_ERR_MAX_TIMES)
+			clear_bit(HINIC5_RXQ_RECOVERY, &nic_dev->flags);
+		goto free_rxq_info;
+	}
+
+	for (qid = 0; qid < nic_dev->q_params.num_qps; qid++) {
+		rxq = &nic_dev->rxqs[qid];
+		if (!rxq_is_normal(rxq, rxq_info[qid])) {
+			rxq->rx_check_err_cnt++;
+			if (rxq->rx_check_err_cnt < RXQ_CHECK_ERR_TIMES)
+				continue;
+
+			if (rxq->rxq_print_times <= RXQ_PRINT_MAX_TIMES) {
+				nic_warn(nic_dev->lld_dev->dev, "rxq %u wqe abnormal, hw_pi:%u, hw_ci:%u, sw_pi:%u, sw_ci:%u delta:%u\n",
+					 qid, rxq_info[qid].hw_pi, rxq_info[qid].hw_ci,
+					 rxq->next_to_update,
+					 rxq->cons_idx & rxq->q_mask, rxq->delta);
+				rxq->rxq_print_times++;
+			}
+
+			if (rxq_restore(nic_dev, qid, rxq_info[qid].hw_ci) != 0)
+				continue;
+		}
+
+		/* queue healthy (or restored): record the new baseline sample */
+		rxq->rxq_print_times = 0;
+		rxq->rx_check_err_cnt = 0;
+		rxq->last_sw_pi = rxq->next_to_update;
+		rxq->last_sw_ci = rxq->cons_idx & rxq->q_mask;
+		rxq->last_hw_ci = rxq_info[qid].hw_ci;
+		rxq->last_packets = rxq->rxq_stats.packets;
+	}
+
+	nic_dev->rxq_get_err_times = 0;
+
+free_rxq_info:
+	kfree(rxq_info);
+}
+
+/* Register a per-service RX CQE callback invoked from rx_cqe_check() */
+int hinic5_register_cqe_cb(struct hinic5_lld_dev *lld_dev, enum hinic5_service_type event,
+			   hinic5_cqe_cb cqe_cb)
+{
+	struct hinic5_nic_dev *nic_dev = NULL;
+
+	if (!lld_dev || !cqe_cb || event >= SERVICE_T_MAX ||
+	    !hinic5_support_nic(lld_dev->hwdev, NULL))
+		return -EINVAL;
+
+	nic_dev = hinic5_get_uld_dev_unsafe(lld_dev, SERVICE_T_NIC);
+	if (!nic_dev) {
+		nic_err(lld_dev->dev, "There's no net device attached on the pci device");
+		return -EINVAL;
+	}
+
+	nic_dev->tx_rx_ops.cqe_cb[event] = cqe_cb;
+
+	set_bit(event, &nic_dev->tx_rx_ops.cqe_cb_state[event]);
+	return 0;
+}
+EXPORT_SYMBOL(hinic5_register_cqe_cb);
+
+/* Unregister a CQE callback; spins until any in-flight invocation finishes
+ * (cqe_cb_running bit) before clearing the pointer.
+ */
+void hinic5_unregister_cqe_cb(struct hinic5_lld_dev *lld_dev, enum hinic5_service_type event)
+{
+	struct hinic5_nic_dev *nic_dev = NULL;
+
+	if (!lld_dev || event >= SERVICE_T_MAX || !hinic5_support_nic(lld_dev->hwdev, NULL))
+		return;
+
+	nic_dev = hinic5_get_uld_dev_unsafe(lld_dev, SERVICE_T_NIC);
+	if (!nic_dev)
+		return;
+
+	clear_bit(event, &nic_dev->tx_rx_ops.cqe_cb_state[event]);
+
+	while (test_bit(event,
+			&nic_dev->tx_rx_ops.cqe_cb_running[event]))
+		usleep_range(HINIC5_RQ_CQE_USLEEP_LOW_BOUND, HINIC5_RQ_CQE_USLEEP_HIGH_BOUND);
+	nic_dev->tx_rx_ops.cqe_cb[event] = NULL;
+}
+EXPORT_SYMBOL(hinic5_unregister_cqe_cb);
diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/nicio/hinic5_rx.h b/hinic5/src/dpu_platform_library/host/service/nic/linux/nicio/hinic5_rx.h
new file mode 100644
index 00000000..d15271ab
--- /dev/null
+++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/nicio/hinic5_rx.h
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved. */
+
+#ifndef HINIC5_RX_H
+#define HINIC5_RX_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/mm_types.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/u64_stats_sync.h>
+
+#include "ossl_knl.h"
+#include "hinic5_nic_io.h"
+#include "hinic5_nic_sq.h"
+#include "hinic5_nic_rq.h"
+
+/* performance: ci addr RTE_CACHE_SIZE(64B) alignment */
+#define HINIC5_RX_HDR_SIZE 256
+#define HINIC5_RX_BUFFER_WRITE 16
+
+/* packet-type values reported in the CQE */
+#define HINIC5_RX_TCP_PKT 0x3
+#define HINIC5_RX_UDP_PKT 0x4
+#define HINIC5_RX_SCTP_PKT 0x7
+
+#define HINIC5_RX_IPV4_PKT 0
+#define HINIC5_RX_IPV6_PKT 1
+#define HINIC5_RX_INVALID_IP_TYPE 2
+
+#define HINIC5_RX_PKT_FORMAT_NON_TUNNEL 0
+#define HINIC5_RX_PKT_FORMAT_VXLAN 1
+
+/* bump one rxq statistics counter under the u64_stats seqlock */
+#define RXQ_STATS_INC(rxq, field) \
+do { \
+	u64_stats_update_begin(&(rxq)->rxq_stats.syncp); \
+	(rxq)->rxq_stats.field++; \
+	u64_stats_update_end(&(rxq)->rxq_stats.syncp); \
+} while (0)
+
+/* number of SGEs (rx buffers) a packet of pkt_len occupies */
+#define HINIC5_GET_SGE_NUM(pkt_len, rxq) \
+	((u8)(((pkt_len) >> (rxq)->rx_buff_shift) + \
+	      ((((pkt_len) & ((rxq)->buf_len - 1)) != 0) ?
1 : 0)))
+
+/* rx cqe checksum err */
+#define HINIC5_RX_CSUM_IP_CSUM_ERR BIT(0)
+#define HINIC5_RX_CSUM_TCP_CSUM_ERR BIT(1)
+#define HINIC5_RX_CSUM_UDP_CSUM_ERR BIT(2)
+#define HINIC5_RX_CSUM_IGMP_CSUM_ERR BIT(3)
+#define HINIC5_RX_CSUM_ICMPV4_CSUM_ERR BIT(4)
+#define HINIC5_RX_CSUM_ICMPV6_CSUM_ERR BIT(5)
+#define HINIC5_RX_CSUM_SCTP_CRC_ERR BIT(6)
+#define HINIC5_RX_CSUM_HW_CHECK_NONE BIT(7)
+#define HINIC5_RX_CSUM_IPSU_OTHER_ERR BIT(8)
+
+#define HINIC5_HEADER_DATA_UNIT 2
+#define HINIC5_CQE_LEN 32
+/* compact CQE sizes (also used as packet offsets in integrated mode) */
+#define HINIC5_COMPACT_CQE_8B 8
+#define HINIC5_COMPACT_CQE_16B 16
+
+/* cqe_mode: CQE in its own ring (separate) vs. prepended to the packet */
+#define HINIC5_RQ_CQE_SEPARATE 0
+#define HINIC5_RQ_CQE_INTEGRATE 1
+
+#define HINIC5_RQ_CQE_USLEEP_LOW_BOUND 900
+#define HINIC5_RQ_CQE_USLEEP_HIGH_BOUND 1000
+
+/* flow bifurcation */
+#define HINIC5_GROUP_NUMBER_MIN 1
+#define HINIC5_GROUP_NUMBER_MAX 8
+
+/* per-queue RX counters, read under the u64_stats sync */
+struct hinic5_rxq_stats {
+	u64 packets;
+	u64 bytes;
+	u64 errors;
+	u64 csum_errors;
+	u64 other_errors;
+	u64 dropped;
+	u64 rx_buf_empty;
+
+	u64 alloc_skb_err;
+	u64 alloc_rx_buf_err;
+	u64 restore_drop_sge;
+	u64 pkt_mc;
+#ifdef HAVE_XDP_SUPPORT
+	u64 xdp_dropped;
+	u64 xdp_redirected;
+	u64 xdp_large_pkt;
+#endif
+#ifdef HAVE_NDO_GET_STATS64
+	struct u64_stats_sync syncp;
+#else
+	struct u64_stats_sync_empty syncp;
+#endif
+};
+
+/* per-descriptor bookkeeping: DMA buffer, its CQE, and the posted WQE */
+struct hinic5_rx_info {
+	dma_addr_t buf_dma_addr;
+
+	struct hinic5_rq_cqe *cqe;
+	struct hinic5_cqe_info *cqe_info;
+	dma_addr_t cqe_dma;
+	struct page *page;
+#ifdef HAVE_PAGE_POOL_SUPPORT
+	struct page_pool *page_pool;
+#endif
+	u32 page_offset;
+	u32 rsvd1;
+	struct hinic5_rq_wqe *rq_wqe;
+	struct sk_buff *saved_skb;
+	u32 skb_len;
+	u32 rsvd2;
+};
+
+/* software state of one RX queue */
+struct hinic5_rxq {
+	struct net_device *netdev;
+
+	u16 q_id;
+	u16 rsvd1;
+	u32 q_depth;
+	u32 q_mask;
+
+	u16 buf_len;
+	u16 rsvd2;
+	u32 rx_buff_shift;
+	u32 dma_rx_buff_size;
+
+	struct hinic5_rxq_stats rxq_stats;
+	u32 cons_idx;
+	u32 delta;
+
+	u32 irq_id;
+	u16 msix_entry_idx;
+#ifdef HAVE_XDP_SUPPORT
+	u16 xdp_headroom_flag;
+#else
+	u16 rsvd3;
+#endif
+
+	struct hinic5_rx_info *rx_info;
+	struct hinic5_io_queue *rq;
+#ifdef HAVE_XDP_SUPPORT
+	struct bpf_prog *xdp_prog;
+	struct xdp_rxq_info xdp_rxq;
+#endif
+
+	struct hinic5_irq *irq_cfg;
+	u16 next_to_alloc;
+	u16 next_to_update;
+	struct device *dev; /* device for DMA mapping */
+
+	u64 status;
+	dma_addr_t cqe_start_paddr;
+	void *cqe_start_vaddr;
+
+	u64 last_moder_packets;
+	u64 last_moder_bytes;
+	u8 last_coalesc_timer_cfg;
+	u8 last_pending_limt;
+	u16 restore_buf_num;
+	u32 rsvd5;
+	u64 rsvd6;
+
+	/* snapshot of indices/counters taken by the stall-check worker */
+	u32 last_sw_pi;
+	u32 last_sw_ci;
+
+	u32 last_hw_ci;
+	u8 rx_check_err_cnt;
+	u8 rxq_print_times;
+	u16 restore_pi;
+
+	u64 last_packets;
+} ____cacheline_aligned;
+
+/* resources allocated per queue before the queue itself is configured */
+struct hinic5_dyna_rxq_res {
+	u16 next_to_alloc;
+	struct hinic5_rx_info *rx_info;
+	struct hinic5_cqe_info *cqe_info;
+	dma_addr_t cqe_start_paddr;
+	void *cqe_start_vaddr;
+#ifdef HAVE_PAGE_POOL_SUPPORT
+	struct page_pool *page_pool;
+#endif
+};
+
+struct hinic5_nic_dev;
+
+int hinic5_alloc_rxqs(struct net_device *netdev);
+
+void hinic5_free_rxqs(struct net_device *netdev);
+
+int hinic5_alloc_rxqs_res(struct hinic5_nic_dev *nic_dev, u16 num_rq,
+			  u32 rq_depth, struct hinic5_dyna_rxq_res *rxqs_res);
+
+void hinic5_free_rxqs_res(struct hinic5_nic_dev *nic_dev, u16 num_rq,
+			  u32 rq_depth, struct hinic5_dyna_rxq_res *rxqs_res);
+
+void hinic5_remove_configure_rxqs(struct hinic5_nic_dev *nic_dev);
+
+int hinic5_configure_rxqs(struct hinic5_nic_dev *nic_dev, u16 num_rq,
+			  u32 rq_depth, struct hinic5_dyna_rxq_res *rxqs_res);
+
+int hinic5_rx_configure(struct net_device *netdev, u8 dcb_en);
+
+void hinic5_rx_remove_configure(struct net_device *netdev);
+
+int hinic5_rx_poll(struct hinic5_rxq *rxq, int budget);
+
+void hinic5_rxq_get_stats(struct hinic5_rxq *rxq,
+			  struct hinic5_rxq_stats *stats);
+
+void hinic5_rxq_clean_stats(struct hinic5_rxq_stats *rxq_stats);
+
+void hinic5_rxq_check_work_handler(struct work_struct *work);
+
+void hinic5_rx_get_cqe_info(struct hinic5_rq_cqe *cqe,
+
struct hinic5_cqe_info *info, u8 cqe_mode, bool enable_pfe); + +void hinic5_rx_get_compact_cqe_info(struct hinic5_rq_cqe *cqe, + struct hinic5_cqe_info *info, u8 cqe_mode, bool enable_pfe); + +void hinic5_reuse_rx_page(struct hinic5_rxq *rxq, + struct hinic5_rx_info *old_rx_info); + +struct sk_buff *hinic5_fetch_rx_buffer(struct hinic5_rxq *rxq, + const struct hinic5_cqe_info *cqe_info); + +bool hinic5_rx_separate_cqe_done(struct hinic5_rxq *rxq, struct hinic5_rq_cqe **rx_cqe); + +bool hinic5_rx_integrated_cqe_done(struct hinic5_rxq *rxq, struct hinic5_rq_cqe **rx_cqe); + +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/nicio/hinic5_tx.c b/hinic5/src/dpu_platform_library/host/service/nic/linux/nicio/hinic5_tx.c new file mode 100644 index 00000000..cf4d0064 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/nicio/hinic5_tx.c @@ -0,0 +1,1321 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <net/xfrm.h> +#include <net/ndisc.h> +#include <linux/netdevice.h> +#include <linux/kernel.h> +#include <linux/skbuff.h> +#include <linux/interrupt.h> +#include <linux/device.h> +#include <linux/tcp.h> +#include <linux/sctp.h> +#include <linux/dma-mapping.h> +#include <linux/types.h> +#include <linux/u64_stats_sync.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/icmpv6.h> +#include <linux/ipv6.h> + +#include "ossl_knl.h" +#include "hinic5_crm.h" +#include "hinic5_nic_sq.h" +#include "hinic5_nic_rq.h" +#include "hinic5_nic_io.h" +#include "hinic5_nic_cfg.h" +#include "hinic5_srv_nic.h" +#include "hinic5_nic_dev.h" +#include "hinic5_ptp.h" +#include "hinic5_tx.h" + +/* 1872 FT B600临时修改方案,配置文件适配后删除 */ +#define QP_COS_MASK 7 +static char qp_cos_mask = QP_COS_MASK; +module_param(qp_cos_mask, byte, 0444); +MODULE_PARM_DESC(qp_cos_mask, "QP COS mask, 0-255 (default=0)"); + +/* The 1823v200 product non-tso 
SGEs is 32, and that of the 1825v100&1872v100 is 38. + * The number of non-tso SGEs is strictly constrained to 32. + */ +#define HINIC5_NONTSO_PKT_MAX_SGE 32 + +#define MIN_SKB_LEN 32 + +#define MAX_PAYLOAD_OFFSET 221 + +#define NIC_QID(q_id, nic_dev) ((q_id) & ((nic_dev)->num_qps - 1)) + +#define HINIC5_TX_TASK_WRAPPED 1 +#define HINIC5_TX_BD_DESC_WRAPPED 2 + +#define NON_TSO_SKB_SIZE_MAX 0xFFFF +#define TSO_SKB_SIZE_MAX 0x3FFFF + +void hinic5_txq_get_stats(struct hinic5_txq *txq, + struct hinic5_txq_stats *stats) +{ + struct hinic5_txq_stats *txq_stats = &txq->txq_stats; + unsigned int start; + + u64_stats_update_begin(&stats->syncp); + do { + start = u64_stats_fetch_begin(&txq_stats->syncp); + stats->bytes = txq_stats->bytes; + stats->packets = txq_stats->packets; + stats->busy = txq_stats->busy; + stats->wake = txq_stats->wake; + stats->dropped = txq_stats->dropped; + stats->unfinished = txq_stats->unfinished; + } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); + u64_stats_update_end(&stats->syncp); +} + +void hinic5_txq_clean_stats(struct hinic5_txq_stats *txq_stats) +{ + u64_stats_update_begin(&txq_stats->syncp); + txq_stats->bytes = 0; + txq_stats->packets = 0; + txq_stats->busy = 0; + txq_stats->wake = 0; + txq_stats->dropped = 0; + txq_stats->unfinished = 0; + + txq_stats->skb_pad_err = 0; + txq_stats->frag_len_overflow = 0; + txq_stats->offload_cow_skb_err = 0; + txq_stats->map_frag_err = 0; + txq_stats->unknown_tunnel_pkt = 0; + txq_stats->frag_size_err = 0; + txq_stats->rsvd1 = 0; + txq_stats->rsvd2 = 0; + u64_stats_update_end(&txq_stats->syncp); +} + +static void txq_stats_init(struct hinic5_txq *txq) +{ + struct hinic5_txq_stats *txq_stats = &txq->txq_stats; + + u64_stats_init(&txq_stats->syncp); + hinic5_txq_clean_stats(txq_stats); + +#ifdef HAVE_XDP_SUPPORT + hinic5_xdptxq_init_stats(txq); +#endif +} + +#ifdef HAVE_XDP_SUPPORT +void hinic5_xdptxq_get_stats(struct hinic5_txq *txq, + struct hinic5_xdptxq_stats *stats) +{ + struct 
hinic5_xdptxq_stats *xdptxq_stats = &txq->xdptxq_stats; + unsigned int start; + + u64_stats_update_begin(&stats->syncp); + do { + start = u64_stats_fetch_begin(&xdptxq_stats->syncp); + stats->xdp_dropped = xdptxq_stats->xdp_dropped; + stats->xdp_xmits = xdptxq_stats->xdp_xmits; + stats->map_xdpf_err = xdptxq_stats->map_xdpf_err; + } while (u64_stats_fetch_retry(&xdptxq_stats->syncp, start)); + u64_stats_update_end(&stats->syncp); +} + +void hinic5_xdptxq_clean_stats(struct hinic5_xdptxq_stats *xdptxq_stats) +{ + u64_stats_update_begin(&xdptxq_stats->syncp); + xdptxq_stats->xdp_dropped = 0; + xdptxq_stats->xdp_xmits = 0; + xdptxq_stats->map_xdpf_err = 0; + u64_stats_update_end(&xdptxq_stats->syncp); +} + +void hinic5_xdptxq_init_stats(struct hinic5_txq *txq) +{ + struct hinic5_xdptxq_stats *xdptxq_stats = &txq->xdptxq_stats; + + u64_stats_init(&xdptxq_stats->syncp); + hinic5_xdptxq_clean_stats(xdptxq_stats); +} +#endif + +static inline void hinic5_set_buf_desc(struct hinic5_sq_bufdesc *buf_descs, + dma_addr_t addr, u32 len) +{ + buf_descs->hi_addr = hinic5_hw_be32(upper_32_bits(addr)); + buf_descs->lo_addr = hinic5_hw_be32(lower_32_bits(addr)); + buf_descs->len = hinic5_hw_be32(len); + buf_descs->rsvd = 0; +} + +static int tx_map_skb(struct hinic5_nic_dev *nic_dev, struct sk_buff *skb, + u16 valid_nr_frags, struct hinic5_txq *txq, + struct hinic5_tx_info *tx_info, + struct hinic5_sq_wqe_combo *wqe_combo) +{ + struct hinic5_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0; + struct hinic5_sq_bufdesc *buf_desc = wqe_combo->bds_head; + struct hinic5_dma_info *dma_info = tx_info->dma_info; + struct device *dev = nic_dev->lld_dev->dev; + skb_frag_t *frag = NULL; + u32 j, i; + int err; + + dma_info[0].dma = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_info[0].dma) != 0) { + TXQ_STATS_INC(txq, map_frag_err); + return -EFAULT; + } + + dma_info[0].len = skb_headlen(skb); + + wqe_desc->hi_addr = 
hinic5_hw_be32(upper_32_bits(dma_info[0].dma)); + wqe_desc->lo_addr = hinic5_hw_be32(lower_32_bits(dma_info[0].dma)); + + wqe_desc->ctrl_len = dma_info[0].len; + + for (i = 0; i < valid_nr_frags;) { + frag = &(skb_shinfo(skb)->frags[i]); + if (unlikely(i == wqe_combo->first_bds_num)) + buf_desc = wqe_combo->bds_sec2; + + i++; + dma_info[i].dma = skb_frag_dma_map(dev, frag, 0, + skb_frag_size(frag), + DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_info[i].dma) != 0) { + TXQ_STATS_INC(txq, map_frag_err); + i--; + err = -EFAULT; + goto frag_map_err; + } + dma_info[i].len = skb_frag_size(frag); + + hinic5_set_buf_desc(buf_desc, dma_info[i].dma, + dma_info[i].len); + buf_desc++; + } + + return 0; + +frag_map_err: + for (j = 0; j < i;) { + j++; + dma_unmap_page(dev, dma_info[j].dma, + dma_info[j].len, DMA_TO_DEVICE); + } + dma_unmap_single(dev, dma_info[0].dma, dma_info[0].len, + DMA_TO_DEVICE); + return err; +} + +static inline void tx_unmap_skb(struct hinic5_nic_dev *nic_dev, + struct sk_buff *skb, u16 valid_nr_frags, + struct hinic5_dma_info *dma_info) +{ + struct device *dev = nic_dev->lld_dev->dev; + int i; + + for (i = 0; i < valid_nr_frags;) { + i++; + dma_unmap_page(dev, + dma_info[i].dma, + dma_info[i].len, DMA_TO_DEVICE); + } + + dma_unmap_single(dev, dma_info[0].dma, + dma_info[0].len, DMA_TO_DEVICE); +} + +union hinic5_l4 { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; +}; + +enum sq_l3_type { + UNKNOWN_L3TYPE = 0, + IPV6_PKT = 1, + IPV4_PKT_NO_CHKSUM_OFFLOAD = 2, + IPV4_PKT_WITH_CHKSUM_OFFLOAD = 3, +}; + +enum sq_l4offload_type { + OFFLOAD_DISABLE = 0, + TCP_OFFLOAD_ENABLE = 1, + SCTP_OFFLOAD_ENABLE = 2, + UDP_OFFLOAD_ENABLE = 3, +}; + +/* initialize l4_len and offset */ +static void get_inner_l4_info(struct sk_buff *skb, union hinic5_l4 *l4, + u8 l4_proto, u32 *offset, + enum sq_l4offload_type *l4_offload) +{ + switch (l4_proto) { + case IPPROTO_TCP: + *l4_offload = TCP_OFFLOAD_ENABLE; + /* To keep same with TSO, payload offset begins 
from paylaod */ + *offset = (l4->tcp->doff << TCP_HDR_DATA_OFF_UNIT_SHIFT) + + TRANSPORT_OFFSET(l4->hdr, skb); + break; + + case IPPROTO_UDP: + *l4_offload = UDP_OFFLOAD_ENABLE; + *offset = TRANSPORT_OFFSET(l4->hdr, skb); + break; + default: + break; + } +} + +#if (KERNEL_VERSION(3, 8, 0) <= LINUX_VERSION_CODE) +static inline int hinic5_check_tunnel_pkt_support(struct net_device *netdev, u8 l4_proto, + union hinic5_l4 l4) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + + switch (l4_proto) { + case IPPROTO_IPIP: + case IPPROTO_IPV6: + if (HINIC5_SUPPORT_IPXIP_OFFLOAD(nic_dev->hwdev)) + return 0; + + break; + case IPPROTO_UDP: + if (l4.udp->dest == VXLAN_OFFLOAD_PORT_LE && + HINIC5_SUPPORT_VXLAN_OFFLOAD(nic_dev->hwdev)) + return 0; + + if (l4.udp->dest == GENEVE_OFFLOAD_PORT_LE && + HINIC5_SUPPORT_GENEVE_OFFLOAD(nic_dev->hwdev)) + return 0; + + break; + default: + break; + } + + return -EINVAL; +} +#endif +static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic5_ip *ip, + union hinic5_l4 *l4, + enum sq_l3_type *l3_type, u8 *l4_proto) +{ + unsigned char *exthdr = NULL; + + if (ip->v4->version == IP4_VERSION) { + *l3_type = IPV4_PKT_WITH_CHKSUM_OFFLOAD; + *l4_proto = ip->v4->protocol; + +#ifdef HAVE_OUTER_IPV6_TUNNEL_OFFLOAD + /* inner_transport_header is wrong in centos7.0 and suse12.1 */ + l4->hdr = ip->hdr + ((u8)ip->v4->ihl << IP_HDR_IHL_UNIT_SHIFT); +#endif + } else if (ip->v4->version == IP6_VERSION) { + *l3_type = IPV6_PKT; + exthdr = ip->hdr + sizeof(*ip->v6); + *l4_proto = ip->v6->nexthdr; + if (exthdr != l4->hdr) { + __be16 frag_off = 0; +#ifndef HAVE_OUTER_IPV6_TUNNEL_OFFLOAD + ipv6_skip_exthdr(skb, (int)(exthdr - skb->data), + l4_proto, &frag_off); +#else + int pld_off = 0; + + pld_off = ipv6_skip_exthdr(skb, + (int)(exthdr - skb->data), + l4_proto, &frag_off); + l4->hdr = skb->data + pld_off; +#endif + } + } else { + *l3_type = UNKNOWN_L3TYPE; + *l4_proto = 0; + } +} + +static u8 hinic5_get_inner_l4_type(struct sk_buff *skb) +{ + 
enum sq_l3_type l3_type; + u8 l4_proto; + union hinic5_ip ip; + union hinic5_l4 l4; + + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + + get_inner_l3_l4_type(skb, &ip, &l4, &l3_type, &l4_proto); + + return l4_proto; +} + +static void hinic5_set_unknown_tunnel_csum(struct sk_buff *skb) +{ + int csum_offset; + __sum16 skb_csum; + u8 l4_proto; + + l4_proto = hinic5_get_inner_l4_type(skb); + /* Unsupport tunnel packet, disable csum offload */ + skb_checksum_help(skb); + + /* The value of csum is changed from 0xffff to 0 according to RFC1624. */ + if (skb->ip_summed == CHECKSUM_NONE && l4_proto != IPPROTO_UDP) { + csum_offset = skb_checksum_start_offset(skb) + skb->csum_offset; + skb_csum = *(__sum16 *)(skb->data + csum_offset); + if (skb_csum == 0xffff) + *(__sum16 *)(skb->data + csum_offset) = 0; + } +} + +static int hinic5_tx_csum(struct hinic5_txq *txq, struct sk_buff *skb, + struct hinic5_offload_info *offload_info, + struct hinic5_queue_info *queue_info) +{ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + +#if (KERNEL_VERSION(3, 8, 0) <= LINUX_VERSION_CODE) + if (skb->encapsulation != 0) { + union hinic5_ip ip; + union hinic5_l4 l4; + u8 l4_proto; + + offload_info->encapsulation = 1; + + ip.hdr = skb_network_header(skb); + if (ip.v4->version == IPV4_VERSION) { + l4_proto = ip.v4->protocol; + l4.hdr = skb_transport_header(skb); + } else if (ip.v4->version == IPV6_VERSION) { + unsigned char *exthdr = NULL; + __be16 frag_off; + +#ifdef HAVE_OUTER_IPV6_TUNNEL_OFFLOAD + offload_info->out_l4_en = 1; +#endif + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + l4.hdr = skb_transport_header(skb); + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); + } else { + l4_proto = IPPROTO_RAW; + } + + if (l4_proto == IPPROTO_UDP) + queue_info->udp_dp_en = 1; + + if (hinic5_check_tunnel_pkt_support(txq->netdev, l4_proto, l4) != 0) { + TXQ_STATS_INC(txq, unknown_tunnel_pkt); + 
hinic5_set_unknown_tunnel_csum(skb); + return 0; + } + } + + offload_info->inner_l4_en = 1; +#else + offload_info->inner_l4_en = 1; +#endif + return 1; +} + +static void hinic5_set_tso_info(struct hinic5_offload_info *offload_info, + struct hinic5_queue_info *queue_info, + enum sq_l4offload_type l4_offload, + u32 offset, u32 mss) +{ + if (l4_offload == TCP_OFFLOAD_ENABLE) { + queue_info->tso = 1; + offload_info->inner_l4_en = 1; + } else if (l4_offload == UDP_OFFLOAD_ENABLE) { + queue_info->ufo = 1; + offload_info->inner_l4_en = 1; + } + + /* Default enable L3 calculation */ + offload_info->inner_l3_en = 1; + + queue_info->payload_offset = (u8)(offset >> 1); + + /* set MSS value */ + queue_info->mss = (u16)mss; +} + +static inline void hinic5_inner_tso_offload(struct hinic5_offload_info *offload_info, + struct hinic5_queue_info *queue_info, + struct sk_buff *skb, + union hinic5_ip ip, union hinic5_l4 l4) +{ + u8 l4_proto; + u32 offset = 0; + enum sq_l3_type l3_type; + enum sq_l4offload_type l4_offload = OFFLOAD_DISABLE; + + get_inner_l3_l4_type(skb, &ip, &l4, &l3_type, &l4_proto); + + if (l4_proto == IPPROTO_TCP) + l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP); +#ifdef HAVE_IP6_FRAG_ID_ENABLE_UFO + else if (l4_proto == IPPROTO_UDP && ip.v4->version == 6) + offload_info->ip_identify = + be32_to_cpu(skb_shinfo(skb)->ip6_frag_id); +#endif + + get_inner_l4_info(skb, &l4, l4_proto, &offset, &l4_offload); + +#ifdef HAVE_OUTER_IPV6_TUNNEL_OFFLOAD + u32 network_hdr_len; + + if (unlikely(l3_type == UNKNOWN_L3TYPE)) + network_hdr_len = 0; + else + network_hdr_len = l4.hdr - ip.hdr; + + if (unlikely(!offset)) { + if (l3_type == UNKNOWN_L3TYPE) + offset = ip.hdr - skb->data; + else if (l4_offload == OFFLOAD_DISABLE) + offset = ip.hdr - skb->data + network_hdr_len; + } +#endif + + hinic5_set_tso_info(offload_info, queue_info, l4_offload, offset, + skb_shinfo(skb)->gso_size); +} + +static int hinic5_tso(struct hinic5_offload_info *offload_info, + struct hinic5_queue_info 
*queue_info, struct sk_buff *skb) +{ + union hinic5_ip ip; + union hinic5_l4 l4; + u8 l4_proto; + + if (!skb_is_gso(skb)) + return 0; + + if (skb_cow_head(skb, 0) < 0) + return -EINVAL; + + l4.hdr = skb_transport_header(skb); + ip.hdr = skb_network_header(skb); +#ifdef HAVE_SK_BUFF_ENCAPSULATION + if (skb->encapsulation != 0) { + u32 gso_type = skb_shinfo(skb)->gso_type; + /* L3 checksum always enable */ + offload_info->out_l3_en = 1; + offload_info->encapsulation = 1; + + if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) != 0) { + l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP); + offload_info->out_l4_en = 1; + } else if ((gso_type & SKB_GSO_UDP_TUNNEL) != 0) { +#ifdef HAVE_OUTER_IPV6_TUNNEL_OFFLOAD + if (ip.v4->version == 6) { + l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP); + offload_info->out_l4_en = 1; + } +#endif + } + + if (ip.v4->version == IPV4_VERSION) { + l4_proto = ip.v4->protocol; + } else if (ip.v4->version == IPV6_VERSION) { + union hinic5_l4 l4_ptr; + unsigned char *exthdr = 0; + __be16 frag_off; + + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + l4_ptr.hdr = skb_transport_header(skb); + if (l4_ptr.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, &frag_off); + } else { + l4_proto = IPPROTO_RAW; + } + + if (l4_proto == IPPROTO_UDP) + queue_info->udp_dp_en = 1; + + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + } +#endif /* HAVE_SK_BUFF_ENCAPSULATION */ + hinic5_inner_tso_offload(offload_info, queue_info, skb, ip, l4); + return 1; +} + +static inline void hinic5_set_vlan_tx_offload(struct hinic5_offload_info *offload_info, + u16 vlan_tag, u8 vlan_type) +{ + offload_info->vlan1_tag = vlan_tag; + offload_info->vlan_sel = vlan_type; + offload_info->vlan_valid = 1; +} + +u32 hinic5_tx_offload(struct sk_buff *skb, struct hinic5_offload_info *offload_info, + struct hinic5_queue_info *queue_info, struct hinic5_txq *txq) +{ + u32 offload = 0; + int tso_cs_en; + struct hinic5_nic_dev *nic_dev = 
netdev_priv(txq->netdev); + + tso_cs_en = hinic5_tso(offload_info, queue_info, skb); + if (tso_cs_en < 0) { + offload = TX_OFFLOAD_INVALID; + return offload; + } else if (tso_cs_en != 0) { + offload |= TX_OFFLOAD_TSO; + } else { + tso_cs_en = hinic5_tx_csum(txq, skb, offload_info, queue_info); + if (tso_cs_en != 0) + offload |= TX_OFFLOAD_CSUM; + } + + if (unlikely(skb_vlan_tag_present(skb) != 0)) { + /* select vlan insert mode by qid, default 802.1Q Tag type */ + hinic5_set_vlan_tx_offload(offload_info, skb_vlan_tag_get(skb), + HINIC5_TX_TPID0); + offload |= TX_OFFLOAD_VLAN; + } + + if (unlikely(queue_info->payload_offset > MAX_PAYLOAD_OFFSET)) { + offload = TX_OFFLOAD_INVALID; + return offload; + } + if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0)) { + offload |= TX_OFFLOAD_PTP; + if (hinic5_ptp_tx_process(nic_dev, skb) == 0) + offload_info->pkt_1588 = 1; + } + + return offload; +} + +static void get_pkt_stats(struct hinic5_tx_info *tx_info, struct sk_buff *skb) +{ + u32 ihs, hdr_len; + + if (skb_is_gso(skb)) { +#if (KERNEL_VERSION(3, 8, 0) <= LINUX_VERSION_CODE) +#if (defined(HAVE_SKB_INNER_TRANSPORT_HEADER) && \ + defined(HAVE_SK_BUFF_ENCAPSULATION)) + if (skb->encapsulation != 0) { +#ifdef HAVE_SKB_INNER_TRANSPORT_OFFSET + ihs = skb_inner_transport_offset(skb) + + inner_tcp_hdrlen(skb); +#else + ihs = (skb_inner_transport_header(skb) - skb->data) + + inner_tcp_hdrlen(skb); +#endif + } else { +#endif +#endif + ihs = (u32)(skb_transport_offset(skb)) + tcp_hdrlen(skb); +#if (KERNEL_VERSION(3, 8, 0) <= LINUX_VERSION_CODE) +#if (defined(HAVE_SKB_INNER_TRANSPORT_HEADER) && \ + defined(HAVE_SK_BUFF_ENCAPSULATION)) + } +#endif +#endif + hdr_len = (skb_shinfo(skb)->gso_segs - 1) * ihs; + tx_info->num_bytes = skb->len + (u64)hdr_len; + } else { + tx_info->num_bytes = (skb->len > ETH_ZLEN) ? 
skb->len : ETH_ZLEN; + } + + tx_info->num_pkts = 1; +} + +int hinic5_maybe_stop_tx(struct hinic5_txq *txq, u16 wqebb_cnt) +{ + if (likely(hinic5_get_sq_free_wqebbs(txq->sq) >= wqebb_cnt)) + return 0; + + /* We need to check again in a case another CPU has just + * made room available. + */ + netif_stop_subqueue(txq->netdev, txq->q_id); + + if (likely(hinic5_get_sq_free_wqebbs(txq->sq) < wqebb_cnt)) + return -EBUSY; + + /* there have enough wqebbs after queue is wake up */ + netif_start_subqueue(txq->netdev, txq->q_id); + + return 0; +} + +static u16 hinic5_set_wqe_combo(struct hinic5_txq *txq, + struct hinic5_sq_wqe_combo *wqe_combo, + u16 num_sge, u16 *curr_pi) +{ + void *second_part_wqebbs_addr = NULL; + void *wqe = NULL; + u16 first_part_wqebbs_num, tmp_pi; + + wqe_combo->ctrl_bd0 = hinic5_get_sq_one_wqebb(txq->sq, curr_pi); + if (wqe_combo->wqebb_cnt == 1) { + /* compact wqe */ + wqe_combo->wqe_type = SQ_WQE_COMPACT_TYPE; + wqe_combo->task_type = SQ_WQE_TASKSECT_4BYTES; + wqe_combo->task = (void *)&wqe_combo->ctrl_bd0->queue_info; + return hinic5_get_and_update_sq_owner(txq->sq, *curr_pi, 1); + } + /* extend normal wqe */ + wqe_combo->wqe_type = SQ_WQE_EXTENDED_TYPE; + wqe_combo->task_type = SQ_WQE_TASKSECT_16BYTES; + wqe_combo->task = hinic5_get_sq_one_wqebb(txq->sq, &tmp_pi); + + if (num_sge > 1) { + /* first wqebb contain bd0, and bd size is equal to sq wqebb + * size, so we use (num_sge - 1) as wanted weqbb_cnt + */ + wqe = hinic5_get_sq_multi_wqebbs(txq->sq, num_sge - 1, &tmp_pi, + &second_part_wqebbs_addr, + &first_part_wqebbs_num); + wqe_combo->bds_head = wqe; + wqe_combo->bds_sec2 = second_part_wqebbs_addr; + wqe_combo->first_bds_num = first_part_wqebbs_num; + } + + return hinic5_get_and_update_sq_owner(txq->sq, *curr_pi, wqe_combo->wqebb_cnt); +} + +void hinic5_tx_set_normal_task_offload(struct hinic5_offload_info *offload, + struct hinic5_sq_wqe_combo *wqe_combo) +{ + struct hinic5_sq_task *task = wqe_combo->task; + + /* if negotiation normal task, 
not support any offload when used 4B tasksection */ + if (wqe_combo->task_type == SQ_WQE_TASKSECT_4BYTES) { + task->pkt_info0 = 0; + return; + } + + task->pkt_info0 = + SQ_TASK_INFO0_SET(offload->inner_l4_en, INNER_L4_EN) | + SQ_TASK_INFO0_SET(offload->inner_l3_en, INNER_L3_EN) | + SQ_TASK_INFO0_SET(offload->encapsulation, TUNNEL_FLAG) | + SQ_TASK_INFO0_SET(offload->out_l3_en, OUT_L3_EN) | + SQ_TASK_INFO0_SET(offload->out_l4_en, OUT_L4_EN); + task->ip_identify = 0; + task->pkt_info2 = 0; + + task->vlan_offload = SQ_TASK_INFO3_SET(offload->vlan1_tag, VLAN_TAG) | + SQ_TASK_INFO3_SET(offload->vlan_sel, VLAN_TYPE) | + SQ_TASK_INFO3_SET(offload->vlan_valid, VLAN_TAG_VALID); + task->pkt_info0 = hinic5_hw_be32(task->pkt_info0); + task->vlan_offload = hinic5_hw_be32(task->vlan_offload); +} + +void hinic5_tx_set_compact_task_offload(struct hinic5_offload_info *offload, + struct hinic5_sq_wqe_combo *wqe_combo) +{ + struct hinic5_sq_task *task = wqe_combo->task; + + task->pkt_info0 = + SQ_TASK_INFO_SET(offload->out_l3_en, OUT_L3_EN) | + SQ_TASK_INFO_SET(offload->out_l4_en, OUT_L4_EN) | + SQ_TASK_INFO_SET(offload->inner_l3_en, INNER_L3_EN) | + SQ_TASK_INFO_SET(offload->inner_l4_en, INNER_L4_EN) | + SQ_TASK_INFO_SET(offload->vlan_valid, VLAN_VALID) | + SQ_TASK_INFO_SET(offload->vlan_sel, VLAN_SEL) | + SQ_TASK_INFO_SET(offload->vlan1_tag, VLAN_TAG) | + SQ_TASK_INFO_SET(offload->pkt_1588, PKT_1588); + task->pkt_info0 = hinic5_hw_be32(task->pkt_info0); +} + +static void hinic5_set_wqe_queue_info(struct hinic5_sq_wqe_combo *wqe_combo, + struct hinic5_queue_info *queue_info) +{ + u32 *qsf = &wqe_combo->ctrl_bd0->queue_info; + *qsf = SQ_CTRL_QUEUE_INFO_SET(1, UC) | + SQ_CTRL_QUEUE_INFO_SET(queue_info->sctp, SCTP) | + SQ_CTRL_QUEUE_INFO_SET(queue_info->udp_dp_en, UDP_DP_EN) | + SQ_CTRL_QUEUE_INFO_SET(queue_info->tso, TSO) | + SQ_CTRL_QUEUE_INFO_SET(queue_info->ufo, UFO) | + SQ_CTRL_QUEUE_INFO_SET(queue_info->payload_offset, PLDOFF) | + SQ_CTRL_QUEUE_INFO_SET(queue_info->pkt_type, 
PKT_TYPE) | + SQ_CTRL_QUEUE_INFO_SET(queue_info->mss, MSS); + + if (SQ_CTRL_QUEUE_INFO_GET(*qsf, MSS) == 0) { + *qsf |= SQ_CTRL_QUEUE_INFO_SET(TX_MSS_DEFAULT, MSS); + } else if (SQ_CTRL_QUEUE_INFO_GET(*qsf, MSS) < TX_MSS_MIN) { + /* mss should not less than 80 */ + *qsf = SQ_CTRL_QUEUE_INFO_CLEAR(*qsf, MSS); + *qsf |= SQ_CTRL_QUEUE_INFO_SET(TX_MSS_MIN, MSS); + } + + *qsf = hinic5_hw_be32(*qsf); +} + +/* * + * hinic5_prepare_sq_ctrl - init sq wqe cs + * @nr_descs: total sge_num, include bd0 in cs + */ +static void hinic5_prepare_sq_ctrl(struct hinic5_sq_wqe_combo *wqe_combo, + struct hinic5_queue_info *queue_info, int nr_descs, u16 owner) +{ + struct hinic5_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0; + + wqe_desc->ctrl_len |= + SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) | + SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) | + SQ_CTRL_SET(owner, OWNER); + + if (wqe_combo->wqe_type == SQ_WQE_EXTENDED_TYPE) { + wqe_desc->ctrl_len |= SQ_CTRL_SET(nr_descs, BUFDESC_NUM) | + SQ_CTRL_SET(wqe_combo->task_type, TASKSECT_LEN); + hinic5_set_wqe_queue_info(wqe_combo, queue_info); + } else { + /* compact wqe not support TSO offload */ + wqe_desc->ctrl_len |= SQ_CTRL_15BIT_QUEUE_INFO_SET(queue_info->sctp, SCTP) | + SQ_CTRL_15BIT_QUEUE_INFO_SET(queue_info->udp_dp_en, UDP_DP_EN) | + SQ_CTRL_15BIT_QUEUE_INFO_SET(queue_info->pkt_type, PKT_TYPE); + } + + wqe_desc->ctrl_len = hinic5_hw_be32(wqe_desc->ctrl_len); +} + +static netdev_tx_t hinic5_send_one_skb(struct sk_buff *skb, + struct net_device *netdev, + struct hinic5_txq *txq) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic5_sq_wqe_combo wqe_combo = {0}; + struct hinic5_offload_info offload_info = {0}; + struct hinic5_queue_info queue_info = {0}; + struct hinic5_tx_info *tx_info = NULL; + u32 offload; + u16 owner = 0, pi = 0; + u16 wqebb_cnt, num_sge, valid_nr_frags; + bool find_zero_sge_len = false; + u32 total_size = 0, frag_size, max_wqe_len; + int err, i; + + if (unlikely(skb->len < MIN_SKB_LEN)) { + if 
(skb_pad(skb, (int)(MIN_SKB_LEN - skb->len)) != 0) { + TXQ_STATS_INC(txq, skb_pad_err); + goto tx_skb_pad_err; + } + + skb->len = MIN_SKB_LEN; + } + + if (unlikely(skb_shinfo(skb)->nr_frags >= HINIC5_NONTSO_PKT_MAX_SGE)) { + if (unlikely(skb_linearize(skb) != 0)) { + TXQ_STATS_INC(txq, offload_cow_skb_err); + goto tx_drop_pkts; + } + } + + max_wqe_len = skb_is_gso(skb) ? TSO_SKB_SIZE_MAX : NON_TSO_SKB_SIZE_MAX; + frag_size = skb_headlen(skb); + if (unlikely(frag_size > max_wqe_len || frag_size == 0)) { + TXQ_STATS_INC(txq, frag_size_err); + goto tx_drop_pkts; + } + total_size += frag_size; + + valid_nr_frags = 0; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]); + total_size += frag_size; + if (unlikely(frag_size == 0)) { + find_zero_sge_len = true; + continue; + } else if (unlikely((find_zero_sge_len) || + (frag_size > max_wqe_len) || + (total_size > max_wqe_len))) { + TXQ_STATS_INC(txq, frag_size_err); + goto tx_drop_pkts; + } + valid_nr_frags++; + } + + num_sge = valid_nr_frags + 1; + + /* assume need normal TS format wqe, task info need 1 wqebb */ + wqebb_cnt = num_sge + 1; + if (unlikely(hinic5_maybe_stop_tx(txq, wqebb_cnt) != 0)) { + TXQ_STATS_INC(txq, busy); + return NETDEV_TX_BUSY; + } + + offload = hinic5_tx_offload(skb, &offload_info, &queue_info, txq); + if (unlikely(offload == TX_OFFLOAD_INVALID)) { + TXQ_STATS_INC(txq, offload_cow_skb_err); + goto tx_drop_pkts; + } else if (offload == 0 && num_sge == 1) { + /* no TS in current wqe */ + wqebb_cnt -= 1; + if (unlikely(num_sge == 1 && skb->len > COMPACET_WQ_SKB_MAX_LEN)) + goto tx_drop_pkts; + } else if ((nic_dev->tx_wqe_compact_task != 0) && + ((offload & TX_OFFLOAD_TSO) == 0) && + num_sge == 1) { + /* One more wqebb is needed for compact task under two situations: + * 1. TSO: MSS field is needed, no available space for compact task in compact wqe. + * 2. SGE number > 1: WQE is handled as extented wqe by nic. 
+ */ + wqebb_cnt -= 1; + } + + wqe_combo.wqebb_cnt = wqebb_cnt; + wqe_combo.offload = (u8)offload; + owner = hinic5_set_wqe_combo(txq, &wqe_combo, num_sge, &pi); + nic_dev->tx_rx_ops.tx_set_wqe_offload(&offload_info, &wqe_combo); + + tx_info = &txq->tx_info[pi]; + tx_info->skb = skb; + tx_info->wqebb_cnt = wqebb_cnt; + tx_info->valid_nr_frags = valid_nr_frags; + + err = tx_map_skb(nic_dev, skb, valid_nr_frags, txq, tx_info, + &wqe_combo); + if (err != 0) { + hinic5_rollback_sq_wqebbs(txq->sq, wqebb_cnt, owner); + goto tx_drop_pkts; + } + + get_pkt_stats(tx_info, skb); + + hinic5_prepare_sq_ctrl(&wqe_combo, &queue_info, num_sge, owner); + + skb_tx_timestamp(skb); + + hinic5_write_db(txq->sq, (txq->cos & nic_dev->cos_mask_mode) & (u8)qp_cos_mask, SQ_CFLAG_DP, + hinic5_get_sq_local_pi(txq->sq)); + + return NETDEV_TX_OK; + +tx_drop_pkts: + dev_kfree_skb_any(skb); + +tx_skb_pad_err: + TXQ_STATS_INC(txq, dropped); + + return NETDEV_TX_OK; +} + +netdev_tx_t hinic5_lb_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + u16 q_id = skb_get_queue_mapping(skb); + struct hinic5_txq *txq = &nic_dev->txqs[q_id]; + + return hinic5_send_one_skb(skb, netdev, txq); +} + +bool hinic5_check_skb_need_dual_send(struct sk_buff *skb) +{ + struct { + struct ipv6hdr ip6; + struct icmp6hdr icmp6; + } *combined = NULL, _combined; + + /* ARP报文 */ + if (skb->protocol == htons(ETH_P_ARP)) + return true; + if (skb->protocol == htons(ETH_P_IPV6)) { + combined = skb_header_pointer(skb, (int)skb_mac_header_len(skb), + sizeof(_combined), + &_combined); + if (combined && combined->ip6.nexthdr == NEXTHDR_ICMP && + (combined->icmp6.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION || + combined->icmp6.icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) + return true; + } + + return false; +} + +static int txq_free_old_skbs(struct hinic5_txq *txq, int budget); +#define XMIT_CLEAN_BUDGET 1024 /* clean unused skbs as much as possible */ + +netdev_tx_t 
hinic5_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic5_txq *txq = NULL; + u16 q_id = skb_get_queue_mapping(skb); + + if (unlikely(!netif_carrier_ok(netdev))) { + dev_kfree_skb_any(skb); + HINIC5_NIC_STATS_INC(nic_dev, tx_carrier_off_drop); + return NETDEV_TX_OK; + } + + /* 检查是否需要进行arp双发 */ + if (hinic5_check_dev_need_dual_send(nic_dev->hwdev) && + hinic5_check_skb_need_dual_send(skb)) { + skb_queue_tail(&nic_dev->arp_queue, skb_get(skb)); + queue_work(nic_dev->workq, &nic_dev->arp_dual_work); + } + + if (unlikely(q_id >= nic_dev->q_params.num_qps)) { + txq = &nic_dev->txqs[0]; + HINIC5_NIC_STATS_INC(nic_dev, tx_invalid_qid); + goto tx_drop_pkts; + } + txq = &nic_dev->txqs[q_id]; + + /* Clean up pending old skbs before queueing new ones. */ + txq_free_old_skbs(txq, XMIT_CLEAN_BUDGET); + + return hinic5_send_one_skb(skb, netdev, txq); + +tx_drop_pkts: + dev_kfree_skb_any(skb); + u64_stats_update_begin(&txq->txq_stats.syncp); + txq->txq_stats.dropped++; + u64_stats_update_end(&txq->txq_stats.syncp); + + return NETDEV_TX_OK; +} + +static inline void tx_free_skb(struct hinic5_nic_dev *nic_dev, + struct hinic5_tx_info *tx_info) +{ + if (tx_info->skb) { + tx_unmap_skb(nic_dev, tx_info->skb, tx_info->valid_nr_frags, + tx_info->dma_info); + dev_kfree_skb_any(tx_info->skb); + tx_info->skb = NULL; + } +#ifdef HAVE_XDP_SUPPORT + if (tx_info->xdpf) { + dma_unmap_single(nic_dev->lld_dev->dev, tx_info->dma_info->dma, + tx_info->dma_info->len, DMA_TO_DEVICE); + tx_info->xdpf = NULL; + } +#endif +} + +static void free_all_tx_skbs(struct hinic5_nic_dev *nic_dev, u32 sq_depth, + struct hinic5_tx_info *tx_info_arr) +{ + struct hinic5_tx_info *tx_info = NULL; + u32 idx; + + for (idx = 0; idx < sq_depth; idx++) { + tx_info = &tx_info_arr[idx]; + if (tx_info->skb) + tx_free_skb(nic_dev, tx_info); + } +} + +static int txq_free_old_skbs(struct hinic5_txq *txq, int budget) +{ + struct hinic5_nic_dev 
*nic_dev = netdev_priv(txq->netdev); + struct hinic5_tx_info *tx_info = NULL; + u64 tx_bytes = 0, nr_pkts = 0; + int pkts = 0; +#ifdef HAVE_XDP_SUPPORT + u32 xmit_pkts = 0; +#endif + u16 wqebb_cnt = 0; + u16 hw_ci, sw_ci; + + hw_ci = hinic5_get_sq_hw_ci(txq->sq); + dma_rmb(); + sw_ci = hinic5_get_sq_local_ci(txq->sq); + + do { + tx_info = &txq->tx_info[sw_ci]; + + /* Whether all of the wqebb of this wqe is completed */ + if (hw_ci == sw_ci || + ((u16)(hw_ci - sw_ci) & txq->q_mask) < tx_info->wqebb_cnt) + break; + + sw_ci = (u16)(sw_ci + tx_info->wqebb_cnt) & (u16)txq->q_mask; + prefetch(&txq->tx_info[sw_ci]); + + wqebb_cnt += tx_info->wqebb_cnt; + + tx_bytes += tx_info->num_bytes; + nr_pkts += tx_info->num_pkts; + pkts++; +#ifdef HAVE_XDP_SUPPORT + if (tx_info->xdpf) + xmit_pkts++; +#endif + tx_free_skb(nic_dev, tx_info); + } while (likely(pkts < budget)); + + if (pkts == 0) + return 0; + + hinic5_update_sq_local_ci(txq->sq, wqebb_cnt); + + u64_stats_update_begin(&txq->txq_stats.syncp); + txq->txq_stats.bytes += tx_bytes; + txq->txq_stats.packets += nr_pkts; + u64_stats_update_end(&txq->txq_stats.syncp); + +#ifdef HAVE_XDP_SUPPORT + /* xmit_pkts的统计不会和tx_bytes的统计同时出现 */ + u64_stats_update_begin(&txq->xdptxq_stats.syncp); + txq->xdptxq_stats.xdp_xmits += xmit_pkts; + u64_stats_update_end(&txq->xdptxq_stats.syncp); +#endif + + return pkts; +} + +static inline void txq_wake(struct hinic5_txq *txq) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(txq->netdev); + const u16 q_id = txq->sq->q_id; + struct netdev_queue *netdev_txq = NULL; + bool need_wake, do_wake = false; + + need_wake = __netif_subqueue_stopped(nic_dev->netdev, q_id) && + hinic5_get_sq_free_wqebbs(txq->sq) >= 1 && + test_bit(HINIC5_INTF_UP, &nic_dev->flags); + if (likely(!need_wake)) + return; + + netdev_txq = netdev_get_tx_queue(txq->netdev, q_id); + + __netif_tx_lock(netdev_txq, smp_processor_id()); + /* To avoid re-waking subqueue with xmit_frame */ + if (__netif_subqueue_stopped(nic_dev->netdev, 
q_id)) { + netif_wake_subqueue(nic_dev->netdev, q_id); + do_wake = true; + } + __netif_tx_unlock(netdev_txq); + + if (likely(do_wake)) + TXQ_STATS_INC(txq, wake); +} + +int hinic5_tx_poll(struct hinic5_txq *txq, int budget) +{ + const u16 q_id = txq->sq->q_id; + struct netdev_queue *netdev_txq = netdev_get_tx_queue(txq->netdev, q_id); + int pkts = 0; + + if (__netif_tx_trylock(netdev_txq)) { + pkts = txq_free_old_skbs(txq, budget); + __netif_tx_unlock(netdev_txq); + } + + txq_wake(txq); + + return pkts; +} + +void hinic5_set_txq_cos(struct hinic5_nic_dev *nic_dev, u16 start_qid, + u16 q_num, u8 cos) +{ + u16 idx; + + for (idx = 0; idx < q_num; idx++) + nic_dev->txqs[idx + start_qid].cos = cos; +} + +#define HINIC5_BDS_PER_SQ_WQEBB \ + (HINIC5_SQ_WQEBB_SIZE / sizeof(struct hinic5_sq_bufdesc)) + +int hinic5_alloc_txqs_res(struct hinic5_nic_dev *nic_dev, u16 num_sq, + u32 sq_depth, struct hinic5_dyna_txq_res *txqs_res) +{ + struct hinic5_dyna_txq_res *tqres = NULL; + int idx, i; + u64 size; + + for (idx = 0; idx < num_sq; idx++) { + tqres = &txqs_res[idx]; + + size = sizeof(*tqres->tx_info) * sq_depth; + tqres->tx_info = kzalloc(size, GFP_KERNEL); + if (!tqres->tx_info) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to alloc txq%d tx info\n", idx); + goto err_out; + } + + size = sizeof(*tqres->bds) * + (sq_depth * HINIC5_BDS_PER_SQ_WQEBB + + HINIC5_MAX_SQ_SGE); + tqres->bds = kzalloc(size, GFP_KERNEL); + if (!tqres->bds) { + kfree(tqres->tx_info); + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to alloc txq%d bds info\n", idx); + goto err_out; + } + } + + return 0; + +err_out: + for (i = 0; i < idx; i++) { + tqres = &txqs_res[i]; + + kfree(tqres->bds); + tqres->bds = NULL; + kfree(tqres->tx_info); + tqres->tx_info = NULL; + } + + return -ENOMEM; +} + +void hinic5_free_txqs_res(struct hinic5_nic_dev *nic_dev, u16 num_sq, + u32 sq_depth, struct hinic5_dyna_txq_res *txqs_res) +{ + struct hinic5_dyna_txq_res *tqres = NULL; + int idx; + + for (idx = 0; idx < 
num_sq; idx++) { + tqres = &txqs_res[idx]; + + free_all_tx_skbs(nic_dev, sq_depth, tqres->tx_info); + kfree(tqres->bds); + kfree(tqres->tx_info); + } +} + +int hinic5_configure_txqs(struct hinic5_nic_dev *nic_dev, u16 num_sq, + u32 sq_depth, struct hinic5_dyna_txq_res *txqs_res) +{ + struct hinic5_dyna_txq_res *tqres = NULL; + struct hinic5_txq *txq = NULL; + u16 q_id; + u32 idx; + + for (q_id = 0; q_id < num_sq; q_id++) { + txq = &nic_dev->txqs[q_id]; + tqres = &txqs_res[q_id]; + + txq->q_depth = sq_depth; + txq->q_mask = sq_depth - 1; + + txq->tx_info = tqres->tx_info; + for (idx = 0; idx < sq_depth; idx++) + txq->tx_info[idx].dma_info = + &tqres->bds[idx * HINIC5_BDS_PER_SQ_WQEBB]; + + txq->sq = hinic5_get_nic_queue(nic_dev->hwdev, q_id, HINIC5_SQ); + if (!txq->sq) { + nicif_err(nic_dev, drv, nic_dev->netdev, + "Failed to get %u sq\n", q_id); + return -EFAULT; + } + } + + return 0; +} + +int hinic5_alloc_txqs(struct net_device *netdev) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + struct device *dev = nic_dev->lld_dev->dev; + struct hinic5_txq *txq = NULL; + u16 q_id, num_txqs = nic_dev->max_qps; + u64 txq_size; + + txq_size = num_txqs * sizeof(*nic_dev->txqs); + if (txq_size == 0) { + nic_err(dev, "Cannot allocate zero size txqs\n"); + return -EINVAL; + } + + nic_dev->txqs = kzalloc(txq_size, GFP_KERNEL); + if (!nic_dev->txqs) + return -ENOMEM; + + for (q_id = 0; q_id < num_txqs; q_id++) { + txq = &nic_dev->txqs[q_id]; + txq->netdev = netdev; + txq->q_id = q_id; + txq->q_depth = nic_dev->q_params.sq_depth; + txq->q_mask = nic_dev->q_params.sq_depth - 1; + txq->dev = dev; + + txq_stats_init(txq); + } + + return 0; +} + +void hinic5_free_txqs(struct net_device *netdev) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + + kfree(nic_dev->txqs); + nic_dev->txqs = NULL; +} + +static bool is_hw_complete_sq_process(struct hinic5_io_queue *sq) +{ + u16 sw_pi, hw_ci; + + sw_pi = hinic5_get_sq_local_pi(sq); + hw_ci = hinic5_get_sq_hw_ci(sq); + + 
return sw_pi == hw_ci;
+}
+
+#define HINIC5_FLUSH_QUEUE_TIMEOUT 1000
+/* Wait (up to HINIC5_FLUSH_QUEUE_TIMEOUT ms) for the hardware to drain
+ * the send queue; if it does not drain in time, repeatedly ask the
+ * hardware to drop pending TX packets for another timeout period.
+ * Returns 0 when the queue is fully drained, -EFAULT otherwise.
+ */
+static int hinic5_stop_sq(struct hinic5_txq *txq)
+{
+	struct hinic5_nic_dev *nic_dev = netdev_priv(txq->netdev);
+	u64 timeout;
+	int err;
+
+	timeout = msecs_to_jiffies(HINIC5_FLUSH_QUEUE_TIMEOUT) + jiffies;
+	do {
+		if (is_hw_complete_sq_process(txq->sq))
+			return 0;
+
+		usleep_range(900, 1000); /* sleep 900 us ~ 1000 us */
+	} while (time_before(jiffies, (unsigned long)timeout));
+
+	/* force hardware to drop packets */
+	timeout = msecs_to_jiffies(HINIC5_FLUSH_QUEUE_TIMEOUT) + jiffies;
+	do {
+		if (is_hw_complete_sq_process(txq->sq))
+			return 0;
+
+		err = hinic5_force_drop_tx_pkt(nic_dev->hwdev);
+		if (err != 0)
+			break;
+
+		usleep_range(9900, 10000); /* sleep 9900 us ~ 10000 us */
+	} while (time_before(jiffies, (unsigned long)timeout));
+
+	/* Avoid msleep takes too long and get a fake result */
+	if (is_hw_complete_sq_process(txq->sq))
+		return 0;
+
+	return -EFAULT;
+}
+
+/* should stop transmit any packets before calling this function */
+/* Drain every TX queue (regular and XDP). Per-queue failures are only
+ * logged; the function always returns 0.
+ */
+int hinic5_flush_txqs(struct net_device *netdev)
+{
+	struct hinic5_nic_dev *nic_dev = netdev_priv(netdev);
+	u16 qid;
+	int err;
+
+	for (qid = 0; qid < nic_dev->q_params.num_qps + nic_dev->q_params.xdp_qps; qid++) {
+		err = hinic5_stop_sq(&nic_dev->txqs[qid]);
+		if (err != 0)
+			nicif_err(nic_dev, drv, netdev,
+				  "Failed to stop sq%u\n", qid);
+	}
+
+	return 0;
+}
+
diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/nicio/hinic5_tx.h b/hinic5/src/dpu_platform_library/host/service/nic/linux/nicio/hinic5_tx.h
new file mode 100644
index 00000000..3c87d8c7
--- /dev/null
+++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/nicio/hinic5_tx.h
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved.
*/ + +#ifndef HINIC5_TX_H +#define HINIC5_TX_H + +#include <net/ipv6.h> +#include <net/checksum.h> +#include <net/ip6_checksum.h> +#include <linux/u64_stats_sync.h> +#include <linux/ip.h> +#include <linux/ipv6.h> + +#include "hinic5_nic_sq.h" +#include "hinic5_nic_rq.h" +#include "hinic5_nic_io.h" + +#define TXQ_STATS_INC(txq, field) \ +do { \ + u64_stats_update_begin(&(txq)->txq_stats.syncp); \ + (txq)->txq_stats.field++; \ + u64_stats_update_end(&(txq)->txq_stats.syncp); \ +} while (0) + +#ifdef HAVE_XDP_SUPPORT +#define XDP_TXQ_STATS_INC(txq, field) \ +do { \ + u64_stats_update_begin(&(txq)->xdptxq_stats.syncp); \ + (txq)->xdptxq_stats.field++; \ + u64_stats_update_end(&(txq)->xdptxq_stats.syncp); \ +} while (0) +#endif + +#define VXLAN_OFFLOAD_PORT_LE 0xb512 /* big end is 4789 */ +#define GENEVE_OFFLOAD_PORT_LE 0xc117 /* big end is 6081 */ + +#define COMPACET_WQ_SKB_MAX_LEN 16383 + +#define IP4_VERSION 4 +#define IP6_VERSION 6 +#define IP_HDR_IHL_UNIT_SHIFT 2 +#define TCP_HDR_DATA_OFF_UNIT_SHIFT 2 + +enum tx_offload_type { + TX_OFFLOAD_TSO = BIT(0), + TX_OFFLOAD_CSUM = BIT(1), + TX_OFFLOAD_VLAN = BIT(2), + TX_OFFLOAD_INVALID = BIT(3), + TX_OFFLOAD_ESP = BIT(4), + TX_OFFLOAD_PTP = BIT(5), +}; + +enum hinic5_tx_cvlan_type { + HINIC5_TX_TPID0, +}; + +struct hinic5_txq_stats { + u64 packets; + u64 bytes; + u64 busy; + u64 wake; + u64 dropped; + u64 unfinished; + + /* Subdivision statistics show in private tool */ + u64 skb_pad_err; + u64 frag_len_overflow; + u64 offload_cow_skb_err; + u64 map_frag_err; + u64 unknown_tunnel_pkt; + u64 frag_size_err; + u64 rsvd1; + u64 rsvd2; + +#ifdef HAVE_NDO_GET_STATS64 + struct u64_stats_sync syncp; +#else + struct u64_stats_sync_empty syncp; +#endif +}; + +#ifdef HAVE_XDP_SUPPORT +struct hinic5_xdptxq_stats { + u64 xdp_dropped; + u64 xdp_xmits; + u64 map_xdpf_err; + +#ifdef HAVE_NDO_GET_STATS64 + struct u64_stats_sync syncp; +#else + struct u64_stats_sync_empty syncp; +#endif +}; +#endif + +struct hinic5_dma_info { + dma_addr_t 
dma; + u32 len; +}; + +#define IPV4_VERSION 4 +#define IPV6_VERSION 6 +#define TCP_HDR_DOFF_UNIT 2 +#define TRANSPORT_OFFSET(l4_hdr, skb) ((u32)((l4_hdr) - (skb)->data)) + +union hinic5_ip { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; +}; + +struct hinic5_tx_info { + struct sk_buff *skb; + + u16 wqebb_cnt; + u16 valid_nr_frags; + + int num_sge; + u16 num_pkts; + u16 rsvd1; + u32 rsvd2; + u64 num_bytes; + struct hinic5_dma_info *dma_info; +#ifdef HAVE_XDP_SUPPORT + struct xdp_frame *xdpf; +#endif + u64 rsvd3; +}; + +struct hinic5_txq { + struct net_device *netdev; + struct device *dev; + + struct hinic5_txq_stats txq_stats; + +#ifdef HAVE_XDP_SUPPORT + struct hinic5_xdptxq_stats xdptxq_stats; +#endif + + u8 cos; + u8 rsvd1; + u16 q_id; + u32 q_mask; + u32 q_depth; + u32 rsvd2; + + struct hinic5_tx_info *tx_info; + struct hinic5_io_queue *sq; + + u64 last_moder_packets; + u64 last_moder_bytes; + u64 rsvd3; +} ____cacheline_aligned; + +netdev_tx_t hinic5_lb_xmit_frame(struct sk_buff *skb, + struct net_device *netdev); + +struct hinic5_dyna_txq_res { + struct hinic5_tx_info *tx_info; + struct hinic5_dma_info *bds; +}; + +netdev_tx_t hinic5_xmit_frame(struct sk_buff *skb, struct net_device *netdev); + +u32 hinic5_tx_offload(struct sk_buff *skb, struct hinic5_offload_info *offload_info, + struct hinic5_queue_info *queue_info, struct hinic5_txq *txq); + +void hinic5_txq_get_stats(struct hinic5_txq *txq, + struct hinic5_txq_stats *stats); + +void hinic5_txq_clean_stats(struct hinic5_txq_stats *txq_stats); + +#ifdef HAVE_XDP_SUPPORT +void hinic5_xdptxq_get_stats(struct hinic5_txq *txq, + struct hinic5_xdptxq_stats *stats); + +void hinic5_xdptxq_clean_stats(struct hinic5_xdptxq_stats *xdptxq_stats); + +void hinic5_xdptxq_init_stats(struct hinic5_txq *txq); +#endif + +struct hinic5_nic_dev; +int hinic5_alloc_txqs_res(struct hinic5_nic_dev *nic_dev, u16 num_sq, + u32 sq_depth, struct hinic5_dyna_txq_res *txqs_res); + +void hinic5_free_txqs_res(struct 
hinic5_nic_dev *nic_dev, u16 num_sq, + u32 sq_depth, struct hinic5_dyna_txq_res *txqs_res); + +int hinic5_configure_txqs(struct hinic5_nic_dev *nic_dev, u16 num_sq, + u32 sq_depth, struct hinic5_dyna_txq_res *txqs_res); + +int hinic5_alloc_txqs(struct net_device *netdev); + +void hinic5_free_txqs(struct net_device *netdev); + +int hinic5_tx_poll(struct hinic5_txq *txq, int budget); + +int hinic5_flush_txqs(struct net_device *netdev); + +void hinic5_set_txq_cos(struct hinic5_nic_dev *nic_dev, u16 start_qid, + u16 q_num, u8 cos); + +static inline __sum16 csum_magic(union hinic5_ip *ip, unsigned short proto) +{ + return (ip->v4->version == IPV4_VERSION) ? + csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, (u8)proto, 0) : + csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, (u8)proto, 0); +} + +void hinic5_tx_set_normal_task_offload(struct hinic5_offload_info *offload, + struct hinic5_sq_wqe_combo *wqe_combo); + +void hinic5_tx_set_compact_task_offload(struct hinic5_offload_info *offload, + struct hinic5_sq_wqe_combo *wqe_combo); + +int hinic5_maybe_stop_tx(struct hinic5_txq *txq, u16 wqebb_cnt); +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/nicio/hinic5_xdp.c b/hinic5/src/dpu_platform_library/host/service/nic/linux/nicio/hinic5_xdp.c new file mode 100644 index 00000000..fd8c002c --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/nicio/hinic5_xdp.c @@ -0,0 +1,457 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <linux/kernel.h> +#include <linux/skbuff.h> +#include <linux/interrupt.h> +#include <linux/device.h> +#include <linux/tcp.h> +#include <linux/sctp.h> +#include <linux/module.h> +#include <linux/vmalloc.h> + +#include "ossl_knl.h" + +#ifdef HAVE_XDP_SUPPORT +#include <linux/bpf.h> +#include "hinic5_crm.h" +#include "hinic5_nic_cfg.h" +#include "hinic5_srv_nic.h" +#include "hinic5_nic_dev.h" +#include 
"hinic5_rx.h" +#include "hinic5_tx.h" +#include "hinic5_xdp.h" + +int tx_map_xdpf(struct hinic5_nic_dev *nic_dev, struct hinic5_txq *txq, u16 pi, + struct hinic5_sq_wqe_combo *wqe_combo) +{ + struct hinic5_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0; + struct hinic5_dma_info *dma_info = txq->tx_info[pi].dma_info; + struct xdp_frame *xdpf = txq->tx_info[pi].xdpf; + struct device *dev = nic_dev->lld_dev->dev; + + dma_info->dma = dma_map_single(dev, xdpf->data, xdpf->len, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_info->dma) != 0) { + XDP_TXQ_STATS_INC(txq, map_xdpf_err); + return -EFAULT; + } + dma_info->len = xdpf->len; + + wqe_desc->hi_addr = hinic5_hw_be32(upper_32_bits(dma_info->dma)); + wqe_desc->lo_addr = hinic5_hw_be32(lower_32_bits(dma_info->dma)); + + wqe_desc->ctrl_len = dma_info->len; + + return 0; +} + +void hinic5_prepare_xdp_sq_ctrl(struct hinic5_sq_wqe_combo *wqe_combo, u16 owner) +{ + struct hinic5_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0; + + wqe_desc->ctrl_len |= + SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) | + SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) | + SQ_CTRL_SET(owner, OWNER); + + wqe_desc->ctrl_len = hinic5_hw_be32(wqe_desc->ctrl_len); + wqe_desc->queue_info = 0; +} + +int hinic5_xdp_xmit_frame(struct hinic5_nic_dev *nic_dev, struct hinic5_txq *txq, + struct xdp_frame *xdpf) +{ + struct hinic5_sq_wqe_combo wqe_combo = {0}; + u16 pi = 0, owner = 0; + /* Always use compact wqe for xdp */ + if (unlikely(hinic5_maybe_stop_tx(txq, 1) != 0)) { + TXQ_STATS_INC(txq, busy); + return NETDEV_TX_BUSY; + } + + wqe_combo.ctrl_bd0 = hinic5_get_sq_one_wqebb(txq->sq, &pi); + wqe_combo.task_type = SQ_WQE_TASKSECT_4BYTES; + wqe_combo.wqe_type = SQ_WQE_COMPACT_TYPE; + owner = hinic5_get_and_update_sq_owner(txq->sq, pi, 1); + + txq->tx_info[pi].xdpf = xdpf; + txq->tx_info[pi].wqebb_cnt = 1; + + if (tx_map_xdpf(nic_dev, txq, pi, &wqe_combo) != 0) { + txq->tx_info[pi].xdpf = NULL; + hinic5_rollback_sq_wqebbs(txq->sq, 1, owner); + return -EIO; + } + 
hinic5_prepare_xdp_sq_ctrl(&wqe_combo, owner); + + return 0; +} + +int hinic5_xdp_xmit_frames(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(dev); + struct hinic5_txq *txq = NULL; + u16 q_id, drops = 0; + int i; + + if (unlikely(!netif_carrier_ok(dev))) { + HINIC5_NIC_STATS_INC(nic_dev, tx_carrier_off_drop); + return NETDEV_TX_BUSY; + } + + if (unlikely((flags & ~XDP_XMIT_FLAGS_MASK) != 0)) + return -EINVAL; + + if (unlikely(nic_dev->q_params.xdp_qps == 0)) + return -EINVAL; + + /* xdp队列和内核TX队列隔离,xdp走后一半队列 */ + q_id = raw_smp_processor_id() % nic_dev->q_params.xdp_qps + nic_dev->q_params.num_qps; + + txq = &nic_dev->txqs[q_id]; + + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + + if (unlikely(hinic5_xdp_xmit_frame(nic_dev, txq, xdpf) != 0)) { + xdp_return_frame(xdpf); + XDP_TXQ_STATS_INC(txq, xdp_dropped); + drops++; + } + } + + if ((flags & XDP_XMIT_FLUSH) != 0) { + hinic5_write_db(txq->sq, (txq->cos & nic_dev->cos_mask_mode), SQ_CFLAG_DP, + hinic5_get_sq_local_pi(txq->sq)); + } + return n - drops; +} + +struct xdp_frame *xdp_convert_to_frame(struct xdp_buff *xdp, struct hinic5_nic_dev *nic_dev) +{ + struct xdp_frame *xdp_frame = NULL; + int metasize, headroom; + +#if (KERNEL_VERSION(5, 8, 0) < LINUX_VERSION_CODE) + if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) + return xdp_convert_zc_to_xdp_frame(xdp); + +#endif + xdp_frame = xdp->data_hard_start; + headroom = xdp->data - xdp->data_hard_start; + metasize = xdp->data - xdp->data_meta; + metasize = metasize > 0 ? 
metasize : 0; + + if (unlikely((headroom - metasize) < sizeof(*xdp_frame))) + return NULL; +#ifdef HAVE_XDP_FRAME_SZ + if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) + return NULL; + xdp_frame->frame_sz = xdp->frame_sz; +#endif + xdp_frame->data = xdp->data; + xdp_frame->len = xdp->data_end - xdp->data; + xdp_frame->headroom = (u16)(headroom - sizeof(*xdp_frame)); +#ifdef HAVE_XDP_DATA_META + xdp_frame->metasize = (u32)metasize; + xdp_frame->mem = xdp->rxq->mem; +#endif + + return xdp_frame; +} + +bool hinic5_xmit_xdp_buff(struct net_device *netdev, u16 q_id, struct xdp_buff *xdp) +{ + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic5_txq *txq = NULL; + struct xdp_frame *xdpf = NULL; + u16 dst_qid; + + /* xdp队列和内核TX队列隔离,xdp走后一半队列 */ + dst_qid = q_id + nic_dev->q_params.num_qps; + txq = &nic_dev->txqs[dst_qid]; + xdpf = xdp_convert_to_frame(xdp, nic_dev); + if (!xdpf) { + XDP_TXQ_STATS_INC(txq, xdp_dropped); + return false; + } + + if (unlikely(hinic5_xdp_xmit_frame(nic_dev, txq, xdpf) != 0)) { + xdp_return_frame(xdpf); + XDP_TXQ_STATS_INC(txq, xdp_dropped); + return false; + } + hinic5_write_db(txq->sq, (txq->cos & nic_dev->cos_mask_mode), SQ_CFLAG_DP, + hinic5_get_sq_local_pi(txq->sq)); + + return true; +} + +static void update_drop_rx_info(struct hinic5_rxq *rxq, u16 weqbb_num) +{ + struct hinic5_rx_info *rx_info = NULL; + u16 weqbb_num_tmp = weqbb_num; + struct net_device *netdev = rxq->netdev; + struct hinic5_nic_dev *nic_dev = netdev_priv(netdev); + + while (weqbb_num_tmp != 0) { + rx_info = &rxq->rx_info[rxq->cons_idx & rxq->q_mask]; + if (likely(page_to_nid(rx_info->page) == numa_node_id())) { + hinic5_reuse_rx_page(rxq, rx_info); + } else { + if (rx_info->buf_dma_addr != 0) { + dma_unmap_page(rxq->dev, rx_info->buf_dma_addr, + rxq->dma_rx_buff_size, DMA_FROM_DEVICE); + } + + if (rx_info->page) + __free_pages(rx_info->page, nic_dev->page_order); + } + + rx_info->buf_dma_addr = 0; + rx_info->page = NULL; + rxq->cons_idx++; + 
rxq->delta++; + + weqbb_num_tmp--; + } +} + +static bool hinic5_add_rx_frag_with_xdp(struct hinic5_rxq *rxq, u32 pkt_len, + struct hinic5_rx_info *rx_info, + struct sk_buff *skb, struct xdp_buff *xdp) +{ + struct page *page = rx_info->page; + + if (pkt_len <= HINIC5_RX_HDR_SIZE) { + __skb_put_data(skb, xdp->data, pkt_len); + + if (likely(page_to_nid(page) == numa_node_id())) + return true; + + put_page(page); + goto umap_page; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + (int)(rx_info->page_offset + (xdp->data - xdp->data_hard_start)), + (int)pkt_len, rxq->buf_len); + + if (unlikely(page_to_nid(page) != numa_node_id())) + goto umap_page; + if (unlikely(page_count(page) != 1)) + goto umap_page; + + rx_info->page_offset ^= rxq->buf_len; + get_page(page); + + return true; +umap_page: + dma_unmap_page(rxq->dev, rx_info->buf_dma_addr, + rxq->dma_rx_buff_size, DMA_FROM_DEVICE); + return false; +} + +static void hinic5_xdp_set_data(struct hinic5_rxq *rxq, struct xdp_buff *xdp, + u8 *va, u32 pkt_len, u32 packet_offset) +{ + xdp->data = (void *)((uintptr_t)va + packet_offset); + xdp->data_hard_start = va; + xdp->data_end = xdp->data + pkt_len; + xdp->rxq = &rxq->xdp_rxq; +} + +static int hinic5_run_xdp_prog(struct hinic5_rxq *rxq, struct bpf_prog *xdp_prog, + struct xdp_buff *xdp, u32 *pkt_len) +{ + u32 act; + int err; + int result = HINIC5_XDP_PKT_DROP; + struct net_device *netdev = rxq->netdev; + struct hinic5_rx_info *rx_info = NULL; + + act = bpf_prog_run_xdp(xdp_prog, xdp); + switch (act) { + case XDP_PASS: + *pkt_len = xdp->data_end - xdp->data; + result = HINIC5_XDP_PKT_PASS; + break; + case XDP_TX: + if (unlikely(!hinic5_xmit_xdp_buff(netdev, rxq->q_id, xdp))) + goto out_failure; + result = HINIC5_XDP_PKT_TX; + break; + case XDP_REDIRECT: + rx_info = &rxq->rx_info[rxq->cons_idx & rxq->q_mask]; + get_page(rx_info->page); +#ifdef HAVE_XDP_FRAME_SZ + if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) + goto out_failure; +#endif + err = 
xdp_do_redirect(netdev, xdp, xdp_prog); + if (unlikely(err != 0)) { + put_page(rx_info->page); + goto out_failure; + } + result = HINIC5_XDP_PKT_REDIRECT; + break; + case XDP_ABORTED: + goto out_failure; + case XDP_DROP: + break; + default: + bpf_warn_invalid_xdp_action(netdev, xdp_prog, act); + +out_failure: + trace_xdp_exception(netdev, xdp_prog, act); + } + + return result; +} + +static void hinic5_prepare_xdp_buff(struct hinic5_rxq *rxq, struct xdp_buff *xdp, + u32 pkt_len, u32 packet_offset) +{ + u8 *va; + struct hinic5_rx_info *rx_info = NULL; + + rx_info = &rxq->rx_info[rxq->cons_idx & rxq->q_mask]; + va = (u8 *)page_address(rx_info->page) + rx_info->page_offset; + prefetch(va); + dma_sync_single_range_for_cpu(rxq->dev, rx_info->buf_dma_addr, + rx_info->page_offset, + rxq->buf_len, DMA_FROM_DEVICE); + hinic5_xdp_set_data(rxq, xdp, va, pkt_len, packet_offset); + +#ifdef HAVE_XDP_FRAME_SZ + xdp->frame_sz = rxq->buf_len; +#endif +#ifdef HAVE_XDP_DATA_META + xdp_set_data_meta_invalid(xdp); +#endif + prefetchw(xdp->data_hard_start); +} + +static void hinic5_handle_xdp_result(struct hinic5_rxq *rxq, int result, u16 weqbb_num) +{ + switch (result) { + case HINIC5_XDP_PKT_DROP: + RXQ_STATS_INC(rxq, xdp_dropped); + break; + case HINIC5_XDP_PKT_REDIRECT: + RXQ_STATS_INC(rxq, xdp_redirected); + break; + default: + break; + } + if (result != HINIC5_XDP_PKT_PASS) + update_drop_rx_info(rxq, weqbb_num); +} + +int hinic5_run_xdp(struct hinic5_rxq *rxq, struct hinic5_cqe_info *cqe_info, struct xdp_buff *xdp) +{ + struct bpf_prog *xdp_prog = NULL; + struct hinic5_nic_dev *nic_dev = NULL; + int result = HINIC5_XDP_PKT_PASS; + u16 weqbb_num = 1; /* xdp can only use one rx_buff */ + u32 pkt_len = cqe_info->pkt_len; + u32 packet_offset = cqe_info->packet_offset + XDP_PACKET_HEADROOM; + + nic_dev = netdev_priv(rxq->netdev); + + rcu_read_lock(); + xdp_prog = READ_ONCE(rxq->xdp_prog); + if (!xdp_prog) { + result = HINIC5_XDP_PROG_EMPTY; + goto unlock_rcu; + } + if (unlikely(pkt_len 
+ packet_offset > rxq->buf_len)) { + RXQ_STATS_INC(rxq, xdp_large_pkt); + weqbb_num = HINIC5_GET_SGE_NUM(pkt_len + packet_offset, rxq); + result = HINIC5_XDP_PKT_DROP; + goto xdp_out; + } + +#ifdef HAVE_PAGE_POOL_SUPPORT + if (nic_dev->page_pool_enabled) { + result = HINIC5_XDP_PROG_EMPTY; + goto unlock_rcu; + } +#endif + hinic5_prepare_xdp_buff(rxq, xdp, pkt_len, packet_offset); + result = hinic5_run_xdp_prog(rxq, xdp_prog, xdp, &pkt_len); + cqe_info->pkt_len = (u16)pkt_len; +xdp_out: + hinic5_handle_xdp_result(rxq, result, weqbb_num); +unlock_rcu: + rcu_read_unlock(); + return result; +} + +struct sk_buff *hinic5_fetch_rx_buffer_xdp(struct hinic5_rxq *rxq, u32 pkt_len, + struct xdp_buff *xdp) +{ + struct sk_buff *skb = NULL; + struct hinic5_rx_info *rx_info = NULL; + u32 sw_ci; + bool reuse; + + sw_ci = rxq->cons_idx & rxq->q_mask; + rx_info = &rxq->rx_info[sw_ci]; + + skb = netdev_alloc_skb_ip_align(rxq->netdev, HINIC5_RX_HDR_SIZE); + if (unlikely(!skb)) + return NULL; + + reuse = hinic5_add_rx_frag_with_xdp(rxq, pkt_len, rx_info, skb, xdp); + if (likely(reuse)) + hinic5_reuse_rx_page(rxq, rx_info); + + rx_info->buf_dma_addr = 0; + rx_info->page = NULL; + + rxq->cons_idx += 1; + rxq->delta += 1; + + return skb; +} + +void hinic5_xdp_flush_if_needed(const struct hinic5_nic_dev *nic_dev) +{ + if (unlikely(rcu_access_pointer(nic_dev->xdp_prog))) { +#ifdef HAVE_XDP_DO_FLUSH_MAP + xdp_do_flush_map(); +#else + xdp_do_flush(); +#endif + } +} + +/* Function to determine XDP status and build skb accordingly */ +bool hinic5_xdp_process_packet(struct hinic5_rxq *rxq, struct hinic5_cqe_info *cqe_info, + struct sk_buff **skb) +{ + u32 xdp_status; + struct xdp_buff xdp = { 0 }; + + xdp_status = (u32)(hinic5_run_xdp(rxq, cqe_info, &xdp)); + /* Check XDP status for redirect, TX, or drop */ + if (xdp_status == HINIC5_XDP_PKT_REDIRECT || + xdp_status == HINIC5_XDP_PKT_TX || + xdp_status == HINIC5_XDP_PKT_DROP) { + /* if packet is redirected, TX, or dropped, there is no need to 
build skb */ + return 1; + } + + /* Build skb based on XDP program configuration */ + if (xdp_status != HINIC5_XDP_PROG_EMPTY) + *skb = hinic5_fetch_rx_buffer_xdp(rxq, cqe_info->pkt_len, &xdp); + else + *skb = hinic5_fetch_rx_buffer(rxq, cqe_info); + + return 0; +} + +#endif diff --git a/hinic5/src/dpu_platform_library/host/service/nic/linux/nicio/hinic5_xdp.h b/hinic5/src/dpu_platform_library/host/service/nic/linux/nicio/hinic5_xdp.h new file mode 100644 index 00000000..440c93b2 --- /dev/null +++ b/hinic5/src/dpu_platform_library/host/service/nic/linux/nicio/hinic5_xdp.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved. */ + +#ifndef HINIC5_XDP_H +#define HINIC5_XDP_H + +#include <net/xfrm.h> +#include <linux/netdevice.h> +#include <linux/kernel.h> +#include <linux/skbuff.h> +#include <linux/interrupt.h> +#include <linux/device.h> +#include <linux/tcp.h> +#include <linux/sctp.h> +#include <linux/dma-mapping.h> +#include <linux/types.h> +#include <linux/u64_stats_sync.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include "ossl_knl.h" +#ifdef HAVE_XDP_SUPPORT +#include <net/xdp.h> +#include <linux/bpf_trace.h> + +#include "hinic5_nic_sq.h" +#include "hinic5_nic_rq.h" +#include "hinic5_nic_io.h" +#include "hinic5_nic_dev.h" +#include "hinic5_tx.h" + +#define HINIC5_XDP_PROCESSED 1 + +enum hinic5_xdp_pkt { + // bpf_prog status + HINIC5_XDP_PROG_EMPTY, + // pkt action + HINIC5_XDP_PKT_PASS, + HINIC5_XDP_PKT_DROP, + HINIC5_XDP_PKT_REDIRECT, + HINIC5_XDP_PKT_TX, +}; + +int tx_map_xdpf(struct hinic5_nic_dev *nic_dev, struct hinic5_txq *txq, u16 pi, + struct hinic5_sq_wqe_combo *wqe_combo); + +void hinic5_prepare_xdp_sq_ctrl(struct hinic5_sq_wqe_combo *wqe_combo, u16 owner); + +int hinic5_xdp_xmit_frame(struct hinic5_nic_dev *nic_dev, struct hinic5_txq *txq, + struct xdp_frame *xdpf); + +int hinic5_xdp_xmit_frames(struct net_device *dev, int n, struct xdp_frame **frames, + u32 
flags); +bool hinic5_xmit_xdp_buff(struct net_device *netdev, u16 q_id, struct xdp_buff *xdp); + +struct xdp_frame *xdp_convert_to_frame(struct xdp_buff *xdp, struct hinic5_nic_dev *nic_dev); + +int hinic5_run_xdp(struct hinic5_rxq *rxq, struct hinic5_cqe_info *cqe_info, struct xdp_buff *xdp); + +struct sk_buff *hinic5_fetch_rx_buffer_xdp(struct hinic5_rxq *rxq, u32 pkt_len, + struct xdp_buff *xdp); + +void hinic5_xdp_flush_if_needed(const struct hinic5_nic_dev *nic_dev); + +bool hinic5_xdp_process_packet(struct hinic5_rxq *rxq, struct hinic5_cqe_info *cqe_info, + struct sk_buff **skb); +#endif +#endif diff --git a/hinic5/src/dpu_platform_library/include/drv_fw_msg/cfm/bond_cfm_cmd.h b/hinic5/src/dpu_platform_library/include/drv_fw_msg/cfm/bond_cfm_cmd.h new file mode 100644 index 00000000..f2eb09d6 --- /dev/null +++ b/hinic5/src/dpu_platform_library/include/drv_fw_msg/cfm/bond_cfm_cmd.h @@ -0,0 +1,155 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2026-2026. All rights reserved. 
+ * Description : cfm bond datastruct
+ * Creation time : 2026/04/16
+ */
+
+#ifndef BOND_CFM_CMD_H
+#define BOND_CFM_CMD_H
+
+#include "bond_mpu_cmd_defs.h"
+#include "nic_mpu_cmd_structs.h"
+
+#define ETH_ALEN 6 /* Ethernet address length */
+
+#define BOND_MAX_PORT_NUM 4 /* maximum number of ports supported by a bond */
+#define BOND_MAX_HOST_NUM 4 /* maximum number of hosts supported by a bond */
+
+/**
+ * @brief enum bond_mpu_type
+ * @details bond type: fully offloaded bond or half offloaded bond
+ */
+enum bond_mpu_type {
+	BOND_TYPE_HALF,
+	BOND_TYPE_FULL,
+	BOND_TYPE_BUTT
+};
+
+/* Configuration payload for a half-offload bond */
+struct bond_half_data_s {
+	u16 bond_mode; /* bond mode:1 for active-backup,
+			* 2 for balance-xor,4 for 802.3ad
+			*/
+	u16 bond_id; /* bond id */
+	u16 up_delay; /* default:200ms */
+	u16 down_delay; /* default:200ms */
+	u32 active_slaves : 8; /* active port slaves(bitmaps) */
+	u32 slaves : 8; /* bond port id bitmaps */
+	u32 lacp_collect_slaves : 8; /* slave bitmap usable for LACP packet TX/RX */
+	u32 hash_policy : 8; /* hash:0 for layer 2 ,1 for layer 2+3 ,2 for layer 3+4 */
+	u32 first_roce_func; /* RoCE used */
+	u32 bond_pf_bitmap; /* slave pf bitmap */
+	u32 user_bitmap; /* user bitmap */
+	u8 bond_name[BOND_NAME_MAX_LEN]; /* bond name, length must be less than 16 */
+	u32 rsvd[8];
+};
+
+/* Configuration payload for a full-offload bond */
+struct bond_full_data_s {
+	u32 bond_id; /* bond device id; valid as output, filled in by the MPU on success */
+	u32 master_slave_port_id; /* master/slave port ids; the lowest port id acts as master */
+	u32 slave_bitmap; /* bond port id bitmap */
+	u32 poll_timeout; /* link-check interval of the bond device */
+	u32 up_delay; /* reserved for now */
+	u32 down_delay; /* reserved for now */
+	u32 bond_mode; /* reserved for now */
+	u32 xmit_hash_policy; /* hash policy, used by the microcode path-selection logic */
+	u8 lacp_rate; /* LACP negotiation rate: 0 slow, 1 fast */
+	u8 rsvd1[3]; /* reserved */
+	u32 rsvd[9]; /* reserved */
+};
+
+typedef union tag_cfm_bond_data {
+	struct bond_half_data_s bond_half_data;
+	struct bond_full_data_s bond_full_data;
+} cfm_bond_data_u;
+
+/* Bond command message: common header plus half/full payload union */
+typedef struct tag_cfm_bond_cmd {
+	struct mgmt_msg_head comm_head;
+	u16 sub_cmd;
+	u8 rsvd0;
+	u8 bond_type;
+	cfm_bond_data_u data;
+	u8 rsvd1[32];
+} cfm_bond_cmd_s;
+
+/**
+ * @brief 
定义了一个用于存储bond状态信息的结构体 + * @details 该结构体包含了bond状态的各种信息, + * 如bond_id、链路状态、slave port状态、port个数等, + * 并且还包含了每个port的lacp信息, + * 以及每个host成功和失败上报lacp协商结果的次数等。 + */ +typedef struct tag_bond_full_get { + u32 bond_id; /* bond id */ + u32 bon_mmi_status; /* 该bond子设备的链路状态 */ + u32 active_bitmap; /* 该bond子设备的slave port状态 */ + u32 port_count; /* 该bond子设备个数 */ + struct lacp_port_info port_info[BOND_MAX_PORT_NUM];/* 每个port的lacp信息 */ + u64 success_report_cnt[BOND_MAX_HOST_NUM];/* 每个host成功上报lacp协商结果次数 */ + u64 fail_report_cnt[BOND_MAX_HOST_NUM]; /* 每个host上报lacp协商结果失败次数 */ + u64 poll_timeout; /* 轮询超时时间 */ + u64 fast_periodic_timeout; /* 快速周期性超时时间 */ + u64 slow_periodic_timeout; /* 慢速周期性超时时间 */ + u64 short_timeout; /* 短超时时间 */ + u64 long_timeout; /* 长超时时间 */ + u64 aggregate_wait_timeout; /* 聚合等待超时时间 */ + u64 tx_period_timeout; /* 发送周期超时时间 */ + u64 rx_marker_timer; /* RX标记定时器 */ + u8 bond_mode; /* bond模式 */ + u8 arp_dual_en; /* arp双发使能标记 */ + u8 rsvd[6]; /* 保留域段 */ +} bond_full_get_s; + +typedef union tag_cfm_bond_get_s { + hinic5_bond_get_s bond_half_data; + bond_full_get_s bond_full_data; +} cfm_bond_get_u; + +typedef struct tag_cfm_bond_info_get { + struct mgmt_msg_head comm_head; + u16 sub_cmd; + u8 rsvd0; + u8 bond_type; + cfm_bond_get_u data; +} cfm_bond_info_get_s; + +/* bond配置信息结构体 */ +typedef struct tag_cfm_bond_cfg_cmd { + struct mgmt_msg_head head; /* 消息头 */ + u32 cfg_bitmap; /* bond cfg bitmap */ + u8 bond_name[BOND_NAME_MAX_LEN]; /* if bond_id_vld=0 input, else output */ + u8 op_code; /* 操作类型:0: 查询GET,1:配置SET */ + u8 arp_en; /* ARP双发使能 */ + u8 rsvd0[2]; /* 保留字段 */ + u32 rsvd1[58]; /* 保留字段 */ +} cfm_bond_cfg_cmd_s; + +/** + * @brief 定义了创建bond设备所需的信息 + * @details 该结构体包含了创建bond设备所需的各种参数, + * 如bond设备号、主从端口id、bond port id bitmap等。 + */ +struct hinic5_create_bond_info { + u32 bond_id; /* bond设备号,output时有效,mpu操作成功返回时回填 */ + u32 master_slave_port_id; /* 主从端口id,最小的port id作为主口 */ + u32 slave_bitmap; /* bond port id bitmap */ + u32 poll_timeout; /* bond设备链路检查时间 */ + u32 up_delay; /* 
暂时预留 */ + u32 down_delay; /* 暂时预留 */ + u32 bond_mode; /* 暂时预留 */ + u32 xmit_hash_policy; /* hash策略,用于微码选路逻辑 */ + u8 lacp_rate; /* lacp协商速率 0 为慢速, 1 为快速 */ + u8 rsvd1[3]; /* 保留域段 */ + u32 rsvd[1]; /* 保留域段 */ +}; + +/** + * @brief 创建bond的消息接口结构体 + * @details 该结构体用于创建bond的消息接口 + */ +struct hinic5_cmd_create_bond { + struct hinic5_mgmt_msg_head head; /* 命令字消息头 */ + struct hinic5_create_bond_info create_bond_info; /* 创建bond的信息 */ +}; + +#endif /* BOND_CFM_CMD_H */ diff --git a/hinic5/src/dpu_platform_library/include/drv_fw_msg/cfm/qos_base_cmd.h b/hinic5/src/dpu_platform_library/include/drv_fw_msg/cfm/qos_base_cmd.h new file mode 100644 index 00000000..0ec678d9 --- /dev/null +++ b/hinic5/src/dpu_platform_library/include/drv_fw_msg/cfm/qos_base_cmd.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2026-2026. All rights reserved. + * Description : qos datastruct + * Creation time : 2026/03/11 + */ + +#ifndef QOS_BASE_CMD_H +#define QOS_BASE_CMD_H + +#include "mpu_cmd_base_defs.h" + +/** + * @brief enum qos_cc_l2d parm command + */ +typedef enum tag_qos_cc_l2d_parm { + QOS_CC_L2D_PARM_INVALID = 0, /**< parm command invalid */ + QOS_CC_L2D_PARM_NIC_OQ_THRD, /**< parm command nic_oq_thrd */ + QOS_CC_L2D_PARM_ROCE_OQ_THRD, /**< parm command roce_oq_thrd */ + QOS_CC_L2D_PARM_UBOE_OQ_THRD, /**< parm command uboe_oq_thrd */ + QOS_CC_L2D_PARM_HCT_EN, /**< parm command hct_en */ +} qos_cc_l2d_parm_e; + +/** + * @brief struct qos_cc_l2d tbl command + */ +typedef struct tag_qos_cc_l2d_tbl { + union { + struct { +#if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && ((BYTE_ORDER == BIG_ENDIAN)) + u32 nic_oq_thrd : 10; + u32 nic_oq_thrd_mark : 1; + u32 rsvd : 21; +#else + u32 rsvd : 21; + u32 nic_oq_thrd_mark : 1; + u32 nic_oq_thrd : 10; +#endif + } bs; + u32 value; + } dw0; + + union { + struct { +#if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && ((BYTE_ORDER == BIG_ENDIAN)) + u32 roce_oq_thrd : 10; + u32 roce_oq_thrd_mark : 
1; + u32 rsvd : 21; +#else + u32 rsvd : 21; + u32 roce_oq_thrd_mark : 1; + u32 roce_oq_thrd : 10; +#endif + } bs; + u32 value; + } dw1; + + union { + struct { +#if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && ((BYTE_ORDER == BIG_ENDIAN)) + u32 uboe_oq_thrd : 10; + u32 uboe_oq_thrd_mark : 1; + u32 rsvd : 21; +#else + u32 rsvd : 21; + u32 uboe_oq_thrd_mark : 1; + u32 uboe_oq_thrd : 10; +#endif + } bs; + u32 value; + } dw2; + + union { + struct { +#if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && ((BYTE_ORDER == BIG_ENDIAN)) + u32 host_cg_tran_en : 1; + u32 host_cg_tran_mark : 1; + u32 rsvd : 30; +#else + u32 rsvd : 30; + u32 host_cg_tran_mark : 1; + u32 host_cg_tran_en : 1; +#endif + } bs; + u32 value; + } dw3; + + u32 dw_rsvd[4]; +} qos_cc_l2d_tbl_s; + +/* 考虑可扩展性,实际透传大小32B,联合体占位64B */ +#define CFM_QOS_CC_L2D_DATA_LEN 64 + +/* L2DMEM写请求 */ +typedef struct tag_cfm_l2dmem_req { + struct mgmt_msg_head head; + /* 考虑可扩展性 */ + union { + u32 padding[CFM_QOS_CC_L2D_DATA_LEN]; + qos_cc_l2d_tbl_s l2d_tbl; + } cfg; +} qos_cc_l2d_req_s; + +/* L2DMEM读响应 */ +typedef struct tag_cfm_l2dmem_rsp { + struct mgmt_msg_head head; + union { + u32 padding[CFM_QOS_CC_L2D_DATA_LEN]; + qos_cc_l2d_tbl_s l2d_tbl; + } cfg; +} qos_cc_l2d_rsp_s; + +#endif /* QOS_BASE_CMD_H */ diff --git a/hinic5/src/dpu_platform_library/include/drv_fw_msg/mpu/inband_mpu_cmd_defs.h b/hinic5/src/dpu_platform_library/include/drv_fw_msg/mpu/inband_mpu_cmd_defs.h new file mode 100644 index 00000000..9860499a --- /dev/null +++ b/hinic5/src/dpu_platform_library/include/drv_fw_msg/mpu/inband_mpu_cmd_defs.h @@ -0,0 +1,220 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2026-2026. All rights reserved. 
+ * Description : mpu cmd + * Creation time : 2026/04/10 + */ + +#ifndef INBAND_MPU_CMD_DEFS_H +#define INBAND_MPU_CMD_DEFS_H + +#include "mpu_cmd_base_defs.h" + +typedef struct { + struct mgmt_msg_head head; + + u8 low_power_enable; + u8 opt_type; + u8 rsv[6]; + u32 ncsi_enter_low_power_mode_cnt; + u32 ncsi_exit_low_power_mode_cnt; +} comm_cmd_low_power_set_s; + +#define MAX_LOG_BUF_SIZE 1024 +struct nic_cmd_get_uart_log_info { + struct mgmt_msg_head head; + struct { + u32 ret : 8; + u32 version : 8; + u32 log_elem_real_num : 16; + } log_head; + char uart_log[MAX_LOG_BUF_SIZE]; +}; + +#define MAX_LOG_CMD_BUF_SIZE 128 +struct nic_cmd_set_uart_log_cmd { + struct mgmt_msg_head head; + struct { + u32 ret : 8; + u32 version : 8; + u32 cmd_elem_real_num : 16; + } log_head; + char uart_cmd[MAX_LOG_CMD_BUF_SIZE]; +}; + +enum log_or_index_type { + MPU_COMM_GET_LOG = 0, /**< 获取mpu日志 */ + MPU_COMM_GET_INDEX, /**< 获取index日志 */ + NPU_COMM_GET_SIM_DATA, /**< 从flash中获取微码字典 */ +}; + +enum log_module_type { + LOG_MODULE_TYPE_MPU_LOG = 0, + LOG_MODULE_TYPE_NPU_LOG, + LOG_MODULE_TYPE_SMU_LOG, + LOG_MODULE_TYPE_MPU_LASTWORD, + LOG_MODULE_TYPE_NPU_LASTWORD, + LOG_MODULE_TYPE_MPU_RELOAD_LOG = 5, + LOG_MODULE_TYPE_MPU_CNT_DICT, + LOG_MODULE_TYPE_NPU_CNT_DICT, + LOG_MODULE_TYPE_UBC_IMP_LOG, + LOG_MODULE_TYPE_UBC_IMP_LASTWORD, + LOG_MODULE_TYPE_ROCE_IMP_LOG = 10, + LOG_MODULE_TYPE_ROCE_SCC_LOG, + LOG_MODULE_TYPE_BUTT +}; + +enum log_area_type { + LOG_AREA_RAM = 0, + LOG_AREA_FLASH, +}; + +struct nic_log_info { + struct mgmt_msg_head msg_head; + + u32 offset; + u8 log_or_index; // 0:log; 1:index; + u8 type; // 0:up; 1:ucode; + // 2:smu;(log_or_index: 0, 此位:日志类型, + // 1, 此位:字典类型) + // 3:mpu lastword 4.npu lastword + // 5:mpu cnt 字典文件, 6. 
npu cnt 字典文件 + u8 area; // 0:ram; + // 1:flash;(只有当log_or_index为0的时候,此位有效) + u8 rsvd1; // reserved + u8 data[MAX_LOG_BUF_SIZE]; // 单次获取1KB数据 +}; + +/* 日志控制信息(用于日志防覆盖功能保留的pi和日志次序等信息) */ +typedef struct { + u32 log_valid; /* 日志有效位 */ + u32 rsv_log_pi; + u32 pi; /* 日志偏移 */ + /* 日志序列号,每写一次递增1,标识是哪一次的日志, + * 以及下次要写入的分区(偶数写入主区,奇数写入备区) + */ + u32 log_seq; + u32 info_rsv[2]; /* 日志保留信息rsv字段 */ +} log_ctrl_info_s; + +typedef struct { + u32 log_type : 2; /* 日志数据的来源,0:代表up,1:代表ucode,2:表示sec */ + u32 rsvd1 : 6; + u32 core_id : 2; + u32 time_sync : 1; /* 时间是否已同步,MPU_LOG_TIME_SYNC_TYPE类型 */ + u32 patch_log : 1; /* 补丁日志标识,0:非补丁日志,1:补丁日志 */ + u32 patch_log_level : 3; /* 补丁日志级别 */ + u32 rsvd3 : 17; +} log_head_mpu; + +typedef struct { + u32 index : 8; + u32 valid : 1; + u32 rsv : 23; +} log_head_imp; + +typedef struct { + u32 log_type : 2; /* 日志数据的来源,0:代表up,1:代表ucode,2:表示sec */ + u32 core_id : 6; /* 微码core id,当type为1时生效 */ + u32 thread_id : 2; /* 微码thread id,当type为1时生效 */ + u32 srv_data : 22; /* 特性私有数据 */ +} log_head_def; + +/* user日志条目 */ +#define LOG_USER_ITEM 4 + +/* 日志的存储格式,总大小为32B */ +typedef struct { + /* DW0 */ + union { + log_head_mpu mpu; + log_head_imp imp; + log_head_def def; + } head; + + /* DW1 */ + u16 file_id; /* 存放文件ID */ + u16 code_line_num; /* 打印代码所在行 */ + + /* DW2~DW3 */ + u32 time_l32; /* 时间计数 */ + u32 time_h32; /* 时间计数 */ + + /* Dw4~DW7 */ + u32 user_val[LOG_USER_ITEM]; /* 根据每种类型的日志条目确定 */ +} log_item_s; + +typedef struct tag_mpu_mctp_counter_info { + u32 mctp_send_get_routing_tbl_port_err; + u32 mctp_get_routing_tbl_trans_pkt_err; + u32 mctp_dis_eid_proc_msg_len_err; + u32 mctp_send_one_cmd_trans_pkt_err; + u32 mctp_send_first_cmd_trans_pkt_err; + u32 mctp_send_middle_cmd_trans_pkt_err; + u32 mctp_send_last_cmd_trans_pkt_err; + u32 mctp_get_routing_tbl_err; + u32 mctp_lldp_capture_send_cmd_msg_err; + u32 mctp_reset_unsupported_err; + u32 mctp_set_eid_msg_len_err; + u32 mctp_set_eid_req_eid_err; + u32 mctp_get_eid_msg_len_err; + u32 mctp_predis_eid_msg_len_err; 
+ u32 mctp_notify_dis_iid_err; + u32 mctp_get_routing_tbl_fail_err; + u32 mctp_ctrl_cmd_not_support_err; + u32 mctp_handle_cmd_ic_err; + u32 mctp_handle_cmd_proc_err; + u32 mctp_handle_cmd_func_remap_err; + u32 mctp_handle_cmd_send_msg_err; + u32 mctp_assemble_not_first_pkt_err; + u32 mctp_assemble_msg_check_tag_err; + u32 mctp_assemble_msg_check_seq_err; + u32 mctp_assemble_msg_rcv_offset_err; + u32 mctp_check_pkg_len_shorter_err; + u32 mctp_check_pkg_len_err; + u32 mctp_check_pkg_trans_head_err; + u32 mctp_ncsi_msg_proc_err; + u32 mctp_pldm_msg_proc_err; + u32 mctp_handle_msg_type_not_support_err; + u32 mctp_handle_msg_fail_err; + u32 mctp_handle_msg_send_cmd_msg_err; + u32 mctp_alloc_rcv_buff_err; + u32 mctp_alloc_send_buff_err; + u32 mctp_assemble_msg_err; + u32 mctp_pkt_proc_err; + u32 mctp_recv_pkt_pldm_type; + u32 mctp_pldm_msg_handle_err; + u32 mctp_recv_pkt_ncsi_type; + u32 mctp_ncsi_msg_handle_err; + u32 mctp_recv_full_pkt_cnt; + u32 mctp_recv_ctr_pkt_cnt; + u32 mctp_recv_data_pkt_cnt; + u32 mctp_handle_cmd_proc_cnt; + u32 mctp_set_eid_msg_cnt; + u32 mctp_get_eid_msg_cnt; + u32 mctp_get_uuid_msg_cnt; + u32 mctp_get_version_msg_cnt; + u32 mctp_get_msgtype_msg_cnt; + u32 mctp_resovle_eid_msg_cnt; + u32 mctp_update_routing_tbl_msg_cnt; + u32 mctp_get_routing_tbl_msg_cnt; + u32 mctp_predis_eid_msg_cnt; + u32 mctp_dis_eid_proc_msg_cnt; + u32 mctp_notify_dis_msg_cnt; + u32 mctp_get_network_id_msg_cnt; + u32 mctp_query_hop_msg_cnt; + u32 mctp_resolve_uuid_msg_cnt; + u32 mctp_handle_cmd_send_cnt; + u32 mctp_handle_msg_proc_cnt; + u32 mctp_handle_msg_send_cnt; + u32 mctp_assemble_msg_correct; + u32 mctp_lldp_capture_send_cmd_msg_cnt; +} mpu_mctp_counter_info_s; + +#define OOB_INFO_BUFFER_MAX 1024 +typedef struct comm_cmd_oob_info_resp_new { + struct mgmt_msg_head head; /* 8B */ + u8 oob_info_buf[OOB_INFO_BUFFER_MAX]; +} comm_cmd_oob_info_resp_new; + +#endif diff --git a/hinic5/src/dpu_platform_library/include/drv_tool_msg/bond_pub_cmd.h 
b/hinic5/src/dpu_platform_library/include/drv_tool_msg/bond_pub_cmd.h new file mode 100644 index 00000000..ce23c55d --- /dev/null +++ b/hinic5/src/dpu_platform_library/include/drv_tool_msg/bond_pub_cmd.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. + * Filename : bond_pub_cmd.h + * Creation time : 2025/09/08 + * Description : bond工具和驱动的交互接口定义 + * Version : 1.0 + */ + +#ifndef BOND_PUB_CMD_H +#define BOND_PUB_CMD_H + +#include "bond_common_defs.h" +#include "hinic5_mt.h" + +#define MAX_NETDEV_NUM 4 + +/** + * @brief enum hinic5_bond_cmd_to_custom_e + * @details 定义了与自定义设备相关的命令类型 + */ +enum hinic5_bond_cmd_to_custom_e { + CMD_CUSTOM_BOND_DEV_CREATE = 1, /**< 创建自定义设备 */ + CMD_CUSTOM_BOND_DEV_DELETE, /**< 删除自定义设备 */ + CMD_CUSTOM_BOND_GET_CHIP_NAME, /**< 获取芯片名称 */ + CMD_CUSTOM_BOND_GET_CARD_INFO, /**< 获取卡片信息 */ + CMD_CUSTOM_BOND_GET_ULD_DEV_NAME +}; + +#define BOND_NAME_LEN (16) +#define BOND_DFX_OP_ADD (0) +#define BOND_DFX_OP_DEL (1) + +/** + * @brief struct bond_dfx_ops_info + * @details 用于bond绑定解绑的dfx操作 + */ +struct bond_dfx_ops_info { + struct mt_msg_head head; + char bond_name[BOND_NAME_LEN]; + u32 ops; + u32 user; +}; + +#endif diff --git a/hinic5/src/dpu_platform_library/include/drv_tool_msg/hisec_pub_cmd.h b/hinic5/src/dpu_platform_library/include/drv_tool_msg/hisec_pub_cmd.h new file mode 100644 index 00000000..21ab615c --- /dev/null +++ b/hinic5/src/dpu_platform_library/include/drv_tool_msg/hisec_pub_cmd.h @@ -0,0 +1,279 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. 
+ * Filename : hisec pub cmd + * Creation time : 2024/04/10 + * Description : COMM Commands and struct define between hinicadmdfx and DRIVER(ipsec) + * Version : 1.0 + */ + +#ifndef HISEC_PUB_CMD_H +#define HISEC_PUB_CMD_H + +#include "base_type.h" + +/* hisec_cmd_table最大2MB,减去ipsec_tbl_args的大小 */ +#define IPSEC_TBL_BUF_MAX (2048 * 1024 - sizeof(ipsec_tbl_args)) + +typedef enum hisec_driver_cmd_type { + HISEC_DRIVER_CMD_SET_IPSEC_SA = 0, + HISEC_DRIVER_CMD_SET_IPSEC_SP, + HISEC_DRIVER_CMD_SET_IPSEC_MASK, + HISEC_DRIVER_CMD_LIST_IPSEC_SA, + HISEC_DRIVER_CMD_LIST_IPSEC_SP, + HISEC_DRIVER_CMD_GET_IPSEC_SA_EXCIDS, + HISEC_DRIVER_CMD_GET_IPSEC_SP_EXCIDS, + HISEC_DRIVER_CMD_GET_TRNG, + HISEC_DRIVER_CMD_GET_ANTIREPLAY_INFO, + HISEC_DRIVER_CMD_GET_IPSEC_INFO, + HISEC_DRIVER_CMD_GET_VERSION = 16, +} hisec_driver_cmd_type_e; + +typedef struct tag_hisec_driver_cmd_hdr { + union { + struct { + u32 rsvd0 : 16; + u32 channel_id : 8; /* logical concurrency channel */ + u32 cmd_type : 8; /* cmd type */ + } bs; + u32 value; + } dw0; + + u32 cmd_sn; + u32 rsvd1[2]; +} hisec_driver_cmd_hdr_s; + +#define IPSEC_ANTIREPLAY_BITMAP_MAX_LEN 512 + +typedef struct { + u64 ipsec_in_pkt_num; + u64 ipsec_out_pkt_num; + + u32 ipsec_drop_pkt_num; + u32 ipsec_pkt_replay_without_esn_error; + u32 ipsec_pkt_top_seq_error; + u32 ipsec_pkt_anti_replay_error; + u32 ipsec_pkt_invalid_error; +} ipsec_antireplay_counter; + +typedef struct { + u64 rx_seq; + u64 tx_seq; + + u32 replaywin; + u16 esn_flag; + u16 exid; + + u16 funcid; + u16 valid; + u16 dir; + u16 rsvd; + + u64 bitmap[IPSEC_ANTIREPLAY_BITMAP_MAX_LEN]; +} ipsec_antireplay_info ; + +struct ipsec_soft_antireplay_info { + ipsec_antireplay_info info; + ipsec_antireplay_counter counter; +}; + +#define HISEC_ENABLE_HARDWARE_ANTIREPLAY 1 +#define HISEC_HARDWARE_ANTIREPLAY_WINDOW_SIZE 64 +/* 软件约束软件防重放窗口最小为256 */ +#define HISEC_SOFTWARE_ANTIREPLAY_MIN_WINDOW_SIZE 256 + +struct hinic5_ipsec_driver_enc_info { + u8 proto; /* tcp/udp */ + u8 direction; /* 
out/in */ + u8 mode; /* 0 transport/ 1 tunnel */ + u8 flag; /* 1 - esn */ + + u16 replaywindow; /* 32-64 */ + u8 alg_type; /* 0- aead 1-enc 2-auth 3-enc & auth */ + u8 alg_standard; /* 0 - aes, 1 - SM4 */ + + u8 enc_type; /* hisec_crypto_alg_type */ + u8 auth_type; /* hisec_crypto_alg_type */ + + u16 cipher_key_len; /* in bit */ + u32 cipher_key[8]; /* 128bit 192bit 256bit */ + u32 salt; /* 32bit */ + + u16 auth_key_len; /* in bit */ + u16 auth_trunc_len; /* in bit */ + u16 icv_mac_len; /* in bit */ + u8 out_ip_type; + u8 hard_antireplay_en; + u32 auth_key[32]; /* 1024bit */ + + u32 tunnel_sip[4]; + u32 tunnel_dip[4]; + + u8 tfc_padding_en; + u8 tfc_pad_len; + u8 tfc_pad_val; + u8 encrypt_path_sel; /* 0 - fast path, 1 - slow path */ + + u64 top_seq; +}; + +typedef struct tag_hisec_driver_cmd_set_ipsec_sa { + hisec_driver_cmd_hdr_s cmdhdr; + + u32 saddr[4]; + u32 daddr[4]; /* ipv4 in daddr[0] */ + u32 spi; + + u8 ipsec_proto; + u8 opid; /* | 0 - add | 1 - del | 2 - update | 3 - flush | */ + u8 iptype; /* | 0 - ipv4 | 1 - ipv6 | */ + u8 ipsec_tls_flag; + + u16 sport; + u16 dport; + + u16 vlan_id; + u16 dmac_h16; + + u32 dmac_l32; + + u32 rsvd1; + + u32 isn; + + struct hinic5_ipsec_driver_enc_info enc_info; +} hisec_driver_cmd_set_ipsec_sa_s; + +typedef struct tag_hisec_driver_cmd_set_ipsec_sp { + hisec_driver_cmd_hdr_s cmdhdr; + + u32 saddr[4]; + u32 daddr[4]; /* dip ipv4 in daddr[0] */ + u32 spi; + + u16 vid; + u16 dport; + + u8 ulp_proto; /* | 6 - tcp | 17 - udp | */ + u8 ipsec_proto; /* | 50 - esp | 51 - ah | */ + u8 opid; /* | 0 - add | 1 - del | 2 - update | */ + u8 iptype; /* | 0 - ipv4 | 1 - ipv6 | */ + + u8 action; /* | 0 - bypass | 1 - protect | 2 - drop | */ + u8 rsvd0; + u16 rsvd1; + + u16 sport; + u16 rsvd2; + + u32 rsvd3[3]; +} hisec_driver_cmd_set_ipsec_sp_s; + +typedef struct { + u32 index; + u32 cnt; + u32 total_cnt; +} ipsec_tbl_args; + +struct hisec_cmd_table { + ipsec_tbl_args args; + u8 tbl_buf[IPSEC_TBL_BUF_MAX]; +}; + +typedef struct 
tag_hisec_cmd_get_trng_req { + u32 req_num; + u8 tbl_buf[IPSEC_TBL_BUF_MAX]; +} hisec_cmd_get_trng_req_s; + +enum operation_type { + ADD = 0, + DELETE, + UPDATE, + FLUSH, + LIST, + SET, + DUMP, + ANTIREPLAY, + OPER_TYPE_MAX, +}; + +enum algorithm_type { + ALGO_AEAD = 0, + ALGO_ENC, + ALGO_AUTH, + ALGO_ENC_AND_AUTH, +}; + +enum algo_name_type { + RFC4106_GCM_AES = 0, + CBC_AES, + RFC3686_CTR_AES, + HMAC_SHA1, + HMAC_SHA256, + HMAC_SHA384, + HMAC_SHA512, + HMAC_SM3, + SM4_GCM, + ALG_TYPE_MAX, +}; + +enum alg_standard_type { + ALG_STANDARD_AES = 0, + ALG_STANDARD_SM4, +}; + +enum encapsulation_mode { + TRANSPORT = 0, + TUNNEL, + NATT_TUNNEL, + ENCAPSULATION_MODE_MAX = 3, +}; + +enum traffic_direction { + OUTBOUND = 0, + INBOUND, +}; + +/** + * @brief IPSec SPD action define + * @application scope used by tool and crypt driver + * @note the definition must be consistent with that of <hisec_npu_cmd.h> + */ +typedef enum { + HISEC_CMD_SPD_ACTION_BYPASS = 0, + HISEC_CMD_SPD_ACTION_PROTECT = 1, + HISEC_CMD_SPD_ACTION_DROP = 2, + HISEC_CMD_SPD_ACTION_MAX, +} hisec_cmd_spd_action_type_e; + +/* IPsec mask info */ +typedef struct tag_hisec_ipsec_mask_info_st { + u32 sa_key_mask; + u32 sp_key_mask; + + u32 rsvd0[2]; +} hisec_ipsec_mask_info_s; + +#define IPSEC_SA_MAX_NUM (64 * 1024) +#define IPSEC_SP_MAX_NUM (32 * 1024) + +struct hisec_cmd_get_sa_excids_out_buf { + u32 excids_size; + u16 excids[IPSEC_SA_MAX_NUM]; +}; + +struct hisec_cmd_get_sp_excids_out_buf { + u32 excids_size; + u16 excids[IPSEC_SP_MAX_NUM]; +}; + +typedef struct hisec_cmd_get_ipsec_info_out_buf { + u32 sa_ctxs; + u32 sp_ctxs; + u8 sa_mask; + u8 sp_mask; + u8 ipsec_work_mode; + u8 white_list; +} hisec_cmd_get_ipsec_info_out_buf_s; + +#endif /* HISEC_PUB_CMD_H */ diff --git a/hinic5/src/dpu_platform_library/include/drv_tool_msg/macsec_pub_cmd.h b/hinic5/src/dpu_platform_library/include/drv_tool_msg/macsec_pub_cmd.h new file mode 100644 index 00000000..08a2d74a --- /dev/null +++ 
b/hinic5/src/dpu_platform_library/include/drv_tool_msg/macsec_pub_cmd.h @@ -0,0 +1,197 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved. + * Creation time : 2024/04/10 + * Description : COMM Commands between hinicadmdfx and DRIVER(macsec) + */ +#ifndef MACSEC_PUB_CMD_H +#define MACSEC_PUB_CMD_H + +#include "macsec_mpu_cmd_defs.h" + +#define HIMACSEC_MAX_SAK_KEY_LEN 32 // The max sak length is 32B +#define HIMACSEC_MAX_SA_IN_SC 4 // The macsec protocol stipulates + // that SC can have a maximum of 4 SAs +#define HIMACSEC_SALT_BUF_SIZE 12 // The salt is 12B +#define HIMACSEC_ICV_LEN 16 +#define HIMACSEC_TOOL_IN_BUF_MAX 1024 +#define HIMACSEC_LIST_OUT_BUF_MAX (8 * 1024) +#define HIMACSEC_MIB_OUT_BUF_MAX (1 * 1024) +#define HIMACSEC_COUNTER_OUT_BUF_MAX 512 +#define HIMACSEC_CRYPTO_ALGO_AES 0 // TODO 算法类型归一 +#define HIMACSEC_CRYPTO_ALGO_SM4 1 +#define HIMACSEC_POLICY_ENABLE 1 +#define HIMACSEC_POLICY_DISABLE 0 +#define HIMACSEC_KEY_LENGTH_128 16 +#define HIMACSEC_KEY_LENGTH_256 32 +#define HIMACSEC_REG_KEY_LENGTH_128 0 +#define HIMACSEC_REG_KEY_LENGTH_256 1 +#define HIMACSEC_XPN_MAX_REPLAY_WINDOWS ((1 << 30) - 1) // 使能 xpn 模式下的 + // 最大重放窗口为 2^30 -1 +#define HIMACSEC_DEFAULT_XPN_THRESHOLD 0xC000000000000000ULL // XPN 模式下默认 pn 阈值 +#define HIMACSEC_DEFAULT_PN_THRESHOLD 0xC0000000ULL // 非 XPN 模式下默认 pn 阈值 +#define HIMACSEC_SET_SC_ENCODING_SA_BIT_VAL 0x1 +#define HIMACSEC_SET_SC_PROTECTION_MODE_BIT_VAL 0x2 +#define HIMACSEC_SET_SC_PROTECT_FRAMES_BIT_VAL 0x4 +#define HIMACSEC_SET_SC_VALIDATE_FRAMES_BIT_VAL 0x8 + +// mib 查询的类型 +typedef enum { + HIMACSEC_TOOL_MIB_TYPE_SC = 0, // SC MIB 信息 + HIMACSEC_TOOL_MIB_TYPE_PORT, // PORT MIB 信息 + HIMACSEC_TOOL_MIB_TYPE_MAX +} himacsec_tool_mib_type_e; + +// 加密方向 sc 保护模式 +typedef enum { + PROTECTION_MODE_INTERITY_ONLY = 0, // 仅进行完整性校验 + PROTECTION_MODE_CONFIDENTIALITY, // 进行完整性校验及加密 + PROTECTION_MODE_OFFSET_CONFIDENTIALITY, // 进行完整性校验及加密, + // 加密数据可偏移 0/30/50B + 
PROTECTION_MODE_MAX +} himacsec_protection_mode_e; + +// 解密方向 sc 验证模式 +typedef enum { + VALIDATE_MODE_DISABLE = 1, // 不对报文进行校验 + VALIDATE_MODE_CHECK, // 只进行校验,不进行过滤 + VALIDATE_MODE_STRICT, // 进行校验及过滤 + VALIDATE_MODE_MAX +} himacsec_validate_mode_e; + +// 算法定义 +typedef enum { + HIMACSEC_TOOL_CIPHER_GCM_AES_128 = 0, + HIMACSEC_TOOL_CIPHER_GCM_AES_256, + HIMACSEC_TOOL_CIPHER_GCM_AES_XPN_128, + HIMACSEC_TOOL_CIPHER_GCM_AES_XPN_256, + HIMACSEC_TOOL_CIPHER_GCM_SM4_128, + HIMACSEC_TOOL_CIPHER_GCM_SM4_XPN_128, + HIMACSEC_TOOL_CIPHER_MAX +} himacsec_tool_algo_e; + +// 支持的加密偏移规格 +typedef enum { + HIMACSEC_CONFIDENTIALITY_OFFSET_0 = 0, + HIMACSEC_CONFIDENTIALITY_OFFSET_30, + HIMACSEC_CONFIDENTIALITY_OFFSET_50, + HIMACSEC_CONFIDENTIALITY_OFFSET_MAX +} himacsec_confidentiality_ofs_e; + +// macsec object 操作对象 +typedef enum { + HIMACSEC_TOOL_OBJ_ENC_SC = 0, + HIMACSEC_TOOL_OBJ_DEC_SC, + HIMACSEC_TOOL_OBJ_ENC_SA, + HIMACSEC_TOOL_OBJ_DEC_SA, + HIMACSEC_TOOL_OBJ_MAX +} himacsec_tool_obj_e; + +typedef enum macsec_direction { // TODO 与 ipsec 归一 + MACSEC_INBOUND = 0, + MACSEC_OUTBOUND, +} crypt_direction_e; + +typedef enum crypt_key_length { + CRYPT_KEY_LENGTH_128 = 128, + CRYPT_KEY_LENGTH_256 = 256 +} crypt_key_length_e; + +enum sc_status { + SC_STATUS_NONE, + SC_STATUS_CREATED, + SC_STATUS_PN_THRESHOLD, + SC_STATUS_MAX, +}; + +enum sa_status { + SA_STATUS_NONE, + SA_STATUS_CREATED, + SA_STATUS_ENCODING, + SA_STATUS_EXPIRED, + SA_STATUS_MAX, +}; + +union obj_status { + enum sc_status sc; + enum sa_status sa; +}; + +struct himacsec_status { + u64 create_time; + u64 enable_time; + union obj_status status; + crypt_direction_e direct; +}; + +struct himacsec_sa { + macsec_sa_info_s info; + struct himacsec_status status; +}; + +struct himacsec_sc { + macsec_sc_info_s info; + struct himacsec_status status; + struct himacsec_sa sa[HIMACSEC_MAX_SA_IN_SC]; +}; + +// 命令的 inbuf 包含一个 hdr +typedef struct himacsec_cmd_hdr { + u32 cmd_type; + himacsec_tool_obj_e obj_type; +} himacsec_cmd_hdr_s; + +// 
inbuf 定义 +typedef struct himacsec_cmd_in { + himacsec_cmd_hdr_s hdr; + u8 buf[HIMACSEC_TOOL_IN_BUF_MAX]; +} himacsec_cmd_in_s; + +// del 命令入参定义 +struct himacsec_cmd_del_in { + u64 sci; + u8 an; + u8 rsvd[7]; +}; + +struct himacsec_cmd_set_sc_in { + u64 sci; + u32 set_flag_bitmap; + u32 rsvd; + macsec_sc_info_s sc; +}; + +// list 和 dump 命令 outbuf 出参定义 +typedef struct himacsec_cmd_list_out { + u32 enc_sc_cnt; + u32 dec_sc_cnt; + u8 enc_sc_buf[HIMACSEC_LIST_OUT_BUF_MAX]; // reserve the 100% resources for expansion + u8 dec_sc_buf[HIMACSEC_LIST_OUT_BUF_MAX]; +} himacsec_cmd_list_out_s; + +// mib 入参定义 +struct himacsec_cmd_mib_in { + u64 sci; + himacsec_tool_mib_type_e mib_type; + u32 rsvd; +}; + +// mib 出参定义 +typedef struct himacsec_cmd_mib_out { + u32 num; + u8 mib_buf[HIMACSEC_MIB_OUT_BUF_MAX]; +} himacsec_cmd_mib_out_s; + +// list buf 出参定义 +struct himacsec_cmd_list_sc_buf { + struct himacsec_sc sc; + u8 sa_cnt; +}; + +// counter 出参定义 +typedef struct himacsec_cmd_counter_out { + u8 enc_cnt_buf[HIMACSEC_COUNTER_OUT_BUF_MAX]; + u8 dec_cnt_buf[HIMACSEC_COUNTER_OUT_BUF_MAX]; +} himacsec_cmd_counter_out_s; + +#endif /* MACSEC_PUB_CMD_H */ diff --git a/hinic5/src/dpu_platform_library/include/drv_tool_msg/nic_pub_cmd.h b/hinic5/src/dpu_platform_library/include/drv_tool_msg/nic_pub_cmd.h new file mode 100644 index 00000000..c9420e16 --- /dev/null +++ b/hinic5/src/dpu_platform_library/include/drv_tool_msg/nic_pub_cmd.h @@ -0,0 +1,171 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2026 Huawei Technologies Co., Ltd */ + +#ifndef NIC_PUB_CMD_H +#define NIC_PUB_CMD_H + +#include "hinic5_mt.h" + +/* 队列信息相关 */ + +/** + * @brief struct hinic5_tx_hw_page + * @details 用于存储硬件页面信息的结构体 + */ +struct hinic5_tx_hw_page { + u64 phy_addr; /* 物理地址 */ + u64 *map_addr; /* 映射地址 */ +}; + +enum hinic5_show_set { + HINIC5_SHOW_SSET_IO_STATS = 1, +}; + +#define HINIC5_SHOW_ITEM_LEN 32 +/** + * @brief struct hinic5_show_item + * @details 用于显示项的结构体 + */ +struct hinic5_show_item { + char 
name[HINIC5_SHOW_ITEM_LEN]; /* 项的名称 */ + u8 hexadecimal; /* 值的显示方式,0表示十进制,1表示十六进制 */ + u8 rsvd[7]; /* 保留字段 */ + u64 value; /* 项的值 */ +}; + +/** + * @brief struct wqe_info + * @details 用于存储工作队列的相关信息 + */ +struct wqe_info { + int q_id; /* 队列ID */ + void *slq_handle; /* 队列句柄 */ + unsigned int wqe_id; /* 工作队列元素ID */ +}; + +/** + * @brief struct nic_sq_info + * @details 用于存储网络接口的发送队列信息的结构体 + */ +struct nic_sq_info { + u16 q_id; /* 队列ID */ + u16 pi; /* 生产者索引 */ + u16 ci; /* 消费者索引 */ + u16 fi; /* 硬件消费者索引 */ + u32 q_depth; /* 队列深度 */ + u16 pi_reverse; /* 生产者索引的反向 */ + u16 wqebb_size; /* 工作队列元素的大小 */ + u8 priority; /* 优先级 */ + u64 ci_wqe_page_addr; /* sq wq首page地址 */ + u64 cla_addr; /* wq block地址 */ + void *slq_handle; /* 发送队列句柄 */ + struct hinic5_tx_hw_page direct_wqe; /* 直接工作队列元素 */ + struct hinic5_tx_hw_page doorbell; /* 门铃 */ + u32 page_idx; /* 页面索引 */ + u32 glb_sq_id; /* 全局发送队列ID */ +}; + +/** + * @brief struct nic_rq_info + * @details 用于存储网络接口的接收队列信息的结构体 + */ +struct nic_rq_info { + u16 q_id; /* 队列ID */ + u16 delta; /* 差值 */ + u16 hw_ci; + u16 ci; /* 消费者索引 */ + u16 sw_pi; /* 软件生产者索引 */ + u16 wqebb_size; /* 工作队列元素的大小 */ + u16 q_depth; /* 队列深度 */ + u16 buf_len; /* 缓冲区长度 */ + + void *slq_handle; /* 接收队列句柄 */ + u64 ci_wqe_page_addr; /* 消费者索引工作队列元素的页面地址 */ + u64 ci_cla_tbl_addr; /* 消费者索引缓存行对齐表地址 */ + + u8 coalesc_timer_cfg; /* 中断超时时间,单位5us */ + u8 pending_limt; /* 中断聚合个数,单位8pkt */ + u16 msix_idx; /* MSI-X索引 */ + u32 msix_vector; /* MSI-X向量 */ +}; + +/* QOS相关 */ + +#define MT_DCB_OPCODE_WR BIT(0) /* 1 - write, 0 - read */ + +/** + * @brief struct hinic5_mt_dcb_state + * @details 用于存储多任务数据中心桥(DCB)状态信息的结构体 + */ +struct hinic5_mt_dcb_state { + struct mt_msg_head head; /* 消息头 */ + + u16 op_code; /* 操作码 0 - get dcb state, 1 - set dcb state */ + u8 state; /* 状态 0 - disable, 1 - enable dcb */ + u8 rsvd; /* 保留字段 */ +}; + +#define CMD_QOS_DEV_TRUST BIT(0) +#define CMD_QOS_DEV_DFT_COS BIT(1) +#define CMD_QOS_DEV_PCP2COS BIT(2) +#define CMD_QOS_DEV_DSCP2COS BIT(3) + +/** + * @brief 
struct hinic5_mt_qos_dev_cfg + * @details 用于配置QoS设备的结构体 + */ +struct hinic5_mt_qos_dev_cfg { + struct mt_msg_head head; /* 消息头部 */ + + u8 op_code; /* 0:get 1: set */ + u8 rsvd0; + u16 cfg_bitmap; /* bit0 - trust, bit1 - dft_cos, + * bit2 - pcp2cos, bit3 - dscp2cos + */ + + u8 trust; /* 0 - pcp, 1 - dscp */ + u8 dft_cos; + u16 rsvd1; + u8 pcp2cos[8]; /* 必须8个一起配置 */ + u8 dscp2cos[64]; /* 配置dscp2cos时,若cos值设置为0xFF, + * 驱动则忽略此dscp优先级的配置, + * 允许一次性配置多个dscp跟cos的映射关系 + */ + u32 rsvd2[4]; +}; + +/** + * @brief struct hinic5_mt_qos_cos_cfg + * @details 用于配置HINIC5多队列服务质量(QoS)类别设置的结构体 + */ +struct hinic5_mt_qos_cos_cfg { + struct mt_msg_head head; /* 消息头部,包含了消息的类型和长度等信息 */ + + u8 port_id; /* 端口ID,用于标识消息所属的端口 */ + u8 func_cos_bitmap; /* 功能类别位图,用于标识各个功能类别的使能状态 */ + u8 port_cos_bitmap; /* 端口类别位图,用于标识各个端口类别的使能状态 */ + u8 func_max_cos_num; /* 功能类别的最大数量,用于限制功能类别的数量 */ + u32 rsvd2[4]; +}; + +enum nic_driver_cmd_type { + NIC_TOOL_CMD_START = 0x120, /* 平台新增命令字从0x120开始, + * 旧命令字统一定义在@driver_cmd_type + */ + + /* MACsec 工具命令集 */ + MACSEC_TOOL_OP_LIST = 0x120, /* 获取驱动内存中所有 macsec 配置信息 */ + MACSEC_TOOL_OP_DUMP, /* 获取芯片侧所有 macsec 配置信息 */ + MACSEC_TOOL_OP_MIB, /* 获取芯片侧 SC MIB 信息或者 PORT MIB 信息 */ + MACSEC_TOOL_OP_ADD, /* 新增 SC 或 SA 配置 */ + MACSEC_TOOL_OP_DEL, /* 删除 SC 或 SA 配置 */ + MACSEC_TOOL_OP_SET, /* 修改 SC 配置 */ + MACSEC_TOOL_OP_FLUSH, /* 清除某个设备管理的 macsec 配置 */ + MACSEC_TOOL_OP_MAX = 0x12F, + + NIC_CMD_EXTEND_RSV_START = 0x200, + /* NIC工具保留命令字,产品使用该范围内的命令字 */ + NIC_CMD_EXTEND_RSV_END = 0x2FF, +}; + +#endif /* NIC_PUB_CMD_H */ diff --git a/hinic5/src/dpu_platform_library/include/drv_tool_msg/sdk_pub_cmd.h b/hinic5/src/dpu_platform_library/include/drv_tool_msg/sdk_pub_cmd.h new file mode 100644 index 00000000..3d10b439 --- /dev/null +++ b/hinic5/src/dpu_platform_library/include/drv_tool_msg/sdk_pub_cmd.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2026 Huawei Technologies Co., Ltd */ + +#ifndef SDK_PUB_CMD_H +#define SDK_PUB_CMD_H + +#include "base_type.h" 
+#include "hinic5_mt.h" + +/** + * @brief struct wqe_info + * @details 用于存储工作队列的相关信息 + */ +struct cmdq_wqe_info { + int q_id; /* 队列ID */ + void *rsvd; + unsigned int wqebb_id; /* 工作队列元素ID */ +}; + +#define CMDQ_WQE_DW_NUMBER (16) + +struct sdk_cmdq_wqe_desc { + u32 data[CMDQ_WQE_DW_NUMBER]; + u32 wqebb_size; +}; + +enum sdk_cmd_type { + SDK_CMD_SET_FREQ_REDUCE_RATIO = SERVICE_DRV_BASE_CMD, /* 设置芯片时间 + * 统计的降频比 + */ + SDK_CMD_SET_TIME_DIFF_ENABLE, /* 设置芯片时间差统计的使能 */ + SDK_CMD_GET_TIME_DIFF, /* 获取芯片时间差统计的差值 */ + SDK_CMD_GET_CMDQ_INFO, /* 获取cmdq队列的metadata信息 */ + SDK_CMD_GET_CMDQ_WQE_DESC, /* 获取cmdq队列的wqe信息 */ + SDK_CMD_CMDQ_CHANNEL_DETECT, /* cmdq通道连通性测试 */ + SDK_CMD_ATTACK_TEST, /* SDK 攻击测试 */ + + SDK_CMD_EXTEND_RSV_START = 0x200, + /* SDK工具保留命令字,产品使用该范围内的命令字 */ + SDK_CMD_EXTEND_RSV_END = 0x2FF, +}; +#endif diff --git a/hinic5/src/dpu_platform_library/include/fw_typedef.h b/hinic5/src/dpu_platform_library/include/fw_typedef.h new file mode 100644 index 00000000..980d38ba --- /dev/null +++ b/hinic5/src/dpu_platform_library/include/fw_typedef.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2026 Huawei Technologies Co., Ltd */ + +#ifndef FW_TYPEDEF_H +#define FW_TYPEDEF_H + +#include "base_type.h" + +typedef enum up_fw_update_type_e { + UP_FW_UPDATE_UP_TEXT = 0x0, + UP_FW_UPDATE_UP_DATA = 0x1, + UP_FW_UPDATE_UP_DICT = 0x2, + UP_FW_UPDATE_TILE_PCPTR = 0x3, + UP_FW_UPDATE_TILE_TEXT = 0x4, + UP_FW_UPDATE_TILE_DATA = 0x5, + UP_FW_UPDATE_TILE_DICT = 0x6, + UP_FW_UPDATE_PPE_STATE = 0x7, + UP_FW_UPDATE_PPE_BRANCH = 0x8, + UP_FW_UPDATE_PPE_EXTACT = 0x9, + UP_FW_UPDATE_CFG0 = 0xa, + UP_FW_UPDATE_CFG1 = 0xb, + UP_FW_UPDATE_MPU_CNT_DICT = 0xc, + UP_FW_UPDATE_NPU_CNT_DICT = 0xd, + UP_FW_UPDATE_PHY = 0x18, + UP_FW_UPDATE_BIOS = 0x19, + UP_FW_UPDATE_HLINK_ONE = 0x1a, + UP_FW_UPDATE_HLINK_TWO = 0x1b, + UP_FW_UPDATE_HLINK_THR = 0x1c, + UP_FW_UPDATE_VERIFY_TYPE = 0x1d, + UP_FW_UPDATE_RESVD_FW_TWO = 0x1e, + UP_FW_UPDATE_L0FW = 0x20, + UP_FW_UPDATE_L1FW = 
0x21, + UP_FW_UPDATE_BOOT = 0x22, + UP_FW_UPDATE_SEC_DICT = 0x23, + UP_FW_UPDATE_HOT_PATCH0 = 0x24, + UP_FW_UPDATE_PSM_ROM = 0x2a, + UP_FW_UPDATE_UB_SCC = 0x2b, + UP_FW_UPDATE_UB_SCC_DICT = 0x2c, + UP_FW_UPDATE_UBC_IMP = 0x2d, + UP_FW_UPDATE_UBC_IMP_DICT = 0x2e, + UP_FW_UPDATE_UBG_IMP = 0x2f, + UP_FW_UPDATE_UBG_IMP_DICT = 0x30, + UP_FW_UPDATE_ROCE_IMP = 0x31, + UP_FW_UPDATE_ROCE_IMP_DICT = 0x32, + UP_FW_UPDATE_ROCE_SCC = 0x33, + UP_FW_UPDATE_ROCE_SCC_DICT = 0x34, + UP_FW_UPDATE_OPTION_ROM = 0x3a, + UP_FW_UPDATE_EXTEND_BIN = 0x3e, +} up_fw_update_type_e; + +#define UP_MAX_FW_TYPE_CNT 0x40 + +/* uP image context could not be the same since the flash address the + * checksum are different. It is needed to keep two different fw information. + */ +#define FINAL_SUM(sum) (~(sum)) + +typedef struct fw_section_info_s { + u32 invalid; + u32 fw_ver; + u32 fw_len; + u32 fw_crc; + u32 fw_info_csum; +} fw_section_info_s; + +typedef struct fw_info_s { + u32 invalid; // 非0,表示状态机不存在,0表示存在 + u32 cfg_index; // 配置文件index,0~7 + u8 image_state[UP_MAX_FW_TYPE_CNT]; // 主备状态,0表示主,1表示备 + fw_section_info_s fw_attr[UP_MAX_FW_TYPE_CNT]; // 子固件信息 + u32 csum; +} fw_info_s; + +#endif diff --git a/hinic5/src/tools/micro_log/hinic5_micro_log.c b/hinic5/src/tools/micro_log/hinic5_micro_log.c new file mode 100644 index 00000000..3c83ff91 --- /dev/null +++ b/hinic5/src/tools/micro_log/hinic5_micro_log.c @@ -0,0 +1,1139 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * for more details. + * + */ + +#include <linux/module.h> +#include <linux/device.h> +#include <linux/delay.h> +#include <linux/time.h> +#include <linux/timex.h> +#include <linux/rtc.h> +#include <linux/string.h> + +#include "comm_defs.h" +#include "ossl_knl.h" +#include "hinic5_hw.h" +#include "hinic5_hwdev.h" +#include "hinic5_chip_info.h" +#include "hinic5_micro_log.h" +#include "hinic5_comm_cmd.h" +#include "mpu_inband_cmd_defs.h" +#include "micro_log_comm.h" +#include "micro_log_procfs_cmd.h" +#include "micro_log_index.h" + +static bool micro_log_en; +module_param(micro_log_en, bool, 0444); +MODULE_PARM_DESC(micro_log_en, "Enable microlog write to host - default is false"); + +static bool micro_asm_mode; // 0: 默认从flash中取; 1:选择/home/microcode.asm +module_param(micro_asm_mode, bool, 0444); +MODULE_PARM_DESC(micro_asm_mode, "default micro asm from flash"); + +char log_file_path[MAX_PATH_NAME] = "/home/microcode.log"; +char asm_file_path[MAX_PATH_NAME] = "/home/microcode.asm"; + +#define NIC_MICRO_ASM_START_ADDR 0x10000000 + +const char *micro_log_level[] = {"ERR", "WARN", "INFO", "DEBUG"}; + +u8 nic_micro_log_dbg; +u32 poll_log_cnt; + +int nic_micro_log_write_log_write_file(struct micro_log_info *log_info, + const u8 *func_name, micro_log_item_s *log_item, + struct file *fp_log_file) +{ + u32 err = 0; + unsigned int level_index = 0; + struct timeval txc; + struct rtc_time time; + u64 localtime; + +/*lint -restore*/ + level_index = log_item->ctrl_info.bs.type; + if (level_index > ULOG_DEBUG) { + microlog_warning("level_index(%d) err.", level_index); + level_index = ULOG_DEBUG; + } + + /* 获取当前的UTC时间 */ + do_gettimeofday(&(txc)); /* NOTE(review): struct timeval and do_gettimeofday() were removed from mainline around v5.6 (y2038 cleanup); for an OLK-6.6 target this builds only if ossl_knl.h supplies compat wrappers — otherwise switch to ktime_get_real_ts64() with struct timespec64 (and rtc_time64_to_tm below) */ + + /* 把UTC时间调整为本地时间 */ + utctime_to_localtime((u64)txc.tv_sec, &localtime); + + /* 北京时区调整. 
*/ + localtime = TIMEZONE_ADJUSTMENT(localtime); + + /* 算出时间中的年月日等数值到tm中 */ + rtc_time_to_tm((time64_t)localtime, &time); + + (void)snprintf(log_info->micro_log_tmpbuf, (unsigned long)MICRO_LOG_MAX_STRING_LEN * 8, + "[%02u:%02u:%02u.%06u](tile_core_tc:%d_%d_%d)[%d][%s](%s:%d) :", + time.tm_hour, time.tm_min, time.tm_sec, (u32)txc.tv_usec % 1000000, + log_item->ctrl_info.bs.tile_id, log_item->ctrl_info.bs.core_id, + log_item->ctrl_info.bs.thread_id, + log_item->line_and_pi.bs.log_seq, micro_log_level[level_index], + func_name, log_item->line_and_pi.bs.line); + + err = file_write(fp_log_file, log_info->micro_log_tmpbuf, + (u32)strlen(log_info->micro_log_tmpbuf)); //lint !e712 + if (err != strlen(log_info->micro_log_tmpbuf)) { + microlog_err("Can't write the cal data to file"); + return -EFAULT; + } + return 0; +} + +/* + * Function : nic_micro_log_write_log_file + * Description : 将解析后的信息保存在log文件中 + * Type : + * Input : u8 *buf + * u8 *func_name + * micro_log_item_s *log_item + * Output : None + * Return : + * Restriction : + * History : + * 1.Date : 2015/10/19 + * Modification : Created function + */ +int nic_micro_log_write_log_file(struct micro_log_info *log_info, u8 *buf, + u8 *func_name, micro_log_item_s *log_item) +{ + u32 err = 0; + struct file *fp; +#if defined(HAVE_MM_SEGMENT_T) + mm_segment_t old_fs; +#endif + + if (nic_micro_log_dbg == 1) + microlog_info("nic_micro_log_dbg in"); + + if (!buf || !func_name || !log_item) { + microlog_err("input buf, func_name or log_item is null"); + return -EFAULT; + } + if (!log_info->fp_log_file) { + microlog_err("fp_log_file is NULL"); + return -EFAULT; + } + + fp = log_info->fp_log_file; + +/*lint -save -e501*/ +#if defined(HAVE_MM_SEGMENT_T) +#if !defined(CONFIG_UACCESS_MEMCPY) && !defined(CONFIG_SET_FS) + old_fs = get_fs(); + set_fs(get_ds()); +#elif defined(CONFIG_UACCESS_MEMCPY) + old_fs = get_fs(); + set_fs(KERNEL_DS); +#elif defined(CONFIG_SET_FS) + old_fs = force_uaccess_begin(); +#endif +#endif + + if 
(nic_micro_log_write_log_write_file(log_info, func_name, log_item, fp) == -EFAULT) + return -EFAULT; + memset(log_info->micro_log_tmpbuf, 0, sizeof(log_info->micro_log_tmpbuf)); + +/*lint -save -e668*/ + (void)snprintf(log_info->micro_log_tmpbuf, + (unsigned long)MICRO_LOG_MAX_STRING_LEN * 8, (char *)buf, + log_item->data[0], log_item->data[1], log_item->data[2], log_item->data[3], + log_item->data[4], log_item->data[5], log_item->data[6], log_item->data[7]); +/*lint -restore*/ + err = file_write(fp, log_info->micro_log_tmpbuf, + (u32)strlen(log_info->micro_log_tmpbuf)); //lint !e712 + if (err != strlen(log_info->micro_log_tmpbuf)) { + microlog_err("Can't write the cal data to file ERR:[0x%x]", err); + return -EFAULT; + } + +#if defined(HAVE_MM_SEGMENT_T) +#if !defined(CONFIG_SET_FS) + set_fs(old_fs); +#else + force_uaccess_end(old_fs); +#endif +#endif + + return 0; +} + +/* + * Function : nic_micro_log_get_string_from_data + * Description : 获得字符串起始地址 + * Type : + * Input : struct micro_log_info *log_info + * unsigned int data_addr + * Output : None + * Return : + * Restriction : + * History : + * 1.Date : 2015/10/19 + * Modification : Created function + */ +char *nic_micro_log_get_string_from_data(struct micro_log_info *log_info, unsigned int data_addr) +{ + unsigned int offset; + char *out_buf; + char *tmp_char; + + u32 i; + char err_string[] = "string has %%s\n"; + u32 err_string_len = sizeof("string has %%s\n"); + + offset = data_addr - NIC_MICRO_ASM_START_ADDR; + out_buf = (char *)(log_info->micro_log_data_addr + offset); + + tmp_char = out_buf; + for (i = 0; i < strlen((char *)out_buf); i++, tmp_char++) { + if (('%' == *tmp_char) && (('s' == *(tmp_char + 1)) || ('S' == *(tmp_char + 1)))) + memcpy(out_buf, err_string, err_string_len); + } + + if (nic_micro_log_dbg == 1) { + pr_info("%s(%d): micro addr : 0x%x\n", __func__, __LINE__, data_addr); + tmp_char = out_buf; + pr_info("%s(%d)get asm data as:\n", __func__, __LINE__); + for (i = 0; i < strlen(out_buf); 
i++) { + pr_info("0x%02x ", *(tmp_char + i)); + if (0 == ((i + 1) % 16)) + pr_info("\n"); + } + } + + return out_buf; +} + +/* + * Function : nic_micro_log_parse_microcode_log + * Description : 解析微码日志 + * Type : + * Input : struct micro_log_info *log_info + * micro_log_item_s *log_item + * Output : None + * Return : + * Restriction : + * History : + * 1.Date : 2015/10/19 + * Modification : Created function + */ +int nic_micro_log_parse_microcode_log(struct micro_log_info *log_info, micro_log_item_s *log_item) +{ + int err; + + char *log_str; + char *log_file; + + if (!log_item) { + microlog_err("input log_item is NULL!"); + return -EFAULT; + } + + if (nic_micro_log_dbg == 1) + microlog_info("nic_micro_log_dbg in!"); + + /** 字符串地址必须大于第一行data地址, 并且是4的整数倍*/ + if (log_item->string_addr < NIC_MICRO_ASM_START_ADDR || + log_item->func_name_addr < NIC_MICRO_ASM_START_ADDR) { + microlog_err("string_addr[%x], func_name_addr[%x]", + log_item->string_addr, log_item->func_name_addr); + return -EFAULT; + } + + if (!log_info->micro_log_data_addr) { + microlog_err("asm file has no data, micro_log_data_addr is NULL"); + return -EFAULT; + } + + log_str = nic_micro_log_get_string_from_data(log_info, log_item->string_addr); + log_file = nic_micro_log_get_string_from_data(log_info, log_item->func_name_addr); + + err = nic_micro_log_write_log_file(log_info, (u8 *)log_str, (u8 *)log_file, log_item); + if (err != 0) { + microlog_err("write log file fail."); + return err; + } + + if (nic_micro_log_dbg == 1) + microlog_info("nic_micro_log_dbg out!"); + return 0; +} + +int check_param_for_get_asm(struct micro_log_info *log_info) +{ + if (!log_info->fp_asm_file) { + log_info->fp_asm_file = file_open(asm_file_path); + if (IS_ERR(log_info->fp_asm_file)) { + microlog_err("Can't open /home/microcode.asm file."); + log_info->fp_asm_file = NULL; + return -EFAULT; + } + } + return 0; +} + +/* + * Function : process_per_line_data + * Description : process_per_line_data for function 
`nic_micro_log_get_asm_file_data` + * Type : struct micro_log_info *log_info, u64 datalen + * Input : void + * Output : None + * Return : void + * Restriction :None + * History : + * 1.Date : 2016/3/6 + * Modification : Created function + */ +int process_per_line_data(struct micro_log_info *log_info, u32 all_line, u64 file_size) +{ + int read_byte; + u32 i; + u32 file_ops = 0; + u64 datalen = 0; + unsigned int addr; + unsigned int data; + char tmpbuf[MICRO_LOG_MAX_STRING_LEN] = {0}; + + /* 获取每行中的4字节数据 */ + for (i = 0; i < all_line; i++) { + read_byte = file_read(log_info->fp_asm_file, tmpbuf, LINE_CHAR_NUM, &file_ops); + if (read_byte < 0) { + microlog_err("Can't read the cal data:%d from file %d.", read_byte, i); + return -EFAULT; + } + + if (read_byte < LINE_CHAR_NUM) { + microlog_err("end file."); + return 0; + } + + (void)sscanf(tmpbuf, "%x : %x", &addr, &data); + + if (nic_micro_log_dbg == 1) + microlog_info("0x%x", data); + + (void)snprintf((char *)(log_info->micro_log_data_addr + datalen), + file_size, "%c%c%c%c", (u8)(data >> 24), + (u8)((data & 0x00ff0000) >> 16), + (u8)((data & 0x0000ff00) >> 8), (u8)(data & 0xff)); + datalen += sizeof(data); + + if (file_size < datalen) { + microlog_err("datalen too large"); + return -EFAULT; + } + } + return 0; +} + +/* + * Function : nic_micro_log_get_asm_file_data + * Description : get microcode.asm data + * Type : + * Input : void + * Output : None + * Return : + * Restriction : + * History : + * 1.Date : 2016/3/6 + * Modification : Created function + */ +int nic_micro_log_get_asm_file_data(struct micro_log_info *log_info) +{ + u64 file_size; + u32 all_line; +#if defined(HAVE_MM_SEGMENT_T) + mm_segment_t old_fs; +#endif + + if (check_param_for_get_asm(log_info) != 0) + return -EFAULT; + +/*lint -save -e501*/ +#if defined(HAVE_MM_SEGMENT_T) +#if !defined(CONFIG_UACCESS_MEMCPY) && !defined(CONFIG_SET_FS) + old_fs = get_fs(); + set_fs(get_ds()); +#elif defined(CONFIG_UACCESS_MEMCPY) + old_fs = get_fs(); + 
set_fs(KERNEL_DS); +#elif defined(CONFIG_SET_FS) + old_fs = force_uaccess_begin(); +#endif +#endif +/*lint -restore*/ + + file_size = get_file_size(log_info->fp_asm_file); + + /* asm文件一行固定20字节,文件大小为20倍数。 + * asm文件最后一行为时间,无需保存在缓存中 + */ + all_line = (u32)(file_size - LINE_CHAR_NUM) / LINE_CHAR_NUM; + + /* asm文件中一行有效字符串为4字节,申请的内存只需asm文件的1/5 */ + file_size = (file_size / LINE_CHAR_NUM) * 4; + + log_info->micro_log_data_addr = kzalloc((file_size + 1), GFP_KERNEL); + if (!log_info->micro_log_data_addr) + goto err_close_file; + + /*lint -save -e647*/ + set_file_position(log_info->fp_asm_file, 0 * LINE_CHAR_NUM); + /*lint -restore*/ + if (process_per_line_data(log_info, all_line, file_size) != 0) { + microlog_err("process_per_line_data fail!"); + goto err_free_mem; + } + +#if defined(HAVE_MM_SEGMENT_T) +#if !defined(CONFIG_SET_FS) + set_fs(old_fs); +#else + force_uaccess_end(old_fs); +#endif +#endif + + microlog_info("%s success\n", __func__); + return 0; + +err_free_mem: + if (log_info->micro_log_data_addr) { + kfree((void *)(log_info->micro_log_data_addr)); + log_info->micro_log_data_addr = NULL; + } + +err_close_file: + +#if defined(HAVE_MM_SEGMENT_T) +#if !defined(CONFIG_SET_FS) + set_fs(old_fs); +#else + force_uaccess_end(old_fs); +#endif +#endif + file_close(log_info->fp_asm_file); + log_info->fp_asm_file = NULL; + microlog_err("close microcode.asm!"); + return -EFAULT; +} + +/* + * Function : nic_micro_log_create_log_file + * Description : create microlog.log file + * Type : + * Input : void + * Output : None + * Return : + * Restriction : + * History : + * 1.Date : 2016/3/6 + * Modification : Created function + */ +int nic_micro_log_create_log_file(struct micro_log_info *log_info) +{ + char ulog_file_time[MAX_PATH_NAME] = {0}; + struct timeval txc; + struct rtc_time time; + u64 max_time_len; + u64 path_len; + u64 localtime; + + /* 获取当前的UTC时间 */ + do_gettimeofday(&(txc)); + + /* 把UTC时间调整为本地时间 */ + utctime_to_localtime(txc.tv_sec, &localtime); + + /* 算出时间中的年月日等数值到tm中 
*/ + rtc_time_to_tm(localtime, &time); + + path_len = strlen(log_file_path) - strlen(".log"); + (void)memcpy(ulog_file_time, log_file_path, path_len); + + max_time_len = MAX_PATH_NAME - path_len; + (void)snprintf(ulog_file_time + path_len, max_time_len, + "_%s_%04d_%02d_%02d_%02d_%02d_%02d.log", + log_info->hinic_micro_log_task.name, + time.tm_year + 1900, + time.tm_mon + 1, time.tm_mday, + time.tm_hour, time.tm_min, time.tm_sec); + + if (!log_info->fp_log_file) { + log_info->fp_log_file = file_creat(ulog_file_time); + if (IS_ERR(log_info->fp_log_file)) { + microlog_err("Can't create %s file", ulog_file_time); + + return -EFAULT; + } + } + microlog_info("create %s file", ulog_file_time); + + return 0; +} + +/* + * Function : nic_micro_log_create_new_log_file + * Description : create new microcode.log for overflow 1G + * Type : + * Input : void + * Output : None + * Return : + * Restriction : + * History : + * 1.Date : 2016/3/6 + * Modification : Created function + */ +int nic_micro_log_create_new_log_file(struct micro_log_info *log_info) +{ + u32 file_size; + + char ulog_file_time[MAX_PATH_NAME] = {0}; + struct timeval txc; + struct rtc_time time; + u64 max_time_len; + u64 path_len; + u64 localtime; + + if (!log_info->fp_log_file) { + microlog_err("fp_log_file is NULL!"); + return -EFAULT; + } + file_size = get_file_size(log_info->fp_log_file); + + if ((MAX_SIZE_OF_LOG_FILE) <= file_size) { + if (log_info->fp_log_file) { + file_close(log_info->fp_log_file); + log_info->fp_log_file = NULL; + } + + /* 获取当前的UTC时间 */ + do_gettimeofday(&(txc)); + + utctime_to_localtime(txc.tv_sec, &localtime); + + /* 算出时间中的年月日等数值到tm中 */ + rtc_time_to_tm(localtime, &time); + + path_len = strlen(log_file_path) - strlen(".log"); + (void)memcpy(ulog_file_time, log_file_path, path_len); + + max_time_len = MAX_PATH_NAME - path_len; + (void)snprintf(ulog_file_time + path_len, max_time_len, + "_%s_%04d_%02d_%02d_%02d-%02d.log", + log_info->hinic_micro_log_task.name, + time.tm_year + 1900, + 
time.tm_mon + 1, time.tm_mday, + time.tm_hour, time.tm_min); + + log_info->fp_log_file = file_creat(ulog_file_time); + if (IS_ERR(log_info->fp_log_file)) { + microlog_err("Can't create %s file!", ulog_file_time); + return -EFAULT; + } + } + return 0; +} + +int hinic5_micro_log_init_cnt_set(void *hwdev) +{ + cmdq_microlog_ctrl_info_set_s microlog_ctrl_info = {{0}}; + size_t msg_len = sizeof(cmdq_microlog_ctrl_info_set_s); + + microlog_ctrl_info.microlog_init_flag = 1; + + return hinic5_set_microlog_cmdq(hwdev, (void *)µlog_ctrl_info, + msg_len, COMM_CMD_MICROLOG_CTRL_INFO_SET); +} + +int hinic5_comm_micro_log_init(struct hinic5_hwdev *hwdev) +{ + int err = 0; + + if (!micro_log_en) + return 0; + + if (hinic5_micro_log_init_cnt_set(hwdev) != 0) { + microlog_warning("not again enable micro_log"); + return 0; + } + + err = hinic5_micro_log_init(hwdev); + if (err) { + sdk_err(hwdev->dev_hdl, "Failed to initialize micro log\n"); + return err; + } + + err = hinic5_micro_log_func_en(hwdev, 1); + if (err) { + sdk_warn(hwdev->dev_hdl, "Failed to enable micro log\n"); + return err; + } + + err = micro_log_procfs_init(hwdev); + return err; +} + +/* + * Function : hinic5_micro_log_init + * Description : micro code's log init + * Type : void + * Input : void + * Output : None + * Return : int + * Restriction : + * History : void + * 1.Date : 2015/8/15 + * Modification : Created function + */ +int hinic5_micro_log_init(void *hwdev) +{ + int ret = 0; + u64 v_addr = 0; + u64 p_addr = 0; + u32 i; + struct card_node *chip_node; + struct micro_log_info *log_info; + + if (!hwdev) { + microlog_err("hwdev is NULL!"); + return -EFAULT; + } + + if (hinic5_ppf_idx(hwdev) != hinic5_global_func_id(hwdev)) { + microlog_info("Only PPF support micro log init!"); + return 0; + } + + chip_node = (struct card_node *)(((struct hinic5_hwdev *)hwdev)->chip_node); + + log_info = chip_node->log_info; + if (log_info) { + sdk_info(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "%s(%d):NIC MICRO LOG has 
already init!\n", + __func__, __LINE__); + return 0; + } + + log_info = kzalloc(sizeof(*log_info), GFP_KERNEL); + if (!log_info) + return -ENOMEM; + + chip_node->log_info = log_info; + + /* 申请256*256*64B=4M空间 */ + for (i = 0; i < MICRO_LOG_MAX_QUEUE_NUM; i++) { + v_addr = (u64)dma_zalloc_coherent(((struct hinic5_hwdev *)hwdev)->dev_hdl, + (unsigned long)(MICRO_LOG_MAX_QUEUE_DEPTH * MICRO_LOG_ITEM_LEN), + &p_addr, GFP_KERNEL); + if (!v_addr) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "%s(%d):NIC MICRO LOG alloc queue(%d) reosurce fail!\n", + __func__, __LINE__, i); + goto err_free_mem; + } + + log_info->que_addr[MICRO_LOG_VIR_ADDR][i] = v_addr; + log_info->que_addr[MICRO_LOG_PHY_ADDR][i] = p_addr; + + /* 维护软件侧队列信息BD */ + ret = hinic5_microlog_gpa_set(hwdev, p_addr, i); + if (ret) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "%s(%d):NIC MICRO LOG write table (Lt index%d)fail(%d)!\n", + __func__, __LINE__, i, ret); + + dma_free_coherent(((struct hinic5_hwdev *)hwdev)->dev_hdl, + (unsigned long)(MICRO_LOG_MAX_QUEUE_DEPTH * + MICRO_LOG_ITEM_LEN), + (void *)v_addr, p_addr); + v_addr = 0; + p_addr = 0; + log_info->que_addr[MICRO_LOG_VIR_ADDR][i] = v_addr; + log_info->que_addr[MICRO_LOG_PHY_ADDR][i] = p_addr; + goto err_free_mem; + } + } + + memset((void *)&log_info->log_stati_info, 0, sizeof(struct nic_micro_log_statistics_info)); + + log_info->hwdev = hwdev; + + microlog_info("nic micro log init OK!"); + + return 0; + +err_free_mem: + if (i == 0) { + kfree(log_info); + chip_node->log_info = NULL; + return -ENOMEM; + } + /* first need i-1 */ + while (i--) { + v_addr = log_info->que_addr[MICRO_LOG_VIR_ADDR][i]; + p_addr = log_info->que_addr[MICRO_LOG_PHY_ADDR][i]; + + dma_free_coherent(((struct hinic5_hwdev *)hwdev)->dev_hdl, + (unsigned long)(MICRO_LOG_MAX_QUEUE_DEPTH * MICRO_LOG_ITEM_LEN), + (void *)v_addr, p_addr); + + v_addr = 0; + p_addr = 0; + + log_info->que_addr[MICRO_LOG_VIR_ADDR][i] = v_addr; + log_info->que_addr[MICRO_LOG_PHY_ADDR][i] = 
p_addr; + } + + kfree(log_info); + chip_node->log_info = NULL; + return -ENOMEM; +} + +static void micro_log_clear_ci_entry_data(struct micro_log_info *log_info) +{ + u32 lt_index; + u32 lt_offset; + size_t len = sizeof(micro_log_item_s); + + lt_index = (log_info->all_ci / MICRO_LOG_MAX_QUEUE_DEPTH) % MICRO_LOG_MAX_QUEUE_NUM; + lt_offset = log_info->all_ci % MICRO_LOG_MAX_QUEUE_DEPTH; + memset((micro_log_item_s *)(log_info->que_addr[MICRO_LOG_VIR_ADDR][lt_index] + + (lt_offset * MICRO_LOG_ITEM_LEN)), 0, len); +} + +static void micro_log_get_ci_entry_data(struct micro_log_info *log_info, micro_log_item_s *log_item) +{ + u32 i; + u32 lt_index; + u32 lt_offset; + size_t len = sizeof(micro_log_item_s); + + lt_index = (log_info->all_ci / MICRO_LOG_MAX_QUEUE_DEPTH) % MICRO_LOG_MAX_QUEUE_NUM; + lt_offset = log_info->all_ci % MICRO_LOG_MAX_QUEUE_DEPTH; + memcpy(log_item, + (micro_log_item_s *)(log_info->que_addr[MICRO_LOG_VIR_ADDR][lt_index] + + (lt_offset * MICRO_LOG_ITEM_LEN)), len); + + /* 先对ctrl信息进行大小端转换 */ + log_item->ctrl_info.value = ntohl(log_item->ctrl_info.value); + /* 进行大小端转换 */ + log_item->string_addr = ntohl(log_item->string_addr); + log_item->func_name_addr = ntohl(log_item->func_name_addr); + for (i = 0; i < DFX_LOG_PRINT_MAX_PARA; i++) + log_item->data[i] = ntohl(log_item->data[i]); + log_item->line_and_pi.value = ntohl(log_item->line_and_pi.value); +} + +int micro_log_file_size_check(struct hinic5_hwdev *hwdev, struct micro_log_info *log_info) +{ + int ret; + + ret = hinic5_microlog_ctrl_info_set(hwdev, log_info->nic_micro_log_enable, + log_info->all_ci, INFO_LOG_PRINT); + if (ret) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, "%s(%d):Write table (It index 0)fail(%d), all_ci:0x%x\n", + __func__, __LINE__, ret, log_info->all_ci); + return ret; + } + + ret = nic_micro_log_create_new_log_file(log_info); + if (ret) + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "%s(%d):nic_micro_log_create_new_log_file fail(%d)!\n", + __func__, __LINE__, ret); + 
return ret; +} + +/* + * Function : nic_micro_log_poll_recv + * Description : poll receive micro log + * Type : void + * Input : void + * Output : None + * Return : void + * Restriction : + * History : None + * 1.Date : 2015/8/15 + * Modification : Created function + */ +static void nic_micro_log_poll_recv(void *hwdev) +{ + int ret; + struct card_node *chip_node; + struct micro_log_info *log_info; + micro_log_item_s log_item; + static u32 count; + + chip_node = (struct card_node *)(((struct hinic5_hwdev *)hwdev)->chip_node); + log_info = chip_node->log_info; + if (!log_info) { + microlog_err("input param hwdev is illegal!"); + return; + } + + if (log_info->nic_micro_log_enable == 0) { + msleep(MICRO_LOG_POLLING_TIME * 1000); + return; + } + + while (log_info->nic_micro_log_enable != 0) { + micro_log_get_ci_entry_data(log_info, &log_item); + if (log_item.ctrl_info.bs.ctrl_flag == 0) + break; + + // 解析日志 + ret = nic_micro_log_parse_microcode_log(log_info, &log_item); + if (ret != 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "%s(%d):parse_microcode_log fail(%d)\n", + __func__, __LINE__, ret); + return; + } + + // 清零对应的buffer + micro_log_clear_ci_entry_data(log_info); + + log_info->all_ci++; + count++; + poll_log_cnt++; + log_info->log_stati_info.recv_log_num++; + + if (count >= MAX_NUM_OF_ONE_TIME_ULOG) { + if (micro_log_file_size_check(hwdev, log_info) != 0) { + sdk_err(((struct hinic5_hwdev *)hwdev)->dev_hdl, + "%s(%d):micro_log_file_size_check fail.\n", + __func__, __LINE__); + } else { + /* 如果处理不成功,poll_log_cnt不能清零的原因 + * 是为了再while循环外再次尝试更新ci等操作 + */ + poll_log_cnt = 0; + } + count = 0; + /* change the same priority task for avoiding long time only do this task */ + msleep(100); + } + + if (count && (!(count % 100))) + msleep(100); + } + + /* 更新ci的接口由mbox改为cmdq,存在日志循环打印的问题, + * 因此加上拦截:当日志累积16K,会更新一次ci + */ + if (poll_log_cnt >= MAX_NUM_OF_ONE_TIME_ULOG) { + if (micro_log_file_size_check(hwdev, log_info) != 0) { + sdk_err(((struct hinic5_hwdev 
*)hwdev)->dev_hdl, + "%s(%d):micro_log_file_size_check fail.\n", + __func__, __LINE__); + return; + } + poll_log_cnt = 0; + } + + /* polling timer */ + msleep(MICRO_LOG_POLLING_TIME); +} + +static void nic_micro_log_disable_func(void *hwdev) +{ + struct card_node *chip_node; + struct micro_log_info *log_info; + + chip_node = (struct card_node *)(((struct hinic5_hwdev *)hwdev)->chip_node); + log_info = chip_node->log_info; + + /* 删除一个轮询SM表格中的线程 */ + stop_thread(&log_info->hinic_micro_log_task); + + hinic5_micro_log_reset(hwdev); + + /* 等待循环任务中不在记录日志,仍需等待50ms的延时 */ + msleep(MICRO_LOG_POLLING_TIME * 50); + + if (log_info->fp_asm_file) { + file_close(log_info->fp_asm_file); + log_info->fp_asm_file = NULL; + microlog_info("close microcode.asm!"); + } + if (log_info->micro_log_data_addr) { + kfree((void *)log_info->micro_log_data_addr); + log_info->micro_log_data_addr = NULL; + } + if (log_info->fp_log_file) { + file_close(log_info->fp_log_file); + log_info->fp_log_file = NULL; + microlog_info("close microcode.log!"); + } +} + +static int hinic5_micro_log_ctr32_clear(void *hwdev, u8 cmd) +{ + struct hinic5_cmd_buf *cmd_buf = NULL; + u64 out_param = 0; + int err; + + if (hinic5_is_chip_present((struct hinic5_hwdev *)hwdev) == false) { + microlog_warning("chip is absent, microlog not send cmdq to npu!"); + return 0; + } + + cmd_buf = hinic5_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + microlog_err("failed to allocate cmd buf!"); + return -ENOMEM; + } + + cmd_buf->size = sizeof(u32); + + err = hinic5_cmdq_direct_resp(hwdev, HINIC5_MOD_COMM, cmd, cmd_buf, + &out_param, 0, HINIC5_CHANNEL_NIC); + if ((err) || (out_param)) { + microlog_err("failed to clear print cnt, err: %d,out_param: 0x%llx!", + err, out_param); + err = -EFAULT; + } + + hinic5_free_cmd_buf(hwdev, cmd_buf); + + return err; +} + +int micro_log_get_asm_file(void *hwdev, struct micro_log_info *log_info) +{ + int ret = 0; + + if (micro_asm_mode == 1) { + /* 方式1:默认从/home/microcode.asm中拿到字典文件 */ + ret = 
nic_micro_log_get_asm_file_data(log_info); + } + + if (ret != 0 || (micro_asm_mode == 0 && !log_info->micro_log_data_addr)) { + /* 方式2:方式1失败(可能内部处理失败 + * 或者home下无microcode.asm文件), + * 或者用户手动选择从flash中拿到字典文件 + */ + ret = mirco_log_get_sim_data_from_flash((struct hinic5_hwdev *)hwdev, log_info); + } + return ret; +} + +int nic_micro_log_enable_func(void *hwdev) +{ + int ret; + struct card_node *chip_node; + struct micro_log_info *log_info; + + if (!hwdev) { + microlog_err("hwdev is NULL!"); + return -EFAULT; + } + + chip_node = (struct card_node *)(((struct hinic5_hwdev *)hwdev)->chip_node); + log_info = chip_node->log_info; + + hinic5_micro_log_reset(hwdev); + + ret = micro_log_get_asm_file(hwdev, log_info); + if (ret) { + microlog_err("micro_log_get_asm_file fail(0x%x)\n", ret); + log_info->nic_micro_log_enable = 0; + return ret; + } + + log_info->hinic_micro_log_task.name = (char *)chip_node->chip_name; + + if (!log_info->fp_log_file) { + ret = nic_micro_log_create_log_file(log_info); + if (ret) { + microlog_err("nic_micro_log_create_new_log_file fail(%d)!", ret); + log_info->nic_micro_log_enable = 0; + return ret; + } + } + + ret = hinic5_microlog_ctrl_info_set(hwdev, log_info->nic_micro_log_enable, + log_info->all_ci, INFO_LOG_PRINT); + if (ret) { + microlog_err("Write table (It index0)fail(%d)!", ret); + return ret; + } + + log_info->hinic_micro_log_task.data = hwdev; + log_info->hinic_micro_log_task.thread_fn = nic_micro_log_poll_recv; + + ret = creat_thread(&log_info->hinic_micro_log_task); + if (ret) { + microlog_err("NIC MICRO LOG create thread fail(%d)!", ret); + return ret; + } + + return 0; +} + +int hinic5_micro_log_func_en(void *hwdev, u8 is_en) +{ + int ret; + struct card_node *chip_node; + struct micro_log_info *log_info; + + if (!hwdev) { + microlog_err("handle is NULL!"); + return -EFAULT; + } + + if (is_en > 1) { + microlog_err("is_en(%u) beyond 1!", is_en); + return -EFAULT; + } + + chip_node = (struct card_node *)(((struct hinic5_hwdev 
*)hwdev)->chip_node); + log_info = chip_node->log_info; + + if (!log_info) { + microlog_err("micro log is not init!"); + return -EFAULT; + } + + if (log_info->nic_micro_log_enable == is_en) { + microlog_err("micro log is already open or close!"); + return 0; + } + + log_info->nic_micro_log_enable = is_en; + + if (is_en) { + ret = nic_micro_log_enable_func(hwdev); + if (ret) { + microlog_err("nic_micro_log_enable_func fail!"); + return -EFAULT; + } + } else { + nic_micro_log_disable_func(hwdev); + } + + microlog_info("micro log func is %s\n", is_en ? "enable" : "disable"); + + return 0; +} + +/* + * Function : hinic5_micro_log_uninit + * Description : nic micro code log uninit + * Type : void + * Input : eal_handle handle + * Output : None + * Return : void + * Restriction :Null + * History : None + * 1.Date : 2015/9/2 + * Modification : Created function + */ +void hinic5_micro_log_uninit(void *hwdev) +{ + int ret; + u64 v_addr = 0; + u64 p_addr = 0; + u32 i; + struct card_node *chip_node; + struct micro_log_info *log_info; + + if (!micro_log_en) + return; + + if (!hwdev) { + microlog_err("hwdev is NULL!"); + return; + } + + chip_node = (struct card_node *)(((struct hinic5_hwdev *)hwdev)->chip_node); + log_info = chip_node->log_info; + if (!log_info) { + microlog_warning("micro log is not init!"); + return; + } + + if (log_info->hwdev != hwdev) { + microlog_err("micro log is not init in this function!"); + return; + } + + ret = hinic5_micro_log_func_en(hwdev, 0); + if (ret) + microlog_warning("hinic5_micro_log_func_en fail (%d)!", ret); + + /* 释放256*64B*192 空间 */ + for (i = 0; i < MICRO_LOG_MAX_QUEUE_NUM; i++) { + v_addr = log_info->que_addr[MICRO_LOG_VIR_ADDR][i]; + p_addr = log_info->que_addr[MICRO_LOG_PHY_ADDR][i]; + dma_free_coherent(((struct hinic5_hwdev *)log_info->hwdev)->dev_hdl, + (unsigned long)(MICRO_LOG_MAX_QUEUE_DEPTH * MICRO_LOG_ITEM_LEN), + (void *)v_addr, (dma_addr_t)p_addr); + log_info->que_addr[MICRO_LOG_VIR_ADDR][i] = 0; + 
log_info->que_addr[MICRO_LOG_PHY_ADDR][i] = 0; + + /* 更新队列信息到SM表格中,调用chip接口 */ + ret = hinic5_microlog_gpa_set(hwdev, 0 /* p_addr */, i); + if (ret != 0) + microlog_warning("Write table (It index%d)fail(%d)!", i, ret); + } + + kfree(log_info); + chip_node->log_info = NULL; + + micro_log_procfs_exit(); +} + +void hinic5_micro_log_reset(void *hwdev) +{ + int ret; + u64 v_addr = 0; + u32 i; + struct card_node *chip_node; + struct micro_log_info *log_info; + + chip_node = (struct card_node *)(((struct hinic5_hwdev *)hwdev)->chip_node); + log_info = chip_node->log_info; + if (!log_info) { + microlog_err("x86 micro log is not init"); + return; + } + + /* 维护软件侧队列信息BD */ + ret = hinic5_microlog_ctrl_info_set(hwdev, log_info->nic_micro_log_enable, + 0 /* ci_index */, INFO_LOG_PRINT); + if (ret) { + microlog_err("Write table (It index 0)fail(%d)!", ret); + return; + } + + for (i = 0; i < MICRO_LOG_MAX_QUEUE_NUM; i++) { + v_addr = log_info->que_addr[MICRO_LOG_VIR_ADDR][i]; + memset((void *)v_addr, 0, + (unsigned long)(MICRO_LOG_MAX_QUEUE_DEPTH * MICRO_LOG_ITEM_LEN)); + } + + ret = hinic5_micro_log_ctr32_clear(hwdev, COMM_CMD_MICROLOG_PRINT_CNT_CLEAR); + if (ret) { + microlog_err("Read ctr (It index 0)fail(%d)!", ret); + return; + } +} diff --git a/hinic5/src/tools/micro_log/hinic5_micro_log.h b/hinic5/src/tools/micro_log/hinic5_micro_log.h new file mode 100644 index 00000000..31d67c07 --- /dev/null +++ b/hinic5/src/tools/micro_log/hinic5_micro_log.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef __NIC_MICRO_LOG_H__ +#define __NIC_MICRO_LOG_H__ + +#define MAX_PATH_NAME 256 + +#define MAX_LEN_MICRO_LOG_DATA (64 * 1024) + +#define MAX_NUM_OF_ONE_TIME_ULOG (16 * 1024) + +#define MAX_SIZE_OF_LOG_FILE (512 * 1024 * 1024) + +#define MICRO_LOG_ITEM_LEN 64 + +#define MICRO_LOG_MAX_RECV_NUM_ONE_TIME 20 + +#define MICRO_LOG_MEM_TAG MEM_TAG("MICRO_LOG_INIT") + +#define LINE_CHAR_NUM (20) + +#define MICRO_LOG_POLLING_TIME 1 + +#define MICRO_LOG_Q_CI_MASK (0x000F) +#define MICRO_LOG_D2_CI_MASK (0x00FF) +#define MICRO_LOG_ALL_MASK (0xFFFF) + +/* 此处必须固定为0,工具自动生成的TBL_ID_DFX_LOG_POINTER固定在0位置 */ +#define TBL_ID_DFX_LOG_POINTER 0 + +/* Type */ +#define ULOG_ERR 0 +#define ULOG_WARN 1 +#define ULOG_INFO 2 +#define ULOG_DEBUG 3 + +int hinic5_comm_micro_log_init(struct hinic5_hwdev *hwdev); + +int hinic5_micro_log_func_en(void *hwdev, u8 is_en); + +int hinic5_micro_log_init(void *hwdev); + +void hinic5_micro_log_uninit(void *hwdev); + +void hinic5_micro_log_reset(void *hwdev); + +#define BEIJING_TIMEZONE 8 +#define SECONDS_OF_HOUR 3600 +#define TIMEZONE_ADJUSTMENT(unix_time) \ + (unix_time + (BEIJING_TIMEZONE * SECONDS_OF_HOUR)) + +#endif diff --git a/hinic5/src/tools/micro_log/micro_log_comm.c b/hinic5/src/tools/micro_log/micro_log_comm.c new file mode 100644 index 00000000..edf61d15 --- /dev/null +++ b/hinic5/src/tools/micro_log/micro_log_comm.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ * Version : Initial MIRCO_LOG Draft + * Created : 2025/02/25 + * Last Modified : 2025/02/25 + * Description : 微码主机日志公共函数 + */ +#include "micro_log_comm.h" +#include "comm_defs.h" +#include "hinic5_comm_cmd.h" +#include "hinic5_hw.h" +#include "hinic5_common.h" +#include "hinic5_hwdev.h" + +int hinic5_set_microlog_cmdq(void *hwdev, void *microlog_msg, size_t msg_len, u8 cmd) +{ + int err; + u64 out_param = 0; + struct hinic5_cmd_buf *cmd_buf = NULL; + + if (hinic5_is_chip_present((struct hinic5_hwdev *)hwdev) == false) + return 0; + + cmd_buf = hinic5_alloc_cmd_buf(hwdev); + if (!cmd_buf) { + microlog_err("failed to allocate cmd buf!"); + return -ENOMEM; + } + + cmd_buf->size = msg_len; + memcpy(cmd_buf->buf, microlog_msg, msg_len); + hinic5_cpu_to_be32(cmd_buf->buf, cmd_buf->size); + + err = hinic5_cmdq_direct_resp(hwdev, HINIC5_MOD_COMM, cmd, cmd_buf, + &out_param, 0, HINIC5_CHANNEL_NIC); + if ((err) || (out_param)) { + microlog_err("cmdq_cmd:%d, err:%d, out_param: 0x%llx!", cmd, err, out_param); + err = -EFAULT; + } + + hinic5_free_cmd_buf(hwdev, cmd_buf); + return err; +} + +int hinic5_microlog_gpa_set(void *hwdev, u64 p_addr, u8 lt_index) +{ + cmdq_microlog_gpa_set_s microlog_gpa = { 0 }; + + microlog_gpa.wr_init_pc_h32 = (u32)((p_addr >> 32) & 0xffffffff); + microlog_gpa.wr_init_pc_l32 = (u32)(p_addr & 0xffffffff); + microlog_gpa.lt_index = lt_index; + return hinic5_set_microlog_cmdq(hwdev, (void *)µlog_gpa, + sizeof(cmdq_microlog_gpa_set_s), + COMM_CMD_MICROLOG_GPA_SET); +} + +int hinic5_microlog_ctrl_info_set(void *hwdev, u8 microlog_en, u32 ci_index, + enum log_level_type state) +{ + cmdq_microlog_ctrl_info_set_s microlog_ctrl_info = {{0}}; + size_t msg_len = sizeof(cmdq_microlog_ctrl_info_set_s); + + microlog_ctrl_info.max_num = MICRO_LOG_MAX_QUEUE_DEPTH; + microlog_ctrl_info.microlog_en = microlog_en; + microlog_ctrl_info.state = state; + microlog_ctrl_info.ci_index = ci_index; + return hinic5_set_microlog_cmdq(hwdev, (void *)µlog_ctrl_info, + msg_len, 
COMM_CMD_MICROLOG_CTRL_INFO_SET); +} diff --git a/hinic5/src/tools/micro_log/micro_log_comm.h b/hinic5/src/tools/micro_log/micro_log_comm.h new file mode 100644 index 00000000..bea843fb --- /dev/null +++ b/hinic5/src/tools/micro_log/micro_log_comm.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2026 Huawei Technologies Co., Ltd */ + +#ifndef MICRO_LOG_COMM_H_ +#define MICRO_LOG_COMM_H_ + +#include "ossl_knl.h" + +#define microlog_info(fmt, ...) \ + pr_info("[MICRO_LOG][INFO][%s:%d]:" fmt "\n", __func__, __LINE__, ##__VA_ARGS__) +#define microlog_warning(fmt, ...) \ + pr_warn("[MICRO_LOG][WARN][%s:%d]:" fmt "\n", __func__, __LINE__, ##__VA_ARGS__) +#define microlog_err(fmt, ...) \ + pr_err("[MICRO_LOG][ERROR][%s:%d]:" fmt "\n", __func__, __LINE__, ##__VA_ARGS__) + +#define MICRO_LOG_VIR_ADDR 0 +#define MICRO_LOG_PHY_ADDR 1 +#define MICRO_LOG_VIR_PHY_ADDR 2 + +#define MICRO_LOG_MAX_QUEUE_NUM 256 +#define MICRO_LOG_MAX_QUEUE_DEPTH 256 + +#define MICRO_LOG_MAX_STRING_LEN (128) + +#define LOG_LEVEL_INFO_MAX_SIZE 32 + +struct nic_micro_log_statistics_info { + u32 recv_log_num; +}; + +struct micro_log_info { + struct sdk_thread_info hinic_micro_log_task; + u64 que_addr[MICRO_LOG_VIR_PHY_ADDR][MICRO_LOG_MAX_QUEUE_NUM]; + struct nic_micro_log_statistics_info log_stati_info; + char micro_log_tmpbuf[MICRO_LOG_MAX_STRING_LEN * 8 + 1]; + u8 nic_micro_log_dbg; + u8 nic_micro_log_enable; + struct file *fp_log_file; + struct file *fp_asm_file; + char *micro_log_data_addr; + void *hwdev; + u32 all_ci; +}; + +/* 1)state = 0,不打印日志; + * 2)state = 1,打印err日志 + * 3) state = 2,warn日志:包括err、warn日志 + * 4)state = 3,info日志:包括err、warn、info日志 + */ +enum log_level_type { + ALL_LOG_DROP = 0, + ERR_LOG_PRINT = 1, + WARN_LOG_PRINT = 2, + INFO_LOG_PRINT = 3 +}; + +struct log_level_message { + enum log_level_type level_type; + char level_info[LOG_LEVEL_INFO_MAX_SIZE]; +}; + +int hinic5_set_microlog_cmdq(void *hwdev, void *microlog_msg, size_t msg_len, u8 cmd); +int 
hinic5_microlog_gpa_set(void *hwdev, u64 p_addr, u8 lt_index); +int hinic5_microlog_ctrl_info_set(void *hwdev, u8 microlog_en, u32 ci_index, + enum log_level_type state); + +#endif /* MICRO_LOG_COMM_H_ */ diff --git a/hinic5/src/tools/micro_log/micro_log_index.c b/hinic5/src/tools/micro_log/micro_log_index.c new file mode 100644 index 00000000..ee0b2065 --- /dev/null +++ b/hinic5/src/tools/micro_log/micro_log_index.c @@ -0,0 +1,134 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2026 Huawei Technologies Co., Ltd */ + +#include "ossl_knl.h" + +#include "micro_log_index.h" +#include "comm_defs.h" +#include "hinic5_hw.h" +#include "fw_typedef.h" +#include "mpu_inband_cmd.h" +#include "inband_mpu_cmd_defs.h" + +static int micro_log_read_fw_cfg_info(void *hwdev, fw_info_s *cfg_info) +{ + struct cmd_query_fw query_fw_input; + struct cmd_fw_info query_fw_output; + u16 out_size = sizeof(struct cmd_fw_info); + int ret; + int err = 0; + + memset(&query_fw_input, 0, sizeof(struct cmd_query_fw)); + memset(&query_fw_output, 0, sizeof(struct cmd_fw_info)); + + if (!hwdev || !cfg_info) { + microlog_err("point is null\r\n"); + return -EINVAL; + } + + query_fw_input.offset = 0; + query_fw_input.len = MAX_LOG_BUF_SIZE; + ret = hinic5_msg_to_mgmt_sync(hwdev, HINIC5_MOD_COMM, + COMM_MGMT_CMD_QUERY_FW_INFO, + &query_fw_input, + sizeof(struct cmd_query_fw), + &query_fw_output, &out_size, 0, + HINIC5_CHANNEL_COMM); + if (ret != 0 || out_size == 0 || query_fw_output.head.status != 0) { + microlog_err("Failed to get length. 
ret:%d, status:0x%x, out_size:0x%x\n", + ret, query_fw_output.head.status, out_size); + return ret; + } + + if (query_fw_output.len != query_fw_input.len) { + microlog_err("The length is inconsistent!\n"); + return -EFAULT; + } + + memcpy((u8 *)cfg_info, query_fw_output.data, query_fw_output.len); + return err; +} + +static int micro_log_read_sim_data(void *hwdev, struct nic_log_info *simdata_info, + u32 read_offset, u32 sim_data_len) +{ + u32 ret; + u16 out_size = sizeof(struct nic_log_info); + struct nic_log_info simdata_buf_in; + + if (!hwdev || !simdata_info) { + microlog_err("point is null\r\n"); + return -EINVAL; + } + + memset(simdata_info, 0, sizeof(struct nic_log_info)); + memset(&simdata_buf_in, 0, sizeof(struct nic_log_info)); + simdata_buf_in.offset = read_offset; + simdata_buf_in.log_or_index = NPU_COMM_GET_SIM_DATA; + /* 考虑到hinic新老工具兼容性,用data[3:0]域段替代file_size, 占用32bit */ + *(u32 *)(void *)simdata_buf_in.data = sim_data_len; + + ret = hinic5_msg_to_mgmt_sync(hwdev, HINIC5_MOD_COMM, + COMM_MGMT_CMD_GET_LOG, &simdata_buf_in, + sizeof(struct nic_log_info), simdata_info, + &out_size, 0, HINIC5_CHANNEL_COMM); + if (ret != 0 || out_size == 0 || simdata_info->msg_head.status != 0) { + microlog_err("Failed to read sim_data, ret:%d, status:0x%x, out_size:0x%x\n", + ret, simdata_info->msg_head.status, out_size); + } + return ret; +} + +static int micro_log_get_sim_data_length(void *hwdev, u32 *sim_data_len) +{ + fw_info_s cfg_info; + + if (micro_log_read_fw_cfg_info(hwdev, &cfg_info) != 0) { + microlog_err("Fail to read_fw_cfg_info\n"); + return -EFAULT; + } + + if (cfg_info.fw_attr[FW_TILE_DATA_INDEX].invalid != 0) { + microlog_err("fw_tile_data_index invalid\n"); + return -EFAULT; + } + + *sim_data_len = cfg_info.fw_attr[FW_TILE_DATA_INDEX].fw_len; + return 0; +} + +int mirco_log_get_sim_data_from_flash(void *hwdev, struct micro_log_info *log_info) +{ + u32 sim_data_len = 0; + u32 i; + u32 j; + u32 k; + struct nic_log_info simdata_info; + + if 
(micro_log_get_sim_data_length(hwdev, &sim_data_len) != 0) + return -EFAULT; + microlog_info("get sim_data_len(0x%x) ok.", sim_data_len); + + log_info->micro_log_data_addr = kzalloc((sim_data_len + 1), GFP_KERNEL); + if (!log_info->micro_log_data_addr) + return -EFAULT; + + /* mbox每次最多读1K数据 */ + for (i = 0, j = 0; i < sim_data_len; i += MAX_LOG_BUF_SIZE) { + if (micro_log_read_sim_data(hwdev, &simdata_info, i, sim_data_len) != 0) + goto err_free_mem; + + for (k = 0; k < MAX_LOG_BUF_SIZE && j < sim_data_len; k++, j++) + log_info->micro_log_data_addr[j] = simdata_info.data[k]; + } + + microlog_info("get_sim_data_from_flash ok\n"); + return 0; + +err_free_mem: + if (log_info->micro_log_data_addr) { + kfree((void *)(log_info->micro_log_data_addr)); + log_info->micro_log_data_addr = NULL; + } + return -EFAULT; +} diff --git a/hinic5/src/tools/micro_log/micro_log_index.h b/hinic5/src/tools/micro_log/micro_log_index.h new file mode 100644 index 00000000..4656d72e --- /dev/null +++ b/hinic5/src/tools/micro_log/micro_log_index.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2026 Huawei Technologies Co., Ltd */ + +#ifndef MICRO_LOG_INDEX_H_ +#define MICRO_LOG_INDEX_H_ + +#include "micro_log_comm.h" + +#define FW_TILE_DATA_INDEX 0x5 +int mirco_log_get_sim_data_from_flash(void *hwdev, struct micro_log_info *log_info); + +#endif diff --git a/hinic5/src/tools/micro_log/micro_log_procfs_cmd.c b/hinic5/src/tools/micro_log/micro_log_procfs_cmd.c new file mode 100644 index 00000000..7834e073 --- /dev/null +++ b/hinic5/src/tools/micro_log/micro_log_procfs_cmd.c @@ -0,0 +1,228 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved. 
+ * Version : Initial MIRCO_LOG Draft + * Created : 2025/02/25 + * Last Modified : 2025/02/25 + * Description : 用于动态更改主机日志状态: 支持info日志和err日志动态切换 + */ +#include <linux/kernel.h> +#include <linux/version.h> +#include <linux/proc_fs.h> +#include <linux/types.h> +#include <linux/uaccess.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/stat.h> + +#include "micro_log_procfs_cmd.h" +#include "micro_log_comm.h" +#include "hinic5_hwdev.h" +#include "hinic5_chip_info.h" + +/****************************for proc fs****************************************/ +#define MICRO_LOG_PROCFS_NAME "micro_log" +#define LOG_LEVEL_PROCFS_NAME "log_level" +#define LOG_LEVEL_MAX_LEN 2 /* 正常情况下:只用到两个字符, + * 分别存字符等级、echo自带的换行符 + */ +#define DEC_CODE 10 +#define HINIC_DEV_NAME_LEN 32 +static struct proc_dir_entry *proc_micro_log_dir; +static struct proc_dir_entry *proc_hinic_dev_dir; +static struct proc_dir_entry *proc_log_level_file; +void *g_micro_log_dev; + +#define PROCFS_RD_BUFFER_SIZE (LOG_LEVEL_INFO_MAX_SIZE + 1) +static char log_level_procfs_buffer_rd[PROCFS_RD_BUFFER_SIZE] = {0}; + +static char hinic_dev_name[HINIC_DEV_NAME_LEN + 1] = {0}; +static struct log_level_message log_level_msg[] = { + {ALL_LOG_DROP, "ALL_LOG_DROP"}, {ERR_LOG_PRINT, "ERR_LOG"}, + {WARN_LOG_PRINT, "WARN_LOG"}, {INFO_LOG_PRINT, "INFO_LOG"} +}; + +/* 1)state = 0,不打印日志; + * 2)state = 1,打印err日志 + * 3) state = 2,打印err + warn日志 + * 4)state = 3,打印err + warn + info日志 + */ +static int micro_log_state_set(void *hwdev, enum log_level_type state) +{ + int err; + struct card_node *chip_node; + struct micro_log_info *log_info; + + if (!hwdev) { + microlog_err("hwdev is NULL!"); + return -ENOMEM; + } + + chip_node = (struct card_node *)(((struct hinic5_hwdev *)hwdev)->chip_node); + if (!chip_node) { + microlog_err("chip_node is NULL!"); + return -ENOMEM; + } + + log_info = chip_node->log_info; + if (!log_info) { + microlog_err("log_info is NULL!"); + return -ENOMEM; + } + + err = 
hinic5_microlog_ctrl_info_set(hwdev, log_info->nic_micro_log_enable, + log_info->all_ci, state); + if (err) { + microlog_err("cmdq return fail(0x%x), state: %u", err, state); + return err; + } + microlog_info("set state(%u) ok", state); + return 0; +} + +static ssize_t log_level_proc_write(struct file *file, const char __user *buff, + size_t len, loff_t *off) +{ + ssize_t wr_len; + char log_level_buffer[LOG_LEVEL_MAX_LEN + 1] = { 0 }; + u32 num; + + if (!buff || len == 0) { + microlog_err("proc parameter incorrect: buff is null or size is zero"); + return -EINVAL; + } + + wr_len = (len > LOG_LEVEL_MAX_LEN) ? LOG_LEVEL_MAX_LEN : len; + if (copy_from_user(&log_level_buffer, buff, wr_len) != 0) { + microlog_err("copy_from_user failed"); + return -EFAULT; + } + + log_level_buffer[wr_len] = '\0'; + microlog_info("procfile write: %c, %s, wr_len:0x%x", + log_level_buffer[0], log_level_buffer, (u32)wr_len); + /* 将字符串转为十进制数 */ + num = simple_strtoul(log_level_buffer, NULL, DEC_CODE); + + if (num > INFO_LOG_PRINT) { + microlog_err("get wrong log_level:%s", log_level_buffer); + return -EINVAL; + } + + if (micro_log_state_set(g_micro_log_dev, num) != 0) + return -EINVAL; + + (void)snprintf(log_level_procfs_buffer_rd, PROCFS_RD_BUFFER_SIZE, + "%s\n", log_level_msg[num].level_info); + return wr_len; +} + +static ssize_t log_level_proc_read(struct file *file, char __user *buff, size_t len, loff_t *off) +{ + unsigned long plen = strlen(log_level_procfs_buffer_rd); + ssize_t bytes_to_copy; + + if (*off >= plen) { + /* 如果已经读取完所有数据,则返回0, 否则一直循环打印 */ + return 0; + } + + bytes_to_copy = (len < plen) ? 
len : plen; + microlog_info("copy_to_user bytes_to_copy:%d.", (u32)bytes_to_copy); + + if (copy_to_user(buff, log_level_procfs_buffer_rd, bytes_to_copy) != 0) { + microlog_err("copy to user failed!"); + bytes_to_copy = 0; + } + + *off += bytes_to_copy; + return bytes_to_copy; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0) +static const struct file_operations log_level_proc_fops = { + .read = log_level_proc_read, + .write = log_level_proc_write, +}; +#else +static const struct proc_ops log_level_proc_fops = { + .proc_read = log_level_proc_read, + .proc_write = log_level_proc_write, +}; +#endif + +static int check_params_valid(void *hwdev) +{ + struct card_node *chip_node; + struct micro_log_info *log_info; + int cpy_len; + + if (!hwdev) { + microlog_err("hwdev is NULL!"); + return -ENOMEM; + } + + chip_node = (struct card_node *)(((struct hinic5_hwdev *)hwdev)->chip_node); + if (!chip_node) { + microlog_err("chip_node is NULL!"); + return -ENOMEM; + } + + log_info = chip_node->log_info; + if (!log_info || !log_info->hinic_micro_log_task.name) { + microlog_err("log_info is NULL!"); + return -ENOMEM; + } + + cpy_len = strlen(log_info->hinic_micro_log_task.name); + if (cpy_len >= HINIC_DEV_NAME_LEN) { + microlog_err("len beyond HINIC_DEV_NAME_LEN"); + return -EINVAL; + } + (void)strscpy(hinic_dev_name, log_info->hinic_micro_log_task.name, HINIC_DEV_NAME_LEN); + hinic_dev_name[cpy_len + 1] = '\0'; + return 0; +} + +int micro_log_procfs_init(void *hwdev) +{ + int ret = check_params_valid(hwdev); + + if (ret != 0) + return ret; + + proc_micro_log_dir = proc_mkdir(MICRO_LOG_PROCFS_NAME, NULL); + if (!proc_micro_log_dir) { + microlog_err("Failed to create /proc/%s directory", MICRO_LOG_PROCFS_NAME); + return -ENOMEM; + } + + proc_hinic_dev_dir = proc_mkdir(hinic_dev_name, proc_micro_log_dir); + if (!proc_hinic_dev_dir) { + microlog_err("Failed to create /proc/%s directory", MICRO_LOG_PROCFS_NAME); + return -ENOMEM; + } + + proc_log_level_file = 
proc_create(LOG_LEVEL_PROCFS_NAME, 0644, + proc_hinic_dev_dir, + &log_level_proc_fops); + if (!proc_log_level_file) { + remove_proc_entry(MICRO_LOG_PROCFS_NAME, NULL); + microlog_err("Failed to create /proc/micro_log/%s file", LOG_LEVEL_PROCFS_NAME); + return -ENOMEM; + } + + microlog_info("/proc/micro_log/%s created", LOG_LEVEL_PROCFS_NAME); + g_micro_log_dev = hwdev; + (void)snprintf(log_level_procfs_buffer_rd, PROCFS_RD_BUFFER_SIZE, + "%s\n", log_level_msg[INFO_LOG_PRINT].level_info); + return 0; +} + +void micro_log_procfs_exit(void) +{ + remove_proc_entry(LOG_LEVEL_PROCFS_NAME, proc_hinic_dev_dir); + remove_proc_entry(hinic_dev_name, proc_micro_log_dir); + remove_proc_entry(MICRO_LOG_PROCFS_NAME, NULL); + microlog_info("Micro Log Module removed"); +} diff --git a/hinic5/src/tools/micro_log/micro_log_procfs_cmd.h b/hinic5/src/tools/micro_log/micro_log_procfs_cmd.h new file mode 100644 index 00000000..b435283d --- /dev/null +++ b/hinic5/src/tools/micro_log/micro_log_procfs_cmd.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2026 Huawei Technologies Co., Ltd */ + +#ifndef MICRO_LOG_PROCFS_CMD_H_ +#define MICRO_LOG_PROCFS_CMD_H_ + +#include <linux/types.h> + +int micro_log_procfs_init(void *hwdev); +void micro_log_procfs_exit(void); + +#endif /* MICRO_LOG_PROCFS_CMD_H_ */ -- 2.45.1.windows.1
反馈: 您发送到kernel@openeuler.org的补丁/补丁集,已成功转换为PR! PR链接地址: https://atomgit.com/openeuler/kernel/merge_requests/22280 邮件列表地址:https://mailweb.openeuler.org/archives/list/kernel@openeuler.org/message/UU4... FeedBack: The patch(es) which you have sent to kernel@openeuler.org mailing list has been converted to a pull request successfully! Pull request link: https://atomgit.com/openeuler/kernel/merge_requests/22280 Mailing list address: https://mailweb.openeuler.org/archives/list/kernel@openeuler.org/message/UU4...
Participants (2):
- LinKun
- patchwork bot