driver inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9EMOF
CVE: NA
---------------------------------
Add the dependencies required by the Huawei Intelligent Network Card (hinic3) RDMA driver: the bond, CQM, and multi-host management components.
Signed-off-by: Shuai Wu wushuai51@huawei.com --- Documentation/networking/hinic3.rst | 15 + MAINTAINERS | 4 + drivers/net/ethernet/huawei/hinic3/Makefile | 22 +- .../ethernet/huawei/hinic3/bond/hinic3_bond.c | 1042 +++++++++ .../ethernet/huawei/hinic3/bond/hinic3_bond.h | 98 + .../ethernet/huawei/hinic3/comm_msg_intf.h | 565 +---- .../ethernet/huawei/hinic3/cqm/cqm_bat_cla.c | 2056 +++++++++++++++++ .../ethernet/huawei/hinic3/cqm/cqm_bat_cla.h | 214 ++ .../huawei/hinic3/cqm/cqm_bitmap_table.c | 1454 ++++++++++++ .../huawei/hinic3/cqm/cqm_bitmap_table.h | 66 + .../huawei/hinic3/cqm/cqm_bloomfilter.c | 535 +++++ .../huawei/hinic3/cqm/cqm_bloomfilter.h | 53 + .../net/ethernet/huawei/hinic3/cqm/cqm_cmd.c | 250 ++ .../net/ethernet/huawei/hinic3/cqm/cqm_cmd.h | 39 + .../net/ethernet/huawei/hinic3/cqm/cqm_db.c | 506 ++++ .../net/ethernet/huawei/hinic3/cqm/cqm_db.h | 36 + .../ethernet/huawei/hinic3/cqm/cqm_define.h | 54 + .../net/ethernet/huawei/hinic3/cqm/cqm_main.c | 1743 ++++++++++++++ .../net/ethernet/huawei/hinic3/cqm/cqm_main.h | 380 +++ .../ethernet/huawei/hinic3/cqm/cqm_memsec.c | 665 ++++++ .../ethernet/huawei/hinic3/cqm/cqm_memsec.h | 23 + .../ethernet/huawei/hinic3/cqm/cqm_object.c | 1664 +++++++++++++ .../ethernet/huawei/hinic3/cqm/cqm_object.h | 714 ++++++ .../huawei/hinic3/cqm/cqm_object_intern.c | 1467 ++++++++++++ .../huawei/hinic3/cqm/cqm_object_intern.h | 93 + .../net/ethernet/huawei/hinic3/cqm/readme.txt | 3 + .../net/ethernet/huawei/hinic3/hinic3_crm.h | 110 +- .../net/ethernet/huawei/hinic3/hinic3_dbg.c | 2 + .../net/ethernet/huawei/hinic3/hinic3_hw.h | 51 +- .../ethernet/huawei/hinic3/hinic3_mag_cfg.c | 4 +- .../huawei/hinic3/hinic3_mgmt_interface.h | 377 +-- .../net/ethernet/huawei/hinic3/hinic3_nic.h | 1 - .../ethernet/huawei/hinic3/hinic3_nic_cfg.h | 1 + .../ethernet/huawei/hinic3/hinic3_srv_nic.h | 5 + .../ethernet/huawei/hinic3/hw/hinic3_cmdq.c | 33 +- .../ethernet/huawei/hinic3/hw/hinic3_cmdq.h | 7 +- .../ethernet/huawei/hinic3/hw/hinic3_csr.h | 1 + .../huawei/hinic3/hw/hinic3_devlink.h | 24 + .../ethernet/huawei/hinic3/hw/hinic3_eqs.c | 35 + .../ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c | 155 +- .../ethernet/huawei/hinic3/hw/hinic3_hw_cfg.h | 17 +- .../huawei/hinic3/hw/hinic3_hw_comm.c | 148 +- .../huawei/hinic3/hw/hinic3_hw_comm.h | 2 +- .../ethernet/huawei/hinic3/hw/hinic3_hw_mt.c | 39 +- .../ethernet/huawei/hinic3/hw/hinic3_hwdev.c | 212 +- .../ethernet/huawei/hinic3/hw/hinic3_hwdev.h | 33 +- .../ethernet/huawei/hinic3/hw/hinic3_hwif.c | 54 + .../ethernet/huawei/hinic3/hw/hinic3_mbox.c | 113 +- .../ethernet/huawei/hinic3/hw/hinic3_mbox.h | 82 +- .../ethernet/huawei/hinic3/hw/hinic3_mgmt.c | 76 +- .../ethernet/huawei/hinic3/hw/hinic3_mgmt.h | 5 +- .../huawei/hinic3/hw/hinic3_multi_host_mgmt.c | 1231 ++++++++++ .../huawei/hinic3/hw/hinic3_multi_host_mgmt.h | 124 + .../huawei/hinic3/hw/hinic3_nictool.c | 1 + .../huawei/hinic3/hw/hinic3_pci_id_tbl.h | 30 +- .../huawei/hinic3/hw/ossl_knl_linux.c | 20 +- .../hinic3/include/bond/bond_common_defs.h | 69 + .../include/cfg_mgmt/cfg_mgmt_mpu_cmd.h | 12 + .../include/cfg_mgmt/cfg_mgmt_mpu_cmd_defs.h | 212 ++ .../huawei/hinic3/include/cqm/cqm_npu_cmd.h | 31 + .../hinic3/include/cqm/cqm_npu_cmd_defs.h | 61 + .../huawei/hinic3/include/hinic3_common.h | 181 ++ .../huawei/hinic3/include/hinic3_cqm.h | 364 +++ .../huawei/hinic3/include/hinic3_cqm_define.h | 52 + .../huawei/hinic3/include/hinic3_lld.h | 223 ++ .../huawei/hinic3/include/hinic3_profile.h | 148 ++ .../huawei/hinic3/include/mpu/mag_mpu_cmd.h | 70 + 
.../hinic3/include/mpu/mpu_board_defs.h | 71 + .../hinic3/include/mpu/mpu_cmd_base_defs.h | 116 + .../hinic3/include/mpu/mpu_inband_cmd.h | 187 ++ .../hinic3/include/mpu/mpu_inband_cmd_defs.h | 1078 +++++++++ .../include/mpu/mpu_outband_ncsi_cmd_defs.h | 205 ++ .../huawei/hinic3/include/mpu/nic_cfg_comm.h | 55 + .../huawei/hinic3/include/ossl_types.h | 144 ++ .../include/public/npu_cmdq_base_defs.h | 232 ++ .../ethernet/huawei/hinic3/include/readme.txt | 1 + .../hinic3/include/vmsec/vmsec_mpu_common.h | 107 + .../huawei/hinic3/include/vram_common.h | 65 + drivers/net/ethernet/huawei/hinic3/mag_cmd.h | 272 ++- drivers/net/ethernet/huawei/hinic3/ossl_knl.h | 4 + .../ethernet/huawei/hinic3/ossl_knl_linux.h | 70 +- 81 files changed, 19659 insertions(+), 1125 deletions(-) create mode 100644 drivers/net/ethernet/huawei/hinic3/bond/hinic3_bond.c create mode 100644 drivers/net/ethernet/huawei/hinic3/bond/hinic3_bond.h create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_bat_cla.c create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_bat_cla.h create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_bitmap_table.c create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_bitmap_table.h create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_bloomfilter.c create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_bloomfilter.h create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_cmd.c create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_cmd.h create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_db.c create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_db.h create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_define.h create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_main.c create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_main.h create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_memsec.c create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_memsec.h create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_object.c create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_object.h create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_object_intern.c create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/cqm_object_intern.h create mode 100644 drivers/net/ethernet/huawei/hinic3/cqm/readme.txt create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_multi_host_mgmt.c create mode 100644 drivers/net/ethernet/huawei/hinic3/hw/hinic3_multi_host_mgmt.h create mode 100644 drivers/net/ethernet/huawei/hinic3/include/bond/bond_common_defs.h create mode 100644 drivers/net/ethernet/huawei/hinic3/include/cfg_mgmt/cfg_mgmt_mpu_cmd.h create mode 100644 drivers/net/ethernet/huawei/hinic3/include/cfg_mgmt/cfg_mgmt_mpu_cmd_defs.h create mode 100644 drivers/net/ethernet/huawei/hinic3/include/cqm/cqm_npu_cmd.h create mode 100644 drivers/net/ethernet/huawei/hinic3/include/cqm/cqm_npu_cmd_defs.h create mode 100644 drivers/net/ethernet/huawei/hinic3/include/hinic3_common.h create mode 100644 drivers/net/ethernet/huawei/hinic3/include/hinic3_cqm.h create mode 100644 drivers/net/ethernet/huawei/hinic3/include/hinic3_cqm_define.h create mode 100644 drivers/net/ethernet/huawei/hinic3/include/hinic3_lld.h create mode 100644 drivers/net/ethernet/huawei/hinic3/include/hinic3_profile.h create mode 100644 drivers/net/ethernet/huawei/hinic3/include/mpu/mag_mpu_cmd.h create mode 100644 drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_board_defs.h create mode 100644 
drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_cmd_base_defs.h create mode 100644 drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd.h create mode 100644 drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd_defs.h create mode 100644 drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_outband_ncsi_cmd_defs.h create mode 100644 drivers/net/ethernet/huawei/hinic3/include/mpu/nic_cfg_comm.h create mode 100644 drivers/net/ethernet/huawei/hinic3/include/ossl_types.h create mode 100644 drivers/net/ethernet/huawei/hinic3/include/public/npu_cmdq_base_defs.h create mode 100644 drivers/net/ethernet/huawei/hinic3/include/readme.txt create mode 100644 drivers/net/ethernet/huawei/hinic3/include/vmsec/vmsec_mpu_common.h create mode 100644 drivers/net/ethernet/huawei/hinic3/include/vram_common.h
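Note for reviewers: below is a minimal, illustrative sketch (not part of this patch) of how a dependent service module, such as the RDMA/RoCE driver this series prepares for, could consume the bond interface exported by bond/hinic3_bond.h. The hinic3_bond_* functions, HINIC3_BOND_USER_ROCE and BOND_BEFORE_ACTIVE come from that header; the module boilerplate, the callback body and the "bond0" name are assumptions made only for the example.

/*
 * Illustrative sketch only - not part of this patch. The module
 * boilerplate, the callback body and the "bond0" name are assumptions.
 */
#include <linux/module.h>
#include <linux/kernel.h>

#include "hinic3_bond.h"

static u16 demo_bond_id;

/* Called around bond activate/modify/deactivate transitions. */
static void demo_bond_service_cb(const char *bond_name, void *bond_attr,
				 enum bond_service_proc_pos pos)
{
	if (pos == BOND_BEFORE_ACTIVE)
		pr_info("demo: bond %s is about to be activated\n", bond_name);
}

static int __init demo_init(void)
{
	int err;

	/* Get bond state notifications for the RoCE user slot. */
	err = hinic3_bond_register_service_func(HINIC3_BOND_USER_ROCE,
						demo_bond_service_cb);
	if (err)
		return err;

	/* Attach to an existing kernel bond; "bond0" is just an example. */
	err = hinic3_bond_attach("bond0", HINIC3_BOND_USER_ROCE, &demo_bond_id);
	if (err) {
		hinic3_bond_unregister_service_func(HINIC3_BOND_USER_ROCE);
		return err;
	}

	return 0;
}

static void __exit demo_exit(void)
{
	hinic3_bond_detach(demo_bond_id, HINIC3_BOND_USER_ROCE);
	hinic3_bond_unregister_service_func(HINIC3_BOND_USER_ROCE);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");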
diff --git a/Documentation/networking/hinic3.rst b/Documentation/networking/hinic3.rst
index df665bdd9..199e2a63c 100644
--- a/Documentation/networking/hinic3.rst
+++ b/Documentation/networking/hinic3.rst
@@ -84,6 +84,21 @@ Data. (hinic3_hw_qp.c, hinic3_hw_qp.h, hinic3_hw_qp_ctxt.h)
 IO - de/constructs all the IO components. (hinic3_hw_io.c, hinic3_hw_io.h)
+CQM components:
+===============
+
+The CQM module organizes host memory into CLA tables and hands that memory
+to the chip through the BAT table. The chip uses this host memory to store
+context information and queue information (SCQ/SRQ).
+(cqm_bat_cla.c, cqm_bat_cla.h, cqm_bitmap_table.c, cqm_bitmap_table.h)
+
+When a packet comes in over the PCIe link, the chip parses the 5-tuple
+(fields such as sid, did, and hostid) and writes the parsed data into the
+queue in the form of an SCQE. The driver can then fetch the data directly
+from the queue (through MPDK polling) and process it, which is how the
+offload is implemented.
+(cqm_main.c, cqm_main.h, cqm_db.c, cqm_db.h)
+
 HW device:
 ==========
diff --git a/MAINTAINERS b/MAINTAINERS
index cd4f9e589..42b570c27 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9667,6 +9667,10 @@ L: netdev@vger.kernel.org
 S: Supported
 F: Documentation/networking/hinic3.rst
 F: drivers/net/ethernet/huawei/hinic3/
+F: drivers/net/ethernet/huawei/hinic3/bond/
+F: drivers/net/ethernet/huawei/hinic3/cqm/
+F: drivers/net/ethernet/huawei/hinic3/hw/
+F: drivers/net/ethernet/huawei/hinic3/include/
HUGETLB SUBSYSTEM M: Mike Kravetz mike.kravetz@oracle.com diff --git a/drivers/net/ethernet/huawei/hinic3/Makefile b/drivers/net/ethernet/huawei/hinic3/Makefile index fd00ba5b1..7304b1f5b 100644 --- a/drivers/net/ethernet/huawei/hinic3/Makefile +++ b/drivers/net/ethernet/huawei/hinic3/Makefile @@ -1,5 +1,14 @@ # SPDX-License-Identifier: GPL-2.0-only ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/ +ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/hw/ +ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/cqm/ +ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/include/ +ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/include/cqm/ +ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/include/public/ +ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/include/cfg_mgmt/ +ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/include/mpu/ +ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/include/bond/ +ccflags-y += -I$(srctree)/drivers/net/ethernet/huawei/hinic3/include/vmsec/
obj-$(CONFIG_HINIC3) += hinic3.o hinic3-objs := hw/hinic3_hwdev.o \ @@ -23,6 +32,8 @@ hinic3-objs := hw/hinic3_hwdev.o \ hw/hinic3_nictool.o \ hw/hinic3_devlink.o \ hw/ossl_knl_linux.o \ + hw/hinic3_multi_host_mgmt.o \ + bond/hinic3_bond.o \ hinic3_main.o \ hinic3_tx.o \ hinic3_rx.o \ @@ -42,4 +53,13 @@ hinic3-objs := hw/hinic3_hwdev.o \ hinic3_rss_cfg.o \ hinic3_nic_event.o \ hinic3_nic_io.o \ - hinic3_nic_dbg.o \ No newline at end of file + hinic3_nic_dbg.o \ + cqm/cqm_bat_cla.o \ + cqm/cqm_bitmap_table.o \ + cqm/cqm_object_intern.o \ + cqm/cqm_bloomfilter.o \ + cqm/cqm_cmd.o \ + cqm/cqm_db.o \ + cqm/cqm_object.o \ + cqm/cqm_main.o \ + cqm/cqm_memsec.o diff --git a/drivers/net/ethernet/huawei/hinic3/bond/hinic3_bond.c b/drivers/net/ethernet/huawei/hinic3/bond/hinic3_bond.c new file mode 100644 index 000000000..3c284bb0e --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/bond/hinic3_bond.c @@ -0,0 +1,1042 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [NIC]" fmt + +#include <net/sock.h> +#include <net/bonding.h> +#include <linux/rtnetlink.h> +#include <linux/net.h> +#include <linux/mutex.h> +#include <linux/netdevice.h> +#include <linux/version.h> + +#include "hinic3_lld.h" +#include "hinic3_srv_nic.h" +#include "hinic3_nic_dev.h" +#include "hinic3_hw.h" +#include "mpu_inband_cmd.h" +#include "hinic3_hwdev.h" +#include "hinic3_bond.h" + +#define PORT_INVALID_ID 0xFF + +#define STATE_SYNCHRONIZATION_INDEX 3 + +struct hinic3_bond_dev { + char name[BOND_NAME_MAX_LEN]; + struct bond_attr bond_attr; + struct bond_attr new_attr; + struct bonding *bond; + void *ppf_hwdev; + struct kref ref; +#define BOND_DEV_STATUS_IDLE 0x0 +#define BOND_DEV_STATUS_ACTIVATED 0x1 + u8 status; + u8 slot_used[HINIC3_BOND_USER_NUM]; + struct workqueue_struct *wq; + struct delayed_work bond_work; + struct bond_tracker tracker; + spinlock_t lock; /* lock for change status */ +}; + +typedef void (*bond_service_func)(const char *bond_name, void *bond_attr, + enum bond_service_proc_pos pos); + +static DEFINE_MUTEX(g_bond_service_func_mutex); + +static bond_service_func g_bond_service_func[HINIC3_BOND_USER_NUM]; + +struct hinic3_bond_mngr { + u32 cnt; + struct hinic3_bond_dev *bond_dev[BOND_MAX_NUM]; + struct socket *rtnl_sock; +}; + +static struct hinic3_bond_mngr bond_mngr = { .cnt = 0 }; +static DEFINE_MUTEX(g_bond_mutex); + +static bool bond_dev_is_activated(const struct hinic3_bond_dev *bdev) +{ + return bdev->status == BOND_DEV_STATUS_ACTIVATED; +} + +#define PCI_DBDF(dom, bus, dev, func) \ + (((dom) << 16) | ((bus) << 8) | ((dev) << 3) | ((func) & 0x7)) + +#ifdef __PCLINT__ +static inline bool netif_is_bond_master(const struct net_device *dev) +{ + return (dev->flags & IFF_MASTER) && (dev->priv_flags & IFF_BONDING); +} +#endif + +static u32 bond_gen_uplink_id(struct hinic3_bond_dev *bdev) +{ + u32 uplink_id = 0; + u8 i; + struct hinic3_nic_dev *nic_dev = NULL; + struct pci_dev *pdev = NULL; + u32 domain, bus, dev, func; + + spin_lock(&bdev->lock); + for (i = 0; i < BOND_PORT_MAX_NUM; i++) { + if (BITMAP_JUDGE(bdev->bond_attr.slaves, i)) { + if (!bdev->tracker.ndev[i]) + continue; + nic_dev = netdev_priv(bdev->tracker.ndev[i]); + pdev = nic_dev->pdev; + domain = (u32)pci_domain_nr(pdev->bus); + bus = pdev->bus->number; + dev = PCI_SLOT(pdev->devfn); + func = PCI_FUNC(pdev->devfn); + uplink_id = PCI_DBDF(domain, bus, dev, func); + break; + } + } + spin_unlock(&bdev->lock); + + return uplink_id; +} + +static struct 
hinic3_nic_dev *get_nic_dev_safe(struct net_device *ndev) +{ + struct hinic3_lld_dev *lld_dev = NULL; + + lld_dev = hinic3_get_lld_dev_by_netdev(ndev); + if (!lld_dev) + return NULL; + + return netdev_priv(ndev); +} + +static u8 bond_get_slaves_bitmap(struct hinic3_bond_dev *bdev, struct bonding *bond) +{ + struct slave *slave = NULL; + struct list_head *iter = NULL; + struct hinic3_nic_dev *nic_dev = NULL; + u8 bitmap = 0; + u8 port_id; + + rcu_read_lock(); + bond_for_each_slave_rcu(bond, slave, iter) { + nic_dev = get_nic_dev_safe(slave->dev); + if (!nic_dev) + continue; + + port_id = hinic3_physical_port_id(nic_dev->hwdev); + BITMAP_SET(bitmap, port_id); + (void)iter; + } + rcu_read_unlock(); + + return bitmap; +} + +static void bond_update_attr(struct hinic3_bond_dev *bdev, struct bonding *bond) +{ + spin_lock(&bdev->lock); + + bdev->new_attr.bond_mode = (u16)bond->params.mode; + bdev->new_attr.bond_id = bdev->bond_attr.bond_id; + bdev->new_attr.up_delay = (u16)bond->params.updelay; + bdev->new_attr.down_delay = (u16)bond->params.downdelay; + bdev->new_attr.slaves = 0; + bdev->new_attr.active_slaves = 0; + bdev->new_attr.lacp_collect_slaves = 0; + bdev->new_attr.first_roce_func = DEFAULT_ROCE_BOND_FUNC; + + /* Only support L2/L34/L23 three policy */ + if (bond->params.xmit_policy <= BOND_XMIT_POLICY_LAYER23) + bdev->new_attr.xmit_hash_policy = (u8)bond->params.xmit_policy; + else + bdev->new_attr.xmit_hash_policy = BOND_XMIT_POLICY_LAYER2; + + bdev->new_attr.slaves = bond_get_slaves_bitmap(bdev, bond); + + spin_unlock(&bdev->lock); +} + +static u8 bond_get_netdev_idx(const struct hinic3_bond_dev *bdev, + const struct net_device *ndev) +{ + u8 i; + + for (i = 0; i < BOND_PORT_MAX_NUM; i++) { + if (bdev->tracker.ndev[i] == ndev) + return i; + } + + return PORT_INVALID_ID; +} + +static u8 bond_dev_track_port(struct hinic3_bond_dev *bdev, + struct net_device *ndev) +{ + u8 port_id; + void *ppf_hwdev = NULL; + struct hinic3_nic_dev *nic_dev = NULL; + struct hinic3_lld_dev *ppf_lld_dev = NULL; + + nic_dev = get_nic_dev_safe(ndev); + if (!nic_dev) { + pr_warn("hinic3_bond: invalid slave: %s\n", ndev->name); + return PORT_INVALID_ID; + } + + ppf_lld_dev = hinic3_get_ppf_lld_dev_unsafe(nic_dev->lld_dev); + if (ppf_lld_dev) + ppf_hwdev = ppf_lld_dev->hwdev; + + pr_info("hinic3_bond: track ndev:%s", ndev->name); + port_id = hinic3_physical_port_id(nic_dev->hwdev); + + spin_lock(&bdev->lock); + /* attach netdev to the port position associated with it */ + if (bdev->tracker.ndev[port_id]) { + pr_warn("hinic3_bond: Old ndev:%s is replaced\n", + bdev->tracker.ndev[port_id]->name); + } else { + bdev->tracker.cnt++; + } + bdev->tracker.ndev[port_id] = ndev; + bdev->tracker.netdev_state[port_id].link_up = 0; + bdev->tracker.netdev_state[port_id].tx_enabled = 0; + if (!bdev->ppf_hwdev) + bdev->ppf_hwdev = ppf_hwdev; + pr_info("TRACK cnt: %d, slave_name(%s)\n", bdev->tracker.cnt, ndev->name); + spin_unlock(&bdev->lock); + + return port_id; +} + +static void bond_dev_untrack_port(struct hinic3_bond_dev *bdev, u8 idx) +{ + spin_lock(&bdev->lock); + + if (bdev->tracker.ndev[idx]) { + pr_info("hinic3_bond: untrack port:%u ndev:%s cnt:%d\n", idx, + bdev->tracker.ndev[idx]->name, bdev->tracker.cnt); + bdev->tracker.ndev[idx] = NULL; + bdev->tracker.cnt--; + } + + spin_unlock(&bdev->lock); +} + +static void bond_slave_event(struct hinic3_bond_dev *bdev, struct slave *slave) +{ + u8 idx; + + idx = bond_get_netdev_idx(bdev, slave->dev); + if (idx == PORT_INVALID_ID) + idx = bond_dev_track_port(bdev, slave->dev); + 
if (idx == PORT_INVALID_ID) + return; + + spin_lock(&bdev->lock); + bdev->tracker.netdev_state[idx].link_up = bond_slave_is_up(slave); + bdev->tracker.netdev_state[idx].tx_enabled = bond_slave_is_up(slave) && + bond_is_active_slave(slave); + spin_unlock(&bdev->lock); + + queue_delayed_work(bdev->wq, &bdev->bond_work, 0); +} + +static bool bond_eval_bonding_stats(const struct hinic3_bond_dev *bdev, + struct bonding *bond) +{ + int mode; + + mode = BOND_MODE(bond); + if (mode != BOND_MODE_8023AD && + mode != BOND_MODE_XOR && + mode != BOND_MODE_ACTIVEBACKUP) { + pr_err("hinic3_bond: Wrong mode:%d\n", mode); + return false; + } + + return bdev->tracker.cnt > 0; +} + +static void bond_master_event(struct hinic3_bond_dev *bdev, + struct bonding *bond) +{ + spin_lock(&bdev->lock); + bdev->tracker.is_bonded = bond_eval_bonding_stats(bdev, bond); + spin_unlock(&bdev->lock); + + queue_delayed_work(bdev->wq, &bdev->bond_work, 0); +} + +static struct hinic3_bond_dev *bond_get_bdev(const struct bonding *bond) +{ + struct hinic3_bond_dev *bdev = NULL; + int bid; + + mutex_lock(&g_bond_mutex); + for (bid = BOND_FIRST_ID; bid <= BOND_MAX_ID; bid++) { + bdev = bond_mngr.bond_dev[bid]; + if (!bdev) + continue; + + if (bond == bdev->bond) { + mutex_unlock(&g_bond_mutex); + return bdev; + } + } + mutex_unlock(&g_bond_mutex); + return NULL; +} + +static void bond_handle_rtnl_event(struct net_device *ndev) +{ + struct hinic3_bond_dev *bdev = NULL; + struct bonding *bond = NULL; + struct slave *slave = NULL; + + if (netif_is_bond_master(ndev)) { + bond = netdev_priv(ndev); + bdev = bond_get_bdev(bond); + } else if (netif_is_bond_slave(ndev)) { + /*lint -e(160) */ + slave = bond_slave_get_rtnl(ndev); + if (slave) { + bond = bond_get_bond_by_slave(slave); + bdev = bond_get_bdev(bond); + } + } + + if (!bond || !bdev) + return; + + bond_update_attr(bdev, bond); + + if (slave) + bond_slave_event(bdev, slave); + else + bond_master_event(bdev, bond); +} + +static void bond_rtnl_data_ready(struct sock *sk) +{ + struct net_device *ndev = NULL; + struct ifinfomsg *ifinfo = NULL; + struct nlmsghdr *hdr = NULL; + struct sk_buff *skb = NULL; + int err = 0; + + skb = skb_recv_datagram(sk, 0, &err); + if (err != 0 || !skb) + return; + + hdr = (struct nlmsghdr *)skb->data; + if (!hdr || + !NLMSG_OK(hdr, skb->len) || + hdr->nlmsg_type != RTM_NEWLINK || + !rtnl_is_locked()) { + goto free_skb; + } + + ifinfo = nlmsg_data(hdr); + ndev = dev_get_by_index(&init_net, ifinfo->ifi_index); + if (ndev) { + bond_handle_rtnl_event(ndev); + dev_put(ndev); + } + +free_skb: + kfree_skb(skb); +} + +static int bond_enable_netdev_event(void) +{ + struct sockaddr_nl addr = { + .nl_family = AF_NETLINK, + .nl_groups = RTNLGRP_LINK, + }; + int err; + struct socket **rtnl_sock = &bond_mngr.rtnl_sock; + + err = sock_create_kern(&init_net, AF_NETLINK, SOCK_DGRAM, NETLINK_ROUTE, + rtnl_sock); + if (err) { + pr_err("hinic3_bond: Couldn't create rtnl socket.\n"); + *rtnl_sock = NULL; + return err; + } + + (*rtnl_sock)->sk->sk_data_ready = bond_rtnl_data_ready; + (*rtnl_sock)->sk->sk_allocation = GFP_KERNEL; + + err = kernel_bind(*rtnl_sock, (struct sockaddr *)(u8 *)&addr, sizeof(addr)); + if (err) { + pr_err("hinic3_bond: Couldn't bind rtnl socket.\n"); + sock_release(*rtnl_sock); + *rtnl_sock = NULL; + } + + return err; +} + +static void bond_disable_netdev_event(void) +{ + if (bond_mngr.rtnl_sock) + sock_release(bond_mngr.rtnl_sock); +} + +static int bond_send_upcmd(struct hinic3_bond_dev *bdev, struct bond_attr *attr, + u8 cmd_type) +{ + int err, len; 
+ struct hinic3_bond_cmd cmd = {0}; + u16 out_size = sizeof(cmd); + + cmd.sub_cmd = 0; + cmd.ret_status = 0; + + if (attr) { + memcpy(&cmd.attr, attr, sizeof(*attr)); + } else { + cmd.attr.bond_id = bdev->bond_attr.bond_id; + cmd.attr.slaves = bdev->bond_attr.slaves; + } + + len = sizeof(cmd.bond_name); + if (cmd_type == MPU_CMD_BOND_CREATE) { + strscpy(cmd.bond_name, bdev->name, len); + cmd.bond_name[sizeof(cmd.bond_name) - 1] = '\0'; + } + + err = hinic3_msg_to_mgmt_sync(bdev->ppf_hwdev, HINIC3_MOD_OVS, cmd_type, + &cmd, sizeof(cmd), &cmd, &out_size, 0, + HINIC3_CHANNEL_NIC); + if (err != 0 || !out_size || cmd.ret_status != 0) { + pr_err("hinic3_bond: uP cmd: %u failed, err: %d, sts: %u, out size: %u\n", + cmd_type, err, cmd.ret_status, out_size); + err = -EIO; + } + + return err; +} + +static int bond_upcmd_deactivate(struct hinic3_bond_dev *bdev) +{ + int err; + u16 id_tmp; + + if (bdev->status == BOND_DEV_STATUS_IDLE) + return 0; + + pr_info("hinic3_bond: deactivate bond: %u\n", bdev->bond_attr.bond_id); + + err = bond_send_upcmd(bdev, NULL, MPU_CMD_BOND_DELETE); + if (err == 0) { + id_tmp = bdev->bond_attr.bond_id; + memset(&bdev->bond_attr, 0, sizeof(bdev->bond_attr)); + bdev->status = BOND_DEV_STATUS_IDLE; + bdev->bond_attr.bond_id = id_tmp; + if (!bdev->tracker.cnt) + bdev->ppf_hwdev = NULL; + } + + return err; +} + +static void bond_pf_bitmap_set(struct hinic3_bond_dev *bdev, u8 index) +{ + struct hinic3_nic_dev *nic_dev = NULL; + u8 pf_id; + + nic_dev = netdev_priv(bdev->tracker.ndev[index]); + if (!nic_dev) + return; + + pf_id = hinic3_pf_id_of_vf(nic_dev->hwdev); + BITMAP_SET(bdev->new_attr.bond_pf_bitmap, pf_id); +} + +static void bond_update_slave_info(struct hinic3_bond_dev *bdev, + struct bond_attr *attr) +{ + struct net_device *ndev = NULL; + u8 i; + + if (!netif_running(bdev->bond->dev)) + return; + + if (attr->bond_mode == BOND_MODE_ACTIVEBACKUP) { + rcu_read_lock(); + ndev = bond_option_active_slave_get_rcu(bdev->bond); + rcu_read_unlock(); + } + + for (i = 0; i < BOND_PORT_MAX_NUM; i++) { + if (!BITMAP_JUDGE(attr->slaves, i)) { + if (BITMAP_JUDGE(bdev->bond_attr.slaves, i)) + bond_dev_untrack_port(bdev, i); + + continue; + } + + bond_pf_bitmap_set(bdev, i); + if (!bdev->tracker.netdev_state[i].tx_enabled) + continue; + + if (attr->bond_mode == BOND_MODE_8023AD) { + BITMAP_SET(attr->active_slaves, i); + BITMAP_SET(attr->lacp_collect_slaves, i); + } else if (attr->bond_mode == BOND_MODE_XOR) { + BITMAP_SET(attr->active_slaves, i); + } else if (ndev && (ndev == bdev->tracker.ndev[i])) { + /* BOND_MODE_ACTIVEBACKUP */ + BITMAP_SET(attr->active_slaves, i); + break; + } + } +} + +static int bond_upcmd_config(struct hinic3_bond_dev *bdev, + struct bond_attr *attr) +{ + int err; + + bond_update_slave_info(bdev, attr); + attr->bond_pf_bitmap = bdev->new_attr.bond_pf_bitmap; + + if (memcmp(&bdev->bond_attr, attr, sizeof(struct bond_attr)) == 0) + return 0; + + pr_info("hinic3_bond: Config bond: %u\n", attr->bond_id); + pr_info("mode:%u, up_d:%u, down_d:%u, hash:%u, slaves:%u, ap:%u, cs:%u\n", + attr->bond_mode, + attr->up_delay, + attr->down_delay, + attr->xmit_hash_policy, + attr->slaves, + attr->active_slaves, + attr->lacp_collect_slaves); + pr_info("bond_pf_bitmap: 0x%x\n", attr->bond_pf_bitmap); + + err = bond_send_upcmd(bdev, attr, MPU_CMD_BOND_SET_ATTR); + if (!err) + memcpy(&bdev->bond_attr, attr, sizeof(*attr)); + + return err; +} + +static int bond_upcmd_activate(struct hinic3_bond_dev *bdev, + struct bond_attr *attr) +{ + int err; + + if (bond_dev_is_activated(bdev)) 
+ return 0; + + pr_info("hinic3_bond: active bond: %u\n", bdev->bond_attr.bond_id); + + err = bond_send_upcmd(bdev, attr, MPU_CMD_BOND_CREATE); + if (err == 0) { + bdev->status = BOND_DEV_STATUS_ACTIVATED; + bdev->bond_attr.bond_mode = attr->bond_mode; + err = bond_upcmd_config(bdev, attr); + } + + return err; +} + +static void bond_call_service_func(struct hinic3_bond_dev *bdev, struct bond_attr *attr, + enum bond_service_proc_pos pos, int bond_status) +{ + int i; + + if (bond_status) + return; + + mutex_lock(&g_bond_service_func_mutex); + for (i = 0; i < HINIC3_BOND_USER_NUM; i++) { + if (g_bond_service_func[i]) + g_bond_service_func[i](bdev->name, (void *)attr, pos); + } + mutex_unlock(&g_bond_service_func_mutex); +} + +static void bond_do_work(struct hinic3_bond_dev *bdev) +{ + bool is_bonded = 0; + struct bond_attr attr; + int err = 0; + + spin_lock(&bdev->lock); + is_bonded = bdev->tracker.is_bonded; + attr = bdev->new_attr; + spin_unlock(&bdev->lock); + attr.user_bitmap = 0; + + /* is_bonded indicates whether bond should be activated. */ + if (is_bonded && !bond_dev_is_activated(bdev)) { + bond_call_service_func(bdev, &attr, BOND_BEFORE_ACTIVE, 0); + err = bond_upcmd_activate(bdev, &attr); + bond_call_service_func(bdev, &attr, BOND_AFTER_ACTIVE, err); + } else if (is_bonded && bond_dev_is_activated(bdev)) { + bond_call_service_func(bdev, &attr, BOND_BEFORE_MODIFY, 0); + err = bond_upcmd_config(bdev, &attr); + bond_call_service_func(bdev, &attr, BOND_AFTER_MODIFY, err); + } else if (!is_bonded && bond_dev_is_activated(bdev)) { + bond_call_service_func(bdev, &attr, BOND_BEFORE_DEACTIVE, 0); + err = bond_upcmd_deactivate(bdev); + bond_call_service_func(bdev, &attr, BOND_AFTER_DEACTIVE, err); + } + + if (err) + pr_err("hinic3_bond: Do bond failed\n"); +} + +#define MIN_BOND_SLAVE_CNT 2 +static void bond_try_do_work(struct work_struct *work) +{ + struct delayed_work *delayed_work = to_delayed_work(work); + struct hinic3_bond_dev *bdev = + container_of(delayed_work, struct hinic3_bond_dev, bond_work); + + if (g_bond_service_func[HINIC3_BOND_USER_ROCE] && bdev->tracker.cnt < MIN_BOND_SLAVE_CNT) + queue_delayed_work(bdev->wq, &bdev->bond_work, HZ); + else + bond_do_work(bdev); +} + +static int bond_dev_init(struct hinic3_bond_dev *bdev, const char *name) +{ + bdev->wq = create_singlethread_workqueue("hinic3_bond_wq"); + if (!bdev->wq) { + pr_err("hinic3_bond: Failed to create workqueue\n"); + return -ENODEV; + } + + INIT_DELAYED_WORK(&bdev->bond_work, bond_try_do_work); + bdev->status = BOND_DEV_STATUS_IDLE; + strscpy(bdev->name, name, sizeof(bdev->name)); + + spin_lock_init(&bdev->lock); + + return 0; +} + +static int bond_dev_release(struct hinic3_bond_dev *bdev) +{ + int err; + u8 i; + u32 bond_cnt; + + err = bond_upcmd_deactivate(bdev); + if (err) { + pr_err("hinic3_bond: Failed to deactivate dev\n"); + return err; + } + + for (i = BOND_FIRST_ID; i <= BOND_MAX_ID; i++) { + if (bond_mngr.bond_dev[i] == bdev) { + bond_mngr.bond_dev[i] = NULL; + bond_mngr.cnt--; + pr_info("hinic3_bond: Free bond, id: %u mngr_cnt:%u\n", i, bond_mngr.cnt); + break; + } + } + + bond_cnt = bond_mngr.cnt; + mutex_unlock(&g_bond_mutex); + if (!bond_cnt) + bond_disable_netdev_event(); + + cancel_delayed_work_sync(&bdev->bond_work); + destroy_workqueue(bdev->wq); + kfree(bdev); + + return err; +} + +static void bond_dev_free(struct kref *ref) +{ + struct hinic3_bond_dev *bdev = NULL; + + bdev = container_of(ref, struct hinic3_bond_dev, ref); + bond_dev_release(bdev); +} + +static struct hinic3_bond_dev 
*bond_dev_alloc(const char *name) +{ + struct hinic3_bond_dev *bdev = NULL; + u16 i; + int err; + + bdev = kzalloc(sizeof(*bdev), GFP_KERNEL); + if (!bdev) { + mutex_unlock(&g_bond_mutex); + return NULL; + } + + err = bond_dev_init(bdev, name); + if (err) { + kfree(bdev); + mutex_unlock(&g_bond_mutex); + return NULL; + } + + if (!bond_mngr.cnt) { + err = bond_enable_netdev_event(); + if (err) { + bond_dev_release(bdev); + return NULL; + } + } + + for (i = BOND_FIRST_ID; i <= BOND_MAX_ID; i++) { + if (!bond_mngr.bond_dev[i]) { + bdev->bond_attr.bond_id = i; + bond_mngr.bond_dev[i] = bdev; + bond_mngr.cnt++; + pr_info("hinic3_bond: Create bond dev, id:%u cnt:%u\n", i, bond_mngr.cnt); + break; + } + } + + if (i > BOND_MAX_ID) { + bond_dev_release(bdev); + bdev = NULL; + pr_err("hinic3_bond: Failed to get free bond id\n"); + } + + return bdev; +} + +static void update_bond_info(struct hinic3_bond_dev *bdev, struct bonding *bond) +{ + struct slave *slave = NULL; + struct list_head *iter = NULL; + struct net_device *ndev[BOND_PORT_MAX_NUM]; + int i = 0; + + bdev->bond = bond; + + rtnl_lock(); + bond_for_each_slave(bond, slave, iter) { + if (bond_dev_track_port(bdev, slave->dev) == PORT_INVALID_ID) + continue; + ndev[i] = slave->dev; + dev_hold(ndev[i++]); + if (i >= BOND_PORT_MAX_NUM) + break; + (void)iter; + } + + bond_for_each_slave(bond, slave, iter) { + bond_handle_rtnl_event(slave->dev); + (void)iter; + } + + bond_handle_rtnl_event(bond->dev); + + rtnl_unlock(); + /* In case user queries info before bonding is complete */ + flush_delayed_work(&bdev->bond_work); + + rtnl_lock(); + while (i) + dev_put(ndev[--i]); + rtnl_unlock(); +} + +static struct hinic3_bond_dev *bond_dev_by_name(const char *name) +{ + struct hinic3_bond_dev *bdev = NULL; + int i; + + for (i = BOND_FIRST_ID; i <= BOND_MAX_ID; i++) { + if (bond_mngr.bond_dev[i] && + (strcmp(bond_mngr.bond_dev[i]->name, name) == 0)) { + bdev = bond_mngr.bond_dev[i]; + break; + } + } + + return bdev; +} + +static void bond_dev_user_attach(struct hinic3_bond_dev *bdev, + enum hinic3_bond_user user) +{ + if (bdev->slot_used[user]) + return; + + bdev->slot_used[user] = 1; + if (!kref_get_unless_zero(&bdev->ref)) + kref_init(&bdev->ref); +} + +static void bond_dev_user_detach(struct hinic3_bond_dev *bdev, + enum hinic3_bond_user user, bool *freed) +{ + if (user < 0 || user >= HINIC3_BOND_USER_NUM) + return; + + if (bdev->slot_used[user]) { + bdev->slot_used[user] = 0; + if (kref_read(&bdev->ref) == 1) + *freed = true; + kref_put(&bdev->ref, bond_dev_free); + } +} + +static struct bonding *bond_get_knl_bonding(const char *name) +{ + struct net_device *ndev_tmp = NULL; + + for_each_netdev(&init_net, ndev_tmp) { + if (netif_is_bond_master(ndev_tmp) && + !strcmp(ndev_tmp->name, name)) + return netdev_priv(ndev_tmp); + } + + return NULL; +} + +void hinic3_bond_set_user_bitmap(struct bond_attr *attr, enum hinic3_bond_user user) +{ + if (!BITMAP_JUDGE(attr->user_bitmap, user)) + BITMAP_SET(attr->user_bitmap, user); +} +EXPORT_SYMBOL(hinic3_bond_set_user_bitmap); + +int hinic3_bond_attach(const char *name, enum hinic3_bond_user user, + u16 *bond_id) +{ + struct hinic3_bond_dev *bdev = NULL; + struct bonding *bond = NULL; + bool new_dev = false; + + if (!name || !bond_id) + return -EINVAL; + + bond = bond_get_knl_bonding(name); + if (!bond) { + pr_warn("hinic3_bond: Kernel bond %s not exist.\n", name); + return -ENODEV; + } + + mutex_lock(&g_bond_mutex); + bdev = bond_dev_by_name(name); + if (!bdev) { + bdev = bond_dev_alloc(name); + new_dev = true; + } 
else { + pr_info("hinic3_bond: %s already exist\n", name); + } + + if (!bdev) { + // lock has beed released in bond_dev_alloc + return -ENODEV; + } + + bond_dev_user_attach(bdev, user); + mutex_unlock(&g_bond_mutex); + + if (new_dev) + update_bond_info(bdev, bond); + + *bond_id = bdev->bond_attr.bond_id; + return 0; +} +EXPORT_SYMBOL(hinic3_bond_attach); + +int hinic3_bond_detach(u16 bond_id, enum hinic3_bond_user user) +{ + int err = 0; + bool lock_freed = false; + + if (bond_id < BOND_FIRST_ID || bond_id > BOND_MAX_ID) { + pr_warn("hinic3_bond: Invalid bond id:%u to delete\n", bond_id); + return -EINVAL; + } + + mutex_lock(&g_bond_mutex); + if (!bond_mngr.bond_dev[bond_id]) + err = -ENODEV; + else + bond_dev_user_detach(bond_mngr.bond_dev[bond_id], user, &lock_freed); + + if (!lock_freed) + mutex_unlock(&g_bond_mutex); + return err; +} +EXPORT_SYMBOL(hinic3_bond_detach); + +void hinic3_bond_clean_user(enum hinic3_bond_user user) +{ + int i = 0; + bool lock_freed = false; + + mutex_lock(&g_bond_mutex); + for (i = BOND_FIRST_ID; i <= BOND_MAX_ID; i++) { + if (bond_mngr.bond_dev[i]) { + bond_dev_user_detach(bond_mngr.bond_dev[i], user, &lock_freed); + if (lock_freed) { + mutex_lock(&g_bond_mutex); + lock_freed = false; + } + } + } + if (!lock_freed) + mutex_unlock(&g_bond_mutex); +} +EXPORT_SYMBOL(hinic3_bond_clean_user); + +int hinic3_bond_get_uplink_id(u16 bond_id, u32 *uplink_id) +{ + if (bond_id < BOND_FIRST_ID || bond_id > BOND_MAX_ID || !uplink_id) { + pr_warn("hinic3_bond: Invalid args, id: %u, uplink: %d\n", + bond_id, !!uplink_id); + return -EINVAL; + } + + mutex_lock(&g_bond_mutex); + if (bond_mngr.bond_dev[bond_id]) + *uplink_id = bond_gen_uplink_id(bond_mngr.bond_dev[bond_id]); + mutex_unlock(&g_bond_mutex); + + return 0; +} +EXPORT_SYMBOL(hinic3_bond_get_uplink_id); + +int hinic3_bond_register_service_func(enum hinic3_bond_user user, void (*func) + (const char *bond_name, void *bond_attr, + enum bond_service_proc_pos pos)) +{ + if (user >= HINIC3_BOND_USER_NUM) + return -EINVAL; + + mutex_lock(&g_bond_service_func_mutex); + g_bond_service_func[user] = func; + mutex_unlock(&g_bond_service_func_mutex); + + return 0; +} +EXPORT_SYMBOL(hinic3_bond_register_service_func); + +int hinic3_bond_unregister_service_func(enum hinic3_bond_user user) +{ + if (user >= HINIC3_BOND_USER_NUM) + return -EINVAL; + + mutex_lock(&g_bond_service_func_mutex); + g_bond_service_func[user] = NULL; + mutex_unlock(&g_bond_service_func_mutex); + + return 0; +} +EXPORT_SYMBOL(hinic3_bond_unregister_service_func); + +int hinic3_bond_get_slaves(u16 bond_id, struct hinic3_bond_info_s *info) +{ + struct bond_tracker *tracker = NULL; + int size; + int i; + int len; + + if (!info || bond_id < BOND_FIRST_ID || bond_id > BOND_MAX_ID) { + pr_warn("hinic3_bond: Invalid args, info: %d,id: %u\n", + !!info, bond_id); + return -EINVAL; + } + + size = ARRAY_LEN(info->slaves_name); + if (size < BOND_PORT_MAX_NUM) { + pr_warn("hinic3_bond: Invalid args, size: %u\n", + size); + return -EINVAL; + } + + mutex_lock(&g_bond_mutex); + if (bond_mngr.bond_dev[bond_id]) { + info->slaves = bond_mngr.bond_dev[bond_id]->bond_attr.slaves; + tracker = &bond_mngr.bond_dev[bond_id]->tracker; + info->cnt = 0; + for (i = 0; i < BOND_PORT_MAX_NUM; i++) { + if (BITMAP_JUDGE(info->slaves, i) && tracker->ndev[i]) { + len = sizeof(info->slaves_name[0]); + strscpy(info->slaves_name[info->cnt], tracker->ndev[i]->name, len); + info->cnt++; + } + } + } + mutex_unlock(&g_bond_mutex); + return 0; +} +EXPORT_SYMBOL(hinic3_bond_get_slaves); + +struct 
net_device *hinic3_bond_get_netdev_by_portid(const char *bond_name, u8 port_id) +{ + struct hinic3_bond_dev *bdev = NULL; + + if (port_id >= BOND_PORT_MAX_NUM) + return NULL; + mutex_lock(&g_bond_mutex); + bdev = bond_dev_by_name(bond_name); + if (!bdev) { + mutex_unlock(&g_bond_mutex); + return NULL; + } + mutex_unlock(&g_bond_mutex); + return bdev->tracker.ndev[port_id]; +} +EXPORT_SYMBOL(hinic3_bond_get_netdev_by_portid); + +int hinic3_get_hw_bond_infos(void *hwdev, struct hinic3_hw_bond_infos *infos, u16 channel) +{ + struct comm_cmd_hw_bond_infos bond_infos; + u16 out_size = sizeof(bond_infos); + int err; + + if (!hwdev || !infos) + return -EINVAL; + + memset(&bond_infos, 0, sizeof(bond_infos)); + + bond_infos.infos.bond_id = infos->bond_id; + + err = hinic3_msg_to_mgmt_sync(hwdev, HINIC3_MOD_COMM, COMM_MGMT_CMD_GET_HW_BOND, + &bond_infos, sizeof(bond_infos), + &bond_infos, &out_size, 0, channel); + if (bond_infos.head.status || err || !out_size) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "Failed to get hw bond information, err: %d, status: 0x%x, out size: 0x%x, channel: 0x%x\n", + err, bond_infos.head.status, out_size, channel); + return -EIO; + } + + memcpy(infos, &bond_infos.infos, sizeof(*infos)); + + return 0; +} +EXPORT_SYMBOL(hinic3_get_hw_bond_infos); + +int hinic3_get_bond_tracker_by_name(const char *name, struct bond_tracker *tracker) +{ + struct hinic3_bond_dev *bdev = NULL; + int i; + + mutex_lock(&g_bond_mutex); + for (i = BOND_FIRST_ID; i <= BOND_MAX_ID; i++) { + if (bond_mngr.bond_dev[i] && + (strcmp(bond_mngr.bond_dev[i]->name, name) == 0)) { + bdev = bond_mngr.bond_dev[i]; + spin_lock(&bdev->lock); + *tracker = bdev->tracker; + spin_unlock(&bdev->lock); + mutex_unlock(&g_bond_mutex); + return 0; + } + } + mutex_unlock(&g_bond_mutex); + return -ENODEV; +} +EXPORT_SYMBOL(hinic3_get_bond_tracker_by_name); diff --git a/drivers/net/ethernet/huawei/hinic3/bond/hinic3_bond.h b/drivers/net/ethernet/huawei/hinic3/bond/hinic3_bond.h new file mode 100644 index 000000000..3ee99d906 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/bond/hinic3_bond.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC3_BOND_H +#define HINIC3_BOND_H + +#include <linux/netdevice.h> +#include <linux/types.h> +#include "mpu_inband_cmd_defs.h" +#include "bond_common_defs.h" + +enum hinic3_bond_user { + HINIC3_BOND_USER_OVS, + HINIC3_BOND_USER_TOE, + HINIC3_BOND_USER_ROCE, + HINIC3_BOND_USER_NUM +}; + +enum bond_service_proc_pos { + BOND_BEFORE_ACTIVE, + BOND_AFTER_ACTIVE, + BOND_BEFORE_MODIFY, + BOND_AFTER_MODIFY, + BOND_BEFORE_DEACTIVE, + BOND_AFTER_DEACTIVE, + BOND_POS_MAX +}; + +#define BITMAP_SET(bm, bit) ((bm) |= (typeof(bm))(1U << (bit))) +#define BITMAP_CLR(bm, bit) ((bm) &= ~((typeof(bm))(1U << (bit)))) +#define BITMAP_JUDGE(bm, bit) ((bm) & (typeof(bm))(1U << (bit))) + +#define MPU_CMD_BOND_CREATE 17 +#define MPU_CMD_BOND_DELETE 18 +#define MPU_CMD_BOND_SET_ATTR 19 +#define MPU_CMD_BOND_GET_ATTR 20 + +#define HINIC3_MAX_PORT 4 +#define HINIC3_IFNAMSIZ 16 +struct hinic3_bond_info_s { + u8 slaves; + u8 cnt; + u8 srv[2]; + char slaves_name[HINIC3_MAX_PORT][HINIC3_IFNAMSIZ]; +}; + +#pragma pack(1) +struct netdev_lower_state_info { + u8 link_up : 1; + u8 tx_enabled : 1; + u8 rsvd : 6; +}; + +#pragma pack() + +struct bond_tracker { + struct netdev_lower_state_info netdev_state[BOND_PORT_MAX_NUM]; + struct net_device *ndev[BOND_PORT_MAX_NUM]; + u8 cnt; + bool is_bonded; +}; + +struct bond_attr { + u16 
bond_mode; + u16 bond_id; + u16 up_delay; + u16 down_delay; + u8 active_slaves; + u8 slaves; + u8 lacp_collect_slaves; + u8 xmit_hash_policy; + u32 first_roce_func; + u32 bond_pf_bitmap; + u32 user_bitmap; +}; + +struct hinic3_bond_cmd { + u8 ret_status; + u8 version; + u16 sub_cmd; + struct bond_attr attr; + char bond_name[16]; +}; + +void hinic3_bond_set_user_bitmap(struct bond_attr *attr, enum hinic3_bond_user user); +int hinic3_bond_attach(const char *name, enum hinic3_bond_user user, u16 *bond_id); +int hinic3_bond_detach(u16 bond_id, enum hinic3_bond_user user); +void hinic3_bond_clean_user(enum hinic3_bond_user user); +int hinic3_bond_get_uplink_id(u16 bond_id, u32 *uplink_id); +int hinic3_bond_register_service_func(enum hinic3_bond_user user, void (*func) + (const char *bond_name, void *bond_attr, + enum bond_service_proc_pos pos)); +int hinic3_bond_unregister_service_func(enum hinic3_bond_user user); +int hinic3_bond_get_slaves(u16 bond_id, struct hinic3_bond_info_s *info); +struct net_device *hinic3_bond_get_netdev_by_portid(const char *bond_name, u8 port_id); +int hinic3_get_hw_bond_infos(void *hwdev, struct hinic3_hw_bond_infos *infos, u16 channel); +int hinic3_get_bond_tracker_by_name(const char *name, struct bond_tracker *tracker); +#endif /* HINIC3_BOND_H */ diff --git a/drivers/net/ethernet/huawei/hinic3/comm_msg_intf.h b/drivers/net/ethernet/huawei/hinic3/comm_msg_intf.h index 4d662fe4c..ae998cfcd 100644 --- a/drivers/net/ethernet/huawei/hinic3/comm_msg_intf.h +++ b/drivers/net/ethernet/huawei/hinic3/comm_msg_intf.h @@ -14,69 +14,8 @@
#include "comm_defs.h" #include "mgmt_msg_base.h" - -/* func_reset_flag的边界值 */ -#define FUNC_RESET_FLAG_MAX_VALUE ((1U << (RES_TYPE_MAX + 1)) - 1) -struct comm_cmd_func_reset { - struct mgmt_msg_head head; - - u16 func_id; - u16 rsvd1[3]; - u64 reset_flag; -}; - -struct comm_cmd_ppf_flr_type_set { - struct mgmt_msg_head head; - - u16 func_id; - u8 rsvd1[2]; - u32 ppf_flr_type; -}; - -enum { - COMM_F_API_CHAIN = 1U << 0, - COMM_F_CLP = 1U << 1, - COMM_F_CHANNEL_DETECT = 1U << 2, - COMM_F_MBOX_SEGMENT = 1U << 3, - COMM_F_CMDQ_NUM = 1U << 4, - COMM_F_VIRTIO_VQ_SIZE = 1U << 5, -}; - -#define COMM_MAX_FEATURE_QWORD 4 -struct comm_cmd_feature_nego { - struct mgmt_msg_head head; - - u16 func_id; - u8 opcode; /* 1: set, 0: get */ - u8 rsvd; - u64 s_feature[COMM_MAX_FEATURE_QWORD]; -}; - -struct comm_cmd_clear_doorbell { - struct mgmt_msg_head head; - - u16 func_id; - u16 rsvd1[3]; -}; - -struct comm_cmd_clear_resource { - struct mgmt_msg_head head; - - u16 func_id; - u16 rsvd1[3]; -}; - -struct comm_global_attr { - u8 max_host_num; - u8 max_pf_num; - u16 vf_id_start; - - u8 mgmt_host_node_id; /* for api cmd to mgmt cpu */ - u8 cmdq_num; - u8 rsvd1[2]; - - u32 rsvd2[8]; -}; +#include "mpu_cmd_base_defs.h" +#include "mpu_inband_cmd_defs.h"
struct spu_cmd_freq_operation { struct comm_info_head head; @@ -107,236 +46,11 @@ struct spu_cmd_tsensor_operation { s16 sys_tsensor_temp; };
-struct comm_cmd_heart_event { - struct mgmt_msg_head head; - - u8 init_sta; /* 0: mpu init ok, 1: mpu init error. */ - u8 rsvd1[3]; - u32 heart; /* add one by one */ - u32 heart_handshake; /* should be alwasys: 0x5A5A5A5A */ -}; - -struct comm_cmd_channel_detect { - struct mgmt_msg_head head; - - u16 func_id; - u16 rsvd1[3]; - u32 rsvd2[2]; -}; - -enum hinic3_svc_type { - SVC_T_COMM = 0, - SVC_T_NIC, - SVC_T_OVS, - SVC_T_ROCE, - SVC_T_TOE, - SVC_T_IOE, - SVC_T_FC, - SVC_T_VBS, - SVC_T_IPSEC, - SVC_T_VIRTIO, - SVC_T_MIGRATE, - SVC_T_PPA, - SVC_T_MAX, -}; - -struct comm_cmd_func_svc_used_state { - struct mgmt_msg_head head; - u16 func_id; - u16 svc_type; - u8 used_state; - u8 rsvd[35]; -}; - -#define TABLE_INDEX_MAX 129 - -struct sml_table_id_info { - u8 node_id; - u8 instance_id; -}; - -struct comm_cmd_get_sml_tbl_data { - struct comm_info_head head; /* 8B */ - u8 tbl_data[512]; -}; - -struct comm_cmd_get_glb_attr { - struct mgmt_msg_head head; - - struct comm_global_attr attr; -}; - -enum hinic3_fw_ver_type { - HINIC3_FW_VER_TYPE_BOOT, - HINIC3_FW_VER_TYPE_MPU, - HINIC3_FW_VER_TYPE_NPU, - HINIC3_FW_VER_TYPE_SMU_L0, - HINIC3_FW_VER_TYPE_SMU_L1, - HINIC3_FW_VER_TYPE_CFG, -}; - -#define HINIC3_FW_VERSION_LEN 16 -#define HINIC3_FW_COMPILE_TIME_LEN 20 -struct comm_cmd_get_fw_version { - struct mgmt_msg_head head; - - u16 fw_type; - u16 rsvd1; - u8 ver[HINIC3_FW_VERSION_LEN]; - u8 time[HINIC3_FW_COMPILE_TIME_LEN]; -}; - -/* hardware define: cmdq context */ -struct cmdq_ctxt_info { - u64 curr_wqe_page_pfn; - u64 wq_block_pfn; -}; - -struct comm_cmd_cmdq_ctxt { - struct mgmt_msg_head head; - - u16 func_id; - u8 cmdq_id; - u8 rsvd1[5]; - - struct cmdq_ctxt_info ctxt; -}; - -struct comm_cmd_root_ctxt { - struct mgmt_msg_head head; - - u16 func_id; - u8 set_cmdq_depth; - u8 cmdq_depth; - u16 rx_buf_sz; - u8 lro_en; - u8 rsvd1; - u16 sq_depth; - u16 rq_depth; - u64 rsvd2; -}; - -struct comm_cmd_wq_page_size { - struct mgmt_msg_head head; - - u16 func_id; - u8 opcode; - /* real_size=4KB*2^page_size, range(0~20) must be checked by driver */ - u8 page_size; - - u32 rsvd1; -}; - -struct comm_cmd_msix_config { - struct mgmt_msg_head head; - - u16 func_id; - u8 opcode; - u8 rsvd1; - u16 msix_index; - u8 pending_cnt; - u8 coalesce_timer_cnt; - u8 resend_timer_cnt; - u8 lli_timer_cnt; - u8 lli_credit_cnt; - u8 rsvd2[5]; -}; - enum cfg_msix_operation { CFG_MSIX_OPERATION_FREE = 0, CFG_MSIX_OPERATION_ALLOC = 1, };
-struct comm_cmd_cfg_msix_num { - struct comm_info_head head; /* 8B */ - - u16 func_id; - u8 op_code; /* 1: alloc 0: free */ - u8 rsvd0; - - u16 msix_num; - u16 rsvd1; -}; - -struct comm_cmd_dma_attr_config { - struct mgmt_msg_head head; - - u16 func_id; - u8 entry_idx; - u8 st; - u8 at; - u8 ph; - u8 no_snooping; - u8 tph_en; - u32 resv1; -}; - -struct comm_cmd_ceq_ctrl_reg { - struct mgmt_msg_head head; - - u16 func_id; - u16 q_id; - u32 ctrl0; - u32 ctrl1; - u32 rsvd1; -}; - -struct comm_cmd_func_tmr_bitmap_op { - struct mgmt_msg_head head; - - u16 func_id; - u8 opcode; /* 1: start, 0: stop */ - u8 rsvd1[5]; -}; - -struct comm_cmd_ppf_tmr_op { - struct mgmt_msg_head head; - - u8 ppf_id; - u8 opcode; /* 1: start, 0: stop */ - u8 rsvd1[6]; -}; - -struct comm_cmd_ht_gpa { - struct mgmt_msg_head head; - - u8 host_id; - u8 rsvd0[3]; - u32 rsvd1[7]; - u64 page_pa0; - u64 page_pa1; -}; - -struct comm_cmd_get_eqm_num { - struct mgmt_msg_head head; - - u8 host_id; - u8 rsvd1[3]; - u32 chunk_num; - u32 search_gpa_num; -}; - -struct comm_cmd_eqm_cfg { - struct mgmt_msg_head head; - - u8 host_id; - u8 valid; - u16 rsvd1; - u32 page_size; - u32 rsvd2; -}; - -struct comm_cmd_eqm_search_gpa { - struct mgmt_msg_head head; - - u8 host_id; - u8 rsvd1[3]; - u32 start_idx; - u32 num; - u32 rsvd2; - u64 gpa_hi52[0]; /*lint !e1501*/ -}; - struct comm_cmd_ffm_info { struct mgmt_msg_head head;
@@ -350,281 +64,6 @@ struct comm_cmd_ffm_info { u32 rsvd1; };
-#define HARDWARE_ID_1XX3V100_TAG 31 /* 1xx3v100 tag */ - -struct hinic3_board_info { - u8 board_type; - u8 port_num; - u8 port_speed; - u8 pcie_width; - u8 host_num; - u8 pf_num; - u16 vf_total_num; - u8 tile_num; - u8 qcm_num; - u8 core_num; - u8 work_mode; - u8 service_mode; - u8 pcie_mode; - u8 boot_sel; - u8 board_id; - u32 cfg_addr; - u32 service_en_bitmap; - u8 scenes_id; - u8 cfg_template_id; - u8 hardware_id; - u8 spu_en; - u16 pf_vendor_id; - u8 tile_bitmap; - u8 sm_bitmap; -}; - -struct comm_cmd_board_info { - struct mgmt_msg_head head; - - struct hinic3_board_info info; - u32 rsvd[22]; -}; - -struct comm_cmd_sync_time { - struct mgmt_msg_head head; - - u64 mstime; - u64 rsvd1; -}; - -struct comm_cmd_sdi_info { - struct mgmt_msg_head head; - u32 cfg_sdi_mode; -}; - -/* func flr set */ -struct comm_cmd_func_flr_set { - struct mgmt_msg_head head; - - u16 func_id; - u8 type; /* 1: close 置flush */ - u8 isall; /* 是否操作对应pf下的所有vf 1: all vf */ - u32 rsvd; -}; - -struct comm_cmd_bdf_info { - struct mgmt_msg_head head; - - u16 function_idx; - u8 rsvd1[2]; - u8 bus; - u8 device; - u8 function; - u8 rsvd2[5]; -}; - -struct hw_pf_info { - u16 glb_func_idx; - u16 glb_pf_vf_offset; - u8 p2p_idx; - u8 itf_idx; - u16 max_vfs; - u16 max_queue_num; - u16 vf_max_queue_num; - u16 port_id; - u16 rsvd0; - u32 pf_service_en_bitmap; - u32 vf_service_en_bitmap; - u16 rsvd1[2]; - - u8 device_type; - u8 bus_num; /* tl_cfg_bus_num */ - u16 vf_stride; /* VF_RID_SETTING.vf_stride */ - u16 vf_offset; /* VF_RID_SETTING.vf_offset */ - u8 rsvd[2]; -}; - -#define CMD_MAX_MAX_PF_NUM 32 -struct hinic3_hw_pf_infos { - u8 num_pfs; - u8 rsvd1[3]; - - struct hw_pf_info infos[CMD_MAX_MAX_PF_NUM]; -}; - -struct comm_cmd_hw_pf_infos { - struct mgmt_msg_head head; - - struct hinic3_hw_pf_infos infos; -}; - -#define DD_CFG_TEMPLATE_MAX_IDX 12 -#define DD_CFG_TEMPLATE_MAX_TXT_LEN 64 -#define CFG_TEMPLATE_OP_QUERY 0 -#define CFG_TEMPLATE_OP_SET 1 -#define CFG_TEMPLATE_SET_MODE_BY_IDX 0 -#define CFG_TEMPLATE_SET_MODE_BY_NAME 1 - -struct comm_cmd_cfg_template { - struct mgmt_msg_head head; - u8 opt_type; /* 0: query 1: set */ - u8 set_mode; /* 0-index mode. 1-name mode. */ - u8 tp_err; - u8 rsvd0; - - u8 cur_index; /* Current cfg tempalte index. */ - u8 cur_max_index; /* Max support cfg tempalte index. */ - u8 rsvd1[2]; - u8 cur_name[DD_CFG_TEMPLATE_MAX_TXT_LEN]; - u8 cur_cfg_temp_info[DD_CFG_TEMPLATE_MAX_IDX][DD_CFG_TEMPLATE_MAX_TXT_LEN]; - - u8 next_index; /* Next reset cfg tempalte index. */ - u8 next_max_index; /* Max support cfg tempalte index. */ - u8 rsvd2[2]; - u8 next_name[DD_CFG_TEMPLATE_MAX_TXT_LEN]; - u8 next_cfg_temp_info[DD_CFG_TEMPLATE_MAX_IDX][DD_CFG_TEMPLATE_MAX_TXT_LEN]; -}; - -#define MQM_SUPPORT_COS_NUM 8 -#define MQM_INVALID_WEIGHT 256 -#define MQM_LIMIT_SET_FLAG_READ 0 -#define MQM_LIMIT_SET_FLAG_WRITE 1 -struct comm_cmd_set_mqm_limit { - struct mgmt_msg_head head; - - u16 set_flag; /* 置位该标记位表示设置 */ - u16 func_id; - /* 对应cos_id所占的权重,0-255, 0为SP调度. 
*/ - u16 cos_weight[MQM_SUPPORT_COS_NUM]; - u32 host_min_rate; /* 本host支持的最低限速 */ - u32 func_min_rate; /* 本function支持的最低限速,单位Mbps */ - u32 func_max_rate; /* 本function支持的最高限速,单位Mbps */ - u8 rsvd[64]; /* Reserved */ -}; - -#define DUMP_16B_PER_LINE 16 -#define DUMP_8_VAR_PER_LINE 8 -#define DUMP_4_VAR_PER_LINE 4 - -#define DATA_LEN_1K 1024 -/* 软狗超时信息上报接口 */ -struct comm_info_sw_watchdog { - struct comm_info_head head; - - /* 全局信息 */ - u32 curr_time_h; /* 发生死循环的时间,cycle */ - u32 curr_time_l; /* 发生死循环的时间,cycle */ - u32 task_id; /* 发生死循环的任务 */ - u32 rsv; /* 保留字段,用于扩展 */ - - /* 寄存器信息,TSK_CONTEXT_S */ - u64 pc; - - u64 elr; - u64 spsr; - u64 far; - u64 esr; - u64 xzr; - u64 x30; - u64 x29; - u64 x28; - u64 x27; - u64 x26; - u64 x25; - u64 x24; - u64 x23; - u64 x22; - u64 x21; - u64 x20; - u64 x19; - u64 x18; - u64 x17; - u64 x16; - u64 x15; - u64 x14; - u64 x13; - u64 x12; - u64 x11; - u64 x10; - u64 x09; - u64 x08; - u64 x07; - u64 x06; - u64 x05; - u64 x04; - u64 x03; - u64 x02; - u64 x01; - u64 x00; - - /* 堆栈控制信息,STACK_INFO_S */ - u64 stack_top; /* 栈顶 */ - u64 stack_bottom; /* 栈底 */ - u64 sp; /* 栈当前SP指针值 */ - u32 curr_used; /* 栈当前使用的大小 */ - u32 peak_used; /* 栈使用的历史峰值 */ - u32 is_overflow; /* 栈是否溢出 */ - - /* 堆栈具体内容 */ - u32 stack_actlen; /* 实际的堆栈长度(<=1024) */ - u8 stack_data[DATA_LEN_1K]; /* 超过1024部分,会被截断 */ -}; - -/* 临终遗言信息 */ -#define XREGS_NUM 31 -struct tag_cpu_tick { - u32 cnt_hi; /* *< cycle计数高32位 */ - u32 cnt_lo; /* *< cycle计数低32位 */ -}; - -struct tag_ax_exc_reg_info { - u64 ttbr0; - u64 ttbr1; - u64 tcr; - u64 mair; - u64 sctlr; - u64 vbar; - u64 current_el; - u64 sp; - /* 以下字段的内存布局与TskContext保持一致 */ - u64 elr; /* 返回地址 */ - u64 spsr; - u64 far_r; - u64 esr; - u64 xzr; - u64 xregs[XREGS_NUM]; /* 0~30: x30~x0 */ -}; - -struct tag_exc_info { - char os_ver[48]; /* *< OS版本号 */ - char app_ver[64]; /* *< 产品版本号 */ - u32 exc_cause; /* *< 异常原因 */ - u32 thread_type; /* *< 异常前的线程类型 */ - u32 thread_id; /* *< 异常前线程PID */ - u16 byte_order; /* *< 字节序 */ - u16 cpu_type; /* *< CPU类型 */ - u32 cpu_id; /* *< CPU ID */ - struct tag_cpu_tick cpu_tick; /* *< CPU Tick */ - u32 nest_cnt; /* *< 异常嵌套计数 */ - u32 fatal_errno; /* *< 致命错误码,发生致命错误时有效 */ - u64 uw_sp; /* *< 异常前栈指针 */ - u64 stack_bottom; /* *< 异常前栈底 */ - /* 异常发生时的核内寄存器上下文信息,82\57必须位于152字节处, - * 若有改动,需更新sre_platform.eh中的OS_EXC_REGINFO_OFFSET宏 - */ - struct tag_ax_exc_reg_info reg_info; -}; - -/* 上报给驱动的up lastword模块接口 */ -#define MPU_LASTWORD_SIZE 1024 -struct tag_comm_info_up_lastword { - struct comm_info_head head; - - struct tag_exc_info stack_info; - - /* 堆栈具体内容 */ - u32 stack_actlen; /* 实际的堆栈长度(<=1024) */ - u8 stack_data[MPU_LASTWORD_SIZE]; /* 超过1024部分,会被截断 */ -}; - -#define FW_UPDATE_MGMT_TIMEOUT 3000000U - struct hinic3_cmd_update_firmware { struct mgmt_msg_head msg_head;
diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bat_cla.c b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bat_cla.c new file mode 100644 index 000000000..2d2f54cbc --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bat_cla.c @@ -0,0 +1,2056 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/mm.h> +#include <linux/device.h> +#include <linux/gfp.h> +#include <linux/kernel.h> + +#include "ossl_knl.h" +#include "hinic3_crm.h" +#include "hinic3_hw.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" + +#include "cqm_object.h" +#include "cqm_bitmap_table.h" +#include "cqm_cmd.h" +#include "cqm_object_intern.h" +#include "cqm_main.h" +#include "cqm_memsec.h" +#include "cqm_bat_cla.h" + +#include "cqm_npu_cmd.h" +#include "cqm_npu_cmd_defs.h" + +static void cqm_bat_fill_cla_common_gpa(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_cla_table *cla_table, + struct tag_cqm_bat_entry_standerd *bat_entry_standerd) +{ + u8 gpa_check_enable = cqm_handle->func_capability.gpa_check_enable; + struct hinic3_func_attr *func_attr = NULL; + struct tag_cqm_bat_entry_vf2pf gpa = {0}; + u32 cla_gpa_h = 0; + dma_addr_t pa; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + + if (cla_table->cla_lvl == CQM_CLA_LVL_0) + pa = cla_table->cla_z_buf.buf_list[0].pa; + else if (cla_table->cla_lvl == CQM_CLA_LVL_1) + pa = cla_table->cla_y_buf.buf_list[0].pa; + else + pa = cla_table->cla_x_buf.buf_list[0].pa; + + gpa.cla_gpa_h = CQM_ADDR_HI(pa) & CQM_CHIP_GPA_HIMASK; + + /* On the SPU, the value of spu_en in the GPA address + * in the BAT is determined by the host ID and fun IDx. + */ + if (hinic3_host_id(cqm_handle->ex_handle) == CQM_SPU_HOST_ID) { + func_attr = &cqm_handle->func_attribute; + gpa.acs_spu_en = func_attr->func_global_idx & 0x1; + } else { + gpa.acs_spu_en = 0; + } + + /* In fake mode, fake_vf_en in the GPA address of the BAT + * must be set to 1. + */ + if (cqm_handle->func_capability.fake_func_type == CQM_FAKE_FUNC_CHILD) { + gpa.fake_vf_en = 1; + func_attr = &cqm_handle->parent_cqm_handle->func_attribute; + gpa.pf_id = func_attr->func_global_idx; + } else { + gpa.fake_vf_en = 0; + } + + memcpy(&cla_gpa_h, &gpa, sizeof(u32)); + bat_entry_standerd->cla_gpa_h = cla_gpa_h; + + /* GPA is valid when gpa[0] = 1. + * CQM_BAT_ENTRY_T_REORDER does not support GPA validity check. + */ + if (cla_table->type == CQM_BAT_ENTRY_T_REORDER) + bat_entry_standerd->cla_gpa_l = CQM_ADDR_LW(pa); + else + bat_entry_standerd->cla_gpa_l = CQM_ADDR_LW(pa) | + gpa_check_enable; + + cqm_info(handle->dev_hdl, "Cla type %u, pa 0x%llx, gpa 0x%x-0x%x, level %u\n", + cla_table->type, pa, bat_entry_standerd->cla_gpa_h, bat_entry_standerd->cla_gpa_l, + bat_entry_standerd->cla_level); +} + +static void cqm_bat_fill_cla_common(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_cla_table *cla_table, + u8 *entry_base_addr) +{ + struct tag_cqm_bat_entry_standerd *bat_entry_standerd = NULL; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + u32 cache_line = 0; + + /* The cacheline of the timer is changed to 512. 
*/ + if (cla_table->type == CQM_BAT_ENTRY_T_TIMER) + cache_line = CQM_CHIP_TIMER_CACHELINE; + else + cache_line = CQM_CHIP_CACHELINE; + + if (cla_table->obj_num == 0) { + cqm_info(handle->dev_hdl, + "Cla alloc: cla_type %u, obj_num=0, don't init bat entry\n", + cla_table->type); + return; + } + + bat_entry_standerd = (struct tag_cqm_bat_entry_standerd *)entry_base_addr; + + /* The QPC value is 256/512/1024 and the timer value is 512. + * The other cacheline value is 256B. + * The conversion operation is performed inside the chip. + */ + if (cla_table->obj_size > cache_line) { + if (cla_table->obj_size == CQM_OBJECT_512) + bat_entry_standerd->entry_size = CQM_BAT_ENTRY_SIZE_512; + else + bat_entry_standerd->entry_size = + CQM_BAT_ENTRY_SIZE_1024; + bat_entry_standerd->max_number = cla_table->max_buffer_size / + cla_table->obj_size; + } else { + if (cache_line == CQM_CHIP_CACHELINE) { + bat_entry_standerd->entry_size = CQM_BAT_ENTRY_SIZE_256; + bat_entry_standerd->max_number = + cla_table->max_buffer_size / cache_line; + } else { + bat_entry_standerd->entry_size = CQM_BAT_ENTRY_SIZE_512; + bat_entry_standerd->max_number = + cla_table->max_buffer_size / cache_line; + } + } + + bat_entry_standerd->max_number = bat_entry_standerd->max_number - 1; + + bat_entry_standerd->bypass = CQM_BAT_NO_BYPASS_CACHE; + bat_entry_standerd->z = cla_table->cacheline_z; + bat_entry_standerd->y = cla_table->cacheline_y; + bat_entry_standerd->x = cla_table->cacheline_x; + bat_entry_standerd->cla_level = cla_table->cla_lvl; + + cqm_bat_fill_cla_common_gpa(cqm_handle, cla_table, bat_entry_standerd); +} + +static void cqm_bat_fill_cla_cfg(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_cla_table *cla_table, + u8 **entry_base_addr) +{ + struct tag_cqm_func_capability *func_cap = &cqm_handle->func_capability; + struct tag_cqm_bat_entry_cfg *bat_entry_cfg = NULL; + + bat_entry_cfg = (struct tag_cqm_bat_entry_cfg *)(*entry_base_addr); + bat_entry_cfg->cur_conn_cache = 0; + bat_entry_cfg->max_conn_cache = + func_cap->flow_table_based_conn_cache_number; + bat_entry_cfg->cur_conn_num_h_4 = 0; + bat_entry_cfg->cur_conn_num_l_16 = 0; + bat_entry_cfg->max_conn_num = func_cap->flow_table_based_conn_number; + + /* Aligns with 64 buckets and shifts rightward by 6 bits. + * The maximum value of this field is 16 bits. A maximum of 4M buckets + * can be supported. The value is subtracted by 1. It is used for &hash + * value. 
+ */ + if ((func_cap->hash_number >> CQM_HASH_NUMBER_UNIT) != 0) { + bat_entry_cfg->bucket_num = ((func_cap->hash_number >> + CQM_HASH_NUMBER_UNIT) - 1); + } + if (func_cap->bloomfilter_length != 0) { + bat_entry_cfg->bloom_filter_len = func_cap->bloomfilter_length - + 1; + bat_entry_cfg->bloom_filter_addr = func_cap->bloomfilter_addr; + } + + (*entry_base_addr) += sizeof(struct tag_cqm_bat_entry_cfg); +} + +static void cqm_bat_fill_cla_other(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_cla_table *cla_table, + u8 **entry_base_addr) +{ + cqm_bat_fill_cla_common(cqm_handle, cla_table, *entry_base_addr); + + (*entry_base_addr) += sizeof(struct tag_cqm_bat_entry_standerd); +} + +static void cqm_bat_fill_cla_taskmap(struct tag_cqm_handle *cqm_handle, + const struct tag_cqm_cla_table *cla_table, + u8 **entry_base_addr) +{ + struct tag_cqm_bat_entry_taskmap *bat_entry_taskmap = NULL; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + int i; + + if (cqm_handle->func_capability.taskmap_number != 0) { + bat_entry_taskmap = + (struct tag_cqm_bat_entry_taskmap *)(*entry_base_addr); + for (i = 0; i < CQM_BAT_ENTRY_TASKMAP_NUM; i++) { + bat_entry_taskmap->addr[i].gpa_h = + (u32)(cla_table->cla_z_buf.buf_list[i].pa >> + CQM_CHIP_GPA_HSHIFT); + bat_entry_taskmap->addr[i].gpa_l = + (u32)(cla_table->cla_z_buf.buf_list[i].pa & + CQM_CHIP_GPA_LOMASK); + cqm_info(handle->dev_hdl, + "Cla alloc: taskmap bat entry: 0x%x 0x%x\n", + bat_entry_taskmap->addr[i].gpa_h, + bat_entry_taskmap->addr[i].gpa_l); + } + } + + (*entry_base_addr) += sizeof(struct tag_cqm_bat_entry_taskmap); +} + +static void cqm_bat_fill_cla_timer(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_cla_table *cla_table, + u8 **entry_base_addr) +{ + /* Only the PPF allocates timer resources. */ + if (cqm_handle->func_attribute.func_type != CQM_PPF) { + (*entry_base_addr) += CQM_BAT_ENTRY_SIZE; + } else { + cqm_bat_fill_cla_common(cqm_handle, cla_table, + *entry_base_addr); + + (*entry_base_addr) += sizeof(struct tag_cqm_bat_entry_standerd); + } +} + +static void cqm_bat_fill_cla_invalid(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_cla_table *cla_table, + u8 **entry_base_addr) +{ + (*entry_base_addr) += CQM_BAT_ENTRY_SIZE; +} + +/** + * Prototype : cqm_bat_fill_cla + * Description : Fill the base address of the CLA table into the BAT table. + * Input : struct tag_cqm_handle *cqm_handle + * Output : None + * Return Value : s32 + * 1.Date : 2015/5/15 + * Modification : Created function + */ +static void cqm_bat_fill_cla(struct tag_cqm_handle *cqm_handle) +{ + struct tag_cqm_bat_table *bat_table = &cqm_handle->bat_table; + struct tag_cqm_cla_table *cla_table = NULL; + u32 entry_type = CQM_BAT_ENTRY_T_INVALID; + u8 *entry_base_addr = NULL; + u32 i = 0; + + /* Fills each item in the BAT table according to the BAT format. 
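+ * For example, a CQM_BAT_ENTRY_T_CFG slot is filled by
+ * cqm_bat_fill_cla_cfg() and advances entry_base_addr by
+ * sizeof(struct tag_cqm_bat_entry_cfg), while an invalid slot simply
+ * skips CQM_BAT_ENTRY_SIZE bytes.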
*/ + entry_base_addr = bat_table->bat; + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { + cqm_dbg("entry_base_addr = %p\n", entry_base_addr); + entry_type = bat_table->bat_entry_type[i]; + cla_table = &bat_table->entry[i]; + + if (entry_type == CQM_BAT_ENTRY_T_CFG) { + cqm_bat_fill_cla_cfg(cqm_handle, cla_table, &entry_base_addr); + } else if (entry_type == CQM_BAT_ENTRY_T_TASKMAP) { + cqm_bat_fill_cla_taskmap(cqm_handle, cla_table, &entry_base_addr); + } else if (entry_type == CQM_BAT_ENTRY_T_INVALID) { + cqm_bat_fill_cla_invalid(cqm_handle, cla_table, &entry_base_addr); + } else if (entry_type == CQM_BAT_ENTRY_T_TIMER) { + if (cqm_handle->func_attribute.func_type == CQM_PPF && + (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_1 || + cqm_handle->func_capability.lb_mode == CQM_LB_MODE_2)) { + entry_base_addr += sizeof(struct tag_cqm_bat_entry_standerd); + continue; + } + + cqm_bat_fill_cla_timer(cqm_handle, cla_table, + &entry_base_addr); + } else { + cqm_bat_fill_cla_other(cqm_handle, cla_table, &entry_base_addr); + } + + /* Check whether entry_base_addr is out-of-bounds array. */ + if (entry_base_addr >= + (bat_table->bat + CQM_BAT_ENTRY_MAX * CQM_BAT_ENTRY_SIZE)) + break; + } +} + +u32 cqm_funcid2smfid(const struct tag_cqm_handle *cqm_handle) +{ + u32 funcid = 0; + u32 smf_sel = 0; + u32 smf_id = 0; + u32 smf_pg_partial = 0; + /* SMF_Selection is selected based on + * the lower two bits of the function id + */ + u32 lbf_smfsel[4] = {0, 2, 1, 3}; + /* SMFID is selected based on SMF_PG[1:0] and SMF_Selection(0-1) */ + u32 smfsel_smfid01[4][2] = { {0, 0}, {0, 0}, {1, 1}, {0, 1} }; + /* SMFID is selected based on SMF_PG[3:2] and SMF_Selection(2-4) */ + u32 smfsel_smfid23[4][2] = { {2, 2}, {2, 2}, {3, 3}, {2, 3} }; + + /* When the LB mode is disabled, SMF0 is always returned. */ + if (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_NORMAL) { + smf_id = 0; + } else { + funcid = cqm_handle->func_attribute.func_global_idx & 0x3; + smf_sel = lbf_smfsel[funcid]; + + if (smf_sel < 0x2) { + smf_pg_partial = cqm_handle->func_capability.smf_pg & + 0x3; + smf_id = smfsel_smfid01[smf_pg_partial][smf_sel]; + } else { + smf_pg_partial = + /* shift to right by 2 bits */ + (cqm_handle->func_capability.smf_pg >> 2) & 0x3; + smf_id = smfsel_smfid23[smf_pg_partial][smf_sel - 0x2]; + } + } + + return smf_id; +} + +/* This function is used in LB mode 1/2. The timer spoker info + * of independent space needs to be configured for 4 SMFs. + */ +static void cqm_update_timer_gpa(struct tag_cqm_handle *cqm_handle, u32 smf_id) +{ + struct tag_cqm_bat_table *bat_table = &cqm_handle->bat_table; + struct tag_cqm_cla_table *cla_table = NULL; + u32 entry_type = CQM_BAT_ENTRY_T_INVALID; + u8 *entry_base_addr = NULL; + u32 i = 0; + + if (cqm_handle->func_attribute.func_type != CQM_PPF) + return; + + if (cqm_handle->func_capability.lb_mode != CQM_LB_MODE_1 && + cqm_handle->func_capability.lb_mode != CQM_LB_MODE_2) + return; + + cla_table = &bat_table->timer_entry[smf_id]; + entry_base_addr = bat_table->bat; + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { + entry_type = bat_table->bat_entry_type[i]; + + if (entry_type == CQM_BAT_ENTRY_T_TIMER) { + cqm_bat_fill_cla_timer(cqm_handle, cla_table, + &entry_base_addr); + break; + } + + if (entry_type == CQM_BAT_ENTRY_T_TASKMAP) + entry_base_addr += sizeof(struct tag_cqm_bat_entry_taskmap); + else + entry_base_addr += CQM_BAT_ENTRY_SIZE; + + /* Check whether entry_base_addr is out-of-bounds array. 
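+ * That is, stop once the walk passes the end of the table:
+ * bat + CQM_BAT_ENTRY_MAX * CQM_BAT_ENTRY_SIZE (16 entries x 16 B).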
*/ + if (entry_base_addr >= + (bat_table->bat + CQM_BAT_ENTRY_MAX * CQM_BAT_ENTRY_SIZE)) + break; + } +} + +static s32 cqm_bat_update_cmd(struct tag_cqm_handle *cqm_handle, struct tag_cqm_cmd_buf *buf_in, + u32 smf_id, u32 func_id) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_cmdq_bat_update *bat_update_cmd = NULL; + s32 ret = CQM_FAIL; + + bat_update_cmd = (struct tag_cqm_cmdq_bat_update *)(buf_in->buf); + bat_update_cmd->offset = 0; + + if (cqm_handle->bat_table.bat_size > CQM_BAT_MAX_SIZE) { + cqm_err(handle->dev_hdl, + "bat_size = %u, which is more than %d.\n", + cqm_handle->bat_table.bat_size, CQM_BAT_MAX_SIZE); + return CQM_FAIL; + } + bat_update_cmd->byte_len = cqm_handle->bat_table.bat_size; + + memcpy(bat_update_cmd->data, cqm_handle->bat_table.bat, bat_update_cmd->byte_len); + +#ifdef __CQM_DEBUG__ + cqm_byte_print((u32 *)(cqm_handle->bat_table.bat), + CQM_BAT_ENTRY_MAX * CQM_BAT_ENTRY_SIZE); +#endif + + bat_update_cmd->smf_id = smf_id; + bat_update_cmd->func_id = func_id; + + cqm_info(handle->dev_hdl, "Bat update: smf_id=%u\n", + bat_update_cmd->smf_id); + cqm_info(handle->dev_hdl, "Bat update: func_id=%u\n", + bat_update_cmd->func_id); + + cqm_swab32((u8 *)bat_update_cmd, + sizeof(struct tag_cqm_cmdq_bat_update) >> CQM_DW_SHIFT); + + ret = cqm_send_cmd_box((void *)(cqm_handle->ex_handle), CQM_MOD_CQM, + CQM_CMD_T_BAT_UPDATE, buf_in, NULL, NULL, + CQM_CMD_TIMEOUT, HINIC3_CHANNEL_DEFAULT); + if (ret != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_send_cmd_box)); + cqm_err(handle->dev_hdl, "%s: send_cmd_box ret=%d\n", __func__, + ret); + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +/** + * Prototype : cqm_bat_update + * Description : Send a command to tile to update the BAT table through cmdq. + * Input : struct tag_cqm_handle *cqm_handle + * Output : None + * Return Value : s32 + * 1.Date : 2015/5/15 + * Modification : Created function + */ +static s32 cqm_bat_update(struct tag_cqm_handle *cqm_handle) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_cmd_buf *buf_in = NULL; + s32 ret = CQM_FAIL; + u32 smf_id = 0; + u32 func_id = 0; + u32 i = 0; + + buf_in = cqm_cmd_alloc((void *)(cqm_handle->ex_handle)); + if (!buf_in) + return CQM_FAIL; + buf_in->size = sizeof(struct tag_cqm_cmdq_bat_update); + + /* In non-fake mode, func_id is set to 0xffff, indicating the current + * func. In fake mode, the value of func_id is specified. This is a fake + * func_id. + */ + if (cqm_handle->func_capability.fake_func_type == CQM_FAKE_FUNC_CHILD) + func_id = cqm_handle->func_attribute.func_global_idx; + else + func_id = 0xffff; + + /* The LB scenario is supported. + * The normal mode is the traditional mode and is configured on SMF0. + * In mode 0, load is balanced to four SMFs based on the func ID (except + * the PPF func ID). The PPF in mode 0 needs to be configured on four + * SMF, so the timer resources can be shared by the four timer engine. + * Mode 1/2 is load balanced to four SMF by flow. Therefore, one + * function needs to be configured to four SMF. 
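+ * Concretely: in CQM_LB_MODE_NORMAL, or in mode 0 for a non-PPF
+ * function, a single update goes to the SMF returned by
+ * cqm_funcid2smfid(); a PPF in mode 0, or any function in mode 1/2,
+ * walks CQM_LB_SMF_MAX engines and updates every SMF whose bit is
+ * set in smf_pg.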
+ */ + if (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_NORMAL || + (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_0 && + cqm_handle->func_attribute.func_type != CQM_PPF)) { + smf_id = cqm_funcid2smfid(cqm_handle); + ret = cqm_bat_update_cmd(cqm_handle, buf_in, smf_id, func_id); + } else if ((cqm_handle->func_capability.lb_mode == CQM_LB_MODE_1) || + (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_2) || + ((cqm_handle->func_capability.lb_mode == CQM_LB_MODE_0) && + (cqm_handle->func_attribute.func_type == CQM_PPF))) { + for (i = 0; i < CQM_LB_SMF_MAX; i++) { + cqm_update_timer_gpa(cqm_handle, i); + + /* The smf_pg variable stores the currently + * enabled SMF. + */ + if (cqm_handle->func_capability.smf_pg & (1U << i)) { + smf_id = i; + ret = cqm_bat_update_cmd(cqm_handle, buf_in, + smf_id, func_id); + if (ret != CQM_SUCCESS) + goto out; + } + } + } else { + cqm_err(handle->dev_hdl, "Bat update: unsupport lb mode=%u\n", + cqm_handle->func_capability.lb_mode); + ret = CQM_FAIL; + } + +out: + cqm_cmd_free((void *)(cqm_handle->ex_handle), buf_in); + return ret; +} + +static s32 cqm_bat_init_ft(struct tag_cqm_handle *cqm_handle, struct tag_cqm_bat_table *bat_table, + enum func_type function_type) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + u32 i = 0; + + bat_table->bat_entry_type[CQM_BAT_INDEX0] = CQM_BAT_ENTRY_T_CFG; + bat_table->bat_entry_type[CQM_BAT_INDEX1] = CQM_BAT_ENTRY_T_HASH; + bat_table->bat_entry_type[CQM_BAT_INDEX2] = CQM_BAT_ENTRY_T_QPC; + bat_table->bat_entry_type[CQM_BAT_INDEX3] = CQM_BAT_ENTRY_T_SCQC; + bat_table->bat_entry_type[CQM_BAT_INDEX4] = CQM_BAT_ENTRY_T_LUN; + bat_table->bat_entry_type[CQM_BAT_INDEX5] = CQM_BAT_ENTRY_T_TASKMAP; + + if (function_type == CQM_PF || function_type == CQM_PPF) { + bat_table->bat_entry_type[CQM_BAT_INDEX6] = CQM_BAT_ENTRY_T_L3I; + bat_table->bat_entry_type[CQM_BAT_INDEX7] = CQM_BAT_ENTRY_T_CHILDC; + bat_table->bat_entry_type[CQM_BAT_INDEX8] = CQM_BAT_ENTRY_T_TIMER; + bat_table->bat_entry_type[CQM_BAT_INDEX9] = CQM_BAT_ENTRY_T_XID2CID; + bat_table->bat_entry_type[CQM_BAT_INDEX10] = CQM_BAT_ENTRY_T_REORDER; + bat_table->bat_size = CQM_BAT_SIZE_FT_PF; + } else if (function_type == CQM_VF) { + bat_table->bat_size = CQM_BAT_SIZE_FT_VF; + } else { + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) + bat_table->bat_entry_type[i] = CQM_BAT_ENTRY_T_INVALID; + + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(function_type)); + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +static s32 cqm_bat_init_rdma(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_bat_table *bat_table, + enum func_type function_type) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + u32 i = 0; + + bat_table->bat_entry_type[CQM_BAT_INDEX0] = CQM_BAT_ENTRY_T_QPC; + bat_table->bat_entry_type[CQM_BAT_INDEX1] = CQM_BAT_ENTRY_T_SCQC; + bat_table->bat_entry_type[CQM_BAT_INDEX2] = CQM_BAT_ENTRY_T_SRQC; + bat_table->bat_entry_type[CQM_BAT_INDEX3] = CQM_BAT_ENTRY_T_MPT; + bat_table->bat_entry_type[CQM_BAT_INDEX4] = CQM_BAT_ENTRY_T_GID; + + if (function_type == CQM_PF || function_type == CQM_PPF) { + bat_table->bat_entry_type[CQM_BAT_INDEX5] = CQM_BAT_ENTRY_T_L3I; + bat_table->bat_entry_type[CQM_BAT_INDEX6] = + CQM_BAT_ENTRY_T_CHILDC; + bat_table->bat_entry_type[CQM_BAT_INDEX7] = + CQM_BAT_ENTRY_T_TIMER; + bat_table->bat_entry_type[CQM_BAT_INDEX8] = + CQM_BAT_ENTRY_T_XID2CID; + bat_table->bat_entry_type[CQM_BAT_INDEX9] = + CQM_BAT_ENTRY_T_REORDER; + bat_table->bat_size = CQM_BAT_SIZE_RDMA_PF; + } else if (function_type == CQM_VF) { + bat_table->bat_size = 
CQM_BAT_SIZE_RDMA_VF; + } else { + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) + bat_table->bat_entry_type[i] = CQM_BAT_ENTRY_T_INVALID; + + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(function_type)); + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +static s32 cqm_bat_init_ft_rdma(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_bat_table *bat_table, + enum func_type function_type) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + u32 i = 0; + + bat_table->bat_entry_type[CQM_BAT_INDEX0] = CQM_BAT_ENTRY_T_CFG; + bat_table->bat_entry_type[CQM_BAT_INDEX1] = CQM_BAT_ENTRY_T_HASH; + bat_table->bat_entry_type[CQM_BAT_INDEX2] = CQM_BAT_ENTRY_T_QPC; + bat_table->bat_entry_type[CQM_BAT_INDEX3] = CQM_BAT_ENTRY_T_SCQC; + bat_table->bat_entry_type[CQM_BAT_INDEX4] = CQM_BAT_ENTRY_T_SRQC; + bat_table->bat_entry_type[CQM_BAT_INDEX5] = CQM_BAT_ENTRY_T_MPT; + bat_table->bat_entry_type[CQM_BAT_INDEX6] = CQM_BAT_ENTRY_T_GID; + bat_table->bat_entry_type[CQM_BAT_INDEX7] = CQM_BAT_ENTRY_T_LUN; + bat_table->bat_entry_type[CQM_BAT_INDEX8] = CQM_BAT_ENTRY_T_TASKMAP; + + if (function_type == CQM_PF || function_type == CQM_PPF) { + bat_table->bat_entry_type[CQM_BAT_INDEX9] = CQM_BAT_ENTRY_T_L3I; + bat_table->bat_entry_type[CQM_BAT_INDEX10] = + CQM_BAT_ENTRY_T_CHILDC; + bat_table->bat_entry_type[CQM_BAT_INDEX11] = + CQM_BAT_ENTRY_T_TIMER; + bat_table->bat_entry_type[CQM_BAT_INDEX12] = + CQM_BAT_ENTRY_T_XID2CID; + bat_table->bat_entry_type[CQM_BAT_INDEX13] = + CQM_BAT_ENTRY_T_REORDER; + bat_table->bat_size = CQM_BAT_SIZE_FT_RDMA_PF; + } else if (function_type == CQM_VF) { + bat_table->bat_size = CQM_BAT_SIZE_FT_RDMA_VF; + } else { + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) + bat_table->bat_entry_type[i] = CQM_BAT_ENTRY_T_INVALID; + + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(function_type)); + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +/** + * Prototype : cqm_bat_init + * Description : Initialize the BAT table. Only the items to be initialized and + * the entry sequence are selected. The content of the BAT entry + * is filled after the CLA is allocated. + * Input : struct tag_cqm_handle *cqm_handle + * Output : None + * Return Value : s32 + * 1.Date : 2015/5/15 + * Modification : Created function + */ +s32 cqm_bat_init(struct tag_cqm_handle *cqm_handle) +{ + struct tag_cqm_func_capability *capability = &cqm_handle->func_capability; + enum func_type function_type = cqm_handle->func_attribute.func_type; + struct tag_cqm_bat_table *bat_table = &cqm_handle->bat_table; + u32 i; + + memset(bat_table, 0, sizeof(struct tag_cqm_bat_table)); + + /* Initialize the type of each bat entry. */ + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) + bat_table->bat_entry_type[i] = CQM_BAT_ENTRY_T_INVALID; + + /* Select BATs based on service types. Currently, + * feature-related resources of the VF are stored in the BATs of the VF. + */ + if (capability->ft_enable && capability->rdma_enable) + return cqm_bat_init_ft_rdma(cqm_handle, bat_table, function_type); + else if (capability->ft_enable) + return cqm_bat_init_ft(cqm_handle, bat_table, function_type); + else if (capability->rdma_enable) + return cqm_bat_init_rdma(cqm_handle, bat_table, function_type); + + return CQM_SUCCESS; +} + +/** + * Prototype : cqm_bat_uninit + * Description : Deinitialize the BAT table. 
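+ * The local BAT copy is zeroed and cqm_bat_update() then pushes the
+ * cleared table to the chip.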
+ * Input : struct tag_cqm_handle *cqm_handle + * Output : None + * Return Value : void + * 1.Date : 2015/5/15 + * Modification : Created function + */ +void cqm_bat_uninit(struct tag_cqm_handle *cqm_handle) +{ + struct tag_cqm_bat_table *bat_table = &cqm_handle->bat_table; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + u32 i; + + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) + bat_table->bat_entry_type[i] = CQM_BAT_ENTRY_T_INVALID; + + memset(bat_table->bat, 0, CQM_BAT_ENTRY_MAX * CQM_BAT_ENTRY_SIZE); + + /* Instruct the chip to update the BAT table. */ + if (cqm_bat_update(cqm_handle) != CQM_SUCCESS) + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bat_update)); +} + +static s32 cqm_cla_fill_buf(struct tag_cqm_handle *cqm_handle, struct tag_cqm_buf *cla_base_buf, + struct tag_cqm_buf *cla_sub_buf, u8 gpa_check_enable) +{ + struct hinic3_func_attr *func_attr = NULL; + dma_addr_t *base = NULL; + u64 fake_en = 0; + u64 spu_en = 0; + u64 pf_id = 0; + u32 i = 0; + u32 addr_num; + u32 buf_index = 0; + + /* Apply for space for base_buf */ + if (!cla_base_buf->buf_list) { + if (cqm_buf_alloc(cqm_handle, cla_base_buf, false) == CQM_FAIL) + return CQM_FAIL; + } + + /* Apply for space for sub_buf */ + if (!cla_sub_buf->buf_list) { + if (cqm_buf_alloc(cqm_handle, cla_sub_buf, false) == CQM_FAIL) { + cqm_buf_free(cla_base_buf, cqm_handle); + return CQM_FAIL; + } + } + + /* Fill base_buff with the gpa of sub_buf */ + addr_num = cla_base_buf->buf_size / sizeof(dma_addr_t); + base = (dma_addr_t *)(cla_base_buf->buf_list[0].va); + for (i = 0; i < cla_sub_buf->buf_number; i++) { + /* The SPU SMF supports load balancing from the SMF to the CPI, + * depending on the host ID and func ID. + */ + if (hinic3_host_id(cqm_handle->ex_handle) == CQM_SPU_HOST_ID) { + func_attr = &cqm_handle->func_attribute; + spu_en = (u64)(func_attr->func_global_idx & 0x1) << 0x3F; + } else { + spu_en = 0; + } + + /* fake enable */ + if (cqm_handle->func_capability.fake_func_type == + CQM_FAKE_FUNC_CHILD) { + fake_en = 1ULL << 0x3E; + func_attr = + &cqm_handle->parent_cqm_handle->func_attribute; + pf_id = func_attr->func_global_idx; + pf_id = (pf_id & 0x1f) << 0x39; + } else { + fake_en = 0; + pf_id = 0; + } + + *base = (dma_addr_t)((((((u64)(cla_sub_buf->buf_list[i].pa) & CQM_CHIP_GPA_MASK) | + spu_en) | + fake_en) | + pf_id) | + gpa_check_enable); + + cqm_swab64((u8 *)base, 1); + if ((i + 1) % addr_num == 0) { + buf_index++; + if (buf_index < cla_base_buf->buf_number) + base = cla_base_buf->buf_list[buf_index].va; + } else { + base++; + } + } + + return CQM_SUCCESS; +} + +static s32 cqm_cla_xyz_lvl1(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_cla_table *cla_table, + u32 trunk_size) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_buf *cla_y_buf = NULL; + struct tag_cqm_buf *cla_z_buf = NULL; + s32 shift = 0; + s32 ret = CQM_FAIL; + u8 gpa_check_enable = cqm_handle->func_capability.gpa_check_enable; + u32 cache_line = 0; + + /* The cacheline of the timer is changed to 512. */ + if (cla_table->type == CQM_BAT_ENTRY_T_TIMER) + cache_line = CQM_CHIP_TIMER_CACHELINE; + else + cache_line = CQM_CHIP_CACHELINE; + + if (cla_table->type == CQM_BAT_ENTRY_T_REORDER) + gpa_check_enable = 0; + + cla_table->cla_lvl = CQM_CLA_LVL_1; + + shift = cqm_shift(trunk_size / cla_table->obj_size); + cla_table->z = (u32)(shift ? 
(shift - 1) : (shift)); + cla_table->y = CQM_MAX_INDEX_BIT; + cla_table->x = 0; + + cqm_dbg("cla_table->obj_size = %d, cache_line = %d", + cla_table->obj_size, cache_line); + if (cla_table->obj_size >= cache_line) { + cla_table->cacheline_z = cla_table->z; + cla_table->cacheline_y = cla_table->y; + cla_table->cacheline_x = cla_table->x; + } else { + shift = cqm_shift(trunk_size / cache_line); + cla_table->cacheline_z = (u32)(shift ? (shift - 1) : (shift)); + cla_table->cacheline_y = CQM_MAX_INDEX_BIT; + cla_table->cacheline_x = 0; + } + + /* Applying for CLA_Y_BUF Space */ + cla_y_buf = &cla_table->cla_y_buf; + cla_y_buf->buf_size = trunk_size; + cla_y_buf->buf_number = 1; + cla_y_buf->page_number = cla_y_buf->buf_number << + cla_table->trunk_order; + + ret = cqm_buf_alloc(cqm_handle, cla_y_buf, false); + if (ret != CQM_SUCCESS) + return CQM_FAIL; + + /* Applying for CLA_Z_BUF Space */ + cla_z_buf = &cla_table->cla_z_buf; + cla_z_buf->buf_size = trunk_size; + cla_z_buf->buf_number = (ALIGN(cla_table->max_buffer_size, trunk_size)) / trunk_size; + cla_z_buf->page_number = cla_z_buf->buf_number << cla_table->trunk_order; + + /* All buffer space must be statically allocated. */ + if (cla_table->alloc_static) { + ret = cqm_cla_fill_buf(cqm_handle, cla_y_buf, cla_z_buf, + gpa_check_enable); + if (unlikely(ret != CQM_SUCCESS)) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_fill_buf)); + return CQM_FAIL; + } + } else { /* Only the buffer list space is initialized. The buffer space + * is dynamically allocated in services. + */ + cla_z_buf->buf_list = vmalloc(cla_z_buf->buf_number * + sizeof(struct tag_cqm_buf_list)); + if (!cla_z_buf->buf_list) { + cqm_buf_free(cla_y_buf, cqm_handle); + return CQM_FAIL; + } + memset(cla_z_buf->buf_list, 0, + cla_z_buf->buf_number * sizeof(struct tag_cqm_buf_list)); + } + + return CQM_SUCCESS; +} + +static void cqm_cla_xyz_lvl2_param_init(struct tag_cqm_cla_table *cla_table, u32 trunk_size) +{ + s32 shift = 0; + u32 cache_line = 0; + + /* The cacheline of the timer is changed to 512. */ + if (cla_table->type == CQM_BAT_ENTRY_T_TIMER) + cache_line = CQM_CHIP_TIMER_CACHELINE; + else + cache_line = CQM_CHIP_CACHELINE; + + cla_table->cla_lvl = CQM_CLA_LVL_2; + + shift = cqm_shift(trunk_size / cla_table->obj_size); + cla_table->z = (u32)(shift ? (shift - 1) : (shift)); + shift = cqm_shift(trunk_size / sizeof(dma_addr_t)); + cla_table->y = cla_table->z + shift; + cla_table->x = CQM_MAX_INDEX_BIT; + + if (cla_table->obj_size >= cache_line) { + cla_table->cacheline_z = cla_table->z; + cla_table->cacheline_y = cla_table->y; + cla_table->cacheline_x = cla_table->x; + } else { + shift = cqm_shift(trunk_size / cache_line); + cla_table->cacheline_z = (u32)(shift ? 
(shift - 1) : (shift)); + shift = cqm_shift(trunk_size / sizeof(dma_addr_t)); + cla_table->cacheline_y = cla_table->cacheline_z + shift; + cla_table->cacheline_x = CQM_MAX_INDEX_BIT; + } +} + +static s32 cqm_cla_xyz_lvl2_xyz_apply(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_cla_table *cla_table, u32 trunk_size) +{ + struct tag_cqm_buf *cla_x_buf = NULL; + struct tag_cqm_buf *cla_y_buf = NULL; + struct tag_cqm_buf *cla_z_buf = NULL; + s32 ret = CQM_FAIL; + + /* Apply for CLA_X_BUF Space */ + cla_x_buf = &cla_table->cla_x_buf; + cla_x_buf->buf_size = trunk_size; + cla_x_buf->buf_number = 1; + cla_x_buf->page_number = cla_x_buf->buf_number << cla_table->trunk_order; + ret = cqm_buf_alloc(cqm_handle, cla_x_buf, false); + if (ret != CQM_SUCCESS) + return CQM_FAIL; + + /* Apply for CLA_Z_BUF and CLA_Y_BUF Space */ + cla_z_buf = &cla_table->cla_z_buf; + cla_z_buf->buf_size = trunk_size; + cla_z_buf->buf_number = (ALIGN(cla_table->max_buffer_size, trunk_size)) / trunk_size; + cla_z_buf->page_number = cla_z_buf->buf_number << cla_table->trunk_order; + + cla_y_buf = &cla_table->cla_y_buf; + cla_y_buf->buf_size = trunk_size; + cla_y_buf->buf_number = + (u32)(ALIGN(cla_z_buf->buf_number * sizeof(dma_addr_t), trunk_size)) / trunk_size; + cla_y_buf->page_number = cla_y_buf->buf_number << cla_table->trunk_order; + + return 0; +} + +static s32 cqm_cla_xyz_vram_name_init(struct tag_cqm_cla_table *cla_table, + struct hinic3_hwdev *handle) +{ + struct tag_cqm_buf *cla_x_buf = NULL; + struct tag_cqm_buf *cla_y_buf = NULL; + struct tag_cqm_buf *cla_z_buf = NULL; + + cla_x_buf = &cla_table->cla_x_buf; + cla_z_buf = &cla_table->cla_z_buf; + cla_y_buf = &cla_table->cla_y_buf; + snprintf(cla_x_buf->buf_info.buf_vram_name, + VRAM_NAME_MAX_LEN - 1, "%s%s", cla_table->name, VRAM_CQM_CLA_COORD_X); + + snprintf(cla_y_buf->buf_info.buf_vram_name, + VRAM_NAME_MAX_LEN - 1, "%s%s", cla_table->name, VRAM_CQM_CLA_COORD_Y); + + snprintf(cla_z_buf->buf_info.buf_vram_name, + VRAM_NAME_MAX_LEN - 1, "%s%s", cla_table->name, VRAM_CQM_CLA_COORD_Z); + + return CQM_SUCCESS; +} + +static s32 cqm_cla_xyz_lvl2(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_cla_table *cla_table, u32 trunk_size) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_buf *cla_x_buf = NULL; + struct tag_cqm_buf *cla_y_buf = NULL; + struct tag_cqm_buf *cla_z_buf = NULL; + s32 ret = CQM_FAIL; + u8 gpa_check_enable = cqm_handle->func_capability.gpa_check_enable; + + cqm_cla_xyz_lvl2_param_init(cla_table, trunk_size); + + ret = cqm_cla_xyz_lvl2_xyz_apply(cqm_handle, cla_table, trunk_size); + if (ret) + return ret; + + cla_x_buf = &cla_table->cla_x_buf; + cla_z_buf = &cla_table->cla_z_buf; + cla_y_buf = &cla_table->cla_y_buf; + + if (cla_table->type == CQM_BAT_ENTRY_T_REORDER) + gpa_check_enable = 0; + + /* All buffer space must be statically allocated. */ + if (cla_table->alloc_static) { + /* Apply for y buf and z buf, and fill the gpa of z buf list in y buf */ + if (cqm_cla_fill_buf(cqm_handle, cla_y_buf, cla_z_buf, + gpa_check_enable) == CQM_FAIL) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_fill_buf)); + cqm_buf_free(cla_x_buf, cqm_handle); + return CQM_FAIL; + } + + /* Fill the gpa of the y buf list into the x buf. + * After the x and y bufs are applied for, this function will not fail. + * Use void to forcibly convert the return of the function. + */ + (void)cqm_cla_fill_buf(cqm_handle, cla_x_buf, cla_y_buf, gpa_check_enable); + } else { /* Only the buffer list space is initialized. 
The buffer space + * is dynamically allocated in services. + */ + cla_z_buf->buf_list = vmalloc(cla_z_buf->buf_number * + sizeof(struct tag_cqm_buf_list)); + if (!cla_z_buf->buf_list) { + cqm_buf_free(cla_x_buf, cqm_handle); + return CQM_FAIL; + } + memset(cla_z_buf->buf_list, 0, + cla_z_buf->buf_number * sizeof(struct tag_cqm_buf_list)); + + cla_y_buf->buf_list = vmalloc(cla_y_buf->buf_number * + sizeof(struct tag_cqm_buf_list)); + if (!cla_y_buf->buf_list) { + cqm_buf_free(cla_z_buf, cqm_handle); + cqm_buf_free(cla_x_buf, cqm_handle); + return CQM_FAIL; + } + memset(cla_y_buf->buf_list, 0, + cla_y_buf->buf_number * sizeof(struct tag_cqm_buf_list)); + } + + return CQM_SUCCESS; +} + +static s32 cqm_cla_xyz_check(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_cla_table *cla_table, u32 *size) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + u32 trunk_size = 0; + + /* If the capability(obj_num) is set to 0, the CLA does not need to be + * initialized and exits directly. + */ + if (cla_table->obj_num == 0) { + cqm_info(handle->dev_hdl, + "Cla alloc: cla_type %u, obj_num=0, don't alloc buffer\n", + cla_table->type); + return CQM_SUCCESS; + } + + cqm_info(handle->dev_hdl, + "Cla alloc: cla_type %u, obj_num=0x%x, gpa_check_enable=%d\n", + cla_table->type, cla_table->obj_num, + cqm_handle->func_capability.gpa_check_enable); + + /* Check whether obj_size is 2^n-aligned. An error is reported when + * obj_size is 0 or 1. + */ + if (!cqm_check_align(cla_table->obj_size)) { + cqm_err(handle->dev_hdl, + "Cla alloc: cla_type %u, obj_size 0x%x is not align on 2^n\n", + cla_table->type, cla_table->obj_size); + return CQM_FAIL; + } + + trunk_size = (u32)(PAGE_SIZE << cla_table->trunk_order); + + if (trunk_size < cla_table->obj_size) { + cqm_err(handle->dev_hdl, + "Cla alloc: cla type %u, obj_size 0x%x is out of trunk size\n", + cla_table->type, cla_table->obj_size); + return CQM_FAIL; + } + + *size = trunk_size; + + return CQM_CONTINUE; +} + +static s32 cqm_cla_xyz_lvl0(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_cla_table *cla_table, u32 trunk_size) +{ + struct tag_cqm_buf *cla_z_buf = NULL; + + cla_table->cla_lvl = CQM_CLA_LVL_0; + + cla_table->z = CQM_MAX_INDEX_BIT; + cla_table->y = 0; + cla_table->x = 0; + + cla_table->cacheline_z = cla_table->z; + cla_table->cacheline_y = cla_table->y; + cla_table->cacheline_x = cla_table->x; + + /* Applying for CLA_Z_BUF Space */ + cla_z_buf = &cla_table->cla_z_buf; + cla_z_buf->buf_size = trunk_size; + cla_z_buf->buf_number = 1; + cla_z_buf->page_number = cla_z_buf->buf_number << cla_table->trunk_order; + cla_z_buf->bat_entry_type = cla_table->type; + + return cqm_buf_alloc(cqm_handle, cla_z_buf, false); +} + +/** + * Prototype : cqm_cla_xyz + * Description : Calculate the number of levels of CLA tables and allocate + * space for each level of CLA table. + * Input : struct tag_cqm_handle *cqm_handle + * struct tag_cqm_cla_table *cla_table + * Output : None + * Return Value : s32 + * 1.Date : 2015/5/15 + * Modification : Created function + */ +static s32 cqm_cla_xyz(struct tag_cqm_handle *cqm_handle, struct tag_cqm_cla_table *cla_table) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + u32 trunk_size = 0; + s32 ret = CQM_FAIL; + + ret = cqm_cla_xyz_check(cqm_handle, cla_table, &trunk_size); + if (ret != CQM_CONTINUE) + return ret; + + ret = cqm_cla_xyz_vram_name_init(cla_table, handle); + if (ret != CQM_SUCCESS) + return ret; + + /* Level-0 CLA occupies a small space. + * Only CLA_Z_BUF can be allocated during initialization. 
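+ * Rough capacity example, assuming 4 KB pages and trunk_order 0
+ * (trunk_size = 4 KB, i.e. 512 GPAs of 8 B per trunk): level 0
+ * covers up to 4 KB of objects, level 1 up to 4 KB * 512 = 2 MB,
+ * and level 2 up to 4 KB * 512 * 512 = 1 GB, matching the three
+ * branches below.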
+ */ + cqm_dbg("cla_table->max_buffer_size = %d trunk_size = %d\n", + cla_table->max_buffer_size, trunk_size); + + if (cla_table->max_buffer_size > trunk_size && + cqm_need_secure_mem((void *)handle)) { + trunk_size = roundup(cla_table->max_buffer_size, CQM_SECURE_MEM_ALIGNED_SIZE); + cqm_dbg("[memsec]reset trunk_size = %u\n", trunk_size); + } + + if (cla_table->max_buffer_size <= trunk_size) { + ret = cqm_cla_xyz_lvl0(cqm_handle, cla_table, trunk_size); + if (ret != CQM_SUCCESS) + return CQM_FAIL; + /* Level-1 CLA + * Allocates CLA_Y_BUF and CLA_Z_BUF during initialization. + */ + } else if (cla_table->max_buffer_size <= + (trunk_size * (trunk_size / sizeof(dma_addr_t)))) { + if (cqm_cla_xyz_lvl1(cqm_handle, cla_table, trunk_size) == CQM_FAIL) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_xyz_lvl1)); + return CQM_FAIL; + } + /* Level-2 CLA + * Allocates CLA_X_BUF, CLA_Y_BUF, and CLA_Z_BUF during initialization. + */ + } else if (cla_table->max_buffer_size <= (trunk_size * (trunk_size / sizeof(dma_addr_t)) * + (trunk_size / sizeof(dma_addr_t)))) { + if (cqm_cla_xyz_lvl2(cqm_handle, cla_table, trunk_size) == CQM_FAIL) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_xyz_lvl2)); + return CQM_FAIL; + } + } else { /* The current memory management mode does not support such + * a large buffer addressing. The order value needs to + * be increased. + */ + cqm_err(handle->dev_hdl, + "Cla alloc: cla max_buffer_size 0x%x exceeds support range\n", + cla_table->max_buffer_size); + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +static void cqm_cla_init_entry_normal(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_cla_table *cla_table, + struct tag_cqm_func_capability *capability) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + + switch (cla_table->type) { + case CQM_BAT_ENTRY_T_HASH: + cla_table->trunk_order = capability->pagesize_reorder; + cla_table->max_buffer_size = capability->hash_number * capability->hash_basic_size; + cla_table->obj_size = capability->hash_basic_size; + cla_table->obj_num = capability->hash_number; + cla_table->alloc_static = true; + break; + case CQM_BAT_ENTRY_T_QPC: + cla_table->trunk_order = capability->pagesize_reorder; + cla_table->max_buffer_size = capability->qpc_number * capability->qpc_basic_size; + cla_table->obj_size = capability->qpc_basic_size; + cla_table->obj_num = capability->qpc_number; + cla_table->alloc_static = capability->qpc_alloc_static; + cqm_info(handle->dev_hdl, "Cla alloc: qpc alloc_static=%d\n", + cla_table->alloc_static); + break; + case CQM_BAT_ENTRY_T_MPT: + cla_table->trunk_order = capability->pagesize_reorder; + cla_table->max_buffer_size = capability->mpt_number * + capability->mpt_basic_size; + cla_table->obj_size = capability->mpt_basic_size; + cla_table->obj_num = capability->mpt_number; + cla_table->alloc_static = true; /* CCB decided. MPT uses only + * static application scenarios. 
+ */ + break; + case CQM_BAT_ENTRY_T_SCQC: + cla_table->trunk_order = capability->pagesize_reorder; + cla_table->max_buffer_size = capability->scqc_number * capability->scqc_basic_size; + cla_table->obj_size = capability->scqc_basic_size; + cla_table->obj_num = capability->scqc_number; + cla_table->alloc_static = capability->scqc_alloc_static; + cqm_info(handle->dev_hdl, "Cla alloc: scqc alloc_static=%d\n", + cla_table->alloc_static); + break; + case CQM_BAT_ENTRY_T_SRQC: + cla_table->trunk_order = capability->pagesize_reorder; + cla_table->max_buffer_size = capability->srqc_number * capability->srqc_basic_size; + cla_table->obj_size = capability->srqc_basic_size; + cla_table->obj_num = capability->srqc_number; + cla_table->alloc_static = false; + break; + default: + break; + } +} + +static void cqm_cla_init_entry_extern(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_cla_table *cla_table, + struct tag_cqm_func_capability *capability) +{ + switch (cla_table->type) { + case CQM_BAT_ENTRY_T_GID: + /* Level-0 CLA table required */ + cla_table->max_buffer_size = capability->gid_number * + capability->gid_basic_size; + cla_table->trunk_order = + (u32)cqm_shift(ALIGN(cla_table->max_buffer_size, PAGE_SIZE) / PAGE_SIZE); + cla_table->obj_size = capability->gid_basic_size; + cla_table->obj_num = capability->gid_number; + cla_table->alloc_static = true; + break; + case CQM_BAT_ENTRY_T_LUN: + cla_table->trunk_order = CLA_TABLE_PAGE_ORDER; + cla_table->max_buffer_size = capability->lun_number * + capability->lun_basic_size; + cla_table->obj_size = capability->lun_basic_size; + cla_table->obj_num = capability->lun_number; + cla_table->alloc_static = true; + break; + case CQM_BAT_ENTRY_T_TASKMAP: + cla_table->trunk_order = CQM_4K_PAGE_ORDER; + cla_table->max_buffer_size = capability->taskmap_number * + capability->taskmap_basic_size; + cla_table->obj_size = capability->taskmap_basic_size; + cla_table->obj_num = capability->taskmap_number; + cla_table->alloc_static = true; + break; + case CQM_BAT_ENTRY_T_L3I: + cla_table->trunk_order = CLA_TABLE_PAGE_ORDER; + cla_table->max_buffer_size = capability->l3i_number * + capability->l3i_basic_size; + cla_table->obj_size = capability->l3i_basic_size; + cla_table->obj_num = capability->l3i_number; + cla_table->alloc_static = true; + break; + case CQM_BAT_ENTRY_T_CHILDC: + cla_table->trunk_order = capability->pagesize_reorder; + cla_table->max_buffer_size = capability->childc_number * + capability->childc_basic_size; + cla_table->obj_size = capability->childc_basic_size; + cla_table->obj_num = capability->childc_number; + cla_table->alloc_static = true; + break; + case CQM_BAT_ENTRY_T_TIMER: + /* Ensure that the basic size of the timer buffer page does not + * exceed 128 x 4 KB. Otherwise, clearing the timer buffer of + * the function is complex. 
+ */ + cla_table->trunk_order = CQM_4K_PAGE_ORDER; + cla_table->max_buffer_size = capability->timer_number * + capability->timer_basic_size; + cla_table->obj_size = capability->timer_basic_size; + cla_table->obj_num = capability->timer_number; + cla_table->alloc_static = true; + break; + case CQM_BAT_ENTRY_T_XID2CID: + cla_table->trunk_order = capability->pagesize_reorder; + cla_table->max_buffer_size = capability->xid2cid_number * + capability->xid2cid_basic_size; + cla_table->obj_size = capability->xid2cid_basic_size; + cla_table->obj_num = capability->xid2cid_number; + cla_table->alloc_static = true; + break; + case CQM_BAT_ENTRY_T_REORDER: + /* This entry supports only IWARP and does not support GPA + * validity check. + */ + cla_table->trunk_order = capability->pagesize_reorder; + cla_table->max_buffer_size = capability->reorder_number * + capability->reorder_basic_size; + cla_table->obj_size = capability->reorder_basic_size; + cla_table->obj_num = capability->reorder_number; + cla_table->alloc_static = true; + break; + default: + break; + } +} + +static s32 cqm_cla_init_entry_condition(struct tag_cqm_handle *cqm_handle, u32 entry_type) +{ + struct tag_cqm_bat_table *bat_table = &cqm_handle->bat_table; + struct tag_cqm_cla_table *cla_table = &bat_table->entry[entry_type]; + struct tag_cqm_cla_table *cla_table_timer = NULL; + u32 i; + + /* When the timer is in LB mode 1 or 2, the timer needs to be + * configured for four SMFs and the address space is independent. + */ + if (cla_table->type == CQM_BAT_ENTRY_T_TIMER && + (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_1 || + cqm_handle->func_capability.lb_mode == CQM_LB_MODE_2)) { + for (i = 0; i < CQM_LB_SMF_MAX; i++) { + cla_table_timer = &bat_table->timer_entry[i]; + memcpy(cla_table_timer, cla_table, sizeof(struct tag_cqm_cla_table)); + + snprintf(cla_table_timer->name, + VRAM_NAME_MAX_LEN - 1, "%s%s%01u", cla_table->name, + VRAM_CQM_CLA_SMF_BASE, i); + + if (cqm_cla_xyz(cqm_handle, cla_table_timer) == + CQM_FAIL) { + cqm_cla_uninit(cqm_handle, entry_type); + return CQM_FAIL; + } + } + return CQM_SUCCESS; + } + + if (cqm_cla_xyz(cqm_handle, cla_table) == CQM_FAIL) { + cqm_cla_uninit(cqm_handle, entry_type); + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +static s32 cqm_cla_init_entry(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_func_capability *capability) +{ + struct tag_cqm_bat_table *bat_table = &cqm_handle->bat_table; + struct tag_cqm_cla_table *cla_table = NULL; + s32 ret; + u32 i = 0; + + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { + cla_table = &bat_table->entry[i]; + cla_table->type = bat_table->bat_entry_type[i]; + snprintf(cla_table->name, VRAM_NAME_MAX_LEN - 1, + "%s%s%s%02u", cqm_handle->name, VRAM_CQM_CLA_BASE, + VRAM_CQM_CLA_TYPE_BASE, cla_table->type); + + cqm_cla_init_entry_normal(cqm_handle, cla_table, capability); + cqm_cla_init_entry_extern(cqm_handle, cla_table, capability); + + /* Allocate CLA entry space at each level. */ + if (cla_table->type < CQM_BAT_ENTRY_T_HASH || + cla_table->type > CQM_BAT_ENTRY_T_REORDER) { + mutex_init(&cla_table->lock); + continue; + } + + /* For the PPF, resources (8 wheels x 2k scales x 32B x + * func_num) need to be applied for to the timer. The + * structure of the timer entry in the BAT table needs + * to be filled. For the PF, no resource needs to be + * applied for the timer and no structure needs to be + * filled in the timer entry in the BAT table. 
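+ * With the figures above that is roughly 8 * 2048 * 32 B = 512 KB
+ * per function before multiplying by func_num (illustrative
+ * arithmetic only).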
+ */ + if (!(cla_table->type == CQM_BAT_ENTRY_T_TIMER && + cqm_handle->func_attribute.func_type != CQM_PPF)) { + ret = cqm_cla_init_entry_condition(cqm_handle, i); + if (ret != CQM_SUCCESS) + return CQM_FAIL; + cqm_dbg("~~~~cla_table->type = %d\n", cla_table->type); + } + cqm_dbg("****cla_table->type = %d\n", cla_table->type); + mutex_init(&cla_table->lock); + } + + return CQM_SUCCESS; +} + +/** + * Prototype : cqm_cla_init + * Description : Initialize the CLA table. + * Input : struct tag_cqm_handle *cqm_handle + * Output : None + * Return Value : s32 + * 1.Date : 2015/5/15 + * Modification : Created function + */ +s32 cqm_cla_init(struct tag_cqm_handle *cqm_handle) +{ + struct tag_cqm_func_capability *capability = &cqm_handle->func_capability; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + s32 ret; + + /* Applying for CLA Entries */ + ret = cqm_cla_init_entry(cqm_handle, capability); + if (ret != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_init_entry)); + return ret; + } + + /* After the CLA entry is applied, the address is filled + * in the BAT table. + */ + cqm_bat_fill_cla(cqm_handle); + + /* Instruct the chip to update the BAT table. */ + ret = cqm_bat_update(cqm_handle); + if (ret != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bat_update)); + goto err; + } + + cqm_info(handle->dev_hdl, "Timer start: func_type=%d, timer_enable=%u\n", + cqm_handle->func_attribute.func_type, + cqm_handle->func_capability.timer_enable); + + if (cqm_handle->func_attribute.func_type == CQM_PPF) { + ret = hinic3_ppf_ht_gpa_init(handle); + if (ret) { + cqm_err(handle->dev_hdl, "PPF ht gpa init fail!\n"); + goto err; + } + + if (cqm_handle->func_capability.timer_enable == + CQM_TIMER_ENABLE) { + /* Enable the timer after the timer resources are applied for */ + cqm_info(handle->dev_hdl, "PPF timer start\n"); + ret = hinic3_ppf_tmr_start(handle); + if (ret != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, "PPF timer start, ret=%d\n", ret); + goto err1; + } + } + } + + return CQM_SUCCESS; +err1: + hinic3_ppf_ht_gpa_deinit(handle); +err: + cqm_cla_uninit(cqm_handle, CQM_BAT_ENTRY_MAX); + return CQM_FAIL; +} + +/** + * Prototype : cqm_cla_uninit + * Description : Deinitialize the CLA table. + * Input : struct tag_cqm_handle *cqm_handle + * Output : None + * Return Value : void + * 1.Date : 2015/5/15 + * Modification : Created function + */ +void cqm_cla_uninit(struct tag_cqm_handle *cqm_handle, u32 entry_numb) +{ + struct tag_cqm_bat_table *bat_table = &cqm_handle->bat_table; + struct tag_cqm_cla_table *cla_table = NULL; + s32 inv_flag = 0; + u32 i; + + for (i = 0; i < entry_numb; i++) { + cla_table = &bat_table->entry[i]; + if (cla_table->type != CQM_BAT_ENTRY_T_INVALID) { + cqm_buf_free_cache_inv(cqm_handle, + &cla_table->cla_x_buf, + &inv_flag); + cqm_buf_free_cache_inv(cqm_handle, + &cla_table->cla_y_buf, + &inv_flag); + cqm_buf_free_cache_inv(cqm_handle, + &cla_table->cla_z_buf, + &inv_flag); + } + mutex_deinit(&cla_table->lock); + } + + /* When the lb mode is 1/2, the timer space allocated to the 4 SMFs + * needs to be released. 
+ */ + if (cqm_handle->func_attribute.func_type == CQM_PPF && + (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_1 || + cqm_handle->func_capability.lb_mode == CQM_LB_MODE_2)) { + for (i = 0; i < CQM_LB_SMF_MAX; i++) { + cla_table = &bat_table->timer_entry[i]; + cqm_buf_free_cache_inv(cqm_handle, + &cla_table->cla_x_buf, + &inv_flag); + cqm_buf_free_cache_inv(cqm_handle, + &cla_table->cla_y_buf, + &inv_flag); + cqm_buf_free_cache_inv(cqm_handle, + &cla_table->cla_z_buf, + &inv_flag); + mutex_deinit(&cla_table->lock); + } + } +} + +static s32 cqm_cla_update_cmd(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_cmd_buf *buf_in, + struct tag_cqm_cla_update_cmd *cmd) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_cla_update_cmd *cla_update_cmd = NULL; + s32 ret = CQM_FAIL; + + cla_update_cmd = (struct tag_cqm_cla_update_cmd *)(buf_in->buf); + + cla_update_cmd->gpa_h = cmd->gpa_h; + cla_update_cmd->gpa_l = cmd->gpa_l; + cla_update_cmd->value_h = cmd->value_h; + cla_update_cmd->value_l = cmd->value_l; + cla_update_cmd->smf_id = cmd->smf_id; + cla_update_cmd->func_id = cmd->func_id; + + cqm_swab32((u8 *)cla_update_cmd, + (sizeof(struct tag_cqm_cla_update_cmd) >> CQM_DW_SHIFT)); + + ret = cqm_send_cmd_box((void *)(cqm_handle->ex_handle), CQM_MOD_CQM, + CQM_CMD_T_CLA_UPDATE, buf_in, NULL, NULL, + CQM_CMD_TIMEOUT, HINIC3_CHANNEL_DEFAULT); + if (ret != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_send_cmd_box)); + cqm_err(handle->dev_hdl, "Cla alloc: cqm_cla_update, cqm_send_cmd_box_ret=%d\n", + ret); + cqm_err(handle->dev_hdl, + "Cla alloc: cqm_cla_update, cla_update_cmd: 0x%x 0x%x 0x%x 0x%x\n", + cmd->gpa_h, cmd->gpa_l, cmd->value_h, cmd->value_l); + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +/** + * Prototype : cqm_cla_update + * Description : Send a command to update the CLA table. + * Input : struct tag_cqm_handle *cqm_handle, + * struct tag_cqm_buf_list *buf_node_parent parent node of the content to + * be updated + * struct tag_cqm_buf_list *buf_node_child Subnode for which the buffer + * is to be applied + * u32 child_index Index of a child node. + * Output : None + * Return Value : s32 + * 1.Date : 2015/5/15 + * Modification : Created function + */ +static s32 cqm_cla_update(struct tag_cqm_handle *cqm_handle, + const struct tag_cqm_buf_list *buf_node_parent, + const struct tag_cqm_buf_list *buf_node_child, + u32 child_index, u8 cla_update_mode) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_cmd_buf *buf_in = NULL; + struct tag_cqm_cla_update_cmd cmd; + dma_addr_t pa = 0; + s32 ret = CQM_FAIL; + u8 gpa_check_enable = cqm_handle->func_capability.gpa_check_enable; + u32 i = 0; + u64 spu_en; + + buf_in = cqm_cmd_alloc(cqm_handle->ex_handle); + if (!buf_in) + return CQM_FAIL; + buf_in->size = sizeof(struct tag_cqm_cla_update_cmd); + + /* Fill command format, convert to big endian. */ + /* SPU function sets bit63: acs_spu_en based on function id. 
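+ * For example, an odd func_global_idx on the SPU host yields
+ * spu_en = 1ULL << 63 below, while an even one leaves spu_en = 0.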
*/ + if (hinic3_host_id(cqm_handle->ex_handle) == CQM_SPU_HOST_ID) + spu_en = ((u64)(cqm_handle->func_attribute.func_global_idx & + 0x1)) << 0x3F; + else + spu_en = 0; + + pa = ((buf_node_parent->pa + (child_index * sizeof(dma_addr_t))) | + (u32)spu_en); + cmd.gpa_h = CQM_ADDR_HI(pa); + cmd.gpa_l = CQM_ADDR_LW(pa); + + pa = (buf_node_child->pa | (u32)spu_en); + cmd.value_h = CQM_ADDR_HI(pa); + cmd.value_l = CQM_ADDR_LW(pa); + + cqm_dbg("Cla alloc: %s, gpa=0x%x 0x%x, value=0x%x 0x%x, cla_update_mode=0x%x\n", + __func__, cmd.gpa_h, cmd.gpa_l, cmd.value_h, cmd.value_l, + cla_update_mode); + + /* current CLA GPA CHECK */ + if (gpa_check_enable) { + switch (cla_update_mode) { + /* gpa[0]=1 means this GPA is valid */ + case CQM_CLA_RECORD_NEW_GPA: + cmd.value_l |= 1; + break; + /* gpa[0]=0 means this GPA is valid */ + case CQM_CLA_DEL_GPA_WITHOUT_CACHE_INVALID: + case CQM_CLA_DEL_GPA_WITH_CACHE_INVALID: + cmd.value_l &= (~1); + break; + default: + cqm_err(handle->dev_hdl, + "Cla alloc: %s, wrong cla_update_mode=%u\n", + __func__, cla_update_mode); + break; + } + } + + /* Todo: The following code is the same as that in the bat update and + * needs to be reconstructed. + */ + /* In non-fake mode, set func_id to 0xffff. + * Indicates the current func fake mode, set func_id to the + * specified value, This is a fake func_id. + */ + if (cqm_handle->func_capability.fake_func_type == CQM_FAKE_FUNC_CHILD) + cmd.func_id = cqm_handle->func_attribute.func_global_idx; + else + cmd.func_id = 0xffff; + + /* Normal mode is 1822 traditional mode and is configured on SMF0. */ + /* Mode 0 is hashed to 4 SMF engines (excluding PPF) by func ID. */ + if (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_NORMAL || + (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_0 && + cqm_handle->func_attribute.func_type != CQM_PPF)) { + cmd.smf_id = cqm_funcid2smfid(cqm_handle); + ret = cqm_cla_update_cmd(cqm_handle, buf_in, &cmd); + /* Modes 1/2 are allocated to four SMF engines by flow. + * Therefore, one function needs to be allocated to four SMF engines. + */ + /* Mode 0 PPF needs to be configured on 4 engines, + * and the timer resources need to be shared by the 4 engines. + */ + } else if (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_1 || + cqm_handle->func_capability.lb_mode == CQM_LB_MODE_2 || + (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_0 && + cqm_handle->func_attribute.func_type == CQM_PPF)) { + for (i = 0; i < CQM_LB_SMF_MAX; i++) { + /* The smf_pg variable stores currently enabled SMF. */ + if (cqm_handle->func_capability.smf_pg & (1U << i)) { + cmd.smf_id = i; + ret = cqm_cla_update_cmd(cqm_handle, buf_in, + &cmd); + if (ret != CQM_SUCCESS) + goto out; + } + } + } else { + cqm_err(handle->dev_hdl, "Cla update: unsupport lb mode=%u\n", + cqm_handle->func_capability.lb_mode); + ret = CQM_FAIL; + } + +out: + cqm_cmd_free((void *)(cqm_handle->ex_handle), buf_in); + return ret; +} + +/** + * Prototype : cqm_cla_alloc + * Description : Trunk page for applying for a CLA. 
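+ * In other words: allocate one trunk page, DMA-map it, and notify
+ * the chip of the new GPA via cqm_cla_update().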
+ * Input : struct tag_cqm_handle *cqm_handle, + * struct tag_cqm_cla_table *cla_table, + * struct tag_cqm_buf_list *buf_node_parent parent node of the content to + * be updated + * struct tag_cqm_buf_list *buf_node_child subnode for which the buffer + * is to be applied + * u32 child_index index of a child node + * Output : None + * Return Value : s32 + * 1.Date : 2015/5/15 + * Modification : Created function + */ +static s32 cqm_cla_alloc(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_cla_table *cla_table, + struct tag_cqm_buf_list *buf_node_parent, + struct tag_cqm_buf_list *buf_node_child, u32 child_index) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + s32 ret = CQM_FAIL; + + /* Apply for trunk page */ + buf_node_child->va = (u8 *)ossl_get_free_pages(GFP_KERNEL | __GFP_ZERO, + cla_table->trunk_order); + if (!buf_node_child->va) + return CQM_FAIL; + + /* PCI mapping */ + buf_node_child->pa = pci_map_single(cqm_handle->dev, buf_node_child->va, + PAGE_SIZE << cla_table->trunk_order, + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(cqm_handle->dev, buf_node_child->pa)) { + cqm_err(handle->dev_hdl, CQM_MAP_FAIL(buf_node_child->pa)); + goto err1; + } + + /* Notify the chip of trunk_pa so that the chip fills in cla entry */ + ret = cqm_cla_update(cqm_handle, buf_node_parent, buf_node_child, + child_index, CQM_CLA_RECORD_NEW_GPA); + if (ret != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_update)); + goto err2; + } + + return CQM_SUCCESS; + +err2: + pci_unmap_single(cqm_handle->dev, buf_node_child->pa, + PAGE_SIZE << cla_table->trunk_order, + PCI_DMA_BIDIRECTIONAL); +err1: + free_pages((ulong)(buf_node_child->va), cla_table->trunk_order); + buf_node_child->va = NULL; + return CQM_FAIL; +} + +/** + * Prototype : cqm_cla_free + * Description : Release trunk page of a CLA + * Input : struct tag_cqm_handle *cqm_handle + * struct tag_cqm_cla_table *cla_table + * struct tag_cqm_buf_list *buf_node + * Output : None + * Return Value : void + * 1.Date : 2015/5/15 + * Modification : Created function + */ +static void cqm_cla_free(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_cla_table *cla_table, + struct tag_cqm_buf_list *buf_node_parent, + struct tag_cqm_buf_list *buf_node_child, + u32 child_index, u8 cla_update_mode) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + u32 trunk_size; + + cqm_dbg("Cla free: cla_update_mode=%u\n", cla_update_mode); + + if (cqm_cla_update(cqm_handle, buf_node_parent, buf_node_child, + child_index, cla_update_mode) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_update)); + return; + } + + if (cla_update_mode == CQM_CLA_DEL_GPA_WITH_CACHE_INVALID) { + trunk_size = (u32)(PAGE_SIZE << cla_table->trunk_order); + if (cqm_cla_cache_invalid(cqm_handle, buf_node_child->pa, + trunk_size) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_cla_cache_invalid)); + return; + } + } + + /* Remove PCI mapping from the trunk page */ + pci_unmap_single(cqm_handle->dev, buf_node_child->pa, + PAGE_SIZE << cla_table->trunk_order, + PCI_DMA_BIDIRECTIONAL); + + /* Rlease trunk page */ + free_pages((ulong)(buf_node_child->va), cla_table->trunk_order); + buf_node_child->va = NULL; +} + +static u8 *cqm_cla_get_unlock_lvl0(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_cla_table *cla_table, + u32 index, u32 count, dma_addr_t *pa) +{ + struct tag_cqm_buf *cla_z_buf = &cla_table->cla_z_buf; + u8 *ret_addr = NULL; + u32 offset = 0; + + /* Level 0 CLA pages are statically allocated. 
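+ * For example (values chosen for illustration), index 10 with an
+ * obj_size of 256 B resolves to byte offset 2560 inside the single,
+ * statically allocated Z buffer.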
*/ + offset = index * cla_table->obj_size; + ret_addr = (u8 *)(cla_z_buf->buf_list->va) + offset; + *pa = cla_z_buf->buf_list->pa + offset; + + return ret_addr; +} + +static u8 *cqm_cla_get_unlock_lvl1(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_cla_table *cla_table, + u32 index, u32 count, dma_addr_t *pa) +{ + struct tag_cqm_buf *cla_y_buf = &cla_table->cla_y_buf; + struct tag_cqm_buf *cla_z_buf = &cla_table->cla_z_buf; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_buf_list *buf_node_y = NULL; + struct tag_cqm_buf_list *buf_node_z = NULL; + u32 y_index = 0; + u32 z_index = 0; + u8 *ret_addr = NULL; + u32 offset = 0; + + z_index = index & ((1U << (cla_table->z + 1)) - 1); + y_index = index >> (cla_table->z + 1); + + if (y_index >= cla_z_buf->buf_number) { + cqm_err(handle->dev_hdl, + "Cla get: index exceeds buf_number, y_index %u, z_buf_number %u\n", + y_index, cla_z_buf->buf_number); + return NULL; + } + buf_node_z = &cla_z_buf->buf_list[y_index]; + buf_node_y = cla_y_buf->buf_list; + + /* The z buf node does not exist, applying for a page first. */ + if (!buf_node_z->va) { + if (cqm_cla_alloc(cqm_handle, cla_table, buf_node_y, buf_node_z, + y_index) == CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_cla_alloc)); + cqm_err(handle->dev_hdl, + "Cla get: cla_table->type=%u\n", + cla_table->type); + return NULL; + } + } + + cqm_dbg("Cla get: 1L: z_refcount=0x%x, count=0x%x\n", + buf_node_z->refcount, count); + buf_node_z->refcount += count; + offset = z_index * cla_table->obj_size; + ret_addr = (u8 *)(buf_node_z->va) + offset; + *pa = buf_node_z->pa + offset; + + return ret_addr; +} + +static u8 *cqm_cla_get_unlock_lvl2(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_cla_table *cla_table, + u32 index, u32 count, dma_addr_t *pa) +{ + struct tag_cqm_buf *cla_x_buf = &cla_table->cla_x_buf; + struct tag_cqm_buf *cla_y_buf = &cla_table->cla_y_buf; + struct tag_cqm_buf *cla_z_buf = &cla_table->cla_z_buf; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_buf_list *buf_node_x = NULL; + struct tag_cqm_buf_list *buf_node_y = NULL; + struct tag_cqm_buf_list *buf_node_z = NULL; + u32 x_index = 0; + u32 y_index = 0; + u32 z_index = 0; + u32 trunk_size = (u32)(PAGE_SIZE << cla_table->trunk_order); + u8 *ret_addr = NULL; + u32 offset = 0; + u64 tmp; + + z_index = index & ((1U << (cla_table->z + 1)) - 1); + y_index = (index >> (cla_table->z + 1)) & + ((1U << (cla_table->y - cla_table->z)) - 1); + x_index = index >> (cla_table->y + 1); + tmp = x_index * (trunk_size / sizeof(dma_addr_t)) + y_index; + + if (x_index >= cla_y_buf->buf_number || tmp >= cla_z_buf->buf_number) { + cqm_err(handle->dev_hdl, + "Cla get: index exceeds buf_number, x %u, y %u, y_buf_n %u, z_buf_n %u\n", + x_index, y_index, cla_y_buf->buf_number, + cla_z_buf->buf_number); + return NULL; + } + + buf_node_x = cla_x_buf->buf_list; + buf_node_y = &cla_y_buf->buf_list[x_index]; + buf_node_z = &cla_z_buf->buf_list[tmp]; + + /* The y buf node does not exist, applying for pages for y node. */ + if (!buf_node_y->va) { + if (cqm_cla_alloc(cqm_handle, cla_table, buf_node_x, buf_node_y, + x_index) == CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_cla_alloc)); + return NULL; + } + } + + /* The z buf node does not exist, applying for pages for z node. 
*/ + if (!buf_node_z->va) { + if (cqm_cla_alloc(cqm_handle, cla_table, buf_node_y, buf_node_z, + y_index) == CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_cla_alloc)); + if (buf_node_y->refcount == 0) + /* To release node Y, cache_invalid is + * required. + */ + cqm_cla_free(cqm_handle, cla_table, buf_node_x, buf_node_y, x_index, + CQM_CLA_DEL_GPA_WITH_CACHE_INVALID); + return NULL; + } + + cqm_dbg("Cla get: 2L: y_refcount=0x%x\n", buf_node_y->refcount); + /* reference counting of the y buffer node needs to increase + * by 1. + */ + buf_node_y->refcount++; + } + + cqm_dbg("Cla get: 2L: z_refcount=0x%x, count=0x%x\n", + buf_node_z->refcount, count); + buf_node_z->refcount += count; + offset = z_index * cla_table->obj_size; + ret_addr = (u8 *)(buf_node_z->va) + offset; + *pa = buf_node_z->pa + offset; + + return ret_addr; +} + +/** + * Prototype : cqm_cla_get_unlock + * Description : Apply for block buffer in number of count from the index + * position in the cla table, The unlocked process is used for + * static buffer application. + * Input : struct tag_cqm_handle *cqm_handle, + * struct tag_cqm_cla_table *cla_table, + * u32 index, + * u32 count, + * dma_addr_t *pa + * Output : None + * Return Value : u8 * + * 1.Date : 2019/7/11 + * Modification : Created function + */ +u8 *cqm_cla_get_unlock(struct tag_cqm_handle *cqm_handle, struct tag_cqm_cla_table *cla_table, + u32 index, u32 count, dma_addr_t *pa) +{ + u8 *ret_addr = NULL; + + if (cla_table->cla_lvl == CQM_CLA_LVL_0) + ret_addr = cqm_cla_get_unlock_lvl0(cqm_handle, cla_table, index, + count, pa); + else if (cla_table->cla_lvl == CQM_CLA_LVL_1) + ret_addr = cqm_cla_get_unlock_lvl1(cqm_handle, cla_table, index, + count, pa); + else + ret_addr = cqm_cla_get_unlock_lvl2(cqm_handle, cla_table, index, + count, pa); + + return ret_addr; +} + +/** + * Prototype : cqm_cla_get_lock + * Description : Apply for block buffer in number of count from the index + * position in the cla table. The lock process is used during + * dynamic buffer application. + * Input : struct tag_cqm_handle *cqm_handle, + * struct tag_cqm_cla_table *cla_table, + * u32 index, + * u32 count, + * dma_addr_t *pa + * Output : None + * Return Value : u8 * + * 1.Date : 2019/7/11 + * Modification : Created function + */ +u8 *cqm_cla_get_lock(struct tag_cqm_handle *cqm_handle, struct tag_cqm_cla_table *cla_table, + u32 index, u32 count, dma_addr_t *pa) +{ + u8 *ret_addr = NULL; + + mutex_lock(&cla_table->lock); + + ret_addr = cqm_cla_get_unlock(cqm_handle, cla_table, index, count, pa); + + mutex_unlock(&cla_table->lock); + + return ret_addr; +} + +/** + * Prototype : cqm_cla_put + * Description : Decrease the value of reference counting on the trunk page. + * If the value is 0, the trunk page is released. 
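+ * Note that tables created with alloc_static (for example hash or
+ * LUN) return immediately; reference counting only applies to
+ * dynamically allocated level-1/level-2 tables.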
+ * Input : struct tag_cqm_handle *cqm_handle, + * struct tag_cqm_cla_table *cla_table, + * u32 index, + * u32 count + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void cqm_cla_put(struct tag_cqm_handle *cqm_handle, struct tag_cqm_cla_table *cla_table, + u32 index, u32 count) +{ + struct tag_cqm_buf *cla_z_buf = &cla_table->cla_z_buf; + struct tag_cqm_buf *cla_y_buf = &cla_table->cla_y_buf; + struct tag_cqm_buf *cla_x_buf = &cla_table->cla_x_buf; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_buf_list *buf_node_z = NULL; + struct tag_cqm_buf_list *buf_node_y = NULL; + struct tag_cqm_buf_list *buf_node_x = NULL; + u32 x_index = 0; + u32 y_index = 0; + u32 trunk_size = (u32)(PAGE_SIZE << cla_table->trunk_order); + u64 tmp; + + /* The buffer is applied statically, and the reference counting + * does not need to be controlled. + */ + if (cla_table->alloc_static) + return; + + mutex_lock(&cla_table->lock); + + if (cla_table->cla_lvl == CQM_CLA_LVL_1) { + y_index = index >> (cla_table->z + 1); + + if (y_index >= cla_z_buf->buf_number) { + cqm_err(handle->dev_hdl, + "Cla put: index exceeds buf_number, y_index %u, z_buf_number %u\n", + y_index, cla_z_buf->buf_number); + cqm_err(handle->dev_hdl, + "Cla put: cla_table->type=%u\n", + cla_table->type); + mutex_unlock(&cla_table->lock); + return; + } + + buf_node_z = &cla_z_buf->buf_list[y_index]; + buf_node_y = cla_y_buf->buf_list; + + /* When the value of reference counting on the z node page is 0, + * the z node page is released. + */ + cqm_dbg("Cla put: 1L: z_refcount=0x%x, count=0x%x\n", + buf_node_z->refcount, count); + buf_node_z->refcount -= count; + if (buf_node_z->refcount == 0) + /* The cache invalid is not required for the Z node. */ + cqm_cla_free(cqm_handle, cla_table, buf_node_y, + buf_node_z, y_index, + CQM_CLA_DEL_GPA_WITHOUT_CACHE_INVALID); + } else if (cla_table->cla_lvl == CQM_CLA_LVL_2) { + y_index = (index >> (cla_table->z + 1)) & + ((1U << (cla_table->y - cla_table->z)) - 1); + x_index = index >> (cla_table->y + 1); + tmp = x_index * (trunk_size / sizeof(dma_addr_t)) + y_index; + + if (x_index >= cla_y_buf->buf_number || tmp >= cla_z_buf->buf_number) { + cqm_err(handle->dev_hdl, + "Cla put: index exceeds buf, x %u, y %u, y_buf_n %u, z_buf_n %u\n", + x_index, y_index, cla_y_buf->buf_number, + cla_z_buf->buf_number); + mutex_unlock(&cla_table->lock); + return; + } + + buf_node_x = cla_x_buf->buf_list; + buf_node_y = &cla_y_buf->buf_list[x_index]; + buf_node_z = &cla_z_buf->buf_list[tmp]; + cqm_dbg("Cla put: 2L: z_refcount=0x%x, count=0x%x\n", + buf_node_z->refcount, count); + + /* When the value of reference counting on the z node page is 0, + * the z node page is released. + */ + buf_node_z->refcount -= count; + if (buf_node_z->refcount == 0) { + cqm_cla_free(cqm_handle, cla_table, buf_node_y, + buf_node_z, y_index, + CQM_CLA_DEL_GPA_WITHOUT_CACHE_INVALID); + + /* When the value of reference counting on the y node + * page is 0, the y node page is released. + */ + cqm_dbg("Cla put: 2L: y_refcount=0x%x\n", + buf_node_y->refcount); + buf_node_y->refcount--; + if (buf_node_y->refcount == 0) + /* Node y requires cache to be invalid. */ + cqm_cla_free(cqm_handle, cla_table, buf_node_x, buf_node_y, + x_index, CQM_CLA_DEL_GPA_WITH_CACHE_INVALID); + } + } + + mutex_unlock(&cla_table->lock); +} + +/** + * Prototype : cqm_cla_table_get + * Description : Searches for the CLA table data structure corresponding to a + * BAT entry. 
+ * Input : struct tag_cqm_bat_table *bat_table, + * u32 entry_type + * Output : None + * Return Value : struct tag_cqm_cla_table * + * 1.Date : 2015/4/15 + * Modification : Created function + */ +struct tag_cqm_cla_table *cqm_cla_table_get(struct tag_cqm_bat_table *bat_table, + u32 entry_type) +{ + struct tag_cqm_cla_table *cla_table = NULL; + u32 i = 0; + + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { + cla_table = &bat_table->entry[i]; + if (cla_table && entry_type == cla_table->type) + return cla_table; + } + + return NULL; +} diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bat_cla.h b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bat_cla.h new file mode 100644 index 000000000..dd0766e6d --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bat_cla.h @@ -0,0 +1,214 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef CQM_BAT_CLA_H +#define CQM_BAT_CLA_H + +#include <linux/types.h> +#include <linux/mutex.h> + +#include "cqm_bitmap_table.h" +#include "cqm_object.h" + +/* When the connection check is enabled, the maximum number of connections + * supported by the chip is 1M - 63, which cannot reach 1M + */ +#define CQM_BAT_MAX_CONN_NUM (0x100000 - 63) +#define CQM_BAT_MAX_CACHE_CONN_NUM (0x100000 - 63) + +#define CLA_TABLE_PAGE_ORDER 0 +#define CQM_4K_PAGE_ORDER 0 +#define CQM_4K_PAGE_SIZE 4096 + +#define CQM_BAT_ENTRY_MAX 16 +#define CQM_BAT_ENTRY_SIZE 16 +#define CQM_BAT_STORE_API_SIZE 16 + +#define CQM_BAT_SIZE_FT_RDMA_PF 240 +#define CQM_BAT_SIZE_FT_RDMA_VF 160 +#define CQM_BAT_SIZE_FT_PF 192 +#define CQM_BAT_SIZE_FT_VF 112 +#define CQM_BAT_SIZE_RDMA_PF 160 +#define CQM_BAT_SIZE_RDMA_VF 80 + +#define CQM_BAT_INDEX0 0 +#define CQM_BAT_INDEX1 1 +#define CQM_BAT_INDEX2 2 +#define CQM_BAT_INDEX3 3 +#define CQM_BAT_INDEX4 4 +#define CQM_BAT_INDEX5 5 +#define CQM_BAT_INDEX6 6 +#define CQM_BAT_INDEX7 7 +#define CQM_BAT_INDEX8 8 +#define CQM_BAT_INDEX9 9 +#define CQM_BAT_INDEX10 10 +#define CQM_BAT_INDEX11 11 +#define CQM_BAT_INDEX12 12 +#define CQM_BAT_INDEX13 13 +#define CQM_BAT_INDEX14 14 +#define CQM_BAT_INDEX15 15 + +enum cqm_bat_entry_type { + CQM_BAT_ENTRY_T_CFG = 0, + CQM_BAT_ENTRY_T_HASH = 1, + CQM_BAT_ENTRY_T_QPC = 2, + CQM_BAT_ENTRY_T_SCQC = 3, + CQM_BAT_ENTRY_T_SRQC = 4, + CQM_BAT_ENTRY_T_MPT = 5, + CQM_BAT_ENTRY_T_GID = 6, + CQM_BAT_ENTRY_T_LUN = 7, + CQM_BAT_ENTRY_T_TASKMAP = 8, + CQM_BAT_ENTRY_T_L3I = 9, + CQM_BAT_ENTRY_T_CHILDC = 10, + CQM_BAT_ENTRY_T_TIMER = 11, + CQM_BAT_ENTRY_T_XID2CID = 12, + CQM_BAT_ENTRY_T_REORDER = 13, + CQM_BAT_ENTRY_T_INVALID = 14, + CQM_BAT_ENTRY_T_MAX = 15, +}; + +/* CLA update mode */ +#define CQM_CLA_RECORD_NEW_GPA 0 +#define CQM_CLA_DEL_GPA_WITHOUT_CACHE_INVALID 1 +#define CQM_CLA_DEL_GPA_WITH_CACHE_INVALID 2 + +#define CQM_CLA_LVL_0 0 +#define CQM_CLA_LVL_1 1 +#define CQM_CLA_LVL_2 2 + +#define CQM_MAX_INDEX_BIT 19 + +#define CQM_CHIP_CACHELINE 256 +#define CQM_CHIP_TIMER_CACHELINE 512 +#define CQM_OBJECT_256 256 +#define CQM_OBJECT_512 512 +#define CQM_OBJECT_1024 1024 +#define CQM_CHIP_GPA_MASK 0x1ffffffffffffff +#define CQM_CHIP_GPA_HIMASK 0x1ffffff +#define CQM_CHIP_GPA_LOMASK 0xffffffff +#define CQM_CHIP_GPA_HSHIFT 32 + +/* Aligns with 64 buckets and shifts rightward by 6 bits */ +#define CQM_HASH_NUMBER_UNIT 6 + +struct tag_cqm_cla_table { + u32 type; + u32 max_buffer_size; + u32 obj_num; + bool alloc_static; /* Whether the buffer is statically allocated */ + u32 cla_lvl; + u32 cacheline_x; /* x value calculated based on cacheline, + * used by the chip + */ + u32 
cacheline_y; /* y value calculated based on cacheline, + * used by the chip + */ + u32 cacheline_z; /* z value calculated based on cacheline, + * used by the chip + */ + u32 x; /* x value calculated based on obj_size, used by software */ + u32 y; /* y value calculated based on obj_size, used by software */ + u32 z; /* z value calculated based on obj_size, used by software */ + struct tag_cqm_buf cla_x_buf; + struct tag_cqm_buf cla_y_buf; + struct tag_cqm_buf cla_z_buf; + u32 trunk_order; /* A continuous physical page contains 2^order pages */ + u32 obj_size; + struct mutex lock; /* Lock for cla buffer allocation and free */ + + struct tag_cqm_bitmap bitmap; + + struct tag_cqm_object_table obj_table; /* Mapping table between + * indexes and objects + */ + char name[VRAM_NAME_APPLY_LEN]; +}; + +struct tag_cqm_bat_entry_cfg { + u32 cur_conn_num_h_4 : 4; + u32 rsv1 : 4; + u32 max_conn_num : 20; + u32 rsv2 : 4; + + u32 max_conn_cache : 10; + u32 rsv3 : 6; + u32 cur_conn_num_l_16 : 16; + + u32 bloom_filter_addr : 16; + u32 cur_conn_cache : 10; + u32 rsv4 : 6; + + u32 bucket_num : 16; + u32 bloom_filter_len : 16; +}; + +#define CQM_BAT_NO_BYPASS_CACHE 0 +#define CQM_BAT_BYPASS_CACHE 1 + +#define CQM_BAT_ENTRY_SIZE_256 0 +#define CQM_BAT_ENTRY_SIZE_512 1 +#define CQM_BAT_ENTRY_SIZE_1024 2 + +struct tag_cqm_bat_entry_standerd { + u32 entry_size : 2; + u32 rsv1 : 6; + u32 max_number : 20; + u32 rsv2 : 4; + + u32 cla_gpa_h : 32; + + u32 cla_gpa_l : 32; + + u32 rsv3 : 8; + u32 z : 5; + u32 y : 5; + u32 x : 5; + u32 rsv24 : 1; + u32 bypass : 1; + u32 cla_level : 2; + u32 rsv5 : 5; +}; + +struct tag_cqm_bat_entry_vf2pf { + u32 cla_gpa_h : 25; + u32 pf_id : 5; + u32 fake_vf_en : 1; + u32 acs_spu_en : 1; +}; + +#define CQM_BAT_ENTRY_TASKMAP_NUM 4 +struct tag_cqm_bat_entry_taskmap_addr { + u32 gpa_h; + u32 gpa_l; +}; + +struct tag_cqm_bat_entry_taskmap { + struct tag_cqm_bat_entry_taskmap_addr addr[CQM_BAT_ENTRY_TASKMAP_NUM]; +}; + +struct tag_cqm_bat_table { + u32 bat_entry_type[CQM_BAT_ENTRY_MAX]; + u8 bat[CQM_BAT_ENTRY_MAX * CQM_BAT_ENTRY_SIZE]; + struct tag_cqm_cla_table entry[CQM_BAT_ENTRY_MAX]; + /* In LB mode 1, the timer needs to be configured in 4 SMFs, + * and the GPAs must be different and independent. 
+ */ + struct tag_cqm_cla_table timer_entry[4]; + u32 bat_size; +}; + +s32 cqm_bat_init(struct tag_cqm_handle *cqm_handle); +void cqm_bat_uninit(struct tag_cqm_handle *cqm_handle); +s32 cqm_cla_init(struct tag_cqm_handle *cqm_handle); +void cqm_cla_uninit(struct tag_cqm_handle *cqm_handle, u32 entry_numb); +u8 *cqm_cla_get_unlock(struct tag_cqm_handle *cqm_handle, struct tag_cqm_cla_table *cla_table, + u32 index, u32 count, dma_addr_t *pa); +u8 *cqm_cla_get_lock(struct tag_cqm_handle *cqm_handle, struct tag_cqm_cla_table *cla_table, + u32 index, u32 count, dma_addr_t *pa); +void cqm_cla_put(struct tag_cqm_handle *cqm_handle, struct tag_cqm_cla_table *cla_table, + u32 index, u32 count); +struct tag_cqm_cla_table *cqm_cla_table_get(struct tag_cqm_bat_table *bat_table, + u32 entry_type); +u32 cqm_funcid2smfid(const struct tag_cqm_handle *cqm_handle); + +#endif /* CQM_BAT_CLA_H */ diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bitmap_table.c b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bitmap_table.c new file mode 100644 index 000000000..e43b1679c --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bitmap_table.c @@ -0,0 +1,1454 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/device.h> +#include <linux/mm.h> +#include <linux/gfp.h> + +#include "ossl_knl.h" +#include "hinic3_crm.h" +#include "hinic3_hw.h" +#include "hinic3_hwdev.h" +#include "cqm_memsec.h" +#include "cqm_object.h" +#include "cqm_bat_cla.h" +#include "cqm_cmd.h" +#include "cqm_object_intern.h" +#include "cqm_main.h" + +#include "cqm_npu_cmd.h" +#include "cqm_npu_cmd_defs.h" +#include "vram_common.h" + +#define common_section + +struct malloc_memory { + bool (*check_alloc_mode)(struct hinic3_hwdev *handle, struct tag_cqm_buf *buf); + s32 (*malloc_func)(struct hinic3_hwdev *handle, struct tag_cqm_buf *buf); +}; + +struct free_memory { + bool (*check_alloc_mode)(struct hinic3_hwdev *handle, struct tag_cqm_buf *buf); + void (*free_func)(struct tag_cqm_buf *buf); +}; + +/** + * Prototype : cqm_swab64(Encapsulation of __swab64) + * Description : Perform big-endian conversion for a memory block (8 bytes). + * Input : u8 *addr: Start address of the memory block + * u32 cnt: Number of 8 bytes in the memory block + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void cqm_swab64(u8 *addr, u32 cnt) +{ + u64 *temp = (u64 *)addr; + u64 value = 0; + u32 i; + + for (i = 0; i < cnt; i++) { + value = __swab64(*temp); + *temp = value; + temp++; + } +} + +/** + * Prototype : cqm_swab32(Encapsulation of __swab32) + * Description : Perform big-endian conversion for a memory block (4 bytes). 
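[Reviewer note, not part of the patch] cqm_swab32()/cqm_swab64() are used throughout this file set to convert command buffers to big endian before they are posted to the firmware; callers pass sizeof(cmd) >> 2 to express the byte length as a dword count. A standalone illustration of that dword-wise conversion, substituting the compiler builtin for the kernel's __swab32 and using a made-up command layout:

#include <stdint.h>
#include <stdio.h>

/* Example command layout; fields and values are invented for the sketch. */
struct demo_cmd {
	uint32_t gpa_h;
	uint32_t gpa_l;
	uint32_t cache_size;
};

/* Userspace analogue of cqm_swab32(): byte-swap a buffer dword by dword. */
static void swab32_buf(uint8_t *addr, uint32_t dw_cnt)
{
	uint32_t *p = (uint32_t *)addr;
	uint32_t i;

	for (i = 0; i < dw_cnt; i++)
		p[i] = __builtin_bswap32(p[i]);
}

int main(void)
{
	struct demo_cmd cmd = { 0x11223344, 0x55667788, 0x100 };

	/* ">> 2" turns the byte length into a number of 4-byte dwords */
	swab32_buf((uint8_t *)&cmd, sizeof(cmd) >> 2);
	printf("gpa_h after swap: 0x%08x\n", cmd.gpa_h);
	return 0;
}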
+ * Input : u8 *addr: Start address of the memory block + * u32 cnt: Number of 4 bytes in the memory block + * Output : None + * Return Value : void + * 1.Date : 2015/7/23 + * Modification : Created function + */ +void cqm_swab32(u8 *addr, u32 cnt) +{ + u32 *temp = (u32 *)addr; + u32 value = 0; + u32 i; + + for (i = 0; i < cnt; i++) { + value = __swab32(*temp); + *temp = value; + temp++; + } +} + +/** + * Prototype : cqm_shift + * Description : Calculates n in a 2^n number.(Find the logarithm of 2^n) + * Input : u32 data + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/15 + * Modification : Created function + */ +s32 cqm_shift(u32 data) +{ + u32 data_num = data; + s32 shift = -1; + + do { + data_num >>= 1; + shift++; + } while (data_num); + + return shift; +} + +/** + * Prototype : cqm_check_align + * Description : Check whether the value is 2^n-aligned. If 0 or 1, false is + * returned. + * Input : u32 data + * Output : None + * Return Value : s32 + * 1.Date : 2015/9/15 + * Modification : Created function + */ +bool cqm_check_align(u32 data) +{ + u32 data_num = data; + + if (data == 0) + return false; + + /* Todo: (n & (n - 1) == 0) can be used to determine the value. */ + do { + /* When the value can be exactly divided by 2, + * the value of data is shifted right by one bit, that is, + * divided by 2. + */ + if ((data_num & 0x1) == 0) + data_num >>= 1; + /* If the value cannot be divisible by 2, the value is + * not 2^n-aligned and false is returned. + */ + else + return false; + } while (data_num != 1); + + return true; +} + +/** + * Prototype : cqm_kmalloc_align + * Description : Allocates 2^n-byte-aligned memory for the start address. + * Input : size_t size + * gfp_t flags + * u16 align_order + * Output : None + * Return Value : void * + * 1.Date : 2017/9/22 + * Modification : Created function + */ +void *cqm_kmalloc_align(size_t size, gfp_t flags, u16 align_order) +{ + void *orig_addr = NULL; + void *align_addr = NULL; + void *index_addr = NULL; + + orig_addr = kmalloc(size + ((u64)1 << align_order) + sizeof(void *), + flags); + if (!orig_addr) + return NULL; + + index_addr = (void *)((char *)orig_addr + sizeof(void *)); + align_addr = + (void *)((((u64)index_addr + ((u64)1 << align_order) - 1) >> + align_order) << align_order); + + /* Record the original memory address for memory release. */ + index_addr = (void *)((char *)align_addr - sizeof(void *)); + *(void **)index_addr = orig_addr; + + return align_addr; +} + +/** + * Prototype : cqm_kfree_align + * Description : Release the memory allocated for starting address alignment. + * Input : void *addr + * Output : None + * Return Value : void + * 1.Date : 2017/9/22 + * Modification : Created function + */ +void cqm_kfree_align(void *addr) +{ + void *index_addr = NULL; + + /* Release the original memory address. 
*/ + index_addr = (void *)((char *)addr - sizeof(void *)); + + cqm_dbg("free aligned address: %p, original address: %p\n", addr, + *(void **)index_addr); + + kfree(*(void **)index_addr); +} + +static void cqm_write_lock(rwlock_t *lock, bool bh) +{ + if (bh) + write_lock_bh(lock); + else + write_lock(lock); +} + +static void cqm_write_unlock(rwlock_t *lock, bool bh) +{ + if (bh) + write_unlock_bh(lock); + else + write_unlock(lock); +} + +static void cqm_read_lock(rwlock_t *lock, bool bh) +{ + if (bh) + read_lock_bh(lock); + else + read_lock(lock); +} + +static void cqm_read_unlock(rwlock_t *lock, bool bh) +{ + if (bh) + read_unlock_bh(lock); + else + read_unlock(lock); +} + +static inline bool cqm_bat_entry_in_secure_mem(void *handle, u32 type) +{ + if (!cqm_need_secure_mem(handle)) + return false; + + if (type == CQM_BAT_ENTRY_T_QPC || type == CQM_BAT_ENTRY_T_SCQC || + type == CQM_BAT_ENTRY_T_SRQC || type == CQM_BAT_ENTRY_T_MPT) + return true; + + return false; +} + +s32 cqm_buf_alloc_direct(struct tag_cqm_handle *cqm_handle, struct tag_cqm_buf *buf, bool direct) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct page **pages = NULL; + u32 i, j, order; + + order = (u32)get_order(buf->buf_size); + + if (!direct) { + buf->direct.va = NULL; + return CQM_SUCCESS; + } + + pages = vmalloc(sizeof(struct page *) * buf->page_number); + if (!pages) + return CQM_FAIL; + + for (i = 0; i < buf->buf_number; i++) { + for (j = 0; j < ((u32)1 << order); j++) + pages[(ulong)(unsigned int)((i << order) + j)] = + (void *)virt_to_page((u8 *)(buf->buf_list[i].va) + (PAGE_SIZE * j)); + } + + buf->direct.va = vmap(pages, buf->page_number, VM_MAP, PAGE_KERNEL); + vfree(pages); + if (!buf->direct.va) { + cqm_err(handle->dev_hdl, CQM_MAP_FAIL(buf->direct.va)); + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +static bool check_use_non_vram(struct hinic3_hwdev *handle, struct tag_cqm_buf *buf) +{ + return buf->buf_info.use_vram ? false : true; +} + +static bool check_for_use_node_alloc(struct hinic3_hwdev *handle, struct tag_cqm_buf *buf) +{ + if (buf->buf_info.use_vram == 0 && handle->board_info.service_mode == 0) + return true; + + return false; +} + +static bool check_for_nouse_node_alloc(struct hinic3_hwdev *handle, struct tag_cqm_buf *buf) +{ + if (buf->buf_info.use_vram == 0 && handle->board_info.service_mode != 0) + return true; + + return false; +} + +static void cqm_buf_free_page_common(struct tag_cqm_buf *buf) +{ + u32 order; + s32 i; + + order = (u32)get_order(buf->buf_size); + + for (i = 0; i < (s32)buf->buf_number; i++) { + if (buf->buf_list[i].va) { + free_pages((ulong)(buf->buf_list[i].va), order); + buf->buf_list[i].va = NULL; + } + } +} + +static s32 cqm_buf_use_node_alloc_page(struct hinic3_hwdev *handle, struct tag_cqm_buf *buf) +{ + struct page *newpage = NULL; + u32 order; + void *va = NULL; + s32 i, node; + + order = (u32)get_order(buf->buf_size); + node = dev_to_node(handle->dev_hdl); + for (i = 0; i < (s32)buf->buf_number; i++) { + newpage = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, order); + if (!newpage) + break; + va = (void *)page_address(newpage); + /* Initialize the page after the page is applied for. + * If hash entries are involved, the initialization + * value must be 0. 
+ */ + memset(va, 0, buf->buf_size); + buf->buf_list[i].va = va; + } + + if (i != buf->buf_number) { + cqm_buf_free_page_common(buf); + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +static s32 cqm_buf_unused_node_alloc_page(struct hinic3_hwdev *handle, struct tag_cqm_buf *buf) +{ + u32 order; + void *va = NULL; + s32 i; + + order = (u32)get_order(buf->buf_size); + + for (i = 0; i < (s32)buf->buf_number; i++) { + va = (void *)ossl_get_free_pages(GFP_KERNEL | __GFP_ZERO, order); + if (!va) + break; + /* Initialize the page after the page is applied for. + * If hash entries are involved, the initialization + * value must be 0. + */ + memset(va, 0, buf->buf_size); + buf->buf_list[i].va = va; + } + + if (i != buf->buf_number) { + cqm_buf_free_page_common(buf); + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +static const struct malloc_memory g_malloc_funcs[] = { + {check_for_use_node_alloc, cqm_buf_use_node_alloc_page}, + {check_for_nouse_node_alloc, cqm_buf_unused_node_alloc_page} +}; + +static const struct free_memory g_free_funcs[] = { + {check_use_non_vram, cqm_buf_free_page_common} +}; + +static s32 cqm_buf_alloc_page(struct tag_cqm_handle *cqm_handle, struct tag_cqm_buf *buf) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + u32 malloc_funcs_num = ARRAY_SIZE(g_malloc_funcs); + u32 i; + + for (i = 0; i < malloc_funcs_num; i++) { + if (g_malloc_funcs[i].check_alloc_mode && + g_malloc_funcs[i].malloc_func && + g_malloc_funcs[i].check_alloc_mode(handle, buf)) + return g_malloc_funcs[i].malloc_func(handle, buf); + } + + cqm_err(handle->dev_hdl, "Unknown alloc mode\n"); + + return CQM_FAIL; +} + +static void cqm_buf_free_page(struct tag_cqm_buf *buf) +{ + u32 free_funcs_num = ARRAY_SIZE(g_free_funcs); + u32 i; + + for (i = 0; i < free_funcs_num; i++) { + if (g_free_funcs[i].check_alloc_mode && + g_free_funcs[i].free_func && + g_free_funcs[i].check_alloc_mode(NULL, buf)) + return g_free_funcs[i].free_func(buf); + } +} + +static s32 cqm_buf_alloc_map(struct tag_cqm_handle *cqm_handle, struct tag_cqm_buf *buf) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct pci_dev *dev = cqm_handle->dev; + void *va = NULL; + s32 i; + + for (i = 0; i < (s32)buf->buf_number; i++) { + va = buf->buf_list[i].va; + buf->buf_list[i].pa = pci_map_single(dev, va, buf->buf_size, + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(dev, buf->buf_list[i].pa)) { + cqm_err(handle->dev_hdl, CQM_MAP_FAIL(buf_list)); + break; + } + } + + if (i != buf->buf_number) { + i--; + for (; i >= 0; i--) + pci_unmap_single(dev, buf->buf_list[i].pa, + buf->buf_size, PCI_DMA_BIDIRECTIONAL); + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +static s32 cqm_buf_get_secure_mem_pages(struct tag_cqm_handle *cqm_handle, struct tag_cqm_buf *buf) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + u32 i; + + for (i = 0; i < buf->buf_number; i++) { + buf->buf_list[i].va = + cqm_get_secure_mem_pages(handle, + (u32)get_order(buf->buf_size), + &buf->buf_list[i].pa); + if (!buf->buf_list[i].va) + break; + } + + if (i != buf->buf_number) { + cqm_free_secure_mem_pages(handle, buf->buf_list[0].va, + (u32)get_order(buf->buf_size)); + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +/** + * Prototype : cqm_buf_alloc + * Description : Apply for buffer space and DMA mapping for the struct tag_cqm_buf + * structure. 
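[Reviewer note, not part of the patch] A small userspace analogue of the cqm_kmalloc_align()/cqm_kfree_align() pair defined earlier in this file: over-allocate by the worst-case alignment slack plus one pointer, round the start address up to the requested 2^order boundary, and stash the original pointer just below the aligned block so it can be recovered on free. Names and sizes below are invented for the illustration.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Sketch of the align-alloc trick used by cqm_kmalloc_align(). */
static void *malloc_align(size_t size, unsigned int align_order)
{
	uintptr_t align = (uintptr_t)1 << align_order;
	void *orig = malloc(size + align + sizeof(void *));
	uintptr_t aligned;

	if (!orig)
		return NULL;

	aligned = ((uintptr_t)orig + sizeof(void *) + align - 1) & ~(align - 1);
	((void **)aligned)[-1] = orig;		/* record original address */
	return (void *)aligned;
}

static void free_align(void *addr)
{
	free(((void **)addr)[-1]);		/* release original address */
}

int main(void)
{
	void *p = malloc_align(100, 6);		/* 64-byte-aligned block */

	if (p) {
		printf("aligned address: %p\n", p);
		free_align(p);
	}
	return 0;
}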
+ * Input : struct tag_cqm_buf *buf + * struct pci_dev *dev + * bool direct: Whether direct remapping is required + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/15 + * Modification : Created function + */ +s32 cqm_buf_alloc(struct tag_cqm_handle *cqm_handle, struct tag_cqm_buf *buf, bool direct) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct pci_dev *dev = cqm_handle->dev; + s32 i; + s32 ret; + + /* Applying for the buffer list descriptor space */ + buf->buf_list = vmalloc(buf->buf_number * sizeof(struct tag_cqm_buf_list)); + if (!buf->buf_list) + return CQM_FAIL; + memset(buf->buf_list, 0, buf->buf_number * sizeof(struct tag_cqm_buf_list)); + + /* Page for applying for each buffer */ + if (cqm_bat_entry_in_secure_mem((void *)handle, buf->bat_entry_type)) + ret = cqm_buf_get_secure_mem_pages(cqm_handle, buf); + else + ret = cqm_buf_alloc_page(cqm_handle, buf); + + if (ret == CQM_FAIL) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(linux_cqm_buf_alloc_page)); + goto err1; + } + + /* PCI mapping of the buffer */ + if (!cqm_bat_entry_in_secure_mem((void *)handle, buf->bat_entry_type)) { + if (cqm_buf_alloc_map(cqm_handle, buf) == CQM_FAIL) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(linux_cqm_buf_alloc_map)); + goto err2; + } + } + + /* direct remapping */ + if (cqm_buf_alloc_direct(cqm_handle, buf, direct) == CQM_FAIL) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_buf_alloc_direct)); + goto err3; + } + + return CQM_SUCCESS; + +err3: + if (!cqm_bat_entry_in_secure_mem((void *)handle, buf->bat_entry_type)) { + for (i = 0; i < (s32)buf->buf_number; i++) { + pci_unmap_single(dev, buf->buf_list[i].pa, buf->buf_size, + PCI_DMA_BIDIRECTIONAL); + } + } +err2: + if (cqm_bat_entry_in_secure_mem((void *)handle, buf->bat_entry_type)) + cqm_free_secure_mem_pages(handle, buf->buf_list[0].va, + (u32)get_order(buf->buf_size)); + else + cqm_buf_free_page(buf); +err1: + vfree(buf->buf_list); + buf->buf_list = NULL; + return CQM_FAIL; +} + +/** + * Prototype : cqm_buf_free + * Description : Release the buffer space and DMA mapping for the struct tag_cqm_buf + * structure. 
+ * Input : struct tag_cqm_buf *buf + * struct pci_dev *dev + * bool direct: Whether direct remapping is required + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void cqm_buf_free(struct tag_cqm_buf *buf, struct tag_cqm_handle *cqm_handle) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct pci_dev *dev = cqm_handle->dev; + s32 i; + + if (buf->direct.va) { + vunmap(buf->direct.va); + buf->direct.va = NULL; + } + + if (!buf->buf_list) + return; + + if (cqm_bat_entry_in_secure_mem(handle, buf->bat_entry_type)) { + cqm_free_secure_mem_pages(handle, buf->buf_list[0].va, + (u32)get_order(buf->buf_size)); + goto free; + } + + for (i = 0; i < (s32)(buf->buf_number); i++) { + if (buf->buf_list[i].va) + pci_unmap_single(dev, buf->buf_list[i].pa, + buf->buf_size, + PCI_DMA_BIDIRECTIONAL); + } + cqm_buf_free_page(buf); + +free: + vfree(buf->buf_list); + buf->buf_list = NULL; +} + +static s32 cqm_cla_cache_invalid_cmd(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_cmd_buf *buf_in, + struct tag_cqm_cla_cache_invalid_cmd *cmd) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_cla_cache_invalid_cmd *cla_cache_invalid_cmd = NULL; + s32 ret; + + cla_cache_invalid_cmd = (struct tag_cqm_cla_cache_invalid_cmd *)(buf_in->buf); + cla_cache_invalid_cmd->gpa_h = cmd->gpa_h; + cla_cache_invalid_cmd->gpa_l = cmd->gpa_l; + cla_cache_invalid_cmd->cache_size = cmd->cache_size; + cla_cache_invalid_cmd->smf_id = cmd->smf_id; + cla_cache_invalid_cmd->func_id = cmd->func_id; + + cqm_swab32((u8 *)cla_cache_invalid_cmd, + /* shift 2 bits by right to get length of dw(4B) */ + (sizeof(struct tag_cqm_cla_cache_invalid_cmd) >> 2)); + + /* Send the cmdq command. */ + ret = cqm_send_cmd_box((void *)(cqm_handle->ex_handle), CQM_MOD_CQM, + CQM_CMD_T_CLA_CACHE_INVALID, buf_in, NULL, NULL, + CQM_CMD_TIMEOUT, HINIC3_CHANNEL_DEFAULT); + if (ret != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_send_cmd_box)); + cqm_err(handle->dev_hdl, + "Cla cache invalid: cqm_send_cmd_box_ret=%d\n", + ret); + cqm_err(handle->dev_hdl, + "Cla cache invalid: cla_cache_invalid_cmd: 0x%x 0x%x 0x%x\n", + cmd->gpa_h, cmd->gpa_l, cmd->cache_size); + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +s32 cqm_cla_cache_invalid(struct tag_cqm_handle *cqm_handle, dma_addr_t pa, u32 cache_size) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_cmd_buf *buf_in = NULL; + struct hinic3_func_attr *func_attr = NULL; + struct tag_cqm_bat_entry_vf2pf gpa = {0}; + struct tag_cqm_cla_cache_invalid_cmd cmd; + u32 cla_gpa_h = 0; + s32 ret = CQM_FAIL; + u32 i; + + buf_in = cqm_cmd_alloc((void *)(cqm_handle->ex_handle)); + if (!buf_in) + return CQM_FAIL; + buf_in->size = sizeof(struct tag_cqm_cla_cache_invalid_cmd); + + gpa.cla_gpa_h = CQM_ADDR_HI(pa) & CQM_CHIP_GPA_HIMASK; + + /* On the SPU, the value of spu_en in the GPA address + * in the BAT is determined by the host ID and fun IDx. + */ + if (hinic3_host_id(cqm_handle->ex_handle) == CQM_SPU_HOST_ID) { + func_attr = &cqm_handle->func_attribute; + gpa.acs_spu_en = func_attr->func_global_idx & 0x1; + } else { + gpa.acs_spu_en = 0; + } + + /* In non-fake mode, set func_id to 0xffff. + * Indicate the current func fake mode. + * The value of func_id is a fake func ID. 
+ */ + if (cqm_handle->func_capability.fake_func_type == CQM_FAKE_FUNC_CHILD) { + cmd.func_id = cqm_handle->func_attribute.func_global_idx; + func_attr = &cqm_handle->parent_cqm_handle->func_attribute; + gpa.fake_vf_en = 1; + gpa.pf_id = func_attr->func_global_idx; + } else { + cmd.func_id = 0xffff; + } + + memcpy(&cla_gpa_h, &gpa, sizeof(u32)); + + /* Fill command and convert it to big endian */ + cmd.cache_size = cache_size; + cmd.gpa_l = CQM_ADDR_LW(pa); + cmd.gpa_h = cla_gpa_h; + + /* The normal mode is the 1822 traditional mode and is all configured + * on SMF0. + */ + /* Mode 0 is hashed to 4 SMF engines (excluding PPF) by func ID. */ + if (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_NORMAL || + (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_0 && + cqm_handle->func_attribute.func_type != CQM_PPF)) { + cmd.smf_id = cqm_funcid2smfid(cqm_handle); + ret = cqm_cla_cache_invalid_cmd(cqm_handle, buf_in, &cmd); + /* Mode 1/2 are allocated to 4 SMF engines by flow. Therefore, + * one function needs to be allocated to 4 SMF engines. + */ + /* The PPF in mode 0 needs to be configured on 4 engines, + * and the timer resources need to be shared by the 4 engines. + */ + } else if (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_1 || + cqm_handle->func_capability.lb_mode == CQM_LB_MODE_2 || + (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_0 && + cqm_handle->func_attribute.func_type == CQM_PPF)) { + for (i = 0; i < CQM_LB_SMF_MAX; i++) { + /* The smf_pg stored currently enabled SMF engine. */ + if (cqm_handle->func_capability.smf_pg & (1U << i)) { + cmd.smf_id = i; + ret = cqm_cla_cache_invalid_cmd(cqm_handle, + buf_in, &cmd); + if (ret != CQM_SUCCESS) + goto out; + } + } + } else { + cqm_err(handle->dev_hdl, "Cla cache invalid: unsupport lb mode=%u\n", + cqm_handle->func_capability.lb_mode); + ret = CQM_FAIL; + } + +out: + cqm_cmd_free((void *)(cqm_handle->ex_handle), buf_in); + return ret; +} + +static void free_cache_inv(struct tag_cqm_handle *cqm_handle, struct tag_cqm_buf *buf, + s32 *inv_flag) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + u32 order; + s32 i; + + order = (u32)get_order(buf->buf_size); + + if (!handle->chip_present_flag) + return; + + if (!buf->buf_list) + return; + + for (i = 0; i < (s32)(buf->buf_number); i++) { + if (!buf->buf_list[i].va) + continue; + + if (*inv_flag != CQM_SUCCESS) + continue; + + /* In the Pangea environment, if the cmdq times out, + * no subsequent message is sent. + */ + *inv_flag = cqm_cla_cache_invalid(cqm_handle, buf->buf_list[i].pa, + (u32)(PAGE_SIZE << order)); + if (*inv_flag != CQM_SUCCESS) + cqm_err(handle->dev_hdl, + "Buffer free: fail to invalid buf_list pa cache, inv_flag=%d\n", + *inv_flag); + } +} + +void cqm_buf_free_cache_inv(struct tag_cqm_handle *cqm_handle, struct tag_cqm_buf *buf, + s32 *inv_flag) +{ + /* Send a command to the chip to kick out the cache. */ + free_cache_inv(cqm_handle, buf, inv_flag); + + /* Clear host resources */ + cqm_buf_free(buf, cqm_handle); +} + +void cqm_byte_print(u32 *ptr, u32 len) +{ + u32 i; + u32 len_num = len; + + len_num = (len_num >> 0x2); + for (i = 0; i < len_num; i = i + 0x4) { + cqm_dbg("%.8x %.8x %.8x %.8x\n", ptr[i], ptr[i + 1], + ptr[i + 2], /* index increases by 2 */ + ptr[i + 3]); /* index increases by 3 */ + } +} + +#define bitmap_section + +/** + * Prototype : cqm_single_bitmap_init + * Description : Initialize a bitmap. 
+ * Input : struct tag_cqm_bitmap *bitmap + * Output : None + * Return Value : s32 + * 1.Date : 2015/9/9 + * Modification : Created function + */ +static s32 cqm_single_bitmap_init(struct tag_cqm_bitmap *bitmap) +{ + u32 bit_number; + + spin_lock_init(&bitmap->lock); + + /* Max_num of the bitmap is 8-aligned and then + * shifted rightward by 3 bits to obtain the number of bytes required. + */ + bit_number = (ALIGN(bitmap->max_num, CQM_NUM_BIT_BYTE) >> + CQM_BYTE_BIT_SHIFT); + bitmap->table = vmalloc(bit_number); + if (!bitmap->table) + return CQM_FAIL; + memset(bitmap->table, 0, bit_number); + + return CQM_SUCCESS; +} + +static s32 cqm_bitmap_toe_init(struct tag_cqm_handle *cqm_handle) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_bitmap *bitmap = NULL; + + /* SRQC of TOE services is not managed through the CLA table, + * but the bitmap is required to manage SRQid. + */ + if (cqm_handle->service[CQM_SERVICE_T_TOE].valid) { + bitmap = &cqm_handle->toe_own_capability.srqc_bitmap; + bitmap->max_num = + cqm_handle->toe_own_capability.toe_srqc_number; + bitmap->reserved_top = 0; + bitmap->reserved_back = 0; + bitmap->last = 0; + if (bitmap->max_num == 0) { + cqm_info(handle->dev_hdl, + "Bitmap init: toe_srqc_number=0, don't init bitmap\n"); + return CQM_SUCCESS; + } + + if (cqm_single_bitmap_init(bitmap) != CQM_SUCCESS) + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +static void cqm_bitmap_toe_uninit(struct tag_cqm_handle *cqm_handle) +{ + struct tag_cqm_bitmap *bitmap = NULL; + + if (cqm_handle->service[CQM_SERVICE_T_TOE].valid) { + bitmap = &cqm_handle->toe_own_capability.srqc_bitmap; + if (bitmap->table) { + spin_lock_deinit(&bitmap->lock); + vfree(bitmap->table); + bitmap->table = NULL; + } + } +} + +/** + * Prototype : cqm_bitmap_init + * Description : Initialize the bitmap. 
+ * Input : struct tag_cqm_handle *cqm_handle + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/15 + * Modification : Created function + */ +s32 cqm_bitmap_init(struct tag_cqm_handle *cqm_handle) +{ + struct tag_cqm_func_capability *capability = &cqm_handle->func_capability; + struct tag_cqm_bat_table *bat_table = &cqm_handle->bat_table; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_cla_table *cla_table = NULL; + struct tag_cqm_bitmap *bitmap = NULL; + s32 ret = CQM_SUCCESS; + u32 i; + + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { + cla_table = &bat_table->entry[i]; + if (cla_table->obj_num == 0) { + cqm_info(handle->dev_hdl, + "Cla alloc: cla_type %u, obj_num=0, don't init bitmap\n", + cla_table->type); + continue; + } + + bitmap = &cla_table->bitmap; + snprintf(bitmap->bitmap_info.buf_vram_name, VRAM_NAME_MAX_LEN - 1, + "%s%s%02d", cla_table->name, + VRAM_CQM_BITMAP_BASE, cla_table->type); + + switch (cla_table->type) { + case CQM_BAT_ENTRY_T_QPC: + bitmap->max_num = capability->qpc_number; + bitmap->reserved_top = capability->qpc_reserved; + bitmap->reserved_back = capability->qpc_reserved_back; + bitmap->last = capability->qpc_reserved; + cqm_info(handle->dev_hdl, + "Bitmap init: cla_table_type=%u, max_num=0x%x\n", + cla_table->type, bitmap->max_num); + ret = cqm_single_bitmap_init(bitmap); + break; + case CQM_BAT_ENTRY_T_MPT: + bitmap->max_num = capability->mpt_number; + bitmap->reserved_top = capability->mpt_reserved; + bitmap->reserved_back = 0; + bitmap->last = capability->mpt_reserved; + cqm_info(handle->dev_hdl, + "Bitmap init: cla_table_type=%u, max_num=0x%x\n", + cla_table->type, bitmap->max_num); + ret = cqm_single_bitmap_init(bitmap); + break; + case CQM_BAT_ENTRY_T_SCQC: + bitmap->max_num = capability->scqc_number; + bitmap->reserved_top = capability->scq_reserved; + bitmap->reserved_back = 0; + bitmap->last = capability->scq_reserved; + cqm_info(handle->dev_hdl, + "Bitmap init: cla_table_type=%u, max_num=0x%x\n", + cla_table->type, bitmap->max_num); + ret = cqm_single_bitmap_init(bitmap); + break; + case CQM_BAT_ENTRY_T_SRQC: + bitmap->max_num = capability->srqc_number; + bitmap->reserved_top = capability->srq_reserved; + bitmap->reserved_back = 0; + bitmap->last = capability->srq_reserved; + cqm_info(handle->dev_hdl, + "Bitmap init: cla_table_type=%u, max_num=0x%x\n", + cla_table->type, bitmap->max_num); + ret = cqm_single_bitmap_init(bitmap); + break; + default: + break; + } + + if (ret != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, + "Bitmap init: failed to init cla_table_type=%u, obj_num=0x%x\n", + cla_table->type, cla_table->obj_num); + goto err; + } + } + + if (cqm_bitmap_toe_init(cqm_handle) != CQM_SUCCESS) + goto err; + + return CQM_SUCCESS; + +err: + cqm_bitmap_uninit(cqm_handle); + return CQM_FAIL; +} + +/** + * Prototype : cqm_bitmap_uninit + * Description : Deinitialize the bitmap. 
+ * Input : struct tag_cqm_handle *cqm_handle + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void cqm_bitmap_uninit(struct tag_cqm_handle *cqm_handle) +{ + struct tag_cqm_bat_table *bat_table = &cqm_handle->bat_table; + struct tag_cqm_cla_table *cla_table = NULL; + struct tag_cqm_bitmap *bitmap = NULL; + u32 i; + + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { + cla_table = &bat_table->entry[i]; + bitmap = &cla_table->bitmap; + if (cla_table->type != CQM_BAT_ENTRY_T_INVALID && + bitmap->table) { + spin_lock_deinit(&bitmap->lock); + vfree(bitmap->table); + bitmap->table = NULL; + } + } + + cqm_bitmap_toe_uninit(cqm_handle); +} + +/** + * Prototype : cqm_bitmap_check_range + * Description : Starting from begin, check whether the bits in number of count + * are idle in the table. Requirement: + * 1. This group of bits cannot cross steps. + * 2. This group of bits must be 0. + * Input : const ulong *table, + * u32 step, + * u32 max_num, + * u32 begin, + * u32 count + * Output : None + * Return Value : u32 + * 1.Date : 2015/4/15 + * Modification : Created function + */ +static u32 cqm_bitmap_check_range(const ulong *table, u32 step, u32 max_num, u32 begin, + u32 count) +{ + u32 end = (begin + (count - 1)); + u32 i; + + /* Single-bit check is not performed. */ + if (count == 1) + return begin; + + /* The end value exceeds the threshold. */ + if (end >= max_num) + return max_num; + + /* Bit check, the next bit is returned when a non-zero bit is found. */ + for (i = (begin + 1); i <= end; i++) { + if (test_bit((int)i, table)) + return i + 1; + } + + /* Check whether it's in different steps. */ + if ((begin & (~(step - 1))) != (end & (~(step - 1)))) + return (end & (~(step - 1))); + + /* If the check succeeds, begin is returned. */ + return begin; +} + +static void cqm_bitmap_find(struct tag_cqm_bitmap *bitmap, u32 *index, u32 last, + u32 step, u32 count) +{ + u32 last_num = last; + u32 max_num = bitmap->max_num - bitmap->reserved_back; + ulong *table = bitmap->table; + + do { + *index = (u32)find_next_zero_bit(table, max_num, last_num); + if (*index < max_num) + last_num = cqm_bitmap_check_range(table, step, max_num, + *index, count); + else + break; + } while (last_num != *index); +} + +static void cqm_bitmap_find_with_low2bit_align(struct tag_cqm_bitmap *bitmap, u32 *index, + u32 max_num, u32 last, u32 low2bit) +{ + ulong *table = bitmap->table; + u32 offset = last; + + while (offset < max_num) { + *index = (u32)find_next_zero_bit(table, max_num, offset); + if (*index >= max_num) + break; + + if ((*index & 0x3) == (low2bit & 0x3)) /* 0x3 used for low2bit align */ + break; + + offset = *index + 1; + if (offset == max_num) + *index = max_num; + } +} + +/** + * Prototype : cqm_bitmap_alloc + * Description : Apply for a bitmap index. 0 and 1 must be left blank. + * Scan backwards from where you last applied. + * A string of consecutive indexes must be applied for and + * cannot be applied for across trunks. + * Input : struct tag_cqm_bitmap *bitmap, + * u32 step, + * u32 count + * Output : None + * Return Value : u32 + * The obtained index is returned. + * If a failure occurs, the value of max is returned. 
+ * 1.Date : 2015/4/15 + * Modification : Created function + */ +u32 cqm_bitmap_alloc(struct tag_cqm_bitmap *bitmap, u32 step, u32 count, bool update_last) +{ + u32 index = 0; + u32 max_num = bitmap->max_num - bitmap->reserved_back; + u32 last = bitmap->last; + ulong *table = bitmap->table; + u32 i; + + spin_lock(&bitmap->lock); + + /* Search for an idle bit from the last position. */ + cqm_bitmap_find(bitmap, &index, last, step, count); + + /* The preceding search fails. Search for an idle bit + * from the beginning. + */ + if (index >= max_num) { + last = bitmap->reserved_top; + cqm_bitmap_find(bitmap, &index, last, step, count); + } + + /* Set the found bit to 1 and reset last. */ + if (index < max_num) { + for (i = index; i < (index + count); i++) + set_bit(i, table); + + if (update_last) { + bitmap->last = (index + count); + if (bitmap->last >= max_num) + bitmap->last = bitmap->reserved_top; + } + } + + spin_unlock(&bitmap->lock); + return index; +} + +/** + * Prototype : cqm_bitmap_alloc_low2bit_align + * Description : Apply for a bitmap index with low2bit align. 0 and 1 must be left blank. + * Scan backwards from where you last applied. + * A string of consecutive indexes must be applied for and + * cannot be applied for across trunks. + * Input : struct tag_cqm_bitmap *bitmap, + * u32 low2bit, + * bool update_last + * Output : None + * Return Value : u32 + * The obtained index is returned. + * If a failure occurs, the value of max is returned. + * 1.Date : 2015/4/15 + * Modification : Created function + */ +u32 cqm_bitmap_alloc_low2bit_align(struct tag_cqm_bitmap *bitmap, u32 low2bit, bool update_last) +{ + u32 index = 0; + u32 max_num = bitmap->max_num - bitmap->reserved_back; + u32 last = bitmap->last; + ulong *table = bitmap->table; + + spin_lock(&bitmap->lock); + + /* Search for an idle bit from the last position. */ + cqm_bitmap_find_with_low2bit_align(bitmap, &index, max_num, last, low2bit); + + /* The preceding search fails. Search for an idle bit from the beginning. */ + if (index >= max_num) { + last = bitmap->reserved_top; + cqm_bitmap_find_with_low2bit_align(bitmap, &index, max_num, last, low2bit); + } + + /* Set the found bit to 1 and reset last. */ + if (index < max_num) { + set_bit(index, table); + + if (update_last) { + bitmap->last = index; + if (bitmap->last >= max_num) + bitmap->last = bitmap->reserved_top; + } + } + + spin_unlock(&bitmap->lock); + return index; +} + +/** + * Prototype : cqm_bitmap_alloc_reserved + * Description : Reserve bit applied for based on index. + * Input : struct tag_cqm_bitmap *bitmap, + * u32 count, + * u32 index + * Output : None + * Return Value : u32 + * The obtained index is returned. + * If a failure occurs, the value of max is returned. + * 1.Date : 2015/4/15 + * Modification : Created function + */ +u32 cqm_bitmap_alloc_reserved(struct tag_cqm_bitmap *bitmap, u32 count, u32 index) +{ + ulong *table = bitmap->table; + u32 ret_index; + + if (index >= bitmap->max_num || count != 1) + return CQM_INDEX_INVALID; + + if (index >= bitmap->reserved_top && (index < bitmap->max_num - bitmap->reserved_back)) + return CQM_INDEX_INVALID; + + spin_lock(&bitmap->lock); + + if (test_bit((int)index, table)) { + ret_index = CQM_INDEX_INVALID; + } else { + set_bit(index, table); + ret_index = index; + } + + spin_unlock(&bitmap->lock); + return ret_index; +} + +/** + * Prototype : cqm_bitmap_free + * Description : Releases a bitmap index. 
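[Reviewer note, not part of the patch] To clarify the allocation policy described above, here is a self-contained sketch of the scan cqm_bitmap_alloc() performs: look for `count` consecutive zero bits starting from the last successful position, reject runs that would cross a `step` (trunk) boundary, and wrap around to the reserved-top offset if the first pass fails; on failure the maximum is returned. The bitmap size, step and reserved values are arbitrary example numbers, and the driver's spinlock is omitted.

#include <stdint.h>
#include <stdio.h>

#define MAX_BITS 64U	/* example bitmap size */

static uint64_t table;	/* bit i set => index i in use */

static unsigned int find_next_zero(unsigned int max, unsigned int from)
{
	unsigned int i;

	for (i = from; i < max; i++)
		if (!(table & (1ULL << i)))
			return i;
	return max;
}

/* count consecutive free bits, not crossing a step boundary */
static int range_ok(unsigned int begin, unsigned int count, unsigned int step)
{
	unsigned int end = begin + count - 1;
	unsigned int i;

	if (end >= MAX_BITS || (begin / step) != (end / step))
		return 0;
	for (i = begin; i <= end; i++)
		if (table & (1ULL << i))
			return 0;
	return 1;
}

static unsigned int bitmap_alloc(unsigned int *last, unsigned int reserved_top,
				 unsigned int count, unsigned int step)
{
	unsigned int start = *last, index, i, pass;

	for (pass = 0; pass < 2; pass++) {	/* second pass wraps around */
		for (index = find_next_zero(MAX_BITS, start); index < MAX_BITS;
		     index = find_next_zero(MAX_BITS, index + 1)) {
			if (!range_ok(index, count, step))
				continue;
			for (i = index; i < index + count; i++)
				table |= 1ULL << i;
			*last = index + count;
			return index;
		}
		start = reserved_top;
	}
	return MAX_BITS;	/* failure: the maximum is returned */
}

int main(void)
{
	unsigned int last = 2;	/* indexes 0 and 1 stay reserved */

	printf("got %u\n", bitmap_alloc(&last, 2, 4, 16));
	printf("got %u\n", bitmap_alloc(&last, 2, 4, 16));
	return 0;
}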
+ * Input : struct tag_cqm_bitmap *bitmap, + * u32 index, + * u32 count + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void cqm_bitmap_free(struct tag_cqm_bitmap *bitmap, u32 index, u32 count) +{ + u32 i; + + spin_lock(&bitmap->lock); + + for (i = index; i < (index + count); i++) + clear_bit((s32)i, bitmap->table); + + spin_unlock(&bitmap->lock); +} + +#define obj_table_section + +/** + * Prototype : cqm_single_object_table_init + * Description : Initialize a object table. + * Input : struct tag_cqm_object_table *obj_table + * Output : None + * Return Value : s32 + * 1.Date : 2015/9/9 + * Modification : Created function + */ +static s32 cqm_single_object_table_init(struct tag_cqm_object_table *obj_table) +{ + rwlock_init(&obj_table->lock); + + obj_table->table = vmalloc(obj_table->max_num * sizeof(void *)); + if (!obj_table->table) + return CQM_FAIL; + memset(obj_table->table, 0, obj_table->max_num * sizeof(void *)); + return CQM_SUCCESS; +} + +/** + * Prototype : cqm_object_table_init + * Description : Initialize the association table between objects and indexes. + * Input : struct tag_cqm_handle *cqm_handle + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/15 + * Modification : Created function + */ +s32 cqm_object_table_init(struct tag_cqm_handle *cqm_handle) +{ + struct tag_cqm_func_capability *capability = &cqm_handle->func_capability; + struct tag_cqm_bat_table *bat_table = &cqm_handle->bat_table; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_object_table *obj_table = NULL; + struct tag_cqm_cla_table *cla_table = NULL; + s32 ret = CQM_SUCCESS; + u32 i; + + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { + cla_table = &bat_table->entry[i]; + if (cla_table->obj_num == 0) { + cqm_info(handle->dev_hdl, + "Obj table init: cla_table_type %u, obj_num=0, don't init obj table\n", + cla_table->type); + continue; + } + + obj_table = &cla_table->obj_table; + + switch (cla_table->type) { + case CQM_BAT_ENTRY_T_QPC: + obj_table->max_num = capability->qpc_number; + ret = cqm_single_object_table_init(obj_table); + break; + case CQM_BAT_ENTRY_T_MPT: + obj_table->max_num = capability->mpt_number; + ret = cqm_single_object_table_init(obj_table); + break; + case CQM_BAT_ENTRY_T_SCQC: + obj_table->max_num = capability->scqc_number; + ret = cqm_single_object_table_init(obj_table); + break; + case CQM_BAT_ENTRY_T_SRQC: + obj_table->max_num = capability->srqc_number; + ret = cqm_single_object_table_init(obj_table); + break; + default: + break; + } + + if (ret != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, + "Obj table init: failed to init cla_table_type=%u, obj_num=0x%x\n", + cla_table->type, cla_table->obj_num); + goto err; + } + } + + return CQM_SUCCESS; + +err: + cqm_object_table_uninit(cqm_handle); + return CQM_FAIL; +} + +/** + * Prototype : cqm_object_table_uninit + * Description : Deinitialize the association table between objects and + * indexes. 
+ * Input : struct tag_cqm_handle *cqm_handle + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void cqm_object_table_uninit(struct tag_cqm_handle *cqm_handle) +{ + struct tag_cqm_bat_table *bat_table = &cqm_handle->bat_table; + struct tag_cqm_object_table *obj_table = NULL; + struct tag_cqm_cla_table *cla_table = NULL; + u32 i; + + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { + cla_table = &bat_table->entry[i]; + obj_table = &cla_table->obj_table; + if (cla_table->type != CQM_BAT_ENTRY_T_INVALID) { + if (obj_table->table) { + rwlock_deinit(&obj_table->lock); + vfree(obj_table->table); + obj_table->table = NULL; + } + } + } +} + +/** + * Prototype : cqm_object_table_insert + * Description : Insert an object + * Input : struct tag_cqm_handle *cqm_handle + * struct tag_cqm_object_table *object_table + * u32 index + * struct tag_cqm_object *obj + * bool bh + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/15 + * Modification : Created function + */ +s32 cqm_object_table_insert(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_object_table *object_table, + u32 index, struct tag_cqm_object *obj, bool bh) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + + if (index >= object_table->max_num) { + cqm_err(handle->dev_hdl, + "Obj table insert: index 0x%x exceeds max_num 0x%x\n", + index, object_table->max_num); + return CQM_FAIL; + } + + cqm_write_lock(&object_table->lock, bh); + + if (!object_table->table[index]) { + object_table->table[index] = obj; + cqm_write_unlock(&object_table->lock, bh); + return CQM_SUCCESS; + } + + cqm_write_unlock(&object_table->lock, bh); + cqm_err(handle->dev_hdl, + "Obj table insert: object_table->table[0x%x] has been inserted\n", + index); + + return CQM_FAIL; +} + +/** + * Prototype : cqm_object_table_remove + * Description : Remove an object + * Input : struct tag_cqm_handle *cqm_handle + * struct tag_cqm_object_table *object_table + * u32 index + * const struct tag_cqm_object *obj + * bool bh + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void cqm_object_table_remove(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_object_table *object_table, + u32 index, const struct tag_cqm_object *obj, bool bh) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + + if (index >= object_table->max_num) { + cqm_err(handle->dev_hdl, + "Obj table remove: index 0x%x exceeds max_num 0x%x\n", + index, object_table->max_num); + return; + } + + cqm_write_lock(&object_table->lock, bh); + + if (object_table->table[index] && object_table->table[index] == obj) + object_table->table[index] = NULL; + else + cqm_err(handle->dev_hdl, + "Obj table remove: object_table->table[0x%x] has been removed\n", + index); + + cqm_write_unlock(&object_table->lock, bh); +} + +/** + * Prototype : cqm_object_table_get + * Description : Remove an object + * Input : struct tag_cqm_handle *cqm_handle + * struct tag_cqm_object_table *object_table + * u32 index + * bool bh + * Output : None + * Return Value : struct tag_cqm_object *obj + * 1.Date : 2018/6/20 + * Modification : Created function + */ +struct tag_cqm_object *cqm_object_table_get(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_object_table *object_table, + u32 index, bool bh) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_object *obj = NULL; + + if (index >= object_table->max_num) { + cqm_err(handle->dev_hdl, + "Obj table get: index 0x%x exceeds max_num 0x%x\n", + index, 
object_table->max_num); + return NULL; + } + + cqm_read_lock(&object_table->lock, bh); + + obj = object_table->table[index]; + if (obj) + atomic_inc(&obj->refcount); + + cqm_read_unlock(&object_table->lock, bh); + + return obj; +} + +u32 cqm_bitmap_alloc_by_xid(struct tag_cqm_bitmap *bitmap, u32 count, u32 index) +{ + ulong *table = bitmap->table; + u32 ret_index; + + if (index >= bitmap->max_num || count != 1) + return CQM_INDEX_INVALID; + + spin_lock(&bitmap->lock); + + if (test_bit((int)index, table)) { + ret_index = CQM_INDEX_INVALID; + } else { + set_bit(index, table); + ret_index = index; + } + + spin_unlock(&bitmap->lock); + return ret_index; +} diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bitmap_table.h b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bitmap_table.h new file mode 100644 index 000000000..7febf767f --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bitmap_table.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef CQM_BITMAP_TABLE_H +#define CQM_BITMAP_TABLE_H + +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/spinlock.h> + +#include "cqm_object.h" + +struct tag_cqm_bitmap { + ulong *table; + u32 max_num; + u32 last; + u32 reserved_top; /* reserved index */ + u32 reserved_back; + spinlock_t lock; /* lock for cqm */ + struct vram_buf_info bitmap_info; +}; + +struct tag_cqm_object_table { + /* Now is big array. Later will be optimized as a red-black tree. */ + struct tag_cqm_object **table; + u32 max_num; + rwlock_t lock; +}; + +struct tag_cqm_handle; + +s32 cqm_bitmap_init(struct tag_cqm_handle *cqm_handle); +void cqm_bitmap_uninit(struct tag_cqm_handle *cqm_handle); +u32 cqm_bitmap_alloc(struct tag_cqm_bitmap *bitmap, u32 step, u32 count, bool update_last); +u32 cqm_bitmap_alloc_low2bit_align(struct tag_cqm_bitmap *bitmap, u32 low2bit, bool update_last); +u32 cqm_bitmap_alloc_reserved(struct tag_cqm_bitmap *bitmap, u32 count, u32 index); +void cqm_bitmap_free(struct tag_cqm_bitmap *bitmap, u32 index, u32 count); +s32 cqm_object_table_init(struct tag_cqm_handle *cqm_handle); +void cqm_object_table_uninit(struct tag_cqm_handle *cqm_handle); +s32 cqm_object_table_insert(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_object_table *object_table, + u32 index, struct tag_cqm_object *obj, bool bh); +void cqm_object_table_remove(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_object_table *object_table, + u32 index, const struct tag_cqm_object *obj, bool bh); +struct tag_cqm_object *cqm_object_table_get(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_object_table *object_table, + u32 index, bool bh); +u32 cqm_bitmap_alloc_by_xid(struct tag_cqm_bitmap *bitmap, u32 count, u32 index); + +void cqm_swab64(u8 *addr, u32 cnt); +void cqm_swab32(u8 *addr, u32 cnt); +bool cqm_check_align(u32 data); +s32 cqm_shift(u32 data); +s32 cqm_buf_alloc(struct tag_cqm_handle *cqm_handle, struct tag_cqm_buf *buf, bool direct); +s32 cqm_buf_alloc_direct(struct tag_cqm_handle *cqm_handle, struct tag_cqm_buf *buf, bool direct); +void cqm_buf_free(struct tag_cqm_buf *buf, struct tag_cqm_handle *cqm_handle); +void cqm_buf_free_cache_inv(struct tag_cqm_handle *cqm_handle, struct tag_cqm_buf *buf, + s32 *inv_flag); +s32 cqm_cla_cache_invalid(struct tag_cqm_handle *cqm_handle, dma_addr_t gpa, + u32 cache_size); +void *cqm_kmalloc_align(size_t size, gfp_t flags, u16 align_order); +void cqm_kfree_align(void *addr); +void cqm_byte_print(u32 *ptr, u32 len); + +#endif /* CQM_BITMAP_TABLE_H */ 
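[Reviewer note, not part of the patch] The object table declared above maps an index to an object pointer and takes a reference on lookup: cqm_object_table_get() bumps obj->refcount under a read lock, while insert/remove take the write lock. A self-contained userspace analogue of that pattern, using a pthread rwlock and a C11 atomic in place of rwlock_t/atomic_t, is sketched below; the capacity and type names are invented for the example (build with -lpthread).

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define TBL_MAX 8	/* example capacity */

struct demo_obj {
	atomic_int refcount;
	int id;
};

/* index -> object map; lookups take a reference under a read lock,
 * inserts take the write lock (analogue of cqm_object_table_*).
 */
static struct demo_obj *tbl[TBL_MAX];
static pthread_rwlock_t tbl_lock = PTHREAD_RWLOCK_INITIALIZER;

static int tbl_insert(unsigned int index, struct demo_obj *obj)
{
	int ret = -1;

	if (index >= TBL_MAX)
		return -1;
	pthread_rwlock_wrlock(&tbl_lock);
	if (!tbl[index]) {	/* refuse a double insert, as the driver does */
		tbl[index] = obj;
		ret = 0;
	}
	pthread_rwlock_unlock(&tbl_lock);
	return ret;
}

static struct demo_obj *tbl_get(unsigned int index)
{
	struct demo_obj *obj = NULL;

	if (index >= TBL_MAX)
		return NULL;
	pthread_rwlock_rdlock(&tbl_lock);
	obj = tbl[index];
	if (obj)
		atomic_fetch_add(&obj->refcount, 1);	/* caller must drop this reference */
	pthread_rwlock_unlock(&tbl_lock);
	return obj;
}

int main(void)
{
	struct demo_obj o = { .refcount = 0, .id = 42 };
	struct demo_obj *p;

	tbl_insert(3, &o);
	p = tbl_get(3);
	if (p)
		printf("id %d, refcount %d\n", p->id, atomic_load(&p->refcount));
	return 0;
}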
diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bloomfilter.c b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bloomfilter.c new file mode 100644 index 000000000..5ff669733 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bloomfilter.c @@ -0,0 +1,535 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/module.h> +#include <linux/vmalloc.h> + +#include "ossl_knl.h" +#include "hinic3_crm.h" +#include "hinic3_hw.h" +#include "hinic3_hwdev.h" + +#include "cqm_object.h" +#include "cqm_bitmap_table.h" +#include "cqm_bat_cla.h" +#include "cqm_cmd.h" +#include "cqm_main.h" +#include "cqm_bloomfilter.h" + +#include "cqm_npu_cmd.h" +#include "cqm_npu_cmd_defs.h" + +/** + * Prototype : bloomfilter_init_cmd + * Description : host send cmd to ucode to init bloomfilter mem + * Input : void *ex_handle + * Output : None + * Return Value : s32 + * 1.Date : 2016/8/13 + * Modification : Created function + */ +static s32 bloomfilter_init_cmd(struct tag_cqm_handle *cqm_handle) +{ + struct tag_cqm_func_capability *capability = &cqm_handle->func_capability; + struct tag_cqm_bloomfilter_init_cmd *cmd = NULL; + struct tag_cqm_cmd_buf *buf_in = NULL; + s32 ret; + + buf_in = cqm_cmd_alloc((void *)(cqm_handle->ex_handle)); + if (!buf_in) + return CQM_FAIL; + + /* Fill the command format and convert it to big-endian. */ + buf_in->size = sizeof(struct tag_cqm_bloomfilter_init_cmd); + cmd = (struct tag_cqm_bloomfilter_init_cmd *)(buf_in->buf); + cmd->bloom_filter_addr = capability->bloomfilter_addr; + cmd->bloom_filter_len = capability->bloomfilter_length; + + cqm_swab32((u8 *)cmd, + (sizeof(struct tag_cqm_bloomfilter_init_cmd) >> CQM_DW_SHIFT)); + + ret = cqm_send_cmd_box((void *)(cqm_handle->ex_handle), + CQM_MOD_CQM, CQM_CMD_T_BLOOMFILTER_INIT, buf_in, + NULL, NULL, CQM_CMD_TIMEOUT, + HINIC3_CHANNEL_DEFAULT); + if (ret != CQM_SUCCESS) { + cqm_err(cqm_handle->ex_handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_send_cmd_box)); + cqm_err(cqm_handle->ex_handle->dev_hdl, "Bloomfilter: %s ret=%d\n", __func__, + ret); + cqm_err(cqm_handle->ex_handle->dev_hdl, "Bloomfilter: %s: 0x%x 0x%x\n", + __func__, cmd->bloom_filter_addr, + cmd->bloom_filter_len); + cqm_cmd_free((void *)(cqm_handle->ex_handle), buf_in); + return CQM_FAIL; + } + cqm_cmd_free((void *)(cqm_handle->ex_handle), buf_in); + return CQM_SUCCESS; +} + +static void cqm_func_bloomfilter_uninit(struct tag_cqm_handle *cqm_handle) +{ + struct tag_cqm_bloomfilter_table *bloomfilter_table = &cqm_handle->bloomfilter_table; + + if (bloomfilter_table->table) { + mutex_deinit(&bloomfilter_table->lock); + vfree(bloomfilter_table->table); + bloomfilter_table->table = NULL; + } +} + +static s32 cqm_func_bloomfilter_init(struct tag_cqm_handle *cqm_handle) +{ + struct tag_cqm_bloomfilter_table *bloomfilter_table = NULL; + struct tag_cqm_func_capability *capability = NULL; + u32 array_size; + s32 ret; + + bloomfilter_table = &cqm_handle->bloomfilter_table; + capability = &cqm_handle->func_capability; + + if (capability->bloomfilter_length == 0) { + cqm_info(cqm_handle->ex_handle->dev_hdl, + "Bloomfilter: bf_length=0, don't need to init bloomfilter\n"); + return CQM_SUCCESS; + } + + /* The unit of bloomfilter_length is 64B(512bits). Each bit is a table + * node. Therefore the value must be shift 9 bits to the left. 
+ */ + bloomfilter_table->table_size = capability->bloomfilter_length << + CQM_BF_LENGTH_UNIT; + /* The unit of bloomfilter_length is 64B. The unit of array entryis 32B. + */ + array_size = capability->bloomfilter_length << 1; + if (array_size == 0 || array_size > CQM_BF_BITARRAY_MAX) { + cqm_err(cqm_handle->ex_handle->dev_hdl, CQM_WRONG_VALUE(array_size)); + return CQM_FAIL; + } + + bloomfilter_table->array_mask = array_size - 1; + /* This table is not a bitmap, it is the counter of corresponding bit. + */ + bloomfilter_table->table = vmalloc(bloomfilter_table->table_size * + (sizeof(u32))); + if (!bloomfilter_table->table) + return CQM_FAIL; + + memset(bloomfilter_table->table, 0, (bloomfilter_table->table_size * sizeof(u32))); + + /* The bloomfilter must be initialized to 0 by ucode, + * because the bloomfilter is mem mode + */ + if (cqm_handle->func_capability.bloomfilter_enable) { + ret = bloomfilter_init_cmd(cqm_handle); + if (ret != CQM_SUCCESS) { + cqm_err(cqm_handle->ex_handle->dev_hdl, + "Bloomfilter: bloomfilter_init_cmd ret=%d\n", + ret); + vfree(bloomfilter_table->table); + bloomfilter_table->table = NULL; + return CQM_FAIL; + } + } + + mutex_init(&bloomfilter_table->lock); + cqm_dbg("Bloomfilter: table_size=0x%x, array_size=0x%x\n", + bloomfilter_table->table_size, array_size); + return CQM_SUCCESS; +} + +static void cqm_fake_bloomfilter_uninit(struct tag_cqm_handle *cqm_handle) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_handle *fake_cqm_handle = NULL; + s32 child_func_number; + u32 i; + + if (cqm_handle->func_capability.fake_func_type != CQM_FAKE_FUNC_PARENT) + return; + + child_func_number = cqm_get_child_func_number(cqm_handle); + if (child_func_number == CQM_FAIL) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(child_func_number)); + return; + } + + for (i = 0; i < (u32)child_func_number; i++) { + fake_cqm_handle = cqm_handle->fake_cqm_handle[i]; + cqm_func_bloomfilter_uninit(fake_cqm_handle); + } +} + +static s32 cqm_fake_bloomfilter_init(struct tag_cqm_handle *cqm_handle) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_handle *fake_cqm_handle = NULL; + s32 child_func_number; + u32 i; + + if (cqm_handle->func_capability.fake_func_type != CQM_FAKE_FUNC_PARENT) + return CQM_SUCCESS; + + child_func_number = cqm_get_child_func_number(cqm_handle); + if (child_func_number == CQM_FAIL) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(child_func_number)); + return CQM_FAIL; + } + + for (i = 0; i < (u32)child_func_number; i++) { + fake_cqm_handle = cqm_handle->fake_cqm_handle[i]; + if (cqm_func_bloomfilter_init(fake_cqm_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_func_bloomfilter_init)); + goto bloomfilter_init_err; + } + } + + return CQM_SUCCESS; + +bloomfilter_init_err: + cqm_fake_bloomfilter_uninit(cqm_handle); + return CQM_FAIL; +} + +/** + * Prototype : cqm_bloomfilter_init + * Description : initialize the bloomfilter of cqm + * Input : void *ex_handle + * Output : None + * Return Value : s32 + * 1.Date : 2016/7/6 + * Modification : Created function + */ +s32 cqm_bloomfilter_init(void *ex_handle) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_handle *cqm_handle = NULL; + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + + if (cqm_fake_bloomfilter_init(cqm_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_fake_bloomfilter_init)); + return CQM_FAIL; + } + + if (cqm_func_bloomfilter_init(cqm_handle) != CQM_SUCCESS) { + 
cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_func_bloomfilter_init)); + goto bloomfilter_init_err; + } + + return CQM_SUCCESS; + +bloomfilter_init_err: + cqm_fake_bloomfilter_uninit(cqm_handle); + return CQM_FAIL; +} + +/** + * Prototype : cqm_bloomfilter_uninit + * Description : uninitialize the bloomfilter of cqm + * Input : void *ex_handle + * Output : None + * Return Value : void + * 1.Date : 2016/7/6 + * Modification : Created function + */ +void cqm_bloomfilter_uninit(void *ex_handle) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_handle *cqm_handle = NULL; + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + + cqm_fake_bloomfilter_uninit(cqm_handle); + cqm_func_bloomfilter_uninit(cqm_handle); +} + +/** + * Prototype : cqm_bloomfilter_cmd + * Description : host send bloomfilter api cmd to ucode + * Input : void *ex_handle + * u32 op, + * u32 k_flag + * u64 id, + * Output : None + * Return Value : s32 + * 1.Date : 2016/7/7 + * Modification : Created function + */ +s32 cqm_bloomfilter_cmd(void *ex_handle, u16 func_id, u32 op, u32 k_flag, u64 id) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_cmd_buf *buf_in = NULL; + struct tag_cqm_bloomfilter_cmd *cmd = NULL; + s32 ret; + + buf_in = cqm_cmd_alloc(ex_handle); + if (!buf_in) + return CQM_FAIL; + + /* Fill the command format and convert it to big-endian. */ + buf_in->size = sizeof(struct tag_cqm_bloomfilter_cmd); + cmd = (struct tag_cqm_bloomfilter_cmd *)(buf_in->buf); + memset((void *)cmd, 0, sizeof(struct tag_cqm_bloomfilter_cmd)); + cmd->func_id = func_id; + cmd->k_en = k_flag; + cmd->index_h = (u32)(id >> CQM_DW_OFFSET); + cmd->index_l = (u32)(id & CQM_DW_MASK); + + cqm_swab32((u8 *)cmd, (sizeof(struct tag_cqm_bloomfilter_cmd) >> CQM_DW_SHIFT)); + + ret = cqm_send_cmd_box(ex_handle, CQM_MOD_CQM, (u8)op, buf_in, NULL, + NULL, CQM_CMD_TIMEOUT, HINIC3_CHANNEL_DEFAULT); + if (ret != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_send_cmd_box)); + cqm_err(handle->dev_hdl, "Bloomfilter: bloomfilter_cmd ret=%d\n", + ret); + cqm_err(handle->dev_hdl, "Bloomfilter: op=0x%x, cmd: 0x%x 0x%x 0x%x 0x%x\n", + op, *((u32 *)cmd), *(((u32 *)cmd) + CQM_DW_INDEX1), + *(((u32 *)cmd) + CQM_DW_INDEX2), + *(((u32 *)cmd) + CQM_DW_INDEX3)); + cqm_cmd_free(ex_handle, buf_in); + return CQM_FAIL; + } + + cqm_cmd_free(ex_handle, buf_in); + + return CQM_SUCCESS; +} + +static struct tag_cqm_handle *cqm_get_func_cqm_handle(struct hinic3_hwdev *ex_handle, u16 func_id) +{ + struct tag_cqm_handle *cqm_handle = NULL; + struct tag_cqm_func_capability *func_cap = NULL; + s32 child_func_start, child_func_number; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ex_handle is null\n", __func__); + return NULL; + } + + cqm_handle = (struct tag_cqm_handle *)(ex_handle->cqm_hdl); + if (unlikely(!cqm_handle)) { + pr_err("[CQM]%s: cqm_handle is null\n", __func__); + return NULL; + } + + /* function id is PF/VF */ + if (func_id == hinic3_global_func_id(ex_handle)) + return cqm_handle; + + func_cap = &cqm_handle->func_capability; + if (func_cap->fake_func_type != CQM_FAKE_FUNC_PARENT) { + cqm_err(ex_handle->dev_hdl, CQM_WRONG_VALUE(func_cap->fake_func_type)); + return NULL; + } + + child_func_start = cqm_get_child_func_start(cqm_handle); + if (child_func_start == CQM_FAIL) { + cqm_err(ex_handle->dev_hdl, CQM_WRONG_VALUE(child_func_start)); + return NULL; + } + + child_func_number = cqm_get_child_func_number(cqm_handle); + if (child_func_number == CQM_FAIL) { + 
cqm_err(ex_handle->dev_hdl, CQM_WRONG_VALUE(child_func_number)); + return NULL; + } + + /* function id is fake vf */ + if (func_id >= child_func_start && (func_id < (child_func_start + child_func_number))) + return cqm_handle->fake_cqm_handle[func_id - (u16)child_func_start]; + + return NULL; +} + +/** + * Prototype : cqm_bloomfilter_inc + * Description : The reference counting field is added to the ID of the + * bloomfilter. + * Input : void *ex_handle + * u64 id--hash value + * Output : None + * Return Value : s32 + * 1.Date : 2016/7/7 + * Modification : Created function + */ +s32 cqm_bloomfilter_inc(void *ex_handle, u16 func_id, u64 id) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_bloomfilter_table *bloomfilter_table = NULL; + u32 array_tmp[CQM_BF_SECTION_NUMBER] = {0}; + struct tag_cqm_handle *cqm_handle = NULL; + u32 array_index, array_bit, i; + u32 k_flag = 0; + + cqm_dbg("Bloomfilter: func_id: %d, inc id=0x%llx\n", func_id, id); + + cqm_handle = cqm_get_func_cqm_handle(ex_handle, func_id); + if (unlikely(!cqm_handle)) { + pr_err("[CQM]%s: cqm_handle_bf_inc is null\n", __func__); + return CQM_FAIL; + } + + if (cqm_handle->func_capability.bloomfilter_enable == 0) { + cqm_info(handle->dev_hdl, "Bloomfilter inc: bloomfilter is disable\n"); + return CQM_SUCCESS; + } + + /* |(array_index=0)32B(array_bit:256bits)|(array_index=1)32B(256bits)| + * array_index = 0~bloomfilter_table->table_size/256bit + * array_bit = 0~255 + */ + cqm_dbg("Bloomfilter: inc id=0x%llx\n", id); + bloomfilter_table = &cqm_handle->bloomfilter_table; + + /* The array index identifies a 32-byte entry. */ + array_index = (u32)CQM_BF_BITARRAY_INDEX(id, bloomfilter_table->array_mask); + /* convert the unit of array_index to bit */ + array_index = array_index << CQM_BF_ENTRY_SIZE_UNIT; + cqm_dbg("Bloomfilter: inc array_index=0x%x\n", array_index); + + mutex_lock(&bloomfilter_table->lock); + for (i = 0; i < CQM_BF_SECTION_NUMBER; i++) { + /* the position of the bit in 64-bit section */ + array_bit = + (id >> (CQM_BF_SECTION_BASE + i * CQM_BF_SECTION_SIZE)) & + CQM_BF_SECTION_MASK; + /* array_bit + number of 32-byte array entries + number of + * 64-bit sections before the section + */ + array_bit = array_bit + array_index + + (i * CQM_BF_SECTION_BIT_NUMBER); + + /* array_temp[i] records the index of the bloomfilter. + * It is used to roll back the reference counting of the + * bitarray. + */ + array_tmp[i] = array_bit; + cqm_dbg("Bloomfilter: inc array_bit=0x%x\n", array_bit); + + /* Add one to the corresponding bit in bloomfilter table. + * If the value changes from 0 to 1, change the corresponding + * bit in k_flag. + */ + (bloomfilter_table->table[array_bit])++; + cqm_dbg("Bloomfilter: inc bloomfilter_table->table[%d]=0x%x\n", + array_bit, bloomfilter_table->table[array_bit]); + if (bloomfilter_table->table[array_bit] == 1) + k_flag |= (1U << i); + } + + if (k_flag != 0) { + /* send cmd to ucode and set corresponding bit. 
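A note on the indexing used in the loop above, with a standalone sketch of it: the counter table allocated in cqm_func_bloomfilter_init() holds one u32 reference counter per bloom filter bit (bloomfilter_length is given in 64-byte units, so the table holds bloomfilter_length << CQM_BF_LENGTH_UNIT counters, while array_mask addresses 32-byte entries). Bits [47:32] of the hash select one 256-bit entry, the entry is split into four 64-bit sections, and hash bits |13:8|, |19:14|, |25:20|, |31:26| pick one bit in each section. The helper below reproduces that arithmetic on a caller-supplied counter array and returns the same k_flag (one bit per section whose counter went from 0 to 1). It is an illustration only, not driver code; the constants are copied from cqm_bloomfilter.h.

#include <stdint.h>

#define CQM_BF_ENTRY_SIZE_UNIT		8	/* one 32B entry == 256 bits */
#define CQM_BF_SECTION_NUMBER		4
#define CQM_BF_SECTION_BASE		8
#define CQM_BF_SECTION_SIZE		6
#define CQM_BF_SECTION_MASK		0x3f
#define CQM_BF_SECTION_BIT_NUMBER	64
#define CQM_BF_ARRAY_INDEX_OFFSET	32
#define CQM_BF_BITARRAY_INDEX(id, mask) \
	(((id) >> CQM_BF_ARRAY_INDEX_OFFSET) & (mask))

/*
 * Bump the four counters selected by 'id' and return k_flag:
 * bit i is set when the counter of section i went from 0 to 1,
 * i.e. when the corresponding filter bit must be set in hardware.
 */
static uint32_t bf_inc_counters(uint32_t *table, uint32_t array_mask, uint64_t id)
{
	uint32_t entry_bit_base, array_bit, k_flag = 0;
	uint32_t i;

	/* entry index in 32B units, converted to a bit offset (x256) */
	entry_bit_base = (uint32_t)CQM_BF_BITARRAY_INDEX(id, array_mask) <<
			 CQM_BF_ENTRY_SIZE_UNIT;

	for (i = 0; i < CQM_BF_SECTION_NUMBER; i++) {
		/* six hash bits select one of the 64 bits in section i */
		array_bit = (uint32_t)(id >> (CQM_BF_SECTION_BASE +
					      i * CQM_BF_SECTION_SIZE)) &
			    CQM_BF_SECTION_MASK;
		array_bit += entry_bit_base + i * CQM_BF_SECTION_BIT_NUMBER;

		if (++table[array_bit] == 1)
			k_flag |= 1U << i;
	}

	return k_flag;
}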
*/ + if (cqm_bloomfilter_cmd(ex_handle, func_id, CQM_CMD_T_BLOOMFILTER_SET, + k_flag, id) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_bloomfilter_cmd_inc)); + for (i = 0; i < CQM_BF_SECTION_NUMBER; i++) { + array_bit = array_tmp[i]; + (bloomfilter_table->table[array_bit])--; + } + mutex_unlock(&bloomfilter_table->lock); + return CQM_FAIL; + } + } + + mutex_unlock(&bloomfilter_table->lock); + + return CQM_SUCCESS; +} +EXPORT_SYMBOL(cqm_bloomfilter_inc); + +/** + * Prototype : cqm_bloomfilter_dec + * Description : The reference counting field is decreased to the ID of the + * bloomfilter. + * Input : void *ex_handle + * u64 id--hash value + * Output : None + * Return Value : s32 + * 1.Date : 2016/7/7 + * Modification : Created function + */ +s32 cqm_bloomfilter_dec(void *ex_handle, u16 func_id, u64 id) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_bloomfilter_table *bloomfilter_table = NULL; + u32 array_tmp[CQM_BF_SECTION_NUMBER] = {0}; + struct tag_cqm_handle *cqm_handle = NULL; + u32 array_index, array_bit, i; + u32 k_flag = 0; + + cqm_handle = cqm_get_func_cqm_handle(ex_handle, func_id); + if (unlikely(!cqm_handle)) { + pr_err("[CQM]%s: cqm_handle_bf_dec is null\n", __func__); + return CQM_FAIL; + } + + if (cqm_handle->func_capability.bloomfilter_enable == 0) { + cqm_info(handle->dev_hdl, "Bloomfilter dec: bloomfilter is disable\n"); + return CQM_SUCCESS; + } + + cqm_dbg("Bloomfilter: dec id=0x%llx\n", id); + bloomfilter_table = &cqm_handle->bloomfilter_table; + + /* The array index identifies a 32-byte entry. */ + array_index = (u32)CQM_BF_BITARRAY_INDEX(id, bloomfilter_table->array_mask); + cqm_dbg("Bloomfilter: dec array_index=0x%x\n", array_index); + mutex_lock(&bloomfilter_table->lock); + for (i = 0; i < CQM_BF_SECTION_NUMBER; i++) { + /* the position of the bit in 64-bit section */ + array_bit = + (id >> (CQM_BF_SECTION_BASE + i * CQM_BF_SECTION_SIZE)) & + CQM_BF_SECTION_MASK; + /* array_bit + number of 32-byte array entries + number of + * 64-bit sections before the section + */ + array_bit = array_bit + (array_index << 0x8) + (i * 0x40); + + /* array_temp[i] records the index of the bloomfilter. + * It is used to roll back the reference counting of the + * bitarray. + */ + array_tmp[i] = array_bit; + + /* Deduct one to the corresponding bit in bloomfilter table. + * If the value changes from 1 to 0, change the corresponding + * bit in k_flag. Do not continue -1 when the reference counting + * value of the bit is 0. + */ + if (bloomfilter_table->table[array_bit] != 0) { + (bloomfilter_table->table[array_bit])--; + cqm_dbg("Bloomfilter: dec bloomfilter_table->table[%d]=0x%x\n", + array_bit, (bloomfilter_table->table[array_bit])); + if (bloomfilter_table->table[array_bit] == 0) + k_flag |= (1U << i); + } + } + + if (k_flag != 0) { + /* send cmd to ucode and clear corresponding bit. 
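Both the set path above and the clear path below go through cqm_bloomfilter_cmd(), which splits the 64-bit hash into two 32-bit halves and byte-swaps every dword of the command to big endian before handing it to the command queue (that is what cqm_swab32() is for). A minimal standalone sketch of that framing, assuming a little-endian host and a GCC/Clang toolchain, with an illustrative struct in place of the real tag_cqm_bloomfilter_cmd:

#include <stdint.h>
#include <string.h>

#define CQM_DW_OFFSET	32
#define CQM_DW_MASK	0xffffffffU

/* Illustrative stand-in for the on-wire command layout. */
struct bf_cmd_example {
	uint32_t func_id;
	uint32_t k_en;
	uint32_t index_h;
	uint32_t index_l;
};

/* dword-wise conversion to big endian, as cqm_swab32() does in the driver */
static void dwords_to_be32(uint32_t *buf, size_t dw_num)
{
	size_t i;

	for (i = 0; i < dw_num; i++)
		buf[i] = __builtin_bswap32(buf[i]);
}

static void bf_cmd_fill(struct bf_cmd_example *cmd, uint16_t func_id,
			uint32_t k_flag, uint64_t id)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->func_id = func_id;
	cmd->k_en = k_flag;
	cmd->index_h = (uint32_t)(id >> CQM_DW_OFFSET);	/* high dword of the hash */
	cmd->index_l = (uint32_t)(id & CQM_DW_MASK);	/* low dword of the hash */
	dwords_to_be32((uint32_t *)cmd, sizeof(*cmd) / sizeof(uint32_t));
}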
*/ + if (cqm_bloomfilter_cmd(ex_handle, func_id, CQM_CMD_T_BLOOMFILTER_CLEAR, + k_flag, id) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_bloomfilter_cmd_dec)); + for (i = 0; i < CQM_BF_SECTION_NUMBER; i++) { + array_bit = array_tmp[i]; + (bloomfilter_table->table[array_bit])++; + } + mutex_unlock(&bloomfilter_table->lock); + return CQM_FAIL; + } + } + + mutex_unlock(&bloomfilter_table->lock); + + return CQM_SUCCESS; +} +EXPORT_SYMBOL(cqm_bloomfilter_dec); diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bloomfilter.h b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bloomfilter.h new file mode 100644 index 000000000..8fd446c95 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_bloomfilter.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef CQM_BLOOMFILTER_H +#define CQM_BLOOMFILTER_H + +#include <linux/types.h> +#include <linux/mutex.h> + +/* Bloomfilter entry size is 32B(256bit), whitch index is the 48-32-bit of the + * hash. |31~26|25~20|19~14|13~8| will be used to locate 4 bloom filter section + * in one entry. k_en[3:0] used to specify the section of bloom filter. + */ +#define CQM_BF_ENTRY_SIZE 32 +#define CQM_BF_ENTRY_SIZE_UNIT 8 +#define CQM_BF_BITARRAY_MAX BIT(17) + +#define CQM_BF_SECTION_NUMBER 4 +#define CQM_BF_SECTION_BASE 8 +#define CQM_BF_SECTION_SIZE 6 +#define CQM_BF_SECTION_MASK 0x3f +#define CQM_BF_SECTION_BIT_NUMBER 64 + +#define CQM_BF_ARRAY_INDEX_OFFSET 32 +#define CQM_BF_BITARRAY_INDEX(id, mask) \ + (((id) >> CQM_BF_ARRAY_INDEX_OFFSET) & (mask)) + +/* The unit of bloomfilter_length is 64B(512bits). */ +#define CQM_BF_LENGTH_UNIT 9 + +#define CQM_DW_MASK 0xffffffff +#define CQM_DW_OFFSET 32 +#define CQM_DW_INDEX0 0 +#define CQM_DW_INDEX1 1 +#define CQM_DW_INDEX2 2 +#define CQM_DW_INDEX3 3 + +struct tag_cqm_bloomfilter_table { + u32 *table; + u32 table_size; /* The unit is bit */ + u32 array_mask; /* The unit of array entry is 32B, used to address entry + */ + struct mutex lock; +}; + +/* only for test */ +s32 cqm_bloomfilter_cmd(void *ex_handle, u16 func_id, u32 op, u32 k_flag, u64 id); +s32 cqm_bloomfilter_init(void *ex_handle); +void cqm_bloomfilter_uninit(void *ex_handle); +s32 cqm_bloomfilter_inc(void *ex_handle, u16 func_id, u64 id); +s32 cqm_bloomfilter_dec(void *ex_handle, u16 func_id, u64 id); + +#endif /* CQM_BLOOMFILTER_H */ diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_cmd.c b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_cmd.c new file mode 100644 index 000000000..413629ac1 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_cmd.c @@ -0,0 +1,250 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/module.h> +#include <linux/vmalloc.h> + +#include "ossl_knl.h" +#include "hinic3_hw.h" +#include "hinic3_mt.h" +#include "hinic3_hwdev.h" + +#include "cqm_bitmap_table.h" +#include "cqm_bat_cla.h" +#include "cqm_main.h" + +/** + * Prototype : cqm_cmd_alloc + * Description : Apply for a cmd buffer. The buffer size is fixed to 2 KB. + * The buffer content is not cleared and needs to be cleared by + * services. 
+ * Input : void *ex_handle + * Output : None + * Return Value : struct tag_cqm_cmd_buf * + * 1.Date : 2015/4/15 + * Modification : Created function + */ +struct tag_cqm_cmd_buf *cqm_cmd_alloc(void *ex_handle) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ex_handle is null\n", __func__); + return NULL; + } + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_cmd_alloc_cnt); + + return (struct tag_cqm_cmd_buf *)hinic3_alloc_cmd_buf(ex_handle); +} +EXPORT_SYMBOL(cqm_cmd_alloc); + +/** + * Prototype : cqm_cmd_free + * Description : Release for a cmd buffer. + * Input : void *ex_handle + * struct tag_cqm_cmd_buf *cmd_buf + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void cqm_cmd_free(void *ex_handle, struct tag_cqm_cmd_buf *cmd_buf) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ex_handle is null\n", __func__); + return; + } + if (unlikely(!cmd_buf)) { + pr_err("[CQM]%s: cmd_buf is null\n", __func__); + return; + } + if (unlikely(!cmd_buf->buf)) { + pr_err("[CQM]%s: buf is null\n", __func__); + return; + } + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_cmd_free_cnt); + + hinic3_free_cmd_buf(ex_handle, (struct hinic3_cmd_buf *)cmd_buf); +} +EXPORT_SYMBOL(cqm_cmd_free); + +/** + * Prototype : cqm_send_cmd_box + * Description : Send a cmd message in box mode. + * This interface will mount a completion quantity, + * causing sleep. + * Input : void *ex_handle + * u8 mod + * u8 cmd, + * struct tag_cqm_cmd_buf *buf_in + * struct tag_cqm_cmd_buf *buf_out + * u64 *out_param + * u32 timeout + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/15 + * Modification : Created function + */ +s32 cqm_send_cmd_box(void *ex_handle, u8 mod, u8 cmd, struct tag_cqm_cmd_buf *buf_in, + struct tag_cqm_cmd_buf *buf_out, u64 *out_param, u32 timeout, + u16 channel) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ex_handle is null\n", __func__); + return CQM_FAIL; + } + if (unlikely(!buf_in)) { + pr_err("[CQM]%s: buf_in is null\n", __func__); + return CQM_FAIL; + } + if (unlikely(!buf_in->buf)) { + pr_err("[CQM]%s: buf is null\n", __func__); + return CQM_FAIL; + } + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_send_cmd_box_cnt); + + return hinic3_cmdq_detail_resp(ex_handle, mod, cmd, + (struct hinic3_cmd_buf *)buf_in, + (struct hinic3_cmd_buf *)buf_out, + out_param, timeout, channel); +} +EXPORT_SYMBOL(cqm_send_cmd_box); + +/** + * Prototype : cqm_lb_send_cmd_box + * Description : Send a cmd message in box mode and open cos_id. + * This interface will mount a completion quantity, + * causing sleep. 
+ * Input : void *ex_handle + * u8 mod + * u8 cmd + * u8 cos_id + * struct tag_cqm_cmd_buf *buf_in + * struct tag_cqm_cmd_buf *buf_out + * u64 *out_param + * u32 timeout + * Output : None + * Return Value : s32 + * 1.Date : 2020/4/9 + * Modification : Created function + */ +s32 cqm_lb_send_cmd_box(void *ex_handle, u8 mod, u8 cmd, u8 cos_id, + struct tag_cqm_cmd_buf *buf_in, struct tag_cqm_cmd_buf *buf_out, + u64 *out_param, u32 timeout, u16 channel) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ex_handle is null\n", __func__); + return CQM_FAIL; + } + if (unlikely(!buf_in)) { + pr_err("[CQM]%s: buf_in is null\n", __func__); + return CQM_FAIL; + } + if (unlikely(!buf_in->buf)) { + pr_err("[CQM]%s: buf is null\n", __func__); + return CQM_FAIL; + } + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_send_cmd_box_cnt); + + return hinic3_cos_id_detail_resp(ex_handle, mod, cmd, cos_id, + (struct hinic3_cmd_buf *)buf_in, + (struct hinic3_cmd_buf *)buf_out, + out_param, timeout, channel); +} +EXPORT_SYMBOL(cqm_lb_send_cmd_box); + +/** + * Prototype : cqm_lb_send_cmd_box_async + * Description : Send a cmd message in box mode and open cos_id. + * This interface will not wait completion + * Input : void *ex_handle + * u8 mod + * u8 cmd + * u8 cos_id + * struct tag_cqm_cmd_buf *buf_in + * u16 channel + * Output : None + * Return Value : s32 + * 1.Date : 2023/5/19 + * Modification : Created function + */ +s32 cqm_lb_send_cmd_box_async(void *ex_handle, u8 mod, u8 cmd, + u8 cos_id, struct tag_cqm_cmd_buf *buf_in, + u16 channel) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ex_handle is null\n", __func__); + return CQM_FAIL; + } + if (unlikely(!buf_in)) { + pr_err("[CQM]%s: buf_in is null\n", __func__); + return CQM_FAIL; + } + if (unlikely(!buf_in->buf)) { + pr_err("[CQM]%s: buf is null\n", __func__); + return CQM_FAIL; + } + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_send_cmd_box_cnt); + + return hinic3_cmdq_async_cos(ex_handle, mod, cmd, cos_id, + (struct hinic3_cmd_buf *)buf_in, channel); +} +EXPORT_SYMBOL(cqm_lb_send_cmd_box_async); + +/** + * Prototype : cqm_send_cmd_imm + * Description : Send a cmd message in imm mode. + * This interface will mount a completion quantity, + * causing sleep. 
+ * Input : void *ex_handle + * u8 mod + * u8 cmd + * struct tag_cqm_cmd_buf *buf_in + * u64 *out_param + * u32 timeout + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/15 + * Modification : Created function + */ +s32 cqm_send_cmd_imm(void *ex_handle, u8 mod, u8 cmd, struct tag_cqm_cmd_buf *buf_in, + u64 *out_param, u32 timeout, u16 channel) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ex_handle is null\n", __func__); + return CQM_FAIL; + } + if (unlikely(!buf_in)) { + pr_err("[CQM]%s: buf_in is null\n", __func__); + return CQM_FAIL; + } + if (unlikely(!buf_in->buf)) { + pr_err("[CQM]%s: buf is null\n", __func__); + return CQM_FAIL; + } + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_send_cmd_imm_cnt); + + return hinic3_cmdq_direct_resp((void *)ex_handle, mod, cmd, + (struct hinic3_cmd_buf *)buf_in, + out_param, timeout, channel); +} +EXPORT_SYMBOL(cqm_send_cmd_imm); diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_cmd.h b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_cmd.h new file mode 100644 index 000000000..a5b85f12b --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_cmd.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef CQM_CMD_H +#define CQM_CMD_H + +#include <linux/types.h> + +#include "cqm_object.h" + +#ifdef __cplusplus +#if __cplusplus +extern "C" { +#endif +#endif /* __cplusplus */ + +#define CQM_CMD_TIMEOUT 10000 /* ms */ + +struct tag_cqm_cmd_buf *cqm_cmd_alloc(void *ex_handle); +void cqm_cmd_free(void *ex_handle, struct tag_cqm_cmd_buf *cmd_buf); +s32 cqm_send_cmd_box(void *ex_handle, u8 mod, u8 cmd, struct tag_cqm_cmd_buf *buf_in, + struct tag_cqm_cmd_buf *buf_out, u64 *out_param, u32 timeout, + u16 channel); +s32 cqm_lb_send_cmd_box(void *ex_handle, u8 mod, u8 cmd, u8 cos_id, + struct tag_cqm_cmd_buf *buf_in, struct tag_cqm_cmd_buf *buf_out, + u64 *out_param, u32 timeout, u16 channel); +s32 cqm_lb_send_cmd_box_async(void *ex_handle, u8 mod, u8 cmd, + u8 cos_id, struct tag_cqm_cmd_buf *buf_in, + u16 channel); +s32 cqm_send_cmd_imm(void *ex_handle, u8 mod, u8 cmd, struct tag_cqm_cmd_buf *buf_in, + u64 *out_param, u32 timeout, u16 channel); + +#ifdef __cplusplus +#if __cplusplus +} +#endif +#endif /* __cplusplus */ + +#endif /* CQM_CMD_H */ diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_db.c b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_db.c new file mode 100644 index 000000000..59ed378ac --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_db.c @@ -0,0 +1,506 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/module.h> +#include <linux/vmalloc.h> + +#include "ossl_knl.h" +#include "hinic3_crm.h" +#include "hinic3_hw.h" +#include "hinic3_mt.h" +#include "hinic3_hwdev.h" + +#include "cqm_object.h" +#include "cqm_bitmap_table.h" +#include "cqm_bat_cla.h" +#include "cqm_object_intern.h" +#include "cqm_main.h" +#include "cqm_db.h" + +/** + * Prototype : cqm_db_addr_alloc + * Description : Apply for a page of hardware doorbell and dwqe. + * The indexes are the same. The obtained addresses are physical + * addresses. Each function has a maximum of 1K addresses(DB). 
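Taken together, cqm_cmd_alloc(), cqm_send_cmd_box() and cqm_cmd_free() give services a simple alloc/fill/send/free pattern (cqm_bloomfilter_cmd() earlier in this patch is a real instance of it). A sketch of typical caller code follows, written in the driver's style; the opcode argument and the payload struct are placeholders, not part of this patch:

/* Placeholder payload; each service defines its own command structs. */
struct my_cmd_example {
	u32 xid;
	u32 rsvd;
};

static s32 my_send_cmd_example(void *ex_handle, u32 xid, u8 op)
{
	struct tag_cqm_cmd_buf *buf_in = NULL;
	struct my_cmd_example *cmd = NULL;
	s32 ret;

	/* 1. Get a command buffer (fixed 2KB, content not cleared by the allocator). */
	buf_in = cqm_cmd_alloc(ex_handle);
	if (!buf_in)
		return CQM_FAIL;

	/* 2. Fill the payload and set the valid length. */
	buf_in->size = sizeof(struct my_cmd_example);
	cmd = (struct my_cmd_example *)(buf_in->buf);
	memset(cmd, 0, sizeof(*cmd));
	cmd->xid = xid;

	/* 3. Send in box mode; this sleeps until the cmdq completion arrives. */
	ret = cqm_send_cmd_box(ex_handle, CQM_MOD_CQM, op, buf_in, NULL,
			       NULL, CQM_CMD_TIMEOUT, HINIC3_CHANNEL_DEFAULT);

	/* 4. Release the buffer on both the success and the failure path. */
	cqm_cmd_free(ex_handle, buf_in);

	return ret;
}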
+ * Input : void *ex_handle + * void __iomem **db_addr, + * void __iomem **dwqe_addr + * Output : None + * Return Value : s32 + * 1.Date : 2015/5/5 + * Modification : Created function + */ +s32 cqm_db_addr_alloc(void *ex_handle, void __iomem **db_addr, + void __iomem **dwqe_addr) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ex_handle is null\n", __func__); + return CQM_FAIL; + } + if (unlikely(!db_addr)) { + pr_err("[CQM]%s: db_addr is null\n", __func__); + return CQM_FAIL; + } + if (unlikely(!dwqe_addr)) { + pr_err("[CQM]%s: dwqe_addr is null\n", __func__); + return CQM_FAIL; + } + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_db_addr_alloc_cnt); + + return hinic3_alloc_db_addr(ex_handle, db_addr, dwqe_addr); +} + +s32 cqm_db_phy_addr_alloc(void *ex_handle, u64 *db_paddr, u64 *dwqe_addr) +{ + return hinic3_alloc_db_phy_addr(ex_handle, db_paddr, dwqe_addr); +} + +/** + * Prototype : cqm_db_addr_free + * Description : Release a page of hardware doorbell and dwqe. + * Input : void *ex_handle + * const void __iomem **db_addr, + * void __iomem **dwqe_addr + * Output : None + * Return Value : void + * 1.Date : 2015/5/5 + * Modification : Created function + */ +void cqm_db_addr_free(void *ex_handle, const void __iomem *db_addr, + void __iomem *dwqe_addr) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ex_handle is null\n", __func__); + return; + } + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_db_addr_free_cnt); + + hinic3_free_db_addr(ex_handle, db_addr, dwqe_addr); +} + +static void cqm_db_phy_addr_free(void *ex_handle, u64 *db_paddr, const u64 *dwqe_addr) +{ + hinic3_free_db_phy_addr(ex_handle, *db_paddr, *dwqe_addr); +} + +static bool cqm_need_db_init(s32 service) +{ + bool need_db_init = false; + + switch (service) { + case CQM_SERVICE_T_NIC: + case CQM_SERVICE_T_OVS: + case CQM_SERVICE_T_IPSEC: + case CQM_SERVICE_T_VIRTIO: + case CQM_SERVICE_T_PPA: + need_db_init = false; + break; + default: + need_db_init = true; + } + + return need_db_init; +} + +/** + * Prototype : cqm_db_init + * Description : Initialize the doorbell of the CQM. + * Input : void *ex_handle + * Output : None + * Return Value : s32 + * 1.Date : 2015/7/6 + * Modification : Created function + */ +s32 cqm_db_init(void *ex_handle) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_handle *cqm_handle = NULL; + struct tag_cqm_service *service = NULL; + s32 i; + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + + /* Allocate hardware doorbells to services. 
*/ + for (i = 0; i < CQM_SERVICE_T_MAX; i++) { + service = &cqm_handle->service[i]; + if (!cqm_need_db_init(i) || !service->valid) + continue; + + if (cqm_db_addr_alloc(ex_handle, &service->hardware_db_vaddr, + &service->dwqe_vaddr) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_db_addr_alloc)); + break; + } + + if (cqm_db_phy_addr_alloc(handle, &service->hardware_db_paddr, + &service->dwqe_paddr) != + CQM_SUCCESS) { + cqm_db_addr_free(ex_handle, service->hardware_db_vaddr, + service->dwqe_vaddr); + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_db_phy_addr_alloc)); + break; + } + } + + if (i != CQM_SERVICE_T_MAX) { + i--; + for (; i >= 0; i--) { + service = &cqm_handle->service[i]; + if (!cqm_need_db_init(i) || !service->valid) + continue; + + cqm_db_addr_free(ex_handle, service->hardware_db_vaddr, + service->dwqe_vaddr); + cqm_db_phy_addr_free(ex_handle, + &service->hardware_db_paddr, + &service->dwqe_paddr); + } + return CQM_FAIL; + } + + return CQM_SUCCESS; +} + +/** + * Prototype : cqm_db_uninit + * Description : Deinitialize the doorbell of the CQM. + * Input : void *ex_handle + * Output : None + * Return Value : void + * 1.Date : 2015/7/6 + * Modification : Created function + */ +void cqm_db_uninit(void *ex_handle) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_handle *cqm_handle = NULL; + struct tag_cqm_service *service = NULL; + s32 i; + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + + /* Release hardware doorbell. */ + for (i = 0; i < CQM_SERVICE_T_MAX; i++) { + service = &cqm_handle->service[i]; + if (service->valid && cqm_need_db_init(i)) { + cqm_db_addr_free(ex_handle, service->hardware_db_vaddr, + service->dwqe_vaddr); + cqm_db_phy_addr_free(ex_handle, &service->hardware_db_paddr, + &service->dwqe_paddr); + } + } +} + +/** + * Prototype : cqm_get_db_addr + * Description : Return hardware DB vaddr. + * Input : void *ex_handle + * u32 service_type + * Output : None + * Return Value : void * + * 1.Date : 2015/7/6 + * Modification : Created function + */ +void *cqm_get_db_addr(void *ex_handle, u32 service_type) +{ + struct tag_cqm_handle *cqm_handle = NULL; + struct tag_cqm_service *service = NULL; + struct hinic3_hwdev *handle = NULL; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ex_handle is null\n", __func__); + return NULL; + } + handle = (struct hinic3_hwdev *)ex_handle; + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + service = &cqm_handle->service[service_type]; + + return (void *)service->hardware_db_vaddr; +} +EXPORT_SYMBOL(cqm_get_db_addr); + +s32 cqm_get_hardware_db_addr(void *ex_handle, u64 *addr, + enum hinic3_service_type service_type) +{ + struct tag_cqm_handle *cqm_handle = NULL; + struct tag_cqm_service *service = NULL; + struct hinic3_hwdev *handle = NULL; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ex_handle is null\n", __func__); + return CQM_FAIL; + } + if (unlikely(!addr)) { + pr_err("[CQM]%s: addr is null\n", __func__); + return CQM_FAIL; + } + + if (service_type < SERVICE_T_NIC || service_type >= SERVICE_T_MAX) { + pr_err("%s service_type = %d state is error\n", __func__, + service_type); + return CQM_FAIL; + } + + handle = (struct hinic3_hwdev *)ex_handle; + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + service = &cqm_handle->service[service_type]; + + *addr = service->hardware_db_paddr; + return CQM_SUCCESS; +} +EXPORT_SYMBOL(cqm_get_hardware_db_addr); + +/** + * Prototype : cqm_ring_hardware_db + * Description : Ring hardware DB to chip. 
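For reference, this is how a service would typically consume the per-service doorbell resources set up in cqm_db_init(): the virtual address returned by cqm_get_db_addr() is used for CPU stores, while cqm_get_hardware_db_addr() hands back the physical address that is usually programmed into a queue context. The snippet below is illustrative only and uses SERVICE_T_NIC purely as an example service type:

static s32 my_get_db_paddr_example(void *ex_handle)
{
	u64 db_paddr = 0;

	/* each valid service was given one hardware doorbell page in cqm_db_init() */
	if (cqm_get_hardware_db_addr(ex_handle, &db_paddr, SERVICE_T_NIC) !=
	    CQM_SUCCESS)
		return CQM_FAIL;

	/* db_paddr would typically be written into a queue context here */
	return CQM_SUCCESS;
}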
+ * Input : void *ex_handle + * u32 service_type: Each kernel-mode service is allocated a + * hardware db page. + * u8 db_count: The bit[7:0] of PI can't be store in 64-bit db. + * u64 db: It contains the content of db, whitch is organized by + * service, including big-endian conversion + * Output : None + * Return Value : s32 + * 1.Date : 2015/5/5 + * Modification : Created function + */ +s32 cqm_ring_hardware_db(void *ex_handle, u32 service_type, u8 db_count, u64 db) +{ + struct tag_cqm_handle *cqm_handle = NULL; + struct tag_cqm_service *service = NULL; + struct hinic3_hwdev *handle = NULL; + + handle = (struct hinic3_hwdev *)ex_handle; + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + service = &cqm_handle->service[service_type]; + + /* Considering the performance of ringing hardware db, + * the parameter is not checked. + */ + wmb(); + *((u64 *)service->hardware_db_vaddr + db_count) = db; + return CQM_SUCCESS; +} +EXPORT_SYMBOL(cqm_ring_hardware_db); + +/** + * Prototype : cqm_ring_hardware_db_fc // Todo cqm_ring_fakevf_hardware_db + * Description : Ring fake vf hardware DB to chip. + * Input : void *ex_handle + * u32 service_type: Each kernel-mode service is allocated a + * hardware db page. + * u8 db_count: The bit[7:0] of PI can't be store in 64-bit db. + * u8 pagenum: Indicates the doorbell address offset of the fake + * VFID. + * u64 db: It contains the content of db, whitch is organized by + * service, including big-endian conversion. + * Output : None + * Return Value : s32 + * 1.Date : 2015/5/5 + * Modification : Created function + */ +s32 cqm_ring_hardware_db_fc(void *ex_handle, u32 service_type, u8 db_count, + u8 pagenum, u64 db) +{ +#define HIFC_DB_FAKE_VF_OFFSET 32 + struct tag_cqm_handle *cqm_handle = NULL; + struct tag_cqm_service *service = NULL; + struct hinic3_hwdev *handle = NULL; + void *dbaddr = NULL; + + handle = (struct hinic3_hwdev *)ex_handle; + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + service = &cqm_handle->service[service_type]; + /* Considering the performance of ringing hardware db, + * the parameter is not checked. + */ + wmb(); + dbaddr = (u8 *)service->hardware_db_vaddr + + ((pagenum + HIFC_DB_FAKE_VF_OFFSET) * HINIC3_DB_PAGE_SIZE); + *((u64 *)dbaddr + db_count) = db; + return CQM_SUCCESS; +} + +/** + * Prototype : cqm_ring_direct_wqe_db // Todo <--cqm_ring_direct_wqe_db_fc + * Description : Ring direct wqe hardware DB to chip. + * Input : void *ex_handle + * u32 service_type: Each kernel-mode service is allocated a + * hardware db page. + * u8 db_count: The bit[7:0] of PI can't be store in 64-bit db. + * void *direct_wqe: The content of direct_wqe. + * u16 length: The length of direct_wqe. + * Output : None + * Return Value : s32 + * 1.Date : 2015/5/5 + * Modification : Created function + */ +s32 cqm_ring_direct_wqe_db(void *ex_handle, u32 service_type, u8 db_count, + void *direct_wqe) +{ + struct tag_cqm_handle *cqm_handle = NULL; + struct tag_cqm_service *service = NULL; + struct hinic3_hwdev *handle = NULL; + u64 *tmp = (u64 *)direct_wqe; + int i; + + handle = (struct hinic3_hwdev *)ex_handle; + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + service = &cqm_handle->service[service_type]; + + /* Considering the performance of ringing hardware db, + * the parameter is not checked. 
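As a usage illustration of cqm_ring_hardware_db() above: the service prepares its 64-bit doorbell payload (already in the byte order its queue expects), passes PI[7:0] separately as db_count because those bits do not fit into the 64-bit value, and the CQM then only issues the write barrier plus a single 8-byte store into the service's doorbell page. The doorbell layout below is a placeholder, not a real hinic3 format:

/* Placeholder doorbell layout; every service defines its own format. */
union my_db_example {
	struct {
		u64 queue_id : 16;
		u64 pi_hi    : 8;	/* upper bits of the producer index */
		u64 rsvd     : 40;
	} bs;
	u64 value;
};

static s32 my_ring_db_example(void *ex_handle, u32 service_type,
			      u16 queue_id, u16 pi)
{
	union my_db_example db = { .value = 0 };

	db.bs.queue_id = queue_id;
	db.bs.pi_hi = pi >> 8;

	/* db_count (PI[7:0]) selects the 8-byte slot inside the doorbell page */
	return cqm_ring_hardware_db(ex_handle, service_type,
				    (u8)(pi & 0xff), db.value);
}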
+ */ + wmb(); + for (i = 0; i < 0x80 / 0x8; i++) + *((u64 *)service->dwqe_vaddr + 0x40 + i) = *tmp++; + + return CQM_SUCCESS; +} +EXPORT_SYMBOL(cqm_ring_direct_wqe_db); + +s32 cqm_ring_direct_wqe_db_fc(void *ex_handle, u32 service_type, + void *direct_wqe) +{ + struct tag_cqm_handle *cqm_handle = NULL; + struct tag_cqm_service *service = NULL; + struct hinic3_hwdev *handle = NULL; + u64 *tmp = (u64 *)direct_wqe; + int i; + + handle = (struct hinic3_hwdev *)ex_handle; + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + service = &cqm_handle->service[service_type]; + + /* Considering the performance of ringing hardware db, + * the parameter is not checked. + */ + wmb(); + *((u64 *)service->dwqe_vaddr + 0x0) = tmp[0x2]; + *((u64 *)service->dwqe_vaddr + 0x1) = tmp[0x3]; + *((u64 *)service->dwqe_vaddr + 0x2) = tmp[0x0]; + *((u64 *)service->dwqe_vaddr + 0x3) = tmp[0x1]; + tmp += 0x4; + + /* The FC use 256B WQE. The directwqe is written at block0, + * and the length is 256B + */ + for (i = 0x4; i < 0x20; i++) + *((u64 *)service->dwqe_vaddr + i) = *tmp++; + + return CQM_SUCCESS; +} + +/** + * Prototype : cqm_ring_hardware_db_update_pri + * Description : Provides the doorbell interface for the CQM to convert the PRI + * to the CoS. The doorbell transmitted by the service must be + * the host sequence. This interface converts the network + * sequence. + * Input : void *ex_handle + * u32 service_type: Each kernel-mode service is allocated a + * hardware db page. + * u8 db_count: The bit[7:0] of PI can't be store in 64-bit db. + * u64 db: It contains the content of db, whitch is organized by + * service, including big-endian conversion. + * Output : None + * Return Value : s32 + * 1.Date : 2016/11/24 + * Modification : Created function + */ +s32 cqm_ring_hardware_db_update_pri(void *ex_handle, u32 service_type, + u8 db_count, u64 db) +{ + struct tag_cqm_db_common *db_common = (struct tag_cqm_db_common *)(&db); + struct tag_cqm_handle *cqm_handle = NULL; + struct tag_cqm_service *service = NULL; + struct hinic3_hwdev *handle = NULL; + + handle = (struct hinic3_hwdev *)ex_handle; + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + service = &cqm_handle->service[service_type]; + + /* the CQM converts the PRI to the CoS */ + db_common->cos = 0x7 - db_common->cos; + + cqm_swab32((u8 *)db_common, sizeof(u64) >> CQM_DW_SHIFT); + + /* Considering the performance of ringing hardware db, + * the parameter is not checked. + */ + wmb(); + *((u64 *)service->hardware_db_vaddr + db_count) = db; + + return CQM_SUCCESS; +} + +/** + * Prototype : cqm_ring_software_db + * Description : Ring software db. + * Input : struct tag_cqm_object *object + * u64 db_record: It contains the content of db, whitch is + * organized by service, including big-endian + * conversion. For RQ/SQ: This field is filled + * with the doorbell_record area of queue_header. + * For CQ: This field is filled with the value of + * ci_record in queue_header. 
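A condensed sketch of what cqm_ring_hardware_db_update_pri() does to the doorbell before the store: the caller fills struct tag_cqm_db_common (declared in cqm_db.h below) in host order with a PRI value in the cos field, the CQM maps PRI to CoS by subtracting it from 7, and both dwords are then converted unconditionally to big endian, as cqm_swab32() does. The helper works on a copy of the 64-bit value and is illustrative only:

/* Illustrative only: mirrors the PRI->CoS fixup and the endian conversion. */
static u64 my_db_pri_to_cos_example(u64 db)
{
	struct tag_cqm_db_common *db_common = (struct tag_cqm_db_common *)&db;
	u32 *dw = (u32 *)&db;

	/* PRI and CoS run in opposite directions on this path: CoS = 7 - PRI */
	db_common->cos = 0x7 - db_common->cos;

	/* convert both dwords of the doorbell to big endian */
	dw[0] = __swab32(dw[0]);
	dw[1] = __swab32(dw[1]);

	return db;
}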
+ * Output : None + * Return Value : s32 + * 1.Date : 2015/5/5 + * Modification : Created function + */ +s32 cqm_ring_software_db(struct tag_cqm_object *object, u64 db_record) +{ + struct tag_cqm_nonrdma_qinfo *nonrdma_qinfo = NULL; + struct tag_cqm_rdma_qinfo *rdma_qinfo = NULL; + struct tag_cqm_handle *cqm_handle = NULL; + struct hinic3_hwdev *handle = NULL; + + if (unlikely(!object)) { + pr_err("[CQM]%s: object is null\n", __func__); + return CQM_FAIL; + } + + cqm_handle = (struct tag_cqm_handle *)object->cqm_handle; + if (unlikely(!cqm_handle)) { + pr_err("[CQM]%s: cqm_handle is null\n", __func__); + return CQM_FAIL; + } + handle = cqm_handle->ex_handle; + + if (object->object_type == CQM_OBJECT_NONRDMA_EMBEDDED_RQ || + object->object_type == CQM_OBJECT_NONRDMA_EMBEDDED_SQ || + object->object_type == CQM_OBJECT_NONRDMA_SRQ) { + nonrdma_qinfo = (struct tag_cqm_nonrdma_qinfo *)(void *)object; + nonrdma_qinfo->common.q_header_vaddr->doorbell_record = + db_record; + } else if ((object->object_type == CQM_OBJECT_NONRDMA_EMBEDDED_CQ) || + (object->object_type == CQM_OBJECT_NONRDMA_SCQ)) { + nonrdma_qinfo = (struct tag_cqm_nonrdma_qinfo *)(void *)object; + nonrdma_qinfo->common.q_header_vaddr->ci_record = db_record; + } else if ((object->object_type == CQM_OBJECT_RDMA_QP) || + (object->object_type == CQM_OBJECT_RDMA_SRQ)) { + rdma_qinfo = (struct tag_cqm_rdma_qinfo *)(void *)object; + rdma_qinfo->common.q_header_vaddr->doorbell_record = db_record; + } else if (object->object_type == CQM_OBJECT_RDMA_SCQ) { + rdma_qinfo = (struct tag_cqm_rdma_qinfo *)(void *)object; + rdma_qinfo->common.q_header_vaddr->ci_record = db_record; + } else { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object->object_type)); + } + + return CQM_SUCCESS; +} +EXPORT_SYMBOL(cqm_ring_software_db); diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_db.h b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_db.h new file mode 100644 index 000000000..954f62bba --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_db.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef CQM_DB_H +#define CQM_DB_H + +#include <linux/types.h> + +struct tag_cqm_db_common { +#if (BYTE_ORDER == LITTLE_ENDIAN) + u32 rsvd1 : 23; + u32 c : 1; + u32 cos : 3; + u32 service_type : 5; +#else + u32 service_type : 5; + u32 cos : 3; + u32 c : 1; + u32 rsvd1 : 23; +#endif + + u32 rsvd2; +}; + +/* Only for test */ +s32 cqm_db_addr_alloc(void *ex_handle, void __iomem **db_addr, + void __iomem **dwqe_addr); +s32 cqm_db_phy_addr_alloc(void *ex_handle, u64 *db_paddr, u64 *dwqe_addr); + +s32 cqm_db_init(void *ex_handle); +void cqm_db_uninit(void *ex_handle); + +s32 cqm_ring_hardware_db(void *ex_handle, u32 service_type, u8 db_count, + u64 db); + +#endif /* CQM_DB_H */ diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_define.h b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_define.h new file mode 100644 index 000000000..fd56354d4 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_define.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef CQM_DEFINE_H +#define CQM_DEFINE_H +#ifndef HIUDK_SDK +#define cqm_init cqm3_init +#define cqm_uninit cqm3_uninit +#define cqm_service_register cqm3_service_register +#define cqm_service_unregister cqm3_service_unregister +#define cqm_bloomfilter_dec cqm3_bloomfilter_dec +#define cqm_bloomfilter_inc cqm3_bloomfilter_inc +#define cqm_cmd_alloc cqm3_cmd_alloc +#define 
cqm_get_hardware_db_addr cqm3_get_hardware_db_addr +#define cqm_cmd_free cqm3_cmd_free +#define cqm_send_cmd_box cqm3_send_cmd_box +#define cqm_lb_send_cmd_box cqm3_lb_send_cmd_box +#define cqm_lb_send_cmd_box_async cqm3_lb_send_cmd_box_async +#define cqm_send_cmd_imm cqm3_send_cmd_imm +#define cqm_db_addr_alloc cqm3_db_addr_alloc +#define cqm_db_addr_free cqm3_db_addr_free +#define cqm_ring_hardware_db cqm3_ring_hardware_db +#define cqm_ring_software_db cqm3_ring_software_db +#define cqm_object_fc_srq_create cqm3_object_fc_srq_create +#define cqm_object_share_recv_queue_create cqm3_object_share_recv_queue_create +#define cqm_object_share_recv_queue_add_container \ + cqm3_object_share_recv_queue_add_container +#define cqm_object_srq_add_container_free cqm3_object_srq_add_container_free +#define cqm_object_recv_queue_create cqm3_object_recv_queue_create +#define cqm_object_qpc_mpt_create cqm3_object_qpc_mpt_create +#define cqm_object_nonrdma_queue_create cqm3_object_nonrdma_queue_create +#define cqm_object_rdma_queue_create cqm3_object_rdma_queue_create +#define cqm_object_rdma_table_get cqm3_object_rdma_table_get +#define cqm_object_delete cqm3_object_delete +#define cqm_object_offset_addr cqm3_object_offset_addr +#define cqm_object_get cqm3_object_get +#define cqm_object_put cqm3_object_put +#define cqm_object_funcid cqm3_object_funcid +#define cqm_object_resize_alloc_new cqm3_object_resize_alloc_new +#define cqm_object_resize_free_new cqm3_object_resize_free_new +#define cqm_object_resize_free_old cqm3_object_resize_free_old +#define cqm_function_timer_clear cqm3_function_timer_clear +#define cqm_function_hash_buf_clear cqm3_function_hash_buf_clear +#define cqm_srq_used_rq_container_delete cqm3_srq_used_rq_container_delete +#define cqm_timer_base cqm3_timer_base +#define cqm_dtoe_free_srq_bitmap_index cqm3_dtoe_free_srq_bitmap_index +#define cqm_dtoe_share_recv_queue_create cqm3_dtoe_share_recv_queue_create +#define cqm_get_db_addr cqm3_get_db_addr +#define cqm_ring_direct_wqe_db cqm3_ring_direct_wqe_db +#define cqm_fake_vf_num_set cqm3_fake_vf_num_set +#define cqm_need_secure_mem cqm3_need_secure_mem + +#endif +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_main.c b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_main.c new file mode 100644 index 000000000..4232ae0f3 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_main.c @@ -0,0 +1,1743 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/vmalloc.h> + +#include "ossl_knl.h" +#include "hinic3_hw.h" +#include "hinic3_mt.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_hw_cfg.h" + +#include "cqm_object.h" +#include "cqm_bitmap_table.h" +#include "cqm_bat_cla.h" +#include "cqm_bloomfilter.h" +#include "cqm_db.h" +#include "cqm_memsec.h" +#include "cqm_main.h" + +static unsigned char roce_qpc_rsv_mode = CQM_QPC_ROCE_NORMAL; +module_param(roce_qpc_rsv_mode, byte, 0644); +MODULE_PARM_DESC(roce_qpc_rsv_mode, + "for roce reserve 4k qpc(qpn) (default=0, 0-rsv:2, 1-rsv:4k, 2-rsv:200k+2)"); + +static s32 cqm_set_fake_vf_child_timer(struct tag_cqm_handle *cqm_handle, + struct tag_cqm_handle *fake_cqm_handle, bool en) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)cqm_handle->ex_handle; + u16 func_global_idx; + s32 ret; + + if (fake_cqm_handle->func_capability.timer_enable == 0) + return CQM_SUCCESS; + + 
func_global_idx = fake_cqm_handle->func_attribute.func_global_idx; + ret = hinic3_func_tmr_bitmap_set(cqm_handle->ex_handle, func_global_idx, en); + if (ret != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, "func_id %u Timer %s timer bitmap failed\n", + func_global_idx, en ? "enable" : "disable"); + return CQM_FAIL; +} + + return CQM_SUCCESS; +} + +static s32 cqm_unset_fake_vf_timer(struct tag_cqm_handle *cqm_handle) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)cqm_handle->ex_handle; + s32 child_func_number; + u32 i; + + child_func_number = cqm_get_child_func_number(cqm_handle); + if (child_func_number == CQM_FAIL) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(child_func_number)); + return CQM_FAIL; + } + + for (i = 0; i < (u32)child_func_number; i++) + (void)cqm_set_fake_vf_child_timer(cqm_handle, + cqm_handle->fake_cqm_handle[i], false); + + return CQM_SUCCESS; +} + +static s32 cqm_set_fake_vf_timer(struct tag_cqm_handle *cqm_handle) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)cqm_handle->ex_handle; + s32 child_func_number; + u32 i; + s32 ret; + + child_func_number = cqm_get_child_func_number(cqm_handle); + if (child_func_number == CQM_FAIL) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(child_func_number)); + return CQM_FAIL; + } + + for (i = 0; i < (u32)child_func_number; i++) { + ret = cqm_set_fake_vf_child_timer(cqm_handle, + cqm_handle->fake_cqm_handle[i], true); + if (ret != CQM_SUCCESS) + goto err; + } + + return CQM_SUCCESS; +err: + (void)cqm_unset_fake_vf_timer(cqm_handle); + return CQM_FAIL; +} + +static s32 cqm_set_timer_enable(void *ex_handle) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_handle *cqm_handle = NULL; + + if (!ex_handle) + return CQM_FAIL; + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + if (cqm_handle->func_capability.fake_func_type == CQM_FAKE_FUNC_PARENT && + cqm_set_fake_vf_timer(cqm_handle) != CQM_SUCCESS) + return CQM_FAIL; + + /* The timer bitmap is set directly at the beginning of the CQM. + * The ifconfig up/down command is not used to set or clear the bitmap. + */ + if (hinic3_func_tmr_bitmap_set(ex_handle, hinic3_global_func_id(ex_handle), + true) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, "func_id %u Timer start: enable timer bitmap failed\n", + hinic3_global_func_id(ex_handle)); + goto err; + } + + return CQM_SUCCESS; + +err: + cqm_unset_fake_vf_timer(cqm_handle); + return CQM_FAIL; +} + +static s32 cqm_set_timer_disable(void *ex_handle) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_handle *cqm_handle = NULL; + + if (!ex_handle) + return CQM_FAIL; + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + + if (cqm_handle->func_capability.fake_func_type != CQM_FAKE_FUNC_CHILD_CONFLICT && + hinic3_func_tmr_bitmap_set(ex_handle, hinic3_global_func_id(ex_handle), + false) != CQM_SUCCESS) + cqm_err(handle->dev_hdl, "func_id %u Timer stop: disable timer bitmap failed\n", + hinic3_global_func_id(ex_handle)); + + if (cqm_handle->func_capability.fake_func_type == CQM_FAKE_FUNC_PARENT && + cqm_unset_fake_vf_timer(cqm_handle) != CQM_SUCCESS) + return CQM_FAIL; + + return CQM_SUCCESS; +} + +static s32 cqm_init_all(void *ex_handle) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + + /* Initialize secure memory. */ + if (cqm_secure_mem_init(ex_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_mem_init)); + return CQM_FAIL; + } + + /* Initialize memory entries such as BAT, CLA, and bitmap. 
*/ + if (cqm_mem_init(ex_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_mem_init)); + goto err1; + } + + /* Event callback initialization */ + if (cqm_event_init(ex_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_event_init)); + goto err2; + } + + /* Doorbell initiation */ + if (cqm_db_init(ex_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_db_init)); + goto err3; + } + + /* Initialize the bloom filter. */ + if (cqm_bloomfilter_init(ex_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bloomfilter_init)); + goto err4; + } + + if (cqm_set_timer_enable(ex_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_set_timer_enable)); + goto err5; + } + + return CQM_SUCCESS; +err5: + cqm_bloomfilter_uninit(ex_handle); +err4: + cqm_db_uninit(ex_handle); +err3: + cqm_event_uninit(ex_handle); +err2: + cqm_mem_uninit(ex_handle); +err1: + cqm_secure_mem_deinit(ex_handle); + return CQM_FAIL; +} + +/** + * Prototype : cqm_init + * Description : Complete CQM initialization. + * If the function is a parent fake function, copy the fake. + * If it is a child fake function (in the fake copy function, + * not in this function), set fake_en in the BAT/CLA table. + * cqm_init->cqm_mem_init->cqm_fake_init(copy) + * If the child fake conflict occurs, resources are not + * initialized, but the timer must be enabled. + * If the function is of the normal type, + * follow the normal process. + * Input : void *ex_handle + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/15 + * Modification : Created function + */ +s32 cqm_init(void *ex_handle) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_handle *cqm_handle = NULL; + s32 ret; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ex_handle is null\n", __func__); + return CQM_FAIL; + } + + cqm_handle = kmalloc(sizeof(*cqm_handle), GFP_KERNEL | __GFP_ZERO); + if (!cqm_handle) + return CQM_FAIL; + + /* Clear the memory to prevent other systems from + * not clearing the memory. + */ + memset(cqm_handle, 0, sizeof(struct tag_cqm_handle)); + + cqm_handle->ex_handle = handle; + cqm_handle->dev = (struct pci_dev *)(handle->pcidev_hdl); + handle->cqm_hdl = (void *)cqm_handle; + + /* Clearing Statistics */ + memset(&handle->hw_stats.cqm_stats, 0, sizeof(struct cqm_stats)); + + /* Reads VF/PF information. */ + cqm_handle->func_attribute = handle->hwif->attr; + cqm_info(handle->dev_hdl, "Func init: function[%u] type %d(0:PF,1:VF,2:PPF)\n", + cqm_handle->func_attribute.func_global_idx, + cqm_handle->func_attribute.func_type); + + /* Read capability from configuration management module */ + ret = cqm_capability_init(ex_handle); + if (ret == CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_capability_init)); + goto err; + } + + /* In FAKE mode, only the bitmap of the timer of the function is + * enabled, and resources are not initialized. Otherwise, the + * configuration of the fake function is overwritten. + */ + if (cqm_handle->func_capability.fake_func_type == CQM_FAKE_FUNC_CHILD_CONFLICT) { + handle->cqm_hdl = NULL; + kfree(cqm_handle); + return CQM_SUCCESS; + } + + ret = cqm_init_all(ex_handle); + if (ret == CQM_FAIL) + goto err; + + return CQM_SUCCESS; +err: + handle->cqm_hdl = NULL; + kfree(cqm_handle); + return CQM_FAIL; +} + +/** + * Prototype : cqm_uninit + * Description : Deinitializes the CQM module. This function is called once + * each time a function is removed. 
+ * Input : void *ex_handle + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void cqm_uninit(void *ex_handle) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_handle *cqm_handle = NULL; + s32 ret; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ex_handle is null\n", __func__); + return; + } + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + if (unlikely(!cqm_handle)) { + pr_err("[CQM]%s: cqm_handle is null\n", __func__); + return; + } + + cqm_set_timer_disable(ex_handle); + + /* After the TMR timer stops, the system releases resources + * after a delay of one or two milliseconds. + */ + if (cqm_handle->func_attribute.func_type == CQM_PPF) { + if (cqm_handle->func_capability.timer_enable == + CQM_TIMER_ENABLE) { + cqm_info(handle->dev_hdl, "PPF timer stop\n"); + ret = hinic3_ppf_tmr_stop(handle); + if (ret != CQM_SUCCESS) + /* The timer fails to be stopped, + * and the resource release is not affected. + */ + cqm_info(handle->dev_hdl, "PPF timer stop, ret=%d\n", ret); + } + + hinic3_ppf_ht_gpa_deinit(handle); + + usleep_range(0x384, 0x3E8); /* Somebody requires a delay of 1 ms, + * which is inaccurate. + */ + } + + /* Release Bloom Filter Table */ + cqm_bloomfilter_uninit(ex_handle); + + /* Release hardware doorbell */ + cqm_db_uninit(ex_handle); + + /* Cancel the callback of the event */ + cqm_event_uninit(ex_handle); + + /* Release various memory tables and require the service + * to release all objects. + */ + cqm_mem_uninit(ex_handle); + + cqm_secure_mem_deinit(ex_handle); + + /* Release cqm_handle */ + handle->cqm_hdl = NULL; + kfree(cqm_handle); +} + +static void cqm_test_mode_init(struct tag_cqm_handle *cqm_handle, + struct service_cap *service_capability) +{ + struct tag_cqm_func_capability *func_cap = &cqm_handle->func_capability; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + + if (service_capability->test_mode == 0) + return; + + cqm_info(handle->dev_hdl, "Enter CQM test mode\n"); + + func_cap->qpc_number = service_capability->test_qpc_num; + func_cap->qpc_reserved = + GET_MAX(func_cap->qpc_reserved, + service_capability->test_qpc_resvd_num); + func_cap->xid_alloc_mode = service_capability->test_xid_alloc_mode; + func_cap->gpa_check_enable = service_capability->test_gpa_check_enable; + func_cap->pagesize_reorder = service_capability->test_page_size_reorder; + func_cap->qpc_alloc_static = + (bool)(service_capability->test_qpc_alloc_mode); + func_cap->scqc_alloc_static = + (bool)(service_capability->test_scqc_alloc_mode); + func_cap->flow_table_based_conn_number = + service_capability->test_max_conn_num; + func_cap->flow_table_based_conn_cache_number = + service_capability->test_max_cache_conn_num; + func_cap->scqc_number = service_capability->test_scqc_num; + func_cap->mpt_number = service_capability->test_mpt_num; + func_cap->mpt_reserved = service_capability->test_mpt_recvd_num; + func_cap->reorder_number = service_capability->test_reorder_num; + /* 256K buckets, 256K*64B = 16MB */ + func_cap->hash_number = service_capability->test_hash_num; +} + +static void cqm_service_capability_update(struct tag_cqm_handle *cqm_handle) +{ + struct tag_cqm_func_capability *func_cap = &cqm_handle->func_capability; + + func_cap->qpc_number = GET_MIN(CQM_MAX_QPC_NUM, func_cap->qpc_number); + func_cap->scqc_number = GET_MIN(CQM_MAX_SCQC_NUM, + func_cap->scqc_number); + func_cap->srqc_number = GET_MIN(CQM_MAX_SRQC_NUM, + func_cap->srqc_number); + func_cap->childc_number = 
GET_MIN(CQM_MAX_CHILDC_NUM, + func_cap->childc_number); +} + +static void cqm_service_valid_init(struct tag_cqm_handle *cqm_handle, + const struct service_cap *service_capability) +{ + u16 type = service_capability->chip_svc_type; + struct tag_cqm_service *svc = cqm_handle->service; + + svc[CQM_SERVICE_T_NIC].valid = ((type & CFG_SERVICE_MASK_NIC) != 0) ? + true : false; + svc[CQM_SERVICE_T_OVS].valid = ((type & CFG_SERVICE_MASK_OVS) != 0) ? + true : false; + svc[CQM_SERVICE_T_ROCE].valid = ((type & CFG_SERVICE_MASK_ROCE) != 0) ? + true : false; + svc[CQM_SERVICE_T_TOE].valid = ((type & CFG_SERVICE_MASK_TOE) != 0) ? + true : false; + svc[CQM_SERVICE_T_FC].valid = ((type & CFG_SERVICE_MASK_FC) != 0) ? + true : false; + svc[CQM_SERVICE_T_IPSEC].valid = ((type & CFG_SERVICE_MASK_IPSEC) != 0) ? + true : false; + svc[CQM_SERVICE_T_VBS].valid = ((type & CFG_SERVICE_MASK_VBS) != 0) ? + true : false; + svc[CQM_SERVICE_T_VIRTIO].valid = ((type & CFG_SERVICE_MASK_VIRTIO) != 0) ? + true : false; + svc[CQM_SERVICE_T_IOE].valid = false; + svc[CQM_SERVICE_T_PPA].valid = ((type & CFG_SERVICE_MASK_PPA) != 0) ? + true : false; +} + +static void cqm_service_capability_init_nic(struct tag_cqm_handle *cqm_handle, void *pra) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + + cqm_info(handle->dev_hdl, "Cap init: nic is valid, but nic need not be init by cqm\n"); +} + +static void cqm_service_capability_init_ovs(struct tag_cqm_handle *cqm_handle, void *pra) +{ + struct tag_cqm_func_capability *func_cap = &cqm_handle->func_capability; + struct service_cap *service_capability = (struct service_cap *)pra; + struct ovs_service_cap *ovs_cap = &service_capability->ovs_cap; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + + cqm_info(handle->dev_hdl, "Cap init: ovs is valid\n"); + cqm_info(handle->dev_hdl, "Cap init: ovs qpc 0x%x\n", + ovs_cap->dev_ovs_cap.max_pctxs); + func_cap->hash_number += ovs_cap->dev_ovs_cap.max_pctxs; + func_cap->hash_basic_size = CQM_HASH_BUCKET_SIZE_64; + func_cap->qpc_number += ovs_cap->dev_ovs_cap.max_pctxs; + func_cap->qpc_basic_size = GET_MAX(ovs_cap->pctx_sz, + func_cap->qpc_basic_size); + func_cap->qpc_reserved += ovs_cap->dev_ovs_cap.max_pctxs; + func_cap->qpc_alloc_static = true; + func_cap->pagesize_reorder = CQM_OVS_PAGESIZE_ORDER; +} + +static void cqm_service_capability_init_roce(struct tag_cqm_handle *cqm_handle, void *pra) +{ + struct tag_cqm_func_capability *func_cap = &cqm_handle->func_capability; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct hinic3_board_info *board_info = &handle->board_info; + struct service_cap *service_capability = (struct service_cap *)pra; + struct rdma_service_cap *rdma_cap = &service_capability->rdma_cap; + struct dev_roce_svc_own_cap *roce_own_cap = + &rdma_cap->dev_rdma_cap.roce_own_cap; + + cqm_info(handle->dev_hdl, "Cap init: roce is valid\n"); + cqm_info(handle->dev_hdl, "Cap init: roce qpc 0x%x, scqc 0x%x, srqc 0x%x, drc_qp 0x%x\n", + roce_own_cap->max_qps, roce_own_cap->max_cqs, + roce_own_cap->max_srqs, roce_own_cap->max_drc_qps); + cqm_info(handle->dev_hdl, "Cap init: type 0x%x, scenes:0x%x, qpc_rsv:0x%x, srv_bmp:0x%x\n", + board_info->board_type, board_info->scenes_id, + roce_qpc_rsv_mode, board_info->service_en_bitmap); + + if (roce_qpc_rsv_mode == CQM_QPC_ROCE_VBS_MODE) { + func_cap->qpc_reserved += CQM_QPC_ROCE_RSVD; + func_cap->qpc_reserved_back += CQM_QPC_ROCE_VBS_RSVD_BACK; + } else if ((service_capability->chip_svc_type & CFG_SERVICE_MASK_ROCEAA) != 0) { + func_cap->qpc_reserved += 
CQM_QPC_ROCEAA_RSVD; + func_cap->scq_reserved += CQM_CQ_ROCEAA_RSVD; + func_cap->srq_reserved += CQM_SRQ_ROCEAA_RSVD; + } else { + func_cap->qpc_reserved += CQM_QPC_ROCE_RSVD; + } + func_cap->qpc_number += roce_own_cap->max_qps; + func_cap->qpc_basic_size = GET_MAX(roce_own_cap->qpc_entry_sz, + func_cap->qpc_basic_size); + if (cqm_handle->func_attribute.func_type == CQM_PF && (IS_MASTER_HOST(handle))) { + func_cap->hash_number = roce_own_cap->max_qps; + func_cap->hash_basic_size = CQM_HASH_BUCKET_SIZE_64; + } + func_cap->qpc_alloc_static = true; + func_cap->scqc_number += roce_own_cap->max_cqs; + func_cap->scqc_basic_size = GET_MAX(rdma_cap->cqc_entry_sz, + func_cap->scqc_basic_size); + func_cap->srqc_number += roce_own_cap->max_srqs; + func_cap->srqc_basic_size = GET_MAX(roce_own_cap->srqc_entry_sz, + func_cap->srqc_basic_size); + func_cap->mpt_number += roce_own_cap->max_mpts; + func_cap->mpt_reserved += rdma_cap->reserved_mrws; + func_cap->mpt_basic_size = GET_MAX(rdma_cap->mpt_entry_sz, + func_cap->mpt_basic_size); + func_cap->gid_number = CQM_GID_RDMA_NUM; + func_cap->gid_basic_size = CQM_GID_SIZE_32; + func_cap->childc_number += CQM_CHILDC_ROCE_NUM; + func_cap->childc_basic_size = GET_MAX(CQM_CHILDC_SIZE_256, + func_cap->childc_basic_size); +} + +static void cqm_service_capability_init_toe(struct tag_cqm_handle *cqm_handle, void *pra) +{ + struct tag_cqm_toe_private_capability *toe_own_cap = &cqm_handle->toe_own_capability; + struct tag_cqm_func_capability *func_cap = &cqm_handle->func_capability; + struct service_cap *service_capability = (struct service_cap *)pra; + struct rdma_service_cap *rdma_cap = &service_capability->rdma_cap; + struct toe_service_cap *toe_cap = &service_capability->toe_cap; + struct dev_toe_svc_cap *dev_toe_cap = &toe_cap->dev_toe_cap; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + + cqm_info(handle->dev_hdl, "Cap init: toe is valid\n"); + cqm_info(handle->dev_hdl, "Cap init: toe qpc 0x%x, scqc 0x%x, srqc 0x%x\n", + dev_toe_cap->max_pctxs, dev_toe_cap->max_cqs, + dev_toe_cap->max_srqs); + func_cap->hash_number += dev_toe_cap->max_pctxs; + func_cap->hash_basic_size = CQM_HASH_BUCKET_SIZE_64; + func_cap->qpc_number += dev_toe_cap->max_pctxs; + func_cap->qpc_basic_size = GET_MAX(toe_cap->pctx_sz, + func_cap->qpc_basic_size); + func_cap->qpc_alloc_static = true; + func_cap->scqc_number += dev_toe_cap->max_cqs; + func_cap->scqc_basic_size = GET_MAX(toe_cap->scqc_sz, + func_cap->scqc_basic_size); + func_cap->scqc_alloc_static = true; + + toe_own_cap->toe_srqc_number = dev_toe_cap->max_srqs; + toe_own_cap->toe_srqc_start_id = dev_toe_cap->srq_id_start; + toe_own_cap->toe_srqc_basic_size = CQM_SRQC_SIZE_64; + func_cap->childc_number += dev_toe_cap->max_cctxt; + func_cap->childc_basic_size = GET_MAX(CQM_CHILDC_SIZE_256, + func_cap->childc_basic_size); + func_cap->mpt_number += dev_toe_cap->max_mpts; + func_cap->mpt_reserved = 0; + func_cap->mpt_basic_size = GET_MAX(rdma_cap->mpt_entry_sz, + func_cap->mpt_basic_size); +} + +static void cqm_service_capability_init_ioe(struct tag_cqm_handle *cqm_handle, void *pra) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + + cqm_info(handle->dev_hdl, "Cap init: ioe is valid\n"); +} + +static void cqm_service_capability_init_fc(struct tag_cqm_handle *cqm_handle, void *pra) +{ + struct tag_cqm_func_capability *func_cap = &cqm_handle->func_capability; + struct service_cap *service_capability = (struct service_cap *)pra; + struct fc_service_cap *fc_cap = &service_capability->fc_cap; + struct dev_fc_svc_cap 
*dev_fc_cap = &fc_cap->dev_fc_cap; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + + cqm_info(handle->dev_hdl, "Cap init: fc is valid\n"); + cqm_info(handle->dev_hdl, "Cap init: fc qpc 0x%x, scqc 0x%x, srqc 0x%x\n", + dev_fc_cap->max_parent_qpc_num, dev_fc_cap->scq_num, + dev_fc_cap->srq_num); + func_cap->hash_number += dev_fc_cap->max_parent_qpc_num; + func_cap->hash_basic_size = CQM_HASH_BUCKET_SIZE_64; + func_cap->qpc_number += dev_fc_cap->max_parent_qpc_num; + func_cap->qpc_basic_size = GET_MAX(fc_cap->parent_qpc_size, + func_cap->qpc_basic_size); + func_cap->qpc_alloc_static = true; + func_cap->scqc_number += dev_fc_cap->scq_num; + func_cap->scqc_basic_size = GET_MAX(fc_cap->scqc_size, + func_cap->scqc_basic_size); + func_cap->srqc_number += dev_fc_cap->srq_num; + func_cap->srqc_basic_size = GET_MAX(fc_cap->srqc_size, + func_cap->srqc_basic_size); + func_cap->lun_number = CQM_LUN_FC_NUM; + func_cap->lun_basic_size = CQM_LUN_SIZE_8; + func_cap->taskmap_number = CQM_TASKMAP_FC_NUM; + func_cap->taskmap_basic_size = PAGE_SIZE; + func_cap->childc_number += dev_fc_cap->max_child_qpc_num; + func_cap->childc_basic_size = GET_MAX(fc_cap->child_qpc_size, + func_cap->childc_basic_size); + func_cap->pagesize_reorder = CQM_FC_PAGESIZE_ORDER; +} + +static void cqm_service_capability_init_vbs(struct tag_cqm_handle *cqm_handle, void *pra) +{ + struct tag_cqm_func_capability *func_cap = &cqm_handle->func_capability; + struct service_cap *service_capability = (struct service_cap *)pra; + struct vbs_service_cap *vbs_cap = &service_capability->vbs_cap; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + + cqm_info(handle->dev_hdl, "Cap init: vbs is valid\n"); + + /* If the entry size is greater than the cache line (256 bytes), + * align the entries by cache line. 
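+ * For example, a 512-byte virtio_vq_size yields two 256-byte xid2cid + * entries per queue, i.e. (CQM_XID2CID_VBS_NUM * 512) / CQM_CHIP_CACHELINE + * entries in total.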
+ */ + func_cap->xid2cid_number += + (CQM_XID2CID_VBS_NUM * service_capability->virtio_vq_size) / CQM_CHIP_CACHELINE; + func_cap->xid2cid_basic_size = CQM_CHIP_CACHELINE; + func_cap->qpc_number += (vbs_cap->vbs_max_volq * 2); // VOLQ group * 2 + func_cap->qpc_basic_size = GET_MAX(CQM_VBS_QPC_SIZE, + func_cap->qpc_basic_size); + func_cap->qpc_alloc_static = true; +} + +static void cqm_service_capability_init_ipsec(struct tag_cqm_handle *cqm_handle, void *pra) +{ + struct tag_cqm_func_capability *func_cap = &cqm_handle->func_capability; + struct service_cap *service_capability = (struct service_cap *)pra; + struct ipsec_service_cap *ipsec_cap = &service_capability->ipsec_cap; + struct dev_ipsec_svc_cap *ipsec_srvcap = &ipsec_cap->dev_ipsec_cap; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + + func_cap->childc_number += ipsec_srvcap->max_sactxs; + func_cap->childc_basic_size = GET_MAX(CQM_CHILDC_SIZE_256, + func_cap->childc_basic_size); + func_cap->scqc_number += ipsec_srvcap->max_cqs; + func_cap->scqc_basic_size = GET_MAX(CQM_SCQC_SIZE_64, + func_cap->scqc_basic_size); + func_cap->scqc_alloc_static = true; + cqm_info(handle->dev_hdl, "Cap init: ipsec is valid\n"); + cqm_info(handle->dev_hdl, "Cap init: ipsec 0x%x, childc %d, scqc 0x%x, scqc_bsize %d\n", + ipsec_srvcap->max_sactxs, func_cap->childc_basic_size, + ipsec_srvcap->max_cqs, func_cap->scqc_basic_size); +} + +static void cqm_service_capability_init_virtio(struct tag_cqm_handle *cqm_handle, void *pra) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct service_cap *service_capability = (struct service_cap *)pra; + + cqm_info(handle->dev_hdl, "Cap init: virtio is valid\n"); + /* If the entry size is greater than the cache line (256 bytes), + * align the entries by cache line. 
+ */ + cqm_handle->func_capability.xid2cid_number += + (CQM_XID2CID_VIRTIO_NUM * service_capability->virtio_vq_size) / CQM_CHIP_CACHELINE; + cqm_handle->func_capability.xid2cid_basic_size = CQM_CHIP_CACHELINE; +} + +static void cqm_service_capability_init_ppa(struct tag_cqm_handle *cqm_handle, void *pra) +{ + struct tag_cqm_func_capability *func_cap = &cqm_handle->func_capability; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct service_cap *service_capability = (struct service_cap *)pra; + struct ppa_service_cap *ppa_cap = &service_capability->ppa_cap; + + cqm_info(handle->dev_hdl, "Cap init: ppa is valid\n"); + func_cap->hash_basic_size = CQM_HASH_BUCKET_SIZE_64; + func_cap->qpc_alloc_static = true; + func_cap->pagesize_reorder = CQM_PPA_PAGESIZE_ORDER; + func_cap->qpc_basic_size = GET_MAX(ppa_cap->pctx_sz, + func_cap->qpc_basic_size); +} + +struct cqm_srv_cap_init serv_cap_init_list[] = { + {CQM_SERVICE_T_NIC, cqm_service_capability_init_nic}, + {CQM_SERVICE_T_OVS, cqm_service_capability_init_ovs}, + {CQM_SERVICE_T_ROCE, cqm_service_capability_init_roce}, + {CQM_SERVICE_T_TOE, cqm_service_capability_init_toe}, + {CQM_SERVICE_T_IOE, cqm_service_capability_init_ioe}, + {CQM_SERVICE_T_FC, cqm_service_capability_init_fc}, + {CQM_SERVICE_T_VBS, cqm_service_capability_init_vbs}, + {CQM_SERVICE_T_IPSEC, cqm_service_capability_init_ipsec}, + {CQM_SERVICE_T_VIRTIO, cqm_service_capability_init_virtio}, + {CQM_SERVICE_T_PPA, cqm_service_capability_init_ppa}, +}; + +static void cqm_service_capability_init(struct tag_cqm_handle *cqm_handle, + struct service_cap *service_capability) +{ + u32 list_size = ARRAY_SIZE(serv_cap_init_list); + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + u32 i; + + for (i = 0; i < CQM_SERVICE_T_MAX; i++) { + cqm_handle->service[i].valid = false; + cqm_handle->service[i].has_register = false; + cqm_handle->service[i].buf_order = 0; + } + + cqm_service_valid_init(cqm_handle, service_capability); + + cqm_info(handle->dev_hdl, "Cap init: service type %d\n", + service_capability->chip_svc_type); + + for (i = 0; i < list_size; i++) { + if (cqm_handle->service[serv_cap_init_list[i].service_type].valid && + serv_cap_init_list[i].serv_cap_proc) { + serv_cap_init_list[i].serv_cap_proc(cqm_handle, + (void *)service_capability); + } + } +} + +s32 cqm_get_fake_func_type(struct tag_cqm_handle *cqm_handle) +{ + struct tag_cqm_func_capability *func_cap = &cqm_handle->func_capability; + u32 parent_func, child_func_start, child_func_number, i; + u32 idx = cqm_handle->func_attribute.func_global_idx; + + /* Currently, only one set of fake configurations is implemented. + * fake_cfg_number = 1 + */ + for (i = 0; i < func_cap->fake_cfg_number; i++) { + parent_func = func_cap->fake_cfg[i].parent_func; + child_func_start = func_cap->fake_cfg[i].child_func_start; + child_func_number = func_cap->fake_cfg[i].child_func_number; + + if (idx == parent_func) { + return CQM_FAKE_FUNC_PARENT; + } else if ((idx >= child_func_start) && + (idx < (child_func_start + child_func_number))) { + return CQM_FAKE_FUNC_CHILD_CONFLICT; + } + } + + return CQM_FAKE_FUNC_NORMAL; +} + +s32 cqm_get_child_func_start(struct tag_cqm_handle *cqm_handle) +{ + struct tag_cqm_func_capability *func_cap = &cqm_handle->func_capability; + struct hinic3_func_attr *func_attr = &cqm_handle->func_attribute; + u32 i; + + /* Currently, only one set of fake configurations is implemented. 
+ * fake_cfg_number = 1 + */ + for (i = 0; i < func_cap->fake_cfg_number; i++) { + if (func_attr->func_global_idx == + func_cap->fake_cfg[i].parent_func) + return (s32)(func_cap->fake_cfg[i].child_func_start); + } + + return CQM_FAIL; +} + +s32 cqm_get_child_func_number(struct tag_cqm_handle *cqm_handle) +{ + struct tag_cqm_func_capability *func_cap = &cqm_handle->func_capability; + struct hinic3_func_attr *func_attr = &cqm_handle->func_attribute; + u32 i; + + for (i = 0; i < func_cap->fake_cfg_number; i++) { + if (func_attr->func_global_idx == + func_cap->fake_cfg[i].parent_func) + return (s32)(func_cap->fake_cfg[i].child_func_number); + } + + return CQM_FAIL; +} + +/* Set func_type in fake_cqm_handle to ppf, pf, or vf. */ +static void cqm_set_func_type(struct tag_cqm_handle *cqm_handle) +{ + u32 idx = cqm_handle->func_attribute.func_global_idx; + + if (idx == 0) + cqm_handle->func_attribute.func_type = CQM_PPF; + else if (idx < CQM_MAX_PF_NUM) + cqm_handle->func_attribute.func_type = CQM_PF; + else + cqm_handle->func_attribute.func_type = CQM_VF; +} + +static void cqm_lb_fake_mode_init(struct hinic3_hwdev *handle, struct service_cap *svc_cap) +{ + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + struct tag_cqm_func_capability *func_cap = &cqm_handle->func_capability; + struct tag_cqm_fake_cfg *cfg = func_cap->fake_cfg; + + func_cap->lb_mode = svc_cap->lb_mode; + + /* Initializing the LB Mode */ + if (func_cap->lb_mode == CQM_LB_MODE_NORMAL) + func_cap->smf_pg = 0; + else + func_cap->smf_pg = svc_cap->smf_pg; + + /* Initializing the FAKE Mode */ + if (svc_cap->fake_vf_num == 0) { + func_cap->fake_cfg_number = 0; + func_cap->fake_func_type = CQM_FAKE_FUNC_NORMAL; + func_cap->fake_vf_qpc_number = 0; + } else { + func_cap->fake_cfg_number = 1; + + /* When configuring fake mode, ensure that the parent function + * cannot be contained in the child function; otherwise, the + * system will be initialized repeatedly. The following + * configuration is used to verify the OVS fake configuration on + * the FPGA. 
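+ * cfg[0] below uses port_to_port_idx as the parent function and the range + * [fake_vf_start_id, fake_vf_start_id + fake_vf_num_cfg) as its child + * functions.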
+ */ + cfg[0].parent_func = cqm_handle->func_attribute.port_to_port_idx; + cfg[0].child_func_start = svc_cap->fake_vf_start_id; + cfg[0].child_func_number = svc_cap->fake_vf_num_cfg; + + func_cap->fake_func_type = (u32)cqm_get_fake_func_type(cqm_handle); + func_cap->fake_vf_qpc_number = svc_cap->fake_vf_max_pctx; + } + + cqm_info(handle->dev_hdl, "Cap init: lb_mode=%u\n", func_cap->lb_mode); + cqm_info(handle->dev_hdl, "Cap init: smf_pg=%u\n", func_cap->smf_pg); + cqm_info(handle->dev_hdl, "Cap init: fake_func_type=%u\n", func_cap->fake_func_type); + cqm_info(handle->dev_hdl, "Cap init: fake_cfg_number=%u\n", func_cap->fake_cfg_number); +} + +static int cqm_capability_init_bloomfilter(struct hinic3_hwdev *handle) +{ + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + struct tag_cqm_func_capability *func_cap = &cqm_handle->func_capability; + struct service_cap *service_capability = &handle->cfg_mgmt->svc_cap; + + func_cap->bloomfilter_enable = service_capability->bloomfilter_en; + cqm_info(handle->dev_hdl, "Cap init: bloomfilter_enable %u (1: enable; 0: disable)\n", + func_cap->bloomfilter_enable); + + if (func_cap->bloomfilter_enable != 0) { + func_cap->bloomfilter_length = service_capability->bfilter_len; + func_cap->bloomfilter_addr = service_capability->bfilter_start_addr; + if (func_cap->bloomfilter_length != 0 && + !cqm_check_align(func_cap->bloomfilter_length)) { + cqm_err(handle->dev_hdl, "Cap bloomfilter len %u is not the power of 2\n", + func_cap->bloomfilter_length); + + return CQM_FAIL; + } + } + + cqm_info(handle->dev_hdl, "Cap init: bloomfilter_length 0x%x, bloomfilter_addr 0x%x\n", + func_cap->bloomfilter_length, func_cap->bloomfilter_addr); + + return 0; +} + +static void cqm_capability_init_part_cap(struct hinic3_hwdev *handle) +{ + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + struct tag_cqm_func_capability *func_cap = &cqm_handle->func_capability; + struct service_cap *service_capability = &handle->cfg_mgmt->svc_cap; + + func_cap->flow_table_based_conn_number = service_capability->max_connect_num; + func_cap->flow_table_based_conn_cache_number = service_capability->max_stick2cache_num; + cqm_info(handle->dev_hdl, "Cap init: cfg max_conn_num 0x%x, max_cache_conn_num 0x%x\n", + func_cap->flow_table_based_conn_number, + func_cap->flow_table_based_conn_cache_number); + + func_cap->qpc_reserved = 0; + func_cap->qpc_reserved_back = 0; + func_cap->mpt_reserved = 0; + func_cap->scq_reserved = 0; + func_cap->srq_reserved = 0; + func_cap->qpc_alloc_static = false; + func_cap->scqc_alloc_static = false; + + func_cap->l3i_number = 0; + func_cap->l3i_basic_size = CQM_L3I_SIZE_8; + + func_cap->xid_alloc_mode = true; /* xid alloc do not reuse */ + func_cap->gpa_check_enable = true; +} + +static int cqm_capability_init_timer(struct hinic3_hwdev *handle) +{ + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + struct service_cap *service_capability = &handle->cfg_mgmt->svc_cap; + struct hinic3_func_attr *func_attr = &cqm_handle->func_attribute; + struct tag_cqm_func_capability *func_cap = &cqm_handle->func_capability; + u32 total_timer_num = 0; + int err; + + /* Initializes the PPF capabilities: include timer, pf, vf. 
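+ * Timer resources are accounted only on the PPF when timer_en is set; on a + * slave host the timer count is doubled (CQM_TIMER_NUM_MULTI).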
*/ + if (func_attr->func_type == CQM_PPF && service_capability->timer_en) { + func_cap->pf_num = service_capability->pf_num; + func_cap->pf_id_start = service_capability->pf_id_start; + func_cap->vf_num = service_capability->vf_num; + func_cap->vf_id_start = service_capability->vf_id_start; + cqm_info(handle->dev_hdl, "Cap init: total function num 0x%x\n", + service_capability->host_total_function); + cqm_info(handle->dev_hdl, + "Cap init: pf_num 0x%x, pf_start 0x%x, vf_num 0x%x, vf_start 0x%x\n", + func_cap->pf_num, func_cap->pf_id_start, + func_cap->vf_num, func_cap->vf_id_start); + + err = hinic3_get_ppf_timer_cfg(handle); + if (err != 0) + return err; + + func_cap->timer_pf_num = service_capability->timer_pf_num; + func_cap->timer_pf_id_start = service_capability->timer_pf_id_start; + func_cap->timer_vf_num = service_capability->timer_vf_num; + func_cap->timer_vf_id_start = service_capability->timer_vf_id_start; + cqm_info(handle->dev_hdl, + "timer init: pf_num 0x%x, pf_start 0x%x, vf_num 0x%x, vf_start 0x%x\n", + func_cap->timer_pf_num, func_cap->timer_pf_id_start, + func_cap->timer_vf_num, func_cap->timer_vf_id_start); + + total_timer_num = func_cap->timer_pf_num + func_cap->timer_vf_num; + if (IS_SLAVE_HOST(handle)) { + total_timer_num *= CQM_TIMER_NUM_MULTI; + cqm_info(handle->dev_hdl, + "timer init: need double tw resources, total_timer_num=0x%x\n", + total_timer_num); + } + } + + func_cap->timer_enable = service_capability->timer_en; + cqm_info(handle->dev_hdl, "Cap init: timer_enable %u (1: enable; 0: disable)\n", + func_cap->timer_enable); + + func_cap->timer_number = CQM_TIMER_ALIGN_SCALE_NUM * total_timer_num; + func_cap->timer_basic_size = CQM_TIMER_SIZE_32; + + return 0; +} + +static void cqm_capability_init_cap_print(struct hinic3_hwdev *handle) +{ + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + struct tag_cqm_func_capability *func_cap = &cqm_handle->func_capability; + struct service_cap *service_capability = &handle->cfg_mgmt->svc_cap; + + func_cap->ft_enable = service_capability->sf_svc_attr.ft_en; + func_cap->rdma_enable = service_capability->sf_svc_attr.rdma_en; + + cqm_info(handle->dev_hdl, "Cap init: pagesize_reorder %u\n", func_cap->pagesize_reorder); + cqm_info(handle->dev_hdl, "Cap init: xid_alloc_mode %d, gpa_check_enable %d\n", + func_cap->xid_alloc_mode, func_cap->gpa_check_enable); + cqm_info(handle->dev_hdl, "Cap init: qpc_alloc_mode %d, scqc_alloc_mode %d\n", + func_cap->qpc_alloc_static, func_cap->scqc_alloc_static); + cqm_info(handle->dev_hdl, "Cap init: hash_number 0x%x\n", func_cap->hash_number); + cqm_info(handle->dev_hdl, "Cap init: qpc_num 0x%x, qpc_rsvd 0x%x, qpc_basic_size 0x%x\n", + func_cap->qpc_number, func_cap->qpc_reserved, func_cap->qpc_basic_size); + cqm_info(handle->dev_hdl, "Cap init: scqc_num 0x%x, scqc_rsvd 0x%x, scqc_basic 0x%x\n", + func_cap->scqc_number, func_cap->scq_reserved, func_cap->scqc_basic_size); + cqm_info(handle->dev_hdl, "Cap init: srqc_num 0x%x, srqc_rsvd 0x%x, srqc_basic 0x%x\n", + func_cap->srqc_number, func_cap->srq_reserved, func_cap->srqc_basic_size); + cqm_info(handle->dev_hdl, "Cap init: mpt_number 0x%x, mpt_reserved 0x%x\n", + func_cap->mpt_number, func_cap->mpt_reserved); + cqm_info(handle->dev_hdl, "Cap init: gid_number 0x%x, lun_number 0x%x\n", + func_cap->gid_number, func_cap->lun_number); + cqm_info(handle->dev_hdl, "Cap init: taskmap_number 0x%x, l3i_number 0x%x\n", + func_cap->taskmap_number, func_cap->l3i_number); + cqm_info(handle->dev_hdl, "Cap init: timer_number 0x%x, 
childc_number 0x%x\n", + func_cap->timer_number, func_cap->childc_number); + cqm_info(handle->dev_hdl, "Cap init: childc_basic_size 0x%x\n", + func_cap->childc_basic_size); + cqm_info(handle->dev_hdl, "Cap init: xid2cid_number 0x%x, reorder_number 0x%x\n", + func_cap->xid2cid_number, func_cap->reorder_number); + cqm_info(handle->dev_hdl, "Cap init: ft_enable %d, rdma_enable %d\n", + func_cap->ft_enable, func_cap->rdma_enable); +} + +/** + * Prototype : cqm_capability_init + * Description : Initializes the function and service capabilities of the CQM. + * Information needs to be read from the configuration management + * module. + * Input : void *ex_handle + * Output : None + * Return Value : s32 + * 1.Date : 2015/12/9 + * Modification : Created function + */ +s32 cqm_capability_init(void *ex_handle) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + struct service_cap *service_capability = &handle->cfg_mgmt->svc_cap; + struct hinic3_func_attr *func_attr = &cqm_handle->func_attribute; + struct tag_cqm_func_capability *func_cap = &cqm_handle->func_capability; + int err = 0; + + err = cqm_capability_init_timer(handle); + if (err != 0) + goto out; + + err = cqm_capability_init_bloomfilter(handle); + if (err != 0) + goto out; + + cqm_capability_init_part_cap(handle); + + cqm_lb_fake_mode_init(handle, service_capability); + + cqm_service_capability_init(cqm_handle, service_capability); + + cqm_test_mode_init(cqm_handle, service_capability); + + cqm_service_capability_update(cqm_handle); + + cqm_capability_init_cap_print(handle); + + return CQM_SUCCESS; + +out: + if (func_attr->func_type == CQM_PPF) + func_cap->timer_enable = 0; + + return err; +} + +static void cqm_fake_uninit(struct tag_cqm_handle *cqm_handle) +{ + u32 i; + + if (cqm_handle->func_capability.fake_func_type != + CQM_FAKE_FUNC_PARENT) + return; + + for (i = 0; i < CQM_FAKE_FUNC_MAX; i++) { + kfree(cqm_handle->fake_cqm_handle[i]); + cqm_handle->fake_cqm_handle[i] = NULL; + } +} + +static void set_fake_cqm_attr(struct hinic3_hwdev *handle, struct tag_cqm_handle *fake_cqm_handle, + s32 child_func_start, u32 i) +{ + struct tag_cqm_func_capability *func_cap = NULL; + struct hinic3_func_attr *func_attr = NULL; + struct service_cap *cap = &handle->cfg_mgmt->svc_cap; + + func_attr = &fake_cqm_handle->func_attribute; + func_cap = &fake_cqm_handle->func_capability; + func_attr->func_global_idx = (u16)(child_func_start + i); + cqm_set_func_type(fake_cqm_handle); + func_cap->fake_func_type = CQM_FAKE_FUNC_CHILD; + cqm_info(handle->dev_hdl, "Fake func init: function[%u] type %d(0:PF,1:VF,2:PPF)\n", + func_attr->func_global_idx, func_attr->func_type); + + func_cap->qpc_number = cap->fake_vf_max_pctx; + func_cap->qpc_number = GET_MIN(CQM_MAX_QPC_NUM, func_cap->qpc_number); + func_cap->hash_number = cap->fake_vf_max_pctx; + func_cap->qpc_reserved = cap->fake_vf_max_pctx; + + if (cap->fake_vf_bfilter_len != 0) { + func_cap->bloomfilter_enable = true; + func_cap->bloomfilter_addr = cap->fake_vf_bfilter_start_addr + + cap->fake_vf_bfilter_len * i; + func_cap->bloomfilter_length = cap->fake_vf_bfilter_len; + } +} + +/** + * Prototype : cqm_fake_init + * Description : When the fake VF mode is supported, the CQM handles of + * the fake VFs need to be copied. 
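+ * Each child CQM handle is a memcpy() of the parent handle whose function + * attributes and capabilities are then overridden by set_fake_cqm_attr().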
+ * Input : struct tag_cqm_handle *cqm_handle: Parent CQM handle of the current PF + * Output : None + * Return Value : s32 + * 1.Date : 2020/4/15 + * Modification : Created function + */ +static s32 cqm_fake_init(struct tag_cqm_handle *cqm_handle) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_handle *fake_cqm_handle = NULL; + struct tag_cqm_func_capability *func_cap = NULL; + s32 child_func_start, child_func_number; + u32 i; + + func_cap = &cqm_handle->func_capability; + if (func_cap->fake_func_type != CQM_FAKE_FUNC_PARENT) + return CQM_SUCCESS; + + child_func_start = cqm_get_child_func_start(cqm_handle); + if (child_func_start == CQM_FAIL) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(child_func_start)); + return CQM_FAIL; + } + + child_func_number = cqm_get_child_func_number(cqm_handle); + if (child_func_number == CQM_FAIL) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(child_func_number)); + return CQM_FAIL; + } + + for (i = 0; i < (u32)child_func_number; i++) { + fake_cqm_handle = kmalloc(sizeof(*fake_cqm_handle), GFP_KERNEL | __GFP_ZERO); + if (!fake_cqm_handle) + goto err; + + /* Copy the attributes of the parent CQM handle to the child CQM + * handle and modify the values of function. + */ + memcpy(fake_cqm_handle, cqm_handle, sizeof(struct tag_cqm_handle)); + set_fake_cqm_attr(handle, fake_cqm_handle, child_func_start, i); + + fake_cqm_handle->parent_cqm_handle = cqm_handle; + cqm_handle->fake_cqm_handle[i] = fake_cqm_handle; + } + + return CQM_SUCCESS; + +err: + cqm_fake_uninit(cqm_handle); + return CQM_FAIL; +} + +static void cqm_fake_mem_uninit(struct tag_cqm_handle *cqm_handle) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_handle *fake_cqm_handle = NULL; + s32 child_func_number; + u32 i; + + if (cqm_handle->func_capability.fake_func_type != + CQM_FAKE_FUNC_PARENT) + return; + + child_func_number = cqm_get_child_func_number(cqm_handle); + if (child_func_number == CQM_FAIL) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(child_func_number)); + return; + } + + for (i = 0; i < (u32)child_func_number; i++) { + fake_cqm_handle = cqm_handle->fake_cqm_handle[i]; + + cqm_object_table_uninit(fake_cqm_handle); + cqm_bitmap_uninit(fake_cqm_handle); + cqm_cla_uninit(fake_cqm_handle, CQM_BAT_ENTRY_MAX); + cqm_bat_uninit(fake_cqm_handle); + } +} + +/** + * Prototype : cqm_fake_mem_init + * Description : Initialize resources of the extended fake function. 
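+ * The BAT, CLA, bitmap and object tables are created for every child CQM + * handle; on failure the resources already created are released by + * cqm_fake_mem_uninit().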
+ * Input : struct tag_cqm_handle *cqm_handle: Parent CQM handle of the current PF + * Output : None + * Return Value : s32 + * 1.Date : 2020/4/15 + * Modification : Created function + */ +static s32 cqm_fake_mem_init(struct tag_cqm_handle *cqm_handle) +{ + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_handle *fake_cqm_handle = NULL; + s32 child_func_number; + u32 i; + + if (cqm_handle->func_capability.fake_func_type != + CQM_FAKE_FUNC_PARENT) + return CQM_SUCCESS; + + child_func_number = cqm_get_child_func_number(cqm_handle); + if (child_func_number == CQM_FAIL) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(child_func_number)); + return CQM_FAIL; + } + + for (i = 0; i < (u32)child_func_number; i++) { + fake_cqm_handle = cqm_handle->fake_cqm_handle[i]; + snprintf(fake_cqm_handle->name, VRAM_NAME_MAX_LEN - 1, + "%s%s%02u", cqm_handle->name, VRAM_CQM_FAKE_MEM_BASE, i); + + if (cqm_bat_init(fake_cqm_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_bat_init)); + goto err; + } + + if (cqm_cla_init(fake_cqm_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_cla_init)); + goto err; + } + + if (cqm_bitmap_init(fake_cqm_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_bitmap_init)); + goto err; + } + + if (cqm_object_table_init(fake_cqm_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_object_table_init)); + goto err; + } + } + + return CQM_SUCCESS; + +err: + cqm_fake_mem_uninit(cqm_handle); + return CQM_FAIL; +} + +/** + * Prototype : cqm_mem_init + * Description : Initialize CQM memory, including tables at different levels. + * Input : void *ex_handle + * Output : None + * Return Value : s32 + * 1.Date : 2015/7/6 + * Modification : Created function + */ +s32 cqm_mem_init(void *ex_handle) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_handle *cqm_handle = NULL; + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + snprintf(cqm_handle->name, VRAM_NAME_MAX_LEN - 1, + "%s%02u", VRAM_CQM_GLB_FUNC_BASE, hinic3_global_func_id(handle)); + + if (cqm_fake_init(cqm_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_fake_init)); + return CQM_FAIL; + } + + if (cqm_fake_mem_init(cqm_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_fake_mem_init)); + goto err1; + } + + if (cqm_bat_init(cqm_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bat_init)); + goto err2; + } + + if (cqm_cla_init(cqm_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_init)); + goto err3; + } + + if (cqm_bitmap_init(cqm_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bitmap_init)); + goto err4; + } + + if (cqm_object_table_init(cqm_handle) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_object_table_init)); + goto err5; + } + + return CQM_SUCCESS; + +err5: + cqm_bitmap_uninit(cqm_handle); +err4: + cqm_cla_uninit(cqm_handle, CQM_BAT_ENTRY_MAX); +err3: + cqm_bat_uninit(cqm_handle); +err2: + cqm_fake_mem_uninit(cqm_handle); +err1: + cqm_fake_uninit(cqm_handle); + return CQM_FAIL; +} + +/** + * Prototype : cqm_mem_uninit + * Description : Deinitialize CQM memory, including tables at different levels. 
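+ * Resources are released in the reverse order of cqm_mem_init().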
+ * Input : void *ex_handle + * Output : None + * Return Value : void + * 1.Date : 2015/7/6 + * Modification : Created function + */ +void cqm_mem_uninit(void *ex_handle) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_handle *cqm_handle = NULL; + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + + cqm_object_table_uninit(cqm_handle); + cqm_bitmap_uninit(cqm_handle); + cqm_cla_uninit(cqm_handle, CQM_BAT_ENTRY_MAX); + cqm_bat_uninit(cqm_handle); + cqm_fake_mem_uninit(cqm_handle); + cqm_fake_uninit(cqm_handle); +} + +/** + * Prototype : cqm_event_init + * Description : Initialize CQM event callback. + * Input : void *ex_handle + * Output : None + * Return Value : s32 + * 1.Date : 2015/7/6 + * Modification : Created function + */ +s32 cqm_event_init(void *ex_handle) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + + /* Registers the CEQ and AEQ callback functions. */ + if (hinic3_ceq_register_cb(ex_handle, ex_handle, HINIC3_NON_L2NIC_SCQ, + cqm_scq_callback) != CHIPIF_SUCCESS) { + cqm_err(handle->dev_hdl, "Event: fail to register scq callback\n"); + return CQM_FAIL; + } + + if (hinic3_ceq_register_cb(ex_handle, ex_handle, HINIC3_NON_L2NIC_ECQ, + cqm_ecq_callback) != CHIPIF_SUCCESS) { + cqm_err(handle->dev_hdl, "Event: fail to register ecq callback\n"); + goto err1; + } + + if (hinic3_ceq_register_cb(ex_handle, ex_handle, HINIC3_NON_L2NIC_NO_CQ_EQ, + cqm_nocq_callback) != CHIPIF_SUCCESS) { + cqm_err(handle->dev_hdl, "Event: fail to register nocq callback\n"); + goto err2; + } + + if (hinic3_aeq_register_swe_cb(ex_handle, ex_handle, HINIC3_STATEFUL_EVENT, + cqm_aeq_callback) != CHIPIF_SUCCESS) { + cqm_err(handle->dev_hdl, "Event: fail to register aeq callback\n"); + goto err3; + } + + return CQM_SUCCESS; + +err3: + hinic3_ceq_unregister_cb(ex_handle, HINIC3_NON_L2NIC_NO_CQ_EQ); +err2: + hinic3_ceq_unregister_cb(ex_handle, HINIC3_NON_L2NIC_ECQ); +err1: + hinic3_ceq_unregister_cb(ex_handle, HINIC3_NON_L2NIC_SCQ); + return CQM_FAIL; +} + +/** + * Prototype : cqm_event_uninit + * Description : Deinitialize CQM event callback. + * Input : void *ex_handle + * Output : None + * Return Value : void + * 1.Date : 2015/7/6 + * Modification : Created function + */ +void cqm_event_uninit(void *ex_handle) +{ + hinic3_aeq_unregister_swe_cb(ex_handle, HINIC3_STATEFUL_EVENT); + hinic3_ceq_unregister_cb(ex_handle, HINIC3_NON_L2NIC_NO_CQ_EQ); + hinic3_ceq_unregister_cb(ex_handle, HINIC3_NON_L2NIC_ECQ); + hinic3_ceq_unregister_cb(ex_handle, HINIC3_NON_L2NIC_SCQ); +} + +/** + * Prototype : cqm_scq_callback + * Description : CQM module callback processing for the ceq, + * which processes NON_L2NIC_SCQ. 
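+ * The SCQ number is decoded from ceqe_data with CQM_CQN_FROM_CEQE() and the + * owning service's shared_cq_ceq_callback() is invoked.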
+ * Input : void *ex_handle + * u32 ceqe_data + * Output : None + * Return Value : void + * 1.Date : 2015/5/5 + * Modification : Created function + */ +void cqm_scq_callback(void *ex_handle, u32 ceqe_data) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_service_register_template *service_template = NULL; + struct tag_cqm_handle *cqm_handle = NULL; + struct tag_cqm_service *service = NULL; + struct tag_cqm_queue *cqm_queue = NULL; + struct tag_cqm_object *obj = NULL; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: scq_callback_ex_handle is null\n", __func__); + return; + } + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_scq_callback_cnt); + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + if (unlikely(!cqm_handle)) { + pr_err("[CQM]%s: scq_callback_cqm_handle is null\n", __func__); + return; + } + + cqm_dbg("Event: %s, ceqe_data=0x%x\n", __func__, ceqe_data); + obj = cqm_object_get(ex_handle, CQM_OBJECT_NONRDMA_SCQ, + CQM_CQN_FROM_CEQE(ceqe_data), true); + if (unlikely(!obj)) { + pr_err("[CQM]%s: scq_callback_obj is null\n", __func__); + return; + } + + if (unlikely(obj->service_type >= CQM_SERVICE_T_MAX)) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(obj->service_type)); + cqm_object_put(obj); + return; + } + + service = &cqm_handle->service[obj->service_type]; + service_template = &service->service_template; + if (service_template->shared_cq_ceq_callback) { + cqm_queue = (struct tag_cqm_queue *)obj; + service_template->shared_cq_ceq_callback(service_template->service_handle, + CQM_CQN_FROM_CEQE(ceqe_data), + cqm_queue->priv); + } else { + cqm_err(handle->dev_hdl, CQM_PTR_NULL(shared_cq_ceq_callback)); + } + + cqm_object_put(obj); +} + +/** + * Prototype : cqm_ecq_callback + * Description : CQM module callback processing for the ceq, + * which processes NON_L2NIC_ECQ. 
+ * Input : void *ex_handle + * u32 ceqe_data + * Output : None + * Return Value : void + * 1.Date : 2015/5/5 + * Modification : Created function + */ +void cqm_ecq_callback(void *ex_handle, u32 ceqe_data) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_service_register_template *service_template = NULL; + struct tag_cqm_handle *cqm_handle = NULL; + struct tag_cqm_service *service = NULL; + struct tag_cqm_qpc_mpt *qpc = NULL; + struct tag_cqm_object *obj = NULL; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ecq_callback_ex_handle is null\n", __func__); + return; + } + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_ecq_callback_cnt); + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + if (unlikely(!cqm_handle)) { + pr_err("[CQM]%s: ecq_callback_cqm_handle is null\n", __func__); + return; + } + + obj = cqm_object_get(ex_handle, CQM_OBJECT_SERVICE_CTX, + CQM_XID_FROM_CEQE(ceqe_data), true); + if (unlikely(!obj)) { + pr_err("[CQM]%s: ecq_callback_obj is null\n", __func__); + return; + } + + if (unlikely(obj->service_type >= CQM_SERVICE_T_MAX)) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(obj->service_type)); + cqm_object_put(obj); + return; + } + + service = &cqm_handle->service[obj->service_type]; + service_template = &service->service_template; + if (service_template->embedded_cq_ceq_callback) { + qpc = (struct tag_cqm_qpc_mpt *)obj; + service_template->embedded_cq_ceq_callback(service_template->service_handle, + CQM_XID_FROM_CEQE(ceqe_data), + qpc->priv); + } else { + cqm_err(handle->dev_hdl, + CQM_PTR_NULL(embedded_cq_ceq_callback)); + } + + cqm_object_put(obj); +} + +/** + * Prototype : cqm_nocq_callback + * Description : CQM module callback processing for the ceq, + * which processes NON_L2NIC_NO_CQ_EQ. 
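+ * The XID and queue id are decoded with CQM_XID_FROM_CEQE() and + * CQM_QID_FROM_CEQE(), and the owning service's no_cq_ceq_callback() is + * invoked.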
+ * Input : void *ex_handle + * u32 ceqe_data + * Output : None + * Return Value : void + * 1.Date : 2015/5/5 + * Modification : Created function + */ +void cqm_nocq_callback(void *ex_handle, u32 ceqe_data) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_service_register_template *service_template = NULL; + struct tag_cqm_handle *cqm_handle = NULL; + struct tag_cqm_service *service = NULL; + struct tag_cqm_qpc_mpt *qpc = NULL; + struct tag_cqm_object *obj = NULL; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: nocq_callback_ex_handle is null\n", __func__); + return; + } + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_nocq_callback_cnt); + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + if (unlikely(!cqm_handle)) { + pr_err("[CQM]%s: nocq_callback_cqm_handle is null\n", __func__); + return; + } + + obj = cqm_object_get(ex_handle, CQM_OBJECT_SERVICE_CTX, + CQM_XID_FROM_CEQE(ceqe_data), true); + if (unlikely(!obj)) { + pr_err("[CQM]%s: nocq_callback_obj is null\n", __func__); + return; + } + + if (unlikely(obj->service_type >= CQM_SERVICE_T_MAX)) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(obj->service_type)); + cqm_object_put(obj); + return; + } + + service = &cqm_handle->service[obj->service_type]; + service_template = &service->service_template; + if (service_template->no_cq_ceq_callback) { + qpc = (struct tag_cqm_qpc_mpt *)obj; + service_template->no_cq_ceq_callback(service_template->service_handle, + CQM_XID_FROM_CEQE(ceqe_data), + CQM_QID_FROM_CEQE(ceqe_data), + qpc->priv); + } else { + cqm_err(handle->dev_hdl, CQM_PTR_NULL(no_cq_ceq_callback)); + } + + cqm_object_put(obj); +} + +static u32 cqm_aeq_event2type(u8 event) +{ + u32 service_type; + + /* Distributes events to different service modules + * based on the event type. + */ + if (event < CQM_AEQ_BASE_T_ROCE) + service_type = CQM_SERVICE_T_NIC; + else if (event < CQM_AEQ_BASE_T_FC) + service_type = CQM_SERVICE_T_ROCE; + else if (event < CQM_AEQ_BASE_T_IOE) + service_type = CQM_SERVICE_T_FC; + else if (event < CQM_AEQ_BASE_T_TOE) + service_type = CQM_SERVICE_T_IOE; + else if (event < CQM_AEQ_BASE_T_VBS) + service_type = CQM_SERVICE_T_TOE; + else if (event < CQM_AEQ_BASE_T_IPSEC) + service_type = CQM_SERVICE_T_VBS; + else if (event < CQM_AEQ_BASE_T_MAX) + service_type = CQM_SERVICE_T_IPSEC; + else + service_type = CQM_SERVICE_T_MAX; + + return service_type; +} + +/** + * Prototype : cqm_aeq_callback + * Description : CQM module callback processing for the aeq. + * Input : void *ex_handle + * u8 event + * u64 data + * Output : None + * Return Value : void + * 1.Date : 2015/5/5 + * Modification : Created function + */ +u8 cqm_aeq_callback(void *ex_handle, u8 event, u8 *data) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_service_register_template *service_template = NULL; + struct tag_cqm_handle *cqm_handle = NULL; + struct tag_cqm_service *service = NULL; + u8 event_level = FAULT_LEVEL_MAX; + u32 service_type; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: aeq_callback_ex_handle is null\n", __func__); + return event_level; + } + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_aeq_callback_cnt[event]); + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + if (unlikely(!cqm_handle)) { + pr_err("[CQM]%s: aeq_callback_cqm_handle is null\n", __func__); + return event_level; + } + + /* Distributes events to different service modules + * based on the event type. 
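+ * For example, an event code in [CQM_AEQ_BASE_T_ROCE, CQM_AEQ_BASE_T_FC) is + * dispatched to the RoCE service template (see cqm_aeq_event2type()).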
+ */ + service_type = cqm_aeq_event2type(event); + if (service_type == CQM_SERVICE_T_MAX) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(event)); + return event_level; + } + + service = &cqm_handle->service[service_type]; + service_template = &service->service_template; + + if (!service_template->aeq_level_callback) + cqm_err(handle->dev_hdl, + "Event: service_type %u aeq_level_callback unregistered, event %u\n", + service_type, event); + else + event_level = + service_template->aeq_level_callback(service_template->service_handle, + event, data); + + if (!service_template->aeq_callback) + cqm_err(handle->dev_hdl, "Event: service_type %u aeq_callback unregistered\n", + service_type); + else + service_template->aeq_callback(service_template->service_handle, + event, data); + + return event_level; +} + +/** + * Prototype : cqm_service_register + * Description : Callback template for the service driver + * to register with the CQM. + * Input : void *ex_handle + * struct tag_service_register_template *service_template + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/5 + * Modification : Created function + */ +s32 cqm_service_register(void *ex_handle, struct tag_service_register_template *service_template) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_handle *cqm_handle = NULL; + struct tag_cqm_service *service = NULL; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ex_handle is null\n", __func__); + return CQM_FAIL; + } + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + if (unlikely(!cqm_handle)) { + pr_err("[CQM]%s: cqm_handle is null\n", __func__); + return CQM_FAIL; + } + if (unlikely(!service_template)) { + pr_err("[CQM]%s: service_template is null\n", __func__); + return CQM_FAIL; + } + + if (service_template->service_type >= CQM_SERVICE_T_MAX) { + cqm_err(handle->dev_hdl, + CQM_WRONG_VALUE(service_template->service_type)); + return CQM_FAIL; + } + service = &cqm_handle->service[service_template->service_type]; + if (!service->valid) { + cqm_err(handle->dev_hdl, "Service register: service_type %u is invalid\n", + service_template->service_type); + return CQM_FAIL; + } + + if (service->has_register) { + cqm_err(handle->dev_hdl, "Service register: service_type %u has registered\n", + service_template->service_type); + return CQM_FAIL; + } + + service->has_register = true; + memcpy((void *)(&service->service_template), (void *)service_template, + sizeof(struct tag_service_register_template)); + + return CQM_SUCCESS; +} +EXPORT_SYMBOL(cqm_service_register); + +/** + * Prototype : cqm_service_unregister + * Description : The service driver deregisters the callback function + * from the CQM. 
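+ * The registration flag is cleared and the stored service template is + * zeroed; the service's 'valid' capability bit is left unchanged.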
+ * Input : void *ex_handle + * u32 service_type + * Output : None + * Return Value : void + * 1.Date : 2015/4/5 + * Modification : Created function + */ +void cqm_service_unregister(void *ex_handle, u32 service_type) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_handle *cqm_handle = NULL; + struct tag_cqm_service *service = NULL; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ex_handle is null\n", __func__); + return; + } + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + if (unlikely(!cqm_handle)) { + pr_err("[CQM]%s: cqm_handle is null\n", __func__); + return; + } + + if (service_type >= CQM_SERVICE_T_MAX) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(service_type)); + return; + } + + service = &cqm_handle->service[service_type]; + if (!service->valid) + cqm_err(handle->dev_hdl, "Service unregister: service_type %u is disabled\n", + service_type); + + service->has_register = false; + memset(&service->service_template, 0, sizeof(struct tag_service_register_template)); +} +EXPORT_SYMBOL(cqm_service_unregister); + +s32 cqm_fake_vf_num_set(void *ex_handle, u16 fake_vf_num_cfg) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct service_cap *svc_cap = NULL; + + if (!ex_handle) + return CQM_FAIL; + + svc_cap = &handle->cfg_mgmt->svc_cap; + + if (fake_vf_num_cfg > svc_cap->fake_vf_num) { + cqm_err(handle->dev_hdl, "fake_vf_num_cfg is invalid, fw fake_vf_num is %u\n", + svc_cap->fake_vf_num); + return CQM_FAIL; + } + + /* fake_vf_num_cfg is valid when func type is CQM_FAKE_FUNC_PARENT */ + svc_cap->fake_vf_num_cfg = fake_vf_num_cfg; + + return CQM_SUCCESS; +} +EXPORT_SYMBOL(cqm_fake_vf_num_set); diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_main.h b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_main.h new file mode 100644 index 000000000..4f87bd1d7 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_main.h @@ -0,0 +1,380 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef CQM_MAIN_H +#define CQM_MAIN_H + +#include <linux/pci.h> + +#include "hinic3_crm.h" +#include "cqm_bloomfilter.h" +#include "hinic3_hwif.h" +#include "cqm_bat_cla.h" + +#define GET_MAX max +#define GET_MIN min +#define CQM_DW_SHIFT 2 +#define CQM_QW_SHIFT 3 +#define CQM_BYTE_BIT_SHIFT 3 +#define CQM_NUM_BIT_BYTE 8 + +#define CHIPIF_SUCCESS 0 +#define CHIPIF_FAIL (-1) + +#define CQM_TIMER_ENABLE 1 +#define CQM_TIMER_DISABLE 0 + +#define CQM_TIMER_NUM_MULTI 2 + +/* The value must be the same as that of hinic3_service_type in hinic3_crm.h. */ +#define CQM_SERVICE_T_NIC SERVICE_T_NIC +#define CQM_SERVICE_T_OVS SERVICE_T_OVS +#define CQM_SERVICE_T_ROCE SERVICE_T_ROCE +#define CQM_SERVICE_T_TOE SERVICE_T_TOE +#define CQM_SERVICE_T_IOE SERVICE_T_IOE +#define CQM_SERVICE_T_FC SERVICE_T_FC +#define CQM_SERVICE_T_VBS SERVICE_T_VBS +#define CQM_SERVICE_T_IPSEC SERVICE_T_IPSEC +#define CQM_SERVICE_T_VIRTIO SERVICE_T_VIRTIO +#define CQM_SERVICE_T_PPA SERVICE_T_PPA +#define CQM_SERVICE_T_MAX SERVICE_T_MAX + +struct tag_cqm_service { + bool valid; /* Whether to enable this service on the function. */ + bool has_register; /* Registered or Not */ + u64 hardware_db_paddr; + void __iomem *hardware_db_vaddr; + u64 dwqe_paddr; + void __iomem *dwqe_vaddr; + u32 buf_order; /* The size of each buf node is 2^buf_order pages. */ + struct tag_service_register_template service_template; +}; + +struct tag_cqm_fake_cfg { + u32 parent_func; /* The parent func_id of the fake vfs.
*/ + u32 child_func_start; /* The start func_id of the child fake vfs. */ + u32 child_func_number; /* The number of the child fake vfs. */ +}; + +#define CQM_MAX_FACKVF_GROUP 4 + +struct tag_cqm_func_capability { + /* BAT_PTR table(SMLC) */ + bool ft_enable; /* BAT for flow table enable: support toe/ioe/fc service + */ + bool rdma_enable; /* BAT for rdma enable: support RoCE */ + /* VAT table(SMIR) */ + bool ft_pf_enable; /* Same as ft_enable. BAT entry for toe/ioe/fc on pf + */ + bool rdma_pf_enable; /* Same as rdma_enable. BAT entry for rdma on pf */ + + /* Dynamic or static memory allocation during the application of + * specified QPC/SCQC for each service. + */ + bool qpc_alloc_static; + bool scqc_alloc_static; + + u8 timer_enable; /* Whether the timer function is enabled */ + u8 bloomfilter_enable; /* Whether the bloomfilter function is enabled + */ + u32 flow_table_based_conn_number; /* Maximum number of connections for + * toe/ioe/fc, which cannot exceed + * qpc_number + */ + u32 flow_table_based_conn_cache_number; /* Maximum number of sticky + * caches + */ + u32 bloomfilter_length; /* Size of the bloomfilter table, 64-byte + * aligned + */ + u32 bloomfilter_addr; /* Start position of the bloomfilter table in the + * SMF main cache. + */ + u32 qpc_reserved; /* Reserved bit in bitmap */ + u32 qpc_reserved_back; /* Reserved back bit in bitmap */ + u32 mpt_reserved; /* The ROCE/IWARP MPT also has a reserved bit. */ + + /* All basic_size must be 2^n-aligned. */ + u32 hash_number; /* The number of hash buckets. The size of the BAT table + * is aligned to 64 buckets. At least 64 buckets are + * required. + */ + u32 hash_basic_size; /* The basic size of a hash bucket is 64B, including + * 5 valid entries and one next entry. + */ + u32 qpc_number; + u32 fake_vf_qpc_number; + u32 qpc_basic_size; + + /* Number of PFs/VFs on the current host, used only for timer resources */ + u32 pf_num; + u32 pf_id_start; + u32 vf_num; + u32 vf_id_start; + + u8 timer_pf_num; + u8 timer_pf_id_start; + u16 timer_vf_num; + u16 timer_vf_id_start; + + u32 lb_mode; + /* Only the lower 4 bits are valid, indicating which SMFs are enabled. + * For example, 0101B indicates that SMF0 and SMF2 are enabled. + */ + u32 smf_pg; + + u32 fake_mode; + u32 fake_func_type; /* Whether the current function belongs to the fake + * group (parent or child) + */ + u32 fake_cfg_number; /* Number of current configuration groups */ + struct tag_cqm_fake_cfg fake_cfg[CQM_MAX_FACKVF_GROUP]; + + /* Note: for cqm special test */ + u32 pagesize_reorder; + bool xid_alloc_mode; + bool gpa_check_enable; + u32 scq_reserved; + u32 srq_reserved; + + u32 mpt_number; + u32 mpt_basic_size; + u32 scqc_number; + u32 scqc_basic_size; + u32 srqc_number; + u32 srqc_basic_size; + + u32 gid_number; + u32 gid_basic_size; + u32 lun_number; + u32 lun_basic_size; + u32 taskmap_number; + u32 taskmap_basic_size; + u32 l3i_number; + u32 l3i_basic_size; + u32 childc_number; + u32 childc_basic_size; + u32 child_qpc_id_start; /* FC service child CTX uses global addressing. */ + u32 childc_number_all_function; /* The chip supports a maximum of 8096 + * child CTXs.
+ */ + u32 timer_number; + u32 timer_basic_size; + u32 xid2cid_number; + u32 xid2cid_basic_size; + u32 reorder_number; + u32 reorder_basic_size; +}; + +#define CQM_PF TYPE_PF +#define CQM_VF TYPE_VF +#define CQM_PPF TYPE_PPF +#define CQM_UNKNOWN TYPE_UNKNOWN +#define CQM_MAX_PF_NUM 32 + +#define CQM_LB_MODE_NORMAL 0xff +#define CQM_LB_MODE_0 0 +#define CQM_LB_MODE_1 1 +#define CQM_LB_MODE_2 2 + +#define CQM_LB_SMF_MAX 4 + +#define CQM_FPGA_MODE 0 +#define CQM_EMU_MODE 1 + +#define CQM_FAKE_FUNC_NORMAL 0 +#define CQM_FAKE_FUNC_PARENT 1 +#define CQM_FAKE_FUNC_CHILD 2 +#define CQM_FAKE_FUNC_CHILD_CONFLICT 3 /* The detected function is the + * function that is faked. + */ + +#define CQM_FAKE_FUNC_MAX 32 + +#define CQM_SPU_HOST_ID 4 + +#define CQM_QPC_ROCE_PER_DRCT 12 +#define CQM_QPC_ROCE_NORMAL 0 +#define CQM_QPC_ROCE_VBS_MODE 2 + +struct tag_cqm_toe_private_capability { + /* TOE srq is different from other services + * and does not need to be managed by the CLA table. + */ + u32 toe_srqc_number; + u32 toe_srqc_basic_size; + u32 toe_srqc_start_id; + + struct tag_cqm_bitmap srqc_bitmap; +}; + +struct tag_cqm_secure_mem { + u16 func_id; + bool need_secure_mem; + + u32 mode; + u32 gpa_len0; + + void __iomem *va_base; + void __iomem *va_end; + u64 pa_base; + u32 page_num; + + /* bitmap mgmt */ + spinlock_t bitmap_lock; + unsigned long *bitmap; + u32 bits_nr; + u32 alloc_cnt; + u32 free_cnt; +}; + +struct tag_cqm_handle { + struct hinic3_hwdev *ex_handle; + struct pci_dev *dev; + struct hinic3_func_attr func_attribute; /* vf/pf attributes */ + struct tag_cqm_func_capability func_capability; /* function capability set */ + struct tag_cqm_service service[CQM_SERVICE_T_MAX]; /* Service-related structure */ + struct tag_cqm_bat_table bat_table; + struct tag_cqm_bloomfilter_table bloomfilter_table; + /* fake-vf-related structure */ + struct tag_cqm_handle *fake_cqm_handle[CQM_FAKE_FUNC_MAX]; + struct tag_cqm_handle *parent_cqm_handle; + + struct tag_cqm_toe_private_capability toe_own_capability; /* TOE service-related + * capability set + */ + struct tag_cqm_secure_mem secure_mem; + struct list_head node; + char name[VRAM_NAME_APPLY_LEN]; +}; + +#define CQM_CQN_FROM_CEQE(data) ((data) & 0xfffff) +#define CQM_XID_FROM_CEQE(data) ((data) & 0xfffff) +#define CQM_QID_FROM_CEQE(data) (((data) >> 20) & 0x7) +#define CQM_TYPE_FROM_CEQE(data) (((data) >> 23) & 0x7) + +#define CQM_HASH_BUCKET_SIZE_64 64 + +#define CQM_MAX_QPC_NUM 0x100000U +#define CQM_MAX_SCQC_NUM 0x100000U +#define CQM_MAX_SRQC_NUM 0x100000U +#define CQM_MAX_CHILDC_NUM 0x100000U + +#define CQM_QPC_SIZE_256 256U +#define CQM_QPC_SIZE_512 512U +#define CQM_QPC_SIZE_1024 1024U + +#define CQM_SCQC_SIZE_32 32U +#define CQM_SCQC_SIZE_64 64U +#define CQM_SCQC_SIZE_128 128U + +#define CQM_SRQC_SIZE_32 32 +#define CQM_SRQC_SIZE_64 64 +#define CQM_SRQC_SIZE_128 128 + +#define CQM_MPT_SIZE_64 64 + +#define CQM_GID_SIZE_32 32 + +#define CQM_LUN_SIZE_8 8 + +#define CQM_L3I_SIZE_8 8 + +#define CQM_TIMER_SIZE_32 32 + +#define CQM_XID2CID_SIZE_8 8 + +#define CQM_REORDER_SIZE_256 256 + +#define CQM_CHILDC_SIZE_256 256U + +#define CQM_XID2CID_VBS_NUM (2 * 1024) /* 2K nvme Q */ + +#define CQM_VBS_QPC_SIZE 512U + +#define CQM_XID2CID_VIRTIO_NUM (16 * 1024) /* 16K virt Q */ + +#define CQM_GID_RDMA_NUM 128 + +#define CQM_LUN_FC_NUM 64 + +#define CQM_TASKMAP_FC_NUM 4 + +#define CQM_L3I_COMM_NUM 64 + +#define CQM_CHILDC_ROCE_NUM (8 * 1024) +#define CQM_CHILDC_OVS_VBS_NUM (8 * 1024) + +#define CQM_TIMER_SCALE_NUM (2 * 1024) +#define CQM_TIMER_ALIGN_WHEEL_NUM 8 
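+/* Timer entries per counted timer PF/VF: CQM_TIMER_ALIGN_WHEEL_NUM wheels of + * CQM_TIMER_SCALE_NUM entries; cqm_capability_init_timer() sets timer_number + * to CQM_TIMER_ALIGN_SCALE_NUM times the number of timer PFs/VFs. + */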
+#define CQM_TIMER_ALIGN_SCALE_NUM \ + (CQM_TIMER_SCALE_NUM * CQM_TIMER_ALIGN_WHEEL_NUM) + +#define CQM_QPC_OVS_RSVD (1024 * 1024) +#define CQM_QPC_ROCE_RSVD 2 +#define CQM_QPC_ROCEAA_SWITCH_QP_NUM 4 +#define CQM_QPC_ROCEAA_RSVD \ + (4 * 1024 + CQM_QPC_ROCEAA_SWITCH_QP_NUM) /* 4096 Normal QP + + * 4 Switch QP + */ + +#define CQM_CQ_ROCEAA_RSVD 64 +#define CQM_SRQ_ROCEAA_RSVD 64 +#define CQM_QPC_ROCE_VBS_RSVD_BACK 204800 /* 200K */ + +#define CQM_OVS_PAGESIZE_ORDER 9 +#define CQM_OVS_MAX_TIMER_FUNC 48 + +#define CQM_PPA_PAGESIZE_ORDER 8 + +#define CQM_FC_PAGESIZE_ORDER 0 + +#define CQM_QHEAD_ALIGN_ORDER 6 + +typedef void (*serv_cap_init_cb)(struct tag_cqm_handle *, void *); + +struct cqm_srv_cap_init { + u32 service_type; + serv_cap_init_cb serv_cap_proc; +}; + +/* Only for llt test */ +s32 cqm_capability_init(void *ex_handle); +/* Can be defined as static */ +s32 cqm_mem_init(void *ex_handle); +void cqm_mem_uninit(void *ex_handle); +s32 cqm_event_init(void *ex_handle); +void cqm_event_uninit(void *ex_handle); +void cqm_scq_callback(void *ex_handle, u32 ceqe_data); +void cqm_ecq_callback(void *ex_handle, u32 ceqe_data); +void cqm_nocq_callback(void *ex_handle, u32 ceqe_data); +u8 cqm_aeq_callback(void *ex_handle, u8 event, u8 *data); +s32 cqm_get_fake_func_type(struct tag_cqm_handle *cqm_handle); +s32 cqm_get_child_func_start(struct tag_cqm_handle *cqm_handle); +s32 cqm_get_child_func_number(struct tag_cqm_handle *cqm_handle); + +s32 cqm_init(void *ex_handle); +void cqm_uninit(void *ex_handle); +s32 cqm_service_register(void *ex_handle, struct tag_service_register_template *service_template); +void cqm_service_unregister(void *ex_handle, u32 service_type); + +s32 cqm_fake_vf_num_set(void *ex_handle, u16 fake_vf_num_cfg); +#define CQM_LOG_ID 0 + +#define CQM_PTR_NULL(x) "%s: " #x " is null\n", __func__ +#define CQM_MAP_FAIL(x) "%s: " #x " map fail\n", __func__ +#define CQM_FUNCTION_FAIL(x) "%s: " #x " return failure\n", __func__ +#define CQM_WRONG_VALUE(x) "%s: " #x " %u is wrong\n", __func__, (u32)(x) + +#define cqm_err(dev, format, ...) dev_err(dev, "[CQM]" format, ##__VA_ARGS__) +#define cqm_warn(dev, format, ...) dev_warn(dev, "[CQM]" format, ##__VA_ARGS__) +#define cqm_notice(dev, format, ...) \ + dev_notice(dev, "[CQM]" format, ##__VA_ARGS__) +#define cqm_info(dev, format, ...) dev_info(dev, "[CQM]" format, ##__VA_ARGS__) +#ifdef __CQM_DEBUG__ +#define cqm_dbg(format, ...) pr_info("[CQM]" format, ##__VA_ARGS__) +#else +#define cqm_dbg(format, ...) 
+#endif + +#endif /* CQM_MAIN_H */ diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_memsec.c b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_memsec.c new file mode 100644 index 000000000..213803f56 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_memsec.c @@ -0,0 +1,665 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> + +#include "ossl_knl.h" +#include "hinic3_hw.h" +#include "hinic3_mt.h" +#include "hinic3_hwif.h" +#include "hinic3_hw_cfg.h" + +#include "cqm_object.h" +#include "cqm_bitmap_table.h" +#include "cqm_bat_cla.h" +#include "cqm_bloomfilter.h" +#include "cqm_db.h" +#include "cqm_main.h" +#include "vmsec_mpu_common.h" +#include "cqm_memsec.h" + +#define SECURE_VA_TO_IDX(va, base) (((va) - (base)) / PAGE_SIZE) +#define PCI_PROC_NAME_LEN 32 +#define U8_BIT 8 +#define MEM_SEC_PROC_DIR "driver/memsec" +#define BITS_TO_MB(bits) ((bits) * PAGE_SIZE / 1024 / 1024) +#define MEM_SEC_UNNECESSARY 1 +#define MEMSEC_TMP_LEN 32 +#define STD_INPUT_ONE_PARA 1 +#define STD_INPUT_TWO_PARA 2 +#define MR_KEY_2_INDEX_SHIFT 8 + +static int memsec_proc_show(struct seq_file *seq, void *offset); +static int memsec_proc_open(struct inode *inode, struct file *file); +static int memsec_proc_release(struct inode *inode, struct file *file); +static void memsec_info_print(struct seq_file *seq, struct tag_cqm_secure_mem *secure_mem); +static int hinic3_secure_mem_proc_ent_init(void *hwdev); +static void hinic3_secure_mem_proc_ent_deinit(void); +static int hinic3_secure_mem_proc_node_remove(void *hwdev); +static int hinic3_secure_mem_proc_node_add(void *hwdev); +static ssize_t memsec_proc_write(struct file *file, const char __user *data, size_t len, + loff_t *pff); + +static struct proc_dir_entry *g_hinic3_memsec_proc_ent; /* proc dir */ +static atomic_t g_memsec_proc_refcnt = ATOMIC_INIT(0); + +static const struct proc_ops memsec_proc_fops = { + .proc_open = memsec_proc_open, + .proc_read = seq_read, + .proc_write = memsec_proc_write, + .proc_release = memsec_proc_release, +}; + +bool cqm_need_secure_mem(void *hwdev) +{ + struct tag_cqm_secure_mem *info = NULL; + struct tag_cqm_handle *cqm_handle = NULL; + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)hwdev; + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + info = &cqm_handle->secure_mem; + return ((info->need_secure_mem) && hinic3_is_guest_vmsec_enable(hwdev)); +} +EXPORT_SYMBOL(cqm_need_secure_mem); + +static int memsec_proc_open(struct inode *inode, struct file *file) +{ + struct hinic3_hwdev *handle = pde_data(inode); + int ret; + + if (!try_module_get(THIS_MODULE)) + return -ENODEV; + + ret = single_open(file, memsec_proc_show, handle); + if (ret) + module_put(THIS_MODULE); + + return ret; +} + +static int memsec_proc_release(struct inode *inode, struct file *file) +{ + module_put(THIS_MODULE); + return single_release(inode, file); +} + +static void memsec_info_print(struct seq_file *seq, struct tag_cqm_secure_mem *secure_mem) +{ + int i, j; + + seq_printf(seq, "Secure MemPageSize: %lu\n", PAGE_SIZE); + seq_printf(seq, "Secure MemTotal: %u pages\n", secure_mem->bits_nr); + seq_printf(seq, "Secure MemTotal: %lu MB\n", BITS_TO_MB(secure_mem->bits_nr)); + seq_printf(seq, "Secure MemUsed: %d pages\n", + bitmap_weight(secure_mem->bitmap, secure_mem->bits_nr)); + seq_printf(seq, "Secure 
MemAvailable: %d pages\n", + secure_mem->bits_nr - bitmap_weight(secure_mem->bitmap, secure_mem->bits_nr)); + seq_printf(seq, "Secure MemFirstAvailableIdx: %lu\n", + find_first_zero_bit(secure_mem->bitmap, secure_mem->bits_nr)); + seq_printf(seq, "Secure MemVirtualAddrStart: 0x%p\n", secure_mem->va_base); + seq_printf(seq, "Secure MemVirtualAddrEnd: 0x%p\n", secure_mem->va_end); + seq_printf(seq, "Secure MemPhysicalAddrStart: 0x%llx\n", secure_mem->pa_base); + seq_printf(seq, "Secure MemPhysicalAddrEnd: 0x%llx\n", + secure_mem->pa_base + secure_mem->gpa_len0); + seq_printf(seq, "Secure MemAllocCnt: %d\n", secure_mem->alloc_cnt); + seq_printf(seq, "Secure MemFreeCnt: %d\n", secure_mem->free_cnt); + seq_printf(seq, "Secure MemProcRefCnt: %d\n", atomic_read(&g_memsec_proc_refcnt)); + seq_puts(seq, "Secure MemBitmap:"); + + for (i = 0, j = 0; i < (secure_mem->bits_nr / U8_BIT); i++) { + if (i % U8_BIT == 0) { + seq_printf(seq, "\n [%05d-%05d]: ", j, j + (U8_BIT * U8_BIT) - 0x1); + j += U8_BIT * U8_BIT; + } + seq_printf(seq, "0x%x ", *(u8 *)((u8 *)secure_mem->bitmap + i)); + } + + seq_puts(seq, "\nSecure MemBitmap info end\n"); +} + +static struct tag_cqm_secure_mem *memsec_proc_get_secure_mem(struct hinic3_hwdev *handle) +{ + struct tag_cqm_handle *cqm_handle = NULL; + struct tag_cqm_secure_mem *info = NULL; + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + if (!cqm_handle) { + cqm_err(handle->dev_hdl, "[memsec]cqm not inited yet\n"); + return ERR_PTR(-EINVAL); + } + + info = &cqm_handle->secure_mem; + if (!info || !info->bitmap) { + cqm_err(handle->dev_hdl, "[memsec]secure mem not inited yet\n"); + return ERR_PTR(-EINVAL); + } + + return info; +} + +static int memsec_proc_show(struct seq_file *seq, void *offset) +{ + struct hinic3_hwdev *handle = seq->private; + struct tag_cqm_secure_mem *info = NULL; + + info = memsec_proc_get_secure_mem(handle); + if (IS_ERR(info)) + return -EINVAL; + + memsec_info_print(seq, info); + + return 0; +} + +static int test_read_secure_mem(struct hinic3_hwdev *handle, char *data, size_t len) +{ + u64 mem_ptr; + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + struct tag_cqm_secure_mem *info = &cqm_handle->secure_mem; + + if (sscanf(data, "r %llx", &mem_ptr) != STD_INPUT_ONE_PARA) { + cqm_err(handle->dev_hdl, "[memsec_dfx] read info format unknown!\n"); + return -EINVAL; + } + + if (mem_ptr < (u64)(info->va_base) || mem_ptr >= (u64)(info->va_end)) { + cqm_err(handle->dev_hdl, "[memsec_dfx] addr 0x%llx invalid!\n", mem_ptr); + return -EINVAL; + } + + cqm_info(handle->dev_hdl, "[memsec_dfx] read addr 0x%llx val 0x%llx\n", + mem_ptr, *(u64 *)mem_ptr); + return 0; +} + +static int test_write_secure_mem(struct hinic3_hwdev *handle, char *data, size_t len) +{ + u64 mem_ptr; + u64 val; + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + struct tag_cqm_secure_mem *info = &cqm_handle->secure_mem; + + if (sscanf(data, "w %llx %llx", &mem_ptr, &val) != STD_INPUT_TWO_PARA) { + cqm_err(handle->dev_hdl, "[memsec_dfx] read info format unknown!\n"); + return -EINVAL; + } + + if (mem_ptr < (u64)(info->va_base) || mem_ptr >= (u64)(info->va_end)) { + cqm_err(handle->dev_hdl, "[memsec_dfx] addr 0x%llx invalid!\n", mem_ptr); + return -EINVAL; + } + + *(u64 *)mem_ptr = val; + + cqm_info(handle->dev_hdl, "[memsec_dfx] write addr 0x%llx val 0x%llx now val 0x%llx\n", + mem_ptr, val, *(u64 *)mem_ptr); + return 0; +} + +static void test_query_usage(struct hinic3_hwdev *handle) +{ + cqm_info(handle->dev_hdl, 
"\t[memsec_dfx]Usage: q <query_type> <index>\n"); + cqm_info(handle->dev_hdl, "\t[memsec_dfx]Check whether roce context is in secure memory\n"); + cqm_info(handle->dev_hdl, "\t[memsec_dfx]Options:\n"); + cqm_info(handle->dev_hdl, "\t[memsec_dfx]query_type: qpc, mpt, srqc, scqc\n"); + cqm_info(handle->dev_hdl, "\t[memsec_dfx]index: valid index.e.g. 0x3\n"); +} + +static int test_query_parse_type(struct hinic3_hwdev *handle, char *data, + enum cqm_object_type *type, u32 *index) +{ + char query_type[MEMSEC_TMP_LEN] = {'\0'}; + + if (sscanf(data, "q %s %x", query_type, index) != STD_INPUT_TWO_PARA) { + cqm_err(handle->dev_hdl, "[memsec_dfx] parse query cmd fail!\n"); + return -1; + } + query_type[MEMSEC_TMP_LEN - 1] = '\0'; + + if (*index <= 0) { + cqm_err(handle->dev_hdl, "[memsec_dfx] query index 0x%x is invalid\n", *index); + return -1; + } + + if (strcmp(query_type, "qpc") == 0) { + *type = CQM_OBJECT_SERVICE_CTX; + } else if (strcmp(query_type, "mpt") == 0) { + *type = CQM_OBJECT_MPT; + *index = (*index >> MR_KEY_2_INDEX_SHIFT) & 0xFFFFFF; + } else if (strcmp(query_type, "srqc") == 0) { + *type = CQM_OBJECT_RDMA_SRQ; + } else if (strcmp(query_type, "scqc") == 0) { + *type = CQM_OBJECT_RDMA_SCQ; + } else { + cqm_err(handle->dev_hdl, "[memsec_dfx] query type is invalid\n"); + return -1; + } + + return 0; +} + +static int test_query_context(struct hinic3_hwdev *handle, char *data, size_t len) +{ + int ret; + u32 index = 0; + bool in_secmem = false; + struct tag_cqm_object *cqm_obj = NULL; + struct tag_cqm_qpc_mpt *qpc_mpt = NULL; + struct tag_cqm_queue *cqm_queue = NULL; + struct tag_cqm_secure_mem *info = NULL; + enum cqm_object_type query_type; + + ret = test_query_parse_type(handle, data, &query_type, &index); + if (ret < 0) { + test_query_usage(handle); + return -EINVAL; + } + + info = memsec_proc_get_secure_mem(handle); + if (IS_ERR(info)) + return -EINVAL; + + cqm_obj = cqm_object_get((void *)handle, query_type, index, false); + if (!cqm_obj) { + cqm_err(handle->dev_hdl, "[memsec_dfx] get cmq obj fail!\n"); + return -EINVAL; + } + + switch (query_type) { + case CQM_OBJECT_SERVICE_CTX: + case CQM_OBJECT_MPT: + qpc_mpt = (struct tag_cqm_qpc_mpt *)cqm_obj; + if (qpc_mpt->vaddr >= (u8 *)info->va_base && + (qpc_mpt->vaddr + cqm_obj->object_size) < (u8 *)info->va_end) + in_secmem = true; + cqm_info(handle->dev_hdl, + "[memsec_dfx]Query %s:0x%x, va=%p %sin secure mem\n", + query_type == CQM_OBJECT_MPT ? "MPT, mpt_index" : "QPC, qpn", + index, qpc_mpt->vaddr, in_secmem ? "" : "not "); + break; + case CQM_OBJECT_RDMA_SRQ: + case CQM_OBJECT_RDMA_SCQ: + cqm_queue = (struct tag_cqm_queue *)cqm_obj; + if (cqm_queue->q_ctx_vaddr >= (u8 *)info->va_base && + (cqm_queue->q_ctx_vaddr + cqm_obj->object_size) < (u8 *)info->va_end) + in_secmem = true; + cqm_info(handle->dev_hdl, + "[memsec_dfx]Query %s:0x%x, va=%p %sin secure mem\n", + query_type == CQM_OBJECT_RDMA_SRQ ? "SRQC, srqn " : "SCQC, scqn", + index, cqm_queue->q_ctx_vaddr, in_secmem ? 
"" : "not "); + break; + default: + cqm_err(handle->dev_hdl, "[memsec_dfx] not support query type!\n"); + break; + } + + cqm_object_put(cqm_obj); + return 0; +} + +static ssize_t memsec_proc_write(struct file *file, const char __user *data, + size_t len, loff_t *off) +{ + int ret = -EINVAL; + struct hinic3_hwdev *handle = pde_data(file->f_inode); + char tmp[MEMSEC_TMP_LEN] = {0}; + + if (!handle) + return -EIO; + + if (len >= MEMSEC_TMP_LEN) + return -EFBIG; + + if (copy_from_user(tmp, data, len)) + return -EIO; + + switch (tmp[0]) { + case 'r': + ret = test_read_secure_mem(handle, tmp, len); + break; + case 'w': + ret = test_write_secure_mem(handle, tmp, len); + break; + case 'q': + ret = test_query_context(handle, tmp, len); + break; + default: + cqm_err(handle->dev_hdl, "[memsec_dfx] not support cmd!\n"); + } + + return (ret == 0) ? len : ret; +} + +static int hinic3_secure_mem_proc_ent_init(void *hwdev) +{ + struct hinic3_hwdev *dev = hwdev; + + if (g_hinic3_memsec_proc_ent) + return 0; + + g_hinic3_memsec_proc_ent = proc_mkdir(MEM_SEC_PROC_DIR, NULL); + if (!g_hinic3_memsec_proc_ent) { + /* try again */ + remove_proc_entry(MEM_SEC_PROC_DIR, NULL); + g_hinic3_memsec_proc_ent = proc_mkdir(MEM_SEC_PROC_DIR, NULL); + if (!g_hinic3_memsec_proc_ent) { + cqm_err(dev->dev_hdl, "[memsec]create secure mem proc fail!\n"); + return -EINVAL; + } + } + + return 0; +} + +static void hinic3_secure_mem_proc_ent_deinit(void) +{ + if (g_hinic3_memsec_proc_ent && !atomic_read(&g_memsec_proc_refcnt)) { + remove_proc_entry(MEM_SEC_PROC_DIR, NULL); + g_hinic3_memsec_proc_ent = NULL; + } +} + +static int hinic3_secure_mem_proc_node_remove(void *hwdev) +{ + struct hinic3_hwdev *dev = hwdev; + struct pci_dev *pdev = dev->pcidev_hdl; + char pci_name[PCI_PROC_NAME_LEN] = {0}; + + if (!g_hinic3_memsec_proc_ent) { + sdk_info(dev->dev_hdl, "[memsec]proc_ent_null!\n"); + return 0; + } + + atomic_dec(&g_memsec_proc_refcnt); + + snprintf(pci_name, PCI_PROC_NAME_LEN - 1, + "%02x:%02x:%x", pdev->bus->number, pdev->slot->number, + PCI_FUNC(pdev->devfn)); + + remove_proc_entry(pci_name, g_hinic3_memsec_proc_ent); + + return 0; +} + +static int hinic3_secure_mem_proc_node_add(void *hwdev) +{ + struct hinic3_hwdev *dev = hwdev; + struct pci_dev *pdev = dev->pcidev_hdl; + struct proc_dir_entry *res = NULL; + char pci_name[PCI_PROC_NAME_LEN] = {0}; + + if (!g_hinic3_memsec_proc_ent) { + cqm_err(dev->dev_hdl, "[memsec]proc_ent_null!\n"); + return -EINVAL; + } + + atomic_inc(&g_memsec_proc_refcnt); + + snprintf(pci_name, PCI_PROC_NAME_LEN - 1, + "%02x:%02x:%x", pdev->bus->number, pdev->slot->number, + PCI_FUNC(pdev->devfn)); + /* 0400 Read by owner */ + res = proc_create_data(pci_name, 0400, g_hinic3_memsec_proc_ent, &memsec_proc_fops, + hwdev); + if (!res) { + cqm_err(dev->dev_hdl, "[memsec]proc_create_data fail!\n"); + return -ENOMEM; + } + + return 0; +} + +void hinic3_memsec_proc_init(void *hwdev) +{ + struct hinic3_hwdev *dev = hwdev; + int ret; + + ret = hinic3_secure_mem_proc_ent_init(hwdev); + if (ret != 0) { + cqm_err(dev->dev_hdl, "[memsec]proc ent init fail!\n"); + return; + } + + ret = hinic3_secure_mem_proc_node_add(hwdev); + if (ret != 0) { + cqm_err(dev->dev_hdl, "[memsec]proc node add fail!\n"); + return; + } +} + +void hinic3_memsec_proc_deinit(void *hwdev) +{ + struct hinic3_hwdev *dev = hwdev; + int ret; + + if (!cqm_need_secure_mem(hwdev)) + return; + + ret = hinic3_secure_mem_proc_node_remove(hwdev); + if (ret != 0) { + cqm_err(dev->dev_hdl, "[memsec]proc node remove fail!\n"); + return; + } + + 
hinic3_secure_mem_proc_ent_deinit(); +} + +static int cqm_get_secure_mem_cfg(void *dev, struct tag_cqm_secure_mem *info) +{ + struct hinic3_hwdev *hwdev = (struct hinic3_hwdev *)dev; + struct vmsec_cfg_ctx_gpa_entry_cmd mem_info; + u16 out_size = sizeof(struct vmsec_cfg_ctx_gpa_entry_cmd); + int err; + + if (!hwdev || !info) + return -EINVAL; + + memset(&mem_info, 0, sizeof(mem_info)); + mem_info.entry.func_id = info->func_id; + + err = hinic3_msg_to_mgmt_sync(hwdev, HINIC3_MOD_VMSEC, VMSEC_MPU_CMD_CTX_GPA_SHOW, + &mem_info, sizeof(mem_info), &mem_info, + &out_size, 0, HINIC3_CHANNEL_COMM); + if (err || !out_size || mem_info.head.status) { + cqm_err(hwdev->dev_hdl, "failed to get memsec info, err: %d, status: 0x%x, out size: 0x%x\n", + err, mem_info.head.status, out_size); + return -EINVAL; + } + + info->gpa_len0 = mem_info.entry.gpa_len0; + info->mode = mem_info.entry.mode; + info->pa_base = (u64)((((u64)mem_info.entry.gpa_addr0_hi) << CQM_INT_ADDR_SHIFT) | + mem_info.entry.gpa_addr0_lo); + + return 0; +} + +static int cqm_secure_mem_param_check(void *ex_handle, struct tag_cqm_secure_mem *info) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + + if (!info->pa_base || !info->gpa_len0) + goto no_need_secure_mem; + + if (!IS_ALIGNED(info->pa_base, CQM_SECURE_MEM_ALIGNED_SIZE) || + !IS_ALIGNED(info->gpa_len0, CQM_SECURE_MEM_ALIGNED_SIZE)) { + cqm_err(handle->dev_hdl, "func_id %u secure mem not 2M aligned\n", + info->func_id); + return -EINVAL; + } + + if (info->mode == VM_GPA_INFO_MODE_NMIG) + goto no_need_secure_mem; + + return 0; + +no_need_secure_mem: + cqm_info(handle->dev_hdl, "func_id %u no need secure mem gpa 0x%llx len0 0x%x mode 0x%x\n", + info->func_id, info->pa_base, info->gpa_len0, info->mode); + info->need_secure_mem = false; + return MEM_SEC_UNNECESSARY; +} + +int cqm_secure_mem_init(void *ex_handle) +{ + int err; + struct tag_cqm_secure_mem *info = NULL; + struct tag_cqm_handle *cqm_handle = NULL; + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + + if (!handle) + return -EINVAL; + + // only vf in vm need secure mem + if (!hinic3_is_guest_vmsec_enable(ex_handle)) { + cqm_info(handle->dev_hdl, "no need secure mem\n"); + return 0; + } + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + info = &cqm_handle->secure_mem; + info->func_id = hinic3_global_func_id(ex_handle); + + // get gpa info from mpu + err = cqm_get_secure_mem_cfg(ex_handle, info); + if (err) { + cqm_err(handle->dev_hdl, "func_id %u get secure mem failed, ret %d\n", + info->func_id, err); + return err; + } + + // remap gpa + err = cqm_secure_mem_param_check(ex_handle, info); + if (err) { + cqm_info(handle->dev_hdl, "func_id %u cqm_secure_mem_param_check failed\n", + info->func_id); + return (err == MEM_SEC_UNNECESSARY) ? 
0 : err; + } + + info->va_base = ioremap(info->pa_base, info->gpa_len0); + info->va_end = info->va_base + info->gpa_len0; + info->page_num = info->gpa_len0 / PAGE_SIZE; + info->need_secure_mem = true; + info->bits_nr = info->page_num; + info->bitmap = bitmap_zalloc(info->bits_nr, GFP_KERNEL); + if (!info->bitmap) { + cqm_err(handle->dev_hdl, "func_id %u bitmap_zalloc failed\n", + info->func_id); + iounmap(info->va_base); + return -ENOMEM; + } + + hinic3_memsec_proc_init(ex_handle); + return err; +} + +int cqm_secure_mem_deinit(void *ex_handle) +{ + struct tag_cqm_secure_mem *info = NULL; + struct tag_cqm_handle *cqm_handle = NULL; + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + + if (!handle) + return -EINVAL; + + // only vf in vm need secure mem + if (!cqm_need_secure_mem(ex_handle)) + return 0; + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + info = &cqm_handle->secure_mem; + + if (info && info->va_base) + iounmap(info->va_base); + + if (info && info->bitmap) + bitmap_free(info->bitmap); + + hinic3_memsec_proc_deinit(ex_handle); + return 0; +} + +void *cqm_get_secure_mem_pages(struct hinic3_hwdev *handle, u32 order, dma_addr_t *pa_base) +{ + struct tag_cqm_secure_mem *info = NULL; + struct tag_cqm_handle *cqm_handle = NULL; + unsigned int nr; + unsigned long *bitmap = NULL; + unsigned long index; + unsigned long flags; + + if (!handle || !(handle->cqm_hdl)) { + pr_err("[memsec]%s null pointer\n", __func__); + return NULL; + } + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + info = &cqm_handle->secure_mem; + bitmap = info->bitmap; + nr = 1 << order; + + if (!bitmap) { + cqm_err(handle->dev_hdl, "[memsec] %s bitmap null\n", __func__); + return NULL; + } + + spin_lock_irqsave(&info->bitmap_lock, flags); + + index = (order) ?
bitmap_find_next_zero_area(bitmap, info->bits_nr, 0, nr, 0) : + find_first_zero_bit(bitmap, info->bits_nr); + if (index >= info->bits_nr) { + spin_unlock_irqrestore(&info->bitmap_lock, flags); + cqm_err(handle->dev_hdl, + "can not find continuous memory, size %d pages, weight %d\n", + nr, bitmap_weight(bitmap, info->bits_nr)); + return NULL; + } + + bitmap_set(bitmap, index, nr); + info->alloc_cnt++; + spin_unlock_irqrestore(&info->bitmap_lock, flags); + + *pa_base = info->pa_base + index * PAGE_SIZE; + return (void *)(info->va_base + index * PAGE_SIZE); +} + +void cqm_free_secure_mem_pages(struct hinic3_hwdev *handle, void *va, u32 order) +{ + struct tag_cqm_secure_mem *info = NULL; + struct tag_cqm_handle *cqm_handle = NULL; + unsigned int nr; + unsigned long *bitmap = NULL; + unsigned long index; + unsigned long flags; + + if (!handle || !(handle->cqm_hdl)) { + pr_err("%s null pointer\n", __func__); + return; + } + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + info = &cqm_handle->secure_mem; + bitmap = info->bitmap; + nr = 1UL << order; + + if (!bitmap) { + cqm_err(handle->dev_hdl, "%s bitmap null\n", __func__); + return; + } + + if (va < info->va_base || va > (info->va_end - PAGE_SIZE) || + !PAGE_ALIGNED((va - info->va_base))) + cqm_err(handle->dev_hdl, "%s va wrong value\n", __func__); + + index = SECURE_VA_TO_IDX(va, info->va_base); + spin_lock_irqsave(&info->bitmap_lock, flags); + bitmap_clear(bitmap, index, nr); + info->free_cnt++; + spin_unlock_irqrestore(&info->bitmap_lock, flags); +} diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_memsec.h b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_memsec.h new file mode 100644 index 000000000..7d4a42268 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_memsec.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved. 
*/ +#ifndef CQM_MEMSEC_H +#define CQM_MEMSEC_H + +#include <linux/pci.h> +#include "hinic3_hwdev.h" +#include "hinic3_crm.h" +#include "cqm_define.h" + +#define CQM_GET_MEMSEC_CTX_GPA 19 +#define CQM_INT_ADDR_SHIFT 32 +#define CQM_SECURE_MEM_ALIGNED_SIZE (2 * 1024 * 1024) + +bool cqm_need_secure_mem(void *hwdev); +void *cqm_get_secure_mem_pages(struct hinic3_hwdev *handle, u32 order, dma_addr_t *pa_base); +void cqm_free_secure_mem_pages(struct hinic3_hwdev *handle, void *va, u32 order); +int cqm_secure_mem_init(void *ex_handle); +int cqm_secure_mem_deinit(void *ex_handle); +void hinic3_memsec_proc_init(void *hwdev); +void hinic3_memsec_proc_deinit(void *hwdev); + +#endif /* CQM_MEMSEC_H */ diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_object.c b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_object.c new file mode 100644 index 000000000..ea21ebf9b --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_object.c @@ -0,0 +1,1664 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/device.h> +#include <linux/gfp.h> +#include <linux/mm.h> + +#include "ossl_knl.h" +#include "hinic3_crm.h" +#include "hinic3_hw.h" +#include "hinic3_mt.h" +#include "hinic3_hwdev.h" + +#include "cqm_bitmap_table.h" +#include "cqm_bat_cla.h" +#include "cqm_object_intern.h" +#include "cqm_main.h" +#include "cqm_object.h" + +/** + * Prototype : cqm_object_qpc_mpt_create + * Description : create QPC/MPT + * Input : void *ex_handle + * u32 service_type + * enum cqm_object_type object_type: must be mpt or ctx. + * u32 object_size: unit is Byte + * void *object_priv: private structure of the service layer, + * it can be NULL. + * u32 index: apply for the reserved qpn based on this value; + * if automatic allocation is required, + * please fill CQM_INDEX_INVALID. + * Output : None + * Return Value : struct tag_cqm_qpc_mpt * + * 1.Date : 2016/2/16 + * Modification : Created function + */ +struct tag_cqm_qpc_mpt *cqm_object_qpc_mpt_create(void *ex_handle, u32 service_type, + enum cqm_object_type object_type, + u32 object_size, void *object_priv, u32 index, + bool low2bit_align_en) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_qpc_mpt_info *qpc_mpt_info = NULL; + struct tag_cqm_handle *cqm_handle = NULL; + s32 ret = CQM_FAIL; + u32 relative_index; + u32 fake_func_id; + u32 index_num = index; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ex_handle is null\n", __func__); + return NULL; + } + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_qpc_mpt_create_cnt); + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + if (unlikely(!cqm_handle)) { + pr_err("[CQM]%s: cqm_handle is null\n", __func__); + return NULL; + } + + if (service_type >= CQM_SERVICE_T_MAX || !cqm_handle->service[service_type].has_register) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(service_type)); + return NULL; + } + + if (object_type != CQM_OBJECT_SERVICE_CTX && object_type != CQM_OBJECT_MPT) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object_type)); + return NULL; + } + + /* fake vf adaption, switch to corresponding VF. 
*/ + if (cqm_handle->func_capability.fake_func_type == CQM_FAKE_FUNC_PARENT) { + fake_func_id = index_num / cqm_handle->func_capability.fake_vf_qpc_number; + relative_index = index_num % cqm_handle->func_capability.fake_vf_qpc_number; + + if ((s32)fake_func_id >= cqm_get_child_func_number(cqm_handle)) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(fake_func_id)); + return NULL; + } + + index_num = relative_index; + cqm_handle = cqm_handle->fake_cqm_handle[fake_func_id]; + } + + qpc_mpt_info = kmalloc(sizeof(*qpc_mpt_info), GFP_ATOMIC | __GFP_ZERO); + if (!qpc_mpt_info) + return NULL; + + qpc_mpt_info->common.object.service_type = service_type; + qpc_mpt_info->common.object.object_type = object_type; + qpc_mpt_info->common.object.object_size = object_size; + atomic_set(&qpc_mpt_info->common.object.refcount, 1); + init_completion(&qpc_mpt_info->common.object.free); + qpc_mpt_info->common.object.cqm_handle = cqm_handle; + qpc_mpt_info->common.xid = index_num; + + qpc_mpt_info->common.priv = object_priv; + + ret = cqm_qpc_mpt_create(&qpc_mpt_info->common.object, low2bit_align_en); + if (ret == CQM_SUCCESS) + return &qpc_mpt_info->common; + + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_qpc_mpt_create)); + kfree(qpc_mpt_info); + return NULL; +} +EXPORT_SYMBOL(cqm_object_qpc_mpt_create); + +/** + * Prototype : cqm_object_recv_queue_create + * Description : when srq is used, create rq. + * Input : void *ex_handle + * u32 service_type + * enum cqm_object_type object_type + * u32 init_rq_num + * u32 container_size + * u32 wqe_size + * void *object_priv + * Output : None + * Return Value : struct tag_cqm_queue * + * 1.Date : 2016/2/16 + * Modification : Created function + */ +struct tag_cqm_queue *cqm_object_recv_queue_create(void *ex_handle, u32 service_type, + enum cqm_object_type object_type, + u32 init_rq_num, u32 container_size, + u32 wqe_size, void *object_priv) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_nonrdma_qinfo *rq_qinfo = NULL; + struct tag_cqm_handle *cqm_handle = NULL; + s32 ret; + u32 i; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ex_handle is null\n", __func__); + return NULL; + } + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_rq_create_cnt); + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + if (unlikely(!cqm_handle)) { + pr_err("[CQM]%s: cqm_handle is null\n", __func__); + return NULL; + } + + if (object_type != CQM_OBJECT_NONRDMA_EMBEDDED_RQ) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object_type)); + return NULL; + } + + if (service_type != CQM_SERVICE_T_TOE) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(service_type)); + return NULL; + } + + if (!cqm_handle->service[service_type].has_register) { + cqm_err(handle->dev_hdl, "Rq create: service_type %u has not registered\n", + service_type); + return NULL; + } + + /* 1. create rq qinfo */ + rq_qinfo = kmalloc(sizeof(*rq_qinfo), GFP_KERNEL | __GFP_ZERO); + if (!rq_qinfo) + return NULL; + + /* 2. 
init rq qinfo */ + rq_qinfo->container_size = container_size; + rq_qinfo->wqe_size = wqe_size; + rq_qinfo->wqe_per_buf = container_size / wqe_size - 1; + + rq_qinfo->common.queue_link_mode = CQM_QUEUE_TOE_SRQ_LINK_MODE; + rq_qinfo->common.priv = object_priv; + rq_qinfo->common.object.cqm_handle = cqm_handle; + /* this object_size is used as container num */ + rq_qinfo->common.object.object_size = init_rq_num; + rq_qinfo->common.object.service_type = service_type; + rq_qinfo->common.object.object_type = object_type; + atomic_set(&rq_qinfo->common.object.refcount, 1); + init_completion(&rq_qinfo->common.object.free); + + /* 3. create queue header */ + rq_qinfo->common.q_header_vaddr = + cqm_kmalloc_align(sizeof(struct tag_cqm_queue_header), + GFP_KERNEL | __GFP_ZERO, CQM_QHEAD_ALIGN_ORDER); + if (!rq_qinfo->common.q_header_vaddr) + goto err1; + + rq_qinfo->common.q_header_paddr = + pci_map_single(cqm_handle->dev, rq_qinfo->common.q_header_vaddr, + sizeof(struct tag_cqm_queue_header), PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(cqm_handle->dev, + rq_qinfo->common.q_header_paddr) != 0) { + cqm_err(handle->dev_hdl, CQM_MAP_FAIL(q_header_vaddr)); + goto err2; + } + + /* 4. create rq */ + for (i = 0; i < init_rq_num; i++) { + ret = cqm_container_create(&rq_qinfo->common.object, NULL, + true); + if (ret == CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_container_create)); + goto err3; + } + if (!rq_qinfo->common.head_container) + rq_qinfo->common.head_container = + rq_qinfo->common.tail_container; + } + + return &rq_qinfo->common; + +err3: + cqm_container_free(rq_qinfo->common.head_container, NULL, + &rq_qinfo->common); +err2: + cqm_kfree_align(rq_qinfo->common.q_header_vaddr); + rq_qinfo->common.q_header_vaddr = NULL; +err1: + kfree(rq_qinfo); + return NULL; +} +EXPORT_SYMBOL(cqm_object_recv_queue_create); + +/** + * Prototype : cqm_object_share_recv_queue_add_container + * Description : allocate new container for srq + * Input : struct tag_cqm_queue *common + * Output : None + * Return Value : tail_container address + * 1.Date : 2016/2/14 + * Modification : Created function + */ +s32 cqm_object_share_recv_queue_add_container(struct tag_cqm_queue *common) +{ + if (unlikely(!common)) { + pr_err("[CQM]%s: common is null\n", __func__); + return CQM_FAIL; + } + + return cqm_container_create(&common->object, NULL, true); +} +EXPORT_SYMBOL(cqm_object_share_recv_queue_add_container); + +s32 cqm_object_srq_add_container_free(struct tag_cqm_queue *common, u8 **container_addr) +{ + if (unlikely(!common)) { + pr_err("[CQM]%s: common is null\n", __func__); + return CQM_FAIL; + } + + return cqm_container_create(&common->object, container_addr, false); +} +EXPORT_SYMBOL(cqm_object_srq_add_container_free); + +static bool cqm_object_share_recv_queue_param_check(struct hinic3_hwdev *handle, u32 service_type, + enum cqm_object_type object_type, + u32 container_size, u32 wqe_size) +{ + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + + /* service_type must be CQM_SERVICE_T_TOE */ + if (service_type != CQM_SERVICE_T_TOE) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(service_type)); + return false; + } + + /* exception of service registration check */ + if (!cqm_handle->service[service_type].has_register) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(service_type)); + return false; + } + + /* container size2^N aligning */ + if (!cqm_check_align(container_size)) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(container_size)); + return false; + } + + /* external 
parameter check: object_type must be + * CQM_OBJECT_NONRDMA_SRQ + */ + if (object_type != CQM_OBJECT_NONRDMA_SRQ) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object_type)); + return false; + } + + /* wqe_size, the divisor, cannot be 0 */ + if (wqe_size == 0) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(wqe_size)); + return false; + } + + return true; +} + +/** + * Prototype : cqm_object_share_recv_queue_create + * Description : create srq + * Input : void *ex_handle + * u32 service_type + * enum cqm_object_type object_type + * u32 container_number + * u32 container_size + * u32 wqe_size + * Output : None + * Return Value : struct tag_cqm_queue * + * 1.Date : 2016/2/1 + * Modification : Created function + */ +struct tag_cqm_queue *cqm_object_share_recv_queue_create(void *ex_handle, u32 service_type, + enum cqm_object_type object_type, + u32 container_number, u32 container_size, + u32 wqe_size) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_nonrdma_qinfo *srq_qinfo = NULL; + struct tag_cqm_handle *cqm_handle = NULL; + struct tag_cqm_service *service = NULL; + s32 ret; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ex_handle is null\n", __func__); + return NULL; + } + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_srq_create_cnt); + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + if (unlikely(!cqm_handle)) { + pr_err("[CQM]%s: cqm_handle is null\n", __func__); + return NULL; + } + + if (!cqm_object_share_recv_queue_param_check(handle, service_type, object_type, + container_size, wqe_size)) + return NULL; + + /* 2. create and initialize srq info */ + srq_qinfo = kmalloc(sizeof(*srq_qinfo), GFP_KERNEL | __GFP_ZERO); + if (!srq_qinfo) + return NULL; + + srq_qinfo->common.object.cqm_handle = cqm_handle; + srq_qinfo->common.object.object_size = container_number; + srq_qinfo->common.object.object_type = object_type; + srq_qinfo->common.object.service_type = service_type; + atomic_set(&srq_qinfo->common.object.refcount, 1); + init_completion(&srq_qinfo->common.object.free); + + srq_qinfo->common.queue_link_mode = CQM_QUEUE_TOE_SRQ_LINK_MODE; + srq_qinfo->common.priv = NULL; + srq_qinfo->wqe_per_buf = container_size / wqe_size - 1; + srq_qinfo->wqe_size = wqe_size; + srq_qinfo->container_size = container_size; + service = &cqm_handle->service[service_type]; + srq_qinfo->q_ctx_size = service->service_template.srq_ctx_size; + + /* 3. create srq and srq ctx */ + ret = cqm_share_recv_queue_create(&srq_qinfo->common.object); + if (ret == CQM_SUCCESS) + return &srq_qinfo->common; + + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_share_recv_queue_create)); + kfree(srq_qinfo); + return NULL; +} +EXPORT_SYMBOL(cqm_object_share_recv_queue_create); + +/** + * Prototype : cqm_object_fc_rq_create + * Description : RQ creation temporarily provided for the FC service. + * Special requirement: The number of valid WQEs in the queue + * must meet the number of transferred WQEs. Linkwqe can only be + * filled at the end of the page. The actual valid number exceeds + * the requirement. In this case, the service needs to be + * informed of the additional number to be created. 
+ * Input : void *ex_handle + * u32 service_type + * enum cqm_object_type object_type + * u32 wqe_number: Number of valid WQEs + * u32 wqe_size + * void *object_priv + * Output : None + * 1.Date : 2016/3/1 + * Modification : Created function + */ +struct tag_cqm_queue *cqm_object_fc_srq_create(void *ex_handle, u32 service_type, + enum cqm_object_type object_type, + u32 wqe_number, u32 wqe_size, + void *object_priv) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_nonrdma_qinfo *nonrdma_qinfo = NULL; + struct tag_cqm_handle *cqm_handle = NULL; + struct tag_cqm_service *service = NULL; + u32 valid_wqe_per_buffer; + u32 wqe_sum; /* include linkwqe, normal wqe */ + u32 buf_size; + u32 buf_num; + s32 ret; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ex_handle is null\n", __func__); + return NULL; + } + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_fc_srq_create_cnt); + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + if (unlikely(!cqm_handle)) { + pr_err("[CQM]%s: cqm_handle is null\n", __func__); + return NULL; + } + + /* service_type must be fc */ + if (service_type != CQM_SERVICE_T_FC) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(service_type)); + return NULL; + } + + /* exception of service unregistered check */ + if (!cqm_handle->service[service_type].has_register) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(service_type)); + return NULL; + } + + /* wqe_size cannot exceed PAGE_SIZE and must be 2^n aligned. */ + if (wqe_size >= PAGE_SIZE || (!cqm_check_align(wqe_size))) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(wqe_size)); + return NULL; + } + + /* FC RQ is SRQ. (Different from the SRQ concept of TOE, FC indicates + * that packets received by all flows are placed on the same RQ. + * The SRQ of TOE is similar to the RQ resource pool.) + */ + if (object_type != CQM_OBJECT_NONRDMA_SRQ) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object_type)); + return NULL; + } + + service = &cqm_handle->service[service_type]; + buf_size = (u32)(PAGE_SIZE << (service->buf_order)); + /* subtract 1 link wqe */ + valid_wqe_per_buffer = buf_size / wqe_size - 1; + buf_num = wqe_number / valid_wqe_per_buffer; + if (wqe_number % valid_wqe_per_buffer != 0) + buf_num++; + + /* calculate the total number of WQEs */ + wqe_sum = buf_num * (valid_wqe_per_buffer + 1); + nonrdma_qinfo = kmalloc(sizeof(*nonrdma_qinfo), GFP_KERNEL | __GFP_ZERO); + if (!nonrdma_qinfo) + return NULL; + + /* initialize object member */ + nonrdma_qinfo->common.object.service_type = service_type; + nonrdma_qinfo->common.object.object_type = object_type; + /* total number of WQEs */ + nonrdma_qinfo->common.object.object_size = wqe_sum; + atomic_set(&nonrdma_qinfo->common.object.refcount, 1); + init_completion(&nonrdma_qinfo->common.object.free); + nonrdma_qinfo->common.object.cqm_handle = cqm_handle; + + /* Initialize the doorbell used by the current queue. + * The default doorbell is the hardware doorbell. + */ + nonrdma_qinfo->common.current_q_doorbell = CQM_HARDWARE_DOORBELL; + /* Currently, the connection mode is fixed. In the future, + * the service needs to transfer the connection mode. + */ + nonrdma_qinfo->common.queue_link_mode = CQM_QUEUE_RING_MODE; + + /* initialize public members */ + nonrdma_qinfo->common.priv = object_priv; + nonrdma_qinfo->common.valid_wqe_num = wqe_sum - buf_num; + + /* initialize internal private members */ + nonrdma_qinfo->wqe_size = wqe_size; + /* RQ (also called SRQ of FC) created by FC services, + * CTX needs to be created. 
+ */ + nonrdma_qinfo->q_ctx_size = service->service_template.srq_ctx_size; + + ret = cqm_nonrdma_queue_create(&nonrdma_qinfo->common.object); + if (ret == CQM_SUCCESS) + return &nonrdma_qinfo->common; + + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_fc_queue_create)); + kfree(nonrdma_qinfo); + return NULL; +} +EXPORT_SYMBOL(cqm_object_fc_srq_create); + +static bool cqm_object_nonrdma_queue_param_check(struct hinic3_hwdev *handle, u32 service_type, + enum cqm_object_type object_type, u32 wqe_size) +{ + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + + /* exception of service registrion check */ + if (!cqm_handle->service[service_type].has_register) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(service_type)); + return false; + } + /* wqe_size can't be more than PAGE_SIZE, can't be zero, must be power + * of 2 the function of cqm_check_align is to check above + */ + if (wqe_size >= PAGE_SIZE || (!cqm_check_align(wqe_size))) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(wqe_size)); + return false; + } + + /* nonrdma supports: RQ, SQ, SRQ, CQ, SCQ */ + if (object_type < CQM_OBJECT_NONRDMA_EMBEDDED_RQ || + object_type > CQM_OBJECT_NONRDMA_SCQ) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object_type)); + return false; + } + + return true; +} + +/** + * Prototype : cqm_object_nonrdma_queue_create + * Description : create nonrdma queue + * Input : void *ex_handle + * u32 service_type + * enum cqm_object_type object_type: can be embedded RQ/SQ/CQ and + * SRQ/SCQ. + * u32 wqe_number: include link wqe + * u32 wqe_size: fixed length, must be power of 2 + * void *object_priv: private structure of the service layer, + * it can be NULL. + * Output : None + * Return Value : struct tag_cqm_queue * + * 1.Date : 2015/4/15 + * Modification : Created function + */ +struct tag_cqm_queue *cqm_object_nonrdma_queue_create(void *ex_handle, u32 service_type, + enum cqm_object_type object_type, + u32 wqe_number, u32 wqe_size, + void *object_priv) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_nonrdma_qinfo *nonrdma_qinfo = NULL; + struct tag_cqm_handle *cqm_handle = NULL; + struct tag_cqm_service *service = NULL; + s32 ret; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ex_handle is null\n", __func__); + return NULL; + } + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_nonrdma_queue_create_cnt); + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + if (unlikely(!cqm_handle)) { + pr_err("[CQM]%s: cqm_handle is null\n", __func__); + return NULL; + } + + if (!cqm_object_nonrdma_queue_param_check(handle, service_type, object_type, wqe_size)) + return NULL; + + nonrdma_qinfo = kmalloc(sizeof(*nonrdma_qinfo), GFP_KERNEL | __GFP_ZERO); + if (!nonrdma_qinfo) + return NULL; + + nonrdma_qinfo->common.object.service_type = service_type; + nonrdma_qinfo->common.object.object_type = object_type; + nonrdma_qinfo->common.object.object_size = wqe_number; + atomic_set(&nonrdma_qinfo->common.object.refcount, 1); + init_completion(&nonrdma_qinfo->common.object.free); + nonrdma_qinfo->common.object.cqm_handle = cqm_handle; + + /* Initialize the doorbell used by the current queue. + * The default value is hardware doorbell + */ + nonrdma_qinfo->common.current_q_doorbell = CQM_HARDWARE_DOORBELL; + /* Currently, the link mode is hardcoded and needs to be transferred by + * the service side. 
+ */ + nonrdma_qinfo->common.queue_link_mode = CQM_QUEUE_RING_MODE; + + nonrdma_qinfo->common.priv = object_priv; + + /* Initialize internal private members */ + nonrdma_qinfo->wqe_size = wqe_size; + service = &cqm_handle->service[service_type]; + switch (object_type) { + case CQM_OBJECT_NONRDMA_SCQ: + nonrdma_qinfo->q_ctx_size = service->service_template.scq_ctx_size; + break; + case CQM_OBJECT_NONRDMA_SRQ: + /* Currently, the SRQ of the service is created through a + * dedicated interface. + */ + nonrdma_qinfo->q_ctx_size = service->service_template.srq_ctx_size; + break; + default: + break; + } + + ret = cqm_nonrdma_queue_create(&nonrdma_qinfo->common.object); + if (ret == CQM_SUCCESS) + return &nonrdma_qinfo->common; + + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_nonrdma_queue_create)); + kfree(nonrdma_qinfo); + return NULL; +} +EXPORT_SYMBOL(cqm_object_nonrdma_queue_create); + +static bool cqm_object_rdma_queue_param_check(struct hinic3_hwdev *handle, u32 service_type, + enum cqm_object_type object_type) +{ + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + + /* service_type must be CQM_SERVICE_T_ROCE */ + if (service_type != CQM_SERVICE_T_ROCE) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(service_type)); + return false; + } + /* exception of service registrion check */ + if (!cqm_handle->service[service_type].has_register) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(service_type)); + return false; + } + + /* rdma supports: QP, SRQ, SCQ */ + if (object_type > CQM_OBJECT_RDMA_SCQ || object_type < CQM_OBJECT_RDMA_QP) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object_type)); + return false; + } + + return true; +} + +/** + * Prototype : cqm_object_rdma_queue_create + * Description : create rdma queue + * Input : void *ex_handle + * u32 service_type + * enum cqm_object_type object_type: can be QP and SRQ/SCQ. + * u32 object_size + * void *object_priv: private structure of the service layer, + * it can be NULL. 
+ * bool room_header_alloc: Whether to apply for queue room and + * header space + * u32 xid + * Output : None + * Return Value : struct tag_cqm_queue * + * 1.Date : 2015/4/15 + * Modification : Created function + */ +struct tag_cqm_queue *cqm_object_rdma_queue_create(void *ex_handle, u32 service_type, + enum cqm_object_type object_type, + u32 object_size, void *object_priv, + bool room_header_alloc, u32 xid) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_rdma_qinfo *rdma_qinfo = NULL; + struct tag_cqm_handle *cqm_handle = NULL; + struct tag_cqm_service *service = NULL; + s32 ret; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ex_handle is null\n", __func__); + return NULL; + } + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_rdma_queue_create_cnt); + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + if (unlikely(!cqm_handle)) { + pr_err("[CQM]%s: cqm_handle is null\n", __func__); + return NULL; + } + + if (!cqm_object_rdma_queue_param_check(handle, service_type, object_type)) + return NULL; + + rdma_qinfo = kmalloc(sizeof(*rdma_qinfo), GFP_KERNEL | __GFP_ZERO); + if (!rdma_qinfo) + return NULL; + + rdma_qinfo->common.object.service_type = service_type; + rdma_qinfo->common.object.object_type = object_type; + rdma_qinfo->common.object.object_size = object_size; + atomic_set(&rdma_qinfo->common.object.refcount, 1); + init_completion(&rdma_qinfo->common.object.free); + rdma_qinfo->common.object.cqm_handle = cqm_handle; + rdma_qinfo->common.queue_link_mode = CQM_QUEUE_RDMA_QUEUE_MODE; + rdma_qinfo->common.priv = object_priv; + rdma_qinfo->common.current_q_room = CQM_RDMA_Q_ROOM_1; + rdma_qinfo->room_header_alloc = room_header_alloc; + rdma_qinfo->common.index = xid; + + /* Initializes the doorbell used by the current queue. 
+ * The default value is hardware doorbell + */ + rdma_qinfo->common.current_q_doorbell = CQM_HARDWARE_DOORBELL; + + service = &cqm_handle->service[service_type]; + switch (object_type) { + case CQM_OBJECT_RDMA_SCQ: + rdma_qinfo->q_ctx_size = service->service_template.scq_ctx_size; + break; + case CQM_OBJECT_RDMA_SRQ: + rdma_qinfo->q_ctx_size = service->service_template.srq_ctx_size; + break; + default: + break; + } + + ret = cqm_rdma_queue_create(&rdma_qinfo->common.object); + if (ret == CQM_SUCCESS) + return &rdma_qinfo->common; + + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_rdma_queue_create)); + kfree(rdma_qinfo); + return NULL; +} +EXPORT_SYMBOL(cqm_object_rdma_queue_create); + +/** + * Prototype : cqm_object_rdma_table_get + * Description : create mtt and rdmarc of the rdma service + * Input : void *ex_handle + * u32 service_type + * enum cqm_object_type object_type + * u32 index_base: start of index + * u32 index_number + * Output : None + * Return Value : struct tag_cqm_mtt_rdmarc * + * 1.Date : 2015/4/15 + * Modification : Created function + */ +struct tag_cqm_mtt_rdmarc *cqm_object_rdma_table_get(void *ex_handle, u32 service_type, + enum cqm_object_type object_type, + u32 index_base, u32 index_number) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_rdma_table *rdma_table = NULL; + struct tag_cqm_handle *cqm_handle = NULL; + s32 ret; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ex_handle is null\n", __func__); + return NULL; + } + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_rdma_table_create_cnt); + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + if (unlikely(!cqm_handle)) { + pr_err("[CQM]%s: cqm_handle is null\n", __func__); + return NULL; + } + + /* service_type must be CQM_SERVICE_T_ROCE */ + if (service_type != CQM_SERVICE_T_ROCE) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(service_type)); + return NULL; + } + + /* exception of service registrion check */ + if (!cqm_handle->service[service_type].has_register) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(service_type)); + return NULL; + } + + if (object_type != CQM_OBJECT_MTT && + object_type != CQM_OBJECT_RDMARC) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object_type)); + return NULL; + } + + rdma_table = kmalloc(sizeof(*rdma_table), GFP_KERNEL | __GFP_ZERO); + if (!rdma_table) + return NULL; + + rdma_table->common.object.service_type = service_type; + rdma_table->common.object.object_type = object_type; + rdma_table->common.object.object_size = (u32)(index_number * + sizeof(dma_addr_t)); + atomic_set(&rdma_table->common.object.refcount, 1); + init_completion(&rdma_table->common.object.free); + rdma_table->common.object.cqm_handle = cqm_handle; + rdma_table->common.index_base = index_base; + rdma_table->common.index_number = index_number; + + ret = cqm_rdma_table_create(&rdma_table->common.object); + if (ret == CQM_SUCCESS) + return &rdma_table->common; + + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_rdma_table_create)); + kfree(rdma_table); + return NULL; +} +EXPORT_SYMBOL(cqm_object_rdma_table_get); + +static s32 cqm_qpc_mpt_delete_ret(struct tag_cqm_object *object) +{ + u32 object_type; + + object_type = object->object_type; + switch (object_type) { + case CQM_OBJECT_SERVICE_CTX: + case CQM_OBJECT_MPT: + cqm_qpc_mpt_delete(object); + return CQM_SUCCESS; + default: + return CQM_FAIL; + } +} + +static s32 cqm_nonrdma_queue_delete_ret(struct tag_cqm_object *object) +{ + u32 object_type; + + object_type = object->object_type; + switch (object_type) { + case 
CQM_OBJECT_NONRDMA_EMBEDDED_RQ: + case CQM_OBJECT_NONRDMA_EMBEDDED_SQ: + case CQM_OBJECT_NONRDMA_EMBEDDED_CQ: + case CQM_OBJECT_NONRDMA_SCQ: + cqm_nonrdma_queue_delete(object); + return CQM_SUCCESS; + case CQM_OBJECT_NONRDMA_SRQ: + if (object->service_type == CQM_SERVICE_T_TOE) + cqm_share_recv_queue_delete(object); + else + cqm_nonrdma_queue_delete(object); + + return CQM_SUCCESS; + default: + return CQM_FAIL; + } +} + +static s32 cqm_rdma_queue_delete_ret(struct tag_cqm_object *object) +{ + u32 object_type; + + object_type = object->object_type; + switch (object_type) { + case CQM_OBJECT_RDMA_QP: + case CQM_OBJECT_RDMA_SRQ: + case CQM_OBJECT_RDMA_SCQ: + cqm_rdma_queue_delete(object); + return CQM_SUCCESS; + default: + return CQM_FAIL; + } +} + +static s32 cqm_rdma_table_delete_ret(struct tag_cqm_object *object) +{ + u32 object_type; + + object_type = object->object_type; + switch (object_type) { + case CQM_OBJECT_MTT: + case CQM_OBJECT_RDMARC: + cqm_rdma_table_delete(object); + return CQM_SUCCESS; + default: + return CQM_FAIL; + } +} + +/** + * Prototype : cqm_object_delete + * Description : Deletes a created object. This function will be sleep and wait + * for all operations on this object to be performed. + * Input : struct tag_cqm_object *object + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void cqm_object_delete(struct tag_cqm_object *object) +{ + struct tag_cqm_handle *cqm_handle = NULL; + struct hinic3_hwdev *handle = NULL; + + if (unlikely(!object)) { + pr_err("[CQM]%s: object is null\n", __func__); + return; + } + if (!object->cqm_handle) { + pr_err("[CQM]object del: cqm_handle is null, service type %u, refcount %d\n", + object->service_type, (int)object->refcount.counter); + kfree(object); + return; + } + + cqm_handle = (struct tag_cqm_handle *)object->cqm_handle; + + if (!cqm_handle->ex_handle) { + pr_err("[CQM]object del: ex_handle is null, service type %u, refcount %d\n", + object->service_type, (int)object->refcount.counter); + kfree(object); + return; + } + + handle = cqm_handle->ex_handle; + + if (object->service_type >= CQM_SERVICE_T_MAX) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object->service_type)); + kfree(object); + return; + } + + if (cqm_qpc_mpt_delete_ret(object) == CQM_SUCCESS) { + kfree(object); + return; + } + + if (cqm_nonrdma_queue_delete_ret(object) == CQM_SUCCESS) { + kfree(object); + return; + } + + if (cqm_rdma_queue_delete_ret(object) == CQM_SUCCESS) { + kfree(object); + return; + } + + if (cqm_rdma_table_delete_ret(object) == CQM_SUCCESS) { + kfree(object); + return; + } + + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object->object_type)); + kfree(object); +} +EXPORT_SYMBOL(cqm_object_delete); + +/** + * Prototype : cqm_object_offset_addr + * Description : Only the rdma table can be searched to obtain the PA and VA + * at the specified offset of the object buffer. + * Input : struct tag_cqm_object *object + * u32 offset: For a rdma table, the offset is the absolute index + * number. + * dma_addr_t *paddr: PA(physical address) + * Output : None + * Return Value : u8 * + * 1.Date : 2015/4/15 + * Modification : Created function + */ +u8 *cqm_object_offset_addr(struct tag_cqm_object *object, u32 offset, dma_addr_t *paddr) +{ + u32 object_type = object->object_type; + + /* The data flow path takes performance into consideration and + * does not check input parameters. 
+ */ + switch (object_type) { + case CQM_OBJECT_MTT: + case CQM_OBJECT_RDMARC: + return cqm_rdma_table_offset_addr(object, offset, paddr); + default: + break; + } + + return NULL; +} +EXPORT_SYMBOL(cqm_object_offset_addr); + +/** + * Prototype : cqm_object_get + * Description : Obtain an object based on the index. + * Input : void *ex_handle + * enum cqm_object_type object_type + * u32 index: support qpn,mptn,scqn,srqn (n->number) + * bool bh + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +struct tag_cqm_object *cqm_object_get(void *ex_handle, enum cqm_object_type object_type, + u32 index, bool bh) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + struct tag_cqm_bat_table *bat_table = &cqm_handle->bat_table; + struct tag_cqm_object_table *object_table = NULL; + struct tag_cqm_cla_table *cla_table = NULL; + struct tag_cqm_object *object = NULL; + + /* The data flow path takes performance into consideration and + * does not check input parameters. + */ + switch (object_type) { + case CQM_OBJECT_SERVICE_CTX: + cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_QPC); + break; + case CQM_OBJECT_MPT: + cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_MPT); + break; + case CQM_OBJECT_RDMA_SRQ: + cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_SRQC); + break; + case CQM_OBJECT_RDMA_SCQ: + case CQM_OBJECT_NONRDMA_SCQ: + cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_SCQC); + break; + default: + return NULL; + } + + if (!cla_table) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_table_get)); + return NULL; + } + + object_table = &cla_table->obj_table; + object = cqm_object_table_get(cqm_handle, object_table, index, bh); + return object; +} +EXPORT_SYMBOL(cqm_object_get); + +/** + * Prototype : cqm_object_put + * Description : This function must be called after the cqm_object_get + * function. Otherwise, the object cannot be released. + * Input : struct tag_cqm_object *object + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void cqm_object_put(struct tag_cqm_object *object) +{ + /* The data flow path takes performance into consideration and + * does not check input parameters. + */ + if (atomic_dec_and_test(&object->refcount) != 0) + complete(&object->free); +} +EXPORT_SYMBOL(cqm_object_put); + +/** + * Prototype : cqm_object_funcid + * Description : Obtain the ID of the function to which the object belongs. + * Input : struct tag_cqm_object *object + * Output : None + * Return Value : If successful, the ID of the function will be returned. + * If fail CQM_FAIL(-1) will be returned. + * 1.Date : 2015/4/15 + * Modification : Created function + */ +s32 cqm_object_funcid(struct tag_cqm_object *object) +{ + struct tag_cqm_handle *cqm_handle = NULL; + + if (unlikely(!object)) { + pr_err("[CQM]%s: object is null\n", __func__); + return CQM_FAIL; + } + if (unlikely(!object->cqm_handle)) { + pr_err("[CQM]%s: cqm_handle is null\n", __func__); + return CQM_FAIL; + } + + cqm_handle = (struct tag_cqm_handle *)object->cqm_handle; + + return cqm_handle->func_attribute.func_global_idx; +} +EXPORT_SYMBOL(cqm_object_funcid); + +/** + * Prototype : cqm_object_resize_alloc_new + * Description : Currently this function is only used for RoCE. + * The CQ buffer is ajusted, but the cqn and cqc remain + * unchanged. 
This function allocates new buffer, but do not + * release old buffer. The valid buffer is still old buffer. + * Input : struct tag_cqm_object *object + * u32 object_size + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/15 + * Modification : Created function + */ +s32 cqm_object_resize_alloc_new(struct tag_cqm_object *object, u32 object_size) +{ + struct tag_cqm_rdma_qinfo *qinfo = (struct tag_cqm_rdma_qinfo *)(void *)object; + struct tag_cqm_handle *cqm_handle = NULL; + struct tag_cqm_service *service = NULL; + struct tag_cqm_buf *q_room_buf = NULL; + struct hinic3_hwdev *handle = NULL; + u32 order, buf_size; + + if (unlikely(!object)) { + pr_err("[CQM]%s: object is null\n", __func__); + return CQM_FAIL; + } + + cqm_handle = (struct tag_cqm_handle *)object->cqm_handle; + if (unlikely(!cqm_handle)) { + pr_err("[CQM]%s: cqm_handle is null\n", __func__); + return CQM_FAIL; + } + handle = cqm_handle->ex_handle; + + /* This interface is used only for the CQ of RoCE service. */ + if (object->service_type == CQM_SERVICE_T_ROCE && + object->object_type == CQM_OBJECT_RDMA_SCQ) { + service = cqm_handle->service + object->service_type; + order = service->buf_order; + buf_size = (u32)(PAGE_SIZE << order); + + if (qinfo->common.current_q_room == CQM_RDMA_Q_ROOM_1) + q_room_buf = &qinfo->common.q_room_buf_2; + else + q_room_buf = &qinfo->common.q_room_buf_1; + + if (qinfo->room_header_alloc) { + q_room_buf->buf_number = ALIGN(object_size, buf_size) / + buf_size; + q_room_buf->page_number = q_room_buf->buf_number << + order; + q_room_buf->buf_size = buf_size; + if (cqm_buf_alloc(cqm_handle, q_room_buf, true) == + CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_buf_alloc)); + return CQM_FAIL; + } + + qinfo->new_object_size = object_size; + return CQM_SUCCESS; + } + + cqm_err(handle->dev_hdl, + CQM_WRONG_VALUE(qinfo->room_header_alloc)); + return CQM_FAIL; + } + + cqm_err(handle->dev_hdl, + "Cq resize alloc: service_type %u object_type %u do not support resize\n", + object->service_type, object->object_type); + return CQM_FAIL; +} +EXPORT_SYMBOL(cqm_object_resize_alloc_new); + +/** + * Prototype : cqm_object_resize_free_new + * Description : Currently this function is only used for RoCE. + * The CQ buffer is ajusted, but the cqn and cqc remain + * unchanged. This function frees new buffer, and is used to deal + * with exceptions. + * Input : struct tag_cqm_object *object + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void cqm_object_resize_free_new(struct tag_cqm_object *object) +{ + struct tag_cqm_rdma_qinfo *qinfo = (struct tag_cqm_rdma_qinfo *)(void *)object; + struct tag_cqm_handle *cqm_handle = NULL; + struct tag_cqm_buf *q_room_buf = NULL; + struct hinic3_hwdev *handle = NULL; + + if (unlikely(!object)) { + pr_err("[CQM]%s: object is null\n", __func__); + return; + } + + cqm_handle = (struct tag_cqm_handle *)object->cqm_handle; + if (unlikely(!cqm_handle)) { + pr_err("[CQM]%s: cqm_handle is null\n", __func__); + return; + } + handle = cqm_handle->ex_handle; + + /* This interface is used only for the CQ of RoCE service. 
*/ + if (object->service_type == CQM_SERVICE_T_ROCE && + object->object_type == CQM_OBJECT_RDMA_SCQ) { + if (qinfo->common.current_q_room == CQM_RDMA_Q_ROOM_1) + q_room_buf = &qinfo->common.q_room_buf_2; + else + q_room_buf = &qinfo->common.q_room_buf_1; + + qinfo->new_object_size = 0; + + cqm_buf_free(q_room_buf, cqm_handle); + } else { + cqm_err(handle->dev_hdl, + "Cq resize free: service_type %u object_type %u do not support resize\n", + object->service_type, object->object_type); + } +} +EXPORT_SYMBOL(cqm_object_resize_free_new); + +/** + * Prototype : cqm_object_resize_free_old + * Description : Currently this function is only used for RoCE. + * The CQ buffer is ajusted, but the cqn and cqc remain + * unchanged. This function frees old buffer and switches the + * valid buffer to new buffer. + * Input : struct tag_cqm_object *object + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void cqm_object_resize_free_old(struct tag_cqm_object *object) +{ + struct tag_cqm_rdma_qinfo *qinfo = (struct tag_cqm_rdma_qinfo *)(void *)object; + struct tag_cqm_handle *cqm_handle = NULL; + struct tag_cqm_buf *q_room_buf = NULL; + + if (unlikely(!object)) { + pr_err("[CQM]%s: object is null\n", __func__); + return; + } + + cqm_handle = (struct tag_cqm_handle *)object->cqm_handle; + if (unlikely(!cqm_handle)) { + pr_err("[CQM]%s: cqm_handle is null\n", __func__); + return; + } + + /* This interface is used only for the CQ of RoCE service. */ + if (object->service_type == CQM_SERVICE_T_ROCE && + object->object_type == CQM_OBJECT_RDMA_SCQ) { + if (qinfo->common.current_q_room == CQM_RDMA_Q_ROOM_1) { + q_room_buf = &qinfo->common.q_room_buf_1; + qinfo->common.current_q_room = CQM_RDMA_Q_ROOM_2; + } else { + q_room_buf = &qinfo->common.q_room_buf_2; + qinfo->common.current_q_room = CQM_RDMA_Q_ROOM_1; + } + + object->object_size = qinfo->new_object_size; + + cqm_buf_free(q_room_buf, cqm_handle); + } +} +EXPORT_SYMBOL(cqm_object_resize_free_old); + +/** + * Prototype : cqm_gid_base + * Description : Obtain the base virtual address of the gid table for FT + * debug. + * Input : void *ex_handle + * Output : None + * 1.Date : 2015/9/8 + * Modification : Created function + */ +void *cqm_gid_base(void *ex_handle) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_cla_table *cla_table = NULL; + struct tag_cqm_bat_table *bat_table = NULL; + struct tag_cqm_handle *cqm_handle = NULL; + struct tag_cqm_buf *cla_z_buf = NULL; + u32 entry_type, i; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ex_handle is null\n", __func__); + return NULL; + } + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + if (unlikely(!cqm_handle)) { + pr_err("[CQM]%s: cqm_handle is null\n", __func__); + return NULL; + } + + bat_table = &cqm_handle->bat_table; + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { + entry_type = bat_table->bat_entry_type[i]; + if (entry_type == CQM_BAT_ENTRY_T_GID) { + cla_table = &bat_table->entry[i]; + cla_z_buf = &cla_table->cla_z_buf; + if (cla_z_buf->buf_list) + return cla_z_buf->buf_list->va; + } + } + + return NULL; +} + +/** + * Prototype : cqm_timer_base + * Description : Obtain the base virtual address of the timer for live + * migration. 
+ * Input : void *ex_handle + * Output : None + * Return Value : void + * 1.Date : 2020/5/21 + * Modification : Created function + */ +void *cqm_timer_base(void *ex_handle) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_cla_table *cla_table = NULL; + struct tag_cqm_bat_table *bat_table = NULL; + struct tag_cqm_handle *cqm_handle = NULL; + struct tag_cqm_buf *cla_z_buf = NULL; + u32 entry_type, i; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ex_handle is null\n", __func__); + return NULL; + } + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + if (unlikely(!cqm_handle)) { + pr_err("[CQM]%s: cqm_handle is null\n", __func__); + return NULL; + } + + /* Timer resource is configured on PPF. */ + if (handle->hwif->attr.func_type != CQM_PPF) { + cqm_err(handle->dev_hdl, "%s: wrong function type:%d\n", + __func__, handle->hwif->attr.func_type); + return NULL; + } + + bat_table = &cqm_handle->bat_table; + for (i = 0; i < CQM_BAT_ENTRY_MAX; i++) { + entry_type = bat_table->bat_entry_type[i]; + if (entry_type != CQM_BAT_ENTRY_T_TIMER) + continue; + + cla_table = &bat_table->entry[i]; + cla_z_buf = &cla_table->cla_z_buf; + + if (!cla_z_buf->direct.va) { + if (cqm_buf_alloc_direct(cqm_handle, cla_z_buf, true) == + CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_buf_alloc_direct)); + return NULL; + } + } + + return cla_z_buf->direct.va; + } + + return NULL; +} +EXPORT_SYMBOL(cqm_timer_base); + +static s32 cqm_function_timer_clear_getindex(struct hinic3_hwdev *ex_handle, u32 *buffer_index, + u32 function_id, u32 timer_page_num, + const struct tag_cqm_buf *cla_z_buf) +{ + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)(ex_handle->cqm_hdl); + struct tag_cqm_func_capability *func_cap = &cqm_handle->func_capability; + u32 index; + + /* Convert functionid and the functionid does not exceed the value range + * of the tiemr buffer. + */ + if (function_id < (func_cap->timer_pf_id_start + func_cap->timer_pf_num) && + function_id >= func_cap->timer_pf_id_start) { + index = function_id - func_cap->timer_pf_id_start; + } else if (function_id < (func_cap->timer_vf_id_start + func_cap->timer_vf_num) && + function_id >= func_cap->timer_vf_id_start) { + index = (function_id - func_cap->timer_vf_id_start) + + func_cap->timer_pf_num; + } else { + cqm_err(ex_handle->dev_hdl, "Timer clear: wrong function_id=0x%x\n", + function_id); + return CQM_FAIL; + } + + if ((index * timer_page_num + timer_page_num) > cla_z_buf->buf_number) { + cqm_err(ex_handle->dev_hdl, + "Timer clear: over cla_z_buf_num, buffer_i=0x%x, zbuf_num=0x%x\n", + index, cla_z_buf->buf_number); + return CQM_FAIL; + } + + *buffer_index = index; + return CQM_SUCCESS; +} + +static void cqm_clear_timer(void *ex_handle, u32 function_id, struct hinic3_hwdev *handle, + struct tag_cqm_cla_table *cla_table) +{ + u32 timer_buffer_size = CQM_TIMER_ALIGN_SCALE_NUM * CQM_TIMER_SIZE_32; + struct tag_cqm_buf *cla_z_buf = &cla_table->cla_z_buf; + u32 timer_page_num, i; + u32 buffer_index = 0; + s32 ret; + + /* During CQM capability initialization, ensure that the basic size of + * the timer buffer page does not exceed 128 x 4 KB. Otherwise, + * clearing the timer buffer of the function is complex. 
+ */ + timer_page_num = timer_buffer_size / + (PAGE_SIZE << cla_table->trunk_order); + if (timer_page_num == 0) { + cqm_err(handle->dev_hdl, + "Timer clear: fail to clear timer, buffer_size=0x%x, trunk_order=0x%x\n", + timer_buffer_size, cla_table->trunk_order); + return; + } + + /* Convert functionid and the functionid does not exceed the value range + * of the tiemr buffer. + */ + ret = cqm_function_timer_clear_getindex(ex_handle, &buffer_index, + function_id, timer_page_num, + cla_z_buf); + if (ret == CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_function_timer_clear_getindex)); + return; + } + + if (cla_table->cla_lvl == CQM_CLA_LVL_1 || + cla_table->cla_lvl == CQM_CLA_LVL_2) { + for (i = buffer_index * timer_page_num; + i < (buffer_index * timer_page_num + timer_page_num); i++) + memset((u8 *)(cla_z_buf->buf_list[i].va), 0, + (PAGE_SIZE << cla_table->trunk_order)); + } else { + cqm_err(handle->dev_hdl, "Timer clear: timer cla lvl: %u, cla_z_buf_num=0x%x\n", + cla_table->cla_lvl, cla_z_buf->buf_number); + cqm_err(handle->dev_hdl, + "Timer clear: buf_i=0x%x, buf_size=0x%x, page_num=0x%x, order=0x%x\n", + buffer_index, timer_buffer_size, timer_page_num, + cla_table->trunk_order); + } +} + +/** + * Prototype : cqm_function_timer_clear + * Description : Clear the timer buffer based on the function ID. + * The function ID starts from 0 and the timer buffer is arranged + * in sequence by function ID. + * Input : void *ex_handle + * u32 functionid + * Output : None + * Return Value : void + * 1.Date : 2016/12/19 + * Modification : Created function + */ +void cqm_function_timer_clear(void *ex_handle, u32 function_id) +{ + /* The timer buffer of one function is 32B*8wheel*2048spoke=128*4k */ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_cla_table *cla_table = NULL; + struct tag_cqm_handle *cqm_handle = NULL; + int loop, i; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ex_handle is null\n", __func__); + return; + } + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_func_timer_clear_cnt); + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + if (unlikely(!cqm_handle)) { + pr_err("[CQM]%s: cqm_handle is null\n", __func__); + return; + } + + if (cqm_handle->func_capability.lb_mode == CQM_LB_MODE_1 || + cqm_handle->func_capability.lb_mode == CQM_LB_MODE_2) { + cla_table = &cqm_handle->bat_table.timer_entry[0]; + loop = CQM_LB_SMF_MAX; + } else { + cla_table = cqm_cla_table_get(&cqm_handle->bat_table, CQM_BAT_ENTRY_T_TIMER); + loop = 1; + } + + if (unlikely(!cla_table)) { + pr_err("[CQM]%s: cla_table is null\n", __func__); + return; + } + for (i = 0; i < loop; i++) { + cqm_clear_timer(ex_handle, function_id, handle, cla_table); + cla_table++; + } +} +EXPORT_SYMBOL(cqm_function_timer_clear); + +/** + * Prototype : cqm_function_hash_buf_clear + * Description : clear hash buffer based on global function_id + * Input : void *ex_handle + * s32 global_funcid + * Output : None + * Return Value : None + * 1.Date : 2017/11/27 + * Modification : Created function + * 2.Date : 2021/02/23 + * Modification : Add para func_id; clear hash buf by func_id + */ +void cqm_function_hash_buf_clear(void *ex_handle, s32 global_funcid) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_func_capability *func_cap = NULL; + struct tag_cqm_cla_table *cla_table = NULL; + struct tag_cqm_handle *cqm_handle = NULL; + struct tag_cqm_buf *cla_z_buf = NULL; + s32 fake_funcid; + u32 i; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: 
ex_handle is null\n", __func__); + return; + } + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_func_hash_buf_clear_cnt); + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + if (unlikely(!cqm_handle)) { + pr_err("[CQM]%s: cqm_handle is null\n", __func__); + return; + } + func_cap = &cqm_handle->func_capability; + + /* fake vf adaption, switch to corresponding VF. */ + if (func_cap->fake_func_type == CQM_FAKE_FUNC_PARENT) { + fake_funcid = global_funcid - + (s32)(func_cap->fake_cfg[0].child_func_start); + cqm_info(handle->dev_hdl, "fake_funcid =%d\n", fake_funcid); + if (fake_funcid < 0 || fake_funcid >= CQM_FAKE_FUNC_MAX) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(fake_funcid)); + return; + } + + cqm_handle = cqm_handle->fake_cqm_handle[fake_funcid]; + } + + cla_table = cqm_cla_table_get(&cqm_handle->bat_table, + CQM_BAT_ENTRY_T_HASH); + if (unlikely(!cla_table)) { + pr_err("[CQM]%s: cla_table is null\n", __func__); + return; + } + cla_z_buf = &cla_table->cla_z_buf; + + for (i = 0; i < cla_z_buf->buf_number; i++) + memset(cla_z_buf->buf_list[i].va, 0, cla_z_buf->buf_size); +} +EXPORT_SYMBOL(cqm_function_hash_buf_clear); + +void cqm_srq_used_rq_container_delete(struct tag_cqm_object *object, u8 *container) +{ + struct tag_cqm_queue *common = container_of(object, struct tag_cqm_queue, object); + struct tag_cqm_nonrdma_qinfo *qinfo = container_of(common, struct tag_cqm_nonrdma_qinfo, + common); + u32 link_wqe_offset = qinfo->wqe_per_buf * qinfo->wqe_size; + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)(common->object.cqm_handle); + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_srq_linkwqe *srq_link_wqe = NULL; + dma_addr_t addr; + + /* 1. Obtain the current container pa through link wqe table, + * unmap pa + */ + srq_link_wqe = (struct tag_cqm_srq_linkwqe *)(container + link_wqe_offset); + /* shift right by 2 bits to get the length of dw(4B) */ + cqm_swab32((u8 *)(srq_link_wqe), sizeof(struct tag_cqm_linkwqe) >> 2); + + addr = CQM_ADDR_COMBINE(srq_link_wqe->current_buffer_gpa_h, + srq_link_wqe->current_buffer_gpa_l); + if (addr == 0) { + cqm_err(handle->dev_hdl, "Rq container del: buffer physical addr is null\n"); + return; + } + pci_unmap_single(cqm_handle->dev, addr, qinfo->container_size, + PCI_DMA_BIDIRECTIONAL); + + /* 2. 
Obtain the current container va through link wqe table, free va */ + addr = CQM_ADDR_COMBINE(srq_link_wqe->current_buffer_addr_h, + srq_link_wqe->current_buffer_addr_l); + if (addr == 0) { + cqm_err(handle->dev_hdl, "Rq container del: buffer virtual addr is null\n"); + return; + } + kfree((void *)addr); +} +EXPORT_SYMBOL(cqm_srq_used_rq_container_delete); + +s32 cqm_dtoe_share_recv_queue_create(void *ex_handle, u32 contex_size, + u32 *index_count, u32 *index) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_toe_private_capability *tow_own_cap = NULL; + struct tag_cqm_handle *cqm_handle = NULL; + struct tag_cqm_bitmap *bitmap = NULL; + u32 step; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ex_handle is null\n", __func__); + return CQM_FAIL; + } + if (unlikely(!index_count)) { + pr_err("[CQM]%s: index_count is null\n", __func__); + return CQM_FAIL; + } + if (unlikely(!index)) { + pr_err("[CQM]%s: index is null\n", __func__); + return CQM_FAIL; + } + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + tow_own_cap = &cqm_handle->toe_own_capability; + + bitmap = &tow_own_cap->srqc_bitmap; + *index_count = (ALIGN(contex_size, tow_own_cap->toe_srqc_basic_size)) / + tow_own_cap->toe_srqc_basic_size; + /* toe srqc number must align of 2 */ + step = ALIGN(tow_own_cap->toe_srqc_number, 2); + *index = cqm_bitmap_alloc(bitmap, step, *index_count, + cqm_handle->func_capability.xid_alloc_mode); + if (*index >= bitmap->max_num) { + cqm_err(handle->dev_hdl, "Srq create: queue index %u exceeds max_num %u\n", + *index, bitmap->max_num); + return CQM_FAIL; + } + *index += tow_own_cap->toe_srqc_start_id; + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_srq_create_cnt); + + return CQM_SUCCESS; +} +EXPORT_SYMBOL(cqm_dtoe_share_recv_queue_create); + +void cqm_dtoe_free_srq_bitmap_index(void *ex_handle, u32 index_count, u32 index) +{ + struct hinic3_hwdev *handle = (struct hinic3_hwdev *)ex_handle; + struct tag_cqm_handle *cqm_handle = NULL; + struct tag_cqm_bitmap *bitmap = NULL; + + if (unlikely(!ex_handle)) { + pr_err("[CQM]%s: ex_handle is null\n", __func__); + return; + } + + cqm_handle = (struct tag_cqm_handle *)(handle->cqm_hdl); + bitmap = &cqm_handle->toe_own_capability.srqc_bitmap; + cqm_bitmap_free(bitmap, index, index_count); +} +EXPORT_SYMBOL(cqm_dtoe_free_srq_bitmap_index); diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_object.h b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_object.h new file mode 100644 index 000000000..ba61828f2 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_object.h @@ -0,0 +1,714 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef CQM_OBJECT_H +#define CQM_OBJECT_H + +#include "cqm_define.h" +#include "vram_common.h" + +#define CQM_LINKWQE_128B 128 +#define CQM_MOD_TOE HINIC3_MOD_TOE +#define CQM_MOD_CQM HINIC3_MOD_CQM + +#ifdef __cplusplus +#if __cplusplus +extern "C" { +#endif +#endif /* __cplusplus */ + +#ifndef HIUDK_SDK + +#define CQM_SUCCESS 0 +#define CQM_FAIL (-1) +/* Ignore the return value and continue */ +#define CQM_CONTINUE 1 + +/* type of WQE is LINK WQE */ +#define CQM_WQE_WF_LINK 1 +/* type of WQE is common WQE */ +#define CQM_WQE_WF_NORMAL 0 + +/* chain queue mode */ +#define CQM_QUEUE_LINK_MODE 0 +/* RING queue mode */ +#define CQM_QUEUE_RING_MODE 1 +/* SRQ queue mode */ +#define CQM_QUEUE_TOE_SRQ_LINK_MODE 2 +/* RDMA queue mode */ +#define CQM_QUEUE_RDMA_QUEUE_MODE 3 + +/* generic linkwqe structure */ +struct tag_cqm_linkwqe { + u32 
rsv1 : 14; /* <reserved field */ + u32 wf : 1; /* <wf */ + u32 rsv2 : 14; /* <reserved field */ + u32 ctrlsl : 2; /* <ctrlsl */ + u32 o : 1; /* <o bit */ + + u32 rsv3 : 31; /* <reserved field */ + u32 lp : 1; /* The lp field determines whether the o-bit + * meaning is reversed. + */ + + u32 next_page_gpa_h; /* <record the upper 32b physical address of the + * next page for the chip + */ + u32 next_page_gpa_l; /* <record the lower 32b physical address of the + * next page for the chip + */ + + u32 next_buffer_addr_h; /* <record the upper 32b virtual address of the + * next page for the driver + */ + u32 next_buffer_addr_l; /* <record the lower 32b virtual address of the + * next page for the driver + */ +}; + +/* SRQ linkwqe structure. The wqe size must not exceed the common RQE size. */ +struct tag_cqm_srq_linkwqe { + struct tag_cqm_linkwqe linkwqe; /* <generic linkwqe structure */ + u32 current_buffer_gpa_h; /* <Record the upper 32b physical address of + * the current page, which is used when the + * driver releases the container and cancels + * the mapping. + */ + u32 current_buffer_gpa_l; /* <Record the lower 32b physical address of + * the current page, which is used when the + * driver releases the container and cancels + * the mapping. + */ + u32 current_buffer_addr_h; /* <Record the upper 32b of the virtual + * address of the current page, which is used + * when the driver releases the container. + */ + u32 current_buffer_addr_l; /* <Record the lower 32b of the virtual + * address of the current page, which is used + * when the driver releases the container. + */ + + u32 fast_link_page_addr_h; /* <Record the upper 32b of the virtual + * address of the fastlink page where the + * container address is recorded. It is used + * when the driver releases the fastlink. + */ + u32 fast_link_page_addr_l; /* <Record the lower 32b virtual address of + * the fastlink page where the container + * address is recorded. It is used when the + * driver releases the fastlink. + */ + + u32 fixed_next_buffer_addr_h; /* <Record the upper 32b virtual address + * of the next container, which is used to + * release driver resources and must not be + * modified by the driver. + */ + u32 fixed_next_buffer_addr_l; /* <Record the lower 32b virtual address + * of the next container, which is used to + * release driver resources and must not be + * modified by the driver.
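Because the link WQE stores 64-bit addresses as 32-bit high/low halves, consumers rebuild them the way the .c files in this patch do with CQM_ADDR_COMBINE(). A small sketch, assuming the usual combine semantics (high half shifted up by 32 and OR-ed with the low half) and that cqm_swab32() has already restored host byte order, as the release paths above do:

        /* Sketch: rebuilding the saved container DMA address from the two halves. */
        static dma_addr_t srq_linkwqe_current_gpa(const struct tag_cqm_srq_linkwqe *lw)
        {
                return ((u64)lw->current_buffer_gpa_h << 32) | lw->current_buffer_gpa_l;
        }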
+ */ +}; + +/* first 64B of standard 128B WQE */ +union tag_cqm_linkwqe_first64B { + struct tag_cqm_linkwqe basic_linkwqe; /* <generic linkwqe structure */ + struct tag_cqm_srq_linkwqe toe_srq_linkwqe; /* <SRQ linkwqe structure */ + u32 value[16]; /* <reserved field */ +}; + +/* second 64B of standard 128B WQE */ +struct tag_cqm_linkwqe_second64B { + u32 rsvd0[4]; /* <first 16B reserved field */ + u32 rsvd1[4]; /* <second 16B reserved field */ + union { + struct { + u32 rsvd0[3]; + u32 rsvd1 : 29; + u32 toe_o : 1; /* <o bit of toe */ + u32 resvd2 : 2; + } bs; + u32 value[4]; + } third_16B; /* <third 16B */ + + union { + struct { + u32 rsvd0[2]; + u32 rsvd1 : 31; + u32 ifoe_o : 1; /* <o bit of ifoe */ + u32 rsvd2; + } bs; + u32 value[4]; + } forth_16B; /* <fourth 16B */ +}; + +/* standard 128B WQE structure */ +struct tag_cqm_linkwqe_128B { + union tag_cqm_linkwqe_first64B first64B; /* <first 64B of standard 128B WQE */ + struct tag_cqm_linkwqe_second64B second64B; /* <back 64B of standard 128B WQE */ +}; + +/* AEQ type definition */ +enum cqm_aeq_event_type { + CQM_AEQ_BASE_T_NIC = 0, /* <NIC consists of 16 events:0~15 */ + CQM_AEQ_BASE_T_ROCE = 16, /* <ROCE consists of 32 events:16~47 */ + CQM_AEQ_BASE_T_FC = 48, /* <FC consists of 8 events:48~55 */ + CQM_AEQ_BASE_T_IOE = 56, /* <IOE consists of 8 events:56~63 */ + CQM_AEQ_BASE_T_TOE = 64, /* <TOE consists of 16 events:64~95 */ + CQM_AEQ_BASE_T_VBS = 96, /* <VBS consists of 16 events:96~111 */ + CQM_AEQ_BASE_T_IPSEC = 112, /* <VBS consists of 16 events:112~127 */ + CQM_AEQ_BASE_T_MAX = 128 /* <maximum of 128 events can be defined */ +}; + +/* service registration template */ +struct tag_service_register_template { + u32 service_type; /* <service type */ + u32 srq_ctx_size; /* <SRQ context size */ + u32 scq_ctx_size; /* <SCQ context size */ + void *service_handle; /* <pointer to the service driver when the + * ceq/aeq function is called back + */ + /* <ceq callback:shared cq */ + void (*shared_cq_ceq_callback)(void *service_handle, u32 cqn, + void *cq_priv); + /* <ceq callback:embedded cq */ + void (*embedded_cq_ceq_callback)(void *service_handle, u32 xid, + void *qpc_priv); + /* <ceq callback:no cq */ + void (*no_cq_ceq_callback)(void *service_handle, u32 xid, u32 qid, + void *qpc_priv); + /* <aeq level callback */ + u8 (*aeq_level_callback)(void *service_handle, u8 event_type, u8 *val); + /* <aeq callback */ + void (*aeq_callback)(void *service_handle, u8 event_type, u8 *val); +}; + +/* object operation type definition */ +enum cqm_object_type { + CQM_OBJECT_ROOT_CTX = 0, /* <0:root context, which is compatible with + * root CTX management + */ + CQM_OBJECT_SERVICE_CTX, /* <1:QPC, connection management object */ + CQM_OBJECT_MPT, /* <2:RDMA service usage */ + + CQM_OBJECT_NONRDMA_EMBEDDED_RQ = 10, /* <10:RQ of non-RDMA services, + * managed by LINKWQE + */ + CQM_OBJECT_NONRDMA_EMBEDDED_SQ, /* <11:SQ of non-RDMA services, + * managed by LINKWQE + */ + CQM_OBJECT_NONRDMA_SRQ, /* <12:SRQ of non-RDMA services, + * managed by MTT, but the CQM + * needs to apply for MTT. 
+ */ + CQM_OBJECT_NONRDMA_EMBEDDED_CQ, /* <13:Embedded CQ for non-RDMA + * services, managed by LINKWQE + */ + CQM_OBJECT_NONRDMA_SCQ, /* <14:SCQ of non-RDMA services, + * managed by LINKWQE + */ + + CQM_OBJECT_RESV = 20, + + CQM_OBJECT_RDMA_QP = 30, /* <30:QP of RDMA services, managed by MTT */ + CQM_OBJECT_RDMA_SRQ, /* <31:SRQ of RDMA services, managed by MTT */ + CQM_OBJECT_RDMA_SCQ, /* <32:SCQ of RDMA services, managed by MTT */ + + CQM_OBJECT_MTT = 50, /* <50:MTT table of the RDMA service */ + CQM_OBJECT_RDMARC, /* <51:RC of the RDMA service */ +}; + +/* return value of the failure to apply for the BITMAP table */ +#define CQM_INDEX_INVALID (~(0U)) +/* Return value of the reserved bit applied for in the BITMAP table, + * indicating that the index is allocated by the CQM and + * cannot be specified by the driver. + */ +#define CQM_INDEX_RESERVED 0xfffff + +/* to support ROCE Q buffer resize, the first Q buffer space */ +#define CQM_RDMA_Q_ROOM_1 1 +/* to support the Q buffer resize of ROCE, the second Q buffer space */ +#define CQM_RDMA_Q_ROOM_2 2 + +/* doorbell mode selected by the current Q, hardware doorbell */ +#define CQM_HARDWARE_DOORBELL 1 +/* doorbell mode selected by the current Q, software doorbell */ +#define CQM_SOFTWARE_DOORBELL 2 + +/* single-node structure of the CQM buffer */ +struct tag_cqm_buf_list { + void *va; /* <virtual address */ + dma_addr_t pa; /* <physical address */ + u32 refcount; /* <reference counting of the buf, + * which is used for internal buf management. + */ +}; + +/* common management structure of the CQM buffer */ +struct tag_cqm_buf { + struct tag_cqm_buf_list *buf_list; /* <buffer list */ + struct tag_cqm_buf_list direct; /* <map the discrete buffer list to a group + * of consecutive addresses + */ + u32 page_number; /* <buf_number in quantity of page_number=2^n */ + u32 buf_number; /* <number of buf_list nodes */ + u32 buf_size; /* <PAGE_SIZE in quantity of buf_size=2^n */ + struct vram_buf_info buf_info; + u32 bat_entry_type; +}; + +/* CQM object structure, which can be considered + * as the base class abstracted from all queues/CTX. + */ +struct tag_cqm_object { + u32 service_type; /* <service type */ + u32 object_type; /* <object type, such as context, queue, mpt, + * and mtt, etc + */ + u32 object_size; /* <object Size, for queue/CTX/MPT, + * the unit is Byte, for MTT/RDMARC, + * the unit is the number of entries, + * for containers, the unit is the number of + * containers. + */ + atomic_t refcount; /* <reference counting */ + struct completion free; /* <release completed quantity */ + void *cqm_handle; /* <cqm_handle */ +}; + +/* structure of the QPC and MPT objects of the CQM */ +struct tag_cqm_qpc_mpt { + struct tag_cqm_object object; /* <object base class */ + u32 xid; /* <xid */ + dma_addr_t paddr; /* <physical address of the QPC/MTT memory */ + void *priv; /* <private information about the object of + * the service driver. + */ + u8 *vaddr; /* <virtual address of the QPC/MTT memory */ +}; + +/* queue header structure */ +struct tag_cqm_queue_header { + u64 doorbell_record; /* <SQ/RQ DB content */ + u64 ci_record; /* <CQ DB content */ + u64 rsv1; /* <This area is a user-defined area for driver + * and microcode information transfer. + */ + u64 rsv2; /* <This area is a user-defined area for driver + * and microcode information transfer. 
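As a usage sketch for the service registration template declared earlier in this header: the callback name and context sizes below are made up, the service type constant is one used elsewhere in this patch, and the registration entry point itself is not part of this header.

        static void example_scq_ceq_cb(void *service_handle, u32 cqn, void *cq_priv)
        {
                /* per-service SCQ completion handling would go here */
        }

        static void example_fill_template(struct tag_service_register_template *tmpl,
                                          void *service_handle)
        {
                tmpl->service_type = CQM_SERVICE_T_FC;  /* constant used elsewhere in this patch */
                tmpl->srq_ctx_size = 0;                 /* made-up: no SRQ context */
                tmpl->scq_ctx_size = 64;                /* made-up context size */
                tmpl->service_handle = service_handle;
                tmpl->shared_cq_ceq_callback = example_scq_ceq_cb;
        }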
+ */ +}; + +/* queue management structure: for queues of non-RDMA services, embedded queues + * are managed by LinkWQE, SRQ and SCQ are managed by MTT, but MTT needs to be + * applied by CQM; the queue of the RDMA service is managed by the MTT. + */ +struct tag_cqm_queue { + struct tag_cqm_object object; /* <object base class */ + u32 index; /* <The embedded queue and QP do not have + * indexes, but the SRQ and SCQ do. + */ + void *priv; /* <private information about the object of + * the service driver + */ + u32 current_q_doorbell; /* <doorbell type selected by the current + * queue. HW/SW are used for the roce QP. + */ + u32 current_q_room; /* <roce:current valid room buf */ + struct tag_cqm_buf q_room_buf_1; /* <nonrdma:only q_room_buf_1 can be set to + * q_room_buf + */ + struct tag_cqm_buf q_room_buf_2; /* <The CQ of RDMA reallocates the size of + * the queue room. + */ + struct tag_cqm_queue_header *q_header_vaddr; /* <queue header virtual address */ + dma_addr_t q_header_paddr; /* <physical address of the queue header */ + u8 *q_ctx_vaddr; /* <CTX virtual addresses of SRQ and SCQ */ + dma_addr_t q_ctx_paddr; /* <CTX physical addresses of SRQ and SCQ */ + u32 valid_wqe_num; /* <number of valid WQEs that are + * successfully created + */ + u8 *tail_container; /* <tail pointer of the SRQ container */ + u8 *head_container; /* <head pointer of SRQ container */ + u8 queue_link_mode; /* <Determine the connection mode during + * queue creation, such as link and ring. + */ +}; + +/* MTT/RDMARC management structure */ +struct tag_cqm_mtt_rdmarc { + struct tag_cqm_object object; /* <object base class */ + u32 index_base; /* <index_base */ + u32 index_number; /* <index_number */ + u8 *vaddr; /* <buffer virtual address */ +}; + +/* sending command structure */ +struct tag_cqm_cmd_buf { + void *buf; /* <command buffer virtual address */ + dma_addr_t dma; /* <physical address of the command buffer */ + u16 size; /* <command buffer size */ +}; + +/* definition of sending ACK mode */ +enum cqm_cmd_ack_type { + CQM_CMD_ACK_TYPE_CMDQ = 0, /* <ack is written back to cmdq */ + CQM_CMD_ACK_TYPE_SHARE_CQN = 1, /* <ack is reported through the SCQ of + * the root CTX. + */ + CQM_CMD_ACK_TYPE_APP_CQN = 2 /* <ack is reported through the SCQ of + * service + */ +}; + +#endif +/** + * @brief: create FC SRQ. + * @details: The number of valid WQEs in the queue must meet the number of + * transferred WQEs. Linkwqe can only be filled at the end of the + * page. The actual number of valid links exceeds the requirement. + * The service needs to be informed of the number of extra links to + * be created. + * @param ex_handle: device pointer that represents the PF + * @param service_type: service type + * @param object_type: object type + * @param wqe_number: number of WQEs + * @param wqe_size: wqe size + * @param object_priv: pointer to object private information + * @retval struct tag_cqm_queue*: queue structure pointer + * @date: 2019-5-4 + */ +struct tag_cqm_queue *cqm_object_fc_srq_create(void *ex_handle, u32 service_type, + enum cqm_object_type object_type, + u32 wqe_number, u32 wqe_size, + void *object_priv); + +/** + * @brief: create RQ. + * @details: When SRQ is used, the RQ queue is created. 
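A usage sketch for the FC SRQ creation interface documented above; the depth and WQE size are made-up values, and the object type is assumed to be the non-RDMA SRQ entry from the enum earlier in this header:

        static struct tag_cqm_queue *example_fc_srq_create(void *ex_handle)
        {
                struct tag_cqm_queue *q;

                q = cqm_object_fc_srq_create(ex_handle, CQM_SERVICE_T_FC,
                                             CQM_OBJECT_NONRDMA_SRQ, 1024, 64, NULL);
                if (!q)
                        return NULL;
                /* q->valid_wqe_num reports how many WQEs were actually created */
                return q;
        }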
+ * @param ex_handle: device pointer that represents the PF + * @param service_type: service type + * @param object_type: object type + * @param init_rq_num: number of containers + * @param container_size: container size + * @param wqe_size: wqe size + * @param object_priv: pointer to object private information + * @retval struct tag_cqm_queue*: queue structure pointer + * @date: 2019-5-4 + */ +struct tag_cqm_queue *cqm_object_recv_queue_create(void *ex_handle, u32 service_type, + enum cqm_object_type object_type, + u32 init_rq_num, u32 container_size, + u32 wqe_size, void *object_priv); + +/** + * @brief: SRQ applies for a new container and is linked after the container + * is created. + * @details: SRQ applies for a new container and is linked after the container + * is created. + * @param common: queue structure pointer + * @retval 0: success + * @retval -1: fail + * @date: 2019-5-4 + */ +s32 cqm_object_share_recv_queue_add_container(struct tag_cqm_queue *common); + +/** + * @brief: SRQ applies for a new container. After the container is created, + * no link is attached to the container. The service is attached to + * the container. + * @details: SRQ applies for a new container. After the container is created, + * no link is attached to the container. The service is attached to + * the container. + * @param common: queue structure pointer + * @param container_addr: returned container address + * @retval 0: success + * @retval -1: fail + * @date: 2019-5-4 + */ +s32 cqm_object_srq_add_container_free(struct tag_cqm_queue *common, u8 **container_addr); + +/** + * @brief: create SRQ for TOE services. + * @details: create SRQ for TOE services. + * @param ex_handle: device pointer that represents the PF + * @param service_type: service type + * @param object_type: object type + * @param container_number: number of containers + * @param container_size: container size + * @param wqe_size: wqe size + * @retval struct tag_cqm_queue*: queue structure pointer + * @date: 2019-5-4 + */ +struct tag_cqm_queue *cqm_object_share_recv_queue_create(void *ex_handle, + u32 service_type, + enum cqm_object_type object_type, + u32 container_number, + u32 container_size, + u32 wqe_size); + +/** + * @brief: create QPC and MPT. + * @details: When QPC and MPT are created, the interface sleeps. + * @param ex_handle: device pointer that represents the PF + * @param service_type: service type + * @param object_type: object type + * @param object_size: object size, in bytes. + * @param object_priv: private structure of the service layer. + * The value can be NULL. + * @param index: apply for reserved qpn based on the value. If automatic + * allocation is required, fill CQM_INDEX_INVALID. + * @retval struct tag_cqm_qpc_mpt *: pointer to the QPC/MPT structure + * @date: 2019-5-4 + */ +struct tag_cqm_qpc_mpt *cqm_object_qpc_mpt_create(void *ex_handle, u32 service_type, + enum cqm_object_type object_type, + u32 object_size, void *object_priv, + u32 index, bool low2bit_align_en); + +/** + * @brief: create a queue for non-RDMA services. + * @details: create a queue for non-RDMA services. The interface sleeps. + * @param ex_handle: device pointer that represents the PF + * @param service_type: service type + * @param object_type: object type + * @param wqe_number: number of Link WQEs + * @param wqe_size: fixed length, size 2^n + * @param object_priv: private structure of the service layer. + * The value can be NULL. 
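A minimal sketch of the QPC creation interface declared above, letting the CQM pick the index by passing CQM_INDEX_INVALID; the 1024 B context size and the FC service type are illustrative only:

        static s32 example_qpc_create(void *ex_handle, void *service_priv)
        {
                struct tag_cqm_qpc_mpt *qpc;

                qpc = cqm_object_qpc_mpt_create(ex_handle, CQM_SERVICE_T_FC,
                                                CQM_OBJECT_SERVICE_CTX, 1024,
                                                service_priv, CQM_INDEX_INVALID, false);
                if (!qpc)
                        return CQM_FAIL;

                /* qpc->xid holds the chosen index; qpc->vaddr/paddr point at the context */
                cqm_object_delete(&qpc->object);
                return CQM_SUCCESS;
        }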
+ * @retval struct tag_cqm_queue *: queue structure pointer + * @date: 2019-5-4 + */ +struct tag_cqm_queue *cqm_object_nonrdma_queue_create(void *ex_handle, u32 service_type, + enum cqm_object_type object_type, + u32 wqe_number, u32 wqe_size, + void *object_priv); + +/** + * @brief: create a RDMA service queue. + * @details: create a queue for the RDMA service. The interface sleeps. + * @param ex_handle: device pointer that represents the PF + * @param service_type: service type + * @param object_type: object type + * @param object_size: object size + * @param object_priv: private structure of the service layer. + * The value can be NULL. + * @param room_header_alloc: whether to apply for the queue room and header + * space + * @retval struct tag_cqm_queue *: queue structure pointer + * @date: 2019-5-4 + */ +struct tag_cqm_queue *cqm_object_rdma_queue_create(void *ex_handle, u32 service_type, + enum cqm_object_type object_type, + u32 object_size, void *object_priv, + bool room_header_alloc, u32 xid); + +/** + * @brief: create the MTT and RDMARC of the RDMA service. + * @details: create the MTT and RDMARC of the RDMA service. + * @param ex_handle: device pointer that represents the PF + * @param service_type: service type + * @param object_type: object type + * @param index_base: start index number + * @param index_number: index number + * @retval struct tag_cqm_mtt_rdmarc *: pointer to the MTT/RDMARC structure + * @date: 2019-5-4 + */ +struct tag_cqm_mtt_rdmarc *cqm_object_rdma_table_get(void *ex_handle, u32 service_type, + enum cqm_object_type object_type, + u32 index_base, u32 index_number); + +/** + * @brief: delete created objects. + * @details: delete the created object. This function does not return until all + * operations on the object are complete. + * @param object: object pointer + * @retval: void + * @date: 2019-5-4 + */ +void cqm_object_delete(struct tag_cqm_object *object); + +/** + * @brief: obtains the physical address and virtual address at the specified + * offset of the object buffer. + * @details: Only RDMA table query is supported to obtain the physical address + * and virtual address at the specified offset of the object buffer. + * @param object: object pointer + * @param offset: for a rdma table, offset is the absolute index number. + * @param paddr: The physical address is returned only for the rdma table. + * @retval u8 *: buffer specify the virtual address at the offset + * @date: 2019-5-4 + */ +u8 *cqm_object_offset_addr(struct tag_cqm_object *object, u32 offset, dma_addr_t *paddr); + +/** + * @brief: obtain object according index. + * @details: obtain object according index. + * @param ex_handle: device pointer that represents the PF + * @param object_type: object type + * @param index: support qpn,mptn,scqn,srqn + * @param bh: whether to disable the bottom half of the interrupt + * @retval struct tag_cqm_object *: object pointer + * @date: 2019-5-4 + */ +struct tag_cqm_object *cqm_object_get(void *ex_handle, enum cqm_object_type object_type, + u32 index, bool bh); + +/** + * @brief: object reference counting release + * @details: After the function cqm_object_get is invoked, this API must be put. + * Otherwise, the object cannot be released. + * @param object: object pointer + * @retval: void + * @date: 2019-5-4 + */ +void cqm_object_put(struct tag_cqm_object *object); + +/** + * @brief: obtain the ID of the function where the object resides. + * @details: obtain the ID of the function where the object resides. 
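A usage sketch for the non-RDMA queue creation interface declared above, creating an SCQ; the depth and WQE size are made-up values, with the depth kept a power of two inside the CQ depth range checked by the implementation:

        static struct tag_cqm_queue *example_scq_create(void *ex_handle, void *priv)
        {
                struct tag_cqm_queue *scq;

                scq = cqm_object_nonrdma_queue_create(ex_handle, CQM_SERVICE_T_FC,
                                                      CQM_OBJECT_NONRDMA_SCQ,
                                                      4096, 128, priv);
                if (!scq)
                        return NULL;
                /* scq->index is the SCQN; tear down with cqm_object_delete(&scq->object) */
                return scq;
        }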
+ * @param object: object pointer + * @retval >=0: ID of function + * @retval -1: fail + * @date: 2020-4-15 + */ +s32 cqm_object_funcid(struct tag_cqm_object *object); + +/** + * @brief: apply for a new space for an object. + * @details: Currently, this parameter is valid only for the ROCE service. + * The CQ buffer size is adjusted, but the CQN and CQC remain + * unchanged. New buffer space is applied for, and the old buffer + * space is not released. The current valid buffer is still the old + * buffer. + * @param object: object pointer + * @param object_size: new buffer size + * @retval 0: success + * @retval -1: fail + * @date: 2019-5-4 + */ +s32 cqm_object_resize_alloc_new(struct tag_cqm_object *object, u32 object_size); + +/** + * @brief: release the newly applied buffer space for the object. + * @details: This function is used to release the newly applied buffer space for + * service exception handling. + * @param object: object pointer + * @retval: void + * @date: 2019-5-4 + */ +void cqm_object_resize_free_new(struct tag_cqm_object *object); + +/** + * @brief: release old buffer space for objects. + * @details: This function releases the old buffer and sets the current valid + * buffer to the new buffer. + * @param object: object pointer + * @retval: void + * @date: 2019-5-4 + */ +void cqm_object_resize_free_old(struct tag_cqm_object *object); + +/** + * @brief: release container. + * @details: release container. + * @param object: object pointer + * @param container: container pointer to be released + * @retval: void + * @date: 2019-5-4 + */ +void cqm_srq_used_rq_container_delete(struct tag_cqm_object *object, u8 *container); + +void *cqm_get_db_addr(void *ex_handle, u32 service_type); + +s32 cqm_ring_hardware_db_fc(void *ex_handle, u32 service_type, u8 db_count, + u8 pagenum, u64 db); + +/** + * @brief: provide the interface of knocking on doorbell. + * The CQM converts the pri to cos. + * @details: provide interface of knocking on doorbell for the CQM to convert + * the pri to cos. The doorbell transferred by the service must be the + * host sequence. This interface converts the network sequence. + * @param ex_handle: device pointer that represents the PF + * @param service_type: Each kernel-mode service is allocated a hardware + * doorbell page. + * @param db_count: PI[7:0] beyond 64b in the doorbell + * @param db: The doorbell content is organized by the service. If there is + * endian conversion, the service needs to complete the conversion. + * @retval 0: success + * @retval -1: fail + * @date: 2019-5-4 + */ +s32 cqm_ring_hardware_db_update_pri(void *ex_handle, u32 service_type, + u8 db_count, u64 db); + +/** + * @brief: knock on software doorbell. + * @details: knock on software doorbell. + * @param object: object pointer + * @param db_record: software doorbell content. If there is big-endian + * conversion, the service needs to complete the conversion. + * @retval 0: success + * @retval -1: fail + * @date: 2019-5-4 + */ +s32 cqm_ring_software_db(struct tag_cqm_object *object, u64 db_record); + +/** + * @brief: reference counting is added to the bloom filter ID. + * @details: reference counting is added to the bloom filter ID. When the ID + * changes from 0 to 1, the sending API is set to 1. + * This interface sleeps. + * @param ex_handle: device pointer that represents the PF + * @param id: id + * @retval 0: success + * @retval -1: fail + * @date: 2019-5-4 + */ +void *cqm_gid_base(void *ex_handle); + +/** + * @brief: obtain the base virtual address of the timer. 
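A small sketch of the get/put contract described above: every successful cqm_object_get() must be balanced by cqm_object_put(), otherwise the object can never be released. The lookup index and object type are illustrative.

        static void example_lookup(void *ex_handle, u32 scqn)
        {
                struct tag_cqm_object *obj;

                obj = cqm_object_get(ex_handle, CQM_OBJECT_NONRDMA_SCQ, scqn, true);
                if (!obj)
                        return;

                /* ... use the object ... */

                cqm_object_put(obj);    /* every successful get needs a put */
        }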
+ * @details: obtain the base virtual address of the timer. + * @param ex_handle: device pointer that represents the PF + * @retval void *: base virtual address of the timer + * @date: 2020-5-21 + */ +void *cqm_timer_base(void *ex_handle); + +/** + * @brief: clear timer buffer. + * @details: clear the timer buffer based on the function ID. Function IDs start + * from 0, and timer buffers are arranged by function ID. + * @param ex_handle: device pointer that represents the PF + * @param function_id: function id + * @retval: void + * @date: 2019-5-4 + */ +void cqm_function_timer_clear(void *ex_handle, u32 function_id); + +/** + * @brief: clear hash buffer. + * @details: clear the hash buffer based on the function ID. + * @param ex_handle: device pointer that represents the PF + * @param global_funcid + * @retval: void + * @date: 2019-5-4 + */ +void cqm_function_hash_buf_clear(void *ex_handle, s32 global_funcid); + +s32 cqm_ring_direct_wqe_db(void *ex_handle, u32 service_type, u8 db_count, + void *direct_wqe); +s32 cqm_ring_direct_wqe_db_fc(void *ex_handle, u32 service_type, + void *direct_wqe); + +#ifdef __cplusplus +#if __cplusplus +} +#endif +#endif /* __cplusplus */ + +#endif /* CQM_OBJECT_H */ diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_object_intern.c b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_object_intern.c new file mode 100644 index 000000000..d149d54f7 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_object_intern.c @@ -0,0 +1,1467 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/pci.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/device.h> +#include <linux/gfp.h> +#include <linux/mm.h> + +#include "ossl_knl.h" +#include "hinic3_crm.h" +#include "hinic3_hw.h" +#include "hinic3_hwdev.h" + +#include "cqm_object.h" +#include "cqm_bitmap_table.h" +#include "cqm_bat_cla.h" +#include "cqm_main.h" +#include "cqm_object_intern.h" + +#define srq_obj_intern_if_section + +/** + * Prototype : cqm_container_free + * Description : Only the container buffer is released. The buffer in the WQE + * and fast link tables are not involved. + * Containers can be released from head to tail, including head + * and tail. This function does not modify the start and + * end pointers of qinfo records. + * Input : u8 *srq_head_container + * u8 *srq_tail_container: If it is NULL, it means to release + * container from head to tail. + * struct tag_cqm_queue *common + * Output : None + * Return Value : void + * 1.Date : 2016/2/1 + * Modification : Created function + */ +void cqm_container_free(u8 *srq_head_container, u8 *srq_tail_container, + struct tag_cqm_queue *common) +{ + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)(common->object.cqm_handle); + struct tag_cqm_nonrdma_qinfo *qinfo = container_of(common, struct tag_cqm_nonrdma_qinfo, + common); + u32 link_wqe_offset = qinfo->wqe_per_buf * qinfo->wqe_size; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_srq_linkwqe *srq_link_wqe = NULL; + u32 container_size = qinfo->container_size; + struct pci_dev *dev = cqm_handle->dev; + u64 addr; + u8 *srqhead_container = srq_head_container; + u8 *srqtail_container = srq_tail_container; + + if (unlikely(!srqhead_container)) { + pr_err("[CQM]%s: srqhead_container is null\n", __func__); + return; + } + + /* 1. The range is released cyclically from the head to the tail, i.e. + * [head:tail]. 
If the tail is null, the range is [head:null]. Oterwise, + * [head:tail->next). + */ + if (srqtail_container) { + /* [head:tail->next): Update srqtail_container to the next + * container va. + */ + srq_link_wqe = (struct tag_cqm_srq_linkwqe *)(srqtail_container + + link_wqe_offset); + /* Only the link wqe part needs to be converted. */ + cqm_swab32((u8 *)(srq_link_wqe), sizeof(struct tag_cqm_linkwqe) >> CQM_DW_SHIFT); + srqtail_container = (u8 *)CQM_ADDR_COMBINE(srq_link_wqe->fixed_next_buffer_addr_h, + srq_link_wqe->fixed_next_buffer_addr_l); + } + + do { + /* 2. Obtain the link wqe of the current container */ + srq_link_wqe = (struct tag_cqm_srq_linkwqe *)(srqhead_container + + link_wqe_offset); + /* Only the link wqe part needs to be converted. */ + cqm_swab32((u8 *)(srq_link_wqe), sizeof(struct tag_cqm_linkwqe) >> CQM_DW_SHIFT); + /* Obtain the va of the next container using the link wqe. */ + srqhead_container = (u8 *)CQM_ADDR_COMBINE(srq_link_wqe->fixed_next_buffer_addr_h, + srq_link_wqe->fixed_next_buffer_addr_l); + + /* 3. Obtain the current container pa from the link wqe, + * and cancel the mapping + */ + addr = CQM_ADDR_COMBINE(srq_link_wqe->current_buffer_gpa_h, + srq_link_wqe->current_buffer_gpa_l); + if (addr == 0) { + cqm_err(handle->dev_hdl, "Container free: buffer physical addr is null\n"); + return; + } + pci_unmap_single(dev, (dma_addr_t)addr, container_size, + PCI_DMA_BIDIRECTIONAL); + + /* 4. Obtain the container va through linkwqe and release the + * container va. + */ + addr = CQM_ADDR_COMBINE(srq_link_wqe->current_buffer_addr_h, + srq_link_wqe->current_buffer_addr_l); + if (addr == 0) { + cqm_err(handle->dev_hdl, "Container free: buffer virtual addr is null\n"); + return; + } + kfree((void *)addr); + } while (srqhead_container != srqtail_container); +} + +/** + * Prototype : cqm_container_create + * Description : Create a container for the RQ or SRQ, link it to the tail of + * the queue, and update the tail container pointer of the queue. + * Input : struct tag_cqm_object *object + * u8 **container_addr + * bool link + * Output : None + * Return Value : s32 + * 1.Date : 2016/2/16 + * Modification : Created function + */ +s32 cqm_container_create(struct tag_cqm_object *object, u8 **container_addr, bool link) +{ + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)(object->cqm_handle); + struct tag_cqm_queue *common = container_of(object, struct tag_cqm_queue, object); + struct tag_cqm_nonrdma_qinfo *qinfo = container_of(common, struct tag_cqm_nonrdma_qinfo, + common); + u32 link_wqe_offset = qinfo->wqe_per_buf * qinfo->wqe_size; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_srq_linkwqe *srq_link_wqe = NULL; + struct tag_cqm_linkwqe *link_wqe = NULL; + dma_addr_t new_container_pa; + u8 *new_container = NULL; + + /* 1. Applying for Container Space and Initializing Invalid/Normal WQE + * of the Container. + */ + new_container = kmalloc(qinfo->container_size, GFP_ATOMIC | __GFP_ZERO); + if (!new_container) + return CQM_FAIL; + + /* Container PCI mapping */ + new_container_pa = pci_map_single(cqm_handle->dev, new_container, + qinfo->container_size, + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(cqm_handle->dev, new_container_pa) != 0) { + cqm_err(handle->dev_hdl, CQM_MAP_FAIL(new_container_pa)); + goto map_fail; + } + + /* 2. The container is linked to the SRQ, and the link wqe of + * tail_container and new_container is updated. + */ + /* If the SRQ is not empty, update the linkwqe of the tail container. 
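The inclusive [head:tail] release described above can be restated as a generic list walk, which also shows why the do/while covers the head == tail case; this is a plain illustration with hypothetical node and release types, not driver code:

        struct node { struct node *next; };

        static void free_range(struct node *head, struct node *tail,
                               void (*release)(struct node *))
        {
                struct node *stop = tail ? tail->next : NULL;   /* NULL tail: free to the end */
                struct node *cur = head;

                do {
                        struct node *next = cur->next;  /* read the link before freeing */
                        release(cur);
                        cur = next;
                } while (cur != stop);                  /* runs once when head == tail */
        }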
*/ + if (link) { + if (common->tail_container) { + srq_link_wqe = (struct tag_cqm_srq_linkwqe *)(common->tail_container + + link_wqe_offset); + link_wqe = &srq_link_wqe->linkwqe; + link_wqe->next_page_gpa_h = + __swab32((u32)CQM_ADDR_HI(new_container_pa)); + link_wqe->next_page_gpa_l = + __swab32((u32)CQM_ADDR_LW(new_container_pa)); + link_wqe->next_buffer_addr_h = + __swab32((u32)CQM_ADDR_HI(new_container)); + link_wqe->next_buffer_addr_l = + __swab32((u32)CQM_ADDR_LW(new_container)); + /* make sure next page gpa and next buffer addr of + * link wqe update first + */ + wmb(); + /* The SRQ tail container may be accessed by the chip. + * Therefore, obit must be set to 1 at last. + */ + (*(u32 *)link_wqe) |= 0x80; + /* make sure obit set ahead of fixed next buffer addr + * updating of srq link wqe + */ + wmb(); + srq_link_wqe->fixed_next_buffer_addr_h = + (u32)CQM_ADDR_HI(new_container); + srq_link_wqe->fixed_next_buffer_addr_l = + (u32)CQM_ADDR_LW(new_container); + } + } + + /* Update the Invalid WQE of a New Container */ + clear_bit(0x1F, (ulong *)new_container); + /* Update the link wqe of the new container. */ + srq_link_wqe = (struct tag_cqm_srq_linkwqe *)(new_container + link_wqe_offset); + link_wqe = &srq_link_wqe->linkwqe; + link_wqe->o = CQM_LINK_WQE_OWNER_INVALID; + link_wqe->ctrlsl = CQM_LINK_WQE_CTRLSL_VALUE; + link_wqe->lp = CQM_LINK_WQE_LP_INVALID; + link_wqe->wf = CQM_WQE_WF_LINK; + srq_link_wqe->current_buffer_gpa_h = CQM_ADDR_HI(new_container_pa); + srq_link_wqe->current_buffer_gpa_l = CQM_ADDR_LW(new_container_pa); + srq_link_wqe->current_buffer_addr_h = CQM_ADDR_HI(new_container); + srq_link_wqe->current_buffer_addr_l = CQM_ADDR_LW(new_container); + /* Convert only the area accessed by the chip to the network sequence */ + cqm_swab32((u8 *)link_wqe, sizeof(struct tag_cqm_linkwqe) >> CQM_DW_SHIFT); + if (link) + /* Update the tail pointer of a queue. */ + common->tail_container = new_container; + else + *container_addr = new_container; + + return CQM_SUCCESS; + +map_fail: + kfree(new_container); + return CQM_FAIL; +} + +/** + * Prototype : cqm_srq_container_init + * Description : Initialize the SRQ to create all containers and link them. + * Input : struct tag_cqm_object *object + * Output : None + * Return Value : s32 + * 1.Date : 2016/2/3 + * Modification : Created function + */ +static s32 cqm_srq_container_init(struct tag_cqm_object *object) +{ + struct tag_cqm_queue *common = container_of(object, struct tag_cqm_queue, object); + struct tag_cqm_nonrdma_qinfo *qinfo = container_of(common, struct tag_cqm_nonrdma_qinfo, + common); + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)object->cqm_handle; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + u32 container_num = object->object_size; + s32 ret; + u32 i; + + if (common->head_container || common->tail_container) { + cqm_err(handle->dev_hdl, "Srq container init: srq tail/head container not null\n"); + return CQM_FAIL; + } + + /* Applying for a Container + * During initialization, the head/tail pointer is null. + * After the first application is successful, head=tail. + */ + ret = cqm_container_create(&qinfo->common.object, NULL, true); + if (ret == CQM_FAIL) { + cqm_err(handle->dev_hdl, "Srq container init: cqm_srq_container_add fail\n"); + return CQM_FAIL; + } + common->head_container = common->tail_container; + + /* The container is dynamically created and the tail pointer is updated. + * If the container fails to be created, release the containers from + * head to null. 
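Services can grow an SRQ at runtime through the add-container interface declared in cqm_object.h above, which internally performs the same create-and-link sequence; a usage sketch:

        static s32 example_srq_refill(struct tag_cqm_queue *srq, u32 count)
        {
                u32 i;

                for (i = 0; i < count; i++) {
                        if (cqm_object_share_recv_queue_add_container(srq) != CQM_SUCCESS)
                                return CQM_FAIL;        /* new container could not be linked */
                }
                return CQM_SUCCESS;
        }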
+ */ + for (i = 1; i < container_num; i++) { + ret = cqm_container_create(&qinfo->common.object, NULL, true); + if (ret == CQM_FAIL) { + cqm_container_free(common->head_container, NULL, + &qinfo->common); + return CQM_FAIL; + } + } + + return CQM_SUCCESS; +} + +/** + * Prototype : cqm_share_recv_queue_create + * Description : Create SRQ(share receive queue) + * Input : struct tag_cqm_object *object + * Output : None + * Return Value : s32 + * 1.Date : 2016/1/27 + * Modification : Created function + */ +s32 cqm_share_recv_queue_create(struct tag_cqm_object *object) +{ + struct tag_cqm_queue *common = container_of(object, struct tag_cqm_queue, object); + struct tag_cqm_nonrdma_qinfo *qinfo = container_of(common, struct tag_cqm_nonrdma_qinfo, + common); + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)object->cqm_handle; + struct tag_cqm_toe_private_capability *toe_own_cap = &cqm_handle->toe_own_capability; + struct tag_cqm_func_capability *func_cap = &cqm_handle->func_capability; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_bitmap *bitmap = NULL; + u32 step; + s32 ret; + + /* 1. Create srq container, including initializing the link wqe. */ + ret = cqm_srq_container_init(object); + if (ret == CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_srq_container_init)); + return CQM_FAIL; + } + + /* 2. Create srq ctx: SRQ CTX is directly delivered by the driver to the + * chip memory area through the cmdq channel, and no CLA table + * management is required. Therefore, the CQM applies for only one empty + * buffer for the driver. + */ + /* bitmap applies for index */ + bitmap = &toe_own_cap->srqc_bitmap; + qinfo->index_count = (ALIGN(qinfo->q_ctx_size, + toe_own_cap->toe_srqc_basic_size)) / + toe_own_cap->toe_srqc_basic_size; + /* align with 2 as the upper bound */ + step = ALIGN(toe_own_cap->toe_srqc_number, 2); + qinfo->common.index = cqm_bitmap_alloc(bitmap, step, qinfo->index_count, + func_cap->xid_alloc_mode); + if (qinfo->common.index >= bitmap->max_num) { + cqm_err(handle->dev_hdl, "Srq create: queue index %u exceeds max_num %u\n", + qinfo->common.index, bitmap->max_num); + goto err1; + } + qinfo->common.index += toe_own_cap->toe_srqc_start_id; + + /* apply for buffer for SRQC */ + common->q_ctx_vaddr = kmalloc(qinfo->q_ctx_size, + GFP_KERNEL | __GFP_ZERO); + if (!common->q_ctx_vaddr) + goto err2; + return CQM_SUCCESS; + +err2: + cqm_bitmap_free(bitmap, + qinfo->common.index - toe_own_cap->toe_srqc_start_id, + qinfo->index_count); +err1: + cqm_container_free(common->head_container, common->tail_container, + &qinfo->common); + return CQM_FAIL; +} + +/** + * Prototype : cqm_srq_used_rq_delete + * Description : Delete RQ in TOE SRQ mode. + * Input : struct tag_cqm_object *object + * Output : None + * Return Value : void + * 1.Date : 2016/5/19 + * Modification : Created function + */ +static void cqm_srq_used_rq_delete(const struct tag_cqm_object *object) +{ + struct tag_cqm_queue *common = container_of(object, struct tag_cqm_queue, object); + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)(common->object.cqm_handle); + struct tag_cqm_nonrdma_qinfo *qinfo = container_of(common, struct tag_cqm_nonrdma_qinfo, + common); + u32 link_wqe_offset = qinfo->wqe_per_buf * qinfo->wqe_size; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_srq_linkwqe *srq_link_wqe = NULL; + dma_addr_t addr; + + /* Currently, the SRQ solution does not support RQ initialization + * without mounting container. 
+ * As a result, RQ resources are released incorrectly. + * Temporary workaround: Only one container is mounted during RQ + * initialization and only one container is released + * during resource release. + */ + if (unlikely(!common->head_container)) { + pr_err("[CQM]%s: Rq del: rq has no contianer to release\n", __func__); + return; + } + + /* 1. Obtain current container pa from the link wqe table and + * cancel the mapping. + */ + srq_link_wqe = (struct tag_cqm_srq_linkwqe *)(common->head_container + link_wqe_offset); + /* Only the link wqe part needs to be converted. */ + cqm_swab32((u8 *)(srq_link_wqe), sizeof(struct tag_cqm_linkwqe) >> CQM_DW_SHIFT); + + addr = CQM_ADDR_COMBINE(srq_link_wqe->current_buffer_gpa_h, + srq_link_wqe->current_buffer_gpa_l); + if (addr == 0) { + cqm_err(handle->dev_hdl, "Rq del: buffer physical addr is null\n"); + return; + } + pci_unmap_single(cqm_handle->dev, addr, qinfo->container_size, + PCI_DMA_BIDIRECTIONAL); + + /* 2. Obtain the container va through the linkwqe and release. */ + addr = CQM_ADDR_COMBINE(srq_link_wqe->current_buffer_addr_h, + srq_link_wqe->current_buffer_addr_l); + if (addr == 0) { + cqm_err(handle->dev_hdl, "Rq del: buffer virtual addr is null\n"); + return; + } + kfree((void *)addr); +} + +/** + * Prototype : cqm_share_recv_queue_delete + * Description : The SRQ object is deleted. Delete only containers that are not + * used by SRQ, that is, containers from the head to the tail. + * The RQ releases containers that have been used by the RQ. + * Input : struct tag_cqm_object *object + * Output : None + * Return Value : void + * 1.Date : 2016/2/2 + * Modification : Created function + */ +void cqm_share_recv_queue_delete(struct tag_cqm_object *object) +{ + struct tag_cqm_queue *common = container_of(object, struct tag_cqm_queue, object); + struct tag_cqm_nonrdma_qinfo *qinfo = container_of(common, struct tag_cqm_nonrdma_qinfo, + common); + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)object->cqm_handle; + struct tag_cqm_bitmap *bitmap = &cqm_handle->toe_own_capability.srqc_bitmap; + u32 index = common->index - cqm_handle->toe_own_capability.toe_srqc_start_id; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + + /* 1. Wait for completion and ensure that all references to the QPC + * are complete. + */ + if (atomic_dec_and_test(&object->refcount) != 0) + complete(&object->free); + else + cqm_err(handle->dev_hdl, "Srq del: object is referred by others, has to wait for completion\n"); + + wait_for_completion(&object->free); + destroy_completion(&object->free); + /* 2. The corresponding index in the bitmap is cleared. */ + cqm_bitmap_free(bitmap, index, qinfo->index_count); + + /* 3. SRQC resource release */ + if (unlikely(!common->q_ctx_vaddr)) { + pr_err("[CQM]%s: Srq del: srqc kfree, context virtual addr is null\n", __func__); + return; + } + kfree(common->q_ctx_vaddr); + + /* 4. The SRQ queue is released. */ + cqm_container_free(common->head_container, NULL, &qinfo->common); +} + +#define obj_intern_if_section + +#define CQM_INDEX_INVALID_MASK 0x1FFFFFFFU +#define CQM_IDX_VALID_SHIFT 29 + +/** + * Prototype : cqm_qpc_mpt_bitmap_alloc + * Description : Apply for index from the bitmap when creating QPC or MPT. 
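The reference/completion teardown pattern used above (and again in the QPC/MPT delete path below) in generic form, for illustration only; it assumes the creator initialised the refcount to 1 and the completion before publishing the object:

        struct example_obj {
                atomic_t refcount;              /* initialised to 1 by the creator */
                struct completion free;
        };

        static void example_obj_put(struct example_obj *obj)
        {
                if (atomic_dec_and_test(&obj->refcount))
                        complete(&obj->free);   /* last reference gone */
        }

        static void example_obj_delete(struct example_obj *obj)
        {
                example_obj_put(obj);           /* drop the creator's reference */
                wait_for_completion(&obj->free);/* block until all users are done */
                /* now safe to free the backing resources */
        }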
+ * Input : struct tag_cqm_object *object + * struct tag_cqm_cla_table *cla_table + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/15 + * Modification : Created function + */ +static s32 cqm_qpc_mpt_bitmap_alloc(struct tag_cqm_object *object, + struct tag_cqm_cla_table *cla_table, bool low2bit_align_en) +{ + struct tag_cqm_qpc_mpt *common = container_of(object, struct tag_cqm_qpc_mpt, object); + struct tag_cqm_qpc_mpt_info *qpc_mpt_info = container_of(common, + struct tag_cqm_qpc_mpt_info, + common); + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)object->cqm_handle; + struct tag_cqm_func_capability *func_cap = &cqm_handle->func_capability; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_bitmap *bitmap = &cla_table->bitmap; + u32 index, count; + u32 xid = qpc_mpt_info->common.xid; + + count = (ALIGN(object->object_size, cla_table->obj_size)) / cla_table->obj_size; + qpc_mpt_info->index_count = count; + + if ((xid & CQM_INDEX_INVALID_MASK) == CQM_INDEX_INVALID_MASK) { + if (low2bit_align_en) { + if (count > 1) { + cqm_err(handle->dev_hdl, "Not support alloc multiple bits."); + return CQM_FAIL; + } + + index = cqm_bitmap_alloc_low2bit_align(bitmap, xid >> CQM_IDX_VALID_SHIFT, + func_cap->xid_alloc_mode); + } else { + /* apply for an index normally */ + index = cqm_bitmap_alloc(bitmap, 1U << (cla_table->z + 1), + count, func_cap->xid_alloc_mode); + } + + if (index < bitmap->max_num - bitmap->reserved_back) { + qpc_mpt_info->common.xid = index; + } else { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bitmap_alloc)); + return CQM_FAIL; + } + } else { + if ((hinic3_func_type((void *)handle) != TYPE_PPF) && + (hinic3_support_roce((void *)handle, NULL))) { + /* If PF is vroce control function, apply for index by xid */ + index = cqm_bitmap_alloc_by_xid(bitmap, count, xid); + } else { + /* apply for index to be reserved */ + index = cqm_bitmap_alloc_reserved(bitmap, count, xid); + } + + if (index != xid) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_bitmap_alloc_reserved)); + return CQM_FAIL; + } + } + + return CQM_SUCCESS; +} + +/** + * Prototype : cqm_qpc_mpt_create + * Description : Create QPC or MPT + * Input : struct tag_cqm_object *object + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/15 + * Modification : Created function + */ +s32 cqm_qpc_mpt_create(struct tag_cqm_object *object, bool low2bit_align_en) +{ + struct tag_cqm_qpc_mpt *common = container_of(object, struct tag_cqm_qpc_mpt, object); + struct tag_cqm_qpc_mpt_info *qpc_mpt_info = container_of(common, + struct tag_cqm_qpc_mpt_info, + common); + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)object->cqm_handle; + struct tag_cqm_bat_table *bat_table = &cqm_handle->bat_table; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_object_table *object_table = NULL; + struct tag_cqm_cla_table *cla_table = NULL; + struct tag_cqm_bitmap *bitmap = NULL; + u32 index, count; + + /* find the corresponding cla table */ + if (object->object_type == CQM_OBJECT_SERVICE_CTX) { + cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_QPC); + } else if (object->object_type == CQM_OBJECT_MPT) { + cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_MPT); + } else { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object->object_type)); + return CQM_FAIL; + } + + if (unlikely(!cla_table)) { + pr_err("[CQM]%s: cqm_cla_table_get is null\n", __func__); + return CQM_FAIL; + } + + /* Bitmap applies for index. 
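A one-line restatement of the xid convention applied by cqm_qpc_mpt_bitmap_alloc() above, using the mask defined earlier in this file: all 29 low bits set means "let the CQM pick an index", anything else requests that specific (reserved) index.

        static bool example_xid_is_auto(u32 xid)
        {
                return (xid & CQM_INDEX_INVALID_MASK) == CQM_INDEX_INVALID_MASK;
        }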
*/ + if (cqm_qpc_mpt_bitmap_alloc(object, cla_table, low2bit_align_en) == CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_qpc_mpt_bitmap_alloc)); + return CQM_FAIL; + } + + bitmap = &cla_table->bitmap; + index = qpc_mpt_info->common.xid; + count = qpc_mpt_info->index_count; + + /* Find the trunk page from the BAT/CLA and allocate the buffer. + * Ensure that the released buffer has been cleared. + */ + if (cla_table->alloc_static) + qpc_mpt_info->common.vaddr = cqm_cla_get_unlock(cqm_handle, + cla_table, + index, count, + &common->paddr); + else + qpc_mpt_info->common.vaddr = cqm_cla_get_lock(cqm_handle, + cla_table, index, + count, + &common->paddr); + + if (!qpc_mpt_info->common.vaddr) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_cla_get_lock)); + cqm_err(handle->dev_hdl, "Qpc mpt init: qpc mpt vaddr is null, cla_table->alloc_static=%d\n", + cla_table->alloc_static); + goto err1; + } + + /* Indexes are associated with objects, and FC is executed + * in the interrupt context. + */ + object_table = &cla_table->obj_table; + if (object->service_type == CQM_SERVICE_T_FC) { + if (cqm_object_table_insert(cqm_handle, object_table, index, + object, false) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_object_table_insert)); + goto err2; + } + } else { + if (cqm_object_table_insert(cqm_handle, object_table, index, + object, true) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_object_table_insert)); + goto err2; + } + } + + return CQM_SUCCESS; + +err2: + cqm_cla_put(cqm_handle, cla_table, index, count); +err1: + cqm_bitmap_free(bitmap, index, count); + return CQM_FAIL; +} + +/** + * Prototype : cqm_qpc_mpt_delete + * Description : Delete QPC or MPT. + * Input : struct tag_cqm_object *object + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void cqm_qpc_mpt_delete(struct tag_cqm_object *object) +{ + struct tag_cqm_qpc_mpt *common = container_of(object, struct tag_cqm_qpc_mpt, object); + struct tag_cqm_qpc_mpt_info *qpc_mpt_info = container_of(common, + struct tag_cqm_qpc_mpt_info, + common); + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)object->cqm_handle; + struct tag_cqm_bat_table *bat_table = &cqm_handle->bat_table; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_object_table *object_table = NULL; + struct tag_cqm_cla_table *cla_table = NULL; + u32 count = qpc_mpt_info->index_count; + u32 index = qpc_mpt_info->common.xid; + struct tag_cqm_bitmap *bitmap = NULL; + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_qpc_mpt_delete_cnt); + + /* find the corresponding cla table */ + /* Todo */ + if (object->object_type == CQM_OBJECT_SERVICE_CTX) { + cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_QPC); + } else if (object->object_type == CQM_OBJECT_MPT) { + cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_MPT); + } else { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(object->object_type)); + return; + } + + if (unlikely(!cla_table)) { + pr_err("[CQM]%s: cqm_cla_table_get_qpc return failure\n", __func__); + return; + } + + /* disassociate index and object */ + object_table = &cla_table->obj_table; + if (object->service_type == CQM_SERVICE_T_FC) + cqm_object_table_remove(cqm_handle, object_table, index, object, + false); + else + cqm_object_table_remove(cqm_handle, object_table, index, object, + true); + + /* wait for completion to ensure that all references to + * the QPC are complete + */ + if (atomic_dec_and_test(&object->refcount) 
!= 0) + complete(&object->free); + else + cqm_err(handle->dev_hdl, "Qpc mpt del: object is referred by others, has to wait for completion\n"); + + /* Static QPC allocation must be non-blocking. + * Services ensure that the QPC is referenced + * when the QPC is deleted. + */ + if (!cla_table->alloc_static) + wait_for_completion(&object->free); + + /* VMware FC need explicitly deinit spin_lock in completion */ + destroy_completion(&object->free); + + /* release qpc buffer */ + cqm_cla_put(cqm_handle, cla_table, index, count); + + /* release the index to the bitmap */ + bitmap = &cla_table->bitmap; + cqm_bitmap_free(bitmap, index, count); +} + +/** + * Prototype : cqm_linkwqe_fill + * Description : Used to organize the queue buffer of non-RDMA services and + * fill the link wqe. + * Input : wqe_per_buf: Linkwqe is not included. + * wqe_number: Linkwqe is not included. + * tail: true - The linkwqe must be at the end of the page; + * false - The linkwqe can be not at the end of the page. + * Output : None + * Return Value : void + * 1.Date : 2015/6/15 + * Modification : Created function + */ +static void cqm_linkwqe_fill(struct tag_cqm_buf *buf, u32 wqe_per_buf, u32 wqe_size, + u32 wqe_number, bool tail, u8 link_mode) +{ + struct tag_cqm_linkwqe_128B *linkwqe = NULL; + struct tag_cqm_linkwqe *wqe = NULL; + dma_addr_t addr; + u8 *tmp = NULL; + u8 *va = NULL; + u32 i; + + /* The linkwqe of other buffer except the last buffer + * is directly filled to the tail. + */ + for (i = 0; i < buf->buf_number; i++) { + va = (u8 *)(buf->buf_list[i].va); + + if (i != (buf->buf_number - 1)) { + wqe = (struct tag_cqm_linkwqe *)(va + (u32)(wqe_size * wqe_per_buf)); + wqe->wf = CQM_WQE_WF_LINK; + wqe->ctrlsl = CQM_LINK_WQE_CTRLSL_VALUE; + wqe->lp = CQM_LINK_WQE_LP_INVALID; + /* The valid value of link wqe needs to be set to 1. + * Each service ensures that o-bit=1 indicates that + * link wqe is valid and o-bit=0 indicates that + * link wqe is invalid. + */ + wqe->o = CQM_LINK_WQE_OWNER_VALID; + addr = buf->buf_list[(u32)(i + 1)].pa; + wqe->next_page_gpa_h = CQM_ADDR_HI(addr); + wqe->next_page_gpa_l = CQM_ADDR_LW(addr); + } else { /* linkwqe special padding of the last buffer */ + if (tail) { + /* must be filled at the end of the page */ + tmp = va + (u32)(wqe_size * wqe_per_buf); + wqe = (struct tag_cqm_linkwqe *)tmp; + } else { + /* The last linkwqe is filled + * following the last wqe. + */ + tmp = va + (u32)(wqe_size * (wqe_number - wqe_per_buf * + (buf->buf_number - 1))); + wqe = (struct tag_cqm_linkwqe *)tmp; + } + wqe->wf = CQM_WQE_WF_LINK; + wqe->ctrlsl = CQM_LINK_WQE_CTRLSL_VALUE; + + /* In link mode, the last link WQE is invalid; + * In ring mode, the last link wqe is valid, pointing to + * the home page, and the lp is set. + */ + if (link_mode == CQM_QUEUE_LINK_MODE) { + wqe->o = CQM_LINK_WQE_OWNER_INVALID; + } else { + /* The lp field of the last link_wqe is set to + * 1, indicating that the meaning of the o-bit + * is reversed. + */ + wqe->lp = CQM_LINK_WQE_LP_VALID; + wqe->o = CQM_LINK_WQE_OWNER_VALID; + addr = buf->buf_list[0].pa; + wqe->next_page_gpa_h = CQM_ADDR_HI(addr); + wqe->next_page_gpa_l = CQM_ADDR_LW(addr); + } + } + + if (wqe_size == CQM_LINKWQE_128B) { + /* After the B800 version, the WQE obit scheme is + * changed. The 64B bits before and after the 128B WQE + * need to be assigned a value: + * ifoe the 63rd bit from the end of the last 64B is + * obit; + * toe the 157th bit from the end of the last 64B is + * obit. 
+ */ + linkwqe = (struct tag_cqm_linkwqe_128B *)wqe; + linkwqe->second64B.third_16B.bs.toe_o = CQM_LINK_WQE_OWNER_VALID; + linkwqe->second64B.forth_16B.bs.ifoe_o = CQM_LINK_WQE_OWNER_VALID; + + /* shift 2 bits by right to get length of dw(4B) */ + cqm_swab32((u8 *)wqe, sizeof(struct tag_cqm_linkwqe_128B) >> 2); + } else { + /* shift 2 bits by right to get length of dw(4B) */ + cqm_swab32((u8 *)wqe, sizeof(struct tag_cqm_linkwqe) >> 2); + } + } +} + +static int cqm_nonrdma_queue_ctx_create_scq(struct tag_cqm_object *object) +{ + struct tag_cqm_queue *common = container_of(object, struct tag_cqm_queue, object); + struct tag_cqm_nonrdma_qinfo *qinfo = container_of(common, struct tag_cqm_nonrdma_qinfo, + common); + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)object->cqm_handle; + struct tag_cqm_bat_table *bat_table = &cqm_handle->bat_table; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_object_table *object_table = NULL; + struct tag_cqm_cla_table *cla_table = NULL; + struct tag_cqm_bitmap *bitmap = NULL; + bool bh = false; + + /* find the corresponding cla table */ + cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_SCQC); + if (!cla_table) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(nonrdma_cqm_cla_table_get)); + return CQM_FAIL; + } + + /* bitmap applies for index */ + bitmap = &cla_table->bitmap; + qinfo->index_count = (ALIGN(qinfo->q_ctx_size, cla_table->obj_size)) / cla_table->obj_size; + qinfo->common.index = cqm_bitmap_alloc(bitmap, 1U << (cla_table->z + 1), + qinfo->index_count, + cqm_handle->func_capability.xid_alloc_mode); + if (qinfo->common.index >= bitmap->max_num) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(nonrdma_cqm_bitmap_alloc)); + return CQM_FAIL; + } + + /* find the trunk page from BAT/CLA and allocate the buffer */ + common->q_ctx_vaddr = cqm_cla_get_lock(cqm_handle, cla_table, qinfo->common.index, + qinfo->index_count, &common->q_ctx_paddr); + if (!common->q_ctx_vaddr) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(nonrdma_cqm_cla_get_lock)); + cqm_bitmap_free(bitmap, qinfo->common.index, qinfo->index_count); + return CQM_FAIL; + } + + /* index and object association */ + object_table = &cla_table->obj_table; + bh = ((object->service_type == CQM_SERVICE_T_FC) ? 
false : true); + if (cqm_object_table_insert(cqm_handle, object_table, qinfo->common.index, object, + bh) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(nonrdma_cqm_object_table_insert)); + cqm_cla_put(cqm_handle, cla_table, qinfo->common.index, qinfo->index_count); + cqm_bitmap_free(bitmap, qinfo->common.index, qinfo->index_count); + + return CQM_FAIL; + } + + return 0; +} + +static s32 cqm_nonrdma_queue_ctx_create(struct tag_cqm_object *object) +{ + struct tag_cqm_queue *common = container_of(object, struct tag_cqm_queue, object); + struct tag_cqm_nonrdma_qinfo *qinfo = container_of(common, struct tag_cqm_nonrdma_qinfo, + common); + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)object->cqm_handle; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + s32 shift; + int ret; + + if (object->object_type == CQM_OBJECT_NONRDMA_SRQ) { + shift = cqm_shift(qinfo->q_ctx_size); + common->q_ctx_vaddr = cqm_kmalloc_align(qinfo->q_ctx_size, + GFP_KERNEL | __GFP_ZERO, + (u16)shift); + if (!common->q_ctx_vaddr) + return CQM_FAIL; + + common->q_ctx_paddr = pci_map_single(cqm_handle->dev, common->q_ctx_vaddr, + qinfo->q_ctx_size, PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(cqm_handle->dev, common->q_ctx_paddr) != 0) { + cqm_err(handle->dev_hdl, CQM_MAP_FAIL(q_ctx_vaddr)); + cqm_kfree_align(common->q_ctx_vaddr); + common->q_ctx_vaddr = NULL; + return CQM_FAIL; + } + } else if (object->object_type == CQM_OBJECT_NONRDMA_SCQ) { + ret = cqm_nonrdma_queue_ctx_create_scq(object); + if (ret != 0) + return ret; + } + + return CQM_SUCCESS; +} + +/** + * Prototype : cqm_nonrdma_queue_create + * Description : Create a queue for non-RDMA services. + * Input : struct tag_cqm_object *object + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/15 + * Modification : Created function + */ +s32 cqm_nonrdma_queue_create(struct tag_cqm_object *object) +{ + struct tag_cqm_queue *common = container_of(object, struct tag_cqm_queue, object); + struct tag_cqm_nonrdma_qinfo *qinfo = container_of(common, struct tag_cqm_nonrdma_qinfo, + common); + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)object->cqm_handle; + struct tag_cqm_service *service = cqm_handle->service + object->service_type; + struct tag_cqm_buf *q_room_buf = &common->q_room_buf_1; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + u32 wqe_number = qinfo->common.object.object_size; + u32 wqe_size = qinfo->wqe_size; + u32 order = service->buf_order; + u32 buf_number, buf_size; + bool tail = false; /* determine whether the linkwqe is at the end of the page */ + + /* When creating a CQ/SCQ queue, the page size is 4 KB, + * the linkwqe must be at the end of the page. + */ + if (object->object_type == CQM_OBJECT_NONRDMA_EMBEDDED_CQ || + object->object_type == CQM_OBJECT_NONRDMA_SCQ) { + /* depth: 2^n-aligned; depth range: 256-32 K */ + if (wqe_number < CQM_CQ_DEPTH_MIN || + wqe_number > CQM_CQ_DEPTH_MAX) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(wqe_number)); + return CQM_FAIL; + } + if (!cqm_check_align(wqe_number)) { + cqm_err(handle->dev_hdl, "Nonrdma queue alloc: wqe_number is not align on 2^n\n"); + return CQM_FAIL; + } + + order = CQM_4K_PAGE_ORDER; /* wqe page 4k */ + tail = true; /* The linkwqe must be at the end of the page. */ + buf_size = CQM_4K_PAGE_SIZE; + } else { + buf_size = (u32)(PAGE_SIZE << order); + } + + /* Calculate the total number of buffers required, + * -1 indicates that the link wqe in a buffer is deducted. 
+ */ + qinfo->wqe_per_buf = (buf_size / wqe_size) - 1; + /* number of linkwqes that are included in the depth transferred + * by the service + */ + buf_number = ALIGN((wqe_size * wqe_number), buf_size) / buf_size; + + /* apply for buffer */ + q_room_buf->buf_number = buf_number; + q_room_buf->buf_size = buf_size; + q_room_buf->page_number = buf_number << order; + if (cqm_buf_alloc(cqm_handle, q_room_buf, false) == CQM_FAIL) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(cqm_buf_alloc)); + return CQM_FAIL; + } + /* fill link wqe, wqe_number - buf_number is the number of wqe without + * link wqe + */ + cqm_linkwqe_fill(q_room_buf, qinfo->wqe_per_buf, wqe_size, + wqe_number - buf_number, tail, + common->queue_link_mode); + + /* create queue header */ + qinfo->common.q_header_vaddr = cqm_kmalloc_align(sizeof(struct tag_cqm_queue_header), + GFP_KERNEL | __GFP_ZERO, + CQM_QHEAD_ALIGN_ORDER); + if (!qinfo->common.q_header_vaddr) + goto err1; + + common->q_header_paddr = pci_map_single(cqm_handle->dev, + qinfo->common.q_header_vaddr, + sizeof(struct tag_cqm_queue_header), + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(cqm_handle->dev, common->q_header_paddr) != 0) { + cqm_err(handle->dev_hdl, CQM_MAP_FAIL(q_header_vaddr)); + goto err2; + } + + /* create queue ctx */ + if (cqm_nonrdma_queue_ctx_create(object) == CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_nonrdma_queue_ctx_create)); + goto err3; + } + + return CQM_SUCCESS; + +err3: + pci_unmap_single(cqm_handle->dev, common->q_header_paddr, + sizeof(struct tag_cqm_queue_header), PCI_DMA_BIDIRECTIONAL); +err2: + cqm_kfree_align(qinfo->common.q_header_vaddr); + qinfo->common.q_header_vaddr = NULL; +err1: + cqm_buf_free(q_room_buf, cqm_handle); + return CQM_FAIL; +} + +/** + * Prototype : cqm_nonrdma_queue_delete + * Description : Delete the queues of non-RDMA services. + * Input : struct tag_cqm_object *object + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void cqm_nonrdma_queue_delete(struct tag_cqm_object *object) +{ + struct tag_cqm_queue *common = container_of(object, struct tag_cqm_queue, object); + struct tag_cqm_nonrdma_qinfo *qinfo = container_of(common, struct tag_cqm_nonrdma_qinfo, + common); + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)object->cqm_handle; + struct tag_cqm_bat_table *bat_table = &cqm_handle->bat_table; + struct tag_cqm_buf *q_room_buf = &common->q_room_buf_1; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_object_table *object_table = NULL; + struct tag_cqm_cla_table *cla_table = NULL; + struct tag_cqm_bitmap *bitmap = NULL; + u32 index = qinfo->common.index; + u32 count = qinfo->index_count; + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_nonrdma_queue_delete_cnt); + + /* The SCQ has an independent SCQN association. 
*/ + if (object->object_type == CQM_OBJECT_NONRDMA_SCQ) { + cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_SCQC); + if (unlikely(!cla_table)) { + pr_err("[CQM]%s: cqm_cla_table_get_queue return failure\n", __func__); + return; + } + + /* disassociate index and object */ + object_table = &cla_table->obj_table; + if (object->service_type == CQM_SERVICE_T_FC) + cqm_object_table_remove(cqm_handle, object_table, index, + object, false); + else + cqm_object_table_remove(cqm_handle, object_table, index, + object, true); + } + + /* wait for completion to ensure that all references to + * the QPC are complete + */ + if (atomic_dec_and_test(&object->refcount) != 0) + complete(&object->free); + else + cqm_err(handle->dev_hdl, "Nonrdma queue del: object is referred by others, has to wait for completion\n"); + + wait_for_completion(&object->free); + destroy_completion(&object->free); + + /* If the q header exists, release. */ + if (qinfo->common.q_header_vaddr) { + pci_unmap_single(cqm_handle->dev, common->q_header_paddr, + sizeof(struct tag_cqm_queue_header), + PCI_DMA_BIDIRECTIONAL); + + cqm_kfree_align(qinfo->common.q_header_vaddr); + qinfo->common.q_header_vaddr = NULL; + } + + /* RQ deletion in TOE SRQ mode */ + if (common->queue_link_mode == CQM_QUEUE_TOE_SRQ_LINK_MODE) { + cqm_dbg("Nonrdma queue del: delete srq used rq\n"); + cqm_srq_used_rq_delete(&common->object); + } else { + /* If q room exists, release. */ + cqm_buf_free(q_room_buf, cqm_handle); + } + /* SRQ and SCQ have independent CTXs, released here. */ + if (object->object_type == CQM_OBJECT_NONRDMA_SRQ) { + /* The CTX of the nonrdma SRQ is + * allocated independently. + */ + if (common->q_ctx_vaddr) { + pci_unmap_single(cqm_handle->dev, common->q_ctx_paddr, + qinfo->q_ctx_size, + PCI_DMA_BIDIRECTIONAL); + + cqm_kfree_align(common->q_ctx_vaddr); + common->q_ctx_vaddr = NULL; + } + } else if (object->object_type == CQM_OBJECT_NONRDMA_SCQ) { + /* The CTX of the nonrdma SCQ is managed by BAT/CLA.
*/ + cqm_cla_put(cqm_handle, cla_table, index, count); + + /* release the index to the bitmap */ + bitmap = &cla_table->bitmap; + cqm_bitmap_free(bitmap, index, count); + } +} + +static s32 cqm_rdma_queue_ctx_create(struct tag_cqm_object *object) +{ + struct tag_cqm_queue *common = container_of(object, struct tag_cqm_queue, object); + struct tag_cqm_rdma_qinfo *qinfo = container_of(common, struct tag_cqm_rdma_qinfo, + common); + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)object->cqm_handle; + struct tag_cqm_bat_table *bat_table = &cqm_handle->bat_table; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_object_table *object_table = NULL; + struct tag_cqm_cla_table *cla_table = NULL; + struct tag_cqm_bitmap *bitmap = NULL; + u32 index; + + if (object->object_type == CQM_OBJECT_RDMA_SRQ || + object->object_type == CQM_OBJECT_RDMA_SCQ) { + if (object->object_type == CQM_OBJECT_RDMA_SRQ) + cla_table = cqm_cla_table_get(bat_table, + CQM_BAT_ENTRY_T_SRQC); + else + cla_table = cqm_cla_table_get(bat_table, + CQM_BAT_ENTRY_T_SCQC); + + if (!cla_table) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(rdma_cqm_cla_table_get)); + return CQM_FAIL; + } + + /* bitmap applies for index */ + bitmap = &cla_table->bitmap; + if (qinfo->common.index == CQM_INDEX_INVALID) { + qinfo->index_count = (ALIGN(qinfo->q_ctx_size, + cla_table->obj_size)) / + cla_table->obj_size; + qinfo->common.index = + cqm_bitmap_alloc(bitmap, 1U << (cla_table->z + 1), + qinfo->index_count, + cqm_handle->func_capability.xid_alloc_mode); + if (qinfo->common.index >= bitmap->max_num) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(rdma_cqm_bitmap_alloc)); + return CQM_FAIL; + } + } else { + /* apply for reserved index */ + qinfo->index_count = (ALIGN(qinfo->q_ctx_size, cla_table->obj_size)) / + cla_table->obj_size; + index = cqm_bitmap_alloc_reserved(bitmap, qinfo->index_count, + qinfo->common.index); + if (index != qinfo->common.index) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_bitmap_alloc_reserved)); + return CQM_FAIL; + } + } + + /* find the trunk page from BAT/CLA and allocate the buffer */ + qinfo->common.q_ctx_vaddr = + cqm_cla_get_lock(cqm_handle, cla_table, qinfo->common.index, + qinfo->index_count, &qinfo->common.q_ctx_paddr); + if (!qinfo->common.q_ctx_vaddr) { + cqm_err(handle->dev_hdl, CQM_FUNCTION_FAIL(rdma_cqm_cla_get_lock)); + cqm_bitmap_free(bitmap, qinfo->common.index, qinfo->index_count); + return CQM_FAIL; + } + + /* associate index and object */ + object_table = &cla_table->obj_table; + if (cqm_object_table_insert(cqm_handle, object_table, qinfo->common.index, object, + true) != CQM_SUCCESS) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(rdma_cqm_object_table_insert)); + cqm_cla_put(cqm_handle, cla_table, qinfo->common.index, + qinfo->index_count); + cqm_bitmap_free(bitmap, qinfo->common.index, qinfo->index_count); + return CQM_FAIL; + } + } + + return CQM_SUCCESS; +} + +/** + * Prototype : cqm_rdma_queue_create + * Description : Create rdma queue. 
+ * Input : struct tag_cqm_object *object + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/15 + * Modification : Created function + */ +s32 cqm_rdma_queue_create(struct tag_cqm_object *object) +{ + struct tag_cqm_queue *common = container_of(object, struct tag_cqm_queue, object); + struct tag_cqm_rdma_qinfo *qinfo = container_of(common, struct tag_cqm_rdma_qinfo, + common); + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)object->cqm_handle; + struct tag_cqm_service *service = cqm_handle->service + object->service_type; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_buf *q_room_buf = NULL; + u32 order = service->buf_order; + u32 buf_size = (u32)(PAGE_SIZE << order); + + if (qinfo->room_header_alloc) { + /* apply for queue room buffer */ + if (qinfo->common.current_q_room == CQM_RDMA_Q_ROOM_1) + q_room_buf = &qinfo->common.q_room_buf_1; + else + q_room_buf = &qinfo->common.q_room_buf_2; + + q_room_buf->buf_number = ALIGN(object->object_size, buf_size) / + buf_size; + q_room_buf->page_number = (q_room_buf->buf_number << order); + q_room_buf->buf_size = buf_size; + if (cqm_buf_alloc(cqm_handle, q_room_buf, true) == CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_buf_alloc)); + return CQM_FAIL; + } + + /* queue header */ + qinfo->common.q_header_vaddr = + cqm_kmalloc_align(sizeof(struct tag_cqm_queue_header), + GFP_KERNEL | __GFP_ZERO, + CQM_QHEAD_ALIGN_ORDER); + if (!qinfo->common.q_header_vaddr) + goto err1; + + qinfo->common.q_header_paddr = + pci_map_single(cqm_handle->dev, + qinfo->common.q_header_vaddr, + sizeof(struct tag_cqm_queue_header), + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(cqm_handle->dev, + qinfo->common.q_header_paddr) != 0) { + cqm_err(handle->dev_hdl, CQM_MAP_FAIL(q_header_vaddr)); + goto err2; + } + } + + /* queue ctx */ + if (cqm_rdma_queue_ctx_create(object) == CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_rdma_queue_ctx_create)); + goto err3; + } + + return CQM_SUCCESS; + +err3: + if (qinfo->room_header_alloc) + pci_unmap_single(cqm_handle->dev, qinfo->common.q_header_paddr, + sizeof(struct tag_cqm_queue_header), + PCI_DMA_BIDIRECTIONAL); +err2: + if (qinfo->room_header_alloc) { + cqm_kfree_align(qinfo->common.q_header_vaddr); + qinfo->common.q_header_vaddr = NULL; + } +err1: + if (qinfo->room_header_alloc) + cqm_buf_free(q_room_buf, cqm_handle); + + return CQM_FAIL; +} + +/** + * Prototype : cqm_rdma_queue_delete + * Description : Delete rdma queue.
+ * Input : struct tag_cqm_object *object + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void cqm_rdma_queue_delete(struct tag_cqm_object *object) +{ + struct tag_cqm_queue *common = container_of(object, struct tag_cqm_queue, object); + struct tag_cqm_rdma_qinfo *qinfo = container_of(common, struct tag_cqm_rdma_qinfo, + common); + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)object->cqm_handle; + struct tag_cqm_bat_table *bat_table = &cqm_handle->bat_table; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_object_table *object_table = NULL; + struct tag_cqm_cla_table *cla_table = NULL; + struct tag_cqm_buf *q_room_buf = NULL; + struct tag_cqm_bitmap *bitmap = NULL; + u32 index = qinfo->common.index; + u32 count = qinfo->index_count; + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_rdma_queue_delete_cnt); + + if (qinfo->common.current_q_room == CQM_RDMA_Q_ROOM_1) + q_room_buf = &qinfo->common.q_room_buf_1; + else + q_room_buf = &qinfo->common.q_room_buf_2; + + /* SCQ and SRQ are associated with independent SCQN and SRQN. */ + if (object->object_type == CQM_OBJECT_RDMA_SCQ) { + cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_SCQC); + if (unlikely(!cla_table)) { + pr_err("[CQM]%s: cqm_cla_table_get return failure\n", __func__); + return; + } + /* disassociate index and object */ + object_table = &cla_table->obj_table; + cqm_object_table_remove(cqm_handle, object_table, index, object, true); + } else if (object->object_type == CQM_OBJECT_RDMA_SRQ) { + cla_table = cqm_cla_table_get(bat_table, CQM_BAT_ENTRY_T_SRQC); + if (unlikely(!cla_table)) { + pr_err("[CQM]%s: cqm_cla_table_get return failure\n", __func__); + return; + } + /* disassociate index and object */ + object_table = &cla_table->obj_table; + cqm_object_table_remove(cqm_handle, object_table, index, object, true); + } + + /* wait for completion to make sure all references are complete */ + if (atomic_dec_and_test(&object->refcount) != 0) + complete(&object->free); + else + cqm_err(handle->dev_hdl, "Rdma queue del: object is referred by others, has to wait for completion\n"); + + wait_for_completion(&object->free); + destroy_completion(&object->free); + + /* If the q header exists, release. */ + if (qinfo->room_header_alloc && qinfo->common.q_header_vaddr) { + pci_unmap_single(cqm_handle->dev, qinfo->common.q_header_paddr, + sizeof(struct tag_cqm_queue_header), PCI_DMA_BIDIRECTIONAL); + + cqm_kfree_align(qinfo->common.q_header_vaddr); + qinfo->common.q_header_vaddr = NULL; + } + + /* If q room exists, release. */ + cqm_buf_free(q_room_buf, cqm_handle); + + /* SRQ and SCQ have independent CTX, released. */ + if (object->object_type == CQM_OBJECT_RDMA_SRQ || + object->object_type == CQM_OBJECT_RDMA_SCQ) { + cqm_cla_put(cqm_handle, cla_table, index, count); + + /* release the index to the bitmap */ + bitmap = &cla_table->bitmap; + cqm_bitmap_free(bitmap, index, count); + } +} + +/** + * Prototype : cqm_rdma_table_create + * Description : Create RDMA-related entries. 
+ * Input : struct tag_cqm_object *object + * Output : None + * Return Value : s32 + * 1.Date : 2015/4/15 + * Modification : Created function + */ +s32 cqm_rdma_table_create(struct tag_cqm_object *object) +{ + struct tag_cqm_mtt_rdmarc *common = container_of(object, struct tag_cqm_mtt_rdmarc, + object); + struct tag_cqm_rdma_table *rdma_table = container_of(common, struct tag_cqm_rdma_table, + common); + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)object->cqm_handle; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_buf *buf = &rdma_table->buf; + + /* Less than one page is allocated by actual size. + * RDMARC also requires physical continuity. + */ + if (object->object_size <= PAGE_SIZE || + object->object_type == CQM_OBJECT_RDMARC) { + buf->buf_number = 1; + buf->page_number = buf->buf_number; + buf->buf_size = object->object_size; + buf->direct.va = pci_alloc_consistent(cqm_handle->dev, + buf->buf_size, + &buf->direct.pa); + if (!buf->direct.va) + return CQM_FAIL; + } else { /* page-by-page alignment greater than one page */ + buf->buf_number = ALIGN(object->object_size, PAGE_SIZE) / + PAGE_SIZE; + buf->page_number = buf->buf_number; + buf->buf_size = PAGE_SIZE; + if (cqm_buf_alloc(cqm_handle, buf, true) == CQM_FAIL) { + cqm_err(handle->dev_hdl, + CQM_FUNCTION_FAIL(cqm_buf_alloc)); + return CQM_FAIL; + } + } + + rdma_table->common.vaddr = (u8 *)(buf->direct.va); + + return CQM_SUCCESS; +} + +/** + * Prototype : cqm_rdma_table_delete + * Description : Delete RDMA-related entries. + * Input : struct tag_cqm_object *object + * Output : None + * Return Value : void + * 1.Date : 2015/4/15 + * Modification : Created function + */ +void cqm_rdma_table_delete(struct tag_cqm_object *object) +{ + struct tag_cqm_mtt_rdmarc *common = container_of(object, struct tag_cqm_mtt_rdmarc, + object); + struct tag_cqm_rdma_table *rdma_table = container_of(common, struct tag_cqm_rdma_table, + common); + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)object->cqm_handle; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_buf *buf = &rdma_table->buf; + + atomic_inc(&handle->hw_stats.cqm_stats.cqm_rdma_table_delete_cnt); + + if (buf->buf_number == 1) { + if (buf->direct.va) { + pci_free_consistent(cqm_handle->dev, buf->buf_size, + buf->direct.va, buf->direct.pa); + buf->direct.va = NULL; + } + } else { + cqm_buf_free(buf, cqm_handle); + } +} + +/** + * Prototype : cqm_rdma_table_offset_addr + * Description : Obtain the address of the RDMA entry based on the offset. + * The offset is the index.
+ * Input : struct tag_cqm_object *object + * u32 offset + * dma_addr_t *paddr + * Output : None + * Return Value : u8 * + * 1.Date : 2015/4/15 + * Modification : Created function + */ +u8 *cqm_rdma_table_offset_addr(struct tag_cqm_object *object, u32 offset, dma_addr_t *paddr) +{ + struct tag_cqm_mtt_rdmarc *common = container_of(object, struct tag_cqm_mtt_rdmarc, + object); + struct tag_cqm_rdma_table *rdma_table = container_of(common, struct tag_cqm_rdma_table, + common); + struct tag_cqm_handle *cqm_handle = (struct tag_cqm_handle *)object->cqm_handle; + struct hinic3_hwdev *handle = cqm_handle->ex_handle; + struct tag_cqm_buf *buf = &rdma_table->buf; + struct tag_cqm_buf_list *buf_node = NULL; + u32 buf_id, buf_offset; + + if (offset < rdma_table->common.index_base || + ((offset - rdma_table->common.index_base) >= + rdma_table->common.index_number)) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(offset)); + return NULL; + } + + if (buf->buf_number == 1) { + buf_offset = (u32)((offset - rdma_table->common.index_base) * + (sizeof(dma_addr_t))); + + *paddr = buf->direct.pa + buf_offset; + return ((u8 *)(buf->direct.va)) + buf_offset; + } + + buf_id = (offset - rdma_table->common.index_base) / + (PAGE_SIZE / sizeof(dma_addr_t)); + buf_offset = (u32)((offset - rdma_table->common.index_base) - + (buf_id * (PAGE_SIZE / sizeof(dma_addr_t)))); + buf_offset = (u32)(buf_offset * sizeof(dma_addr_t)); + + if (buf_id >= buf->buf_number) { + cqm_err(handle->dev_hdl, CQM_WRONG_VALUE(buf_id)); + return NULL; + } + buf_node = buf->buf_list + buf_id; + *paddr = buf_node->pa + buf_offset; + + return ((u8 *)(buf->direct.va)) + + (offset - rdma_table->common.index_base) * (sizeof(dma_addr_t)); +} diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/cqm_object_intern.h b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_object_intern.h new file mode 100644 index 000000000..f82fda283 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/cqm/cqm_object_intern.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef CQM_OBJECT_INTERN_H +#define CQM_OBJECT_INTERN_H + +#include "ossl_knl.h" +#include "cqm_object.h" + +#define CQM_CQ_DEPTH_MAX 32768 +#define CQM_CQ_DEPTH_MIN 256 + +/* linkwqe */ +#define CQM_LINK_WQE_CTRLSL_VALUE 2 +#define CQM_LINK_WQE_LP_VALID 1 +#define CQM_LINK_WQE_LP_INVALID 0 +#define CQM_LINK_WQE_OWNER_VALID 1 +#define CQM_LINK_WQE_OWNER_INVALID 0 + +#define CQM_ADDR_COMBINE(high_addr, low_addr) \ + ((((dma_addr_t)(high_addr)) << 32) + ((dma_addr_t)(low_addr))) +#define CQM_ADDR_HI(addr) ((u32)((u64)(addr) >> 32)) +#define CQM_ADDR_LW(addr) ((u32)((u64)(addr) & 0xffffffff)) + +#define CQM_QPC_LAYOUT_TABLE_SIZE 16 +struct tag_cqm_qpc_layout_table_node { + u32 type; + u32 size; + u32 offset; + struct tag_cqm_object *object; +}; + +struct tag_cqm_qpc_mpt_info { + struct tag_cqm_qpc_mpt common; + /* Different service has different QPC. + * The large QPC/mpt will occupy some continuous indexes in bitmap. + */ + u32 index_count; + struct tag_cqm_qpc_layout_table_node qpc_layout_table[CQM_QPC_LAYOUT_TABLE_SIZE]; +}; + +struct tag_cqm_nonrdma_qinfo { + struct tag_cqm_queue common; + u32 wqe_size; + /* Number of WQEs in each buffer (excluding link WQEs) + * For SRQ, the value is the number of WQEs contained in a container. + */ + u32 wqe_per_buf; + u32 q_ctx_size; + /* When different services use CTXs of different sizes, + * a large CTX occupies multiple consecutive indexes in the bitmap. 
+ */ + u32 index_count; + + /* add for srq */ + u32 container_size; +}; + +struct tag_cqm_rdma_qinfo { + struct tag_cqm_queue common; + bool room_header_alloc; + /* This field is used to temporarily record the new object_size during + * CQ resize. + */ + u32 new_object_size; + u32 q_ctx_size; + /* When different services use CTXs of different sizes, + * a large CTX occupies multiple consecutive indexes in the bitmap. + */ + u32 index_count; +}; + +struct tag_cqm_rdma_table { + struct tag_cqm_mtt_rdmarc common; + struct tag_cqm_buf buf; +}; + +void cqm_container_free(u8 *srq_head_container, u8 *srq_tail_container, + struct tag_cqm_queue *common); +s32 cqm_container_create(struct tag_cqm_object *object, u8 **container_addr, bool link); +s32 cqm_share_recv_queue_create(struct tag_cqm_object *object); +void cqm_share_recv_queue_delete(struct tag_cqm_object *object); +s32 cqm_qpc_mpt_create(struct tag_cqm_object *object, bool low2bit_align_en); +void cqm_qpc_mpt_delete(struct tag_cqm_object *object); +s32 cqm_nonrdma_queue_create(struct tag_cqm_object *object); +void cqm_nonrdma_queue_delete(struct tag_cqm_object *object); +s32 cqm_rdma_queue_create(struct tag_cqm_object *object); +void cqm_rdma_queue_delete(struct tag_cqm_object *object); +s32 cqm_rdma_table_create(struct tag_cqm_object *object); +void cqm_rdma_table_delete(struct tag_cqm_object *object); +u8 *cqm_rdma_table_offset_addr(struct tag_cqm_object *object, u32 offset, dma_addr_t *paddr); + +#endif /* CQM_OBJECT_INTERN_H */ diff --git a/drivers/net/ethernet/huawei/hinic3/cqm/readme.txt b/drivers/net/ethernet/huawei/hinic3/cqm/readme.txt new file mode 100644 index 000000000..1e21b6660 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/cqm/readme.txt @@ -0,0 +1,3 @@ + +2021/02/25/10:35 gf ovs fake vf hash clear support, change comment +2019/03/28/15:17 wss provide stateful service queue and context management \ No newline at end of file diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_crm.h b/drivers/net/ethernet/huawei/hinic3/hinic3_crm.h index 98adaf057..195a5cf08 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_crm.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_crm.h @@ -4,7 +4,9 @@ #ifndef HINIC3_CRM_H #define HINIC3_CRM_H
-#define HINIC3_DBG +#include <linux/pci.h> + +#include "mpu_cmd_base_defs.h"
#define HINIC3_DRV_VERSION "" #define HINIC3_DRV_DESC "Intelligent Network Interface Card Driver" @@ -42,6 +44,7 @@ enum hinic3_service_type { SERVICE_T_PPA, SERVICE_T_CUSTOM, SERVICE_T_VROCE, + SERVICE_T_CRYPT, SERVICE_T_MAX,
/* Only used for interruption resource management, @@ -75,7 +78,9 @@ struct ppa_service_cap {
struct vbs_service_cap { u16 vbs_max_volq; - u16 rsvd1; + u8 vbs_main_pf_enable; + u8 vbs_vsock_pf_enable; + u8 vbs_fushion_queue_pf_enable; };
struct migr_service_cap { @@ -297,8 +302,8 @@ struct ovs_service_cap {
/* PF IPsec service resource structure defined */ struct dev_ipsec_svc_cap { - u32 max_sactxs; /* max IPsec SA context num */ - u16 max_cqs; /* max IPsec SCQC num */ + u32 max_sactxs; /* max IPsec SA context num */ + u16 max_cqs; /* max IPsec SCQC num */ u16 rsvd0; };
@@ -310,8 +315,8 @@ struct ipsec_service_cap {
/* Defines the IRQ information structure */ struct irq_info { - u16 msix_entry_idx; /* IRQ corresponding index number */ - u32 irq_id; /* the IRQ number from OS */ + u16 msix_entry_idx; /* IRQ corresponding index number */ + u32 irq_id; /* the IRQ number from OS */ };
struct interrupt_info { @@ -342,6 +347,11 @@ enum func_type { TYPE_UNKNOWN, };
+enum func_nic_state { + HINIC3_FUNC_NIC_DEL, + HINIC3_FUNC_NIC_ADD, +}; + struct hinic3_init_para { /* Record hinic_pcidev or NDIS_Adapter pointer address */ void *adapter_hdl; @@ -356,7 +366,7 @@ struct hinic3_init_para {
/* Configure virtual address, PF is bar1, VF is bar0/1 */ void *cfg_reg_base; - /* interrupt configuration register address, PF is bar2, VF is bar2/3 + /* interrupt configuration register address, PF is bar2, VF is bar2/3 */ void *intr_reg_base; /* for PF bar3 virtual address, if function is VF should set to NULL */ @@ -394,6 +404,7 @@ struct card_node { struct list_head node; struct list_head func_list; char chip_name[IFNAMSIZ]; + int chip_id; void *log_info; void *dbgtool_info; void *func_handle_array[MAX_FUNCTION_NUM]; @@ -522,6 +533,7 @@ enum hinic3_comm_event_type { EVENT_COMM_SRIOV_STATE_CHANGE, EVENT_COMM_CARD_REMOVE, EVENT_COMM_MGMT_WATCHDOG, + EVENT_COMM_MULTI_HOST_MGMT, };
enum hinic3_event_service_type { @@ -532,14 +544,26 @@ enum hinic3_event_service_type { };
#define HINIC3_SRV_EVENT_TYPE(svc, type) ((((u32)(svc)) << 16) | (type)) +#ifndef HINIC3_EVENT_DATA_SIZE +#define HINIC3_EVENT_DATA_SIZE 104 +#endif struct hinic3_event_info { - u16 service; /* enum hinic3_event_service_type */ + u16 service; /* enum hinic3_event_service_type */ u16 type; - u8 event_data[104]; + u8 event_data[HINIC3_EVENT_DATA_SIZE]; };
typedef void (*hinic3_event_handler)(void *handle, struct hinic3_event_info *event);
+struct hinic3_func_nic_state { + u8 state; + u8 rsvd0; + u16 func_idx; + + u8 vroce_flag; + u8 rsvd1[15]; +}; + /* * * @brief hinic3_event_register - register hardware event * @param dev: device pointer to hwdev @@ -840,6 +864,15 @@ void hinic3_shutdown_hwdev(void *hwdev); */ int hinic3_set_ppf_flr_type(void *hwdev, enum hinic3_ppf_flr_type flr_type);
+/* * + * @brief hinic3_set_ppf_tbl_hotreplace_flag - set os hotreplace flag in ppf function table + * @param hwdev: device pointer to hwdev + * @param flag : os hotreplace flag : 0-not in os hotreplace 1-in os hotreplace + * @retval zero: success + * @retval non-zero: failure + */ +int hinic3_set_ppf_tbl_hotreplace_flag(void *hwdev, u8 flag); + /* * * @brief hinic3_get_mgmt_version - get management cpu version * @param hwdev: device pointer to hwdev @@ -907,6 +940,13 @@ enum func_type hinic3_func_type(void *hwdev); */ bool hinic3_get_stateful_enable(void *hwdev);
+/* * + * @brief hinic3_get_timer_enable - get timer status + * @param hwdev: device pointer to hwdev + * @retval timer enable status + */ +bool hinic3_get_timer_enable(void *hwdev); + /* * + * @brief hinic3_host_oq_id_mask - get oq id + * @param hwdev: device pointer to hwdev @@ -1059,7 +1099,7 @@ int hinic3_get_card_present_state(void *hwdev, bool *card_present_state); * @retval zero: success * @retval non-zero: failure */ -int hinic3_func_rx_tx_flush(void *hwdev, u16 channel); +int hinic3_func_rx_tx_flush(void *hwdev, u16 channel, bool wait_io);
/* * * @brief hinic3_flush_mgmt_workq - when remove function should flush work queue @@ -1082,6 +1122,12 @@ u16 hinic3_intr_num(void *hwdev); */ u8 hinic3_flexq_en(void *hwdev);
+/* * + * @brief hinic3_get_fake_vf_info - get fake_vf info + */ +int hinic3_get_fake_vf_info(void *hwdev, u8 *fake_vf_vld, + u8 *page_bit, u8 *pf_start_bit, u8 *map_host_id); + /* * + * @brief hinic3_fault_event_report - report fault event + * @param hwdev: device pointer to hwdev @@ -1159,4 +1205,48 @@ int hinic3_set_host_migrate_enable(void *hwdev, u8 host_id, bool enable); */ int hinic3_get_host_migrate_enable(void *hwdev, u8 host_id, u8 *migrate_en);
+/* * + * @brief hinic3_is_slave_func - hwdev is slave func + * @param dev: device pointer to hwdev + * @param is_slave_func: slave func + * @retval zero: success + * @retval non-zero: failure + */ +int hinic3_is_slave_func(const void *hwdev, bool *is_slave_func); + +/* * + * @brief hinic3_is_master_func - hwdev is master func + * @param dev: device pointer to hwdev + * @param is_master_func: master func + * @retval zero: success + * @retval non-zero: failure + */ +int hinic3_is_master_func(const void *hwdev, bool *is_master_func); + +bool hinic3_is_multi_bm(void *hwdev); + +bool hinic3_is_slave_host(void *hwdev); + +bool hinic3_is_vm_slave_host(void *hwdev); + +bool hinic3_is_bm_slave_host(void *hwdev); + +bool hinic3_is_guest_vmsec_enable(void *hwdev); + +int hinic3_get_vfid_by_vfpci(void *hwdev, struct pci_dev *pdev, u16 *global_func_id); + +int hinic3_set_func_nic_state(void *hwdev, struct hinic3_func_nic_state *state); + +int hinic3_get_netdev_state(void *hwdev, u16 func_idx, int *opened); + +int hinic3_get_mhost_func_nic_enable(void *hwdev, u16 func_id, bool *en); + +int hinic3_get_dev_cap(void *hwdev); + +int hinic3_mbox_to_host_sync(void *hwdev, enum hinic3_mod_type mod, + u8 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout, u16 channel); + +int hinic3_get_func_vroce_enable(void *hwdev, u16 glb_func_idx, u8 *en); + #endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_dbg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_dbg.c index 305f7bfc3..40ff47384 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_dbg.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_dbg.c @@ -19,6 +19,8 @@ #include "hinic3_dcb.h" #include "hinic3_nic.h" #include "hinic3_mgmt_interface.h" +#include "mag_mpu_cmd.h" +#include "mag_cmd.h"
typedef int (*nic_driv_module)(struct hinic3_nic_dev *nic_dev, const void *buf_in, u32 in_size, diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw.h index 34888e3d3..ef8c62b36 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_hw.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw.h @@ -4,25 +4,24 @@ #ifndef HINIC3_HW_H #define HINIC3_HW_H
-#include "hinic3_comm_cmd.h" -#include "comm_msg_intf.h" -#include "comm_cmdq_intf.h" +#include "mpu_inband_cmd.h" +#include "mpu_inband_cmd_defs.h"
#include "hinic3_crm.h"
#ifndef BIG_ENDIAN -#define BIG_ENDIAN 0x4321 +#define BIG_ENDIAN 0x4321 #endif
#ifndef LITTLE_ENDIAN -#define LITTLE_ENDIAN 0x1234 +#define LITTLE_ENDIAN 0x1234 #endif
#ifdef BYTE_ORDER #undef BYTE_ORDER #endif /* X86 */ -#define BYTE_ORDER LITTLE_ENDIAN +#define BYTE_ORDER LITTLE_ENDIAN
/* to use 0-level CLA, page size must be: SQ 16B(wqe) * 64k(max_q_depth) */ #define HINIC3_DEFAULT_WQ_PAGE_SIZE 0x100000 @@ -127,7 +126,7 @@ typedef int (*hinic3_pf_recv_from_ppf_mbox_cb)(void *pri_handle, u16 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size);
/** - * @brief hinic3_aeq_register_hw_cb - register aeq hardware callback + * @brief hinic3_aeq_register_hw_cb - register aeq hardware callback * @param hwdev: device pointer to hwdev * @param event: event type * @param hwe_cb: callback function @@ -145,7 +144,7 @@ int hinic3_aeq_register_hw_cb(void *hwdev, void *pri_handle, void hinic3_aeq_unregister_hw_cb(void *hwdev, enum hinic3_aeq_type event);
/** - * @brief hinic3_aeq_register_swe_cb - register aeq soft event callback + * @brief hinic3_aeq_register_swe_cb - register aeq soft event callback * @param hwdev: device pointer to hwdev * @pri_handle: the pointer to private invoker device * @param event: event type @@ -164,7 +163,7 @@ int hinic3_aeq_register_swe_cb(void *hwdev, void *pri_handle, enum hinic3_aeq_sw void hinic3_aeq_unregister_swe_cb(void *hwdev, enum hinic3_aeq_sw_type event);
/** - * @brief hinic3_ceq_register_cb - register ceq callback + * @brief hinic3_ceq_register_cb - register ceq callback * @param hwdev: device pointer to hwdev * @param event: event type * @param callback: callback function @@ -514,7 +513,7 @@ int hinic3_api_csr_rd64(void *hwdev, u8 dest, u32 addr, u64 *val); * @retval zero: success * @retval non-zero: failure */ -int hinic3_dbg_get_hw_stats(const void *hwdev, u8 *hw_stats, const u16 *out_size); +int hinic3_dbg_get_hw_stats(const void *hwdev, u8 *hw_stats, const u32 *out_size);
/** * @brief hinic3_dbg_clear_hw_stats - clear hardware stats @@ -627,6 +626,23 @@ int hinic3_mbox_to_vf(void *hwdev, u16 vf_id, u8 mod, u16 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel);
+/** + * @brief hinic3_mbox_to_vf_no_ack - mbox message to vf no ack + * @param hwdev: device pointer to hwdev + * @param vf_id: vf index + * @param mod: mod type + * @param cmd: cmd + * @param buf_in: message buffer in + * @param in_size: in buffer size + * @param buf_out: message buffer out + * @param out_size: out buffer size + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int hinic3_mbox_to_vf_no_ack(void *hwdev, u16 vf_id, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u16 channel); + int hinic3_clp_to_mgmt(void *hwdev, u8 mod, u16 cmd, const void *buf_in, u16 in_size, void *buf_out, u16 *out_size); /** @@ -641,6 +657,20 @@ int hinic3_clp_to_mgmt(void *hwdev, u8 mod, u16 cmd, const void *buf_in, */ int hinic3_cmdq_async(void *hwdev, u8 mod, u8 cmd, struct hinic3_cmd_buf *buf_in, u16 channel);
+/** + * @brief hinic3_cmdq_async_cos - cmdq asynchronous message by cos + * @param hwdev: device pointer to hwdev + * @param mod: mod type + * @param cmd: cmd + * @param cos_id: cos id + * @param buf_in: message buffer in + * @param channel: channel id + * @retval zero: success + * @retval non-zero: failure + */ +int hinic3_cmdq_async_cos(void *hwdev, u8 mod, u8 cmd, u8 cos_id, + struct hinic3_cmd_buf *buf_in, u16 channel); + /** * @brief hinic3_cmdq_detail_resp - cmdq direct message response * @param hwdev: device pointer to hwdev @@ -820,6 +850,7 @@ int hinic3_get_ceq_page_phy_addr(void *hwdev, u16 q_id, int hinic3_set_ceq_irq_disable(void *hwdev, u16 q_id); int hinic3_get_ceq_info(void *hwdev, u16 q_id, struct hinic3_ceq_info *ceq_info);
+int hinic3_init_single_ceq_status(void *hwdev, u16 q_id); void hinic3_set_api_stop(void *hwdev);
int hinic3_activate_firmware(void *hwdev, u8 cfg_index); diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mag_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_mag_cfg.c index 4049e81ce..2fe808b83 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_mag_cfg.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mag_cfg.c @@ -17,7 +17,6 @@ #include "ossl_knl.h" #include "hinic3_crm.h" #include "hinic3_hw.h" -#include "mag_cmd.h" #include "hinic3_nic_io.h" #include "hinic3_nic_cfg.h" #include "hinic3_srv_nic.h" @@ -552,8 +551,7 @@ static void port_sfp_abs_event(void *hwdev, void *buf_in, u16 in_size,
rt_cmd = &nic_io->nic_cfg.rt_cmd; mutex_lock(&nic_io->nic_cfg.sfp_mutex); - memcpy(&rt_cmd->abs, sfp_abs, - sizeof(struct mag_cmd_get_xsfp_present)); + memcpy(&rt_cmd->abs, sfp_abs, sizeof(struct mag_cmd_get_xsfp_present)); rt_cmd->mpu_send_sfp_abs = true; mutex_unlock(&nic_io->nic_cfg.sfp_mutex); } diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h index c40b7d9be..522518df6 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h @@ -1,24 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Huawei HiNIC PCI Express Linux driver - * Copyright(c) 2017 Huawei Technologies Co., Ltd - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - */ - -#ifndef HINIC_MGMT_INTERFACE_H -#define HINIC_MGMT_INTERFACE_H +/* Copyright(c) 2024 Huawei Technologies Co., Ltd */ + +#ifndef NIC_MPU_CMD_DEFS_H +#define NIC_MPU_CMD_DEFS_H
#include "nic_cfg_comm.h" -#include "mgmt_msg_base.h" +#include "mpu_cmd_base_defs.h"
#ifndef ETH_ALEN #define ETH_ALEN 6 @@ -30,6 +17,9 @@ #define HINIC3_CMD_OP_ADD 1 #define HINIC3_CMD_OP_DEL 0
+#define NIC_TCAM_BLOCK_LARGE_NUM 256 +#define NIC_TCAM_BLOCK_LARGE_SIZE 16 + #ifndef BIT #define BIT(n) (1UL << (n)) #endif @@ -269,6 +259,50 @@ struct hinic3_cmd_cons_idx_attr { u64 ci_addr; };
+union sm_tbl_args { + struct { + u32 tbl_index; + u32 cnt; + u32 total_cnt; + } mac_table_arg; + struct { + u32 er_id; + u32 vlan_id; + } vlan_elb_table_arg; + struct { + u32 func_id; + } vlan_filter_arg; + struct { + u32 mc_id; + } mc_elb_arg; + struct { + u32 func_id; + } func_tbl_arg; + struct { + u32 port_id; + } port_tbl_arg; + struct { + u32 tbl_index; + u32 cnt; + u32 total_cnt; + } fdir_io_table_arg; + struct { + u32 tbl_index; + u32 cnt; + u32 total_cnt; + } flexq_table_arg; + u32 args[4]; +}; + +#define DFX_SM_TBL_BUF_MAX (768) + +struct nic_cmd_dfx_sm_table { + struct hinic3_mgmt_msg_head msg_head; + u32 tbl_type; + union sm_tbl_args args; + u8 tbl_buf[DFX_SM_TBL_BUF_MAX]; +}; + struct hinic3_cmd_vlan_offload { struct hinic3_mgmt_msg_head msg_head;
@@ -282,9 +316,9 @@ struct nic_cmd_capture_info { struct hinic3_mgmt_msg_head msg_head; u32 op_type; u32 func_port; - u32 is_en_trx; /* 也作为tx_rx */ - u32 offset_cos; /* 也作为cos */ - u32 data_vlan; /* 也作为vlan */ + u32 is_en_trx; + u32 offset_cos; + u32 data_vlan; };
struct hinic3_cmd_lro_config { @@ -403,10 +437,10 @@ struct hinic3_cmd_link_ksettings_info { u8 rsvd1[3];
u32 valid_bitmap; - u8 speed; /* enum nic_speed_level */ - u8 autoneg; /* 0 - off, 1 - on */ - u8 fec; /* 0 - RSFEC, 1 - BASEFEC, 2 - NOFEC */ - u8 rsvd2[21]; /* reserved for duplex, port, etc. */ + u8 speed; /* enum nic_speed_level */ + u8 autoneg; /* 0 - off, 1 - on */ + u8 fec; /* 0 - RSFEC, 1 - BASEFEC, 2 - NOFEC */ + u8 rsvd2[21]; /* reserved for duplex, port, etc. */ };
struct mpu_lt_info { @@ -527,34 +561,34 @@ struct hinic3_up_ets_cfg { /* delet */ u8 tc_prio[NIC_DCB_TC_MAX]; };
-#define CMD_QOS_ETS_COS_TC BIT(0) -#define CMD_QOS_ETS_TC_BW BIT(1) -#define CMD_QOS_ETS_COS_PRIO BIT(2) -#define CMD_QOS_ETS_COS_BW BIT(3) -#define CMD_QOS_ETS_TC_PRIO BIT(4) +#define CMD_QOS_ETS_COS_TC BIT(0) +#define CMD_QOS_ETS_TC_BW BIT(1) +#define CMD_QOS_ETS_COS_PRIO BIT(2) +#define CMD_QOS_ETS_COS_BW BIT(3) +#define CMD_QOS_ETS_TC_PRIO BIT(4) struct hinic3_cmd_ets_cfg { struct hinic3_mgmt_msg_head head;
u8 port_id; - u8 op_code; /* 1 - set, 0 - get */ + u8 op_code; /* 1 - set, 0 - get */ /* bit0 - cos_tc, bit1 - tc_bw, bit2 - cos_prio, bit3 - cos_bw, bit4 - tc_prio */ u8 cfg_bitmap; u8 rsvd;
u8 cos_tc[NIC_DCB_COS_MAX]; u8 tc_bw[NIC_DCB_TC_MAX]; - u8 cos_prio[NIC_DCB_COS_MAX]; /* 0 - DWRR, 1 - STRICT */ + u8 cos_prio[NIC_DCB_COS_MAX]; /* 0 - DWRR, 1 - STRICT */ u8 cos_bw[NIC_DCB_COS_MAX]; - u8 tc_prio[NIC_DCB_TC_MAX]; /* 0 - DWRR, 1 - STRICT */ + u8 tc_prio[NIC_DCB_TC_MAX]; /* 0 - DWRR, 1 - STRICT */ };
struct hinic3_cmd_set_dcb_state { struct hinic3_mgmt_msg_head head;
u16 func_id; - u8 op_code; /* 0 - get dcb state, 1 - set dcb state */ - u8 state; /* 0 - disable, 1 - enable dcb */ - u8 port_state; /* 0 - disable, 1 - enable dcb */ + u8 op_code; /* 0 - get dcb state, 1 - set dcb state */ + u8 state; /* 0 - disable, 1 - enable dcb */ + u8 port_state; /* 0 - disable, 1 - enable dcb */ u8 rsvd[7]; };
@@ -563,20 +597,20 @@ struct hinic3_cmd_set_pfc { struct hinic3_mgmt_msg_head head;
u8 port_id; - u8 op_code; /* 0:get 1: set pfc_en 2: set pfc_bitmap 3: set all */ - u8 pfc_en; /* pfc_en 和 pfc_bitmap 必须同时设置 */ + u8 op_code; /* 0:get 1: set pfc_en 2: set pfc_bitmap 3: set all */ + u8 pfc_en; /* pfc_en 和 pfc_bitmap 必须同时设置 */ u8 pfc_bitmap; u8 rsvd[4]; };
-#define CMD_QOS_PORT_TRUST BIT(0) -#define CMD_QOS_PORT_DFT_COS BIT(1) +#define CMD_QOS_PORT_TRUST BIT(0) +#define CMD_QOS_PORT_DFT_COS BIT(1) struct hinic3_cmd_qos_port_cfg { struct hinic3_mgmt_msg_head head;
u8 port_id; - u8 op_code; /* 0 - get, 1 - set */ - u8 cfg_bitmap; /* bit0 - trust, bit1 - dft_cos */ + u8 op_code; /* 0 - get, 1 - set */ + u8 cfg_bitmap; /* bit0 - trust, bit1 - dft_cos */ u8 rsvd0;
u8 trust; @@ -585,8 +619,8 @@ struct hinic3_cmd_qos_port_cfg { };
#define MAP_COS_MAX_NUM 8 -#define CMD_QOS_MAP_PCP2COS BIT(0) -#define CMD_QOS_MAP_DSCP2COS BIT(1) +#define CMD_QOS_MAP_PCP2COS BIT(0) +#define CMD_QOS_MAP_DSCP2COS BIT(1) struct hinic3_cmd_qos_map_cfg { struct hinic3_mgmt_msg_head head;
@@ -594,9 +628,9 @@ struct hinic3_cmd_qos_map_cfg { u8 cfg_bitmap; /* bit0 - pcp2cos, bit1 - dscp2cos */ u16 rsvd0;
- u8 pcp2cos[8]; /* 必须8个一起配置 */ - /* 配置dscp2cos时,若cos值设置为0xFF,MPU则忽略此dscp优先级的配置, - * 允许一次性配置多个dscp跟cos的映射关系 + u8 pcp2cos[8]; /* all 8 values must be configured together */ + /* If a dscp2cos entry is set to 0xFF, the MPU ignores the configuration for that DSCP priority; + * multiple DSCP-to-CoS mappings can be configured at a time. */ u8 dscp2cos[64]; u32 rsvd1[4]; @@ -625,7 +659,6 @@ struct hinic3_cmd_pause_config { u8 rsvd2[5]; };
-/* pfc风暴检测配置 */ struct nic_cmd_pause_inquiry_cfg { struct hinic3_mgmt_msg_head head;
@@ -633,27 +666,26 @@ struct nic_cmd_pause_inquiry_cfg {
u32 type; /* 1: set, 2: get */
- u32 rx_inquiry_pause_drop_pkts_en; /* rx 卸包使能 */ - u32 rx_inquiry_pause_period_ms; /* rx pause 检测周期 默认 200ms */ - u32 rx_inquiry_pause_times; /* rx pause 检测次数 默认1次 */ - /* rx pause 检测阈值 默认 PAUSE_FRAME_THD_10G/25G/40G/100 */ + u32 rx_inquiry_pause_drop_pkts_en; + u32 rx_inquiry_pause_period_ms; + u32 rx_inquiry_pause_times; + /* rx pause Detection Threshold, Default PAUSE_FRAME_THD_10G/25G/40G/100 */ u32 rx_inquiry_pause_frame_thd; - u32 rx_inquiry_tx_total_pkts; /* rx pause 检测tx收包总数 */ - - u32 tx_inquiry_pause_en; /* tx pause 检测使能 */ - u32 tx_inquiry_pause_period_ms; /* tx pause 检测周期 默认 200ms */ - u32 tx_inquiry_pause_times; /* tx pause 检测次数 默认 5次 */ - u32 tx_inquiry_pause_frame_thd; /* tx pause 检测阈值 */ - u32 tx_inquiry_rx_total_pkts; /* tx pause 检测rx收包总数 */ + u32 rx_inquiry_tx_total_pkts;
+ u32 tx_inquiry_pause_en; /* tx pause detection enable */ + u32 tx_inquiry_pause_period_ms; /* tx pause detection period, default 200ms */ + u32 tx_inquiry_pause_times; /* tx pause detection count, default 5 */ + u32 tx_inquiry_pause_frame_thd; /* tx pause detection threshold */ + u32 tx_inquiry_rx_total_pkts; u32 rsvd[4]; };
-/* pfc/pause风暴tx异常上报 */ +/* pfc/pause Storm TX exception reporting */ struct nic_cmd_tx_pause_notice { struct hinic3_mgmt_msg_head head;
- u32 tx_pause_except; /* 1: 异常,0: 正常 */ + u32 tx_pause_except; /* 1: abnormal, 0: normal */ u32 except_level; u32 rsvd; }; @@ -717,7 +749,6 @@ struct hinic3_cable_plug_event { u8 port_id; };
-/* MAC模块接口 */ struct nic_cmd_mac_info { struct hinic3_mgmt_msg_head head;
@@ -749,14 +780,14 @@ struct nic_cmd_set_fdir_status { u8 rsvd2; };
-#define HINIC3_TCAM_BLOCK_ENABLE 1 -#define HINIC3_TCAM_BLOCK_DISABLE 0 -#define HINIC3_MAX_TCAM_RULES_NUM 4096 +#define HINIC3_TCAM_BLOCK_ENABLE 1 +#define HINIC3_TCAM_BLOCK_DISABLE 0 +#define HINIC3_MAX_TCAM_RULES_NUM 4096
/* tcam block type, according to tcam block size */ enum { NIC_TCAM_BLOCK_TYPE_LARGE = 0, /* block_size: 16 */ - NIC_TCAM_BLOCK_TYPE_SMALL, /* block_size: 0 */ + NIC_TCAM_BLOCK_TYPE_SMALL, /* block_size: 0 */ NIC_TCAM_BLOCK_TYPE_MAX };
@@ -764,13 +795,14 @@ enum { struct nic_cmd_ctrl_tcam_block_in { struct hinic3_mgmt_msg_head head;
- u16 func_id; /* func_id */ - u8 alloc_en; /* 0: 释放分配的tcam block, 1: 申请新的tcam block */ - /* 0: 分配16 size 的tcam block, 1: 分配0 size的tcam block, 其他预留 */ + u16 func_id; /* func_id */ + u8 alloc_en; /* 0: Releases the allocated TCAM block. 1: Applies for a new TCAM block */ + /* 0: 16 size tcam block, 1: 0 size tcam block, other reserved. */ u8 tcam_type; u16 tcam_block_index; - /* 驱动发给uP表示驱动希望分配的block大小 - * uP返回给驱动的接口,表示uP 支持的分配的tcam block大小 + /* Size of the block that the driver wants to allocate + * Interface returned by the UP to the driver, + * indicating the size of the allocated TCAM block supported by the UP */ u16 alloc_block_num; }; @@ -779,13 +811,14 @@ struct nic_cmd_ctrl_tcam_block_in { struct nic_cmd_ctrl_tcam_block_out { struct hinic3_mgmt_msg_head head;
- u16 func_id; /* func_id */ - u8 alloc_en; /* 0: 释放分配的tcam block, 1: 申请新的tcam block */ - /* 0: 分配16 size 的tcam block, 1: 分配0 size的tcam block, 其他预留 */ + u16 func_id; /* func_id */ + u8 alloc_en; /* 0: Releases the allocated TCAM block. 1: Applies for a new TCAM block */ + /* 0: 16 size tcam block, 1: 0 size tcam block, other reserved. */ u8 tcam_type; u16 tcam_block_index; - /* 驱动发给uP表示驱动希望分配的block大小 - * uP返回给驱动的接口,表示uP 支持的分配的tcam block大小 + /* Size of the block that the driver wants to allocate + * Interface returned by the UP to the driver, + * indicating the size of the allocated TCAM block supported by the UP */ u16 mpu_alloc_block_size; }; @@ -824,15 +857,15 @@ struct nic_tcam_cfg_rule { struct tcam_key_x_y key; };
-#define TCAM_RULE_FDIR_TYPE 0 -#define TCAM_RULE_PPA_TYPE 1 +#define TCAM_RULE_FDIR_TYPE 0 +#define TCAM_RULE_PPA_TYPE 1
struct nic_cmd_fdir_add_rule { struct hinic3_mgmt_msg_head head;
u16 func_id; u8 type; - u8 rsvd; + u8 fdir_ext; /* 0x1: flow bifur en bit */ struct nic_tcam_cfg_rule rule; };
@@ -859,6 +892,16 @@ struct nic_cmd_fdir_get_rule { u64 byte_count; };
+struct nic_cmd_fdir_get_block_rules { + struct hinic3_mgmt_msg_head head; + u8 tcam_block_type; // only NIC_TCAM_BLOCK_TYPE_LARGE + u8 tcam_table_type; // TCAM_RULE_PPA_TYPE or TCAM_RULE_FDIR_TYPE + u16 tcam_block_index; + u8 valid[NIC_TCAM_BLOCK_LARGE_SIZE]; + struct tcam_key_x_y key[NIC_TCAM_BLOCK_LARGE_SIZE]; + struct tcam_result data[NIC_TCAM_BLOCK_LARGE_SIZE]; +}; + struct hinic3_tcam_key_ipv4_mem { u32 rsvd1 : 4; u32 tunnel_type : 4; @@ -867,9 +910,10 @@ struct hinic3_tcam_key_ipv4_mem { u32 sipv4_h : 16; u32 ip_type : 1; u32 function_id : 15; - u32 dipv4_h : 16; - u32 sipv4_l : 16; - u32 rsvd2 : 16; + u32 dipv4_h : 16; + u32 sipv4_l : 16; + u32 vlan_id : 15; + u32 vlan_flag : 1; u32 dipv4_l : 16; u32 rsvd3; u32 dport : 16; @@ -886,6 +930,17 @@ struct hinic3_tcam_key_ipv4_mem { u32 vni_l : 16; };
+union hinic3_tag_tcam_ext_info { + struct { + u32 id : 16; /* id */ + u32 type : 4; /* type: 0-func, 1-vmdq, 2-port, 3-rsvd, 4-trunk, 5-dp, 6-mc */ + u32 host_id : 3; + u32 rsv : 8; + u32 ext : 1; + } bs; + u32 value; +}; + struct hinic3_tcam_key_ipv6_mem { u32 rsvd1 : 4; u32 tunnel_type : 4; @@ -992,6 +1047,40 @@ struct hinic3_ppa_cfg_ppa_en_cmd { u8 rsvd; };
+struct hinic3_func_flow_bifur_en_cmd { + struct hinic3_mgmt_msg_head msg_head; + u16 func_id; + u8 flow_bifur_en; + u8 rsvd[5]; +}; + +struct hinic3_port_flow_bifur_en_cmd { + struct hinic3_mgmt_msg_head msg_head; + u16 port_id; + u8 flow_bifur_en; + u8 rsvd[5]; +}; + +struct hinic3_bond_mask_cmd { + struct hinic3_mgmt_msg_head msg_head; + u16 func_id; + u8 bond_mask; + u8 bond_en; + u8 func_valid; + u8 rsvd[3]; +}; + +#define HINIC3_TX_SET_PROMISC_SKIP 0 +#define HINIC3_TX_GET_PROMISC_SKIP 1 + +struct hinic3_tx_promisc_cfg { + struct hinic3_mgmt_msg_head msg_head; + u8 port_id; + u8 promisc_skip_en; /* 0: disable tx promisc replication, 1: enable */ + u8 opcode; /* 0: set, 1: get */ + u8 rsvd1; +}; + struct hinic3_ppa_cfg_mode_cmd { struct hinic3_mgmt_msg_head msg_head;
@@ -1037,43 +1126,43 @@ enum { NIC_NVM_DATA_RESET = BIT(31), };
-#define BIOS_CFG_SIGNATURE 0x1923E518 -#define BIOS_OP_CFG_ALL(op_code_val) (((op_code_val) >> 1) & (0xFFFFFFFF)) -#define BIOS_OP_CFG_WRITE(op_code_val) ((op_code_val) & NIC_NVM_DATA_SET) -#define BIOS_OP_CFG_PXE_EN(op_code_val) ((op_code_val) & NIC_NVM_DATA_PXE) -#define BIOS_OP_CFG_VLAN_EN(op_code_val) ((op_code_val) & NIC_NVM_DATA_VLAN) -#define BIOS_OP_CFG_VLAN_PRI(op_code_val) ((op_code_val) & NIC_NVM_DATA_VLAN_PRI) -#define BIOS_OP_CFG_VLAN_ID(op_code_val) ((op_code_val) & NIC_NVM_DATA_VLAN_ID) -#define BIOS_OP_CFG_WORK_MODE(op_code_val) ((op_code_val) & NIC_NVM_DATA_WORK_MODE) -#define BIOS_OP_CFG_PF_BW(op_code_val) ((op_code_val) & NIC_NVM_DATA_PF_SPEED_LIMIT) -#define BIOS_OP_CFG_GE_SPEED(op_code_val) ((op_code_val) & NIC_NVM_DATA_GE_MODE) -#define BIOS_OP_CFG_AUTO_NEG(op_code_val) ((op_code_val) & NIC_NVM_DATA_AUTO_NEG) -#define BIOS_OP_CFG_LINK_FEC(op_code_val) ((op_code_val) & NIC_NVM_DATA_LINK_FEC) -#define BIOS_OP_CFG_AUTO_ADPAT(op_code_val) ((op_code_val) & NIC_NVM_DATA_PF_ADAPTIVE_LINK) -#define BIOS_OP_CFG_SRIOV_ENABLE(op_code_val) ((op_code_val) & NIC_NVM_DATA_SRIOV_CONTROL) -#define BIOS_OP_CFG_EXTEND_MODE(op_code_val) ((op_code_val) & NIC_NVM_DATA_EXTEND_MODE) -#define BIOS_OP_CFG_RST_DEF_SET(op_code_val) ((op_code_val) & (u32)NIC_NVM_DATA_RESET) +#define BIOS_CFG_SIGNATURE 0x1923E518 +#define BIOS_OP_CFG_ALL(op_code_val) ((((op_code_val) >> 1) & (0xFFFFFFFF)) != 0) +#define BIOS_OP_CFG_WRITE(op_code_val) ((((op_code_val) & NIC_NVM_DATA_SET)) != 0) +#define BIOS_OP_CFG_PXE_EN(op_code_val) (((op_code_val) & NIC_NVM_DATA_PXE) != 0) +#define BIOS_OP_CFG_VLAN_EN(op_code_val) (((op_code_val) & NIC_NVM_DATA_VLAN) != 0) +#define BIOS_OP_CFG_VLAN_PRI(op_code_val) (((op_code_val) & NIC_NVM_DATA_VLAN_PRI) != 0) +#define BIOS_OP_CFG_VLAN_ID(op_code_val) (((op_code_val) & NIC_NVM_DATA_VLAN_ID) != 0) +#define BIOS_OP_CFG_WORK_MODE(op_code_val) (((op_code_val) & NIC_NVM_DATA_WORK_MODE) != 0) +#define BIOS_OP_CFG_PF_BW(op_code_val) (((op_code_val) & NIC_NVM_DATA_PF_SPEED_LIMIT) != 0) +#define BIOS_OP_CFG_GE_SPEED(op_code_val) (((op_code_val) & NIC_NVM_DATA_GE_MODE) != 0) +#define BIOS_OP_CFG_AUTO_NEG(op_code_val) (((op_code_val) & NIC_NVM_DATA_AUTO_NEG) != 0) +#define BIOS_OP_CFG_LINK_FEC(op_code_val) (((op_code_val) & NIC_NVM_DATA_LINK_FEC) != 0) +#define BIOS_OP_CFG_AUTO_ADPAT(op_code_val) (((op_code_val) & NIC_NVM_DATA_PF_ADAPTIVE_LINK) != 0) +#define BIOS_OP_CFG_SRIOV_ENABLE(op_code_val) (((op_code_val) & NIC_NVM_DATA_SRIOV_CONTROL) != 0) +#define BIOS_OP_CFG_EXTEND_MODE(op_code_val) (((op_code_val) & NIC_NVM_DATA_EXTEND_MODE) != 0) +#define BIOS_OP_CFG_RST_DEF_SET(op_code_val) (((op_code_val) & (u32)NIC_NVM_DATA_RESET) != 0)
#define NIC_BIOS_CFG_MAX_PF_BW 100 -/* 注意:此结构必须保证4字节对齐 */ +/* Note: This structure must be 4-byte aligned. */ struct nic_bios_cfg { - u32 signature; /* 签名,用于判断FLASH的内容合法性 */ - u8 pxe_en; /* PXE enable: 0 - disable 1 - enable */ + u32 signature; + u8 pxe_en; /* PXE enable: 0 - disable 1 - enable */ u8 extend_mode; u8 rsvd0[2]; - u8 pxe_vlan_en; /* PXE VLAN enable: 0 - disable 1 - enable */ - u8 pxe_vlan_pri; /* PXE VLAN priority: 0-7 */ - u16 pxe_vlan_id; /* PXE VLAN ID 1-4094 */ - u32 service_mode; /* 参考CHIPIF_SERVICE_MODE_x 宏 */ - u32 pf_bw; /* PF速率,百分比 0-100 */ - u8 speed; /* enum of port speed */ - u8 auto_neg; /* 自协商开关 0 - 字段无效 1 - 开2 - 关 */ - u8 lanes; /* lane num */ - u8 fec; /* FEC模式, 参考 enum mag_cmd_port_fec */ - u8 auto_adapt; /* 自适应模式配置0 - 无效配置 1 - 开启 2 - 关闭 */ - u8 func_valid; /* 指示func_id是否有效; 0 - 无效,other - 有效 */ - u8 func_id; /* 当func_valid不为0时,该成员才有意义 */ - u8 sriov_en; /* SRIOV-EN: 0 - 无效配置, 1 - 开启, 2 - 关闭 */ + u8 pxe_vlan_en; /* PXE VLAN enable: 0 - disable 1 - enable */ + u8 pxe_vlan_pri; /* PXE VLAN priority: 0-7 */ + u16 pxe_vlan_id; /* PXE VLAN ID 1-4094 */ + u32 service_mode; /* @See CHIPIF_SERVICE_MODE_x */ + u32 pf_bw; /* PF rate, in percentage. The value ranges from 0 to 100. */ + u8 speed; /* enum of port speed */ + u8 auto_neg; /* Auto-Negotiation Switch 0 - Invalid Field 1 - On 2 - Off */ + u8 lanes; /* lane num */ + u8 fec; /* FEC mode, @See enum mag_cmd_port_fec */ + u8 auto_adapt; /* Adaptive Mode Configuration 0 - Invalid Configuration 1 - On 2 - Off */ + u8 func_valid; /* Whether func_id is valid; 0: invalid; other: valid */ + u8 func_id; /* This member is valid only when func_valid is not set to 0. */ + u8 sriov_en; /* SRIOV-EN: 0 - Invalid configuration, 1 - On, 2 - Off */ };
struct nic_cmd_bios_cfg { @@ -1087,26 +1176,25 @@ struct nic_cmd_vhd_config {
u16 func_id; u8 vhd_type; - u8 virtio_small_enable; /* 0: mergeable mode, 1: small mode */ + u8 virtio_small_enable; /* 0: mergeable mode, 1: small mode */ };
/* BOND */ struct hinic3_create_bond_info { - u32 bond_id; /* bond设备号,output时有效,mpu操作成功返回时回填 */ - u32 master_slave_port_id; /* */ - u32 slave_bitmap; /* bond port id bitmap */ - u32 poll_timeout; /* bond设备链路检查时间 */ - u32 up_delay; /* 暂时预留 */ - u32 down_delay; /* 暂时预留 */ - u32 bond_mode; /* 暂时预留 */ - u32 active_pf; /* bond使用的active pf id */ - u32 active_port_max_num; /* bond活动成员口个数上限 */ - u32 active_port_min_num; /* bond活动成员口个数下限 */ - u32 xmit_hash_policy; /* hash策略,用于微码选路逻辑 */ + u32 bond_id; + u32 master_slave_port_id; + u32 slave_bitmap; /* bond port id bitmap */ + u32 poll_timeout; /* Bond device link check time */ + u32 up_delay; /* Temporarily reserved */ + u32 down_delay; /* Temporarily reserved */ + u32 bond_mode; /* Temporarily reserved */ + u32 active_pf; /* bond use active pf id */ + u32 active_port_max_num; /* Maximum number of active bond member interfaces */ + u32 active_port_min_num; /* Minimum number of active bond member interfaces */ + u32 xmit_hash_policy; u32 rsvd[2]; };
-/* 创建bond的消息接口 */ struct hinic3_cmd_create_bond { struct hinic3_mgmt_msg_head head; struct hinic3_create_bond_info create_bond_info; @@ -1119,18 +1207,16 @@ struct hinic3_cmd_delete_bond { };
struct hinic3_open_close_bond_info { - u32 bond_id; /* bond设备号 */ - u32 open_close_flag; /* 开启/关闭bond标识:1为open, 0为close */ + u32 bond_id; + u32 open_close_flag; /* Bond flag. 1: open; 0: close. */ u32 rsvd[2]; };
-/* MPU bond的消息接口 */ struct hinic3_cmd_open_close_bond { struct hinic3_mgmt_msg_head head; struct hinic3_open_close_bond_info open_close_bond_info; };
-/* LACPDU的port相关字段 */ struct lacp_port_params { u16 port_number; u16 port_priority; @@ -1143,10 +1229,10 @@ struct lacp_port_params {
struct lacp_port_info { u32 selected; - u32 aggregator_port_id; /* 使用的 aggregator port ID */ + u32 aggregator_port_id;
- struct lacp_port_params actor; /* actor port参数 */ - struct lacp_port_params partner; /* partner port参数 */ + struct lacp_port_params actor; + struct lacp_port_params partner;
u64 tx_lacp_pkts; u64 rx_lacp_pkts; @@ -1157,18 +1243,17 @@ struct lacp_port_info { u64 tx_marker_pkts; };
-/* lacp 状态信息 */ struct hinic3_bond_status_info { struct hinic3_mgmt_msg_head head; u32 bond_id; - u32 bon_mmi_status; /* 该bond子设备的链路状态 */ - u32 active_bitmap; /* 该bond子设备的slave port状态 */ - u32 port_count; /* 该bond子设备个数 */ + u32 bon_mmi_status; + u32 active_bitmap; + u32 port_count;
struct lacp_port_info port_info[4];
- u64 success_report_cnt[4]; /* 每个host成功上报lacp协商结果次数 */ - u64 fail_report_cnt[4]; /* 每个host上报lacp协商结果失败次数 */ + u64 success_report_cnt[4]; + u64 fail_report_cnt[4];
u64 poll_timeout; u64 fast_periodic_timeout; @@ -1180,12 +1265,11 @@ struct hinic3_bond_status_info { u64 rx_marker_timer; };
-/* lacp协商结果更新之后向主机侧发送异步消息通知结构体 */ struct hinic3_bond_active_report_info { struct hinic3_mgmt_msg_head head; u32 bond_id; - u32 bon_mmi_status; /* 该bond子设备的链路状态 */ - u32 active_bitmap; /* 该bond子设备的slave port状态 */ + u32 bon_mmi_status; + u32 active_bitmap;
u8 rsvd[16]; }; @@ -1195,7 +1279,7 @@ struct hinic3_ipcs_err_rss_enable_operation_s { struct hinic3_mgmt_msg_head head;
u8 en_tag; - u8 type; /* 1: set 0: get */ + u8 type; /* 1: set 0: get */ u8 rsvd[2]; };
@@ -1206,4 +1290,9 @@ struct hinic3_smac_check_state { u8 rsvd[2]; };
+struct hinic3_clear_log_state { + struct hinic3_mgmt_msg_head head; + u32 type; +}; + #endif /* HINIC_MGMT_INTERFACE_H */ diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic.h index 69cacbae3..cc00bdcbc 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic.h @@ -10,7 +10,6 @@ #include "hinic3_common.h" #include "hinic3_nic_io.h" #include "hinic3_nic_cfg.h" -#include "mag_cmd.h"
/* ************************ array index define ********************* */ #define ARRAY_INDEX_0 0 diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h index 8585cbe5d..6cf18d199 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h @@ -8,6 +8,7 @@ #include <linux/netdevice.h>
#include "hinic3_mgmt_interface.h" +#include "mag_mpu_cmd.h" #include "mag_cmd.h"
#define OS_VF_ID_TO_HW(os_vf_id) ((os_vf_id) + 1) diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_srv_nic.h b/drivers/net/ethernet/huawei/hinic3/hinic3_srv_nic.h index fee4cfca1..bdd5a8eb2 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_srv_nic.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_srv_nic.h @@ -12,6 +12,7 @@ #define HINIC3_SRV_NIC_H
#include "hinic3_mgmt_interface.h" +#include "mag_mpu_cmd.h" #include "mag_cmd.h" #include "hinic3_lld.h"
@@ -61,6 +62,8 @@ enum hinic3_nic_event_type { EVENT_NIC_LINK_UP, EVENT_NIC_PORT_MODULE_EVENT, EVENT_NIC_DCB_STATE_CHANGE, + EVENT_NIC_BOND_DOWN, + EVENT_NIC_BOND_UP, };
/* * @@ -207,6 +210,8 @@ void hinic3_free_qp_ctxts(void *hwdev); * @param hwdev: device pointer to hwdev * @param vf_link_forced: set link forced * @param link_state: Set link state, This parameter is valid only when vf_link_forced is true + * @retval zero: success + * @retval non-zero: failure */ int hinic3_pf_set_vf_link_state(void *hwdev, bool vf_link_forced, bool link_state);
diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.c index f5c7bb0d7..ea41e3660 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.c @@ -16,6 +16,7 @@ #include <linux/module.h>
#include "ossl_knl.h" +#include "npu_cmdq_base_defs.h" #include "hinic3_crm.h" #include "hinic3_hw.h" #include "hinic3_hwdev.h" @@ -824,8 +825,7 @@ static int cmdq_async_cmd(struct hinic3_cmdq *cmdq, u8 mod, u8 cmd, */ cmd_info->buf_in = buf_in;
- /* LB mode 1 compatible, cmdq 0 also for async, which is sync_no_wait */ - cmdq_set_db(cmdq, HINIC3_CMDQ_SYNC, next_prod_idx); + cmdq_set_db(cmdq, cmdq->cmdq_type, next_prod_idx);
cmdq_msg_unlock(cmdq);
@@ -993,6 +993,35 @@ int hinic3_cmdq_async(void *hwdev, u8 mod, u8 cmd, struct hinic3_cmd_buf *buf_in cmd, buf_in, channel); }
+int hinic3_cmdq_async_cos(void *hwdev, u8 mod, u8 cmd, + u8 cos_id, struct hinic3_cmd_buf *buf_in, u16 channel) +{ + struct hinic3_cmdqs *cmdqs = NULL; + int err; + + err = cmdq_params_valid(hwdev, buf_in); + if (err) + return err; + + cmdqs = ((struct hinic3_hwdev *)hwdev)->cmdqs; + + if (!get_card_present_state((struct hinic3_hwdev *)hwdev)) + return -EPERM; + + err = wait_cmdqs_enable(cmdqs); + if (err) { + sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq is disable\n"); + return err; + } + + if (cos_id >= cmdqs->cmdq_num) { + sdk_err(cmdqs->hwdev->dev_hdl, "Cmdq id is invalid\n"); + return -EINVAL; + } + + return cmdq_async_cmd(&cmdqs->cmdq[cos_id], mod, cmd, buf_in, channel); +} + static void clear_wqe_complete_bit(struct hinic3_cmdq *cmdq, struct hinic3_cmdq_wqe *wqe, u16 ci) { diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.h index 8a0dd48ff..b8b491ab4 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.h +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_cmdq.h @@ -8,12 +8,17 @@ #include <linux/completion.h> #include <linux/spinlock.h>
-#include "comm_msg_intf.h" +#include "mpu_inband_cmd_defs.h" #include "hinic3_hw.h" #include "hinic3_wq.h" #include "hinic3_common.h" #include "hinic3_hwdev.h"
+struct dma_pool { + unsigned int size; + void *dev_hdl; +}; + #define HINIC3_SCMD_DATA_LEN 16
#define HINIC3_CMDQ_DEPTH 4096 diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_csr.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_csr.h index b5390c9ed..4098d7f81 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_csr.h +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_csr.h @@ -183,5 +183,6 @@
#define HINIC3_MULT_HOST_SLAVE_STATUS_ADDR (HINIC3_MGMT_REGS_FLAG + 0xDF30) #define HINIC3_MULT_MIGRATE_HOST_STATUS_ADDR (HINIC3_MGMT_REGS_FLAG + 0xDF4C) +#define HINIC3_MULT_HOST_MASTER_MBOX_STATUS_ADDR HINIC3_MULT_HOST_SLAVE_STATUS_ADDR
#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_devlink.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_devlink.h index 0b5a08635..68dd0fb51 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_devlink.h +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_devlink.h @@ -143,6 +143,30 @@ struct host_image { u32 device_id; };
+struct hinic3_cmd_update_firmware { + struct mgmt_msg_head msg_head; + + struct { + u32 sl : 1; + u32 sf : 1; + u32 flag : 1; + u32 bit_signed : 1; + u32 reserved : 12; + u32 fragment_len : 16; + } ctl_info; + + struct { + u32 section_crc; + u32 section_type; + } section_info; + + u32 total_len; + u32 section_len; + u32 section_version; + u32 section_offset; + u32 data[384]; +}; + int hinic3_init_devlink(struct hinic3_hwdev *hwdev); void hinic3_uninit_devlink(struct hinic3_hwdev *hwdev);
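An image section larger than the 384-dword data[] payload has to be pushed in several fragments, with fragment_len and section_offset describing each piece. A stand-alone sketch of the fragment arithmetic; the payload size comes from the structure above, the section length is only an example:

#include <stdint.h>
#include <stdio.h>

#define FW_FRAGMENT_PAYLOAD_BYTES (384 * 4) /* sizeof(data) in the struct above */

int main(void)
{
	uint32_t section_len = 100000; /* example section size in bytes */
	uint32_t offset;

	for (offset = 0; offset < section_len; offset += FW_FRAGMENT_PAYLOAD_BYTES) {
		uint32_t frag = section_len - offset;

		if (frag > FW_FRAGMENT_PAYLOAD_BYTES)
			frag = FW_FRAGMENT_PAYLOAD_BYTES;
		printf("section_offset=%u fragment_len=%u\n",
		       (unsigned int)offset, (unsigned int)frag);
	}
	return 0;
}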
diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_eqs.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_eqs.c index 97c82df5b..a4c02358f 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_eqs.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_eqs.c @@ -1002,6 +1002,41 @@ static int init_eq(struct hinic3_eq *eq, struct hinic3_hwdev *hwdev, u16 q_id, return err; }
+int hinic3_init_single_ceq_status(void *hwdev, u16 q_id)
+{
+ int err = 0;
+ struct hinic3_hwdev *dev = hwdev;
+ struct hinic3_eq *eq = NULL;
+
+ if (!hwdev) {
+ pr_err("hwdev is null\n");
+ return -EINVAL;
+ }
+
+ if (q_id >= dev->ceqs->num_ceqs) {
+ sdk_err(dev->dev_hdl, "q_id=%u is larger than num_ceqs %u.\n",
+ q_id, dev->ceqs->num_ceqs);
+ return -EINVAL;
+ }
+
+ eq = &dev->ceqs->ceq[q_id];
+ /* Indirect access should set q_id first */
+ hinic3_hwif_write_reg(dev->hwif, HINIC3_EQ_INDIR_IDX_ADDR(eq->type), eq->q_id);
+ wmb(); /* write index before config */
+
+ reset_eq(eq);
+
+ err = set_eq_ctrls(eq);
+ if (err) {
+ sdk_err(dev->dev_hdl, "Failed to set ctrls for eq\n");
+ return err;
+ }
+ set_eq_cons_idx(eq, HINIC3_EQ_ARMED);
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic3_init_single_ceq_status);
+
 /**
 * remove_eq - remove eq
 * @eq: the event queue
diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c
index 08a1b8f15..21951bde3 100644
--- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c
+++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.c
@@ -16,7 +16,8 @@
 #include "hinic3_hw.h"
 #include "hinic3_hwdev.h"
 #include "hinic3_hwif.h"
-#include "cfg_mgt_comm_pub.h"
+#include "cfg_mgmt_mpu_cmd.h"
+#include "cfg_mgmt_mpu_cmd_defs.h"
 #include "hinic3_hw_cfg.h"
static void parse_pub_res_cap_dfx(struct hinic3_hwdev *hwdev, @@ -108,6 +109,11 @@ static void parse_pub_res_cap(struct hinic3_hwdev *hwdev, cap->host_valid_bitmap = dev_cap->host_valid_bitmap; cap->master_host_id = dev_cap->master_host_id; cap->srv_multi_host_mode = dev_cap->srv_multi_host_mode; + cap->fake_vf_en = dev_cap->fake_vf_en; + cap->fake_vf_start_bit = dev_cap->fake_vf_start_bit; + cap->fake_vf_end_bit = dev_cap->fake_vf_end_bit; + cap->fake_vf_page_bit = dev_cap->fake_vf_page_bit; + cap->map_host_id = dev_cap->map_host_id;
if (type != TYPE_VF) { cap->max_vf = dev_cap->max_vf; @@ -327,19 +333,6 @@ static void parse_ipsec_res_cap(struct hinic3_hwdev *hwdev, dev_cap->ipsec_max_sactx, dev_cap->ipsec_max_cq); }
-static void parse_vbs_res_cap(struct hinic3_hwdev *hwdev, - struct service_cap *cap, - struct cfg_cmd_dev_cap *dev_cap, - enum func_type type) -{ - struct vbs_service_cap *vbs_cap = &cap->vbs_cap; - - vbs_cap->vbs_max_volq = dev_cap->vbs_max_volq; - - sdk_info(hwdev->dev_hdl, "Get VBS resource capbility, vbs_max_volq: 0x%x\n", - dev_cap->vbs_max_volq); -} - static void parse_dev_cap(struct hinic3_hwdev *dev, struct cfg_cmd_dev_cap *dev_cap, enum func_type type) { @@ -382,9 +375,6 @@ static void parse_dev_cap(struct hinic3_hwdev *dev,
if (IS_PPA_TYPE(dev)) parse_ppa_res_cap(dev, cap, dev_cap, type); - - if (IS_VBS_TYPE(dev)) - parse_vbs_res_cap(dev, cap, dev_cap, type); }
static int get_cap_from_fw(struct hinic3_hwdev *dev, enum func_type type) @@ -414,29 +404,39 @@ static int get_cap_from_fw(struct hinic3_hwdev *dev, enum func_type type) return 0; }
-static int hinic3_get_dev_cap(struct hinic3_hwdev *dev) +int hinic3_get_dev_cap(void *dev) { - enum func_type type = HINIC3_FUNC_TYPE(dev); + enum func_type type; int err; + struct hinic3_hwdev *hwdev = NULL; + + if (!dev) { + pr_err("pointer dev is NULL\n"); + return -EINVAL; + } + hwdev = (struct hinic3_hwdev *)dev; + type = HINIC3_FUNC_TYPE(hwdev);
switch (type) { case TYPE_PF: case TYPE_PPF: case TYPE_VF: - err = get_cap_from_fw(dev, type); - if (err) { - sdk_err(dev->dev_hdl, "Failed to get PF/PPF capability\n"); + err = get_cap_from_fw(hwdev, type); + if (err != 0) { + sdk_err(hwdev->dev_hdl, + "Failed to get PF/PPF capability\n"); return err; } break; default: - sdk_err(dev->dev_hdl, "Unsupported PCI Function type: %d\n", - type); + sdk_err(hwdev->dev_hdl, + "Unsupported PCI Function type: %d\n", type); return -EINVAL; }
return 0; } +EXPORT_SYMBOL(hinic3_get_dev_cap);
int hinic3_get_ppf_timer_cfg(void *hwdev) { @@ -1017,21 +1017,21 @@ int init_cfg_mgmt(struct hinic3_hwdev *dev) cfg_mgmt->hwdev = dev;
err = cfg_init_eq(dev); - if (err) { + if (err != 0) { sdk_err(dev->dev_hdl, "Failed to init cfg event queue, err: %d\n", err); goto free_mgmt_mem; }
err = cfg_init_interrupt(dev); - if (err) { + if (err != 0) { sdk_err(dev->dev_hdl, "Failed to init cfg interrupt, err: %d\n", err); goto free_eq_mem; }
err = cfg_enable_interrupt(dev); - if (err) { + if (err != 0) { sdk_err(dev->dev_hdl, "Failed to enable cfg interrupt, err: %d\n", err); goto free_interrupt_mem; @@ -1089,6 +1089,33 @@ void free_cfg_mgmt(struct hinic3_hwdev *dev) kfree(cfg_mgmt); }
+/**
+ * hinic3_init_vf_dev_cap - Set max queue num for VF
+ * @hwdev: the HW device for VF
+ */
+int hinic3_init_vf_dev_cap(void *hwdev)
+{
+ struct hinic3_hwdev *dev = NULL;
+ enum func_type type;
+ int err;
+
+ if (!hwdev)
+ return -EFAULT;
+
+ dev = (struct hinic3_hwdev *)hwdev;
+ type = HINIC3_FUNC_TYPE(dev);
+ if (type != TYPE_VF)
+ return -EPERM;
+
+ err = hinic3_get_dev_cap(dev);
+ if (err != 0)
+ return err;
+
+ nic_param_fix(dev);
+
+ return 0;
+}
+
 int init_capability(struct hinic3_hwdev *dev)
 {
 int err;
@@ -1123,7 +1150,7 @@ bool hinic3_support_nic(void *hwdev, struct nic_service_cap *cap)
 return false;
if (cap) - memcpy(cap, &dev->cfg_mgmt->svc_cap.nic_cap, sizeof(*cap)); + memcpy(cap, &dev->cfg_mgmt->svc_cap.nic_cap, sizeof(struct nic_service_cap));
return true; } @@ -1140,7 +1167,7 @@ bool hinic3_support_ppa(void *hwdev, struct ppa_service_cap *cap) return false;
if (cap) - memcpy(cap, &dev->cfg_mgmt->svc_cap.ppa_cap, sizeof(*cap)); + memcpy(cap, &dev->cfg_mgmt->svc_cap.ppa_cap, sizeof(struct ppa_service_cap));
return true; } @@ -1174,7 +1201,7 @@ bool hinic3_support_ipsec(void *hwdev, struct ipsec_service_cap *cap) return false;
if (cap) - memcpy(cap, &dev->cfg_mgmt->svc_cap.ipsec_cap, sizeof(*cap)); + memcpy(cap, &dev->cfg_mgmt->svc_cap.ipsec_cap, sizeof(struct ipsec_service_cap));
return true; } @@ -1191,7 +1218,7 @@ bool hinic3_support_roce(void *hwdev, struct rdma_service_cap *cap) return false;
if (cap) - memcpy(cap, &dev->cfg_mgmt->svc_cap.rdma_cap, sizeof(*cap)); + memcpy(cap, &dev->cfg_mgmt->svc_cap.rdma_cap, sizeof(struct rdma_service_cap));
return true; } @@ -1208,7 +1235,7 @@ bool hinic3_support_fc(void *hwdev, struct fc_service_cap *cap) return false;
if (cap) - memcpy(cap, &dev->cfg_mgmt->svc_cap.fc_cap, sizeof(*cap)); + memcpy(cap, &dev->cfg_mgmt->svc_cap.fc_cap, sizeof(struct fc_service_cap));
return true; } @@ -1221,11 +1248,11 @@ bool hinic3_support_rdma(void *hwdev, struct rdma_service_cap *cap) if (!hwdev) return false;
- if (!IS_RDMA_TYPE(dev)) + if (!IS_RDMA_TYPE(dev) && !(IS_RDMA_ENABLE(dev))) return false;
if (cap) - memcpy(cap, &dev->cfg_mgmt->svc_cap.rdma_cap, sizeof(*cap)); + memcpy(cap, &dev->cfg_mgmt->svc_cap.rdma_cap, sizeof(struct rdma_service_cap));
return true; } @@ -1242,7 +1269,7 @@ bool hinic3_support_ovs(void *hwdev, struct ovs_service_cap *cap) return false;
if (cap) - memcpy(cap, &dev->cfg_mgmt->svc_cap.ovs_cap, sizeof(*cap)); + memcpy(cap, &dev->cfg_mgmt->svc_cap.ovs_cap, sizeof(struct ovs_service_cap));
return true; } @@ -1259,12 +1286,31 @@ bool hinic3_support_vbs(void *hwdev, struct vbs_service_cap *cap) return false;
if (cap) - memcpy(cap, &dev->cfg_mgmt->svc_cap.vbs_cap, sizeof(*cap)); + memcpy(cap, &dev->cfg_mgmt->svc_cap.vbs_cap, sizeof(struct vbs_service_cap));
return true; } EXPORT_SYMBOL(hinic3_support_vbs);
+bool hinic3_is_guest_vmsec_enable(void *hwdev) +{ + struct hinic3_hwdev *hw_dev = hwdev; + + if (!hwdev) { + pr_err("hwdev is null\n"); + return false; + } + + /* vf used in vm */ + if (IS_VM_SLAVE_HOST(hw_dev) && (hinic3_func_type(hwdev) == TYPE_VF) && + IS_RDMA_TYPE(hw_dev)) { + return true; + } + + return false; +} +EXPORT_SYMBOL(hinic3_is_guest_vmsec_enable); + /* Only PPF support it, PF is not */ bool hinic3_support_toe(void *hwdev, struct toe_service_cap *cap) { @@ -1277,7 +1323,7 @@ bool hinic3_support_toe(void *hwdev, struct toe_service_cap *cap) return false;
if (cap) - memcpy(cap, &dev->cfg_mgmt->svc_cap.toe_cap, sizeof(*cap)); + memcpy(cap, &dev->cfg_mgmt->svc_cap.toe_cap, sizeof(struct toe_service_cap));
return true; } @@ -1307,6 +1353,17 @@ bool hinic3_get_stateful_enable(void *hwdev) } EXPORT_SYMBOL(hinic3_get_stateful_enable);
+bool hinic3_get_timer_enable(void *hwdev) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + return dev->cfg_mgmt->svc_cap.timer_en; +} +EXPORT_SYMBOL(hinic3_get_timer_enable); + u8 hinic3_host_oq_id_mask(void *hwdev) { struct hinic3_hwdev *dev = hwdev; @@ -1478,3 +1535,27 @@ u8 hinic3_flexq_en(void *hwdev) } EXPORT_SYMBOL(hinic3_flexq_en);
+int hinic3_get_fake_vf_info(void *hwdev, u8 *fake_vf_vld, + u8 *page_bit, u8 *pf_start_bit, u8 *map_host_id) +{ + struct hinic3_hwdev *dev = hwdev; + + if (!dev) { + pr_err("Hwdev pointer is NULL for getting pf id start capability\n"); + return -EINVAL; + } + + if (!fake_vf_vld || !page_bit || !pf_start_bit || !map_host_id) { + pr_err("Fake vf member pointer is NULL for getting pf id start capability\n"); + return -EINVAL; + } + + *fake_vf_vld = dev->cfg_mgmt->svc_cap.fake_vf_en; + *page_bit = dev->cfg_mgmt->svc_cap.fake_vf_page_bit; + *pf_start_bit = dev->cfg_mgmt->svc_cap.fake_vf_start_bit; + *map_host_id = dev->cfg_mgmt->svc_cap.map_host_id; + + return 0; +} +EXPORT_SYMBOL(hinic3_get_fake_vf_info); + diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.h index 0a27530ba..db6e3cab6 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.h +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_cfg.h @@ -5,7 +5,7 @@ #define HINIC3_HW_CFG_H
#include <linux/types.h> -#include "cfg_mgt_comm_pub.h" +#include "cfg_mgmt_mpu_cmd_defs.h" #include "hinic3_hwdev.h"
#define CFG_MAX_CMD_TIMEOUT 30000 /* ms */ @@ -16,13 +16,10 @@ enum { };
/* start position for CEQs allocation, Max number of CEQs is 32 */ -/*lint -save -e849*/ enum { CFG_RDMA_CEQ_BASE = 0 };
-/*lint -restore*/ - /* RDMA resource */ #define K_UNIT BIT(10) #define M_UNIT BIT(20) @@ -73,7 +70,7 @@ enum { #define RDMA_RSVD_MRWS 128 #define RDMA_MPT_ENTRY_SZ 64 #define RDMA_NUM_MTTS (1 * G_UNIT) -#define LOG_MTT_SEG 5 +#define LOG_MTT_SEG 9 #define MTT_ENTRY_SZ 8 #define LOG_RDMARC_SEG 3
@@ -150,7 +147,7 @@ enum intr_type { struct service_cap { struct dev_sf_svc_attr sf_svc_attr; u16 svc_type; /* user input service type */ - u16 chip_svc_type; /* HW supported service type, reference to servic_bit_define_e */ + u16 chip_svc_type; /* HW supported service type, reference to servic_bit_define */
u8 host_id; u8 ep_id; @@ -232,6 +229,12 @@ struct service_cap { */ u16 hash_bucket_num;
+ u8 map_host_id; + u8 fake_vf_en; + u8 fake_vf_start_bit; + u8 fake_vf_end_bit; + u8 fake_vf_page_bit; + struct nic_service_cap nic_cap; /* NIC capability */ struct rdma_service_cap rdma_cap; /* RDMA capability */ struct fc_service_cap fc_cap; /* FC capability */ @@ -328,5 +331,7 @@ int init_capability(struct hinic3_hwdev *dev);
void free_capability(struct hinic3_hwdev *dev);
+int hinic3_init_vf_dev_cap(void *hwdev); + #endif
diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.c index f207408b1..d8a1a28ba 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.c @@ -20,7 +20,7 @@ #include "hinic3_mgmt.h" #include "hinic3_hw_cfg.h" #include "hinic3_cmdq.h" -#include "comm_msg_intf.h" +#include "mpu_inband_cmd_defs.h" #include "hinic3_hw_comm.h"
#define HINIC3_MSIX_CNT_LLI_TIMER_SHIFT 0 @@ -174,7 +174,7 @@ int hinic3_set_interrupt_cfg(void *dev, struct interrupt_info info, u16 channel) temp_info.msix_index = info.msix_index;
err = hinic3_get_interrupt_cfg(hwdev, &temp_info, channel); - if (err) + if (err != 0) return -EINVAL;
if (!info.lli_set) { @@ -228,7 +228,7 @@ int hinic3_set_wq_page_size(void *hwdev, u16 func_idx, u32 page_size, &page_size_info, &out_size, channel); if (err || !out_size || page_size_info.head.status) { sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, - "Failed to set wq page size, err: %d, status: 0x%x, out_size: 0x%0x, channel: 0x%x\n", + "Failed to set wq page size, err: %d, status: 0x%x, out_size: 0x%x, channel: 0x%x\n", err, page_size_info.head.status, out_size, channel); return -EFAULT; } @@ -378,7 +378,7 @@ int hinic3_set_cmdq_ctxt(struct hinic3_hwdev *hwdev, u8 cmdq_id, int err;
memset(&cmdq_ctxt, 0, sizeof(cmdq_ctxt)); - memcpy(&cmdq_ctxt.ctxt, ctxt, sizeof(*ctxt)); + memcpy(&cmdq_ctxt.ctxt, ctxt, sizeof(struct cmdq_ctxt_info)); cmdq_ctxt.func_id = hinic3_global_func_id(hwdev); cmdq_ctxt.cmdq_id = cmdq_id;
@@ -521,6 +521,31 @@ int hinic3_set_ppf_flr_type(void *hwdev, enum hinic3_ppf_flr_type flr_type) } EXPORT_SYMBOL(hinic3_set_ppf_flr_type);
+int hinic3_set_ppf_tbl_hotreplace_flag(void *hwdev, u8 flag)
+{
+ struct comm_cmd_ppf_tbl_htrp_config htr_info = {0};
+ u16 out_size = sizeof(struct comm_cmd_ppf_tbl_htrp_config);
+ struct hinic3_hwdev *dev = hwdev;
+ int ret;
+
+ if (!hwdev) {
+ pr_err("Invalid hwdev for setting the ppf table hotreplace flag\n");
+ return -EINVAL;
+ }
+
+ htr_info.hotreplace_flag = flag;
+ ret = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_SET_PPF_TBL_HTR_FLG,
+ &htr_info, sizeof(htr_info), &htr_info, &out_size);
+ if (ret != 0 || htr_info.head.status != 0) {
+ sdk_err(dev->dev_hdl, "Send mbox to mpu failed in sdk, ret:%d, status:%u\n",
+ ret, htr_info.head.status);
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(hinic3_set_ppf_tbl_hotreplace_flag);
+
 static int hinic3_get_fw_ver(struct hinic3_hwdev *hwdev, enum hinic3_fw_ver_type type,
 u8 *mgmt_ver, u8 version_size, u16 channel)
 {
@@ -543,9 +568,7 @@ static int hinic3_get_fw_ver(struct hinic3_hwdev *hwdev, enum hinic3_fw_ver_type
 return -EIO;
 }
- err = snprintf(mgmt_ver, version_size, "%s", fw_ver.ver); - if (err < 0) - return -EINVAL; + memcpy(mgmt_ver, fw_ver.ver, version_size);
return 0; } @@ -569,13 +592,13 @@ int hinic3_get_fw_version(void *hwdev, struct hinic3_fw_version *fw_ver, err = hinic3_get_fw_ver(hwdev, HINIC3_FW_VER_TYPE_MPU, fw_ver->mgmt_ver, sizeof(fw_ver->mgmt_ver), channel); - if (err) + if (err != 0) return err;
err = hinic3_get_fw_ver(hwdev, HINIC3_FW_VER_TYPE_NPU, fw_ver->microcode_ver, sizeof(fw_ver->microcode_ver), channel); - if (err) + if (err != 0) return err;
return hinic3_get_fw_ver(hwdev, HINIC3_FW_VER_TYPE_BOOT, @@ -598,8 +621,9 @@ static int hinic3_comm_features_nego(void *hwdev, u8 opcode, u64 *s_feature, memset(&feature_nego, 0, sizeof(feature_nego)); feature_nego.func_id = hinic3_global_func_id(hwdev); feature_nego.opcode = opcode; - if (opcode == MGMT_MSG_CMD_OP_SET) + if (opcode == MGMT_MSG_CMD_OP_SET) { memcpy(feature_nego.s_feature, s_feature, (size * sizeof(u64))); + }
err = comm_msg_to_mgmt_sync(hwdev, COMM_MGMT_CMD_FEATURE_NEGO, &feature_nego, sizeof(feature_nego), @@ -611,7 +635,7 @@ static int hinic3_comm_features_nego(void *hwdev, u8 opcode, u64 *s_feature, }
if (opcode == MGMT_MSG_CMD_OP_GET) - memcpy(s_feature, feature_nego.s_feature, (size * sizeof(u64))); + memcpy(s_feature, feature_nego.s_feature, (COMM_MAX_FEATURE_QWORD * sizeof(u64)));
return 0; } @@ -679,14 +703,9 @@ int hinic3_func_tmr_bitmap_set(void *hwdev, u16 func_id, bool en) return 0; }
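The negotiated capability bits travel as an array of u64 feature words, as in the COMM_MAX_FEATURE_QWORD copy above. A stand-alone sketch of testing one feature bit in such an array; the helper and the qword count of 4 are assumptions made for this example only:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_FEATURE_QWORDS 4 /* assumed array length for this example */

static bool feature_bit_set(const uint64_t *features, unsigned int bit)
{
	if (bit >= EXAMPLE_FEATURE_QWORDS * 64)
		return false;

	return (features[bit / 64] >> (bit % 64)) & 1ULL;
}

int main(void)
{
	uint64_t features[EXAMPLE_FEATURE_QWORDS] = { 0x5, 0, 0, 0 };

	printf("bit0=%d bit1=%d bit2=%d\n",
	       feature_bit_set(features, 0),
	       feature_bit_set(features, 1),
	       feature_bit_set(features, 2));
	return 0;
}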
-static int ppf_ht_gpa_set(struct hinic3_hwdev *hwdev, struct hinic3_page_addr *pg0, - struct hinic3_page_addr *pg1) +static int ppf_ht_gpa_malloc(struct hinic3_hwdev *hwdev, struct hinic3_page_addr *pg0, + struct hinic3_page_addr *pg1) { - struct comm_cmd_ht_gpa ht_gpa_set; - u16 out_size = sizeof(ht_gpa_set); - int ret; - - memset(&ht_gpa_set, 0, sizeof(ht_gpa_set)); pg0->virt_addr = dma_zalloc_coherent(hwdev->dev_hdl, HINIC3_HT_GPA_PAGE_SIZE, &pg0->phys_addr, GFP_KERNEL); @@ -703,6 +722,37 @@ static int ppf_ht_gpa_set(struct hinic3_hwdev *hwdev, struct hinic3_page_addr *p return -EFAULT; }
+ return 0; +} + +static void ppf_ht_gpa_free(struct hinic3_hwdev *hwdev, struct hinic3_page_addr *pg0, + struct hinic3_page_addr *pg1) +{ + if (pg0->virt_addr) { + dma_free_coherent(hwdev->dev_hdl, HINIC3_HT_GPA_PAGE_SIZE, pg0->virt_addr, + (dma_addr_t)(pg0->phys_addr)); + pg0->virt_addr = NULL; + } + if (pg1->virt_addr) { + dma_free_coherent(hwdev->dev_hdl, HINIC3_HT_GPA_PAGE_SIZE, pg1->virt_addr, + (dma_addr_t)(pg1->phys_addr)); + pg1->virt_addr = NULL; + } +} + +static int ppf_ht_gpa_set(struct hinic3_hwdev *hwdev, struct hinic3_page_addr *pg0, + struct hinic3_page_addr *pg1) +{ + struct comm_cmd_ht_gpa ht_gpa_set; + u16 out_size = sizeof(ht_gpa_set); + int ret; + + memset(&ht_gpa_set, 0, sizeof(ht_gpa_set)); + + ret = ppf_ht_gpa_malloc(hwdev, pg0, pg1); + if (ret) + return ret; + ht_gpa_set.host_id = hinic3_host_id(hwdev); ht_gpa_set.page_pa0 = pg0->phys_addr; ht_gpa_set.page_pa1 = pg1->phys_addr; @@ -751,22 +801,8 @@ int hinic3_ppf_ht_gpa_init(void *dev) break; }
- for (j = 0; j < i; j++) { - if (page_addr0[j].virt_addr) { - dma_free_coherent(hwdev->dev_hdl, - HINIC3_HT_GPA_PAGE_SIZE, - page_addr0[j].virt_addr, - (dma_addr_t)page_addr0[j].phys_addr); - page_addr0[j].virt_addr = NULL; - } - if (page_addr1[j].virt_addr) { - dma_free_coherent(hwdev->dev_hdl, - HINIC3_HT_GPA_PAGE_SIZE, - page_addr1[j].virt_addr, - (dma_addr_t)page_addr1[j].phys_addr); - page_addr1[j].virt_addr = NULL; - } - } + for (j = 0; j < i; j++) + ppf_ht_gpa_free(hwdev, &page_addr0[j], &page_addr1[j]);
if (i >= HINIC3_PPF_HT_GPA_SET_RETRY_TIMES) { sdk_err(hwdev->dev_hdl, "PPF ht gpa init failed, retry times: %d\n", @@ -855,16 +891,16 @@ EXPORT_SYMBOL(hinic3_ppf_tmr_stop); static int mqm_eqm_try_alloc_mem(struct hinic3_hwdev *hwdev, u32 page_size, u32 page_num) { - struct hinic3_page_addr *page_addr = hwdev->mqm_att.brm_srch_page_addr; + struct hinic3_dma_addr_align *page_addr = hwdev->mqm_att.brm_srch_page_addr; u32 valid_num = 0; u32 flag = 1; u32 i = 0; + int err;
for (i = 0; i < page_num; i++) { - page_addr->virt_addr = - dma_zalloc_coherent(hwdev->dev_hdl, page_size, - &page_addr->phys_addr, GFP_KERNEL); - if (!page_addr->virt_addr) { + err = hinic3_dma_zalloc_coherent_align(hwdev->dev_hdl, page_size, + page_size, GFP_KERNEL, page_addr); + if (err) { flag = 0; break; } @@ -878,9 +914,7 @@ static int mqm_eqm_try_alloc_mem(struct hinic3_hwdev *hwdev, u32 page_size, } else { page_addr = hwdev->mqm_att.brm_srch_page_addr; for (i = 0; i < valid_num; i++) { - dma_free_coherent(hwdev->dev_hdl, page_size, - page_addr->virt_addr, - (dma_addr_t)page_addr->phys_addr); + hinic3_dma_free_coherent_align(hwdev->dev_hdl, page_addr); page_addr++; } return -EFAULT; @@ -924,15 +958,12 @@ static int mqm_eqm_alloc_page_mem(struct hinic3_hwdev *hwdev) static void mqm_eqm_free_page_mem(struct hinic3_hwdev *hwdev) { u32 i; - struct hinic3_page_addr *page_addr; - u32 page_size; + struct hinic3_dma_addr_align *page_addr;
- page_size = hwdev->mqm_att.page_size; page_addr = hwdev->mqm_att.brm_srch_page_addr;
for (i = 0; i < hwdev->mqm_att.page_num; i++) { - dma_free_coherent(hwdev->dev_hdl, page_size, - page_addr->virt_addr, (dma_addr_t)(page_addr->phys_addr)); + hinic3_dma_free_coherent_align(hwdev->dev_hdl, page_addr); page_addr++; } } @@ -961,12 +992,11 @@ static int mqm_eqm_set_cfg_2_hw(struct hinic3_hwdev *hwdev, u8 valid) }
#define EQM_DATA_BUF_SIZE 1024 -#define MQM_ATT_PAGE_NUM 128
static int mqm_eqm_set_page_2_hw(struct hinic3_hwdev *hwdev) { struct comm_cmd_eqm_search_gpa *info = NULL; - struct hinic3_page_addr *page_addr = NULL; + struct hinic3_dma_addr_align *page_addr = NULL; void *send_buf = NULL; u16 send_buf_size; u32 i; @@ -995,7 +1025,7 @@ static int mqm_eqm_set_page_2_hw(struct hinic3_hwdev *hwdev) cmd = COMM_MGMT_CMD_SET_MQM_SRCH_GPA; for (i = 0; i < hwdev->mqm_att.page_num; i++) { /* gpa align to 4K, save gpa[31:12] */ - gpa = page_addr->phys_addr >> 12; + gpa = page_addr->align_paddr >> 12; gpa_hi52[num] = gpa; num++; if (num == MQM_ATT_PAGE_NUM) { @@ -1085,7 +1115,7 @@ static int mqm_eqm_init(struct hinic3_hwdev *hwdev) hwdev->mqm_att.page_num = 0;
hwdev->mqm_att.brm_srch_page_addr = - kcalloc(hwdev->mqm_att.chunk_num, sizeof(struct hinic3_page_addr), GFP_KERNEL); + kcalloc(hwdev->mqm_att.chunk_num, sizeof(struct hinic3_dma_addr_align), GFP_KERNEL); if (!(hwdev->mqm_att.brm_srch_page_addr)) { sdk_err(hwdev->dev_hdl, "Alloc virtual mem failed\r\n"); return -EFAULT; @@ -1245,7 +1275,7 @@ static int wait_cmdq_stop(struct hinic3_hwdev *hwdev) return err; }
-static int hinic3_rx_tx_flush(struct hinic3_hwdev *hwdev, u16 channel) +static int hinic3_rx_tx_flush(struct hinic3_hwdev *hwdev, u16 channel, bool wait_io) { struct hinic3_hwif *hwif = hwdev->hwif; struct comm_cmd_clear_doorbell clear_db; @@ -1254,7 +1284,7 @@ static int hinic3_rx_tx_flush(struct hinic3_hwdev *hwdev, u16 channel) int err; int ret = 0;
- if (HINIC3_FUNC_TYPE(hwdev) != TYPE_VF) + if ((HINIC3_FUNC_TYPE(hwdev) != TYPE_VF) && wait_io) msleep(100); /* wait ucode 100 ms stop I/O */
err = wait_cmdq_stop(hwdev); @@ -1317,7 +1347,7 @@ static int hinic3_rx_tx_flush(struct hinic3_hwdev *hwdev, u16 channel) return ret; }
-int hinic3_func_rx_tx_flush(void *hwdev, u16 channel) +int hinic3_func_rx_tx_flush(void *hwdev, u16 channel, bool wait_io) { struct hinic3_hwdev *dev = hwdev;
@@ -1327,7 +1357,7 @@ int hinic3_func_rx_tx_flush(void *hwdev, u16 channel) if (dev->chip_present_flag == 0) return 0;
- return hinic3_rx_tx_flush(dev, channel); + return hinic3_rx_tx_flush(dev, channel, wait_io); } EXPORT_SYMBOL(hinic3_func_rx_tx_flush);
@@ -1383,7 +1413,7 @@ int hinic3_get_hw_pf_infos(void *hwdev, struct hinic3_hw_pf_infos *infos, goto free_buf; }
- memcpy(infos, &pf_infos->infos, sizeof(*infos)); + memcpy(infos, &pf_infos->infos, sizeof(struct hinic3_hw_pf_infos));
free_buf: kfree(pf_infos); @@ -1407,7 +1437,7 @@ int hinic3_get_global_attr(void *hwdev, struct comm_global_attr *attr) return -EIO; }
- memcpy(attr, &get_attr.attr, sizeof(*attr)); + memcpy(attr, &get_attr.attr, sizeof(struct comm_global_attr));
return 0; } @@ -1477,7 +1507,7 @@ int hinic3_get_sml_table_info(void *hwdev, u32 tbl_id, u8 *node_id, u8 *instance
int hinic3_activate_firmware(void *hwdev, u8 cfg_index) { - struct hinic3_cmd_activate_firmware activate_msg; + struct cmd_active_firmware activate_msg; u16 out_size = sizeof(activate_msg); int err;
@@ -1509,7 +1539,7 @@ int hinic3_activate_firmware(void *hwdev, u8 cfg_index)
int hinic3_switch_config(void *hwdev, u8 cfg_index) { - struct hinic3_cmd_switch_config switch_cfg; + struct cmd_switch_cfg switch_cfg; u16 out_size = sizeof(switch_cfg); int err;
diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.h index be9e4a6b2..e031ec4cc 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.h +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_comm.h @@ -6,7 +6,7 @@
#include <linux/types.h>
-#include "comm_msg_intf.h" +#include "mpu_inband_cmd_defs.h" #include "hinic3_hwdev.h"
#define MSG_TO_MGMT_SYNC_RETURN_ERR(err, out_size, status) \ diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_mt.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_mt.c index dab583ba8..e8aa7efe4 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_mt.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hw_mt.c @@ -5,7 +5,7 @@ #include "hinic3_mt.h" #include "hinic3_crm.h" #include "hinic3_hw.h" -#include "hinic3_comm_cmd.h" +#include "mpu_inband_cmd.h" #include "hinic3_hw_mt.h"
#define HINIC3_CMDQ_BUF_MAX_SIZE 2048U @@ -14,8 +14,10 @@ #define MSG_MAX_IN_SIZE (2048 * 1024) #define MSG_MAX_OUT_SIZE (2048 * 1024)
+#define API_CSR_MAX_RD_LEN (4 * 1024 * 1024) + /* completion timeout interval, unit is millisecond */ -#define MGMT_MSG_UPDATE_TIMEOUT 50000U +#define MGMT_MSG_UPDATE_TIMEOUT 200000U
void free_buff_in(void *hwdev, const struct msg_module *nt_msg, void *buf_in) { @@ -127,6 +129,9 @@ int copy_buf_out_to_user(struct msg_module *nt_msg, int ret = 0; void *msg_out = NULL;
+ if (out_size == 0 || !buf_out) + return 0; + if (nt_msg->module == SEND_TO_NPU && !nt_msg->npu_cmd.direct_resp) msg_out = ((struct hinic3_cmd_buf *)buf_out)->buf; @@ -177,7 +182,7 @@ int get_hw_driver_stats(struct hinic3_lld_dev *lld_dev, const void *buf_in, u32 void *buf_out, u32 *out_size) { return hinic3_dbg_get_hw_stats(hinic3_get_sdk_hwdev_by_lld(lld_dev), - buf_out, (u16 *)out_size); + buf_out, out_size); }
int clear_hw_driver_stats(struct hinic3_lld_dev *lld_dev, const void *buf_in, u32 in_size, @@ -220,7 +225,7 @@ int get_chip_faults_stats(struct hinic3_lld_dev *lld_dev, const void *buf_in, u3
if (!buf_in || !buf_out || *out_size != sizeof(*fault_info) || in_size != sizeof(*fault_info)) { - pr_err("Unexpect out buf size from user: %d, expect: %lu\n", + pr_err("Unexpect out buf size from user: %u, expect: %lu\n", *out_size, sizeof(*fault_info)); return -EFAULT; } @@ -251,6 +256,7 @@ static int api_csr_read(void *hwdev, struct msg_module *nt_msg, void *buf_in, u32 in_size, void *buf_out, u32 *out_size) { struct up_log_msg_st *up_log_msg = (struct up_log_msg_st *)buf_in; + u8 *buf_out_tmp = (u8 *)buf_out; int ret = 0; u32 rd_len; u32 rd_addr; @@ -272,7 +278,7 @@ static int api_csr_read(void *hwdev, struct msg_module *nt_msg, for (i = 0; i < rd_cnt; i++) { ret = hinic3_api_csr_rd32(hwdev, node_id, rd_addr + offset, - (u32 *)(((u8 *)buf_out) + offset)); + (u32 *)(buf_out_tmp + offset)); if (ret) { pr_err("Csr rd fail, err: %d, node_id: %u, csr addr: 0x%08x\n", ret, node_id, rd_addr + offset); @@ -299,7 +305,8 @@ static int api_csr_write(void *hwdev, struct msg_module *nt_msg, u32 i; u8 *data = NULL;
- if (!buf_in || in_size != sizeof(*csr_write_msg) || csr_write_msg->rd_len % DW_WIDTH != 0) + if (!buf_in || in_size != sizeof(*csr_write_msg) || csr_write_msg->rd_len == 0 || + csr_write_msg->rd_len > API_CSR_MAX_RD_LEN || csr_write_msg->rd_len % DW_WIDTH != 0) return -EINVAL;
rd_len = csr_write_msg->rd_len; @@ -352,7 +359,7 @@ int send_to_mpu(void *hwdev, struct msg_module *nt_msg,
if (nt_msg->mpu_cmd.api_type == API_TYPE_MBOX) ret = hinic3_msg_to_mgmt_sync(hwdev, mod, cmd, buf_in, (u16)in_size, - buf_out, (u16 *)out_size, timeout, + buf_out, (u16 *)(u8 *)out_size, timeout, HINIC3_CHANNEL_DEFAULT); else ret = hinic3_clp_to_mgmt(hwdev, mod, cmd, buf_in, (u16)in_size, @@ -371,10 +378,10 @@ int send_to_mpu(void *hwdev, struct msg_module *nt_msg, if (hinic3_pcie_itf_id(hwdev) != SPU_HOST_ID) ret = hinic3_msg_to_mgmt_api_chain_sync(hwdev, mod, cmd, buf_in, (u16)in_size, buf_out, - (u16 *)out_size, timeout); + (u16 *)(u8 *)out_size, timeout); else ret = hinic3_msg_to_mgmt_sync(hwdev, mod, cmd, buf_in, (u16)in_size, - buf_out, (u16 *)out_size, timeout, + buf_out, (u16 *)(u8 *)out_size, timeout, HINIC3_CHANNEL_DEFAULT); if (ret) { pr_err("Message to mgmt api chain cpu return fail, mod: %d, cmd: %u\n", @@ -382,7 +389,7 @@ int send_to_mpu(void *hwdev, struct msg_module *nt_msg, return ret; } } else { - pr_err("Unsupported api_type %d\n", nt_msg->mpu_cmd.api_type); + pr_err("Unsupported api_type %u\n", nt_msg->mpu_cmd.api_type); return -EINVAL; }
@@ -561,16 +568,17 @@ int send_to_sm(void *hwdev, struct msg_module *nt_msg, { struct sm_in_st *sm_in = buf_in; struct sm_out_st *sm_out = buf_out; - u32 msg_formate = nt_msg->msg_formate; - int index, num_cmds = sizeof(sm_module_cmd_handle) / - sizeof(sm_module_cmd_handle[0]); + u32 msg_formate; + int index, num_cmds = ARRAY_LEN(sm_module_cmd_handle); int ret = 0;
- if (!buf_in || !buf_out || in_size != sizeof(*sm_in) || *out_size != sizeof(*sm_out)) { + if (!nt_msg || !buf_in || !buf_out || + in_size != sizeof(*sm_in) || *out_size != sizeof(*sm_out)) { pr_err("Unexpect out buf size :%u, in buf size: %u\n", *out_size, in_size); return -EINVAL; } + msg_formate = nt_msg->msg_formate;
for (index = 0; index < num_cmds; index++) { if (msg_formate != sm_module_cmd_handle[index].sm_cmd_name) @@ -586,9 +594,8 @@ int send_to_sm(void *hwdev, struct msg_module *nt_msg, pr_err("Can't find callback for %d\n", msg_formate); return -EINVAL; } - if (ret != 0) - pr_err("Get sm information fail, id:%u, instance:%u, node:%u\n", + pr_err("Get sm information fail, id:%d, instance:%d, node:%d\n", sm_in->id, sm_in->instance, sm_in->node);
*out_size = sizeof(struct sm_out_st); diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.c index ac1f4955f..83e7b63ac 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.c @@ -28,7 +28,9 @@ #include "hinic3_mbox.h" #include "hinic3_cmdq.h" #include "hinic3_hw_cfg.h" +#include "hinic3_multi_host_mgmt.h" #include "hinic3_hw_comm.h" +#include "hinic3_cqm.h" #include "hinic3_prof_adap.h" #include "hinic3_devlink.h" #include "hinic3_hwdev.h" @@ -138,7 +140,12 @@ EXPORT_SYMBOL(hinic3_get_slave_host_enable); int hinic3_get_slave_bitmap(void *hwdev, u8 *slave_host_bitmap) { struct hinic3_hwdev *dev = hwdev; - struct service_cap *cap = &dev->cfg_mgmt->svc_cap; + struct service_cap *cap = NULL; + + if (!dev || !slave_host_bitmap) + return -EINVAL; + + cap = &dev->cfg_mgmt->svc_cap;
if (HINIC3_FUNC_TYPE(dev) != TYPE_PPF) { sdk_warn(dev->dev_hdl, "hwdev should be ppf\n"); @@ -151,7 +158,7 @@ int hinic3_get_slave_bitmap(void *hwdev, u8 *slave_host_bitmap) } EXPORT_SYMBOL(hinic3_get_slave_bitmap);
-static void set_func_host_mode(struct hinic3_hwdev *hwdev, enum hinic3_func_mode mode) +void set_func_host_mode(struct hinic3_hwdev *hwdev, enum hinic3_func_mode mode) { switch (mode) { case FUNC_MOD_MULTI_BM_MASTER: @@ -181,11 +188,6 @@ static void hinic3_init_host_mode_pre(struct hinic3_hwdev *hwdev) struct service_cap *cap = &hwdev->cfg_mgmt->svc_cap; u8 host_id = hwdev->hwif->attr.pci_intf_idx;
- if (HINIC3_FUNC_TYPE(hwdev) == TYPE_VF) { - set_func_host_mode(hwdev, FUNC_MOD_NORMAL_HOST); - return; - } - switch (cap->srv_multi_host_mode) { case HINIC3_SDI_MODE_BM: if (host_id == cap->master_host_id) @@ -205,28 +207,6 @@ static void hinic3_init_host_mode_pre(struct hinic3_hwdev *hwdev) } }
-static int hinic3_multi_host_init(struct hinic3_hwdev *hwdev) -{ - if (!IS_MULTI_HOST(hwdev) || !HINIC3_IS_PPF(hwdev)) - return 0; - - if (IS_SLAVE_HOST(hwdev)) - set_slave_host_enable(hwdev, hinic3_pcie_itf_id(hwdev), true); - - return 0; -} - -static int hinic3_multi_host_free(struct hinic3_hwdev *hwdev) -{ - if (!IS_MULTI_HOST(hwdev) || !HINIC3_IS_PPF(hwdev)) - return 0; - - if (IS_SLAVE_HOST(hwdev)) - set_slave_host_enable(hwdev, hinic3_pcie_itf_id(hwdev), false); - - return 0; -} - static u8 hinic3_nic_sw_aeqe_handler(void *hwdev, u8 event, u8 *data) { struct hinic3_hwdev *dev = hwdev; @@ -524,13 +504,13 @@ static void sw_watchdog_timeout_info_show(struct hinic3_hwdev *hwdev,
sdk_err(hwdev->dev_hdl, "Mgmt pc: 0x%llx, elr: 0x%llx, spsr: 0x%llx, far: 0x%llx, esr: 0x%llx, xzr: 0x%llx\n", watchdog_info->pc, watchdog_info->elr, watchdog_info->spsr, watchdog_info->far, - watchdog_info->esr, watchdog_info->xzr); /*lint !e10 !e26 */ + watchdog_info->esr, watchdog_info->xzr);
sdk_err(hwdev->dev_hdl, "Mgmt register info\n"); reg = &watchdog_info->x30; for (i = 0; i <= X_CSR_INDEX; i++) sdk_err(hwdev->dev_hdl, "x%02u:0x%llx\n", - X_CSR_INDEX - i, reg[i]); /*lint !e661 !e662 */ + X_CSR_INDEX - i, reg[i]);
if (watchdog_info->stack_actlen <= DATA_LEN_1K) { stack_len = watchdog_info->stack_actlen; @@ -816,7 +796,7 @@ static int init_aeqs_msix_attr(struct hinic3_hwdev *hwdev) info.msix_index = eq->eq_irq.msix_entry_idx; err = hinic3_set_interrupt_cfg_direct(hwdev, &info, HINIC3_CHANNEL_COMM); - if (err) { + if (err != 0) { sdk_err(hwdev->dev_hdl, "Set msix attr for aeq %d failed\n", q_id); return -EFAULT; @@ -845,7 +825,7 @@ static int init_ceqs_msix_attr(struct hinic3_hwdev *hwdev) info.msix_index = eq->eq_irq.msix_entry_idx; err = hinic3_set_interrupt_cfg(hwdev, info, HINIC3_CHANNEL_COMM); - if (err) { + if (err != 0) { sdk_err(hwdev->dev_hdl, "Set msix attr for ceq %u failed\n", q_id); return -EFAULT; @@ -863,7 +843,7 @@ static int hinic3_comm_clp_to_mgmt_init(struct hinic3_hwdev *hwdev) return 0;
err = hinic3_clp_pf_to_mgmt_init(hwdev); - if (err) + if (err != 0) return err;
return 0; @@ -891,7 +871,7 @@ static int hinic3_comm_aeqs_init(struct hinic3_hwdev *hwdev) } err = hinic3_alloc_irqs(hwdev, SERVICE_T_INTF, num_aeqs, aeq_irqs, &resp_num_irq); - if (err) { + if (err != 0) { sdk_err(hwdev->dev_hdl, "Failed to alloc aeq irqs, num_aeqs: %u\n", num_aeqs); return err; @@ -904,7 +884,7 @@ static int hinic3_comm_aeqs_init(struct hinic3_hwdev *hwdev) }
err = hinic3_aeqs_init(hwdev, num_aeqs, aeq_irqs); - if (err) { + if (err != 0) { sdk_err(hwdev->dev_hdl, "Failed to init aeqs\n"); goto aeqs_init_err; } @@ -923,7 +903,7 @@ static void hinic3_comm_aeqs_free(struct hinic3_hwdev *hwdev) struct irq_info aeq_irqs[HINIC3_MAX_AEQS] = {{0} }; u16 num_irqs, i;
- hinic3_get_aeq_irqs(hwdev, aeq_irqs, &num_irqs); + hinic3_get_aeq_irqs(hwdev, (struct irq_info *)aeq_irqs, &num_irqs);
hinic3_aeqs_free(hwdev);
@@ -946,7 +926,7 @@ static int hinic3_comm_ceqs_init(struct hinic3_hwdev *hwdev)
err = hinic3_alloc_irqs(hwdev, SERVICE_T_INTF, num_ceqs, ceq_irqs, &resp_num_irq); - if (err) { + if (err != 0) { sdk_err(hwdev->dev_hdl, "Failed to alloc ceq irqs, num_ceqs: %u\n", num_ceqs); return err; @@ -959,7 +939,7 @@ static int hinic3_comm_ceqs_init(struct hinic3_hwdev *hwdev) }
err = hinic3_ceqs_init(hwdev, num_ceqs, ceq_irqs); - if (err) { + if (err != 0) { sdk_err(hwdev->dev_hdl, "Failed to init ceqs, err:%d\n", err); goto ceqs_init_err; @@ -980,7 +960,7 @@ static void hinic3_comm_ceqs_free(struct hinic3_hwdev *hwdev) u16 num_irqs; int i;
- hinic3_get_ceq_irqs(hwdev, ceq_irqs, &num_irqs); + hinic3_get_ceq_irqs(hwdev, (struct irq_info *)ceq_irqs, &num_irqs);
hinic3_ceqs_free(hwdev);
@@ -993,7 +973,7 @@ static int hinic3_comm_func_to_func_init(struct hinic3_hwdev *hwdev) int err;
err = hinic3_func_to_func_init(hwdev); - if (err) + if (err != 0) return err;
hinic3_aeq_register_hw_cb(hwdev, hwdev, HINIC3_MBX_FROM_FUNC, @@ -1001,14 +981,13 @@ static int hinic3_comm_func_to_func_init(struct hinic3_hwdev *hwdev) hinic3_aeq_register_hw_cb(hwdev, hwdev, HINIC3_MSG_FROM_MGMT_CPU, hinic3_mgmt_msg_aeqe_handler);
- if (!HINIC3_IS_VF(hwdev)) - hinic3_register_pf_mbox_cb(hwdev, HINIC3_MOD_COMM, - hwdev, - pf_handle_vf_comm_mbox); - else - hinic3_register_vf_mbox_cb(hwdev, HINIC3_MOD_COMM, - hwdev, - vf_handle_pf_comm_mbox); + if (!HINIC3_IS_VF(hwdev)) { + hinic3_register_pf_mbox_cb(hwdev, HINIC3_MOD_COMM, hwdev, pf_handle_vf_comm_mbox); + hinic3_register_pf_mbox_cb(hwdev, HINIC3_MOD_SW_FUNC, + hwdev, sw_func_pf_mbox_handler); + } else { + hinic3_register_vf_mbox_cb(hwdev, HINIC3_MOD_COMM, hwdev, vf_handle_pf_comm_mbox); + }
set_bit(HINIC3_HWDEV_MBOX_INITED, &hwdev->func_state);
@@ -1042,7 +1021,7 @@ static int hinic3_comm_pf_to_mgmt_init(struct hinic3_hwdev *hwdev) return 0;
err = hinic3_pf_to_mgmt_init(hwdev); - if (err) + if (err != 0) return err;
hinic3_register_mgmt_msg_cb(hwdev, HINIC3_MOD_COMM, hwdev, @@ -1074,7 +1053,7 @@ static int hinic3_comm_cmdqs_init(struct hinic3_hwdev *hwdev) int err;
err = hinic3_cmdqs_init(hwdev); - if (err) { + if (err != 0) { sdk_err(hwdev->dev_hdl, "Failed to init cmd queues\n"); return err; } @@ -1082,7 +1061,7 @@ static int hinic3_comm_cmdqs_init(struct hinic3_hwdev *hwdev) hinic3_ceq_register_cb(hwdev, hwdev, HINIC3_CMDQ, hinic3_cmdq_ceq_handler);
err = hinic3_set_cmdq_depth(hwdev, HINIC3_CMDQ_DEPTH); - if (err) { + if (err != 0) { sdk_err(hwdev->dev_hdl, "Failed to set cmdq depth\n"); goto set_cmdq_depth_err; } @@ -1109,7 +1088,7 @@ static void hinic3_comm_cmdqs_free(struct hinic3_hwdev *hwdev)
static void hinic3_sync_mgmt_func_state(struct hinic3_hwdev *hwdev) { - hinic3_set_pf_status(hwdev->hwif, HINIC3_PF_STATUS_ACTIVE_FLAG); + hinic3_set_pf_status(hwdev->hwif, HINIC3_PF_STATUS_ACTIVE_FLAG); }
static void hinic3_unsync_mgmt_func_state(struct hinic3_hwdev *hwdev) @@ -1127,12 +1106,12 @@ static int init_basic_attributes(struct hinic3_hwdev *hwdev)
err = hinic3_get_board_info(hwdev, &hwdev->board_info, HINIC3_CHANNEL_COMM); - if (err) + if (err != 0) return err;
err = hinic3_get_comm_features(hwdev, hwdev->features, COMM_MAX_FEATURE_QWORD); - if (err) { + if (err != 0) { sdk_err(hwdev->dev_hdl, "Get comm features failed\n"); return err; } @@ -1144,7 +1123,7 @@ static int init_basic_attributes(struct hinic3_hwdev *hwdev) hwdev->features[i] &= drv_features[i];
err = hinic3_get_global_attr(hwdev, &hwdev->glb_attr); - if (err) { + if (err != 0) { sdk_err(hwdev->dev_hdl, "Failed to get global attribute\n"); return err; } @@ -1164,19 +1143,19 @@ static int init_basic_mgmt_channel(struct hinic3_hwdev *hwdev) int err;
err = hinic3_comm_aeqs_init(hwdev); - if (err) { + if (err != 0) { sdk_err(hwdev->dev_hdl, "Failed to init async event queues\n"); return err; }
err = hinic3_comm_func_to_func_init(hwdev); - if (err) { + if (err != 0) { sdk_err(hwdev->dev_hdl, "Failed to init mailbox\n"); goto func_to_func_init_err; }
err = init_aeqs_msix_attr(hwdev); - if (err) { + if (err != 0) { sdk_err(hwdev->dev_hdl, "Failed to init aeqs msix attr\n"); goto aeqs_msix_attr_init_err; } @@ -1203,13 +1182,13 @@ static int init_pf_mgmt_channel(struct hinic3_hwdev *hwdev) int err;
err = hinic3_comm_clp_to_mgmt_init(hwdev); - if (err) { + if (err != 0) { sdk_err(hwdev->dev_hdl, "Failed to init clp\n"); return err; }
err = hinic3_comm_pf_to_mgmt_init(hwdev); - if (err) { + if (err != 0) { hinic3_comm_clp_to_mgmt_free(hwdev); sdk_err(hwdev->dev_hdl, "Failed to init pf to mgmt\n"); return err; @@ -1233,14 +1212,14 @@ static int init_mgmt_channel_post(struct hinic3_hwdev *hwdev) */ if (HINIC3_IS_PPF(hwdev)) { err = hinic3_mbox_init_host_msg_channel(hwdev); - if (err) { + if (err != 0) { sdk_err(hwdev->dev_hdl, "Failed to init mbox host channel\n"); return err; } }
err = init_pf_mgmt_channel(hwdev); - if (err) + if (err != 0) return err;
return 0; @@ -1256,19 +1235,19 @@ static int init_cmdqs_channel(struct hinic3_hwdev *hwdev) int err;
err = dma_attr_table_init(hwdev); - if (err) { + if (err != 0) { sdk_err(hwdev->dev_hdl, "Failed to init dma attr table\n"); goto dma_attr_init_err; }
err = hinic3_comm_ceqs_init(hwdev); - if (err) { + if (err != 0) { sdk_err(hwdev->dev_hdl, "Failed to init completion event queues\n"); goto ceqs_init_err; }
err = init_ceqs_msix_attr(hwdev); - if (err) { + if (err != 0) { sdk_err(hwdev->dev_hdl, "Failed to init ceqs msix attr\n"); goto init_ceq_msix_err; } @@ -1284,13 +1263,13 @@ static int init_cmdqs_channel(struct hinic3_hwdev *hwdev) sdk_info(hwdev->dev_hdl, "WQ page size: 0x%x\n", hwdev->wq_page_size); err = hinic3_set_wq_page_size(hwdev, hinic3_global_func_id(hwdev), hwdev->wq_page_size, HINIC3_CHANNEL_COMM); - if (err) { + if (err != 0) { sdk_err(hwdev->dev_hdl, "Failed to set wq page size\n"); goto init_wq_pg_size_err; }
err = hinic3_comm_cmdqs_init(hwdev); - if (err) { + if (err != 0) { sdk_err(hwdev->dev_hdl, "Failed to init cmd queues\n"); goto cmdq_init_err; } @@ -1328,28 +1307,28 @@ static int hinic3_init_comm_ch(struct hinic3_hwdev *hwdev) int err;
err = init_basic_mgmt_channel(hwdev); - if (err) + if (err != 0) return err;
err = hinic3_func_reset(hwdev, hinic3_global_func_id(hwdev), HINIC3_COMM_RES, HINIC3_CHANNEL_COMM); - if (err) + if (err != 0) goto func_reset_err;
err = init_basic_attributes(hwdev); - if (err) + if (err != 0) goto init_basic_attr_err;
err = init_mgmt_channel_post(hwdev); - if (err) + if (err != 0) goto init_mgmt_channel_post_err;
err = hinic3_set_func_svc_used_state(hwdev, SVC_T_COMM, 1, HINIC3_CHANNEL_COMM); - if (err) + if (err != 0) goto set_used_state_err;
err = init_cmdqs_channel(hwdev); - if (err) { + if (err != 0) { sdk_err(hwdev->dev_hdl, "Failed to init cmdq channel\n"); goto init_cmdqs_channel_err; } @@ -1363,7 +1342,7 @@ static int hinic3_init_comm_ch(struct hinic3_hwdev *hwdev)
err = hinic3_aeq_register_swe_cb(hwdev, hwdev, HINIC3_STATELESS_EVENT, hinic3_nic_sw_aeqe_handler); - if (err) { + if (err != 0) { sdk_err(hwdev->dev_hdl, "Failed to register sw aeqe handler\n"); goto register_ucode_aeqe_err; @@ -1408,7 +1387,7 @@ static void hinic3_auto_sync_time_work(struct work_struct *work) int err;
err = hinic3_sync_time(hwdev, ossl_get_real_time()); - if (err) + if (err != 0) sdk_err(hwdev->dev_hdl, "Synchronize UTC time to firmware failed, errno:%d.\n", err);
@@ -1478,6 +1457,7 @@ static int init_hwdew(struct hinic3_init_para *para) hwdev->poll = para->poll; hwdev->probe_fault_level = para->probe_fault_level; hwdev->func_state = 0; + sema_init(&hwdev->ppf_sem, 1);
hwdev->chip_fault_stats = vzalloc(HINIC3_CHIP_FAULT_SIZE); if (!hwdev->chip_fault_stats) @@ -1492,6 +1472,7 @@ static int init_hwdew(struct hinic3_init_para *para) return 0;
alloc_chip_fault_stats_err: + sema_deinit(&hwdev->ppf_sem); para->probe_fault_level = hwdev->probe_fault_level; kfree(hwdev); *para->hwdev = NULL; @@ -1500,18 +1481,17 @@ static int init_hwdew(struct hinic3_init_para *para)
int hinic3_init_hwdev(struct hinic3_init_para *para) { - struct hinic3_hwdev *hwdev; + struct hinic3_hwdev *hwdev = NULL; int err;
err = init_hwdew(para); - if (err) + if (err != 0) return err;
hwdev = *para->hwdev; - err = hinic3_init_hwif(hwdev, para->cfg_reg_base, para->intr_reg_base, para->mgmt_reg_base, para->db_base_phy, para->db_base, para->db_dwqe_len); - if (err) { + if (err != 0) { sdk_err(hwdev->dev_hdl, "Failed to init hwif\n"); goto init_hwif_err; } @@ -1529,45 +1509,45 @@ int hinic3_init_hwdev(struct hinic3_init_para *para) hinic3_init_heartbeat_detect(hwdev);
err = init_cfg_mgmt(hwdev); - if (err) { + if (err != 0) { sdk_err(hwdev->dev_hdl, "Failed to init config mgmt\n"); goto init_cfg_mgmt_err; }
err = hinic3_init_comm_ch(hwdev); - if (err) { + if (err != 0) { sdk_err(hwdev->dev_hdl, "Failed to init communication channel\n"); goto init_comm_ch_err; }
#ifdef HAVE_DEVLINK_FLASH_UPDATE_PARAMS err = hinic3_init_devlink(hwdev); - if (err) { + if (err != 0) { sdk_err(hwdev->dev_hdl, "Failed to init devlink\n"); goto init_devlink_err; } #endif
err = init_capability(hwdev); - if (err) { + if (err != 0) { sdk_err(hwdev->dev_hdl, "Failed to init capability\n"); goto init_cap_err; }
hinic3_init_host_mode_pre(hwdev);
- err = hinic3_multi_host_init(hwdev); - if (err) { + err = hinic3_multi_host_mgmt_init(hwdev); + if (err != 0) { sdk_err(hwdev->dev_hdl, "Failed to init function mode\n"); goto init_multi_host_fail; }
err = hinic3_init_ppf_work(hwdev); - if (err) + if (err != 0) goto init_ppf_work_fail;
err = hinic3_set_comm_features(hwdev, hwdev->features, COMM_MAX_FEATURE_QWORD); - if (err) { + if (err != 0) { sdk_err(hwdev->dev_hdl, "Failed to set comm features\n"); goto set_feature_err; } @@ -1578,7 +1558,7 @@ int hinic3_init_hwdev(struct hinic3_init_para *para) hinic3_free_ppf_work(hwdev);
init_ppf_work_fail: - hinic3_multi_host_free(hwdev); + hinic3_multi_host_mgmt_free(hwdev);
init_multi_host_fail: free_capability(hwdev); @@ -1623,9 +1603,9 @@ void hinic3_free_hwdev(void *hwdev)
hinic3_free_ppf_work(dev);
- hinic3_multi_host_free(dev); + hinic3_multi_host_mgmt_free(dev);
- hinic3_func_rx_tx_flush(hwdev, HINIC3_CHANNEL_COMM); + hinic3_func_rx_tx_flush(hwdev, HINIC3_CHANNEL_COMM, true);
free_capability(dev);
@@ -1698,7 +1678,7 @@ void *hinic3_get_service_adapter(void *hwdev, enum hinic3_service_type type) } EXPORT_SYMBOL(hinic3_get_service_adapter);
-int hinic3_dbg_get_hw_stats(const void *hwdev, u8 *hw_stats, const u16 *out_size) +int hinic3_dbg_get_hw_stats(const void *hwdev, u8 *hw_stats, const u32 *out_size) { struct hinic3_hw_stats *tmp_hw_stats = (struct hinic3_hw_stats *)hw_stats; struct card_node *chip_node = NULL; @@ -1712,8 +1692,8 @@ int hinic3_dbg_get_hw_stats(const void *hwdev, u8 *hw_stats, const u16 *out_size return -EFAULT; }
- memcpy(hw_stats, &((struct hinic3_hwdev *)hwdev)->hw_stats, - sizeof(struct hinic3_hw_stats)); + memcpy(hw_stats, + &((struct hinic3_hwdev *)hwdev)->hw_stats, sizeof(struct hinic3_hw_stats));
chip_node = ((struct hinic3_hwdev *)hwdev)->chip_node;
@@ -1835,6 +1815,16 @@ bool hinic3_need_init_stateful_default(void *hwdev) if (hinic3_func_type(hwdev) == TYPE_PPF && (chip_svc_type & CFG_SERVICE_MASK_VIRTIO) != 0) return true;
+ /* vroce has to init cqm */
+ if (IS_MASTER_HOST(dev) &&
+ (hinic3_func_type(hwdev) != TYPE_PPF) &&
+ ((chip_svc_type & CFG_SERVICE_MASK_ROCE) != 0))
+ return true;
+
+ /* SDI 5.1 VM-mode nano OS: PF0 acts as PPF and must do stateful init, or the mailbox will fail */
+ if (hinic3_func_type(hwdev) == TYPE_PPF && hinic3_is_vm_slave_host(hwdev))
+ return true;
+
 /* Other service type will init cqm when uld call. */
 return false;
}
@@ -1843,6 +1833,8 @@ static inline void stateful_uninit(struct hinic3_hwdev *hwdev)
{
 u32 stateful_en;
+ cqm_uninit(hwdev); + stateful_en = IS_FT_TYPE(hwdev) | IS_RDMA_TYPE(hwdev); if (stateful_en) hinic3_ppf_ext_db_deinit(hwdev); @@ -1869,15 +1861,25 @@ int hinic3_stateful_init(void *hwdev) stateful_en = (int)(IS_FT_TYPE(dev) | IS_RDMA_TYPE(dev)); if (stateful_en != 0 && HINIC3_IS_PPF(dev)) { err = hinic3_ppf_ext_db_init(dev); - if (err) + if (err != 0) goto out; }
+ err = cqm_init(dev); + if (err != 0) { + sdk_err(dev->dev_hdl, "Failed to init cqm, err: %d\n", err); + goto init_cqm_err; + } + mutex_unlock(&dev->stateful_mutex); sdk_info(dev->dev_hdl, "Initialize stateful resource success\n");
return 0;
+init_cqm_err: + if (stateful_en != 0) + hinic3_ppf_ext_db_deinit(dev); + out: dev->stateful_ref_cnt--; mutex_unlock(&dev->stateful_mutex); @@ -1967,6 +1969,26 @@ void hinic3_fault_event_report(void *hwdev, u16 src, u16 level) } EXPORT_SYMBOL(hinic3_fault_event_report);
+int hinic3_is_slave_func(const void *hwdev, bool *is_slave_func) +{ + if (!hwdev) + return -EINVAL; + + *is_slave_func = IS_SLAVE_HOST((struct hinic3_hwdev *)hwdev); + return 0; +} +EXPORT_SYMBOL(hinic3_is_slave_func); + +int hinic3_is_master_func(const void *hwdev, bool *is_master_func) +{ + if (!hwdev) + return -EINVAL; + + *is_master_func = IS_MASTER_HOST((struct hinic3_hwdev *)hwdev); + return 0; +} +EXPORT_SYMBOL(hinic3_is_master_func); + void hinic3_probe_success(void *hwdev) { if (!hwdev) diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.h index 9f7d8a485..e739767ed 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.h +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwdev.h @@ -4,9 +4,11 @@ #ifndef HINIC3_HWDEV_H #define HINIC3_HWDEV_H
+#include <linux/workqueue.h> #include "hinic3_mt.h" #include "hinic3_crm.h" #include "hinic3_hw.h" +#include "mpu_inband_cmd_defs.h" #include "hinic3_profile.h"
struct cfg_mgmt_info; @@ -30,7 +32,7 @@ struct mqm_addr_trans_tbl_info { u32 search_gpa_num; u32 page_size; u32 page_num; - struct hinic3_page_addr *brm_srch_page_addr; + struct hinic3_dma_addr_align *brm_srch_page_addr; };
struct hinic3_devlink { @@ -80,6 +82,24 @@ enum hinic3_host_mode_e { HINIC3_SDI_MODE_MAX, };
+#define MULTI_HOST_CHIP_MODE_SHIFT 0 +#define MULTI_HOST_MASTER_MBX_STS_SHIFT 17 +#define MULTI_HOST_PRIV_DATA_SHIFT 0x8 + +#define MULTI_HOST_CHIP_MODE_MASK 0xF +#define MULTI_HOST_MASTER_MBX_STS_MASK 0x1 +#define MULTI_HOST_PRIV_DATA_MASK 0xFFFF + +#define MULTI_HOST_REG_SET(val, member) \ + (((val) & MULTI_HOST_##member##_MASK) \ + << MULTI_HOST_##member##_SHIFT) +#define MULTI_HOST_REG_GET(val, member) \ + (((val) >> MULTI_HOST_##member##_SHIFT) \ + & MULTI_HOST_##member##_MASK) +#define MULTI_HOST_REG_CLEAR(val, member) \ + ((val) & (~(MULTI_HOST_##member##_MASK \ + << MULTI_HOST_##member##_SHIFT))) + struct hinic3_hwdev { void *adapter_hdl; /* pointer to hinic3_pcidev or NDIS_Adapter */ void *pcidev_hdl; /* pointer to pcidev or Handler */ @@ -89,6 +109,7 @@ struct hinic3_hwdev {
void *service_adapter[SERVICE_T_MAX]; void *chip_node; + struct semaphore ppf_sem; void *ppf_hwdev;
u32 wq_page_size; @@ -116,6 +137,8 @@ struct hinic3_hwdev { u32 stateful_ref_cnt; u32 rsvd2;
+ struct hinic3_multi_host_mgmt *mhost_mgmt; + struct mutex stateful_mutex; /* protect cqm init and deinit */
struct hinic3_hw_stats hw_stats; @@ -128,7 +151,7 @@ struct hinic3_hwdev {
struct delayed_work sync_time_task; struct delayed_work channel_detect_task; - struct hisdk3_prof_attr *prof_attr; + struct hisdk3_prof_attr *prof_attr; struct hinic3_prof_adapter *prof_adap;
struct workqueue_struct *workq; @@ -149,9 +172,13 @@ struct hinic3_hwdev { enum hinic3_func_mode func_mode; u32 rsvd3;
+ DECLARE_BITMAP(func_probe_in_host, MAX_FUNCTION_NUM); + DECLARE_BITMAP(netdev_setup_state, MAX_FUNCTION_NUM); + u64 cur_recv_aeq_cnt; u64 last_recv_aeq_cnt; u16 aeq_busy_cnt; + u64 rsvd4[8]; };
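The MULTI_HOST_REG_* helpers added to this header pack the multi-host chip mode into bits [3:0] and the master mailbox status into bit 17 of one status word. A stand-alone copy of the macros showing the set/get round trip (illustration only):

#include <stdint.h>
#include <stdio.h>

#define MULTI_HOST_CHIP_MODE_SHIFT 0
#define MULTI_HOST_MASTER_MBX_STS_SHIFT 17

#define MULTI_HOST_CHIP_MODE_MASK 0xF
#define MULTI_HOST_MASTER_MBX_STS_MASK 0x1

#define MULTI_HOST_REG_SET(val, member) \
	(((val) & MULTI_HOST_##member##_MASK) << MULTI_HOST_##member##_SHIFT)
#define MULTI_HOST_REG_GET(val, member) \
	(((val) >> MULTI_HOST_##member##_SHIFT) & MULTI_HOST_##member##_MASK)

int main(void)
{
	/* Pack chip mode 0x3 and an asserted master mailbox status bit. */
	uint32_t reg = MULTI_HOST_REG_SET(0x3, CHIP_MODE) |
		       MULTI_HOST_REG_SET(1, MASTER_MBX_STS);

	printf("reg=0x%08x chip_mode=%u master_mbx_sts=%u\n",
	       (unsigned int)reg,
	       (unsigned int)MULTI_HOST_REG_GET(reg, CHIP_MODE),
	       (unsigned int)MULTI_HOST_REG_GET(reg, MASTER_MBX_STS));
	return 0;
}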
@@ -172,4 +199,6 @@ struct hinic3_hwdev { #define COMM_SUPPORT_CMDQ_NUM(hwdev) COMM_FEATURE_QW0(hwdev, CMDQ_NUM) #define COMM_SUPPORT_VIRTIO_VQ_SIZE(hwdev) COMM_FEATURE_QW0(hwdev, VIRTIO_VQ_SIZE)
+void set_func_host_mode(struct hinic3_hwdev *hwdev, enum hinic3_func_mode mode); + #endif diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwif.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwif.c index 9b749135d..648454042 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwif.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_hwif.c @@ -230,6 +230,9 @@ int hinic3_set_host_migrate_enable(void *hwdev, u8 host_id, bool enable)
u32 reg_val;
+ if (!dev || host_id > SPU_HOST_ID) + return -EINVAL; + if (HINIC3_FUNC_TYPE(dev) != TYPE_PPF) { sdk_warn(dev->dev_hdl, "hwdev should be ppf\n"); return -EINVAL; @@ -254,6 +257,9 @@ int hinic3_get_host_migrate_enable(void *hwdev, u8 host_id, u8 *migrate_en)
u32 reg_val;
+ if (!dev || !migrate_en || host_id > SPU_HOST_ID) + return -EINVAL; + if (HINIC3_FUNC_TYPE(dev) != TYPE_PPF) { sdk_warn(dev->dev_hdl, "hwdev should be ppf\n"); return -EINVAL; @@ -712,6 +718,15 @@ static void disable_all_msix(struct hinic3_hwdev *hwdev) hinic3_set_msix_state(hwdev, i, HINIC3_MSIX_DISABLE); }
+static void enable_all_msix(struct hinic3_hwdev *hwdev) +{ + u16 num_irqs = hwdev->hwif->attr.num_irqs; + u16 i; + + for (i = 0; i < num_irqs; i++) + hinic3_set_msix_state(hwdev, i, HINIC3_MSIX_ENABLE); +} + static enum hinic3_wait_return check_db_outbound_enable_handler(void *priv_data) { struct hinic3_hwif *hwif = priv_data; @@ -824,6 +839,7 @@ void hinic3_free_hwif(struct hinic3_hwdev *hwdev) { spin_lock_deinit(&hwdev->hwif->free_db_area.idx_lock); free_db_area(&hwdev->hwif->free_db_area); + enable_all_msix(hwdev); kfree(hwdev->hwif); }
@@ -840,6 +856,44 @@ u16 hinic3_global_func_id(void *hwdev) } EXPORT_SYMBOL(hinic3_global_func_id);
+/** + * get function id from register, used by sriov hot migration process + * @hwdev: the pointer to hw device + */ +u16 hinic3_global_func_id_hw(void *hwdev) +{ + u32 addr, attr0; + struct hinic3_hwdev *dev; + + dev = (struct hinic3_hwdev *)hwdev; + addr = HINIC3_CSR_FUNC_ATTR0_ADDR; + attr0 = hinic3_hwif_read_reg(dev->hwif, addr); + + return HINIC3_AF0_GET(attr0, FUNC_GLOBAL_IDX); +} + +/** + * get function id, used by sriov hot migration process. + * @hwdev: the pointer to hw device + * @func_id: function id + */ +int hinic3_global_func_id_get(void *hwdev, u16 *func_id) +{ + struct hinic3_hwdev *dev = (struct hinic3_hwdev *)hwdev; + + if (!hwdev || !func_id) + return -EINVAL; + + /* only vf gets func_id from chip reg for sriov migrate */ + if (!HINIC3_IS_VF(dev)) { + *func_id = hinic3_global_func_id(hwdev); + return 0; + } + + *func_id = hinic3_global_func_id_hw(dev); + return 0; +} + u16 hinic3_intr_num(void *hwdev) { struct hinic3_hwif *hwif = NULL; diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mbox.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mbox.c index debb4f155..b2851c620 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mbox.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mbox.c @@ -20,12 +20,14 @@ #include "hinic3_common.h" #include "hinic3_mbox.h"
+#define HINIC3_MBOX_USEC_50 50 + #define HINIC3_MBOX_INT_DST_AEQN_SHIFT 10 #define HINIC3_MBOX_INT_SRC_RESP_AEQN_SHIFT 12 #define HINIC3_MBOX_INT_STAT_DMA_SHIFT 14 /* The size of data to be send (unit of 4 bytes) */ #define HINIC3_MBOX_INT_TX_SIZE_SHIFT 20 -/* SO_RO(strong order, relax order) */ +/* SO_RO(strong order, relax order) */ #define HINIC3_MBOX_INT_STAT_DMA_SO_RO_SHIFT 25 #define HINIC3_MBOX_INT_WB_EN_SHIFT 28
@@ -63,13 +65,10 @@ enum hinic3_mbox_tx_status { #define MBOX_SEGLEN_MASK \ HINIC3_MSG_HEADER_SET(HINIC3_MSG_HEADER_SEG_LEN_MASK, SEG_LEN)
+#define MBOX_MSG_WAIT_ONCE_TIME_US 10 #define MBOX_MSG_POLLING_TIMEOUT 8000 #define HINIC3_MBOX_COMP_TIME 40000U
-#define MBOX_MAX_BUF_SZ 2048U -#define MBOX_HEADER_SZ 8 -#define HINIC3_MBOX_DATA_SIZE (MBOX_MAX_BUF_SZ - MBOX_HEADER_SZ) - /* MBOX size is 64B, 8B for mbox_header, 8B reserved */ #define MBOX_SEG_LEN 48 #define MBOX_SEG_LEN_ALIGN 4 @@ -642,6 +641,7 @@ static void recv_mbox_msg_handler(struct hinic3_mbox *func_to_func, } recv_msg->msg_len = msg_desc->msg_len; memcpy(recv_msg->msg, msg_desc->msg, recv_msg->msg_len); + recv_msg->msg_id = msg_desc->msg_info.msg_id; recv_msg->mod = HINIC3_MSG_HEADER_GET(mbox_header, MODULE); recv_msg->cmd = HINIC3_MSG_HEADER_GET(mbox_header, CMD); @@ -825,14 +825,14 @@ static void hinic3_deinit_mbox_dma_queue(struct hinic3_mbox *func_to_func) #define MBOX_XOR_DATA_ALIGN 4 static u32 mbox_dma_msg_xor(u32 *data, u16 msg_len) { - u32 xor = MBOX_DMA_MSG_INIT_XOR_VAL; + u32 mbox_xor = MBOX_DMA_MSG_INIT_XOR_VAL; u16 dw_len = msg_len / sizeof(u32); u16 i;
for (i = 0; i < dw_len; i++) - xor ^= data[i]; + mbox_xor ^= data[i];
- return xor; + return mbox_xor; }
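The DMA mailbox path guards each queued message with a dword-wise XOR checksum seeded by MBOX_DMA_MSG_INIT_XOR_VAL. Below is a standalone sketch of the same computation; the seed is taken as a parameter because its value is not visible in this hunk, and dma_msg_xor_sketch() is a hypothetical name.

#include <stdint.h>

/* XOR every 32-bit word of the message into a running checksum.
 * msg_len is assumed to be a multiple of 4 bytes, matching the
 * MBOX_XOR_DATA_ALIGN requirement in the driver.
 */
static uint32_t dma_msg_xor_sketch(const uint32_t *data, uint16_t msg_len,
				   uint32_t init_val)
{
	uint32_t csum = init_val;
	uint16_t dw_len = (uint16_t)(msg_len / sizeof(uint32_t));
	uint16_t i;

	for (i = 0; i < dw_len; i++)
		csum ^= data[i];

	return csum;
}

The result is presumably carried in the mbox_dma_msg xor field so the receiving side can recompute it over the DMA'd payload and discard corrupted entries.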
#define MQ_ID_MASK(mq, idx) ((idx) & ((mq)->depth - 1)) @@ -843,7 +843,7 @@ static int mbox_prepare_dma_entry(struct hinic3_mbox *func_to_func, struct mbox_ struct mbox_dma_msg *dma_msg, void *msg, u16 msg_len) { u64 dma_addr, offset; - void *dma_vaddr; + void *dma_vaddr = NULL;
if (IS_MSG_QUEUE_FULL(mq)) { sdk_err(func_to_func->hwdev->dev_hdl, "Mbox sync message queue is busy, pi: %u, ci: %u\n", @@ -855,6 +855,7 @@ static int mbox_prepare_dma_entry(struct hinic3_mbox *func_to_func, struct mbox_ offset = mq->prod_idx * MBOX_MAX_BUF_SZ; dma_vaddr = (u8 *)mq->dma_buff_vaddr + offset; memcpy(dma_vaddr, msg, msg_len); + dma_addr = mq->dma_buff_paddr + offset; dma_msg->dma_addr_high = upper_32_bits(dma_addr); dma_msg->dma_addr_low = lower_32_bits(dma_addr); @@ -906,9 +907,8 @@ static void mbox_copy_header(struct hinic3_hwdev *hwdev, } }
-static void mbox_copy_send_data(struct hinic3_hwdev *hwdev, - struct hinic3_send_mbox *mbox, void *seg, - u16 seg_len) +static int mbox_copy_send_data(struct hinic3_hwdev *hwdev, + struct hinic3_send_mbox *mbox, void *seg, u16 seg_len) { u32 *data = seg; u32 data_len, chk_sz = sizeof(u32); @@ -928,6 +928,8 @@ static void mbox_copy_send_data(struct hinic3_hwdev *hwdev, __raw_writel(cpu_to_be32(*(data + i)), mbox->data + MBOX_HEADER_SZ + i * sizeof(u32)); } + + return 0; }
static void write_mbox_msg_attr(struct hinic3_mbox *func_to_func, @@ -1020,14 +1022,16 @@ static int send_mbox_seg(struct hinic3_mbox *func_to_func, u64 header,
mbox_copy_header(hwdev, send_mbox, &header);
- mbox_copy_send_data(hwdev, send_mbox, seg, seg_len); + err = mbox_copy_send_data(hwdev, send_mbox, seg, seg_len); + if (err != 0) + return err;
write_mbox_msg_attr(func_to_func, dst_func, dst_aeqn, seg_len);
wmb(); /* writing the mbox msg attributes */
err = hinic3_wait_for_timeout(func_to_func, check_mbox_wb_status, - MBOX_MSG_POLLING_TIMEOUT, USEC_PER_MSEC); + MBOX_MSG_POLLING_TIMEOUT, MBOX_MSG_WAIT_ONCE_TIME_US); wb_status = get_mbox_status(send_mbox); if (err) { sdk_err(hwdev->dev_hdl, "Send mailbox segment timeout, wb status: 0x%x\n", @@ -1117,7 +1121,7 @@ static int send_mbox_msg(struct hinic3_mbox *func_to_func, u8 mod, u16 cmd, }
left -= MBOX_SEG_LEN; - msg_seg += MBOX_SEG_LEN; /*lint !e662 */ + msg_seg += MBOX_SEG_LEN;
seq_id++; header &= ~(HINIC3_MSG_HEADER_SET(HINIC3_MSG_HEADER_SEQID_MASK, @@ -1158,7 +1162,7 @@ static int wait_mbox_msg_completion(struct hinic3_mbox *func_to_func,
wait_time = (timeout != 0) ? timeout : HINIC3_MBOX_COMP_TIME; err = hinic3_wait_for_timeout(func_to_func, check_mbox_msg_finish, - wait_time, USEC_PER_MSEC); + wait_time, HINIC3_MBOX_USEC_50); if (err) { set_mbox_to_func_event(func_to_func, EVENT_TIMEOUT); return -ETIMEDOUT; @@ -1205,10 +1209,11 @@ int hinic3_mbox_to_func(struct hinic3_mbox *func_to_func, u8 mod, u16 cmd, return -EPERM;
/* expect response message */ - msg_desc = get_mbox_msg_desc(func_to_func, HINIC3_MSG_RESPONSE, - dst_func); - if (!msg_desc) + msg_desc = get_mbox_msg_desc(func_to_func, HINIC3_MSG_RESPONSE, dst_func); + if (!msg_desc) { + sdk_err(func_to_func->hwdev->dev_hdl, "Failed to get response msg_desc\n"); + return -EFAULT; + }
err = send_mbox_msg_lock(func_to_func, channel); if (err) @@ -1228,7 +1233,7 @@ int hinic3_mbox_to_func(struct hinic3_mbox *func_to_func, u8 mod, u16 cmd, goto send_err; }
- if (wait_mbox_msg_completion(func_to_func, timeout)) { + if (wait_mbox_msg_completion(func_to_func, timeout) != 0) { sdk_err(func_to_func->hwdev->dev_hdl, "Send mbox msg timeout, msg_id: %u\n", msg_info.msg_id); hinic3_dump_aeq_info(func_to_func->hwdev); @@ -1260,7 +1265,6 @@ int hinic3_mbox_to_func(struct hinic3_mbox *func_to_func, u8 mod, u16 cmd,
if (msg_desc->msg_len) memcpy(buf_out, msg_desc->msg, msg_desc->msg_len); - *out_size = msg_desc->msg_len; }
@@ -1292,9 +1296,29 @@ static int mbox_func_params_valid(struct hinic3_mbox *func_to_func, return 0; }
-static int hinic3_mbox_to_func_no_ack(struct hinic3_hwdev *hwdev, u16 func_idx, - u8 mod, u16 cmd, void *buf_in, u16 in_size, - u16 channel) +int hinic3_mbox_to_host(struct hinic3_hwdev *hwdev, u16 dest_host_ppf_id, enum hinic3_mod_type mod, + u8 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size, + u32 timeout, u16 channel) +{ + struct hinic3_mbox *func_to_func = hwdev->func_to_func; + int err; + + err = mbox_func_params_valid(func_to_func, buf_in, in_size, channel); + if (err) + return err; + + if (!HINIC3_IS_PPF(hwdev)) { + sdk_err(hwdev->dev_hdl, "Params error, only PPF can send message to other host, func_type: %d\n", + hinic3_func_type(hwdev)); + return -EINVAL; + } + + return hinic3_mbox_to_func(func_to_func, mod, cmd, dest_host_ppf_id, buf_in, in_size, + buf_out, out_size, timeout, channel); +} + +int hinic3_mbox_to_func_no_ack(struct hinic3_hwdev *hwdev, u16 func_idx, + u8 mod, u16 cmd, void *buf_in, u16 in_size, u16 channel) { struct mbox_msg_info msg_info = {0}; int err = mbox_func_params_valid(hwdev->func_to_func, buf_in, in_size, @@ -1475,24 +1499,43 @@ int hinic3_mbox_to_vf(void *hwdev, u16 vf_id, u8 mod, u16 cmd, void *buf_in, } EXPORT_SYMBOL(hinic3_mbox_to_vf);
-int hinic3_mbox_set_channel_status(struct hinic3_hwdev *hwdev, u16 channel, - bool enable) +int hinic3_mbox_to_vf_no_ack(void *hwdev, u16 vf_id, u8 mod, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, + u16 *out_size, u16 channel) { - if (channel >= HINIC3_CHANNEL_MAX) { - sdk_err(hwdev->dev_hdl, "Invalid channel id: 0x%x\n", channel); + struct hinic3_mbox *func_to_func = NULL; + int err = 0; + u16 dst_func_idx; + + if (!hwdev) + return -EINVAL; + + func_to_func = ((struct hinic3_hwdev *)hwdev)->func_to_func; + err = mbox_func_params_valid(func_to_func, buf_in, in_size, channel); + if (err != 0) + return err; + + if (HINIC3_IS_VF((struct hinic3_hwdev *)hwdev)) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, "Params error, func_type: %d\n", + hinic3_func_type(hwdev)); return -EINVAL; }
- if (enable) - clear_bit(channel, &hwdev->func_to_func->channel_stop); - else - set_bit(channel, &hwdev->func_to_func->channel_stop); + if (!vf_id) { + sdk_err(((struct hinic3_hwdev *)hwdev)->dev_hdl, + "VF id(%u) error!\n", vf_id); + return -EINVAL; + }
- sdk_info(hwdev->dev_hdl, "%s mbox channel 0x%x\n", - enable ? "Enable" : "Disable", channel); + /* vf_offset_to_pf + vf_id is the VF's global function id within + * this PF + */ + dst_func_idx = hinic3_glb_pf_vf_offset(hwdev) + vf_id;
- return 0; + return hinic3_mbox_to_func_no_ack(hwdev, dst_func_idx, mod, cmd, + buf_in, in_size, channel); } +EXPORT_SYMBOL(hinic3_mbox_to_vf_no_ack);
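The destination index computed above follows the function numbering convention spelled out in the comment: a VF's global function id is the parent PF's global VF offset plus the per-PF vf_id (which starts at 1, hence the !vf_id check). A tiny illustration, with a hypothetical helper name and made-up numbers:

#include <stdint.h>

/* Map a per-PF VF index to its global function id.
 * glb_pf_vf_offset stands in for hinic3_glb_pf_vf_offset(hwdev).
 */
static uint16_t vf_global_func_id(uint16_t glb_pf_vf_offset, uint16_t vf_id)
{
	/* e.g. offset 16, vf_id 3 -> global function id 19 */
	return (uint16_t)(glb_pf_vf_offset + vf_id);
}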
void hinic3_mbox_enable_channel_lock(struct hinic3_hwdev *hwdev, bool enable) { diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mbox.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mbox.h index bf723e8a6..226f8d6b2 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mbox.h +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mbox.h @@ -45,6 +45,10 @@ #define HINIC3_MSG_HEADER_LAST_MASK 0x1 #define HINIC3_MSG_HEADER_DIRECTION_MASK 0x1
+#define MBOX_MAX_BUF_SZ 2048U +#define MBOX_HEADER_SZ 8 +#define HINIC3_MBOX_DATA_SIZE (MBOX_MAX_BUF_SZ - MBOX_HEADER_SZ) + #define HINIC3_MSG_HEADER_GET(val, field) \ (((val) >> HINIC3_MSG_HEADER_##field##_SHIFT) & \ HINIC3_MSG_HEADER_##field##_MASK) @@ -95,12 +99,12 @@ struct mbox_msg_info { };
struct hinic3_msg_desc { - void *msg; - u16 msg_len; - u8 seq_id; - u8 mod; - u16 cmd; - struct mbox_msg_info msg_info; + void *msg; + u16 msg_len; + u8 seq_id; + u8 mod; + u16 cmd; + struct mbox_msg_info msg_info; };
struct hinic3_msg_channel { @@ -112,25 +116,25 @@ struct hinic3_msg_channel {
/* Receive other functions mbox message */ struct hinic3_recv_mbox { - void *msg; - u16 msg_len; - u8 msg_id; - u8 mod; - u16 cmd; - u16 src_func_idx; + void *msg; + u16 msg_len; + u8 msg_id; + u8 mod; + u16 cmd; + u16 src_func_idx;
enum hinic3_msg_ack_type ack_type; - u32 rsvd1; + u32 rsvd1;
- void *resp_buff; + void *resp_buff; };
struct hinic3_send_mbox { - u8 *data; + u8 *data;
- u64 *wb_status; /* write back status */ - void *wb_vaddr; - dma_addr_t wb_paddr; + u64 *wb_status; /* write back status */ + void *wb_vaddr; + dma_addr_t wb_paddr; };
enum mbox_event_state { @@ -152,6 +156,11 @@ enum hinic3_mbox_cb_state { HINIC3_PPF_TO_PF_MBOX_CB_RUNNIG, };
+enum hinic3_mbox_ack_type { + MBOX_ACK, + MBOX_NO_ACK, +}; + struct mbox_dma_msg { u32 xor; u32 dma_addr_high; @@ -161,16 +170,16 @@ struct mbox_dma_msg { };
struct mbox_dma_queue { - void *dma_buff_vaddr; - dma_addr_t dma_buff_paddr; + void *dma_buff_vaddr; + dma_addr_t dma_buff_paddr;
- u16 depth; - u16 prod_idx; - u16 cons_idx; + u16 depth; + u16 prod_idx; + u16 cons_idx; };
struct hinic3_mbox { - struct hinic3_hwdev *hwdev; + struct hinic3_hwdev *hwdev;
bool lock_channel_en; unsigned long channel_stop; @@ -186,7 +195,7 @@ struct hinic3_mbox { struct mbox_dma_queue sync_msg_queue; struct mbox_dma_queue async_msg_queue;
- struct workqueue_struct *workq; + struct workqueue_struct *workq;
struct hinic3_msg_channel mgmt_msg; /* driver and MGMT CPU */ struct hinic3_msg_channel *host_msg; /* PPF message between hosts */ @@ -196,16 +205,16 @@ struct hinic3_mbox {
/* vf receive pf/ppf callback */ hinic3_vf_mbox_cb vf_mbox_cb[HINIC3_MOD_MAX]; - void *vf_mbox_data[HINIC3_MOD_MAX]; + void *vf_mbox_data[HINIC3_MOD_MAX]; /* pf/ppf receive vf callback */ hinic3_pf_mbox_cb pf_mbox_cb[HINIC3_MOD_MAX]; - void *pf_mbox_data[HINIC3_MOD_MAX]; + void *pf_mbox_data[HINIC3_MOD_MAX]; /* ppf receive pf/ppf callback */ hinic3_ppf_mbox_cb ppf_mbox_cb[HINIC3_MOD_MAX]; - void *ppf_mbox_data[HINIC3_MOD_MAX]; + void *ppf_mbox_data[HINIC3_MOD_MAX]; /* pf receive ppf callback */ hinic3_pf_recv_from_ppf_mbox_cb pf_recv_ppf_mbox_cb[HINIC3_MOD_MAX]; - void *pf_recv_ppf_mbox_data[HINIC3_MOD_MAX]; + void *pf_recv_ppf_mbox_data[HINIC3_MOD_MAX]; unsigned long ppf_to_pf_mbox_cb_state[HINIC3_MOD_MAX]; unsigned long ppf_mbox_cb_state[HINIC3_MOD_MAX]; unsigned long pf_mbox_cb_state[HINIC3_MOD_MAX]; @@ -221,8 +230,8 @@ struct hinic3_mbox {
struct hinic3_mbox_work { struct work_struct work; - struct hinic3_mbox *func_to_func; - struct hinic3_recv_mbox *recv_mbox; + struct hinic3_mbox *func_to_func; + struct hinic3_recv_mbox *recv_mbox; struct hinic3_msg_channel *msg_ch; };
@@ -243,6 +252,14 @@ int hinic3_func_to_func_init(struct hinic3_hwdev *hwdev);
void hinic3_func_to_func_free(struct hinic3_hwdev *hwdev);
+int hinic3_mbox_to_host(struct hinic3_hwdev *hwdev, u16 dest_host_ppf_id, + enum hinic3_mod_type mod, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel); + +int hinic3_mbox_to_func_no_ack(struct hinic3_hwdev *hwdev, u16 func_idx, + u8 mod, u16 cmd, void *buf_in, u16 in_size, + u16 channel); + int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size, u32 timeout, u16 channel); @@ -258,9 +275,6 @@ int hinic3_mbox_to_func(struct hinic3_mbox *func_to_func, u8 mod, u16 cmd,
int hinic3_mbox_init_host_msg_channel(struct hinic3_hwdev *hwdev);
-int hinic3_mbox_set_channel_status(struct hinic3_hwdev *hwdev, u16 channel, - bool enable); - void hinic3_mbox_enable_channel_lock(struct hinic3_hwdev *hwdev, bool enable);
#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mgmt.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mgmt.c index 29464eb07..2bd63f1de 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mgmt.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mgmt.c @@ -18,7 +18,7 @@ #include "hinic3_crm.h" #include "hinic3_hw.h" #include "hinic3_common.h" -#include "hinic3_comm_cmd.h" +#include "mpu_inband_cmd.h" #include "hinic3_hwdev.h" #include "hinic3_eqs.h" #include "hinic3_mbox.h" @@ -213,8 +213,7 @@ static void clp_prepare_header(struct hinic3_hwdev *hwdev, u64 *header, * @msg: the data of the message * @msg_len: the length of the message **/ -static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, const void *msg, - int msg_len) +static int prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, const void *msg, int msg_len) { u8 *mgmt_cmd_new = mgmt_cmd;
@@ -225,6 +224,8 @@ static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, const void *msg,
mgmt_cmd_new += sizeof(*header); memcpy(mgmt_cmd_new, msg, (size_t)(u32)msg_len); + + return 0; }
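prepare_mgmt_cmd() lays the command buffer out as an 8-byte header immediately followed by the message payload (the CLP path later in this file builds its buffer the same way). A minimal sketch of that layout; build_mgmt_cmd() is an illustrative name and the caller is assumed to have sized cmd_buf for sizeof(header) + msg_len:

#include <stdint.h>
#include <string.h>

/* Header-then-payload layout: 8 bytes of header, then msg_len bytes of body. */
static void build_mgmt_cmd(uint8_t *cmd_buf, uint64_t header,
			   const void *msg, uint32_t msg_len)
{
	memcpy(cmd_buf, &header, sizeof(header));       /* header first */
	memcpy(cmd_buf + sizeof(header), msg, msg_len); /* then the payload */
}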
/** @@ -249,6 +250,7 @@ static int send_msg_to_mgmt_sync(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt, u8 node_id = HINIC3_MGMT_CPU_NODE_ID(pf_to_mgmt->hwdev); u64 header; u16 cmd_size = mgmt_msg_len(msg_len); + int ret;
if (hinic3_get_chip_present_flag(pf_to_mgmt->hwdev) == 0) return -EFAULT; @@ -267,7 +269,9 @@ static int send_msg_to_mgmt_sync(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt, if (ack_type == HINIC3_MSG_ACK) pf_to_mgmt_send_event_set(pf_to_mgmt, SEND_EVENT_START);
- prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len); + ret = prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len); + if (ret != 0) + return ret;
return hinic3_api_cmd_write(chain, node_id, mgmt_cmd, cmd_size); } @@ -291,6 +295,7 @@ static int send_msg_to_mgmt_async(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt, u8 node_id = HINIC3_MGMT_CPU_NODE_ID(pf_to_mgmt->hwdev); u64 header; u16 cmd_size = mgmt_msg_len(msg_len); + int ret;
if (hinic3_get_chip_present_flag(pf_to_mgmt->hwdev) == 0) return -EFAULT; @@ -301,24 +306,31 @@ static int send_msg_to_mgmt_async(struct hinic3_msg_pf_to_mgmt *pf_to_mgmt, prepare_header(pf_to_mgmt, &header, msg_len, mod, HINIC3_MSG_NO_ACK, direction, cmd, ASYNC_MSG_ID(pf_to_mgmt));
- prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len); + ret = prepare_mgmt_cmd((u8 *)mgmt_cmd, &header, msg, msg_len); + if (ret != 0) + return ret;
chain = pf_to_mgmt->cmd_chain[HINIC3_API_CMD_WRITE_ASYNC_TO_MGMT_CPU];
return hinic3_api_cmd_write(chain, node_id, mgmt_cmd, cmd_size); }
-static inline void msg_to_mgmt_pre(u8 mod, void *buf_in) +static inline int msg_to_mgmt_pre(u8 mod, void *buf_in, u16 in_size) { struct hinic3_msg_head *msg_head = NULL;
/* set aeq fix num to 3, need to ensure response aeq id < 3 */ if (mod == HINIC3_MOD_COMM || mod == HINIC3_MOD_L2NIC) { + if (in_size < sizeof(struct hinic3_msg_head)) + return -EINVAL; + msg_head = buf_in;
if (msg_head->resp_aeq_num >= HINIC3_MAX_AEQS) msg_head->resp_aeq_num = 0; } + + return 0; }
int hinic3_pf_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, @@ -336,7 +348,12 @@ int hinic3_pf_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, if (!COMM_SUPPORT_API_CHAIN((struct hinic3_hwdev *)hwdev)) return -EPERM;
- msg_to_mgmt_pre(mod, buf_in); + if (!buf_in || in_size == 0) + return -EINVAL; + + ret = msg_to_mgmt_pre(mod, buf_in, in_size); + if (ret != 0) + return -EINVAL;
pf_to_mgmt = ((struct hinic3_hwdev *)hwdev)->pf_to_mgmt;
@@ -395,7 +412,6 @@ int hinic3_pf_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in,
if (recv_msg->msg_len) memcpy(buf_out, recv_msg->msg, recv_msg->msg_len); - *out_size = recv_msg->msg_len; }
@@ -409,7 +425,7 @@ int hinic3_pf_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, int hinic3_pf_to_mgmt_async(void *hwdev, u8 mod, u16 cmd, const void *buf_in, u16 in_size) { - struct hinic3_msg_pf_to_mgmt *pf_to_mgmt; + struct hinic3_msg_pf_to_mgmt *pf_to_mgmt = NULL; void *dev = ((struct hinic3_hwdev *)hwdev)->dev_hdl; int err;
@@ -434,6 +450,41 @@ int hinic3_pf_to_mgmt_async(void *hwdev, u8 mod, u16 cmd, const void *buf_in, return 0; }
+/* This function is only used by tx/rx flush */ +int hinic3_pf_to_mgmt_no_ack(void *hwdev, enum hinic3_mod_type mod, u8 cmd, void *buf_in, + u16 in_size) +{ + struct hinic3_msg_pf_to_mgmt *pf_to_mgmt = NULL; + void *dev = NULL; + int err = -EINVAL; + struct hinic3_hwdev *tmp_hwdev = NULL; + + if (!hwdev) + return -EINVAL; + + tmp_hwdev = (struct hinic3_hwdev *)hwdev; + dev = tmp_hwdev->dev_hdl; + pf_to_mgmt = tmp_hwdev->pf_to_mgmt; + + if (in_size > HINIC3_MBOX_DATA_SIZE) { + sdk_err(dev, "Mgmt msg buffer size: %u is invalid\n", in_size); + return -EINVAL; + } + + if (!(tmp_hwdev->chip_present_flag)) + return -EPERM; + + /* lock the sync_msg_buf */ + down(&pf_to_mgmt->sync_msg_lock); + + err = send_msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size, HINIC3_MSG_NO_ACK, + HINIC3_MSG_DIRECT_SEND, MSG_NO_RESP); + + up(&pf_to_mgmt->sync_msg_lock); + + return err; +} + int hinic3_pf_msg_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size, u32 timeout) @@ -1358,11 +1409,11 @@ static void hinic3_clear_clp_data(struct hinic3_hwdev *hwdev, int hinic3_pf_clp_to_mgmt(void *hwdev, u8 mod, u16 cmd, const void *buf_in, u16 in_size, void *buf_out, u16 *out_size) { - struct hinic3_clp_pf_to_mgmt *clp_pf_to_mgmt; + struct hinic3_clp_pf_to_mgmt *clp_pf_to_mgmt = NULL; struct hinic3_hwdev *dev = hwdev; u64 header; u16 real_size; - u8 *clp_msg_buf; + u8 *clp_msg_buf = NULL; int err;
if (!COMM_SUPPORT_CLP(dev)) @@ -1405,6 +1456,7 @@ int hinic3_pf_clp_to_mgmt(void *hwdev, u8 mod, u16 cmd, const void *buf_in, clp_prepare_header(dev, &header, in_size, mod, 0, 0, cmd, 0);
memcpy(clp_msg_buf, &header, sizeof(header)); + clp_msg_buf += sizeof(header); memcpy(clp_msg_buf, buf_in, in_size);
@@ -1477,7 +1529,7 @@ int hinic3_clp_to_mgmt(void *hwdev, u8 mod, u16 cmd, const void *buf_in,
int hinic3_clp_pf_to_mgmt_init(struct hinic3_hwdev *hwdev) { - struct hinic3_clp_pf_to_mgmt *clp_pf_to_mgmt; + struct hinic3_clp_pf_to_mgmt *clp_pf_to_mgmt = NULL;
if (!COMM_SUPPORT_CLP(hwdev)) return 0; diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mgmt.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mgmt.h index ad86a82e7..48970e37a 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mgmt.h +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_mgmt.h @@ -10,7 +10,7 @@ #include <linux/spinlock.h> #include <linux/workqueue.h>
-#include "comm_defs.h" +#include "mpu_cmd_base_defs.h" #include "hinic3_hw.h" #include "hinic3_api_cmd.h" #include "hinic3_hwdev.h" @@ -164,6 +164,9 @@ int hinic3_pf_msg_to_mgmt_sync(void *hwdev, u8 mod, u16 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size, u32 timeout);
+int hinic3_pf_to_mgmt_no_ack(void *hwdev, enum hinic3_mod_type mod, u8 cmd, void *buf_in, + u16 in_size); + int hinic3_api_cmd_read_ack(void *hwdev, u8 dest, const void *cmd, u16 size, void *ack, u16 ack_size);
diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_multi_host_mgmt.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_multi_host_mgmt.c new file mode 100644 index 000000000..254fcd165 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_multi_host_mgmt.c @@ -0,0 +1,1231 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": [COMM]" fmt + +#include <linux/kernel.h> +#include <linux/semaphore.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/completion.h> +#include <linux/pci.h> +#include <linux/types.h> + +#include "ossl_knl.h" +#include "hinic3_common.h" +#include "hinic3_hw.h" +#include "hinic3_hwdev.h" +#include "hinic3_csr.h" +#include "hinic3_hwif.h" +#include "hinic3_api_cmd.h" +#include "hinic3_mgmt.h" +#include "hinic3_mbox.h" +#include "hinic3_hwif.h" +#include "hinic3_multi_host_mgmt.h" +#include "hinic3_hw_cfg.h" + +#define HINIC3_SUPPORT_MAX_PF_NUM 32 +#define HINIC3_MBOX_PF_BUSY_ACTIVE_FW 0x2 + +void set_master_host_mbox_enable(struct hinic3_hwdev *hwdev, bool enable) +{ + u32 reg_val; + + if (!IS_MASTER_HOST(hwdev) || HINIC3_FUNC_TYPE(hwdev) != TYPE_PPF) + return; + + reg_val = hinic3_hwif_read_reg(hwdev->hwif, HINIC3_MULT_HOST_MASTER_MBOX_STATUS_ADDR); + reg_val = MULTI_HOST_REG_CLEAR(reg_val, MASTER_MBX_STS); + reg_val |= MULTI_HOST_REG_SET((u8)enable, MASTER_MBX_STS); + hinic3_hwif_write_reg(hwdev->hwif, HINIC3_MULT_HOST_MASTER_MBOX_STATUS_ADDR, reg_val); + + sdk_info(hwdev->dev_hdl, "Multi-host status: %d, reg value: 0x%x\n", + enable, reg_val); +} + +bool hinic3_get_master_host_mbox_enable(void *hwdev) +{ + u32 reg_val; + struct hinic3_hwdev *dev = hwdev; + + if (!hwdev) + return false; + + if (!IS_SLAVE_HOST(dev) || HINIC3_FUNC_TYPE(dev) == TYPE_VF) + return true; + + reg_val = hinic3_hwif_read_reg(dev->hwif, HINIC3_MULT_HOST_MASTER_MBOX_STATUS_ADDR); + + return !!MULTI_HOST_REG_GET(reg_val, MASTER_MBX_STS); +} + +bool hinic3_is_multi_bm(void *hwdev) +{ + struct hinic3_hwdev *hw_dev = hwdev; + + if (!hwdev) + return false; + + return ((IS_BMGW_SLAVE_HOST(hw_dev)) || (IS_BMGW_MASTER_HOST(hw_dev))) ? true : false; +} +EXPORT_SYMBOL(hinic3_is_multi_bm); + +bool hinic3_is_slave_host(void *hwdev) +{ + struct hinic3_hwdev *hw_dev = hwdev; + + if (!hwdev) { + pr_err("hwdev is null\n"); + return false; + } + + return ((IS_BMGW_SLAVE_HOST(hw_dev)) || (IS_VM_SLAVE_HOST(hw_dev))) ? true : false; +} +EXPORT_SYMBOL(hinic3_is_slave_host); + +bool hinic3_is_vm_slave_host(void *hwdev) +{ + struct hinic3_hwdev *hw_dev = hwdev; + + if (!hwdev) { + pr_err("hwdev is null\n"); + return false; + } + + return (IS_VM_SLAVE_HOST(hw_dev)) ? true : false; +} +EXPORT_SYMBOL(hinic3_is_vm_slave_host); + +bool hinic3_is_bm_slave_host(void *hwdev) +{ + struct hinic3_hwdev *hw_dev = hwdev; + + if (!hwdev) { + pr_err("hwdev is null\n"); + return false; + } + + return (IS_BMGW_SLAVE_HOST(hw_dev)) ? 
true : false; +} +EXPORT_SYMBOL(hinic3_is_bm_slave_host); + +static int __send_mbox_to_host(struct hinic3_hwdev *mbox_hwdev, + struct hinic3_hwdev *hwdev, + enum hinic3_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout, + enum hinic3_mbox_ack_type ack_type, u16 channel) +{ + u8 dst_host_func_idx; + struct service_cap *cap = &hwdev->cfg_mgmt->svc_cap; + + if (!mbox_hwdev->chip_present_flag) + return -EPERM; + + if (!hinic3_get_master_host_mbox_enable(hwdev)) { + sdk_err(hwdev->dev_hdl, "Master host not initialized\n"); + return -EFAULT; + } + + if (!mbox_hwdev->mhost_mgmt) { + /* send to master host in default */ + dst_host_func_idx = hinic3_host_ppf_idx(hwdev, cap->master_host_id); + } else { + dst_host_func_idx = IS_MASTER_HOST(hwdev) ? + mbox_hwdev->mhost_mgmt->shost_ppf_idx : + mbox_hwdev->mhost_mgmt->mhost_ppf_idx; + } + + if (ack_type == MBOX_ACK) + return hinic3_mbox_to_host(mbox_hwdev, dst_host_func_idx, + mod, cmd, buf_in, in_size, + buf_out, out_size, timeout, channel); + else + return hinic3_mbox_to_func_no_ack(mbox_hwdev, dst_host_func_idx, + mod, cmd, buf_in, in_size, channel); +} + +int __mbox_to_host(struct hinic3_hwdev *hwdev, enum hinic3_mod_type mod, + u8 cmd, void *buf_in, u16 in_size, void *buf_out, + u16 *out_size, u32 timeout, + enum hinic3_mbox_ack_type ack_type, u16 channel) +{ + struct hinic3_hwdev *mbox_hwdev = hwdev; + int err; + + if (!IS_MULTI_HOST(hwdev) || HINIC3_IS_VF(hwdev)) + return -EPERM; + + if (hinic3_func_type(hwdev) == TYPE_PF) { + down(&hwdev->ppf_sem); + mbox_hwdev = hwdev->ppf_hwdev; + if (!mbox_hwdev) { + err = -EINVAL; + goto release_lock; + } + + if (!test_bit(HINIC3_HWDEV_MBOX_INITED, &mbox_hwdev->func_state)) { + err = -EPERM; + goto release_lock; + } + } + + err = __send_mbox_to_host(mbox_hwdev, hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, timeout, ack_type, channel); + +release_lock: + if (hinic3_func_type(hwdev) == TYPE_PF) + up(&hwdev->ppf_sem); + + return err; +} + +int hinic3_mbox_to_host_sync(void *hwdev, enum hinic3_mod_type mod, + u8 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size, u32 timeout, u16 channel) +{ + if (!hwdev) + return -EINVAL; + + return __mbox_to_host((struct hinic3_hwdev *)hwdev, mod, cmd, buf_in, + in_size, buf_out, out_size, timeout, MBOX_ACK, channel); +} +EXPORT_SYMBOL(hinic3_mbox_to_host_sync); + +int hinic3_mbox_to_host_no_ack(struct hinic3_hwdev *hwdev, + enum hinic3_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, u16 channel) +{ + return __mbox_to_host(hwdev, mod, cmd, buf_in, in_size, NULL, NULL, + 0, MBOX_NO_ACK, channel); +} + +static int __get_func_nic_state_from_pf(struct hinic3_hwdev *hwdev, + u16 glb_func_idx, u8 *en); +static int __get_func_vroce_state_from_pf(struct hinic3_hwdev *hwdev, + u16 glb_func_idx, u8 *en); + +int sw_func_pf_mbox_handler(void *pri_handle, u16 vf_id, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hinic3_hwdev *hwdev = pri_handle; + struct hinic3_slave_func_nic_state *nic_state = NULL; + struct hinic3_slave_func_nic_state *out_state = NULL; + int err; + + switch (cmd) { + case HINIC3_SW_CMD_GET_SLAVE_FUNC_NIC_STATE: + nic_state = buf_in; + out_state = buf_out; + *out_size = sizeof(*nic_state); + + /* find nic state in PPF func_nic_en bitmap */ + err = __get_func_nic_state_from_pf(hwdev, nic_state->func_idx, + &out_state->enable); + out_state->status = err ? 
1 : 0; + + break; + case HINIC3_SW_CMD_GET_SLAVE_FUNC_VROCE_STATE: + nic_state = buf_in; + out_state = buf_out; + *out_size = sizeof(*nic_state); + + err = __get_func_vroce_state_from_pf(hwdev, nic_state->func_idx, + &out_state->enable); + out_state->status = err ? 1 : 0; + + break; + default: + break; + } + + return 0; +} + +static int __master_host_sw_func_handler(struct hinic3_hwdev *hwdev, u16 pf_idx, + u8 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic3_multi_host_mgmt *mhost_mgmt = hwdev->mhost_mgmt; + struct register_slave_host *out_shost = NULL; + struct register_slave_host *slave_host = NULL; + u64 *vroce_en = NULL; + + int err = 0; + + if (!mhost_mgmt) + return -ENXIO; + switch (cmd) { + case HINIC3_SW_CMD_SLAVE_HOST_PPF_REGISTER: + slave_host = buf_in; + out_shost = buf_out; + *out_size = sizeof(*slave_host); + vroce_en = out_shost->funcs_vroce_en; + + /* just get information about function nic enable */ + if (slave_host->get_nic_en) { + bitmap_copy((ulong *)out_shost->funcs_nic_en, + mhost_mgmt->func_nic_en, + HINIC3_MAX_MGMT_FUNCTIONS); + + if (IS_MASTER_HOST(hwdev)) + bitmap_copy((ulong *)vroce_en, + mhost_mgmt->func_vroce_en, + HINIC3_MAX_MGMT_FUNCTIONS); + out_shost->status = 0; + break; + } + + mhost_mgmt->shost_registered = true; + mhost_mgmt->shost_host_idx = slave_host->host_id; + mhost_mgmt->shost_ppf_idx = slave_host->ppf_idx; + + bitmap_copy((ulong *)out_shost->funcs_nic_en, + mhost_mgmt->func_nic_en, HINIC3_MAX_MGMT_FUNCTIONS); + + if (IS_MASTER_HOST(hwdev)) + bitmap_copy((ulong *)vroce_en, + mhost_mgmt->func_vroce_en, + HINIC3_MAX_MGMT_FUNCTIONS); + + sdk_info(hwdev->dev_hdl, "Slave host registers PPF, host_id: %u, ppf_idx: %u\n", + slave_host->host_id, slave_host->ppf_idx); + + out_shost->status = 0; + break; + case HINIC3_SW_CMD_SLAVE_HOST_PPF_UNREGISTER: + slave_host = buf_in; + mhost_mgmt->shost_registered = false; + sdk_info(hwdev->dev_hdl, "Slave host unregisters PPF, host_id: %u, ppf_idx: %u\n", + slave_host->host_id, slave_host->ppf_idx); + + *out_size = sizeof(*slave_host); + ((struct register_slave_host *)buf_out)->status = 0; + break; + + default: + err = -EINVAL; + break; + } + + return err; +} + +static int __event_func_service_state_handler(struct hinic3_hwdev *hwdev, + u8 sub_cmd, void *buf_in, + u16 in_size, void *buf_out, + u16 *out_size) +{ + struct hinic3_event_info event_info = {0}; + struct hinic3_mhost_nic_func_state state = {0}; + struct hinic3_slave_func_nic_state *out_state = NULL; + struct hinic3_slave_func_nic_state *in_state = buf_in; + + if (!hwdev->event_callback) + return 0; + + event_info.type = EVENT_COMM_MULTI_HOST_MGMT; + ((struct hinic3_multi_host_mgmt_event *)(void *)event_info.event_data)->sub_cmd = sub_cmd; + ((struct hinic3_multi_host_mgmt_event *)(void *)event_info.event_data)->data = &state; + + state.func_idx = in_state->func_idx; + state.enable = in_state->enable; + + hwdev->event_callback(hwdev->event_pri_handle, &event_info); + + *out_size = sizeof(*out_state); + out_state = buf_out; + out_state->status = state.status; + if (sub_cmd == HINIC3_MHOST_GET_VROCE_STATE) + out_state->opened = state.enable; + + return state.status; +} + +static int __event_set_func_nic_state(struct hinic3_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + return __event_func_service_state_handler(hwdev, + HINIC3_MHOST_NIC_STATE_CHANGE, + buf_in, in_size, + buf_out, out_size); +} + +static int __event_set_func_vroce_state(struct hinic3_hwdev *hwdev, + void *buf_in, u16 in_size, + 
void *buf_out, u16 *out_size) +{ + return __event_func_service_state_handler(hwdev, + HINIC3_MHOST_VROCE_STATE_CHANGE, + buf_in, in_size, + buf_out, out_size); +} + +static int __event_get_func_vroce_state(struct hinic3_hwdev *hwdev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + return __event_func_service_state_handler(hwdev, + HINIC3_MHOST_GET_VROCE_STATE, + buf_in, in_size, + buf_out, out_size); +} + +int vf_sw_func_handler(void *hwdev, u8 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + int err = 0; + + switch (cmd) { + case HINIC3_SW_CMD_SET_SLAVE_FUNC_VROCE_STATE: + err = __event_set_func_vroce_state(hwdev, buf_in, in_size, + buf_out, out_size); + break; + case HINIC3_SW_CMD_GET_SLAVE_VROCE_DEVICE_STATE: + err = __event_get_func_vroce_state(hwdev, buf_in, in_size, + buf_out, out_size); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int multi_host_event_handler(struct hinic3_hwdev *hwdev, + u8 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + int err; + + switch (cmd) { + case HINIC3_SW_CMD_SET_SLAVE_FUNC_VROCE_STATE: + err = __event_set_func_vroce_state(hwdev, buf_in, in_size, + buf_out, out_size); + break; + case HINIC3_SW_CMD_SET_SLAVE_FUNC_NIC_STATE: + err = __event_set_func_nic_state(hwdev, buf_in, in_size, + buf_out, out_size); + break; + case HINIC3_SW_CMD_GET_SLAVE_VROCE_DEVICE_STATE: + err = __event_get_func_vroce_state(hwdev, buf_in, in_size, + buf_out, out_size); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int sw_set_slave_func_nic_state(struct hinic3_hwdev *hwdev, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic3_slave_func_nic_state *nic_state = buf_in; + struct hinic3_slave_func_nic_state *nic_state_out = buf_out; + struct hinic3_multi_host_mgmt *mhost_mgmt = hwdev->mhost_mgmt; + *out_size = sizeof(*nic_state); + nic_state_out->status = 0; + sdk_info(hwdev->dev_hdl, "Slave func %u %s nic\n", + nic_state->func_idx, + nic_state->enable ? "register" : "unregister"); + + if (nic_state->enable) { + set_bit(nic_state->func_idx, mhost_mgmt->func_nic_en); + } else { + if ((test_bit(nic_state->func_idx, mhost_mgmt->func_nic_en)) && + nic_state->func_idx >= HINIC3_SUPPORT_MAX_PF_NUM && + (!test_bit(nic_state->func_idx, hwdev->func_probe_in_host))) { + sdk_warn(hwdev->dev_hdl, "VF%u in vm, delete tap port failed\n", + nic_state->func_idx); + nic_state_out->status = HINIC3_VF_IN_VM; + return 0; + } + clear_bit(nic_state->func_idx, mhost_mgmt->func_nic_en); + } + + return multi_host_event_handler(hwdev, cmd, buf_in, in_size, buf_out, + out_size); +} + +static int sw_set_slave_vroce_state(struct hinic3_hwdev *hwdev, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic3_slave_func_nic_state *nic_state = buf_in; + struct hinic3_slave_func_nic_state *nic_state_out = buf_out; + struct hinic3_multi_host_mgmt *mhost_mgmt = hwdev->mhost_mgmt; + int err; + + nic_state = buf_in; + *out_size = sizeof(*nic_state); + nic_state_out->status = 0; + + sdk_info(hwdev->dev_hdl, "Slave func %u %s vroce\n", nic_state->func_idx, + nic_state->enable ? 
"register" : "unregister"); + + if (nic_state->enable) + set_bit(nic_state->func_idx, + mhost_mgmt->func_vroce_en); + else + clear_bit(nic_state->func_idx, + mhost_mgmt->func_vroce_en); + + err = multi_host_event_handler(hwdev, cmd, buf_in, in_size, + buf_out, out_size); + + return err; +} + +static int sw_get_slave_vroce_device_state(struct hinic3_hwdev *hwdev, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic3_slave_func_nic_state *nic_state_out = buf_out; + int err; + + *out_size = sizeof(struct hinic3_slave_func_nic_state); + nic_state_out->status = 0; + err = multi_host_event_handler(hwdev, cmd, buf_in, in_size, buf_out, out_size); + + return err; +} + +static void sw_get_slave_netdev_state(struct hinic3_hwdev *hwdev, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic3_slave_func_nic_state *nic_state = buf_in; + struct hinic3_slave_func_nic_state *nic_state_out = buf_out; + + *out_size = sizeof(*nic_state); + nic_state_out->status = 0; + nic_state_out->opened = + test_bit(nic_state->func_idx, + hwdev->netdev_setup_state) ? 1 : 0; +} + +static int __slave_host_sw_func_handler(struct hinic3_hwdev *hwdev, u16 pf_idx, + u8 cmd, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic3_multi_host_mgmt *mhost_mgmt = hwdev->mhost_mgmt; + int err = 0; + + if (!mhost_mgmt) + return -ENXIO; + switch (cmd) { + case HINIC3_SW_CMD_SET_SLAVE_FUNC_NIC_STATE: + err = sw_set_slave_func_nic_state(hwdev, cmd, buf_in, in_size, + buf_out, out_size); + break; + case HINIC3_SW_CMD_SET_SLAVE_FUNC_VROCE_STATE: + err = sw_set_slave_vroce_state(hwdev, cmd, buf_in, in_size, + buf_out, out_size); + break; + case HINIC3_SW_CMD_GET_SLAVE_VROCE_DEVICE_STATE: + err = sw_get_slave_vroce_device_state(hwdev, cmd, + buf_in, in_size, + buf_out, out_size); + break; + case HINIC3_SW_CMD_GET_SLAVE_NETDEV_STATE: + sw_get_slave_netdev_state(hwdev, cmd, buf_in, in_size, + buf_out, out_size); + break; + default: + err = -EINVAL; + break; + } + + return err; +} + +int sw_func_ppf_mbox_handler(void *handle, u16 pf_idx, u16 vf_id, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size) +{ + struct hinic3_hwdev *hwdev = handle; + int err; + + if (IS_MASTER_HOST(hwdev)) + err = __master_host_sw_func_handler(hwdev, pf_idx, (u8)cmd, buf_in, + in_size, buf_out, out_size); + else if (IS_SLAVE_HOST(hwdev)) + err = __slave_host_sw_func_handler(hwdev, pf_idx, (u8)cmd, buf_in, + in_size, buf_out, out_size); + else + err = -EINVAL; + + if (err) + sdk_err(hwdev->dev_hdl, "PPF process sw funcs cmd %u failed, err: %d\n", + cmd, err); + + return err; +} + +int __ppf_process_mbox_msg(struct hinic3_hwdev *hwdev, u16 pf_idx, u16 vf_id, + enum hinic3_mod_type mod, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + /* when not support return err */ + int err = -EFAULT; + + if (IS_SLAVE_HOST(hwdev)) { + err = hinic3_mbox_to_host_sync(hwdev, mod, cmd, buf_in, in_size, + buf_out, out_size, 0, HINIC3_CHANNEL_COMM); + if (err) + sdk_err(hwdev->dev_hdl, "Send mailbox to mPF failed, err: %d\n", + err); + } else if (IS_MASTER_HOST(hwdev)) { + if (mod == HINIC3_MOD_COMM && cmd == COMM_MGMT_CMD_START_FLR) + err = hinic3_pf_to_mgmt_no_ack(hwdev, mod, cmd, buf_in, + in_size); + else + err = hinic3_pf_msg_to_mgmt_sync(hwdev, mod, cmd, buf_in, + in_size, buf_out, + out_size, 0U); + if (err && err != HINIC3_MBOX_PF_BUSY_ACTIVE_FW) + sdk_err(hwdev->dev_hdl, "PF mbox mod %d cmd %u callback handler err: %d\n", + mod, cmd, err); + } + + return 
err; +} + +int hinic3_ppf_process_mbox_msg(struct hinic3_hwdev *hwdev, u16 pf_idx, u16 vf_id, + enum hinic3_mod_type mod, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + bool same_host = false; + int err = -EFAULT; + + /* Currently, only the master ppf and slave ppf communicate with each + * other through ppf messages. If other PF/VFs need to communicate + * with the PPF, modify the same_host based on the + * hinic3_get_hw_pf_infos information. + */ + + switch (hwdev->func_mode) { + case FUNC_MOD_MULTI_VM_MASTER: + case FUNC_MOD_MULTI_BM_MASTER: + if (!same_host) + err = __ppf_process_mbox_msg(hwdev, pf_idx, vf_id, + mod, cmd, buf_in, in_size, + buf_out, out_size); + else + sdk_warn(hwdev->dev_hdl, "Doesn't support PPF mbox message in BM master\n"); + + break; + case FUNC_MOD_MULTI_VM_SLAVE: + case FUNC_MOD_MULTI_BM_SLAVE: + same_host = true; + if (same_host) + err = __ppf_process_mbox_msg(hwdev, pf_idx, vf_id, + mod, cmd, buf_in, in_size, + buf_out, out_size); + else + sdk_warn(hwdev->dev_hdl, "Doesn't support receiving control messages from BM master\n"); + + break; + default: + sdk_warn(hwdev->dev_hdl, "Doesn't support PPF mbox message\n"); + + break; + } + + return err; +} + +int comm_ppf_mbox_handler(void *handle, u16 pf_idx, u16 vf_id, u16 cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size) +{ + return hinic3_ppf_process_mbox_msg(handle, pf_idx, vf_id, HINIC3_MOD_COMM, + (u8)cmd, buf_in, in_size, buf_out, + out_size); +} + +int hilink_ppf_mbox_handler(void *handle, u16 pf_idx, u16 vf_id, u16 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + return hinic3_ppf_process_mbox_msg(handle, pf_idx, vf_id, + HINIC3_MOD_HILINK, (u8)cmd, buf_in, + in_size, buf_out, out_size); +} + +int hinic3_nic_ppf_mbox_handler(void *handle, u16 pf_idx, u16 vf_id, u16 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + return hinic3_ppf_process_mbox_msg(handle, pf_idx, vf_id, + HINIC3_MOD_L2NIC, (u8)cmd, buf_in, in_size, + buf_out, out_size); +} + +int hinic3_register_slave_ppf(struct hinic3_hwdev *hwdev, bool registered) +{ + struct register_slave_host *host_info = NULL; + u16 out_size = sizeof(struct register_slave_host); + u8 cmd; + int err; + + if (!IS_SLAVE_HOST(hwdev)) + return -EINVAL; + + host_info = kcalloc(1, sizeof(struct register_slave_host), GFP_KERNEL); + if (!host_info) + return -ENOMEM; + + cmd = registered ? HINIC3_SW_CMD_SLAVE_HOST_PPF_REGISTER : + HINIC3_SW_CMD_SLAVE_HOST_PPF_UNREGISTER; + + host_info->host_id = hinic3_pcie_itf_id(hwdev); + host_info->ppf_idx = hinic3_ppf_idx(hwdev); + + err = hinic3_mbox_to_host_sync(hwdev, HINIC3_MOD_SW_FUNC, cmd, + host_info, sizeof(struct register_slave_host), host_info, + &out_size, 0, HINIC3_CHANNEL_COMM); + if (!!err || !out_size || host_info->status) { + sdk_err(hwdev->dev_hdl, "Failed to %s slave host, err: %d, out_size: 0x%x, status: 0x%x\n", + registered ? 
"register" : "unregister", err, out_size, host_info->status); + + kfree(host_info); + return -EFAULT; + } + bitmap_copy(hwdev->mhost_mgmt->func_nic_en, + (ulong *)host_info->funcs_nic_en, + HINIC3_MAX_MGMT_FUNCTIONS); + + if (IS_SLAVE_HOST(hwdev)) + bitmap_copy(hwdev->mhost_mgmt->func_vroce_en, + (ulong *)host_info->funcs_vroce_en, + HINIC3_MAX_MGMT_FUNCTIONS); + + kfree(host_info); + return 0; +} + +static int get_host_id_by_func_id(struct hinic3_hwdev *hwdev, u16 func_idx, + u8 *host_id) +{ + struct hinic3_hw_pf_infos *pf_infos = NULL; + u16 vf_id_start, vf_id_end; + int i; + + if (!hwdev || !host_id || !hwdev->mhost_mgmt) + return -EINVAL; + + pf_infos = &hwdev->mhost_mgmt->pf_infos; + + for (i = 0; i < pf_infos->num_pfs; i++) { + if (func_idx == pf_infos->infos[i].glb_func_idx) { + *host_id = pf_infos->infos[i].itf_idx; + return 0; + } + + vf_id_start = pf_infos->infos[i].glb_pf_vf_offset + 1; + vf_id_end = pf_infos->infos[i].glb_pf_vf_offset + + pf_infos->infos[i].max_vfs; + if (func_idx >= vf_id_start && func_idx <= vf_id_end) { + *host_id = pf_infos->infos[i].itf_idx; + return 0; + } + } + + return -EFAULT; +} + +int set_slave_func_nic_state(struct hinic3_hwdev *hwdev, + struct hinic3_func_nic_state *state) +{ + struct hinic3_slave_func_nic_state nic_state = {0}; + u16 out_size = sizeof(nic_state); + u8 cmd = HINIC3_SW_CMD_SET_SLAVE_FUNC_NIC_STATE; + int err; + + nic_state.func_idx = state->func_idx; + nic_state.enable = state->state; + nic_state.vroce_flag = state->vroce_flag; + + if (state->vroce_flag) + cmd = HINIC3_SW_CMD_SET_SLAVE_FUNC_VROCE_STATE; + + err = hinic3_mbox_to_host_sync(hwdev, HINIC3_MOD_SW_FUNC, + cmd, &nic_state, sizeof(nic_state), + &nic_state, &out_size, 0, HINIC3_CHANNEL_COMM); + if (err == MBOX_ERRCODE_UNKNOWN_DES_FUNC) { + sdk_warn(hwdev->dev_hdl, + "Can not notify func %u %s state because slave host isn't initialized\n", + state->func_idx, state->vroce_flag ? "vroce" : "nic"); + } else if (err || !out_size || nic_state.status) { + sdk_err(hwdev->dev_hdl, + "Failed to set slave %s state, err: %d, out_size: 0x%x, status: 0x%x\n", + state->vroce_flag ? 
"vroce" : "nic", + err, out_size, nic_state.status); + return -EFAULT; + } + + return 0; +} + +int get_slave_func_netdev_state(struct hinic3_hwdev *hwdev, u16 func_idx, + int *opened) +{ + struct hinic3_slave_func_nic_state nic_state = {0}; + u16 out_size = sizeof(nic_state); + int err; + + nic_state.func_idx = func_idx; + err = hinic3_mbox_to_host_sync(hwdev, HINIC3_MOD_SW_FUNC, + HINIC3_SW_CMD_GET_SLAVE_NETDEV_STATE, + &nic_state, sizeof(nic_state), &nic_state, + &out_size, 0, HINIC3_CHANNEL_COMM); + if (err == MBOX_ERRCODE_UNKNOWN_DES_FUNC) { + sdk_warn(hwdev->dev_hdl, + "Can not get func %u netdev state because slave host isn't initialized\n", + func_idx); + } else if (err || !out_size || nic_state.status) { + sdk_err(hwdev->dev_hdl, + "Failed to get netdev state, err: %d, out_size: 0x%x, status: 0x%x\n", + err, out_size, nic_state.status); + return -EFAULT; + } + + *opened = nic_state.opened; + return 0; +} + +static int set_nic_state_params_valid(void *hwdev, + struct hinic3_func_nic_state *state) +{ + struct hinic3_multi_host_mgmt *mhost_mgmt = NULL; + struct hinic3_hwdev *ppf_hwdev = hwdev; + + if (!hwdev || !state) + return -EINVAL; + + if (hinic3_func_type(hwdev) != TYPE_PPF) + ppf_hwdev = ((struct hinic3_hwdev *)hwdev)->ppf_hwdev; + + if (!ppf_hwdev || !IS_MASTER_HOST(ppf_hwdev)) + return -EINVAL; + + mhost_mgmt = ppf_hwdev->mhost_mgmt; + if (!mhost_mgmt || state->func_idx >= HINIC3_MAX_MGMT_FUNCTIONS) + return -EINVAL; + + return 0; +} + +static int get_func_current_state(struct hinic3_multi_host_mgmt *mhost_mgmt, + struct hinic3_func_nic_state *state, + int *old_state) +{ + ulong *func_bitmap = NULL; + + if (state->vroce_flag == 1) + func_bitmap = mhost_mgmt->func_vroce_en; + else + func_bitmap = mhost_mgmt->func_nic_en; + + *old_state = test_bit(state->func_idx, func_bitmap) ? 1 : 0; + if (state->state == HINIC3_FUNC_NIC_DEL) + clear_bit(state->func_idx, func_bitmap); + else if (state->state == HINIC3_FUNC_NIC_ADD) + set_bit(state->func_idx, func_bitmap); + else + return -EINVAL; + + return 0; +} + +static bool check_vroce_state(struct hinic3_multi_host_mgmt *mhost_mgmt, + struct hinic3_func_nic_state *state) +{ + bool is_ready = true; + ulong *func_bitmap = mhost_mgmt->func_vroce_en; + + if (!state->vroce_flag && state->state == HINIC3_FUNC_NIC_DEL) + is_ready = test_bit(state->func_idx, func_bitmap) ? false : true; + + return is_ready; +} + +int hinic3_set_func_nic_state(void *hwdev, struct hinic3_func_nic_state *state) +{ + struct hinic3_multi_host_mgmt *mhost_mgmt = NULL; + struct hinic3_hwdev *ppf_hwdev = hwdev; + u8 host_enable; + int err, old_state = 0; + u8 host_id = 0; + + err = set_nic_state_params_valid(hwdev, state); + if (err) + return err; + + mhost_mgmt = ppf_hwdev->mhost_mgmt; + + if (IS_MASTER_HOST(ppf_hwdev) && + !check_vroce_state(mhost_mgmt, state)) { + sdk_warn(ppf_hwdev->dev_hdl, + "Should disable vroce before disable nic for function %u\n", + state->func_idx); + return -EFAULT; + } + + err = get_func_current_state(mhost_mgmt, state, &old_state); + if (err) { + sdk_err(ppf_hwdev->dev_hdl, "Failed to get function %u current state, err: %d\n", + state->func_idx, err); + return err; + } + + err = get_host_id_by_func_id(ppf_hwdev, state->func_idx, &host_id); + if (err) { + sdk_err(ppf_hwdev->dev_hdl, + "Failed to get function %u host id, err: %d\n", state->func_idx, err); + if (state->vroce_flag) + return -EFAULT; + + old_state ? 
set_bit(state->func_idx, mhost_mgmt->func_nic_en) : + clear_bit(state->func_idx, mhost_mgmt->func_nic_en); + return -EFAULT; + } + + err = hinic3_get_slave_host_enable(hwdev, host_id, &host_enable); + if (err != 0) { + sdk_err(ppf_hwdev->dev_hdl, + "Get slave host %u enable failed, ret %d\n", host_id, err); + return err; + } + sdk_info(ppf_hwdev->dev_hdl, "Set slave host %u(status: %u) func %u %s %s\n", + host_id, host_enable, state->func_idx, + state->state ? "enable" : "disable", state->vroce_flag ? "vroce" : "nic"); + + if (!host_enable) + return 0; + + /* notify slave host */ + err = set_slave_func_nic_state(hwdev, state); + if (err) { + if (state->vroce_flag) + return -EFAULT; + + old_state ? set_bit(state->func_idx, mhost_mgmt->func_nic_en) : + clear_bit(state->func_idx, mhost_mgmt->func_nic_en); + return err; + } + + return 0; +} +EXPORT_SYMBOL(hinic3_set_func_nic_state); + +int hinic3_get_netdev_state(void *hwdev, u16 func_idx, int *opened) +{ + struct hinic3_hwdev *ppf_hwdev = hwdev; + int err; + u8 host_enable; + u8 host_id = 0; + struct hinic3_func_nic_state state = {0}; + + *opened = 0; + state.func_idx = func_idx; + err = set_nic_state_params_valid(hwdev, &state); + if (err) + return err; + + err = get_host_id_by_func_id(ppf_hwdev, func_idx, &host_id); + if (err) { + sdk_err(ppf_hwdev->dev_hdl, "Failed to get function %u host id, err: %d\n", + func_idx, err); + return -EFAULT; + } + + err = hinic3_get_slave_host_enable(hwdev, host_id, &host_enable); + if (err != 0) { + sdk_err(ppf_hwdev->dev_hdl, "Get slave host %u enable failed, ret %d\n", + host_id, err); + return err; + } + if (!host_enable) + return 0; + + return get_slave_func_netdev_state(hwdev, func_idx, opened); +} +EXPORT_SYMBOL(hinic3_get_netdev_state); + +static int __get_func_nic_state_from_pf(struct hinic3_hwdev *hwdev, + u16 glb_func_idx, u8 *en) +{ + struct hinic3_multi_host_mgmt *mhost_mgmt = NULL; + struct hinic3_hwdev *ppf_hwdev = hwdev; + + down(&hwdev->ppf_sem); + if (hinic3_func_type(hwdev) != TYPE_PPF) + ppf_hwdev = ((struct hinic3_hwdev *)hwdev)->ppf_hwdev; + + if (!ppf_hwdev || !ppf_hwdev->mhost_mgmt) { + up(&hwdev->ppf_sem); + return -EFAULT; + } + + mhost_mgmt = ppf_hwdev->mhost_mgmt; + *en = !!test_bit(glb_func_idx, mhost_mgmt->func_nic_en); + up(&hwdev->ppf_sem); + + return 0; +} + +static int __get_func_vroce_state_from_pf(struct hinic3_hwdev *hwdev, + u16 glb_func_idx, u8 *en) +{ + struct hinic3_multi_host_mgmt *mhost_mgmt = NULL; + struct hinic3_hwdev *ppf_hwdev = hwdev; + + down(&hwdev->ppf_sem); + if (hinic3_func_type(hwdev) != TYPE_PPF) + ppf_hwdev = ((struct hinic3_hwdev *)hwdev)->ppf_hwdev; + + if (!ppf_hwdev || !ppf_hwdev->mhost_mgmt) { + up(&hwdev->ppf_sem); + return -EFAULT; + } + + mhost_mgmt = ppf_hwdev->mhost_mgmt; + *en = !!test_bit(glb_func_idx, mhost_mgmt->func_vroce_en); + up(&hwdev->ppf_sem); + + return 0; +} + +static int __get_vf_func_nic_state(struct hinic3_hwdev *hwdev, u16 glb_func_idx, + bool *en) +{ + struct hinic3_slave_func_nic_state nic_state = {0}; + u16 out_size = sizeof(nic_state); + int err; + + if (hinic3_func_type(hwdev) == TYPE_VF) { + nic_state.func_idx = glb_func_idx; + err = hinic3_mbox_to_pf(hwdev, HINIC3_MOD_SW_FUNC, + HINIC3_SW_CMD_GET_SLAVE_FUNC_NIC_STATE, + &nic_state, sizeof(nic_state), + &nic_state, &out_size, 0, HINIC3_CHANNEL_COMM); + if (err || !out_size || nic_state.status) { + sdk_err(hwdev->dev_hdl, + "Failed to get vf %u state, err: %d, out_size: %u, status: 0x%x\n", + glb_func_idx, err, out_size, nic_state.status); + return -EFAULT; + } + + *en = 
!!nic_state.enable; + + return 0; + } + + return -EFAULT; +} + +static int __get_func_vroce_state(struct hinic3_hwdev *hwdev, u16 glb_func_idx, + u8 *en) +{ + struct hinic3_slave_func_nic_state vroce_state = {0}; + u16 out_size = sizeof(vroce_state); + int err; + + if (hinic3_func_type(hwdev) == TYPE_VF) { + vroce_state.func_idx = glb_func_idx; + err = hinic3_mbox_to_pf(hwdev, HINIC3_MOD_SW_FUNC, + HINIC3_SW_CMD_GET_SLAVE_FUNC_VROCE_STATE, + &vroce_state, sizeof(vroce_state), + &vroce_state, &out_size, 0, HINIC3_CHANNEL_COMM); + if (err || !out_size || vroce_state.status) { + sdk_err(hwdev->dev_hdl, + "Failed to get vf %u state, err: %d, out_size: %u, status: 0x%x\n", + glb_func_idx, err, out_size, vroce_state.status); + return -EFAULT; + } + + *en = !!vroce_state.enable; + + return 0; + } + + return __get_func_vroce_state_from_pf(hwdev, glb_func_idx, en); +} + +int hinic3_get_func_vroce_enable(void *hwdev, u16 glb_func_idx, u8 *en) +{ + if (!hwdev || !en) + return -EINVAL; + + return __get_func_vroce_state(hwdev, glb_func_idx, en); +} +EXPORT_SYMBOL(hinic3_get_func_vroce_enable); + +int hinic3_get_func_nic_enable(void *hwdev, u16 glb_func_idx, bool *en) +{ + u8 nic_en; + int err; + + if (!hwdev || !en) + return -EINVAL; + + /* if single host, return true. */ + if (!IS_MULTI_HOST((struct hinic3_hwdev *)hwdev)) { + *en = true; + return 0; + } + + if (!IS_SLAVE_HOST((struct hinic3_hwdev *)hwdev)) { + /* if card mode is OVS, VFs don't need attach_uld, so return false. */ + if (hinic3_func_type(hwdev) == TYPE_VF && + hinic3_support_ovs(hwdev, NULL)) + *en = false; + else + *en = true; + + return 0; + } + + /* PF in slave host should be probe in CHIP_MODE_VMGW + * mode for pxe install. + * PF num need (0 ~31) + */ + if (hinic3_func_type(hwdev) != TYPE_VF && + IS_VM_SLAVE_HOST((struct hinic3_hwdev *)hwdev) && + glb_func_idx < HINIC3_SUPPORT_MAX_PF_NUM) { + *en = true; + return 0; + } + + /* try to get function nic state in sdk directly */ + err = __get_func_nic_state_from_pf(hwdev, glb_func_idx, &nic_en); + if (err) { + if (glb_func_idx < HINIC3_SUPPORT_MAX_PF_NUM) + return err; + } else { + *en = !!nic_en; + return 0; + } + + return __get_vf_func_nic_state(hwdev, glb_func_idx, en); +} + +static int slave_host_init(struct hinic3_hwdev *hwdev) +{ + int err; + + if (IS_SLAVE_HOST(hwdev)) { + /* PXE doesn't support to receive mbox from master host */ + set_slave_host_enable(hwdev, hinic3_pcie_itf_id(hwdev), true); + if ((IS_VM_SLAVE_HOST(hwdev) && + hinic3_get_master_host_mbox_enable(hwdev)) || + IS_BMGW_SLAVE_HOST(hwdev)) { + err = hinic3_register_slave_ppf(hwdev, true); + if (err) { + set_slave_host_enable(hwdev, hinic3_pcie_itf_id(hwdev), false); + return err; + } + } + } else { + /* slave host can send message to mgmt cpu + * after setup master mbox + */ + set_master_host_mbox_enable(hwdev, true); + } + + return 0; +} + +int hinic3_multi_host_mgmt_init(struct hinic3_hwdev *hwdev) +{ + int err; + struct service_cap *cap = &hwdev->cfg_mgmt->svc_cap; + + if (!IS_MULTI_HOST(hwdev) || !HINIC3_IS_PPF(hwdev)) + return 0; + + hwdev->mhost_mgmt = kcalloc(1, sizeof(*hwdev->mhost_mgmt), GFP_KERNEL); + if (!hwdev->mhost_mgmt) + return -ENOMEM; + + hwdev->mhost_mgmt->shost_ppf_idx = hinic3_host_ppf_idx(hwdev, HINIC3_MGMT_SHOST_HOST_ID); + hwdev->mhost_mgmt->mhost_ppf_idx = hinic3_host_ppf_idx(hwdev, cap->master_host_id); + + err = hinic3_get_hw_pf_infos(hwdev, &hwdev->mhost_mgmt->pf_infos, HINIC3_CHANNEL_COMM); + if (err) + goto out_free_mhost_mgmt; + + hinic3_register_ppf_mbox_cb(hwdev, HINIC3_MOD_COMM, 
hwdev, comm_ppf_mbox_handler); + hinic3_register_ppf_mbox_cb(hwdev, HINIC3_MOD_L2NIC, hwdev, hinic3_nic_ppf_mbox_handler); + hinic3_register_ppf_mbox_cb(hwdev, HINIC3_MOD_HILINK, hwdev, hilink_ppf_mbox_handler); + hinic3_register_ppf_mbox_cb(hwdev, HINIC3_MOD_SW_FUNC, hwdev, sw_func_ppf_mbox_handler); + + bitmap_zero(hwdev->mhost_mgmt->func_nic_en, HINIC3_MAX_MGMT_FUNCTIONS); + bitmap_zero(hwdev->mhost_mgmt->func_vroce_en, HINIC3_MAX_MGMT_FUNCTIONS); + + /* Slave host: + * register slave host ppf functions + * Get function's nic state + */ + err = slave_host_init(hwdev); + if (err) + goto out_free_mhost_mgmt; + + return 0; + +out_free_mhost_mgmt: + kfree(hwdev->mhost_mgmt); + hwdev->mhost_mgmt = NULL; + + return err; +} + +int hinic3_multi_host_mgmt_free(struct hinic3_hwdev *hwdev) +{ + if (!IS_MULTI_HOST(hwdev) || !HINIC3_IS_PPF(hwdev)) + return 0; + + if (IS_SLAVE_HOST(hwdev)) { + hinic3_register_slave_ppf(hwdev, false); + + set_slave_host_enable(hwdev, hinic3_pcie_itf_id(hwdev), false); + } else { + set_master_host_mbox_enable(hwdev, false); + } + + hinic3_unregister_ppf_mbox_cb(hwdev, HINIC3_MOD_COMM); + hinic3_unregister_ppf_mbox_cb(hwdev, HINIC3_MOD_L2NIC); + hinic3_unregister_ppf_mbox_cb(hwdev, HINIC3_MOD_HILINK); + hinic3_unregister_ppf_mbox_cb(hwdev, HINIC3_MOD_SW_FUNC); + + kfree(hwdev->mhost_mgmt); + hwdev->mhost_mgmt = NULL; + + return 0; +} + +int hinic3_get_mhost_func_nic_enable(void *hwdev, u16 func_id, bool *en) +{ + struct hinic3_hwdev *dev = hwdev; + u8 func_en; + int ret; + + if (!hwdev || !en || func_id >= HINIC3_MAX_MGMT_FUNCTIONS || !IS_MULTI_HOST(dev)) + return -EINVAL; + + ret = __get_func_nic_state_from_pf(hwdev, func_id, &func_en); + if (ret) + return ret; + + *en = !!func_en; + + return 0; +} +EXPORT_SYMBOL(hinic3_get_mhost_func_nic_enable); diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_multi_host_mgmt.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_multi_host_mgmt.h new file mode 100644 index 000000000..fb25160e3 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_multi_host_mgmt.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 Huawei Technologies Co., Ltd */ + +#ifndef HINIC3_MULTI_HOST_MGMT_H +#define HINIC3_MULTI_HOST_MGMT_H + +#define HINIC3_VF_IN_VM 0x3 + +#define HINIC3_MGMT_SHOST_HOST_ID 0 +#define HINIC3_MAX_MGMT_FUNCTIONS 1024 +#define HINIC3_MAX_MGMT_FUNCTIONS_64 (HINIC3_MAX_MGMT_FUNCTIONS / 64) + +struct hinic3_multi_host_mgmt { + struct hinic3_hwdev *hwdev; + + /* slave host registered */ + bool shost_registered; + u8 shost_host_idx; + u8 shost_ppf_idx; + + u8 mhost_ppf_idx; + u8 rsvd1; + + /* slave host functions support nic enable */ + DECLARE_BITMAP(func_nic_en, HINIC3_MAX_MGMT_FUNCTIONS); + DECLARE_BITMAP(func_vroce_en, HINIC3_MAX_MGMT_FUNCTIONS); + + struct hinic3_hw_pf_infos pf_infos; + + u64 rsvd2; +}; + +struct hinic3_host_fwd_head { + unsigned short dst_glb_func_idx; + unsigned char dst_itf_idx; + unsigned char mod; + + unsigned char cmd; + unsigned char rsv[3]; +}; + +/* software cmds, vf->pf and multi-host */ +enum hinic3_sw_funcs_cmd { + HINIC3_SW_CMD_SLAVE_HOST_PPF_REGISTER = 0x0, + HINIC3_SW_CMD_SLAVE_HOST_PPF_UNREGISTER, + HINIC3_SW_CMD_GET_SLAVE_FUNC_NIC_STATE, + HINIC3_SW_CMD_SET_SLAVE_FUNC_NIC_STATE, + HINIC3_SW_CMD_SEND_MSG_TO_VF, + HINIC3_SW_CMD_MIGRATE_READY, + HINIC3_SW_CMD_GET_SLAVE_NETDEV_STATE, + + HINIC3_SW_CMD_GET_SLAVE_FUNC_VROCE_STATE, + HINIC3_SW_CMD_SET_SLAVE_FUNC_VROCE_STATE, + HINIC3_SW_CMD_GET_SLAVE_VROCE_DEVICE_STATE = 0x9, // keep consistent with the macro in vroce_cfg_vf_do.h +}; + +/* 
multi host mgmt event sub cmd */ +enum hinic3_mhost_even_type { + HINIC3_MHOST_NIC_STATE_CHANGE = 1, + HINIC3_MHOST_VROCE_STATE_CHANGE = 2, + HINIC3_MHOST_GET_VROCE_STATE = 3, +}; + +struct hinic3_mhost_nic_func_state { + u8 status; + u8 enable; + u16 func_idx; +}; + +struct hinic3_multi_host_mgmt_event { + u16 sub_cmd; + u16 rsvd[3]; + + void *data; +}; + +int hinic3_multi_host_mgmt_init(struct hinic3_hwdev *hwdev); +int hinic3_multi_host_mgmt_free(struct hinic3_hwdev *hwdev); +int hinic3_mbox_to_host_no_ack(struct hinic3_hwdev *hwdev, enum hinic3_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, u16 channel); + +struct register_slave_host { + u8 status; + u8 version; + u8 rsvd[6]; + + u8 host_id; + u8 ppf_idx; + u8 get_nic_en; + u8 rsvd2[5]; + + /* 16 * 64 bits for max 1024 functions */ + u64 funcs_nic_en[HINIC3_MAX_MGMT_FUNCTIONS_64]; + /* 16 * 64 bits for max 1024 functions */ + u64 funcs_vroce_en[HINIC3_MAX_MGMT_FUNCTIONS_64]; +}; + +struct hinic3_slave_func_nic_state { + u8 status; + u8 version; + u8 rsvd[6]; + + u16 func_idx; + u8 enable; + u8 opened; + u8 vroce_flag; + u8 rsvd2[7]; +}; + +void set_master_host_mbox_enable(struct hinic3_hwdev *hwdev, bool enable); + +int sw_func_pf_mbox_handler(void *pri_handle, u16 vf_id, u16 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size); + +int vf_sw_func_handler(void *hwdev, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size); +int hinic3_set_func_probe_in_host(void *hwdev, u16 func_id, bool probe); +bool hinic3_get_func_probe_in_host(void *hwdev, u16 func_id); + +void *hinic3_get_ppf_hwdev_by_pdev(struct pci_dev *pdev); + +int hinic3_get_func_nic_enable(void *hwdev, u16 glb_func_idx, bool *en); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.c b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.c index 9d1693faf..4cdc6f6ea 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_nictool.c @@ -40,6 +40,7 @@ int card_id;
typedef int (*hw_driv_module)(struct hinic3_lld_dev *lld_dev, const void *buf_in, u32 in_size, void *buf_out, u32 *out_size); + struct hw_drv_module_handle { enum driver_cmd_type driv_cmd_name; hw_driv_module driv_func; diff --git a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_pci_id_tbl.h b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_pci_id_tbl.h index 21ab78a8e..d71474ea8 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/hinic3_pci_id_tbl.h +++ b/drivers/net/ethernet/huawei/hinic3/hw/hinic3_pci_id_tbl.h @@ -4,15 +4,39 @@ #ifndef HINIC3_PCI_ID_TBL_H #define HINIC3_PCI_ID_TBL_H
+#define HINIC3_VIRTIO_VNEDER_ID 0x1AF4 +#ifdef CONFIG_SP_VID_DID +#define PCI_VENDOR_ID_SPNIC 0x1F3F +#define HINIC3_DEV_ID_STANDARD 0x9020 +#define HINIC3_DEV_ID_SDI_5_1_PF 0x9032 +#define HINIC3_DEV_ID_SDI_5_0_PF 0x9031 +#define HINIC3_DEV_ID_DPU_PF 0x9030 +#define HINIC3_DEV_ID_SPN120 0x9021 +#define HINIC3_DEV_ID_VF 0x9001 +#define HINIC3_DEV_ID_VF_HV 0x9002 +#define HINIC3_DEV_SDI_5_1_ID_VF 0x9003 +#define HINIC3_DEV_SDI_5_1_ID_VF_HV 0x9004 +#define HINIC3_DEV_ID_SPU 0xAC00 +#define HINIC3_DEV_SDI_5_1_SSDID_VF 0x1000 +#define HINIC3_DEV_SDI_V100_SSDID_MASK (3 << 12) +#else #define PCI_VENDOR_ID_HUAWEI 0x19e5 - #define HINIC3_DEV_ID_STANDARD 0x0222 -#define HINIC3_DEV_ID_DPU_PF 0x0224 -#define HINIC3_DEV_ID_SDI_5_0_PF 0x0225 #define HINIC3_DEV_ID_SDI_5_1_PF 0x0226 +#define HINIC3_DEV_ID_SDI_5_0_PF 0x0225 +#define HINIC3_DEV_ID_DPU_PF 0x0224 #define HINIC3_DEV_ID_VF 0x375F #define HINIC3_DEV_ID_VF_HV 0x379F +#define HINIC3_DEV_SDI_5_1_ID_VF 0x375F +#define HINIC3_DEV_SDI_5_1_ID_VF_HV 0x379F #define HINIC3_DEV_ID_SPU 0xAC00 +#define HINIC3_DEV_SDI_5_1_SSDID_VF 0x1000 +#define HINIC3_DEV_SDI_V100_SSDID_MASK (3 << 12) +#endif + +#define HINIC3_DEV_SSID_2X25G 0x0051 +#define HINIC3_DEV_SSID_4X25G 0x0052 +#define HINIC3_DEV_SSID_2X100G 0x00A1
#endif
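For context, the PCI device IDs defined above are ultimately consumed through a pci_device_id table that the hinic3 LLD registers with the PCI core. The short sketch below is illustrative only: it assumes the default (non-CONFIG_SP_VID_DID) branch of this header, and the table name and the particular set of entries are assumptions for illustration, not taken from this patch (the driver's real table lives in its probe/LLD source).

#include <linux/module.h>
#include <linux/pci.h>
#include "hinic3_pci_id_tbl.h"

/* Minimal sketch of a pci_device_id table built from the IDs above.
 * PCI_VDEVICE() fills vendor/device and wildcards subvendor/subdevice;
 * the trailing 0 is unused driver_data. Entry selection is illustrative.
 */
static const struct pci_device_id hinic3_pci_table_sketch[] = {
	{ PCI_VDEVICE(HUAWEI, HINIC3_DEV_ID_STANDARD), 0 },
	{ PCI_VDEVICE(HUAWEI, HINIC3_DEV_ID_DPU_PF), 0 },
	{ PCI_VDEVICE(HUAWEI, HINIC3_DEV_ID_SDI_5_0_PF), 0 },
	{ PCI_VDEVICE(HUAWEI, HINIC3_DEV_ID_SDI_5_1_PF), 0 },
	{ PCI_VDEVICE(HUAWEI, HINIC3_DEV_ID_VF), 0 },
	{ 0, 0 }
};
MODULE_DEVICE_TABLE(pci, hinic3_pci_table_sketch);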
diff --git a/drivers/net/ethernet/huawei/hinic3/hw/ossl_knl_linux.c b/drivers/net/ethernet/huawei/hinic3/hw/ossl_knl_linux.c index 874da7606..f8aea696d 100644 --- a/drivers/net/ethernet/huawei/hinic3/hw/ossl_knl_linux.c +++ b/drivers/net/ethernet/huawei/hinic3/hw/ossl_knl_linux.c @@ -23,7 +23,7 @@ void file_close(struct file *file_handle)
u32 get_file_size(struct file *file_handle) { - struct inode *file_inode; + struct inode *file_inode = NULL;
file_inode = file_handle->f_inode;
@@ -35,6 +35,19 @@ void set_file_position(struct file *file_handle, u32 position) file_handle->f_pos = position; }
+int file_read(struct file *file_handle, char *log_buffer, u32 rd_length, + u32 *file_pos) +{ + return (int)kernel_read(file_handle, log_buffer, rd_length, + &file_handle->f_pos); +} + +u32 file_write(struct file *file_handle, const char *log_buffer, u32 wr_length) +{ + return (u32)kernel_write(file_handle, log_buffer, wr_length, + &file_handle->f_pos); +} + static int _linux_thread_func(void *thread) { struct sdk_thread_info *info = (struct sdk_thread_info *)thread; @@ -63,8 +76,7 @@ void stop_thread(struct sdk_thread_info *thread_info)
void utctime_to_localtime(u64 utctime, u64 *localtime) { - *localtime = utctime - sys_tz.tz_minuteswest * - OSSL_MINUTE_BASE; /*lint !e647*/ + *localtime = utctime - (u64)(sys_tz.tz_minuteswest * OSSL_MINUTE_BASE); /*lint !e647 !e571*/ }
#ifndef HAVE_TIMER_SETUP @@ -77,7 +89,7 @@ void initialize_timer(const void *adapter_hdl, struct timer_list *timer) } #endif
-void add_to_timer(struct timer_list *timer, long period) +void add_to_timer(struct timer_list *timer, u64 period) { if (!timer) return; diff --git a/drivers/net/ethernet/huawei/hinic3/include/bond/bond_common_defs.h b/drivers/net/ethernet/huawei/hinic3/include/bond/bond_common_defs.h new file mode 100644 index 000000000..5ae3f4e5e --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/include/bond/bond_common_defs.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved. */ + +#ifndef BOND_COMMON_DEFS_H +#define BOND_COMMON_DEFS_H + +#define BOND_NAME_MAX_LEN 16 +#define BOND_PORT_MAX_NUM 4 +#define BOND_ID_INVALID 0xFFFF +#define OVS_PORT_NUM_MAX BOND_PORT_MAX_NUM +#define DEFAULT_ROCE_BOND_FUNC 0xFFFFFFFF + +enum bond_group_id { + BOND_FIRST_ID = 1, + BOND_MAX_ID = 4, + BOND_MAX_NUM, +}; + +#pragma pack(4) +/** + * bond per port statistics + */ +struct tag_bond_port_stat { + /** mpu provide */ + u64 rx_pkts; + u64 rx_bytes; + u64 rx_drops; + u64 rx_errors; + + u64 tx_pkts; + u64 tx_bytes; + u64 tx_drops; + u64 tx_errors; +}; + +#pragma pack() + +/** + * bond port attribute + */ +struct tag_bond_port_attr { + u8 duplex; + u8 status; + u8 rsvd0[2]; + u32 speed; +}; + +/** + * Get bond information command struct defination + * @see OVS_MPU_CMD_BOND_GET_ATTR + */ +struct tag_bond_get { + u16 bond_id_vld; /* 1: used bond_id get bond info, 0: used bond_name */ + u16 bond_id; /* if bond_id_vld=1 input, else output */ + u8 bond_name[BOND_NAME_MAX_LEN]; /* if bond_id_vld=0 input, else output */ + + u16 bond_mode; /* 1 for active-backup,2 for balance-xor,4 for 802.3ad */ + u8 active_slaves; /* active port slaves(bitmaps) */ + u8 slaves; /* bond port id bitmaps */ + + u8 lacp_collect_slaves; /* bond port id bitmaps */ + u8 xmit_hash_policy; /* xmit hash:0 for layer 2, 1 for layer 2+3, 2 for layer 3+4 */ + u16 rsvd0; /* in order to 4B aligned */ + + struct tag_bond_port_stat stat[BOND_PORT_MAX_NUM]; + struct tag_bond_port_attr attr[BOND_PORT_MAX_NUM]; +}; + +#endif /** BOND_COMMON_DEFS_H */ diff --git a/drivers/net/ethernet/huawei/hinic3/include/cfg_mgmt/cfg_mgmt_mpu_cmd.h b/drivers/net/ethernet/huawei/hinic3/include/cfg_mgmt/cfg_mgmt_mpu_cmd.h new file mode 100644 index 000000000..a13b66da9 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/include/cfg_mgmt/cfg_mgmt_mpu_cmd.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef CFG_MGMT_MPU_CMD_H +#define CFG_MGMT_MPU_CMD_H + +enum cfg_cmd { + CFG_CMD_GET_DEV_CAP = 0, /**< Device capability of pf/vf, @see cfg_cmd_dev_cap */ + CFG_CMD_GET_HOST_TIMER = 1, /**< Capability of host timer, @see cfg_cmd_host_timer */ +}; + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/include/cfg_mgmt/cfg_mgmt_mpu_cmd_defs.h b/drivers/net/ethernet/huawei/hinic3/include/cfg_mgmt/cfg_mgmt_mpu_cmd_defs.h new file mode 100644 index 000000000..f56df083a --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/include/cfg_mgmt/cfg_mgmt_mpu_cmd_defs.h @@ -0,0 +1,212 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef CFG_MGMT_MPU_CMD_DEFS_H +#define CFG_MGMT_MPU_CMD_DEFS_H + +#include "mpu_cmd_base_defs.h" + +enum servic_bit_define { + SERVICE_BIT_NIC = 0, + SERVICE_BIT_ROCE = 1, + SERVICE_BIT_VBS = 2, + SERVICE_BIT_TOE = 3, + SERVICE_BIT_IPSEC = 4, + SERVICE_BIT_FC = 5, + SERVICE_BIT_VIRTIO = 6, + SERVICE_BIT_OVS = 7, + SERVICE_BIT_NVME = 8, + SERVICE_BIT_ROCEAA = 9, 
+ SERVICE_BIT_CURRENET = 10, + SERVICE_BIT_PPA = 11, + SERVICE_BIT_MIGRATE = 12, + SERVICE_BIT_VROCE = 13, + SERVICE_BIT_MAX +}; + +#define CFG_SERVICE_MASK_NIC (0x1 << SERVICE_BIT_NIC) +#define CFG_SERVICE_MASK_ROCE (0x1 << SERVICE_BIT_ROCE) +#define CFG_SERVICE_MASK_VBS (0x1 << SERVICE_BIT_VBS) +#define CFG_SERVICE_MASK_TOE (0x1 << SERVICE_BIT_TOE) +#define CFG_SERVICE_MASK_IPSEC (0x1 << SERVICE_BIT_IPSEC) +#define CFG_SERVICE_MASK_FC (0x1 << SERVICE_BIT_FC) +#define CFG_SERVICE_MASK_VIRTIO (0x1 << SERVICE_BIT_VIRTIO) +#define CFG_SERVICE_MASK_OVS (0x1 << SERVICE_BIT_OVS) +#define CFG_SERVICE_MASK_NVME (0x1 << SERVICE_BIT_NVME) +#define CFG_SERVICE_MASK_ROCEAA (0x1 << SERVICE_BIT_ROCEAA) +#define CFG_SERVICE_MASK_CURRENET (0x1 << SERVICE_BIT_CURRENET) +#define CFG_SERVICE_MASK_PPA (0x1 << SERVICE_BIT_PPA) +#define CFG_SERVICE_MASK_MIGRATE (0x1 << SERVICE_BIT_MIGRATE) +#define CFG_SERVICE_MASK_VROCE (0x1 << SERVICE_BIT_VROCE) + +/* Definition of the scenario ID in the cfg_data, which is used for SML memory allocation. */ +enum scenes_id_define { + SCENES_ID_FPGA_ETH = 0, + SCENES_ID_COMPUTE_STANDARD = 1, + SCENES_ID_STORAGE_ROCEAA_2x100 = 2, + SCENES_ID_STORAGE_ROCEAA_4x25 = 3, + SCENES_ID_CLOUD = 4, + SCENES_ID_FC = 5, + SCENES_ID_STORAGE_ROCE = 6, + SCENES_ID_COMPUTE_ROCE = 7, + SCENES_ID_STORAGE_TOE = 8, + SCENES_ID_MAX +}; + +/* struct cfg_cmd_dev_cap.sf_svc_attr */ +enum { + SF_SVC_FT_BIT = (1 << 0), + SF_SVC_RDMA_BIT = (1 << 1), +}; + +struct cfg_cmd_host_timer { + struct mgmt_msg_head head; + + u8 host_id; + u8 rsvd1; + + u8 timer_pf_num; + u8 timer_pf_id_start; + u16 timer_vf_num; + u16 timer_vf_id_start; + u32 rsvd2[8]; +}; + +struct cfg_cmd_dev_cap { + struct mgmt_msg_head head; + + u16 func_id; + u16 rsvd1; + + /* Public resources */ + u8 host_id; + u8 ep_id; + u8 er_id; + u8 port_id; + + u16 host_total_func; + u8 host_pf_num; + u8 pf_id_start; + u16 host_vf_num; + u16 vf_id_start; + u8 host_oq_id_mask_val; + u8 timer_en; + u8 host_valid_bitmap; + u8 rsvd_host; + + u16 svc_cap_en; + u16 max_vf; + u8 flexq_en; + u8 valid_cos_bitmap; + /* Reserved for func_valid_cos_bitmap */ + u8 port_cos_valid_bitmap; + u8 rsvd_func1; + u32 rsvd_func2; + + u8 sf_svc_attr; + u8 func_sf_en; + u8 lb_mode; + u8 smf_pg; + + u32 max_conn_num; + u16 max_stick2cache_num; + u16 max_bfilter_start_addr; + u16 bfilter_len; + u16 hash_bucket_num; + + /* shared resource */ + u8 host_sf_en; + u8 master_host_id; + u8 srv_multi_host_mode; + u8 virtio_vq_size; + + u32 rsvd_func3[5]; + + /* l2nic */ + u16 nic_max_sq_id; + u16 nic_max_rq_id; + u16 nic_default_num_queues; + u16 rsvd1_nic; + u32 rsvd2_nic[2]; + + /* RoCE */ + u32 roce_max_qp; + u32 roce_max_cq; + u32 roce_max_srq; + u32 roce_max_mpt; + u32 roce_max_drc_qp; + + u32 roce_cmtt_cl_start; + u32 roce_cmtt_cl_end; + u32 roce_cmtt_cl_size; + + u32 roce_dmtt_cl_start; + u32 roce_dmtt_cl_end; + u32 roce_dmtt_cl_size; + + u32 roce_wqe_cl_start; + u32 roce_wqe_cl_end; + u32 roce_wqe_cl_size; + u8 roce_srq_container_mode; + u8 rsvd_roce1[3]; + u32 rsvd_roce2[5]; + + /* IPsec */ + u32 ipsec_max_sactx; + u16 ipsec_max_cq; + u16 rsvd_ipsec1; + u32 rsvd_ipsec[2]; + + /* OVS */ + u32 ovs_max_qpc; + u32 rsvd_ovs1[3]; + + /* ToE */ + u32 toe_max_pctx; + u32 toe_max_cq; + u16 toe_max_srq; + u16 toe_srq_id_start; + u16 toe_max_mpt; + u16 toe_rsvd_1; + u32 toe_max_cctxt; + u32 rsvd_toe[1]; + + /* FC */ + u32 fc_max_pctx; + u32 fc_max_scq; + u32 fc_max_srq; + + u32 fc_max_cctx; + u32 fc_cctx_id_start; + + u8 fc_vp_id_start; + u8 fc_vp_id_end; + u8 rsvd_fc1[2]; + u32 
rsvd_fc2[5]; + + /* VBS */ + u16 vbs_max_volq; + u8 vbs_main_pf_enable; + u8 vbs_vsock_pf_enable; + u8 vbs_fushion_queue_pf_enable; + u8 rsvd0_vbs; + u16 rsvd1_vbs; + u32 rsvd2_vbs[2]; + + u16 fake_vf_start_id; + u16 fake_vf_num; + u32 fake_vf_max_pctx; + u16 fake_vf_bfilter_start_addr; + u16 fake_vf_bfilter_len; + + u32 map_host_id : 3; + u32 fake_vf_en : 1; + u32 fake_vf_start_bit : 4; + u32 fake_vf_end_bit : 4; + u32 fake_vf_page_bit : 4; + u32 rsvd2 : 16; + + u32 rsvd_glb[7]; +}; + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/include/cqm/cqm_npu_cmd.h b/drivers/net/ethernet/huawei/hinic3/include/cqm/cqm_npu_cmd.h new file mode 100644 index 000000000..d4e33f702 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/include/cqm/cqm_npu_cmd.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef CQM_NPU_CMD_H +#define CQM_NPU_CMD_H + +enum cqm_cmd_type { + CQM_CMD_T_INVALID = 0, /* < Invalid command */ + CQM_CMD_T_BAT_UPDATE, /* < Update the bat configuration of the function, + * @see struct tag_cqm_cmdq_bat_update + */ + CQM_CMD_T_CLA_UPDATE, /* < Update the cla configuration of the function, + * @see struct tag_cqm_cla_update_cmd + */ + CQM_CMD_T_BLOOMFILTER_SET, /* < Set the bloomfilter configuration of the function, + * @see struct tag_cqm_bloomfilter_cmd + */ + CQM_CMD_T_BLOOMFILTER_CLEAR, /* < Clear the bloomfilter configuration of the function, + * @see struct tag_cqm_bloomfilter_cmd + */ + CQM_CMD_T_RSVD, /* < Unused */ + CQM_CMD_T_CLA_CACHE_INVALID, /* < Invalidate the cla cacheline, + * @see struct tag_cqm_cla_cache_invalid_cmd + */ + CQM_CMD_T_BLOOMFILTER_INIT, /* < Init the bloomfilter configuration of the function, + * @see struct tag_cqm_bloomfilter_init_cmd + */ + CQM_CMD_T_MAX +}; + +#endif /* CQM_NPU_CMD_H */ diff --git a/drivers/net/ethernet/huawei/hinic3/include/cqm/cqm_npu_cmd_defs.h b/drivers/net/ethernet/huawei/hinic3/include/cqm/cqm_npu_cmd_defs.h new file mode 100644 index 000000000..28b83edde --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/include/cqm/cqm_npu_cmd_defs.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef CQM_NPU_CMD_DEFS_H +#define CQM_NPU_CMD_DEFS_H + +struct tag_cqm_cla_cache_invalid_cmd { + u32 gpa_h; + u32 gpa_l; + + u32 cache_size; /* CLA cache size=4096B */ + + u32 smf_id; + u32 func_id; +}; + +struct tag_cqm_cla_update_cmd { + /* Gpa address to be updated */ + u32 gpa_h; // byte addr + u32 gpa_l; // byte addr + + /* Updated Value */ + u32 value_h; + u32 value_l; + + u32 smf_id; + u32 func_id; +}; + +struct tag_cqm_bloomfilter_cmd { + u32 rsv1; + +#if (BYTE_ORDER == LITTLE_ENDIAN) + u32 k_en : 4; + u32 func_id : 16; + u32 rsv2 : 12; +#else + u32 rsv2 : 12; + u32 func_id : 16; + u32 k_en : 4; +#endif + + u32 index_h; + u32 index_l; +}; + +#define CQM_BAT_MAX_SIZE 256 +struct tag_cqm_cmdq_bat_update { + u32 offset; // byte offset, 16Byte aligned + u32 byte_len; // max size: 256byte + u8 data[CQM_BAT_MAX_SIZE]; + u32 smf_id; + u32 func_id; +}; + +struct tag_cqm_bloomfilter_init_cmd { + u32 bloom_filter_len; // 16Byte aligned + u32 bloom_filter_addr; +}; + +#endif /* CQM_NPU_CMD_DEFS_H */ diff --git a/drivers/net/ethernet/huawei/hinic3/include/hinic3_common.h b/drivers/net/ethernet/huawei/hinic3/include/hinic3_common.h new file mode 100644 index 000000000..73cb1335a --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/include/hinic3_common.h @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: GPL-2.0 */ 
+/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC3_COMMON_H +#define HINIC3_COMMON_H + +#include <linux/types.h> + +struct hinic3_dma_addr_align { + u32 real_size; + + void *ori_vaddr; + dma_addr_t ori_paddr; + + void *align_vaddr; + dma_addr_t align_paddr; +}; + +enum hinic3_wait_return { + WAIT_PROCESS_CPL = 0, + WAIT_PROCESS_WAITING = 1, + WAIT_PROCESS_ERR = 2, +}; + +struct hinic3_sge { + u32 hi_addr; + u32 lo_addr; + u32 len; +}; + +/* * + * hinic3_cpu_to_be32 - convert data to big endian 32 bit format + * @data: the data to convert + * @len: length of data to convert, must be a multiple of 4B + */ +static inline void hinic3_cpu_to_be32(void *data, int len) +{ + int i, chunk_sz = sizeof(u32); + int data_len = len; + u32 *mem = data; + + if (!data) + return; + + data_len = data_len / chunk_sz; + + for (i = 0; i < data_len; i++) { + *mem = cpu_to_be32(*mem); + mem++; + } +} + +/* * + * hinic3_be32_to_cpu - convert data from big endian 32 bit format + * @data: the data to convert + * @len: length of data to convert + */ +static inline void hinic3_be32_to_cpu(void *data, int len) +{ + int i, chunk_sz = sizeof(u32); + int data_len = len; + u32 *mem = data; + + if (!data) + return; + + data_len = data_len / chunk_sz; + + for (i = 0; i < data_len; i++) { + *mem = be32_to_cpu(*mem); + mem++; + } +} + +/* * + * hinic3_set_sge - set dma area in scatter gather entry + * @sge: scatter gather entry + * @addr: dma address + * @len: length of relevant data in the dma address + */ +static inline void hinic3_set_sge(struct hinic3_sge *sge, dma_addr_t addr, + int len) +{ + sge->hi_addr = upper_32_bits(addr); + sge->lo_addr = lower_32_bits(addr); + sge->len = (u32)len; +} + +#ifdef HW_CONVERT_ENDIAN +#define hinic3_hw_be32(val) (val) +#define hinic3_hw_cpu32(val) (val) +#define hinic3_hw_cpu16(val) (val) +#else +#define hinic3_hw_be32(val) cpu_to_be32(val) +#define hinic3_hw_cpu32(val) be32_to_cpu(val) +#define hinic3_hw_cpu16(val) be16_to_cpu(val) +#endif + +static inline void hinic3_hw_be32_len(void *data, int len) +{ +#ifndef HW_CONVERT_ENDIAN + int i, chunk_sz = sizeof(u32); + int data_len = len; + u32 *mem = data; + + if (!data) + return; + + data_len = data_len / chunk_sz; + + for (i = 0; i < data_len; i++) { + *mem = hinic3_hw_be32(*mem); + mem++; + } +#endif +} + +static inline void hinic3_hw_cpu32_len(void *data, int len) +{ +#ifndef HW_CONVERT_ENDIAN + int i, chunk_sz = sizeof(u32); + int data_len = len; + u32 *mem = data; + + if (!data) + return; + + data_len = data_len / chunk_sz; + + for (i = 0; i < data_len; i++) { + *mem = hinic3_hw_cpu32(*mem); + mem++; + } +#endif +} + +int hinic3_dma_zalloc_coherent_align(void *dev_hdl, u64 size, u64 align, + unsigned int flag, + struct hinic3_dma_addr_align *mem_align); + +void hinic3_dma_free_coherent_align(void *dev_hdl, + struct hinic3_dma_addr_align *mem_align); + +typedef enum hinic3_wait_return (*wait_cpl_handler)(void *priv_data); + +int hinic3_wait_for_timeout(void *priv_data, wait_cpl_handler handler, + u32 wait_total_ms, u32 wait_once_us); + +/* func_attr.glb_func_idx, global function index */ +u16 hinic3_global_func_id(void *hwdev); + +int hinic3_global_func_id_get(void *hwdev, u16 *func_id); + +/* func_attr.p2p_idx, belongs to which pf */ +u8 hinic3_pf_id_of_vf(void *hwdev); + +/* func_attr.itf_idx, pcie interface index */ +u8 hinic3_pcie_itf_id(void *hwdev); +int hinic3_get_vfid_by_vfpci(void *hwdev, struct pci_dev *pdev, u16 *global_func_id); +/* func_attr.vf_in_pf, the vf offset in pf */ +u8 hinic3_vf_in_pf(void 
*hwdev); + +/* func_attr.func_type, 0-PF 1-VF 2-PPF */ +enum func_type hinic3_func_type(void *hwdev); + +/* The PF func_attr.glb_pf_vf_offset, + * PF use only + */ +u16 hinic3_glb_pf_vf_offset(void *hwdev); + +/* func_attr.mpf_idx, mpf global function index, + * This value is valid only when it is PF + */ +u8 hinic3_mpf_idx(void *hwdev); + +u8 hinic3_ppf_idx(void *hwdev); + +/* func_attr.intr_num, MSI-X table entry in function */ +u16 hinic3_intr_num(void *hwdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/include/hinic3_cqm.h b/drivers/net/ethernet/huawei/hinic3/include/hinic3_cqm.h new file mode 100644 index 000000000..6653460fc --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/include/hinic3_cqm.h @@ -0,0 +1,364 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef CQM_H +#define CQM_H + +#include <linux/completion.h> + +#ifndef HIUDK_SDK + +#include "hinic3_cqm_define.h" +#include "vram_common.h" + +#define CQM_SUCCESS 0 +#define CQM_FAIL (-1) +#define CQM_CONTINUE 1 + +#define CQM_WQE_WF_LINK 1 +#define CQM_WQE_WF_NORMAL 0 + +#define CQM_QUEUE_LINK_MODE 0 +#define CQM_QUEUE_RING_MODE 1 +#define CQM_QUEUE_TOE_SRQ_LINK_MODE 2 +#define CQM_QUEUE_RDMA_QUEUE_MODE 3 + +struct tag_cqm_linkwqe { + u32 rsv1 : 14; + u32 wf : 1; + u32 rsv2 : 14; + u32 ctrlsl : 2; + u32 o : 1; + + u32 rsv3 : 31; + u32 lp : 1; /* lp define o-bit is flipping */ + + u32 next_page_gpa_h; /* Record the upper 32 bits of the PADDR of the next page */ + u32 next_page_gpa_l; /* Record the lower 32 bits of the PADDR of the next page */ + + u32 next_buffer_addr_h; /* Record the upper 32 bits of the VADDR of the next page */ + u32 next_buffer_addr_l; /* Record the lower 32 bits of the VADDR of the next page */ +}; + +/* The WQE size cannot exceed the common RQE size. 
*/ +struct tag_cqm_srq_linkwqe { + struct tag_cqm_linkwqe linkwqe; + u32 current_buffer_gpa_h; + u32 current_buffer_gpa_l; + u32 current_buffer_addr_h; + u32 current_buffer_addr_l; + + u32 fast_link_page_addr_h; + u32 fast_link_page_addr_l; + + u32 fixed_next_buffer_addr_h; + u32 fixed_next_buffer_addr_l; +}; + +/* First 64B of standard 128B WQE */ +union tag_cqm_linkwqe_first64B { + struct tag_cqm_linkwqe basic_linkwqe; + struct tag_cqm_srq_linkwqe toe_srq_linkwqe; + u32 value[16]; +}; + +/* Last 64 bytes of the standard 128-byte WQE */ +struct tag_cqm_linkwqe_second64B { + u32 rsvd0[4]; + u32 rsvd1[4]; + union { + struct { + u32 rsvd0[3]; + u32 rsvd1 : 29; + u32 toe_o : 1; + u32 resvd2 : 2; + } bs; + u32 value[4]; + } third_16B; + + union { + struct { + u32 rsvd0[2]; + u32 rsvd1 : 31; + u32 ifoe_o : 1; + u32 rsvd2; + } bs; + u32 value[4]; + } forth_16B; +}; + +/* Standard 128B WQE structure */ +struct tag_cqm_linkwqe_128B { + union tag_cqm_linkwqe_first64B first64B; + struct tag_cqm_linkwqe_second64B second64B; +}; + +enum cqm_aeq_event_type { + CQM_AEQ_BASE_T_NIC = 0, + CQM_AEQ_BASE_T_ROCE = 16, + CQM_AEQ_BASE_T_FC = 48, + CQM_AEQ_BASE_T_IOE = 56, + CQM_AEQ_BASE_T_TOE = 64, + CQM_AEQ_BASE_T_VBS = 96, + CQM_AEQ_BASE_T_IPSEC = 112, + CQM_AEQ_BASE_T_MAX = 128 +}; + +struct tag_service_register_template { + u32 service_type; + u32 srq_ctx_size; + u32 scq_ctx_size; + void *service_handle; /* The ceq/aeq function is called back */ + void (*shared_cq_ceq_callback)(void *service_handle, u32 cqn, void *cq_priv); + void (*embedded_cq_ceq_callback)(void *service_handle, u32 xid, void *qpc_priv); + void (*no_cq_ceq_callback)(void *service_handle, u32 xid, u32 qid, void *qpc_priv); + u8 (*aeq_level_callback)(void *service_handle, u8 event_type, u8 *val); + void (*aeq_callback)(void *service_handle, u8 event_type, u8 *val); +}; + +enum cqm_object_type { + CQM_OBJECT_ROOT_CTX = 0, ///<0:root context + CQM_OBJECT_SERVICE_CTX, ///<1:QPC + CQM_OBJECT_MPT, ///<2:RDMA + + CQM_OBJECT_NONRDMA_EMBEDDED_RQ = 10, + CQM_OBJECT_NONRDMA_EMBEDDED_SQ, + CQM_OBJECT_NONRDMA_SRQ, + CQM_OBJECT_NONRDMA_EMBEDDED_CQ, + CQM_OBJECT_NONRDMA_SCQ, + + CQM_OBJECT_RESV = 20, + + CQM_OBJECT_RDMA_QP = 30, + CQM_OBJECT_RDMA_SRQ, + CQM_OBJECT_RDMA_SCQ, + + CQM_OBJECT_MTT = 50, + CQM_OBJECT_RDMARC, +}; + +#define CQM_INDEX_INVALID ~(0U) +#define CQM_INDEX_RESERVED (0xfffff) + +#define CQM_RDMA_Q_ROOM_1 (1) +#define CQM_RDMA_Q_ROOM_2 (2) + +#define CQM_HARDWARE_DOORBELL (1) +#define CQM_SOFTWARE_DOORBELL (2) + +struct tag_cqm_buf_list { + void *va; + dma_addr_t pa; + u32 refcount; +}; + +struct tag_cqm_buf { + struct tag_cqm_buf_list *buf_list; + struct tag_cqm_buf_list direct; + u32 page_number; + u32 buf_number; + u32 buf_size; + struct vram_buf_info buf_info; + u32 bat_entry_type; +}; + +struct completion; + +struct tag_cqm_object { + u32 service_type; + u32 object_type; + u32 object_size; + atomic_t refcount; + struct completion free; + void *cqm_handle; +}; + +struct tag_cqm_qpc_mpt { + struct tag_cqm_object object; + u32 xid; + dma_addr_t paddr; + void *priv; + u8 *vaddr; +}; + +struct tag_cqm_queue_header { + u64 doorbell_record; + u64 ci_record; + u64 rsv1; + u64 rsv2; +}; + +struct tag_cqm_queue { + struct tag_cqm_object object; + u32 index; + void *priv; + u32 current_q_doorbell; + u32 current_q_room; + struct tag_cqm_buf q_room_buf_1; + struct tag_cqm_buf q_room_buf_2; + struct tag_cqm_queue_header *q_header_vaddr; + dma_addr_t q_header_paddr; + u8 *q_ctx_vaddr; + dma_addr_t q_ctx_paddr; + u32 valid_wqe_num; + u8 
*tail_container; + u8 *head_container; + u8 queue_link_mode; +}; + +struct tag_cqm_mtt_rdmarc { + struct tag_cqm_object object; + u32 index_base; + u32 index_number; + u8 *vaddr; +}; + +struct tag_cqm_cmd_buf { + void *buf; + dma_addr_t dma; + u16 size; +}; + +enum cqm_cmd_ack_type_e { + CQM_CMD_ACK_TYPE_CMDQ = 0, + CQM_CMD_ACK_TYPE_SHARE_CQN = 1, + CQM_CMD_ACK_TYPE_APP_CQN = 2 +}; + +#define CQM_CMD_BUF_LEN 0x800 + +#endif + +#define hiudk_cqm_object_delete(x, y) cqm_object_delete(y) +#define hiudk_cqm_object_funcid(x, y) cqm_object_funcid(y) +#define hiudk_cqm_object_offset_addr(x, y, z, m) cqm_object_offset_addr(y, z, m) +#define hiudk_cqm_object_put(x, y) cqm_object_put(y) +#define hiudk_cqm_object_resize_alloc_new(x, y, z) cqm_object_resize_alloc_new(y, z) +#define hiudk_cqm_object_resize_free_new(x, y) cqm_object_resize_free_new(y) +#define hiudk_cqm_object_resize_free_old(x, y) cqm_object_resize_free_old(y) +#define hiudk_cqm_object_share_recv_queue_add_container(x, y) \ + cqm_object_share_recv_queue_add_container(y) +#define hiudk_cqm_object_srq_add_container_free(x, y, z) cqm_object_srq_add_container_free(y, z) +#define hiudk_cqm_ring_software_db(x, y, z) cqm_ring_software_db(y, z) +#define hiudk_cqm_srq_used_rq_container_delete(x, y, z) cqm_srq_used_rq_container_delete(y, z) + +s32 cqm3_init(void *ex_handle); +void cqm3_uninit(void *ex_handle); + +s32 cqm3_service_register(void *ex_handle, + struct tag_service_register_template *service_template); +void cqm3_service_unregister(void *ex_handle, u32 service_type); +s32 cqm3_fake_vf_num_set(void *ex_handle, u16 fake_vf_num_cfg); +bool cqm3_need_secure_mem(void *ex_handle); +struct tag_cqm_queue *cqm3_object_fc_srq_create(void *ex_handle, u32 service_type, + enum cqm_object_type object_type, + u32 wqe_number, u32 wqe_size, + void *object_priv); +struct tag_cqm_queue *cqm3_object_recv_queue_create(void *ex_handle, u32 service_type, + enum cqm_object_type object_type, + u32 init_rq_num, u32 container_size, + u32 wqe_size, void *object_priv); +struct tag_cqm_queue *cqm3_object_share_recv_queue_create(void *ex_handle, u32 service_type, + enum cqm_object_type object_type, + u32 container_number, u32 container_size, + u32 wqe_size); +struct tag_cqm_qpc_mpt *cqm3_object_qpc_mpt_create(void *ex_handle, u32 service_type, + enum cqm_object_type object_type, + u32 object_size, void *object_priv, + u32 index, bool low2bit_align_en); + +struct tag_cqm_queue *cqm3_object_nonrdma_queue_create(void *ex_handle, u32 service_type, + enum cqm_object_type object_type, + u32 wqe_number, u32 wqe_size, + void *object_priv); +struct tag_cqm_queue *cqm3_object_rdma_queue_create(void *ex_handle, u32 service_type, + enum cqm_object_type object_type, + u32 object_size, void *object_priv, + bool room_header_alloc, u32 xid); +struct tag_cqm_mtt_rdmarc *cqm3_object_rdma_table_get(void *ex_handle, u32 service_type, + enum cqm_object_type object_type, + u32 index_base, u32 index_number); +struct tag_cqm_object *cqm3_object_get(void *ex_handle, enum cqm_object_type object_type, + u32 index, bool bh); +struct tag_cqm_cmd_buf *cqm3_cmd_alloc(void *ex_handle); +void cqm3_cmd_free(void *ex_handle, struct tag_cqm_cmd_buf *cmd_buf); + +s32 cqm3_send_cmd_box(void *ex_handle, u8 mod, u8 cmd, + struct tag_cqm_cmd_buf *buf_in, struct tag_cqm_cmd_buf *buf_out, + u64 *out_param, u32 timeout, u16 channel); + +s32 cqm3_lb_send_cmd_box(void *ex_handle, u8 mod, u8 cmd, u8 cos_id, + struct tag_cqm_cmd_buf *buf_in, struct tag_cqm_cmd_buf *buf_out, + u64 *out_param, u32 timeout, u16 
channel); +s32 cqm3_lb_send_cmd_box_async(void *ex_handle, u8 mod, u8 cmd, u8 cos_id, + struct tag_cqm_cmd_buf *buf_in, u16 channel); + +s32 cqm3_send_cmd_imm(void *ex_handle, u8 mod, u8 cmd, + struct tag_cqm_cmd_buf *buf_in, u64 *out_param, + u32 timeout, u16 channel); + +s32 cqm3_db_addr_alloc(void *ex_handle, void __iomem **db_addr, void __iomem **dwqe_addr); +void cqm3_db_addr_free(void *ex_handle, const void __iomem *db_addr, + void __iomem *dwqe_addr); + +void *cqm3_get_db_addr(void *ex_handle, u32 service_type); +s32 cqm3_ring_hardware_db(void *ex_handle, u32 service_type, u8 db_count, u64 db); + +s32 cqm3_get_hardware_db_addr(void *ex_handle, u64 *addr, u32 service_type); + +s32 cqm_ring_hardware_db_fc(void *ex_handle, u32 service_type, u8 db_count, u8 pagenum, u64 db); +s32 cqm3_ring_hardware_db_update_pri(void *ex_handle, u32 service_type, u8 db_count, u64 db); +s32 cqm3_bloomfilter_inc(void *ex_handle, u16 func_id, u64 id); +s32 cqm3_bloomfilter_dec(void *ex_handle, u16 func_id, u64 id); +void *cqm3_gid_base(void *ex_handle); +void *cqm3_timer_base(void *ex_handle); +void cqm3_function_timer_clear(void *ex_handle, u32 function_id); +void cqm3_function_hash_buf_clear(void *ex_handle, s32 global_funcid); +s32 cqm3_ring_direct_wqe_db(void *ex_handle, u32 service_type, u8 db_count, void *direct_wqe); +s32 cqm_ring_direct_wqe_db_fc(void *ex_handle, u32 service_type, void *direct_wqe); + +s32 cqm3_object_share_recv_queue_add_container(struct tag_cqm_queue *common); +s32 cqm3_object_srq_add_container_free(struct tag_cqm_queue *common, u8 **container_addr); + +s32 cqm3_ring_software_db(struct tag_cqm_object *object, u64 db_record); +void cqm3_object_put(struct tag_cqm_object *object); + +/** + * @brief Obtains the function ID of an object. + * @param Object Pointer + * @retval >=0 function's ID + * @retval -1 Fails + */ +s32 cqm3_object_funcid(struct tag_cqm_object *object); + +s32 cqm3_object_resize_alloc_new(struct tag_cqm_object *object, u32 object_size); +void cqm3_object_resize_free_new(struct tag_cqm_object *object); +void cqm3_object_resize_free_old(struct tag_cqm_object *object); + +/** + * @brief Releasing a container + * @param Object Pointer + * @param container Pointer to the container to be released + * @retval void + */ +void cqm3_srq_used_rq_container_delete(struct tag_cqm_object *object, u8 *container); + +void cqm3_object_delete(struct tag_cqm_object *object); + +/** + * @brief Obtains the PADDR and VADDR of the specified offset in the object buffer. + * @details Only rdma table lookup is supported + * @param Object Pointer + * @param offset For an RDMA table, the offset is the absolute index number. + * @param paddr The physical address is returned only for the RDMA table. 
+ * @retval u8 *buffer Virtual address at specified offset + */ +u8 *cqm3_object_offset_addr(struct tag_cqm_object *object, u32 offset, dma_addr_t *paddr); + +s32 cqm3_dtoe_share_recv_queue_create(void *ex_handle, u32 contex_size, + u32 *index_count, u32 *index); + +void cqm3_dtoe_free_srq_bitmap_index(void *ex_handle, u32 index_count, u32 index); + +#endif /* CQM_H */ + diff --git a/drivers/net/ethernet/huawei/hinic3/include/hinic3_cqm_define.h b/drivers/net/ethernet/huawei/hinic3/include/hinic3_cqm_define.h new file mode 100644 index 000000000..608b12510 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/include/hinic3_cqm_define.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC3_CQM_DEFINE_H +#define HINIC3_CQM_DEFINE_H +#if !defined(HIUDK_ULD) && !defined(HIUDK_SDK_ADPT) +#define cqm_init cqm3_init +#define cqm_uninit cqm3_uninit +#define cqm_service_register cqm3_service_register +#define cqm_service_unregister cqm3_service_unregister +#define cqm_bloomfilter_dec cqm3_bloomfilter_dec +#define cqm_bloomfilter_inc cqm3_bloomfilter_inc +#define cqm_cmd_alloc cqm3_cmd_alloc +#define cqm_get_hardware_db_addr cqm3_get_hardware_db_addr +#define cqm_cmd_free cqm3_cmd_free +#define cqm_send_cmd_box cqm3_send_cmd_box +#define cqm_lb_send_cmd_box cqm3_lb_send_cmd_box +#define cqm_lb_send_cmd_box_async cqm3_lb_send_cmd_box_async +#define cqm_send_cmd_imm cqm3_send_cmd_imm +#define cqm_db_addr_alloc cqm3_db_addr_alloc +#define cqm_db_addr_free cqm3_db_addr_free +#define cqm_ring_hardware_db cqm3_ring_hardware_db +#define cqm_ring_software_db cqm3_ring_software_db +#define cqm_object_fc_srq_create cqm3_object_fc_srq_create +#define cqm_object_share_recv_queue_create cqm3_object_share_recv_queue_create +#define cqm_object_share_recv_queue_add_container cqm3_object_share_recv_queue_add_container +#define cqm_object_srq_add_container_free cqm3_object_srq_add_container_free +#define cqm_object_recv_queue_create cqm3_object_recv_queue_create +#define cqm_object_qpc_mpt_create cqm3_object_qpc_mpt_create +#define cqm_object_nonrdma_queue_create cqm3_object_nonrdma_queue_create +#define cqm_object_rdma_queue_create cqm3_object_rdma_queue_create +#define cqm_object_rdma_table_get cqm3_object_rdma_table_get +#define cqm_object_delete cqm3_object_delete +#define cqm_object_offset_addr cqm3_object_offset_addr +#define cqm_object_get cqm3_object_get +#define cqm_object_put cqm3_object_put +#define cqm_object_funcid cqm3_object_funcid +#define cqm_object_resize_alloc_new cqm3_object_resize_alloc_new +#define cqm_object_resize_free_new cqm3_object_resize_free_new +#define cqm_object_resize_free_old cqm3_object_resize_free_old +#define cqm_function_timer_clear cqm3_function_timer_clear +#define cqm_function_hash_buf_clear cqm3_function_hash_buf_clear +#define cqm_srq_used_rq_container_delete cqm3_srq_used_rq_container_delete +#define cqm_timer_base cqm3_timer_base +#define cqm_dtoe_free_srq_bitmap_index cqm3_dtoe_free_srq_bitmap_index +#define cqm_dtoe_share_recv_queue_create cqm3_dtoe_share_recv_queue_create +#define cqm_get_db_addr cqm3_get_db_addr +#define cqm_ring_direct_wqe_db cqm3_ring_direct_wqe_db +#define cqm_fake_vf_num_set cqm3_fake_vf_num_set +#define cqm_need_secure_mem cqm3_need_secure_mem +#endif +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/include/hinic3_lld.h b/drivers/net/ethernet/huawei/hinic3/include/hinic3_lld.h new file mode 100644 index 000000000..9a9bfe280 --- /dev/null +++ 
b/drivers/net/ethernet/huawei/hinic3/include/hinic3_lld.h @@ -0,0 +1,223 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC3_LLD_H +#define HINIC3_LLD_H + +#include "hinic3_crm.h" + +#define WAIT_TIME 1 + +#ifdef HIUDK_SDK + +int hwsdk_set_vf_load_state(struct hinic3_lld_dev *lld_dev, bool vf_load_state); + +int hwsdk_set_vf_service_load(struct hinic3_lld_dev *lld_dev, u16 service, + bool vf_srv_load); + +int hwsdk_set_vf_service_state(struct hinic3_lld_dev *lld_dev, u16 vf_func_id, + u16 service, bool en); +#else +struct hinic3_lld_dev { + struct pci_dev *pdev; + void *hwdev; +}; + +struct hinic3_uld_info { + /* When the function does not need to initialize the corresponding uld, + * @probe needs to return 0 and uld_dev is set to NULL; + * if uld_dev is NULL, @remove will not be called when uninstalling + */ + int (*probe)(struct hinic3_lld_dev *lld_dev, void **uld_dev, char *uld_dev_name); + void (*remove)(struct hinic3_lld_dev *lld_dev, void *uld_dev); + int (*suspend)(struct hinic3_lld_dev *lld_dev, void *uld_dev, pm_message_t state); + int (*resume)(struct hinic3_lld_dev *lld_dev, void *uld_dev); + void (*event)(struct hinic3_lld_dev *lld_dev, void *uld_dev, + struct hinic3_event_info *event); + int (*ioctl)(void *uld_dev, u32 cmd, const void *buf_in, u32 in_size, + void *buf_out, u32 *out_size); +}; +#endif + +#ifndef HIUDK_ULD +/* hinic3_register_uld - register an upper-layer driver + * @type: uld service type + * @uld_info: uld callback + * + * Registers an upper-layer driver. + * Traverse existing devices and call @probe to initialize the uld device. + */ +int hinic3_register_uld(enum hinic3_service_type type, struct hinic3_uld_info *uld_info); + +/** + * hinic3_unregister_uld - unregister an upper-layer driver + * @type: uld service type + * + * Traverse existing devices and call @remove to uninstall the uld device. + * Unregisters an existing upper-layer driver. + */ +void hinic3_unregister_uld(enum hinic3_service_type type); + +void lld_hold(void); +void lld_put(void); + +/** + * @brief hinic3_get_lld_dev_by_chip_name - get lld device by chip name + * @param chip_name: chip name + * + * The value of lld_dev reference increases when lld_dev is obtained. The caller needs + * to release the reference by calling lld_dev_put. + **/ +struct hinic3_lld_dev *hinic3_get_lld_dev_by_chip_name(const char *chip_name); + +/** + * @brief lld_dev_hold - get reference to lld_dev + * @param dev: lld device + * + * Hold reference to device to keep it from being freed + **/ +void lld_dev_hold(struct hinic3_lld_dev *dev); + +/** + * @brief lld_dev_put - release reference to lld_dev + * @param dev: lld device + * + * Release reference to device to allow it to be freed + **/ +void lld_dev_put(struct hinic3_lld_dev *dev); + +/** + * @brief hinic3_get_lld_dev_by_dev_name - get lld device by uld device name + * @param dev_name: uld device name + * @param type: uld service type, When the type is SERVICE_T_MAX, try to match + * all ULD names to get uld_dev + * + * The value of lld_dev reference increases when lld_dev is obtained. The caller needs + * to release the reference by calling lld_dev_put. 
+ **/ +struct hinic3_lld_dev *hinic3_get_lld_dev_by_dev_name(const char *dev_name, + enum hinic3_service_type type); + +/** + * @brief hinic3_get_lld_dev_by_dev_name_unsafe - get lld device by uld device name + * @param dev_name: uld device name + * @param type: uld service type, When the type is SERVICE_T_MAX, try to match + * all ULD names to get uld_dev + * + * hinic3_get_lld_dev_by_dev_name_unsafe() is completely analogous to + * hinic3_get_lld_dev_by_dev_name(), The only difference is that the reference + * of lld_dev is not increased when lld_dev is obtained. + * + * The caller must ensure that lld_dev will not be freed during the remove process + * when using lld_dev. + **/ +struct hinic3_lld_dev *hinic3_get_lld_dev_by_dev_name_unsafe(const char *dev_name, + enum hinic3_service_type type); + +/** + * @brief hinic3_get_lld_dev_by_chip_and_port - get lld device by chip name and port id + * @param chip_name: chip name + * @param port_id: port id + **/ +struct hinic3_lld_dev *hinic3_get_lld_dev_by_chip_and_port(const char *chip_name, u8 port_id); + +/** + * @brief hinic3_get_ppf_dev - get ppf device without depend on input parameter + **/ +void *hinic3_get_ppf_dev(void); + +/** + * @brief hinic3_get_ppf_lld_dev - get ppf lld device by current function's lld device + * @param lld_dev: current function's lld device + * + * The value of lld_dev reference increases when lld_dev is obtained. The caller needs + * to release the reference by calling lld_dev_put. + **/ +struct hinic3_lld_dev *hinic3_get_ppf_lld_dev(struct hinic3_lld_dev *lld_dev); + +/** + * @brief hinic3_get_ppf_lld_dev_unsafe - get ppf lld device by current function's lld device + * @param lld_dev: current function's lld device + * + * hinic3_get_ppf_lld_dev_unsafe() is completely analogous to hinic3_get_ppf_lld_dev(), + * The only difference is that the reference of lld_dev is not increased when lld_dev is obtained. + * + * The caller must ensure that ppf's lld_dev will not be freed during the remove process + * when using ppf lld_dev. + **/ +struct hinic3_lld_dev *hinic3_get_ppf_lld_dev_unsafe(struct hinic3_lld_dev *lld_dev); + +/** + * @brief uld_dev_hold - get reference to uld_dev + * @param lld_dev: lld device + * @param type: uld service type + * + * Hold reference to uld device to keep it from being freed + **/ +void uld_dev_hold(struct hinic3_lld_dev *lld_dev, enum hinic3_service_type type); + +/** + * @brief uld_dev_put - release reference to lld_dev + * @param dev: lld device + * @param type: uld service type + * + * Release reference to uld device to allow it to be freed + **/ +void uld_dev_put(struct hinic3_lld_dev *lld_dev, enum hinic3_service_type type); + +/** + * @brief hinic3_get_uld_dev - get uld device by lld device + * @param lld_dev: lld device + * @param type: uld service type + * + * The value of uld_dev reference increases when uld_dev is obtained. The caller needs + * to release the reference by calling uld_dev_put. + **/ +void *hinic3_get_uld_dev(struct hinic3_lld_dev *lld_dev, enum hinic3_service_type type); + +/** + * @brief hinic3_get_uld_dev_unsafe - get uld device by lld device + * @param lld_dev: lld device + * @param type: uld service type + * + * hinic3_get_uld_dev_unsafe() is completely analogous to hinic3_get_uld_dev(), + * The only difference is that the reference of uld_dev is not increased when uld_dev is obtained. + * + * The caller must ensure that uld_dev will not be freed during the remove process + * when using uld_dev. 
+ **/ +void *hinic3_get_uld_dev_unsafe(struct hinic3_lld_dev *lld_dev, enum hinic3_service_type type); + +/** + * @brief hinic3_get_chip_name - get chip name by lld device + * @param lld_dev: lld device + * @param chip_name: String for storing the chip name + * @param max_len: Maximum number of characters to be copied for chip_name + **/ +int hinic3_get_chip_name(struct hinic3_lld_dev *lld_dev, char *chip_name, u16 max_len); + +struct card_node *hinic3_get_chip_node_by_lld(struct hinic3_lld_dev *lld_dev); + +struct hinic3_hwdev *hinic3_get_sdk_hwdev_by_lld(struct hinic3_lld_dev *lld_dev); + +bool hinic3_get_vf_service_load(struct pci_dev *pdev, u16 service); + +int hinic3_set_vf_service_load(struct pci_dev *pdev, u16 service, + bool vf_srv_load); + +int hinic3_set_vf_service_state(struct pci_dev *pdev, u16 vf_func_id, + u16 service, bool en); + +bool hinic3_get_vf_load_state(struct pci_dev *pdev); + +int hinic3_set_vf_load_state(struct pci_dev *pdev, bool vf_load_state); + +int hinic3_attach_nic(struct hinic3_lld_dev *lld_dev); + +void hinic3_detach_nic(const struct hinic3_lld_dev *lld_dev); + +int hinic3_attach_service(const struct hinic3_lld_dev *lld_dev, enum hinic3_service_type type); +void hinic3_detach_service(const struct hinic3_lld_dev *lld_dev, enum hinic3_service_type type); +const char **hinic3_get_uld_names(void); +#endif +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/include/hinic3_profile.h b/drivers/net/ethernet/huawei/hinic3/include/hinic3_profile.h new file mode 100644 index 000000000..e0bd2560b --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/include/hinic3_profile.h @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef HINIC3_PROFILE_H +#define HINIC3_PROFILE_H + +typedef bool (*hinic3_is_match_prof)(void *device); +typedef void *(*hinic3_init_prof_attr)(void *device); +typedef void (*hinic3_deinit_prof_attr)(void *porf_attr); + +enum prof_adapter_type { + PROF_ADAP_TYPE_INVALID, + PROF_ADAP_TYPE_PANGEA = 1, + + /* Add prof adapter type before default */ + PROF_ADAP_TYPE_DEFAULT, +}; + +/** + * struct hinic3_prof_adapter - custom scene's profile adapter + * @type: adapter type + * @match: Check whether the current function is used in the custom scene. + * Implemented in the current source file + * @init: When @match return true, the initialization function called in probe. + * Implemented in the source file of the custom scene + * @deinit: When @match return true, the deinitialization function called when + * remove. Implemented in the source file of the custom scene + */ +struct hinic3_prof_adapter { + enum prof_adapter_type type; + hinic3_is_match_prof match; + hinic3_init_prof_attr init; + hinic3_deinit_prof_attr deinit; +}; + +#ifdef static +#undef static +#define LLT_STATIC_DEF_SAVED +#endif + +static inline struct hinic3_prof_adapter *hinic3_prof_init(void *device, + struct hinic3_prof_adapter *adap_objs, + int num_adap, void **prof_attr) +{ + struct hinic3_prof_adapter *prof_obj = NULL; + int i; + + for (i = 0; i < num_adap; i++) { + prof_obj = &adap_objs[i]; + if (!(prof_obj->match && prof_obj->match(device))) + continue; + + *prof_attr = prof_obj->init ? 
prof_obj->init(device) : NULL; + + return prof_obj; + } + + return NULL; +} + +static inline void hinic3_prof_deinit(struct hinic3_prof_adapter *prof_obj, void *prof_attr) +{ + if (!prof_obj) + return; + + if (prof_obj->deinit) + prof_obj->deinit(prof_attr); +} + +/* module-level interface */ +#ifdef CONFIG_MODULE_PROF +struct hinic3_module_ops { + int (*module_prof_init)(void); + void (*module_prof_exit)(void); + void (*probe_fault_process)(void *pdev, u16 level); + int (*probe_pre_process)(void *pdev); + void (*probe_pre_unprocess)(void *pdev); +}; + +struct hinic3_module_ops *hinic3_get_module_prof_ops(void); + +static inline void hinic3_probe_fault_process(void *pdev, u16 level) +{ + struct hinic3_module_ops *ops = hinic3_get_module_prof_ops(); + + if (ops && ops->probe_fault_process) + ops->probe_fault_process(pdev, level); +} + +static inline int hinic3_module_pre_init(void) +{ + struct hinic3_module_ops *ops = hinic3_get_module_prof_ops(); + + if (!ops || !ops->module_prof_init) + return -EINVAL; + + return ops->module_prof_init(); +} + +static inline void hinic3_module_post_exit(void) +{ + struct hinic3_module_ops *ops = hinic3_get_module_prof_ops(); + + if (ops && ops->module_prof_exit) + ops->module_prof_exit(); +} + +static inline int hinic3_probe_pre_process(void *pdev) +{ + struct hinic3_module_ops *ops = hinic3_get_module_prof_ops(); + + if (!ops || !ops->probe_pre_process) + return -EINVAL; + + return ops->probe_pre_process(pdev); +} + +static inline void hinic3_probe_pre_unprocess(void *pdev) +{ + struct hinic3_module_ops *ops = hinic3_get_module_prof_ops(); + + if (ops && ops->probe_pre_unprocess) + ops->probe_pre_unprocess(pdev); +} +#else +static inline void hinic3_probe_fault_process(void *pdev, u16 level) { }; + +static inline int hinic3_module_pre_init(void) +{ + return 0; +} + +static inline void hinic3_module_post_exit(void) { }; + +static inline int hinic3_probe_pre_process(void *pdev) +{ + return 0; +} + +static inline void hinic3_probe_pre_unprocess(void *pdev) { }; +#endif + +#ifdef LLT_STATIC_DEF_SAVED +#define static +#undef LLT_STATIC_DEF_SAVED +#endif + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/include/mpu/mag_mpu_cmd.h b/drivers/net/ethernet/huawei/hinic3/include/mpu/mag_mpu_cmd.h new file mode 100644 index 000000000..97d34f0c2 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/include/mpu/mag_mpu_cmd.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2024 Huawei Technologies Co., Ltd */ + +#ifndef MAG_MPU_CMD_H +#define MAG_MPU_CMD_H + +/* Definition of the SerDes/MAG message command word */ +enum mag_cmd { + SERDES_CMD_PROCESS = 0, /* serdes cmd @see struct serdes_cmd_in */ + + MAG_CMD_SET_PORT_CFG = 1, /* set port cfg function @see struct mag_cmd_set_port_cfg */ + MAG_CMD_SET_PORT_ADAPT = 2, /* set port adapt mode @see struct mag_cmd_set_port_adapt */ + MAG_CMD_CFG_LOOPBACK_MODE = 3, /* set port loopback mode @see mag_cmd_cfg_loopback_mode */ + + MAG_CMD_GET_PORT_ENABLE = 5, /* get port enable status @see mag_cmd_get_port_enable */ + MAG_CMD_SET_PORT_ENABLE = 6, /* set port enable mode @see mag_cmd_set_port_enable */ + MAG_CMD_GET_LINK_STATUS = 7, /* get port link status @see mag_cmd_get_link_status */ + MAG_CMD_SET_LINK_FOLLOW = 8, /* set port link_follow mode @see mag_cmd_set_link_follow */ + MAG_CMD_SET_PMA_ENABLE = 9, /* set pma enable mode @see struct mag_cmd_set_pma_enable */ + MAG_CMD_CFG_FEC_MODE = 10, /* set port fec mode @see struct mag_cmd_cfg_fec_mode */ + MAG_CMD_GET_BOND_STATUS = 11, /* reserved 
for future use */ + + MAG_CMD_CFG_AN_TYPE = 12, /* reserved for future use */ + MAG_CMD_CFG_LINK_TIME = 13, /* get link time @see struct mag_cmd_get_link_time */ + + MAG_CMD_SET_PANGEA_ADAPT = 15, /* set pangea adapt mode @see mag_cmd_set_pangea_adapt */ + + /* Bios link configuration dependency 30-49 */ + MAG_CMD_CFG_BIOS_LINK_CFG = 31, /* reserved for future use */ + MAG_CMD_RESTORE_LINK_CFG = 32, /* restore link cfg @see mag_cmd_restore_link_cfg */ + MAG_CMD_ACTIVATE_BIOS_LINK_CFG = 33, /* active bios link cfg */ + + /* Optical module、LED, PHY and other peripheral configuration management 50 - 99 */ + /* LED */ + MAG_CMD_SET_LED_CFG = 50, /* set led cfg @see struct mag_cmd_set_led_cfg */ + + /* PHY */ + MAG_CMD_GET_PHY_INIT_STATUS = 55, /* reserved for future use */ + + /* Optical module */ + MAG_CMD_GET_XSFP_INFO = 60, /* get xsfp info @see struct mag_cmd_get_xsfp_info */ + MAG_CMD_SET_XSFP_ENABLE = 61, /* set xsfp enable mode @see mag_cmd_set_xsfp_enable */ + MAG_CMD_GET_XSFP_PRESENT = 62, /* get xsfp present status @see mag_cmd_get_xsfp_present */ + MAG_CMD_SET_XSFP_RW = 63, /* sfp/qsfp single byte read/write, @see mag_cmd_set_xsfp_rw */ + MAG_CMD_CFG_XSFP_TEMPERATURE = 64, /* get xsfp temp @see mag_cmd_sfp_temp_out_info */ + + /* Event reported 100-149 */ + MAG_CMD_WIRE_EVENT = 100, + MAG_CMD_LINK_ERR_EVENT = 101, + + /* DFX、Counter */ + MAG_CMD_EVENT_PORT_INFO = 150, /* get port event info @see mag_cmd_event_port_info */ + MAG_CMD_GET_PORT_STAT = 151, /* get port state @see struct mag_cmd_get_port_stat */ + MAG_CMD_CLR_PORT_STAT = 152, /* clear port state @see struct mag_cmd_port_stats_info */ + MAG_CMD_GET_PORT_INFO = 153, /* get port info @see struct mag_cmd_get_port_info */ + MAG_CMD_GET_PCS_ERR_CNT = 154, /* pcs err count @see struct mag_cmd_event_port_info */ + MAG_CMD_GET_MAG_CNT = 155, /* fec code count @see struct mag_cmd_get_mag_cnt */ + MAG_CMD_DUMP_ANTRAIN_INFO = 156, /* dump anlt info @see mag_cmd_dump_antrain_info */ + + /* patch reserve cmd */ + MAG_CMD_PATCH_RSVD_0 = 200, + MAG_CMD_PATCH_RSVD_1 = 201, + MAG_CMD_PATCH_RSVD_2 = 202, + MAG_CMD_PATCH_RSVD_3 = 203, + MAG_CMD_PATCH_RSVD_4 = 204, + + MAG_CMD_MAX = 0xFF +}; + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_board_defs.h b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_board_defs.h new file mode 100644 index 000000000..caaba5dfb --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_board_defs.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2024 Huawei Technologies Co., Ltd */ + +#ifndef MPU_BOARD_DEFS_H +#define MPU_BOARD_DEFS_H + +#define BOARD_TYPE_TEST_RANGE_START 1 +#define BOARD_TYPE_TEST_RANGE_END 29 +#define BOARD_TYPE_STRG_RANGE_START 30 +#define BOARD_TYPE_STRG_RANGE_END 99 +#define BOARD_TYPE_CAL_RANGE_START 100 +#define BOARD_TYPE_CAL_RANGE_END 169 +#define BOARD_TYPE_CLD_RANGE_START 170 +#define BOARD_TYPE_CLD_RANGE_END 239 +#define BOARD_TYPE_RSVD_RANGE_START 240 +#define BOARD_TYPE_RSVD_RANGE_END 255 + +enum board_type_define_e { + BOARD_TYPE_MPU_DEFAULT = 0, /* Default config */ + BOARD_TYPE_TEST_EVB_4X25G = 1, /* EVB Board */ + BOARD_TYPE_TEST_CEM_2X100G = 2, /* 2X100G CEM Card */ + BOARD_TYPE_STRG_SMARTIO_4X32G_FC = 30, /* 4X32G SmartIO FC Card */ + BOARD_TYPE_STRG_SMARTIO_4X25G_TIOE = 31, /* 4X25GE SmartIO TIOE Card */ + BOARD_TYPE_STRG_SMARTIO_4X25G_ROCE = 32, /* 4X25GE SmartIO ROCE Card */ + BOARD_TYPE_STRG_SMARTIO_4X25G_ROCE_AA = 33, /* 4X25GE SmartIO ROCE_AA Card */ + BOARD_TYPE_STRG_SMARTIO_4X25G_SRIOV = 34, /* 
4X25GE SmartIO container Card */ + BOARD_TYPE_STRG_SMARTIO_4X25G_SRIOV_SW = 35, /* 4X25GE SmartIO container switch Card */ + BOARD_TYPE_STRG_4X25G_COMSTORAGE = 36, /* 4X25GE compute storage Onboard Card */ + BOARD_TYPE_STRG_2X100G_TIOE = 40, /* 2X100G SmartIO TIOE Card */ + BOARD_TYPE_STRG_2X100G_ROCE = 41, /* 2X100G SmartIO ROCE Card */ + BOARD_TYPE_STRG_2X100G_ROCE_AA = 42, /* 2X100G SmartIO ROCE_AA Card */ + BOARD_TYPE_CAL_2X25G_NIC_75MPPS = 100, /* 2X25G ETH Standard card 75MPPS */ + BOARD_TYPE_CAL_2X25G_NIC_40MPPS = 101, /* 2X25G ETH Standard card 40MPPS */ + BOARD_TYPE_CAL_2X100G_DPU = 102, /* 2x100G DPU Card */ + BOARD_TYPE_CAL_4X25G_NIC_120MPPS = 105, /* 4X25G ETH Standard card 120MPPS */ + BOARD_TYPE_CAL_4X25G_COMSTORAGE = 106, /* 4X25GE compute storage Onboard Card */ + BOARD_TYPE_CAL_2X32G_FC_HBA = 110, /* 2X32G FC HBA card */ + BOARD_TYPE_CAL_2X16G_FC_HBA = 111, /* 2X16G FC HBA card */ + BOARD_TYPE_CAL_2X100G_NIC_120MPPS = 115, /* 2X100G ETH Standard card 120MPPS */ + BOARD_TYPE_CAL_2X25G_DPU = 116, /* 2x25G DPU Card */ + BOARD_TYPE_CAL_4X25G_DPU = 118, /* 4x25G DPU Card */ + BOARD_TYPE_CLD_2X100G_SDI5_1 = 170, /* 2X100G SDI 5.1 Card */ + BOARD_TYPE_CLD_2X25G_SDI5_0_LITE = 171, /* 2x25G SDI5.0 Lite Card */ + BOARD_TYPE_CLD_2X100G_SDI5_0 = 172, /* 2x100G SDI5.0 Card */ + BOARD_TYPE_MAX_INDEX = 0xFF +}; + +static inline u32 spu_board_type_valid(u32 board_type) +{ + return ((board_type) == BOARD_TYPE_CLD_2X25G_SDI5_0_LITE) || + ((board_type) == BOARD_TYPE_CLD_2X100G_SDI5_0) || + ((board_type) == BOARD_TYPE_CAL_2X25G_DPU) || + ((board_type) == BOARD_TYPE_CAL_2X100G_DPU) || + ((board_type) == BOARD_TYPE_CAL_4X25G_DPU); +} + +static inline int board_type_is_sdi(u32 board_type) +{ + return ((board_type) == BOARD_TYPE_CLD_2X100G_SDI5_1) || + ((board_type) == BOARD_TYPE_CLD_2X25G_SDI5_0_LITE) || + ((board_type) == BOARD_TYPE_CLD_2X100G_SDI5_0); +} + +static inline int board_type_is_dpu(u32 board_type) +{ + return ((board_type) == BOARD_TYPE_CAL_2X25G_DPU) || + ((board_type) == BOARD_TYPE_CAL_2X100G_DPU) || + ((board_type) == BOARD_TYPE_CAL_4X25G_DPU); +} + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_cmd_base_defs.h b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_cmd_base_defs.h new file mode 100644 index 000000000..89d5cc42c --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_cmd_base_defs.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2024 Huawei Technologies Co., Ltd */ + +#ifndef COMM_DEFS_H +#define COMM_DEFS_H + +#include "mgmt_msg_base.h" + +/** MPU CMD MODULE TYPE */ +enum hinic3_mod_type { + HINIC3_MOD_COMM = 0, /* HW communication module */ + HINIC3_MOD_L2NIC = 1, /* L2NIC module */ + HINIC3_MOD_ROCE = 2, + HINIC3_MOD_PLOG = 3, + HINIC3_MOD_TOE = 4, + HINIC3_MOD_FLR = 5, + HINIC3_MOD_VROCE = 6, + HINIC3_MOD_CFGM = 7, /* Configuration management */ + HINIC3_MOD_CQM = 8, + HINIC3_MOD_VMSEC = 9, + COMM_MOD_FC = 10, + HINIC3_MOD_OVS = 11, + HINIC3_MOD_DSW = 12, + HINIC3_MOD_MIGRATE = 13, + HINIC3_MOD_HILINK = 14, + HINIC3_MOD_CRYPT = 15, /* secure crypto module */ + HINIC3_MOD_VIO = 16, + HINIC3_MOD_IMU = 17, + HINIC3_MOD_DFX = 18, /* DFX */ + HINIC3_MOD_HW_MAX = 19, /* hardware max module id */ + /* Software module id, for PF/VF and multi-host */ + HINIC3_MOD_SW_FUNC = 20, + HINIC3_MOD_MAX, +}; + +/* Func reset flag, Specifies the resource to be cleaned.*/ +enum func_reset_flag_e { + RES_TYPE_FLUSH_BIT = 0, + RES_TYPE_MQM, + RES_TYPE_SMF, + RES_TYPE_PF_BW_CFG, + + RES_TYPE_COMM = 10, + 
RES_TYPE_COMM_MGMT_CH, /* clear mbox and aeq, The RES_TYPE_COMM bit must be set */ + RES_TYPE_COMM_CMD_CH, /* clear cmdq and ceq, The RES_TYPE_COMM bit must be set */ + RES_TYPE_NIC, + RES_TYPE_OVS, + RES_TYPE_VBS, + RES_TYPE_ROCE, + RES_TYPE_FC, + RES_TYPE_TOE, + RES_TYPE_IPSEC, + RES_TYPE_MAX, +}; + +#define HINIC3_COMM_RES \ + ((1 << RES_TYPE_COMM) | (1 << RES_TYPE_COMM_CMD_CH) | \ + (1 << RES_TYPE_FLUSH_BIT) | (1 << RES_TYPE_MQM) | \ + (1 << RES_TYPE_SMF) | (1 << RES_TYPE_PF_BW_CFG)) + +#define HINIC3_NIC_RES BIT(RES_TYPE_NIC) +#define HINIC3_OVS_RES BIT(RES_TYPE_OVS) +#define HINIC3_VBS_RES BIT(RES_TYPE_VBS) +#define HINIC3_ROCE_RES BIT(RES_TYPE_ROCE) +#define HINIC3_FC_RES BIT(RES_TYPE_FC) +#define HINIC3_TOE_RES BIT(RES_TYPE_TOE) +#define HINIC3_IPSEC_RES BIT(RES_TYPE_IPSEC) + +/* MODE OVS、NIC、UNKNOWN */ +#define HINIC3_WORK_MODE_OVS 0 +#define HINIC3_WORK_MODE_UNKNOWN 1 +#define HINIC3_WORK_MODE_NIC 2 + +#define DEVICE_TYPE_L2NIC 0 +#define DEVICE_TYPE_NVME 1 +#define DEVICE_TYPE_VIRTIO_NET 2 +#define DEVICE_TYPE_VIRTIO_BLK 3 +#define DEVICE_TYPE_VIRTIO_VSOCK 4 +#define DEVICE_TYPE_VIRTIO_NET_TRANSITION 5 +#define DEVICE_TYPE_VIRTIO_BLK_TRANSITION 6 +#define DEVICE_TYPE_VIRTIO_SCSI_TRANSITION 7 +#define DEVICE_TYPE_VIRTIO_HPC 8 + +enum hinic3_svc_type { + SVC_T_COMM = 0, + SVC_T_NIC, + SVC_T_OVS, + SVC_T_ROCE, + SVC_T_TOE, + SVC_T_IOE, + SVC_T_FC, + SVC_T_VBS, + SVC_T_IPSEC, + SVC_T_VIRTIO, + SVC_T_MIGRATE, + SVC_T_PPA, + SVC_T_MAX, +}; + +/** + * Common header control information of the COMM message interaction command word + * between the driver and PF. + */ +struct comm_info_head { + /** response status code, 0: success, others: error code */ + u8 status; + + /** firmware version for command */ + u8 version; + + /** response aeq number, unused for now */ + u8 rep_aeq_num; + u8 rsvd[5]; +}; + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd.h b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd.h new file mode 100644 index 000000000..b24e72942 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd.h @@ -0,0 +1,187 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2024 Huawei Technologies Co., Ltd */ + +#ifndef MPU_INBAND_CMD_H +#define MPU_INBAND_CMD_H + +enum hinic3_mgmt_cmd { + COMM_MGMT_CMD_FUNC_RESET = 0, /* reset function @see comm_cmd_func_reset */ + COMM_MGMT_CMD_FEATURE_NEGO, /* feature negotiation @see comm_cmd_feature_nego */ + COMM_MGMT_CMD_FLUSH_DOORBELL, /* clear doorbell @see comm_cmd_clear_doorbell */ + COMM_MGMT_CMD_START_FLUSH, /* clear statefull business txrx resource + * @see comm_cmd_clear_resource + */ + COMM_MGMT_CMD_SET_FUNC_FLR, /* set function flr @see comm_cmd_func_flr_set */ + COMM_MGMT_CMD_GET_GLOBAL_ATTR, /* get global attr @see comm_cmd_get_glb_attr */ + COMM_MGMT_CMD_SET_PPF_FLR_TYPE, /* set ppf flr type @see comm_cmd_ppf_flr_type_set */ + COMM_MGMT_CMD_SET_FUNC_SVC_USED_STATE, /* set function service used state + * @see comm_cmd_func_svc_used_state + */ + COMM_MGMT_CMD_START_FLR, /* MPU not use */ + + COMM_MGMT_CMD_CFG_MSIX_NUM = 10, /**< set msix num @see comm_cmd_cfg_msix_num */ + + COMM_MGMT_CMD_SET_CMDQ_CTXT = 20, /* set commandq context @see comm_cmd_cmdq_ctxt */ + COMM_MGMT_CMD_SET_VAT, /** set vat table info @see comm_cmd_root_ctxt */ + COMM_MGMT_CMD_CFG_PAGESIZE, /**< set rootctx pagesize @see comm_cmd_wq_page_size */ + COMM_MGMT_CMD_CFG_MSIX_CTRL_REG, /* config msix ctrl register @see comm_cmd_msix_config */ + COMM_MGMT_CMD_SET_CEQ_CTRL_REG, /**< set ceq ctrl 
register @see comm_cmd_ceq_ctrl_reg */ + COMM_MGMT_CMD_SET_DMA_ATTR, /**< set PF/VF DMA table attr @see comm_cmd_dma_attr_config */ + COMM_MGMT_CMD_SET_PPF_TBL_HTR_FLG, /* set PPF func table os hotreplace flag + * @see comm_cmd_ppf_tbl_htrp_config + */ + + COMM_MGMT_CMD_GET_MQM_FIX_INFO = 40, /**< get mqm fix info @see comm_cmd_get_eqm_num */ + COMM_MGMT_CMD_SET_MQM_CFG_INFO, /**< set mqm config info @see comm_cmd_eqm_cfg */ + COMM_MGMT_CMD_SET_MQM_SRCH_GPA, /* set mqm search gpa info @see comm_cmd_eqm_search_gpa */ + COMM_MGMT_CMD_SET_PPF_TMR, /**< set ppf tmr @see comm_cmd_ppf_tmr_op */ + COMM_MGMT_CMD_SET_PPF_HT_GPA, /**< set ppf ht gpa @see comm_cmd_ht_gpa */ + COMM_MGMT_CMD_SET_FUNC_TMR_BITMAT, /* @see comm_cmd_func_tmr_bitmap_op */ + COMM_MGMT_CMD_SET_MBX_CRDT, /**< reserved */ + COMM_MGMT_CMD_CFG_TEMPLATE, /**< config template @see comm_cmd_cfg_template */ + COMM_MGMT_CMD_SET_MQM_LIMIT, /**< set mqm limit @see comm_cmd_set_mqm_limit */ + + COMM_MGMT_CMD_GET_FW_VERSION = 60, /**< get firmware version @see comm_cmd_get_fw_version */ + COMM_MGMT_CMD_GET_BOARD_INFO, /**< get board info @see comm_cmd_board_info */ + COMM_MGMT_CMD_SYNC_TIME, /**< synchronize host time to MPU @see comm_cmd_sync_time */ + COMM_MGMT_CMD_GET_HW_PF_INFOS, /**< get pf info @see comm_cmd_hw_pf_infos */ + COMM_MGMT_CMD_SEND_BDF_INFO, /**< send bdf info @see comm_cmd_bdf_info */ + COMM_MGMT_CMD_GET_VIRTIO_BDF_INFO, /**< get virtio bdf info @see mpu_pcie_device_info_s */ + COMM_MGMT_CMD_GET_SML_TABLE_INFO, /**< get sml table info @see comm_cmd_get_sml_tbl_data */ + COMM_MGMT_CMD_GET_SDI_INFO, /**< get sdi info @see comm_cmd_sdi_info */ + COMM_MGMT_CMD_ROOT_CTX_LOAD, /* get root context info @see comm_cmd_root_ctx_load_req_s */ + COMM_MGMT_CMD_GET_HW_BOND, /**< get bond info @see comm_cmd_hw_bond_infos */ + + COMM_MGMT_CMD_UPDATE_FW = 80, /* update firmware @see cmd_update_fw @see comm_info_head */ + COMM_MGMT_CMD_ACTIVE_FW, /**< cold active firmware @see cmd_active_firmware */ + COMM_MGMT_CMD_HOT_ACTIVE_FW, /**< hot active firmware @see cmd_hot_active_fw */ + COMM_MGMT_CMD_HOT_ACTIVE_DONE_NOTICE, /**< reserved */ + COMM_MGMT_CMD_SWITCH_CFG, /**< switch config file @see cmd_switch_cfg */ + COMM_MGMT_CMD_CHECK_FLASH, /**< check flash @see comm_info_check_flash */ + COMM_MGMT_CMD_CHECK_FLASH_RW, /* check whether flash reads and writes normally + * @see comm_cmd_hw_bond_infos + */ + COMM_MGMT_CMD_RESOURCE_CFG, /**< reserved */ + COMM_MGMT_CMD_UPDATE_BIOS, /**< update bios firmware @see cmd_update_fw */ + COMM_MGMT_CMD_MPU_GIT_CODE, /**< get mpu git tag @see cmd_get_mpu_git_code */ + + COMM_MGMT_CMD_FAULT_REPORT = 100, /**< report fault event to driver */ + COMM_MGMT_CMD_WATCHDOG_INFO, /* report software watchdog timeout to driver + * @see comm_info_sw_watchdog + */ + COMM_MGMT_CMD_MGMT_RESET, /**< report mpu chip reset to driver */ + COMM_MGMT_CMD_FFM_SET, /* report except interrupt to driver */ + + COMM_MGMT_CMD_GET_LOG = 120, /* get the log of the dictionary @see nic_log_info_request */ + COMM_MGMT_CMD_TEMP_OP, /* temperature operation @see comm_temp_in_info + * @see comm_temp_out_info + */ + COMM_MGMT_CMD_EN_AUTO_RST_CHIP, /* @see comm_cmd_enable_auto_rst_chip */ + COMM_MGMT_CMD_CFG_REG, /**< reserved */ + COMM_MGMT_CMD_GET_CHIP_ID, /**< get chip id @see comm_chip_id_info */ + COMM_MGMT_CMD_SYSINFO_DFX, /**< reserved */ + COMM_MGMT_CMD_PCIE_DFX_NTC, /**< reserved */ + COMM_MGMT_CMD_DICT_LOG_STATUS, /* @see mpu_log_status_info */ + COMM_MGMT_CMD_MSIX_INFO, /**< read msix map table @see comm_cmd_msix_info */ + 
COMM_MGMT_CMD_CHANNEL_DETECT, /**< auto channel detect @see comm_cmd_channel_detect */ + COMM_MGMT_CMD_DICT_COUNTER_STATUS, /**< get flash counter status @see flash_counter_info */ + COMM_MGMT_CMD_UCODE_SM_COUNTER, /* get ucode sm counter @see comm_read_ucode_sm_req + * @see comm_read_ucode_sm_resp + */ + COMM_MGMT_CMD_CLEAR_LOG, /**< clear log @see comm_cmd_clear_log_s */ + + COMM_MGMT_CMD_CHECK_IF_SWITCH_WORKMODE = 140, /* check if switch workmode reserved + * @see comm_cmd_check_if_switch_workmode + */ + COMM_MGMT_CMD_SWITCH_WORKMODE, /* switch workmode reserved @see comm_cmd_switch_workmode */ + + COMM_MGMT_CMD_MIGRATE_DFX_HPA = 150, /* query migrate varialbe @see comm_cmd_migrate_dfx */ + COMM_MGMT_CMD_BDF_INFO, /**< get bdf info @see cmd_get_bdf_info_s */ + COMM_MGMT_CMD_NCSI_CFG_INFO_GET_PROC, /**< get ncsi config info @see comm_cmd_ncsi_cfg_s */ + COMM_MGMT_CMD_CPI_TCAM_DBG, /* enable or disable the scheduled cpi tcam task, + * set task interval time @see comm_cmd_cpi_tcam_dbg_s + */ + + COMM_MGMT_CMD_SECTION_RSVD_0 = 160, /**< rsvd0 section */ + COMM_MGMT_CMD_SECTION_RSVD_1 = 170, /**< rsvd1 section */ + COMM_MGMT_CMD_SECTION_RSVD_2 = 180, /**< rsvd2 section */ + COMM_MGMT_CMD_SECTION_RSVD_3 = 190, /**< rsvd3 section */ + + COMM_MGMT_CMD_GET_TDIE_ID = 199, /**< get totem die id @see comm_cmd_get_totem_die_id */ + COMM_MGMT_CMD_GET_UDIE_ID = 200, /**< get unicorn die id @see comm_cmd_get_die_id */ + COMM_MGMT_CMD_GET_EFUSE_TEST, /**< reserved */ + COMM_MGMT_CMD_EFUSE_INFO_CFG, /**< set efuse config @see comm_efuse_cfg_info */ + COMM_MGMT_CMD_GPIO_CTL, /**< reserved */ + COMM_MGMT_CMD_HI30_SERLOOP_START, /* set serloop start @see comm_cmd_hi30_serloop */ + COMM_MGMT_CMD_HI30_SERLOOP_STOP, /* set serloop stop @see comm_cmd_hi30_serloop */ + COMM_MGMT_CMD_HI30_MBIST_SET_FLAG, /**< reserved */ + COMM_MGMT_CMD_HI30_MBIST_GET_RESULT, /**< reserved */ + COMM_MGMT_CMD_ECC_TEST, /**< reserved */ + COMM_MGMT_CMD_FUNC_BIST_TEST, /**< reserved */ + + COMM_MGMT_CMD_VPD_SET = 210, /**< reserved */ + COMM_MGMT_CMD_VPD_GET, /**< reserved */ + + COMM_MGMT_CMD_ERASE_FLASH, /**< erase flash sector @see cmd_sector_info */ + COMM_MGMT_CMD_QUERY_FW_INFO, /**< get firmware info @see cmd_query_fw */ + COMM_MGMT_CMD_GET_CFG_INFO, /* get cfg in flash reserved @see comm_cmd_get_cfg_info_t */ + COMM_MGMT_CMD_GET_UART_LOG, /* collect hinicshell log @see nic_cmd_get_uart_log_info */ + COMM_MGMT_CMD_SET_UART_CMD, /* hinicshell command to mpu @see nic_cmd_set_uart_log_cmd */ + COMM_MGMT_CMD_SPI_TEST, /**< reserved */ + + /* TODO: ALL reg read/write merge to COMM_MGMT_CMD_CFG_REG */ + COMM_MGMT_CMD_MPU_REG_GET, /**< get mpu register value @see dbgtool_up_reg_opt_info */ + COMM_MGMT_CMD_MPU_REG_SET, /**< set mpu register value @see dbgtool_up_reg_opt_info */ + + COMM_MGMT_CMD_REG_READ = 220, /**< read register value @see comm_info_reg_read_write */ + COMM_MGMT_CMD_REG_WRITE, /**< write register value @see comm_info_reg_read_write */ + COMM_MGMT_CMD_MAG_REG_WRITE, /**< write mag register value @see comm_info_dfx_mag_reg */ + COMM_MGMT_CMD_ANLT_REG_WRITE, /**< read register value @see comm_info_dfx_anlt_reg */ + + COMM_MGMT_CMD_HEART_EVENT, /**< ncsi heart event @see comm_cmd_heart_event */ + COMM_MGMT_CMD_NCSI_OEM_GET_DRV_INFO, /**< nsci oem get driver info */ + COMM_MGMT_CMD_LASTWORD_GET, /**< report lastword to driver @see comm_info_up_lastword_s */ + COMM_MGMT_CMD_READ_BIN_DATA, /**< reserved */ + COMM_MGMT_CMD_GET_REG_VAL, /**< read register value @see comm_cmd_mbox_csr_rd_req */ + COMM_MGMT_CMD_SET_REG_VAL, 
/**< write register value @see comm_cmd_mbox_csr_wt_req */
+
+	/* TODO: check if needed */
+	COMM_MGMT_CMD_SET_VIRTIO_DEV = 230, /* set the virtio device
+					     * @see comm_cmd_set_virtio_dev
+					     */
+	COMM_MGMT_CMD_SET_MAC, /**< set mac address @see comm_info_mac */
+	/* MPU patch cmd */
+	COMM_MGMT_CMD_LOAD_PATCH, /**< load hot patch @see cmd_update_fw */
+	COMM_MGMT_CMD_REMOVE_PATCH, /**< remove hot patch @see cmd_patch_remove */
+	COMM_MGMT_CMD_PATCH_ACTIVE, /**< activate hot patch @see cmd_patch_active */
+	COMM_MGMT_CMD_PATCH_DEACTIVE, /**< deactivate hot patch @see cmd_patch_deactive */
+	COMM_MGMT_CMD_PATCH_SRAM_OPTIMIZE, /**< set hot patch sram optimize */
+	/* container host process */
+	COMM_MGMT_CMD_CONTAINER_HOST_PROC, /* container host process reserved
+					    * @see comm_cmd_con_sel_sta
+					    */
+	/* ncsi counter */
+	COMM_MGMT_CMD_NCSI_COUNTER_PROC, /* get ncsi counter @see nsci_counter_in_info_s */
+	COMM_MGMT_CMD_CHANNEL_STATUS_CHECK, /* check channel status reserved
+					     * @see channel_status_check_info_s
+					     */
+
+	COMM_MGMT_CMD_RSVD_0 = 240, /**< hot patch reserved cmd */
+	COMM_MGMT_CMD_RSVD_1, /**< hot patch reserved cmd */
+	COMM_MGMT_CMD_RSVD_2, /**< hot patch reserved cmd */
+	COMM_MGMT_CMD_RSVD_3, /**< hot patch reserved cmd */
+	COMM_MGMT_CMD_RSVD_4, /**< hot patch reserved cmd */
+	COMM_MGMT_CMD_SEND_API_ACK_BY_UP, /**< reserved */
+
+	/* for tool ver compatible info */
+	COMM_MGMT_CMD_GET_VER_COMPATIBLE_INFO = 254, /* get compatible info
+						      * @see comm_cmd_compatible_info
+						      */
+	/* When adding a command word, you cannot change the value of an existing command word.
+	 * Add the command word in the rsvd section. In principle,
+	 * the cmd tables of all branches are the same.
+	 */
+	COMM_MGMT_CMD_MAX = 255,
+};
+
+#endif
diff --git a/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd_defs.h b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd_defs.h
new file mode 100644
index 000000000..f53577712
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_inband_cmd_defs.h
@@ -0,0 +1,1078 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Huawei Technologies Co., Ltd */
+
+#ifndef MPU_INBAND_CMD_DEFS_H
+#define MPU_INBAND_CMD_DEFS_H
+
+#include "mpu_cmd_base_defs.h"
+#include "mpu_outband_ncsi_cmd_defs.h"
+
+#define HARDWARE_ID_1XX3V100_TAG 31 /* 1xx3v100 tag */
+#define DUMP_16B_PER_LINE 16
+#define DUMP_8_VAR_PER_LINE 8
+#define DUMP_4_VAR_PER_LINE 4
+#define FW_UPDATE_MGMT_TIMEOUT 3000000U
+
+#define FUNC_RESET_FLAG_MAX_VALUE ((1U << (RES_TYPE_MAX + 1)) - 1)
+struct comm_cmd_func_reset {
+	struct mgmt_msg_head head;
+	u16 func_id; /**< function id */
+	u16 rsvd1[3];
+	u64 reset_flag; /**< reset function type flag @see enum func_reset_flag_e */
+};
+
+enum {
+	COMM_F_API_CHAIN = 1U << 0,
+	COMM_F_CLP = 1U << 1,
+	COMM_F_CHANNEL_DETECT = 1U << 2,
+	COMM_F_MBOX_SEGMENT = 1U << 3,
+	COMM_F_CMDQ_NUM = 1U << 4,
+	COMM_F_VIRTIO_VQ_SIZE = 1U << 5,
+};
+
+#define COMM_MAX_FEATURE_QWORD 4
+enum COMM_FEATURE_NEGO_OPCODE {
+	COMM_FEATURE_NEGO_OPCODE_GET = 0,
+	COMM_FEATURE_NEGO_OPCODE_SET = 1
+};
+
+struct comm_cmd_feature_nego {
+	struct mgmt_msg_head head;
+	u16 func_id; /**< function id */
+	u8 opcode; /**< operate type 0: get, 1: set */
+	u8 rsvd;
+	u64 s_feature[COMM_MAX_FEATURE_QWORD]; /**< feature info */
+};
+
+struct comm_cmd_func_flr_set {
+	struct mgmt_msg_head head;
+
+	u16 func_id; /**< function id */
+	u8 type; /**< 1: flr enable */
+	u8 isall; /**< flr type 0: specify PF and associated VF flr, 1: all functions flr */
+	u32 rsvd;
+};
+
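For context, the reset_flag bitmap in struct comm_cmd_func_reset above is built from the RES_TYPE_* bits and the HINIC3_*_RES masks declared earlier in this patch. A minimal sketch of how a caller could fill the request is shown below; the helper name is invented for illustration, and head initialization plus the actual mailbox send are omitted.

/* Illustrative only: compose a function-reset request from the RES_TYPE_*
 * bit masks added by this patch. The helper name is hypothetical.
 */
static void example_fill_func_reset(struct comm_cmd_func_reset *cmd, u16 func_id)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->func_id = func_id;
	/* flush the shared COMM resources plus the NIC service resources */
	cmd->reset_flag = HINIC3_COMM_RES | HINIC3_NIC_RES;
	/* bits outside the defined RES_TYPE_* range are not accepted */
	WARN_ON(cmd->reset_flag > FUNC_RESET_FLAG_MAX_VALUE);
}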
+struct comm_cmd_clear_doorbell { + struct mgmt_msg_head head; + + u16 func_id; /**< function id */ + u16 rsvd1[3]; +}; + +struct comm_cmd_clear_resource { + struct mgmt_msg_head head; + + u16 func_id; /**< function id */ + u16 rsvd1[3]; +}; + +struct comm_global_attr { + u8 max_host_num; /**< maximum number of host */ + u8 max_pf_num; /**< maximum number of pf */ + u16 vf_id_start; /**< VF function id start */ + + u8 mgmt_host_node_id; /**< node id */ + u8 cmdq_num; /**< cmdq num */ + u8 rsvd1[2]; + u32 rsvd2[8]; +}; + +struct comm_cmd_get_glb_attr { + struct mgmt_msg_head head; + struct comm_global_attr attr; /**< global attr @see struct comm_global_attr */ +}; + +struct comm_cmd_ppf_flr_type_set { + struct mgmt_msg_head head; + + u16 func_id; + u8 func_service_type; + u8 rsvd1; + u32 ppf_flr_type; /**< function flr type 1:statefull 0:stateless */ +}; + +struct comm_cmd_func_svc_used_state { + struct mgmt_msg_head head; + u16 func_id; + u16 svc_type; + u8 used_state; + u8 rsvd[35]; +}; + +struct comm_cmd_cfg_msix_num { + struct comm_info_head head; + + u16 func_id; + u8 op_code; /**< operate type 1: alloc 0: free */ + u8 rsvd0; + + u16 msix_num; + u16 rsvd1; +}; + +struct cmdq_ctxt_info { + u64 curr_wqe_page_pfn; + u64 wq_block_pfn; +}; + +struct comm_cmd_cmdq_ctxt { + struct mgmt_msg_head head; + + u16 func_id; + u8 cmdq_id; + u8 rsvd1[5]; + + struct cmdq_ctxt_info ctxt; +}; + +struct comm_cmd_root_ctxt { + struct mgmt_msg_head head; + + u16 func_id; + u8 set_cmdq_depth; + u8 cmdq_depth; + u16 rx_buf_sz; + u8 lro_en; + u8 rsvd1; + u16 sq_depth; + u16 rq_depth; + u64 rsvd2; +}; + +struct comm_cmd_wq_page_size { + struct mgmt_msg_head head; + + u16 func_id; /**< function id */ + u8 opcode; /**< operate type 0:get , 1:set */ + /* real_size=4KB*2^page_size, range(0~20) must be checked by driver */ + u8 page_size; + + u32 rsvd1; +}; + +struct comm_cmd_msix_config { + struct mgmt_msg_head head; + + u16 func_id; /**< function id */ + u8 opcode; /**< operate type 0:get , 1:set */ + u8 rsvd1; + u16 msix_index; + u8 pending_cnt; + u8 coalesce_timer_cnt; + u8 resend_timer_cnt; + u8 lli_timer_cnt; + u8 lli_credit_cnt; + u8 rsvd2[5]; +}; + +struct comm_cmd_ceq_ctrl_reg { + struct mgmt_msg_head head; + + u16 func_id; /**< function id */ + u16 q_id; + u32 ctrl0; + u32 ctrl1; + u32 rsvd1; +}; + +struct comm_cmd_dma_attr_config { + struct mgmt_msg_head head; + + u16 func_id; /**< function id */ + u8 entry_idx; + u8 st; + u8 at; + u8 ph; + u8 no_snooping; + u8 tph_en; + u32 resv1; +}; + +struct comm_cmd_ppf_tbl_htrp_config { + struct mgmt_msg_head head; + + u32 hotreplace_flag; +}; + +struct comm_cmd_get_eqm_num { + struct mgmt_msg_head head; + + u8 host_id; /**< host id */ + u8 rsvd1[3]; + u32 chunk_num; + u32 search_gpa_num; +}; + +struct comm_cmd_eqm_cfg { + struct mgmt_msg_head head; + + u8 host_id; /**< host id */ + u8 valid; /**< 0:clear config , 1:set config */ + u16 rsvd1; + u32 page_size; /**< page size */ + u32 rsvd2; +}; + +struct comm_cmd_eqm_search_gpa { + struct mgmt_msg_head head; + + u8 host_id; /**< host id Deprecated field, not used */ + u8 rsvd1[3]; + u32 start_idx; /**< start index */ + u32 num; + u32 rsvd2; + u64 gpa_hi52[0]; /**< [gpa data */ +}; + +struct comm_cmd_ppf_tmr_op { + struct mgmt_msg_head head; + + u8 ppf_id; /**< ppf function id */ + u8 opcode; /**< operation type 1: start timer, 0: stop timer */ + u8 rsvd1[6]; +}; + +struct comm_cmd_ht_gpa { + struct mgmt_msg_head head; + + u8 host_id; /**< host id */ + u8 rsvd0[3]; + u32 rsvd1[7]; + u64 page_pa0; + u64 page_pa1; +}; + 
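As a worked example of the encoding documented in comm_cmd_wq_page_size above (real_size = 4 KB * 2^page_size, with page_size in 0..20 and the range check done by the driver), the sketch below shows one way that check could be written; the helper itself is hypothetical and not part of this patch.

/* Hypothetical helper: convert a byte size into the page_size exponent
 * expected by comm_cmd_wq_page_size, enforcing the documented 0..20 range.
 */
static int example_encode_wq_page_size(u64 real_size, u8 *page_size)
{
	u8 shift;

	for (shift = 0; shift <= 20; shift++) {
		if ((4096ULL << shift) == real_size) {
			*page_size = shift;
			return 0;
		}
	}

	return -EINVAL; /* not of the form 4 KB * 2^n within range */
}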
+struct comm_cmd_func_tmr_bitmap_op { + struct mgmt_msg_head head; + + u16 func_id; /**< function id */ + u8 opcode; /**< operation type 1: start timer, 0: stop timer */ + u8 rsvd1[5]; +}; + +#define DD_CFG_TEMPLATE_MAX_IDX 12 +#define DD_CFG_TEMPLATE_MAX_TXT_LEN 64 +#define CFG_TEMPLATE_OP_QUERY 0 +#define CFG_TEMPLATE_OP_SET 1 +#define CFG_TEMPLATE_SET_MODE_BY_IDX 0 +#define CFG_TEMPLATE_SET_MODE_BY_NAME 1 + +struct comm_cmd_cfg_template { + struct mgmt_msg_head head; + u8 opt_type; /**< operation type 0: query 1: set */ + u8 set_mode; /**< set mode 0:index mode 1:name mode. */ + u8 tp_err; + u8 rsvd0; + + u8 cur_index; /**< current cfg tempalte index. */ + u8 cur_max_index; /** max support cfg tempalte index. */ + u8 rsvd1[2]; + u8 cur_name[DD_CFG_TEMPLATE_MAX_TXT_LEN]; /**< current cfg tempalte name. */ + u8 cur_cfg_temp_info[DD_CFG_TEMPLATE_MAX_IDX][DD_CFG_TEMPLATE_MAX_TXT_LEN]; + + u8 next_index; /**< next reset cfg tempalte index. */ + u8 next_max_index; /**< max support cfg tempalte index. */ + u8 rsvd2[2]; + u8 next_name[DD_CFG_TEMPLATE_MAX_TXT_LEN]; /**< next reset cfg tempalte name. */ + u8 next_cfg_temp_info[DD_CFG_TEMPLATE_MAX_IDX][DD_CFG_TEMPLATE_MAX_TXT_LEN]; +}; + +#define MQM_SUPPORT_COS_NUM 8 +#define MQM_INVALID_WEIGHT 256 +#define MQM_LIMIT_SET_FLAG_READ 0 +#define MQM_LIMIT_SET_FLAG_WRITE 1 +struct comm_cmd_set_mqm_limit { + struct mgmt_msg_head head; + + u16 set_flag; /**< operation type 0: read 1: write */ + u16 func_id; /**< function id */ + /* Indicates the weight of cos_id. The value ranges from 0 to 255. + * The value 0 indicates SP scheduling. + */ + u16 cos_weight[MQM_SUPPORT_COS_NUM]; /**< cos weight range[0,255] */ + u32 host_min_rate; /**< current host minimum rate */ + u32 func_min_rate; /**< current function minimum rate,unit:Mbps */ + u32 func_max_rate; /**< current function maximum rate,unit:Mbps */ + u8 rsvd[64]; /* Reserved */ +}; + +#define HINIC3_FW_VERSION_LEN 16 +#define HINIC3_FW_COMPILE_TIME_LEN 20 + +enum hinic3_fw_ver_type { + HINIC3_FW_VER_TYPE_BOOT, + HINIC3_FW_VER_TYPE_MPU, + HINIC3_FW_VER_TYPE_NPU, + HINIC3_FW_VER_TYPE_SMU_L0, + HINIC3_FW_VER_TYPE_SMU_L1, + HINIC3_FW_VER_TYPE_CFG, +}; + +struct comm_cmd_get_fw_version { + struct mgmt_msg_head head; + + u16 fw_type; /**< firmware type @see enum hinic3_fw_ver_type */ + u16 rsvd1; + u8 ver[HINIC3_FW_VERSION_LEN]; /**< firmware version */ + u8 time[HINIC3_FW_COMPILE_TIME_LEN]; /**< firmware compile time */ +}; + +struct hinic3_board_info { + u8 board_type; /**< board type */ + u8 port_num; /**< current port number */ + u8 port_speed; /**< port speed */ + u8 pcie_width; /**< pcie width */ + u8 host_num; /**< host number */ + u8 pf_num; /**< pf number */ + u16 vf_total_num; /**< vf total number */ + u8 tile_num; /**< tile number */ + u8 qcm_num; /**< qcm number */ + u8 core_num; /**< core number */ + u8 work_mode; /**< work mode */ + u8 service_mode; /**< service mode */ + u8 pcie_mode; /**< pcie mode */ + u8 boot_sel; /**< boot sel */ + u8 board_id; /**< board id */ + u32 rsvd; + u32 service_en_bitmap; /**< service en bitmap */ + u8 scenes_id; /**< scenes id */ + u8 cfg_template_id; /**< cfg template index */ + u8 hardware_id; /**< hardware id */ + u8 spu_en; /**< spu enable flag */ + u16 pf_vendor_id; /**< pf vendor id */ + u8 tile_bitmap; /**< used tile bitmap */ + u8 sm_bitmap; /**< used sm bitmap */ +}; + +struct comm_cmd_board_info { + struct mgmt_msg_head head; + + struct hinic3_board_info info; /**< board info @see struct hinic3_board_info */ + u32 rsvd[22]; +}; + +struct 
comm_cmd_sync_time {
+	struct mgmt_msg_head head;
+
+	u64 mstime; /**< time, unit: ms */
+	u64 rsvd1;
+};
+
+struct hw_pf_info {
+	u16 glb_func_idx; /**< function id */
+	u16 glb_pf_vf_offset;
+	u8 p2p_idx;
+	u8 itf_idx; /**< host id */
+	u16 max_vfs; /**< max vf number */
+	u16 max_queue_num; /**< max queue number */
+	u16 vf_max_queue_num;
+	u16 port_id;
+	u16 rsvd0;
+	u32 pf_service_en_bitmap;
+	u32 vf_service_en_bitmap;
+	u16 rsvd1[2];
+
+	u8 device_type;
+	u8 bus_num; /**< bdf info */
+	u16 vf_stride; /**< vf stride */
+	u16 vf_offset; /**< vf offset */
+	u8 rsvd[2];
+};
+
+#define CMD_MAX_MAX_PF_NUM 32
+struct hinic3_hw_pf_infos {
+	u8 num_pfs; /**< pf number */
+	u8 rsvd1[3];
+
+	struct hw_pf_info infos[CMD_MAX_MAX_PF_NUM]; /**< pf info @see struct hw_pf_info */
+};
+
+struct comm_cmd_hw_pf_infos {
+	struct mgmt_msg_head head;
+
+	struct hinic3_hw_pf_infos infos; /**< all pf info @see struct hinic3_hw_pf_infos */
+};
+
+struct comm_cmd_bdf_info {
+	struct mgmt_msg_head head;
+
+	u16 function_idx; /**< function id */
+	u8 rsvd1[2];
+	u8 bus; /**< bus info */
+	u8 device; /**< device info */
+	u8 function; /**< function info */
+	u8 rsvd2[5];
+};
+
+#define TABLE_INDEX_MAX 129
+struct sml_table_id_info {
+	u8 node_id;
+	u8 instance_id;
+};
+
+struct comm_cmd_get_sml_tbl_data {
+	struct comm_info_head head; /* 8B */
+	u8 tbl_data[512]; /**< sml table data */
+};
+
+struct comm_cmd_sdi_info {
+	struct mgmt_msg_head head;
+	u32 cfg_sdi_mode; /**< host mode, 0: normal 1: virtual machine 2: bare metal */
+};
+
+#define HINIC_OVS_BOND_DEFAULT_ID 1
+struct hinic3_hw_bond_infos {
+	u8 bond_id;
+	u8 valid;
+	u8 rsvd1[2];
+};
+
+struct comm_cmd_hw_bond_infos {
+	struct mgmt_msg_head head;
+	struct hinic3_hw_bond_infos infos; /**< bond info @see struct hinic3_hw_bond_infos */
+};
+
+/* The tool sends the firmware in 1536-byte (1.5K) slices; one tool message is at most 2K, including the header */
+struct cmd_update_fw {
+	struct comm_info_head head; // 8B
+	u16 fw_flag; /**< subfirmware flag, bit 0: last slice flag, bit 1: first slice flag */
+	u16 slice_len; /**< current slice length */
+	u32 fw_crc; /**< subfirmware crc */
+	u32 fw_type; /**< subfirmware type */
+	u32 bin_total_len; /**< total firmware length, only valid in the first slice */
+	u32 bin_section_len; /**< subfirmware length */
+	u32 fw_verion; /**< subfirmware version */
+	u32 fw_offset; /**< current slice offset within the current subfirmware */
+	u32 data[0]; /**< data */
+};
+
+struct cmd_switch_cfg {
+	struct comm_info_head msg_head;
+	u8 index; /**< index, range[0,7] */
+	u8 data[7];
+};
+
+struct cmd_active_firmware {
+	struct comm_info_head msg_head;
+	u8 index; /* 0 ~ 7 */
+	u8 data[7];
+};
+
+#define HOT_ACTIVE_MPU 1
+#define HOT_ACTIVE_NPU 2
+#define HOT_ACTIVE_MNPU 3
+struct cmd_hot_active_fw {
+	struct comm_info_head head;
+	u32 type; /**< hot activate firmware type 1: mpu; 2: ucode; 3: mpu & npu */
+	u32 data[3];
+};
+
+#define FLASH_CHECK_OK 1
+#define FLASH_CHECK_ERR 2
+#define FLASH_CHECK_DISMATCH 3
+
+struct comm_info_check_flash {
+	struct comm_info_head head;
+
+	u8 status; /**< flash check status */
+	u8 rsv[3];
+};
+
+struct cmd_get_mpu_git_code {
+	struct comm_info_head head; /* 8B */
+	u32 rsvd; /* reserved */
+	char mpu_git_code[64]; /**< mpu git tag and compile time */
+};
+
+#define DATA_LEN_1K 1024
+struct comm_info_sw_watchdog {
+	struct comm_info_head head;
+
+	u32 curr_time_h; /**< time when the infinite loop occurred, in cycles (high 32 bits) */
+	u32 curr_time_l; /**< time when the infinite loop occurred, in cycles (low 32 bits) */
+	u32 task_id; /**< id of the task that is stuck in the infinite loop */
+	u32 rsv;
+
+	u64 pc;
+
+	u64 elr;
+	u64
spsr; + u64 far; + u64 esr; + u64 xzr; + u64 x30; + u64 x29; + u64 x28; + u64 x27; + u64 x26; + u64 x25; + u64 x24; + u64 x23; + u64 x22; + u64 x21; + u64 x20; + u64 x19; + u64 x18; + u64 x17; + u64 x16; + u64 x15; + u64 x14; + u64 x13; + u64 x12; + u64 x11; + u64 x10; + u64 x09; + u64 x08; + u64 x07; + u64 x06; + u64 x05; + u64 x04; + u64 x03; + u64 x02; + u64 x01; + u64 x00; + + u64 stack_top; /**< stack top */ + u64 stack_bottom; /**< stack bottom */ + u64 sp; /**< sp pointer */ + u32 curr_used; /**< the size currently used by the stack */ + u32 peak_used; /**< historical peak of stack usage */ + u32 is_overflow; /**< stack overflow flag */ + + u32 stack_actlen; /**< actual stack length(<=1024) */ + u8 stack_data[DATA_LEN_1K]; /* If the value exceeds 1024, it will be truncated. */ +}; + +struct nic_log_info_request { + u8 status; + u8 version; + u8 rsvd0[6]; + + u32 offset; + u8 log_or_index; /* 0:log 1:index */ + u8 type; /* log type 0:up 1:ucode 2:smu 3:mpu lastword 4.npu lastword */ + u8 area; /* area 0:ram 1:flash (this bit is valid only when log_or_index is 0) */ + u8 rsvd1; /* reserved */ +}; + +#define MPU_TEMP_OP_GET 0 +#define MPU_TEMP_THRESHOLD_OP_CFG 1 +struct comm_temp_in_info { + struct comm_info_head head; + u8 opt_type; /**< operation type 0:read operation 1:cfg operation */ + u8 rsv[3]; + s32 max_temp; /**< maximum threshold of temperature */ + s32 min_temp; /**< minimum threshold of temperature */ +}; + +struct comm_temp_out_info { + struct comm_info_head head; + s32 temp_data; /**< current temperature */ + s32 max_temp_threshold; /**< maximum threshold of temperature */ + s32 min_temp_threshold; /**< minimum threshold of temperature */ + s32 max_temp; /**< maximum temperature */ + s32 min_temp; /**< minimum temperature */ +}; + +/* 关闭芯片自复位 */ +struct comm_cmd_enable_auto_rst_chip { + struct comm_info_head head; + u8 op_code; /**< operation type 0:get operation 1:set operation */ + u8 enable; /* auto reset status 0: disable auto reset chip 1: enable */ + u8 rsvd[2]; +}; + +struct comm_chip_id_info { + struct comm_info_head head; + u8 chip_id; /**< chip id */ + u8 rsvd[3]; +}; + +struct mpu_log_status_info { + struct comm_info_head head; + u8 type; /**< operation type 0:read operation 1:write operation */ + u8 log_status; /**< log status 0:idle 1:busy */ + u8 rsvd[2]; +}; + +struct comm_cmd_msix_info { + struct comm_info_head head; + u8 rsvd1; + u8 flag; /**< table flag 0:second table, 1:actual table */ + u8 rsvd[2]; +}; + +struct comm_cmd_channel_detect { + struct mgmt_msg_head head; + + u16 func_id; /**< function id */ + u16 rsvd1[3]; + u32 rsvd2[2]; +}; + +#define MAX_LOG_BUF_SIZE 1024 +#define FLASH_NPU_COUNTER_HEAD_MAGIC (0x5a) +#define FLASH_NPU_COUNTER_NIC_TYPE 0 +#define FLASH_NPU_COUNTER_FC_TYPE 1 + +struct flash_npu_counter_head_s { + u8 magic; + u8 tbl_type; + u8 count_type; /**< 0:nic;1:fc */ + u8 count_num; /**< current count number */ + u16 base_offset; /**< address offset */ + u16 base_count; +}; + +struct flash_counter_info { + struct comm_info_head head; + + u32 length; /**< flash counter buff len */ + u32 offset; /**< flash counter buff offset */ + u8 data[MAX_LOG_BUF_SIZE]; /**< flash counter data */ +}; + +enum mpu_sm_cmd_type { + COMM_SM_CTR_RD16 = 1, + COMM_SM_CTR_RD32, + COMM_SM_CTR_RD64_PAIR, + COMM_SM_CTR_RD64, + COMM_SM_CTR_RD32_CLEAR, + COMM_SM_CTR_RD64_PAIR_CLEAR, + COMM_SM_CTR_RD64_CLEAR, + COMM_SM_CTR_RD16_CLEAR, +}; + +struct comm_read_ucode_sm_req { + struct mgmt_msg_head msg_head; + + u32 node; /**< node id @see enum 
INTERNAL_RING_NODE_ID_E */ + u32 count_id; /**< count id */ + u32 instanse; /**< instance id */ + u32 type; /**< read type @see enum mpu_sm_cmd_type */ +}; + +struct comm_read_ucode_sm_resp { + struct mgmt_msg_head msg_head; + + u64 val1; + u64 val2; +}; + +enum log_type { + MPU_LOG_CLEAR = 0, + SMU_LOG_CLEAR = 1, + NPU_LOG_CLEAR = 2, + SPU_LOG_CLEAR = 3, + ALL_LOG_CLEAR = 4, +}; + +#define ABLESWITCH 1 +#define IMABLESWITCH 2 +enum switch_workmode_op { + SWITCH_WORKMODE_SWITCH = 0, + SWITCH_WORKMODE_OTHER = 1 +}; + +enum switch_workmode_obj { + SWITCH_WORKMODE_FC = 0, + SWITCH_WORKMODE_TOE = 1, + SWITCH_WORKMODE_ROCE_AND_NOF = 2, + SWITCH_WORKMODE_NOF_AA = 3, + SWITCH_WORKMODE_ETH_CNTR = 4, + SWITCH_WORKMODE_NOF_CNTR = 5, +}; + +struct comm_cmd_check_if_switch_workmode { + struct mgmt_msg_head head; + u8 switch_able; + u8 rsvd1; + u16 rsvd2[3]; + u32 rsvd3[3]; +}; + +#define MIG_NOR_VM_ONE_MAX_SGE_MEM (64 * 8) +#define MIG_NOR_VM_ONE_MAX_MEM (MIG_NOR_VM_ONE_MAX_SGE_MEM + 16) +#define MIG_VM_MAX_SML_ENTRY_NUM 24 + +struct comm_cmd_migrate_dfx_s { + struct mgmt_msg_head head; + u32 hpa_entry_id; /**< hpa entry id */ + u8 vm_hpa[MIG_NOR_VM_ONE_MAX_MEM]; /**< vm hpa info */ +}; + +#define BDF_BUS_BIT 8 +struct pf_bdf_info { + u8 itf_idx; /**< host id */ + u16 bdf; /**< bdf info */ + u8 pf_bdf_info_vld; /**< pf bdf info valid */ +}; + +struct vf_bdf_info { + u16 glb_pf_vf_offset; /**< global_func_id offset of 1st vf in pf */ + u16 max_vfs; /**< vf number */ + u16 vf_stride; /**< VF_RID_SETTING.vf_stride */ + u16 vf_offset; /**< VF_RID_SETTING.vf_offset */ + u8 bus_num; /**< tl_cfg_bus_num */ + u8 rsv[3]; +}; + +struct cmd_get_bdf_info_s { + struct mgmt_msg_head head; + struct pf_bdf_info pf_bdf_info[CMD_MAX_MAX_PF_NUM]; + struct vf_bdf_info vf_bdf_info[CMD_MAX_MAX_PF_NUM]; + u32 vf_num; /**< vf num */ +}; + +#define CPI_TCAM_DBG_CMD_SET_TASK_ENABLE_VALID 0x1 +#define CPI_TCAM_DBG_CMD_SET_TIME_INTERVAL_VALID 0x2 +#define CPI_TCAM_DBG_CMD_TYPE_SET 0 +#define CPI_TCAM_DBG_CMD_TYPE_GET 1 + +#define UDIE_ID_DATA_LEN 8 +#define TDIE_ID_DATA_LEN 18 +struct comm_cmd_get_die_id { + struct comm_info_head head; + + u32 die_id_data[UDIE_ID_DATA_LEN]; /**< die id data */ +}; + +struct comm_cmd_get_totem_die_id { + struct comm_info_head head; + + u32 die_id_data[TDIE_ID_DATA_LEN]; /**< die id data */ +}; + +#define MAX_EFUSE_INFO_BUF_SIZE 1024 + +enum comm_efuse_opt_type { + EFUSE_OPT_UNICORN_EFUSE_BURN = 1, /**< burn unicorn efuse bin */ + EFUSE_OPT_UPDATE_SWSB = 2, /**< hw rotpk switch to guest rotpk */ + EFUSE_OPT_TOTEM_EFUSE_BURN = 3 /**< burn totem efuse bin */ +}; + +struct comm_efuse_cfg_info { + struct comm_info_head head; + u8 opt_type; /**< operation type @see enum comm_efuse_opt_type */ + u8 rsvd[3]; + u32 total_len; /**< entire package leng value */ + u32 data_csum; /**< data csum */ + u8 data[MAX_EFUSE_INFO_BUF_SIZE]; /**< efuse cfg data, size 768byte */ +}; + +/* serloop模块接口 */ +struct comm_cmd_hi30_serloop { + struct comm_info_head head; + + u32 macro; + u32 lane; + u32 prbs_pattern; + u32 result; +}; + +struct cmd_sector_info { + struct comm_info_head head; + u32 offset; /**< flash addr */ + u32 len; /**< flash length */ +}; + +struct cmd_query_fw { + struct comm_info_head head; + u32 offset; /**< offset addr */ + u32 len; /**< length */ +}; + +struct nic_cmd_get_uart_log_info { + struct comm_info_head head; + struct { + u32 ret : 8; + u32 version : 8; + u32 log_elem_real_num : 16; + } log_head; + char uart_log[MAX_LOG_BUF_SIZE]; +}; + +#define MAX_LOG_CMD_BUF_SIZE 128 + +struct 
nic_cmd_set_uart_log_cmd { + struct comm_info_head head; + struct { + u32 ret : 8; + u32 version : 8; + u32 cmd_elem_real_num : 16; + } log_head; + char uart_cmd[MAX_LOG_CMD_BUF_SIZE]; +}; + +struct dbgtool_up_reg_opt_info { + struct comm_info_head head; + + u8 len; + u8 is_car; + u8 car_clear_flag; + u32 csr_addr; /**< register addr */ + u32 csr_value; /**< register value */ +}; + +struct comm_info_reg_read_write { + struct comm_info_head head; + + u32 reg_addr; /**< register address */ + u32 val_length; /**< register value length */ + + u32 data[2]; /**< register value */ +}; + +#ifndef DFX_MAG_MAX_REG_NUM +#define DFX_MAG_MAX_REG_NUM (32) +#endif +struct comm_info_dfx_mag_reg { + struct comm_info_head head; + u32 write; /**< read or write flag: 0:read; 1:write */ + u32 reg_addr; /**< register address */ + u32 reg_cnt; /**< register num , up to 32 */ + u32 clear; /**< clear flag: 0:do not clear after read 1:clear after read */ + u32 data[DFX_MAG_MAX_REG_NUM]; /**< register data */ +}; + +struct comm_info_dfx_anlt_reg { + struct comm_info_head head; + u32 write; /**< read or write flag: 0:read; 1:write */ + u32 reg_addr; /**< register address */ + u32 reg_cnt; /**< register num , up to 32 */ + u32 clear; /**< clear flag: 0:do not clear after read 1:clear after read */ + u32 data[DFX_MAG_MAX_REG_NUM]; /**< register data */ +}; + +#define MAX_DATA_NUM (240) +struct csr_msg { + struct { + u32 node_id : 5; // [4:0] + u32 data_width : 10; // [14:5] + u32 rsvd : 17; // [31:15] + } bits; + u32 addr; +}; + +struct comm_cmd_heart_event { + struct mgmt_msg_head head; + + u8 init_sta; /* 0: mpu init ok, 1: mpu init error. */ + u8 rsvd1[3]; + u32 heart; /* add one by one */ + u32 heart_handshake; /* should be alwasys: 0x5A5A5A5A */ +}; + +#define XREGS_NUM 31 +struct tag_cpu_tick { + u32 cnt_hi; + u32 cnt_lo; +}; + +struct tag_ax_exc_reg_info { + u64 ttbr0; + u64 ttbr1; + u64 tcr; + u64 mair; + u64 sctlr; + u64 vbar; + u64 current_el; + u64 sp; + /* The memory layout of the following fields is the same as that of TskContext. */ + u64 elr; /* 返回地址 */ + u64 spsr; + u64 far_r; + u64 esr; + u64 xzr; + u64 xregs[XREGS_NUM]; /* 0~30: x30~x0 */ +}; + +struct tag_exc_info { + char os_ver[48]; /**< os version */ + char app_ver[64]; /**< application version*/ + u32 exc_cause; /**< exception reason */ + u32 thread_type; /**< Thread type before exception */ + u32 thread_id; /**< Thread PID before exception */ + u16 byte_order; /**< byte order */ + u16 cpu_type; /**< CPU type */ + u32 cpu_id; /**< CPU ID */ + struct tag_cpu_tick cpu_tick; /**< CPU Tick */ + u32 nest_cnt; /**< exception nesting count */ + u32 fatal_errno; /**< fatal error code, valid when a fatal error occurs */ + u64 uw_sp; /**< exception front stack pointer */ + u64 stack_bottom; /**< bottom of stack before exception */ + /* Context information of the core register when an exception occurs. + * 82\57 must be located in byte 152, If any change is made, + * the OS_EXC_REGINFO_OFFSET macro in sre_platform.eh needs to be updated. 
+ */ + struct tag_ax_exc_reg_info reg_info; /**< register info @see EXC_REGS_S */ +}; + +/* 上报给驱动的up lastword模块接口 */ +#define MPU_LASTWORD_SIZE 1024 +struct tag_comm_info_up_lastword { + struct comm_info_head head; + + struct tag_exc_info stack_info; + u32 stack_actlen; /**< actual stack length (<=1024) */ + u8 stack_data[MPU_LASTWORD_SIZE]; +}; + +struct comm_cmd_mbox_csr_rd_req { + struct mgmt_msg_head head; + struct csr_msg csr_info[MAX_DATA_NUM]; + u32 data_num; +}; + +struct comm_cmd_mbox_csr_wt_req { + struct mgmt_msg_head head; + struct csr_msg csr_info; + u64 value; +}; + +struct comm_cmd_mbox_csr_rd_ret { + struct mgmt_msg_head head; + u64 value[MAX_DATA_NUM]; +}; + +struct comm_cmd_mbox_csr_wt_ret { + struct mgmt_msg_head head; +}; + +enum comm_virtio_dev_type { + COMM_VIRTIO_NET_TYPE = 0, + COMM_VIRTIO_BLK_TYPE = 1, + COMM_VIRTIO_SCSI_TYPE = 4, +}; + +struct comm_virtio_dev_cmd { + u16 device_type; /**< device type @see enum comm_virtio_dev_type */ + u16 device_id; + u32 devid_switch; + u32 sub_vendor_id; + u32 sub_class_code; + u32 flash_en; +}; + +struct comm_virtio_dev_ctl { + u32 device_type_mark; + u32 devid_switch_mark; + u32 sub_vendor_id_mark; + u32 sub_class_code_mark; + u32 flash_en_mark; +}; + +struct comm_cmd_set_virtio_dev { + struct comm_info_head head; + struct comm_virtio_dev_cmd virtio_dev_cmd; /**< @see struct comm_virtio_dev_cmd_s */ + struct comm_virtio_dev_ctl virtio_dev_ctl; /**< @see struct comm_virtio_dev_ctl_s */ +}; + +/* Interfaces of the MAC Module */ +#ifndef MAC_ADDRESS_BYTE_NUM +#define MAC_ADDRESS_BYTE_NUM (6) +#endif +struct comm_info_mac { + struct comm_info_head head; + + u16 is_valid; + u16 rsvd0; + u8 data[MAC_ADDRESS_BYTE_NUM]; + u16 rsvd1; +}; + +struct cmd_patch_active { + struct comm_info_head head; + u32 fw_type; /**< firmware type */ + u32 data[3]; /**< reserved */ +}; + +struct cmd_patch_deactive { + struct comm_info_head head; + u32 fw_type; /**< firmware type */ + u32 data[3]; /**< reserved */ +}; + +struct cmd_patch_remove { + struct comm_info_head head; + u32 fw_type; /**< firmware type */ + u32 data[3]; /**< reserved */ +}; + +struct cmd_patch_sram_optimize { + struct comm_info_head head; + u32 data[4]; /**< reserved */ +}; + +/* ncsi counter */ +struct nsci_counter_in_info_s { + struct comm_info_head head; + u8 opt_type; /**< operate type 0:read counter 1:counter clear */ + u8 rsvd[3]; +}; + +struct channel_status_check_info_s { + struct comm_info_head head; + u32 rsvd1; + u32 rsvd2; +}; + +struct comm_cmd_compatible_info { + struct mgmt_msg_head head; + u8 chip_ver; + u8 host_env; + u8 rsv[13]; + + u8 cmd_count; + union { + struct { + u8 module; + u8 mod_type; + u16 cmd; + } cmd_desc; + u32 cmd_desc_val; + } cmds_desc[24]; + u8 cmd_ver[24]; +}; + +struct tag_ncsi_chan_info { + u8 aen_en; /**< aen enable */ + u8 index; /**< index of channel */ + u8 port; /**< net port number */ + u8 state; /**< ncsi state */ + u8 ncsi_port_en; /**< ncsi port enable flag (1:enable 0:disable) */ + u8 rsv[3]; + struct tag_ncsi_chan_capa capabilities; /**< ncsi channel capabilities*/ + struct tg_g_ncsi_parameters parameters; /**< ncsi state */ +}; + +struct comm_cmd_ncsi_settings { + u8 ncsi_ver; /**< ncsi version */ + u8 ncsi_pkg_id; + u8 arb_en; /**< arbitration en */ + u8 duplex_set; /**< duplex mode */ + u8 chan_num; /**< Number of virtual channels */ + u8 iid; /**< identify new instances of a command */ + u8 lldp_over_ncsi_enable; + u8 lldp_over_mctp_enable; + u32 magicwd; + u8 rsvd[8]; + struct tag_ncsi_chan_info ncsi_chan_info; +}; + +struct 
comm_cmd_ncsi_cfg { + struct comm_info_head head; + u8 ncsi_cable_state; /**< ncsi cable status 0:cable out of place,1:cable in place */ + u8 setting_type; /**< nsci info type:0:ram cofig, 1: flash config */ + u8 port; /**< net port number */ + u8 erase_flag; /**< flash erase flag, 1: erase flash info */ + struct comm_cmd_ncsi_settings setting_info; +}; + +#define MQM_ATT_PAGE_NUM 128 + +/* Maximum segment data length of the upgrade command */ +#define MAX_FW_FRAGMENT_LEN (1536) + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_outband_ncsi_cmd_defs.h b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_outband_ncsi_cmd_defs.h new file mode 100644 index 000000000..4e2e1eb91 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/include/mpu/mpu_outband_ncsi_cmd_defs.h @@ -0,0 +1,205 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2024 Huawei Technologies Co., Ltd */ + +#ifndef MPU_OUTBAND_NCSI_CMD_DEFS_H +#define MPU_OUTBAND_NCSI_CMD_DEFS_H + +#pragma pack(1) + +enum NCSI_RESPONSE_CODE_E { + COMMAND_COMPLETED = 0x00, /**< command completed */ + COMMAND_FAILED = 0x01, /**< command failed */ + COMMAND_UNAVAILABLE = 0x02, /**< command unavailable */ + COMMAND_UNSPORRTED = 0x03 /**< command unsporrted */ +}; + +enum NCSI_REASON_CODE_E { + NO_ERROR = 0x00, /**< no error */ + INTERFACE_INIT_REQUIRED = 0x01, /**< interface init required */ + INVALID_PARA = 0x02, /**< invalid parameter */ + CHAN_NOT_READY = 0x03, /**< channel not ready */ + PKG_NOT_READY = 0x04, /**< package not ready */ + INVALID_PAYLOAD_LEN = 0x05, /**< invalid payload len */ + LINK_STATUS_ERROR = 0xA06, /**< get link status fail */ + VLAN_TAG_INVALID = 0xB07, /**< vlan tag invalid */ + MAC_ADD_IS_ZERO = 0xE08, /**< mac add is zero */ + FLOW_CONTROL_UNSUPPORTED = 0x09, /**< flow control unsupported */ + CHECKSUM_ERR = 0xA, /**< check sum error */ + /**< the command type is unsupported only when the response code is 0x03 */ + UNSUPPORTED_COMMAND_TYPE = 0x7FFF +}; + +enum NCSI_CLIENT_TYPE_E { + NCSI_RMII_TYPE = 1, /**< rmii client */ + NCSI_MCTP_TYPE = 2, /**< MCTP client */ + NCSI_AEN_TYPE = 3 /**< AEN client */ +}; + +/** + * @brief ncsi ctrl packet header + */ +struct tag_ncsi_ctrl_packet_header { + u8 mc_id; /**< management control ID */ + u8 head_revision; /**< head revision */ + u8 reserved0; /**< reserved */ + u8 iid; /**< instance ID */ + u8 pkt_type; /**< packet type */ +#ifdef NCSI_BIG_ENDIAN + u8 pkg_id : 3; /**< packet ID */ + u8 inter_chan_id : 5; /**< channel ID */ +#else + u8 inter_chan_id : 5; /**< channel ID */ + u8 pkg_id : 3; /**< packet ID */ +#endif +#ifdef BD_BIG_ENDIAN + u8 reserved1 : 4; /**< reserved1 */ + u8 payload_len_hi : 4; /**< payload len have 12bits */ +#else + u8 payload_len_hi : 4; /**< payload len have 12bits */ + u8 reserved1 : 4; /**< reserved1 */ +#endif + u8 payload_len_lo; /**< payload len lo */ + u32 reserved2; /**< reserved2 */ + u32 reserved3; /**< reserved3 */ +}; + +#define NCSI_MAX_PAYLOAD_LEN 1500 +#define NCSI_MAC_LEN 6 + +/** + * @brief ncsi clear initial state command struct defination + * + */ +struct tag_ncsi_ctrl_packet { + struct tag_ncsi_ctrl_packet_header packet_head; /**< ncsi ctrl packet header */ + u8 payload[NCSI_MAX_PAYLOAD_LEN]; /**< ncsi ctrl packet payload */ +}; + +/** + * @brief ethernet header description + * + */ +struct tag_ethernet_header { + u8 dst_addr[NCSI_MAC_LEN]; /**< ethernet destination address */ + u8 src_addr[NCSI_MAC_LEN]; /**< ethernet source address */ + u16 ether_type; /**< ethernet type */ +}; + +/** + * @brief 
ncsi common packet description + * + */ +struct tg_ncsi_common_packet { + struct tag_ethernet_header frame_head; /**< common packet ethernet frame header */ + struct tag_ncsi_ctrl_packet ctrl_packet; /**< common packet ncsi ctrl packet */ +}; + +/** + * @brief ncsi clear initial state command struct defination + */ +struct tag_ncsi_client_info { + u8 *name; /**< client info client name */ + u32 type; /**< client info type of ncsi media @see enum NCSI_CLIENT_TYPE_E */ + u8 bmc_mac[NCSI_MAC_LEN]; /**< client info BMC mac addr */ + u8 ncsi_mac[NCSI_MAC_LEN]; /**< client info local mac addr */ + u8 reserve[2]; /**< client info reserved, Four-byte alignment */ + u32 rsp_len; /**< client info include pad */ + struct tg_ncsi_common_packet ncsi_packet_rsp; /**< ncsi common packet response */ +}; + +/* AEN Enable Command (0x08) */ +#define AEN_ENABLE_REQ_LEN 8 +#define AEN_ENABLE_RSP_LEN 4 +#define AEN_CTRL_LINK_STATUS_SHIFT 0 +#define AEN_CTRL_CONFIG_REQ_SHIFT 1 +#define AEN_CTRL_DRV_CHANGE_SHIFT 2 + +/* get link status 0x0A */ +#define GET_LINK_STATUS_REQ_LEN 0 +#define GET_LINK_STATUS_RSP_LEN 16 +/* link speed(fc link speed is mapped to unknown) */ +enum NCSI_CMD_LINK_SPEED_E { + LINK_SPEED_10M = 0x2, /**< 10M */ + LINK_SPEED_100M = 0x5, /**< 100M */ + LINK_SPEED_1G = 0x7, /**< 1G */ + LINK_SPEED_10G = 0x8, /**< 10G */ + LINK_SPEED_20G = 0x9, /**< 20G */ + LINK_SPEED_25G = 0xa, /**< 25G */ + LINK_SPEED_40G = 0xb, /**< 40G */ + LINK_SPEED_50G = 0xc, /**< 50G */ + LINK_SPEED_100G = 0xd, /**< 100G */ + LINK_SPEED_2_5G = 0xe, /**< 2.5G */ + LINK_SPEED_UNKNOWN = 0xf +}; + +/* Set Vlan Filter (0x0B) */ +/* Only VLAN-tagged packets that match the enabled VLAN Filter settings are accepted. */ +#define VLAN_MODE_UNSET 0X00 +#define VLAN_ONLY 0x01 +/* if match the MAC address ,any vlan-tagged and non-vlan-tagged will be accepted */ +#define ANYVLAN_NONVLAN 0x03 +#define VLAN_MODE_SUPPORT 0x05 + +/* chanel vlan filter enable */ +#define CHNL_VALN_FL_ENABLE 0x01 +#define CHNL_VALN_FL_DISABLE 0x00 + +/* vlan id invalid */ +#define VLAN_ID_VALID 0x01 +#define VLAN_ID_INVALID 0x00 + +/* VLAN ID */ +#define SET_VLAN_FILTER_REQ_LEN 8 +#define SET_VLAN_FILTER_RSP_LEN 4 + +/* ncsi_get_controller_packet_statistics_config */ +#define NO_INFORMATION_STATISTICS 0xff + +/* Enable VLAN Command (0x0C) */ +#define ENABLE_VLAN_REQ_LEN 4 +#define ENABLE_VLAN_RSP_LEN 4 +#define VLAN_FL_MAX_ID 8 + +/* NCSI channel capabilities */ +struct tag_ncsi_chan_capa { + u32 capa_flags; /**< NCSI channel capabilities capa flags */ + u32 bcast_filter; /**< NCSI channel capabilities bcast filter */ + u32 multicast_filter; /**< NCSI channel capabilities multicast filter */ + u32 buffering; /**< NCSI channel capabilities buffering */ + u32 aen_ctrl; /**< NCSI channel capabilities aen ctrl */ + u8 vlan_count; /**< NCSI channel capabilities vlan count */ + u8 mixed_count; /**< NCSI channel capabilities mixed count */ + u8 multicast_count; /**< NCSI channel capabilities multicast count */ + u8 unicast_count; /**< NCSI channel capabilities unicast count */ + u16 rsvd; /**< NCSI channel capabilities reserved */ + u8 vlan_mode; /**< NCSI channel capabilities vlan mode */ + u8 chan_count; /**< NCSI channel capabilities channel count */ +}; + +struct tg_g_ncsi_parameters { + u8 mac_address_count; + u8 reserved1[2]; + u8 mac_address_flags; + u8 vlan_tag_count; + u8 reserved2; + u16 vlan_tag_flags; + u32 link_settings; + u32 broadcast_packet_filter_settings; + u8 broadcast_packet_filter_status : 1; + u8 channel_enable : 1; + u8 
channel_network_tx_enable : 1; + u8 global_mulicast_packet_filter_status : 1; + /**< bit0-3:mac_add0——mac_add3 address type:0 unicast,1 multileaving */ + u8 config_flags_reserved1 : 4; + u8 config_flags_reserved2[3]; + u8 vlan_mode; /**< current vlan mode */ + u8 flow_control_enable; + u16 reserved3; + u32 AEN_control; + u8 mac_add[4][6]; + u16 vlan_tag[VLAN_FL_MAX_ID]; +}; + +#pragma pack() + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/include/mpu/nic_cfg_comm.h b/drivers/net/ethernet/huawei/hinic3/include/mpu/nic_cfg_comm.h new file mode 100644 index 000000000..fe663e1fb --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/include/mpu/nic_cfg_comm.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2024 Huawei Technologies Co., Ltd */ + +#ifndef NIC_CFG_COMM_H +#define NIC_CFG_COMM_H + +/* rss */ +#define HINIC3_RSS_TYPE_VALID_SHIFT 23 +#define HINIC3_RSS_TYPE_TCP_IPV6_EXT_SHIFT 24 +#define HINIC3_RSS_TYPE_IPV6_EXT_SHIFT 25 +#define HINIC3_RSS_TYPE_TCP_IPV6_SHIFT 26 +#define HINIC3_RSS_TYPE_IPV6_SHIFT 27 +#define HINIC3_RSS_TYPE_TCP_IPV4_SHIFT 28 +#define HINIC3_RSS_TYPE_IPV4_SHIFT 29 +#define HINIC3_RSS_TYPE_UDP_IPV6_SHIFT 30 +#define HINIC3_RSS_TYPE_UDP_IPV4_SHIFT 31 + +#define HINIC3_RSS_TYPE_SET(val, member) (((u32)(val) & 0x1) << HINIC3_RSS_TYPE_##member##_SHIFT) +#define HINIC3_RSS_TYPE_GET(val, member) (((u32)(val) >> HINIC3_RSS_TYPE_##member##_SHIFT) & 0x1) + +enum nic_rss_hash_type { + NIC_RSS_HASH_TYPE_XOR = 0, + NIC_RSS_HASH_TYPE_TOEP, + + NIC_RSS_HASH_TYPE_MAX /* MUST BE THE LAST ONE */ +}; + +#define NIC_RSS_INDIR_SIZE 256 +#define NIC_RSS_KEY_SIZE 40 + +/* * + * Definition of the NIC receiving mode + */ +#define NIC_RX_MODE_UC 0x01 +#define NIC_RX_MODE_MC 0x02 +#define NIC_RX_MODE_BC 0x04 +#define NIC_RX_MODE_MC_ALL 0x08 +#define NIC_RX_MODE_PROMISC 0x10 + +/* IEEE 802.1Qaz std */ +#define NIC_DCB_COS_MAX 0x8 +#define NIC_DCB_UP_MAX 0x8 +#define NIC_DCB_TC_MAX 0x8 +#define NIC_DCB_PG_MAX 0x8 +#define NIC_DCB_TSA_SP 0x0 +#define NIC_DCB_TSA_CBS 0x1 /* hi1822 do NOT support */ +#define NIC_DCB_TSA_ETS 0x2 +#define NIC_DCB_DSCP_NUM 0x8 +#define NIC_DCB_IP_PRI_MAX 0x40 + +#define NIC_DCB_PRIO_DWRR 0x0 +#define NIC_DCB_PRIO_STRICT 0x1 + +#define NIC_DCB_MAX_PFC_NUM 0x4 +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/include/ossl_types.h b/drivers/net/ethernet/huawei/hinic3/include/ossl_types.h new file mode 100644 index 000000000..c646e7c46 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/include/ossl_types.h @@ -0,0 +1,144 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2024 Huawei Technologies Co., Ltd */ + +#ifndef _OSSL_TYPES_H +#define _OSSL_TYPES_H + +#undef NULL +#if defined(__cplusplus) +#define NULL 0 +#else +#define NULL ((void *)0) +#endif + +#if defined(__LINUX__) +#ifdef __USER__ /* linux user */ +#if defined(__ia64__) || defined(__x86_64__) || defined(__aarch64__) +#define s64 long +#define u64 unsigned long +#else +#define s64 long long +#define u64 unsigned long long +#endif +#define s32 int +#define u32 unsigned int +#define s16 short +#define u16 unsigned short + +#ifdef __hinic_arm__ +#define s8 signed char +#else +#define s8 char +#endif + +#ifndef dma_addr_t +typedef u64 dma_addr_t; +#endif + +#define u8 unsigned char +#define ulong unsigned long +#define uint unsigned int + +#define ushort unsigned short + +#endif +#endif + +#define uda_handle void * + +#define UDA_TRUE 1 +#define UDA_FALSE 0 + +#if defined(__USER__) || defined(USER) +#ifndef F_OK +#define F_OK 0 +#endif +#ifndef F_FAILED 
+#define F_FAILED (-1) +#endif + +#define uda_status int +#define TOOL_REAL_PATH_MAX_LEN 512 +#define SAFE_FUNCTION_ERR (-1) + +enum { + UDA_SUCCESS = 0x0, // run success + UDA_FAIL, // run failed + UDA_ENXIO, // no device + UDA_ENONMEM, // alloc memory failed + UDA_EBUSY, // card busy or restart + UDA_ECRC, // CRC check error + UDA_EINVAL, // invalid parameter + UDA_EFAULT, // invalid address + UDA_ELEN, // invalid length + UDA_ECMD, // error occurs when execute the cmd + UDA_ENODRIVER, // driver is not installed + UDA_EXIST, // has existed + UDA_EOVERSTEP, // over step + UDA_ENOOBJ, // have no object + UDA_EOBJ, // error object + UDA_ENOMATCH, // driver does not match to firmware + UDA_ETIMEOUT, // timeout + + UDA_CONTOP, + + UDA_REBOOT = 0xFD, + UDA_CANCEL = 0xFE, + UDA_KILLED = 0xFF, +}; + +enum { + UDA_FLOCK_NOBLOCK = 0, + UDA_FLOCK_BLOCK = 1, +}; + +/* array index */ +#define ARRAY_INDEX_0 0 +#define ARRAY_INDEX_1 1 +#define ARRAY_INDEX_2 2 +#define ARRAY_INDEX_3 3 +#define ARRAY_INDEX_4 4 +#define ARRAY_INDEX_5 5 +#define ARRAY_INDEX_6 6 +#define ARRAY_INDEX_7 7 +#define ARRAY_INDEX_8 8 +#define ARRAY_INDEX_12 12 +#define ARRAY_INDEX_13 13 + +/* define shift bits */ +#define SHIFT_BIT_1 1 +#define SHIFT_BIT_2 2 +#define SHIFT_BIT_3 3 +#define SHIFT_BIT_4 4 +#define SHIFT_BIT_6 6 +#define SHIFT_BIT_7 7 +#define SHIFT_BIT_8 8 +#define SHIFT_BIT_11 11 +#define SHIFT_BIT_12 12 +#define SHIFT_BIT_15 15 +#define SHIFT_BIT_16 16 +#define SHIFT_BIT_17 17 +#define SHIFT_BIT_19 19 +#define SHIFT_BIT_20 20 +#define SHIFT_BIT_23 23 +#define SHIFT_BIT_24 24 +#define SHIFT_BIT_25 25 +#define SHIFT_BIT_26 26 +#define SHIFT_BIT_28 28 +#define SHIFT_BIT_29 29 +#define SHIFT_BIT_32 32 +#define SHIFT_BIT_35 35 +#define SHIFT_BIT_37 37 +#define SHIFT_BIT_39 39 +#define SHIFT_BIT_40 40 +#define SHIFT_BIT_43 43 +#define SHIFT_BIT_48 48 +#define SHIFT_BIT_51 51 +#define SHIFT_BIT_56 56 +#define SHIFT_BIT_57 57 +#define SHIFT_BIT_59 59 +#define SHIFT_BIT_60 60 +#define SHIFT_BIT_61 61 + +#endif +#endif /* OSSL_TYPES_H */ diff --git a/drivers/net/ethernet/huawei/hinic3/include/public/npu_cmdq_base_defs.h b/drivers/net/ethernet/huawei/hinic3/include/public/npu_cmdq_base_defs.h new file mode 100644 index 000000000..78236c92a --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/include/public/npu_cmdq_base_defs.h @@ -0,0 +1,232 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2024 Huawei Technologies Co., Ltd */ + +#ifndef NPU_CMDQ_BASE_DEFS_H +#define NPU_CMDQ_BASE_DEFS_H + +/* CmdQ Common subtype */ +enum comm_cmdq_cmd { + COMM_CMD_UCODE_ARM_BIT_SET = 2, + COMM_CMD_SEND_NPU_DFT_CMD, +}; + +/* Cmdq ack type */ +enum hinic3_ack_type { + HINIC3_ACK_TYPE_CMDQ, + HINIC3_ACK_TYPE_SHARE_CQN, + HINIC3_ACK_TYPE_APP_CQN, + + HINIC3_MOD_ACK_MAX = 15, +}; + +/* Defines the queue type of the set arm bit. */ +enum { + SET_ARM_BIT_FOR_CMDQ = 0, + SET_ARM_BIT_FOR_L2NIC_SQ, + SET_ARM_BIT_FOR_L2NIC_RQ, + SET_ARM_BIT_TYPE_NUM +}; + +/* Defines the type. Each function supports a maximum of eight CMDQ types. */ +enum { + CMDQ_0 = 0, + CMDQ_1 = 1, /* dedicated and non-blocking queues */ + CMDQ_NUM +}; + +/* *******************cmd common command data structure ************************ */ +// Func->ucode, which is used to set arm bit data, +// The microcode needs to perform big-endian conversion. 
+struct comm_info_ucode_set_arm_bit { + u32 q_type; + u32 q_id; +}; + +/* *******************WQE data structure ************************ */ +union cmdq_wqe_cs_dw0 { + struct { + u32 err_status : 29; + u32 error_code : 2; + u32 rsvd : 1; + } bs; + u32 val; +}; + +union cmdq_wqe_cs_dw1 { + struct { + u32 token : 16; // [15:0] + u32 cmd : 8; // [23:16] + u32 mod : 5; // [28:24] + u32 ack_type : 2; // [30:29] + u32 obit : 1; // [31] + } drv_wr; // This structure is used when the driver writes the wqe. + + struct { + u32 mod : 5; // [4:0] + u32 ack_type : 3; // [7:5] + u32 cmd : 8; // [15:8] + u32 arm : 1; // [16] + u32 rsvd : 14; // [30:17] + u32 obit : 1; // [31] + } wb; + u32 val; +}; + +/* CmdQ BD information or write back buffer information */ +struct cmdq_sge { + u32 pa_h; // Upper 32 bits of the physical address + u32 pa_l; // Upper 32 bits of the physical address + u32 len; // Invalid bit[31]. + u32 resv; +}; + +/* Ctrls section definition of WQE */ +struct cmdq_wqe_ctrls { + union { + struct { + u32 bdsl : 8; // [7:0] + u32 drvsl : 2; // [9:8] + u32 rsv : 4; // [13:10] + u32 wf : 1; // [14] + u32 cf : 1; // [15] + u32 tsl : 5; // [20:16] + u32 va : 1; // [21] + u32 df : 1; // [22] + u32 cr : 1; // [23] + u32 difsl : 3; // [26:24] + u32 csl : 2; // [28:27] + u32 ctrlsl : 2; // [30:29] + u32 obit : 1; // [31] + } bs; + u32 val; + } header; + u32 qsf; +}; + +/* Complete section definition of WQE */ +struct cmdq_wqe_cs { + union cmdq_wqe_cs_dw0 dw0; + union cmdq_wqe_cs_dw1 dw1; + union { + struct cmdq_sge sge; + u32 dw2_5[4]; + } ack; +}; + +/* Inline header in WQE inline, describing the length of inline data */ +union cmdq_wqe_inline_header { + struct { + u32 buf_len : 11; // [10:0] inline data len + u32 rsv : 21; // [31:11] + } bs; + u32 val; +}; + +/* Definition of buffer descriptor section in WQE */ +union cmdq_wqe_bds { + struct { + struct cmdq_sge bds_sge; + u32 rsvd[4]; /* Zwy is used to transfer the virtual address of the buffer. */ + } lcmd; /* Long command, non-inline, and SGE describe the buffer information. 
*/ +}; + +/* Definition of CMDQ WQE */ +/* (long cmd, 64B) + * +----------------------------------------+ + * | ctrl section(8B) | + * +----------------------------------------+ + * | | + * | complete section(24B) | + * | | + * +----------------------------------------+ + * | | + * | buffer descriptor section(16B) | + * | | + * +----------------------------------------+ + * | driver section(16B) | + * +----------------------------------------+ + * + * + * (middle cmd, 128B) + * +----------------------------------------+ + * | ctrl section(8B) | + * +----------------------------------------+ + * | | + * | complete section(24B) | + * | | + * +----------------------------------------+ + * | | + * | buffer descriptor section(88B) | + * | | + * +----------------------------------------+ + * | driver section(8B) | + * +----------------------------------------+ + * + * + * (short cmd, 64B) + * +----------------------------------------+ + * | ctrl section(8B) | + * +----------------------------------------+ + * | | + * | complete section(24B) | + * | | + * +----------------------------------------+ + * | | + * | buffer descriptor section(24B) | + * | | + * +----------------------------------------+ + * | driver section(8B) | + * +----------------------------------------+ + */ +struct cmdq_wqe { + struct cmdq_wqe_ctrls ctrls; + struct cmdq_wqe_cs cs; + union cmdq_wqe_bds bds; +}; + +/* Definition of ctrls section in inline WQE */ +struct cmdq_wqe_ctrls_inline { + union { + struct { + u32 bdsl : 8; // [7:0] + u32 drvsl : 2; // [9:8] + u32 rsv : 4; // [13:10] + u32 wf : 1; // [14] + u32 cf : 1; // [15] + u32 tsl : 5; // [20:16] + u32 va : 1; // [21] + u32 df : 1; // [22] + u32 cr : 1; // [23] + u32 difsl : 3; // [26:24] + u32 csl : 2; // [28:27] + u32 ctrlsl : 2; // [30:29] + u32 obit : 1; // [31] + } bs; + u32 val; + } header; + u32 qsf; + u64 db; +}; + +/* Buffer descriptor section definition of WQE */ +union cmdq_wqe_bds_inline { + struct { + union cmdq_wqe_inline_header header; + u32 rsvd; + u8 data_inline[80]; + } mcmd; /* Middle command, inline mode */ + + struct { + union cmdq_wqe_inline_header header; + u32 rsvd; + u8 data_inline[16]; + } scmd; /* Short command, inline mode */ +}; + +struct cmdq_wqe_inline { + struct cmdq_wqe_ctrls_inline ctrls; + struct cmdq_wqe_cs cs; + union cmdq_wqe_bds_inline bds; +}; + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/include/readme.txt b/drivers/net/ethernet/huawei/hinic3/include/readme.txt new file mode 100644 index 000000000..895f213b5 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/include/readme.txt @@ -0,0 +1 @@ +本目录是业务内部共享接口 \ No newline at end of file diff --git a/drivers/net/ethernet/huawei/hinic3/include/vmsec/vmsec_mpu_common.h b/drivers/net/ethernet/huawei/hinic3/include/vmsec/vmsec_mpu_common.h new file mode 100644 index 000000000..d78dba862 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/include/vmsec/vmsec_mpu_common.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2024 Huawei Technologies Co., Ltd */ + +#ifndef VMSEC_MPU_COMMON_H +#define VMSEC_MPU_COMMON_H + +#include "mpu_cmd_base_defs.h" + +#define VM_GPA_INFO_MODE_MIG 0 +#define VM_GPA_INFO_MODE_NMIG 1 + +/** + * Commands between VMSEC to MPU + */ +enum tag_vmsec_mpu_cmd { + /* vmsec ctx gpa */ + VMSEC_MPU_CMD_CTX_GPA_SET = 0, + VMSEC_MPU_CMD_CTX_GPA_SHOW, + VMSEC_MPU_CMD_CTX_GPA_DEL, + + /* vmsec pci hole */ + VMSEC_MPU_CMD_PCI_HOLE_SET, + VMSEC_MPU_CMD_PCI_HOLE_SHOW, + VMSEC_MPU_CMD_PCI_HOLE_DEL, + + /* vmsec func cfg */ + 
VMSEC_MPU_CMD_FUN_CFG_ENTRY_IDX_SET, + VMSEC_MPU_CMD_FUN_CFG_ENTRY_IDX_SHOW, + + VMSEC_MPU_CMD_MAX +}; + +struct vmsec_ctx_gpa_entry { +#if defined(BYTE_ORDER) && (BYTE_ORDER == BIG_ENDIAN) + u32 func_id : 16; + u32 mode : 8; + u32 rsvd : 8; +#else + u32 rsvd : 8; + u32 mode : 8; + u32 func_id : 16; +#endif + + /* sml tbl to wr */ + u32 gpa_addr0_hi; + u32 gpa_addr0_lo; + u32 gpa_len0; +}; + +struct vmsec_pci_hole_idx { +#if defined(BYTE_ORDER) && (BYTE_ORDER == BIG_ENDIAN) + u32 entry_idx : 5; + u32 rsvd : 27; +#else + u32 rsvd : 27; + u32 entry_idx : 5; +#endif +}; + +struct vmsec_pci_hole_entry { + /* sml tbl to wr */ + /* pcie hole 32-bit region */ + u32 gpa_addr0_hi; + u32 gpa_addr0_lo; + u32 gpa_len0_hi; + u32 gpa_len0_lo; + + /* pcie hole 64-bit region */ + u32 gpa_addr1_hi; + u32 gpa_addr1_lo; + u32 gpa_len1_hi; + u32 gpa_len1_lo; + + /* ctrl info used by drv */ + u32 domain_id; /* unique vm id */ +#if defined(BYTE_ORDER) && (BYTE_ORDER == BIG_ENDIAN) + u32 rsvd1 : 21; + u32 vf_nums : 11; +#else + u32 rsvd1 : 21; + u32 vf_nums : 11; +#endif + u32 vroce_vf_bitmap; +}; + +struct vmsec_funcfg_info_entry { + /* funcfg to update */ +#if defined(BYTE_ORDER) && (BYTE_ORDER == BIG_ENDIAN) + u32 func_id : 16; + u32 entry_vld : 1; + u32 entry_idx : 5; + u32 rsvd : 10; +#else + u32 rsvd : 10; + u32 entry_idx : 5; + u32 entry_vld : 1; + u32 func_id : 16; +#endif +}; + +/* set/get/del */ +struct vmsec_cfg_ctx_gpa_entry_cmd { + struct comm_info_head head; + struct vmsec_ctx_gpa_entry entry; +}; + +#endif /* VMSEC_MPU_COMMON_H */ diff --git a/drivers/net/ethernet/huawei/hinic3/include/vram_common.h b/drivers/net/ethernet/huawei/hinic3/include/vram_common.h new file mode 100644 index 000000000..801aeed18 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/include/vram_common.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Huawei Technologies Co., Ltd */ + +#ifndef VRAM_COMMON_H +#define VRAM_COMMON_H + +#include <linux/pci.h> +#include <linux/notifier.h> + +#define VRAM_BLOCK_SIZE_2M 0x200000UL +#define KEXEC_SIGN "hinic-in-kexec" +// now vram_name max len is 14, when add other vram, attention this value +#define VRAM_NAME_MAX_LEN 16 + +#define VRAM_CQM_GLB_FUNC_BASE "F" +#define VRAM_CQM_FAKE_MEM_BASE "FK" +#define VRAM_CQM_CLA_BASE "C" +#define VRAM_CQM_CLA_TYPE_BASE "T" +#define VRAM_CQM_CLA_SMF_BASE "SMF" +#define VRAM_CQM_CLA_COORD_X "X" +#define VRAM_CQM_CLA_COORD_Y "Y" +#define VRAM_CQM_CLA_COORD_Z "Z" +#define VRAM_CQM_BITMAP_BASE "B" + +#define VRAM_NIC_DCB "DCB" +#define VRAM_NIC_VRAM "NIC_VRAM" + +#define VRAM_VBS_BASE_IOCB "BASE_IOCB" +#define VRAM_VBS_EX_IOCB "EX_IOCB" +#define VRAM_VBS_RXQS_CQE "RXQS_CQE" + +#define VRAM_VBS_VOLQ_MTT "VOLQ_MTT" +#define VRAM_VBS_VOLQ_MTT_PAGE "MTT_PAGE" + +#define VRAM_VROCE_ENTRY_POOL "VROCE_ENTRY" +#define VRAM_VROCE_GROUP_POOL "VROCE_GROUP" +#define VRAM_VROCE_UUID "VROCE_UUID" +#define VRAM_VROCE_VID "VROCE_VID" +#define VRAM_VROCE_BASE "VROCE_BASE" +#define VRAM_VROCE_DSCP "VROCE_DSCP" +#define VRAM_VROCE_QOS "VROCE_QOS" +#define VRAM_VROCE_DEV "VROCE_DEV" +#define VRAM_VROCE_RGROUP_HT_CNT "RGROUP_CNT" +#define VRAM_VROCE_RACL_HT_CNT "RACL_CNT" + +#define VRAM_NAME_APPLY_LEN 64 + +#define MPU_OS_HOTREPLACE_FLAG 0x1 +struct vram_buf_info { + char buf_vram_name[VRAM_NAME_APPLY_LEN]; + int use_vram; +}; + +enum KUP_HOOK_POINT { + PRE_FREEZE, + FREEZE_TO_KILL, + PRE_UPDATE_KERNEL, + FLUSH_DURING_KUP, + POST_UPDATE_KERNEL, + UNFREEZE_TO_RUN, + POST_RUN, + KUP_HOOK_MAX, +}; + +#endif /* VRAM_COMMON_H */ diff --git 
a/drivers/net/ethernet/huawei/hinic3/mag_cmd.h b/drivers/net/ethernet/huawei/hinic3/mag_cmd.h index 799f0b38a..964950361 100644 --- a/drivers/net/ethernet/huawei/hinic3/mag_cmd.h +++ b/drivers/net/ethernet/huawei/hinic3/mag_cmd.h @@ -1,73 +1,10 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. - * Description: serdes/mag cmd definition between driver and mpu - * Author: ETH group - * Create: 2021-07-30 - */ - -#ifndef MAG_CMD_H -#define MAG_CMD_H - -#include "mgmt_msg_base.h" - -/* serdes/mag消息命令字定义 */ -enum mag_cmd { - /* serdes命令字,统一封装所有serdes命令 */ - SERDES_CMD_PROCESS = 0, - - /* mag命令字,按功能划分 */ - /* 端口配置相关 0-29 */ - MAG_CMD_SET_PORT_CFG = 1, - MAG_CMD_SET_PORT_ADAPT = 2, - MAG_CMD_CFG_LOOPBACK_MODE = 3, - - MAG_CMD_GET_PORT_ENABLE = 5, - MAG_CMD_SET_PORT_ENABLE = 6, - MAG_CMD_GET_LINK_STATUS = 7, - MAG_CMD_SET_LINK_FOLLOW = 8, - MAG_CMD_SET_PMA_ENABLE = 9, - MAG_CMD_CFG_FEC_MODE = 10, - - MAG_CMD_CFG_AN_TYPE = 12, /* reserved for future use */ - MAG_CMD_CFG_LINK_TIME = 13, - - MAG_CMD_SET_PANGEA_ADAPT = 15, - - /* bios link配置相关 30-49 */ - MAG_CMD_CFG_BIOS_LINK_CFG = 31, - MAG_CMD_RESTORE_LINK_CFG = 32, - MAG_CMD_ACTIVATE_BIOS_LINK_CFG = 33, - - /* 光模块、LED、PHY等外设配置管理 50-99 */ - /* LED */ - MAG_CMD_SET_LED_CFG = 50, - - /* PHY */ - MAG_CMD_GET_PHY_INIT_STATUS = 55, /* reserved for future use */ - - /* 光模块 */ - MAG_CMD_GET_XSFP_INFO = 60, - MAG_CMD_SET_XSFP_ENABLE = 61, - MAG_CMD_GET_XSFP_PRESENT = 62, - MAG_CMD_SET_XSFP_RW = 63, /* sfp/qsfp single byte read/write, for equipment test */ - MAG_CMD_CFG_XSFP_TEMPERATURE = 64, - - /* 事件上报 100-149 */ - MAG_CMD_WIRE_EVENT = 100, - MAG_CMD_LINK_ERR_EVENT = 101, +/* Copyright(c) 2024 Huawei Technologies Co., Ltd */
- /* DFX、Counter相关 */ - MAG_CMD_EVENT_PORT_INFO = 150, - MAG_CMD_GET_PORT_STAT = 151, - MAG_CMD_CLR_PORT_STAT = 152, - MAG_CMD_GET_PORT_INFO = 153, - MAG_CMD_GET_PCS_ERR_CNT = 154, - MAG_CMD_GET_MAG_CNT = 155, - MAG_CMD_DUMP_ANTRAIN_INFO = 156, +#ifndef MAG_MPU_CMD_DEFS_H +#define MAG_MPU_CMD_DEFS_H
- MAG_CMD_MAX = 0xFF -}; +#include "mpu_cmd_base_defs.h"
/* serdes cmd struct define */ #define CMD_ARRAY_BUF_SIZE 64 @@ -352,6 +289,14 @@ struct mag_cmd_get_link_status { u8 rsvd0[2]; };
+/* The firmware also uses this cmd to report bond events to the driver */ +struct mag_cmd_get_bond_status { + struct mgmt_msg_head head; + + u8 status; /* 0:bond down 1:bond up */ + u8 rsvd0[3]; +}; + struct mag_cmd_set_pma_enable { struct mgmt_msg_head head;
@@ -389,6 +334,75 @@ struct mag_cmd_cfg_fec_mode { u8 rsvd0; };
+/* speed */ +#define PANGEA_ADAPT_10G_BITMAP 0xd +#define PANGEA_ADAPT_25G_BITMAP 0x72 +#define PANGEA_ADAPT_40G_BITMAP 0x680 +#define PANGEA_ADAPT_100G_BITMAP 0x1900 + +/* speed and fec */ +#define PANGEA_10G_NO_BITMAP 0x8 +#define PANGEA_10G_BASE_BITMAP 0x4 +#define PANGEA_25G_NO_BITMAP 0x10 +#define PANGEA_25G_BASE_BITMAP 0x20 +#define PANGEA_25G_RS_BITMAP 0x40 +#define PANGEA_40G_NO_BITMAP 0x400 +#define PANGEA_40G_BASE_BITMAP 0x200 +#define PANGEA_100G_NO_BITMAP 0x800 +#define PANGEA_100G_RS_BITMAP 0x1000 + +/* adapt or fec */ +#define PANGEA_ADAPT_ADAPT_BITMAP 0x183 +#define PANGEA_ADAPT_NO_BITMAP 0xc18 +#define PANGEA_ADAPT_BASE_BITMAP 0x224 +#define PANGEA_ADAPT_RS_BITMAP 0x1040 + +/* default cfg */ +#define PANGEA_ADAPT_CFG_10G_CR 0x200d +#define PANGEA_ADAPT_CFG_10G_SRLR 0xd +#define PANGEA_ADAPT_CFG_25G_CR 0x207f +#define PANGEA_ADAPT_CFG_25G_SRLR 0x72 +#define PANGEA_ADAPT_CFG_40G_CR4 0x2680 +#define PANGEA_ADAPT_CFG_40G_SRLR4 0x680 +#define PANGEA_ADAPT_CFG_100G_CR4 0x3f80 +#define PANGEA_ADAPT_CFG_100G_SRLR4 0x1900 + +union pangea_adapt_bitmap_u { + struct { + u32 adapt_10g : 1; /* [0] adapt_10g */ + u32 adapt_25g : 1; /* [1] adapt_25g */ + u32 base_10g : 1; /* [2] base_10g */ + u32 no_10g : 1; /* [3] no_10g */ + u32 no_25g : 1; /* [4] no_25g */ + u32 base_25g : 1; /* [5] base_25g */ + u32 rs_25g : 1; /* [6] rs_25g */ + u32 adapt_40g : 1; /* [7] adapt_40g */ + u32 adapt_100g : 1; /* [8] adapt_100g */ + u32 base_40g : 1; /* [9] base_40g */ + u32 no_40g : 1; /* [10] no_40g */ + u32 no_100g : 1; /* [11] no_100g */ + u32 rs_100g : 1; /* [12] rs_100g */ + u32 auto_neg : 1; /* [13] auto_neg */ + u32 rsvd0 : 18; /* [31:14] reserved */ + } bits; + + u32 value; +}; + +#define PANGEA_ADAPT_GET 0x0 +#define PANGEA_ADAPT_SET 0x1 +struct mag_cmd_set_pangea_adapt { + struct mgmt_msg_head head; + + u16 port_id; + u8 opcode; /* 0:get adapt info 1:cfg adapt info */ + u8 wire_type; + + union pangea_adapt_bitmap_u cfg_bitmap; + union pangea_adapt_bitmap_u cur_bitmap; + u32 rsvd1[3]; +}; + struct mag_cmd_cfg_bios_link_cfg { struct mgmt_msg_head head;
@@ -621,22 +635,21 @@ struct mag_cmd_event_port_info { u8 event_type; u8 rsvd0[2];
- // 光模块相关 u8 vendor_name[XSFP_VENDOR_NAME_LEN]; - u32 port_type; /* fiber / copper */ - u32 port_sub_type; /* sr / lr */ - u32 cable_length; /* 1/3/5m */ - u8 cable_temp; /* 温度 */ - u8 max_speed; /* 光模块最大速率 */ - u8 sfp_type; /* sfp/qsfp */ + u32 port_type; /* fiber / copper */ + u32 port_sub_type; /* sr / lr */ + u32 cable_length; /* 1/3/5m */ + u8 cable_temp; /* temperature */ + u8 max_speed; /* maximum optical module speed */ + u8 sfp_type; /* sfp/qsfp */ u8 rsvd1; - u32 power[4]; /* 光功率 */ + u32 power[4]; /* optical power */
u8 an_state; u8 fec; u16 speed;
- u8 gpio_insert; /* 0:present 1:absent */ + u8 gpio_insert; /* 0:present 1:absent */ u8 alos; u8 rx_los; u8 pma_ctrl; @@ -755,6 +768,98 @@ struct mag_cmd_port_stats { u64 mac_rx_unfilter_pkt_num; };
+struct mag_port_stats { + u64 tx_frag_pkts_port; + u64 tx_under_frame_pkts_port; + u64 tx_under_min_pkts_port; + u64 tx_64_oct_pkts_port; + u64 tx_127_oct_pkts_port; + u64 tx_255_oct_pkts_port; + u64 tx_511_oct_pkts_port; + u64 tx_1023_oct_pkts_port; + u64 tx_1518_oct_pkts_port; + u64 tx_2047_oct_pkts_port; + u64 tx_4095_oct_pkts_port; + u64 tx_8191_oct_pkts_port; + u64 tx_9216_oct_pkts_port; + u64 tx_12287_oct_pkts_port; + u64 tx_16383_oct_pkts_port; + u64 tx_1519_to_max_bad_pkts_port; + u64 tx_1519_to_max_good_pkts_port; + u64 tx_oversize_pkts_port; + u64 tx_jabber_pkts_port; + u64 tx_bad_pkts_port; + u64 tx_bad_octs_port; + u64 tx_good_pkts_port; + u64 tx_good_octs_port; + u64 tx_total_pkts_port; + u64 tx_total_octs_port; + u64 tx_unicast_pkts_port; + u64 tx_multicast_pkts_port; + u64 tx_broadcast_pkts_port; + u64 tx_pause_pkts_port; + u64 tx_pfc_pkts_port; + u64 tx_pri_0_pkts_port; + u64 tx_pri_1_pkts_port; + u64 tx_pri_2_pkts_port; + u64 tx_pri_3_pkts_port; + u64 tx_pri_4_pkts_port; + u64 tx_pri_5_pkts_port; + u64 tx_pri_6_pkts_port; + u64 tx_pri_7_pkts_port; + u64 tx_mac_control_pkts_port; + u64 tx_y1731_pkts_port; + u64 tx_1588_pkts_port; + u64 tx_error_pkts_port; + u64 tx_app_good_pkts_port; + u64 tx_app_bad_pkts_port; + u64 rx_frag_pkts_port; + u64 rx_under_frame_pkts_port; + u64 rx_under_min_pkts_port; + u64 rx_64_oct_pkts_port; + u64 rx_127_oct_pkts_port; + u64 rx_255_oct_pkts_port; + u64 rx_511_oct_pkts_port; + u64 rx_1023_oct_pkts_port; + u64 rx_1518_oct_pkts_port; + u64 rx_2047_oct_pkts_port; + u64 rx_4095_oct_pkts_port; + u64 rx_8191_oct_pkts_port; + u64 rx_9216_oct_pkts_port; + u64 rx_12287_oct_pkts_port; + u64 rx_16383_oct_pkts_port; + u64 rx_1519_to_max_bad_pkts_port; + u64 rx_1519_to_max_good_pkts_port; + u64 rx_oversize_pkts_port; + u64 rx_jabber_pkts_port; + u64 rx_bad_pkts_port; + u64 rx_bad_octs_port; + u64 rx_good_pkts_port; + u64 rx_good_octs_port; + u64 rx_total_pkts_port; + u64 rx_total_octs_port; + u64 rx_unicast_pkts_port; + u64 rx_multicast_pkts_port; + u64 rx_broadcast_pkts_port; + u64 rx_pause_pkts_port; + u64 rx_pfc_pkts_port; + u64 rx_pri_0_pkts_port; + u64 rx_pri_1_pkts_port; + u64 rx_pri_2_pkts_port; + u64 rx_pri_3_pkts_port; + u64 rx_pri_4_pkts_port; + u64 rx_pri_5_pkts_port; + u64 rx_pri_6_pkts_port; + u64 rx_pri_7_pkts_port; + u64 rx_mac_control_pkts_port; + u64 rx_y1731_pkts_port; + u64 rx_sym_err_pkts_port; + u64 rx_fcs_err_pkts_port; + u64 rx_app_good_pkts_port; + u64 rx_app_bad_pkts_port; + u64 rx_unfilter_pkts_port; +}; + struct mag_cmd_port_stats_info { struct mgmt_msg_head head;
@@ -799,20 +904,19 @@ struct mag_cmd_dump_antrain_info { };
#define MAG_SFP_PORT_NUM 24 -/* 芯片光模块温度结构体定义 */ struct mag_cmd_sfp_temp_in_info { struct mgmt_msg_head head; /* 8B */ - u8 opt_type; /* 0:read operation 1:cfg operation */ + u8 opt_type; /* 0:read operation 1:cfg operation */ u8 rsv[3]; - s32 max_temp; /* 芯片光模块阈值 */ - s32 min_temp; /* 芯片光模块阈值 */ + s32 max_temp; /* optical module upper temperature threshold */ + s32 min_temp; /* optical module lower temperature threshold */ };
struct mag_cmd_sfp_temp_out_info { - struct mgmt_msg_head head; /* 8B */ - s16 sfp_temp_data[MAG_SFP_PORT_NUM]; /* 读出的温度 */ - s32 max_temp; /* 芯片光模块阈值 */ - s32 min_temp; /* 芯片光模块阈值 */ + struct mgmt_msg_head head; /* 8B */ + s16 sfp_temp_data[MAG_SFP_PORT_NUM]; /* temperatures read back per port */ + s32 max_temp; /* optical module upper temperature threshold */ + s32 min_temp; /* optical module lower temperature threshold */ };
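For reference, a threshold-configuration request built from the two structures above could look like the sketch below. This is illustrative only: send_to_mpu() is a stand-in for whatever management-channel helper the driver actually uses, and MAG_CMD_CFG_XSFP_TEMPERATURE is assumed to remain available from the relocated command definitions.

    /* Illustrative only: send_to_mpu() is a placeholder for the real
     * management channel; error handling is reduced to the return code.
     */
    static int example_set_sfp_temp_threshold(void *hwdev, s32 max_temp, s32 min_temp)
    {
            struct mag_cmd_sfp_temp_in_info in = { 0 };

            in.opt_type = 1;        /* 1: cfg operation */
            in.max_temp = max_temp;
            in.min_temp = min_temp;

            return send_to_mpu(hwdev, MAG_CMD_CFG_XSFP_TEMPERATURE,
                               &in, sizeof(in), NULL, 0);
    }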
#endif diff --git a/drivers/net/ethernet/huawei/hinic3/ossl_knl.h b/drivers/net/ethernet/huawei/hinic3/ossl_knl.h index d5d1b3c7a..bb658cba8 100644 --- a/drivers/net/ethernet/huawei/hinic3/ossl_knl.h +++ b/drivers/net/ethernet/huawei/hinic3/ossl_knl.h @@ -5,6 +5,7 @@ #define OSSL_KNL_H
#include "ossl_knl_linux.h" +#include <linux/types.h>
#define sdk_err(dev, format, ...) dev_err(dev, "[COMM]" format, ##__VA_ARGS__) #define sdk_warn(dev, format, ...) dev_warn(dev, "[COMM]" format, ##__VA_ARGS__) @@ -32,4 +33,7 @@ #define USEC_PER_MSEC 1000L #define MSEC_PER_SEC 1000L
+/* Waiting for 50 us */ +#define WAIT_USEC_50 50L + #endif /* OSSL_KNL_H */ diff --git a/drivers/net/ethernet/huawei/hinic3/ossl_knl_linux.h b/drivers/net/ethernet/huawei/hinic3/ossl_knl_linux.h index 8bfc4cf6e..78a8d7f20 100644 --- a/drivers/net/ethernet/huawei/hinic3/ossl_knl_linux.h +++ b/drivers/net/ethernet/huawei/hinic3/ossl_knl_linux.h @@ -31,6 +31,17 @@ #define __GFP_COMP 0 #endif
+#define ossl_get_free_pages __get_free_pages + +#ifndef high_16_bits +#define low_16_bits(x) ((x) & 0xFFFF) +#define high_16_bits(x) (((x) & 0xFFFF0000) >> 16) +#endif + +#ifndef U8_MAX +#define U8_MAX 0xFF +#endif + #define ETH_TYPE_TRANS_SETS_DEV #define HAVE_NETDEV_STATS_IN_NETDEV
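The low_16_bits()/high_16_bits() fallbacks simply split a 32-bit word into its halves. A purely illustrative use follows; the value and field names are invented for the example.

    u32 ci_value = 0x00120034;              /* example raw register value */
    u16 queue_id = high_16_bits(ci_value);  /* 0x0012 */
    u16 ci       = low_16_bits(ci_value);   /* 0x0034 */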
@@ -162,10 +173,12 @@ static inline void *_hinic3_dma_zalloc_coherent(struct device *dev, } #endif
+#ifndef DT_KNL_EMU struct timeval { __kernel_old_time_t tv_sec; /* seconds */ __kernel_suseconds_t tv_usec; /* microseconds */ }; +#endif
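The struct timeval above exists only so legacy code keeps compiling on kernels that removed it; the _kc_do_gettimeofday() helper it is paired with (see below) presumably fills it from the y2038-safe clock. A minimal sketch under that assumption; the real body lives in ossl_knl_linux.c and may differ.

    /* Sketch only: convert the 64-bit real-time clock into the legacy layout. */
    static inline void example_gettimeofday(struct timeval *tv)
    {
            struct timespec64 ts;

            ktime_get_real_ts64(&ts);
            tv->tv_sec  = ts.tv_sec;
            tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
    }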
#ifndef do_gettimeofday #define do_gettimeofday(time) _kc_do_gettimeofday(time) @@ -199,12 +212,62 @@ static inline void _kc_do_gettimeofday(struct timeval *tv)
#define HAVE_XDP_FRAME_SZ
+/* This defines the direction arg to the DMA mapping routines. */ +#define PCI_DMA_BIDIRECTIONAL 0 +#define PCI_DMA_TODEVICE 1 +#define PCI_DMA_FROMDEVICE 2 +#define PCI_DMA_NONE 3 + +static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, + size_t size, int direction) +{ + return dma_map_single(!hwdev ? NULL : &hwdev->dev, ptr, size, + (enum dma_data_direction)direction); +} + +static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, + size_t size, int direction) +{ + dma_unmap_single(!hwdev ? NULL : &hwdev->dev, dma_addr, size, + (enum dma_data_direction)direction); +} + +static inline int pci_dma_mapping_error(struct pci_dev *hwdev, dma_addr_t dma_addr) +{ + return dma_mapping_error(!hwdev ? NULL : &hwdev->dev, dma_addr); +} + +static inline void *pci_alloc_consistent(struct pci_dev *hwdev, + size_t size, dma_addr_t *dma_handle) +{ + return dma_alloc_coherent(!hwdev ? NULL : &hwdev->dev, + size, dma_handle, GFP_ATOMIC); +} + +static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size, + void *vaddr, dma_addr_t dma_handle) +{ + dma_free_coherent(!hwdev ? NULL : &hwdev->dev, + size, vaddr, dma_handle); +} + #define HAVE_DEVLINK_FW_FILE_NAME_MEMBER
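These wrappers re-create the legacy pci_* DMA API on top of the generic dma_* API for kernels where the former has been removed. A hedged usage sketch follows; pdev, buf and len are placeholders, not names from this patch.

    /* Map a driver buffer for a device read, hand it to hardware, then unmap. */
    static int example_dma_to_device(struct pci_dev *pdev, void *buf, size_t len)
    {
            dma_addr_t dma;

            dma = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
            if (pci_dma_mapping_error(pdev, dma))
                    return -ENOMEM;

            /* ... point the hardware at "dma" and wait for the transfer ... */

            pci_unmap_single(pdev, dma, len, PCI_DMA_TODEVICE);
            return 0;
    }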
#define HAVE_ENCAPSULATION_TSO
#define HAVE_ENCAPSULATION_CSUM
+#ifndef eth_zero_addr +static inline void hinic3_eth_zero_addr(u8 *addr) +{ + memset(addr, 0x00, ETH_ALEN); +} + +#define eth_zero_addr(_addr) hinic3_eth_zero_addr(_addr) +#endif + #ifndef netdev_hw_addr_list_for_each #define netdev_hw_addr_list_for_each(ha, l) \ list_for_each_entry(ha, &(l)->list, list) @@ -222,6 +285,11 @@ u32 get_file_size(struct file *file_handle);
void set_file_position(struct file *file_handle, u32 position);
+int file_read(struct file *file_handle, char *log_buffer, u32 rd_length, + u32 *file_pos); + +u32 file_write(struct file *file_handle, const char *log_buffer, u32 wr_length); + struct sdk_thread_info { struct task_struct *thread_obj; char *name; @@ -242,7 +310,7 @@ void utctime_to_localtime(u64 utctime, u64 *localtime); void initialize_timer(const void *adapter_hdl, struct timer_list *timer); #endif
-void add_to_timer(struct timer_list *timer, long period); +void add_to_timer(struct timer_list *timer, u64 period); void stop_timer(struct timer_list *timer); void delete_timer(struct timer_list *timer); u64 ossl_get_real_time(void);
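add_to_timer() now takes a u64 period. Assuming the period is in milliseconds and the helper simply re-arms the timer, its behaviour can be pictured as the sketch below; the actual implementation is in ossl_knl_linux.c and may differ.

    /* Sketch of the assumed behaviour, not the driver's actual code. */
    static void example_add_to_timer(struct timer_list *timer, u64 period_ms)
    {
            mod_timer(timer, jiffies + msecs_to_jiffies((unsigned int)period_ms));
    }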